Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGitLab Bot <gitlab-bot@gitlab.com>2022-12-20 21:09:05 +0300
committerGitLab Bot <gitlab-bot@gitlab.com>2022-12-20 21:09:05 +0300
commit883d5720994852248f18cb3053dc9f053f28d6f9 (patch)
tree409c976ddc659f34afaae3b2e97f1d0325f6455c /workhorse
parent5e97da08cba997aefba6f6d13850f95536a80477 (diff)
Add latest changes from gitlab-org/gitlab@master
Diffstat (limited to 'workhorse')
-rw-r--r--workhorse/Makefile7
-rwxr-xr-xworkhorse/_support/detect-external-tests.sh11
-rw-r--r--workhorse/internal/upload/destination/destination_test.go69
-rw-r--r--workhorse/internal/upload/destination/objectstore/gocloud_object_test.go11
-rw-r--r--workhorse/internal/upload/destination/objectstore/multipart.go10
-rw-r--r--workhorse/internal/upload/destination/objectstore/multipart_test.go5
-rw-r--r--workhorse/internal/upload/destination/objectstore/object_test.go11
-rw-r--r--workhorse/internal/upload/destination/objectstore/s3_complete_multipart_api.go38
-rw-r--r--workhorse/internal/upload/destination/objectstore/s3_object_test.go11
-rw-r--r--workhorse/internal/upload/destination/objectstore/s3api/s3api.go37
-rw-r--r--workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go10
-rw-r--r--workhorse/internal/upload/destination/upload_opts_test.go23
-rw-r--r--workhorse/internal/upload/object_storage_preparer_test.go7
-rw-r--r--workhorse/internal/zipartifacts/metadata_test.go12
14 files changed, 139 insertions, 123 deletions
diff --git a/workhorse/Makefile b/workhorse/Makefile
index a0412f5e2e1..4236a1a0d8e 100644
--- a/workhorse/Makefile
+++ b/workhorse/Makefile
@@ -144,7 +144,7 @@ testdata/scratch:
mkdir -p testdata/scratch
.PHONY: verify
-verify: lint vet detect-context detect-assert check-formatting staticcheck deps-check
+verify: lint vet detect-context detect-assert detect-external-tests check-formatting staticcheck deps-check
.PHONY: lint
lint:
@@ -167,6 +167,11 @@ detect-assert:
$(call message,Verify: $@)
_support/detect-assert.sh
+.PHONY: detect-external-tests
+detect-external-tests:
+ $(call message,Verify: $@)
+ _support/detect-external-tests.sh
+
.PHONY: check-formatting
check-formatting: install-goimports
$(call message,Verify: $@)
diff --git a/workhorse/_support/detect-external-tests.sh b/workhorse/_support/detect-external-tests.sh
new file mode 100755
index 00000000000..865bd1447e1
--- /dev/null
+++ b/workhorse/_support/detect-external-tests.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+go list -f '{{join .XTestGoFiles "\n"}}' ./... | awk '
+ { print }
+ END {
+ if(NR>0) {
+ print "Please avoid using external test packages (package foobar_test) in Workhorse."
+ print "See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/107373."
+ exit(1)
+ }
+ }
+'
diff --git a/workhorse/internal/upload/destination/destination_test.go b/workhorse/internal/upload/destination/destination_test.go
index 97645be168f..b355935e347 100644
--- a/workhorse/internal/upload/destination/destination_test.go
+++ b/workhorse/internal/upload/destination/destination_test.go
@@ -1,4 +1,4 @@
-package destination_test
+package destination
import (
"context"
@@ -17,12 +17,11 @@ import (
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/testhelper"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/test"
)
func testDeadline() time.Time {
- return time.Now().Add(destination.DefaultObjectStoreTimeout)
+ return time.Now().Add(DefaultObjectStoreTimeout)
}
func requireFileGetsRemovedAsync(t *testing.T, filePath string) {
@@ -44,10 +43,10 @@ func TestUploadWrongSize(t *testing.T) {
tmpFolder := t.TempDir()
- opts := &destination.UploadOpts{LocalTempPath: tmpFolder}
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize+1, "upload", opts)
+ opts := &UploadOpts{LocalTempPath: tmpFolder}
+ fh, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize+1, "upload", opts)
require.Error(t, err)
- _, isSizeError := err.(destination.SizeError)
+ _, isSizeError := err.(SizeError)
require.True(t, isSizeError, "Should fail with SizeError")
require.Nil(t, fh)
}
@@ -58,10 +57,10 @@ func TestUploadWithKnownSizeExceedLimit(t *testing.T) {
tmpFolder := t.TempDir()
- opts := &destination.UploadOpts{LocalTempPath: tmpFolder, MaximumSize: test.ObjectSize - 1}
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", opts)
+ opts := &UploadOpts{LocalTempPath: tmpFolder, MaximumSize: test.ObjectSize - 1}
+ fh, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", opts)
require.Error(t, err)
- _, isSizeError := err.(destination.SizeError)
+ _, isSizeError := err.(SizeError)
require.True(t, isSizeError, "Should fail with SizeError")
require.Nil(t, fh)
}
@@ -72,9 +71,9 @@ func TestUploadWithUnknownSizeExceedLimit(t *testing.T) {
tmpFolder := t.TempDir()
- opts := &destination.UploadOpts{LocalTempPath: tmpFolder, MaximumSize: test.ObjectSize - 1}
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), -1, "upload", opts)
- require.Equal(t, err, destination.ErrEntityTooLarge)
+ opts := &UploadOpts{LocalTempPath: tmpFolder, MaximumSize: test.ObjectSize - 1}
+ fh, err := Upload(ctx, strings.NewReader(test.ObjectContent), -1, "upload", opts)
+ require.Equal(t, err, ErrEntityTooLarge)
require.Nil(t, fh)
}
@@ -94,7 +93,7 @@ func TestUploadWrongETag(t *testing.T) {
objectURL := ts.URL + test.ObjectPath
- opts := &destination.UploadOpts{
+ opts := &UploadOpts{
RemoteID: "test-file",
RemoteURL: objectURL,
PresignedPut: objectURL + "?Signature=ASignature",
@@ -110,7 +109,7 @@ func TestUploadWrongETag(t *testing.T) {
osStub.InitiateMultipartUpload(test.ObjectPath)
}
ctx, cancel := context.WithCancel(context.Background())
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", opts)
+ fh, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", opts)
require.Nil(t, fh)
require.Error(t, err)
require.Equal(t, 1, osStub.PutsCnt(), "File not uploaded")
@@ -146,7 +145,7 @@ func TestUpload(t *testing.T) {
for _, spec := range tests {
t.Run(spec.name, func(t *testing.T) {
- var opts destination.UploadOpts
+ var opts UploadOpts
var expectedDeletes, expectedPuts int
osStub, ts := test.StartObjectStore()
@@ -187,7 +186,7 @@ func TestUpload(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
+ fh, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.NoError(t, err)
require.NotNil(t, fh)
@@ -206,7 +205,7 @@ func TestUpload(t *testing.T) {
}
require.Equal(t, test.ObjectSize, fh.Size)
- if destination.FIPSEnabled() {
+ if FIPSEnabled() {
require.Empty(t, fh.MD5())
} else {
require.Equal(t, test.ObjectMD5, fh.MD5())
@@ -255,7 +254,7 @@ func TestUploadWithS3WorkhorseClient(t *testing.T) {
name: "unknown object size with limit",
objectSize: -1,
maxSize: test.ObjectSize - 1,
- expectedErr: destination.ErrEntityTooLarge,
+ expectedErr: ErrEntityTooLarge,
},
}
@@ -269,12 +268,12 @@ func TestUploadWithS3WorkhorseClient(t *testing.T) {
defer cancel()
remoteObject := "tmp/test-file/1"
- opts := destination.UploadOpts{
+ opts := UploadOpts{
RemoteID: "test-file",
Deadline: testDeadline(),
UseWorkhorseClient: true,
RemoteTempObjectID: remoteObject,
- ObjectStorageConfig: destination.ObjectStorageConfig{
+ ObjectStorageConfig: ObjectStorageConfig{
Provider: "AWS",
S3Credentials: s3Creds,
S3Config: s3Config,
@@ -282,7 +281,7 @@ func TestUploadWithS3WorkhorseClient(t *testing.T) {
MaximumSize: tc.maxSize,
}
- _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), tc.objectSize, "upload", &opts)
+ _, err := Upload(ctx, strings.NewReader(test.ObjectContent), tc.objectSize, "upload", &opts)
if tc.expectedErr == nil {
require.NoError(t, err)
@@ -302,19 +301,19 @@ func TestUploadWithAzureWorkhorseClient(t *testing.T) {
defer cancel()
remoteObject := "tmp/test-file/1"
- opts := destination.UploadOpts{
+ opts := UploadOpts{
RemoteID: "test-file",
Deadline: testDeadline(),
UseWorkhorseClient: true,
RemoteTempObjectID: remoteObject,
- ObjectStorageConfig: destination.ObjectStorageConfig{
+ ObjectStorageConfig: ObjectStorageConfig{
Provider: "AzureRM",
URLMux: mux,
GoCloudConfig: config.GoCloudConfig{URL: "azblob://test-container"},
},
}
- _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
+ _, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.NoError(t, err)
test.GoCloudObjectExists(t, bucketDir, remoteObject)
@@ -327,19 +326,19 @@ func TestUploadWithUnknownGoCloudScheme(t *testing.T) {
mux := new(blob.URLMux)
remoteObject := "tmp/test-file/1"
- opts := destination.UploadOpts{
+ opts := UploadOpts{
RemoteID: "test-file",
Deadline: testDeadline(),
UseWorkhorseClient: true,
RemoteTempObjectID: remoteObject,
- ObjectStorageConfig: destination.ObjectStorageConfig{
+ ObjectStorageConfig: ObjectStorageConfig{
Provider: "SomeCloud",
URLMux: mux,
GoCloudConfig: config.GoCloudConfig{URL: "foo://test-container"},
},
}
- _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
+ _, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.Error(t, err)
}
@@ -351,7 +350,7 @@ func TestUploadMultipartInBodyFailure(t *testing.T) {
// this is the only way to get an in-body failure from our ObjectStoreStub
objectPath := "/bucket-but-no-object-key"
objectURL := ts.URL + objectPath
- opts := destination.UploadOpts{
+ opts := UploadOpts{
RemoteID: "test-file",
RemoteURL: objectURL,
PartSize: test.ObjectSize,
@@ -365,7 +364,7 @@ func TestUploadMultipartInBodyFailure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
+ fh, err := Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.Nil(t, fh)
require.Error(t, err)
require.EqualError(t, err, test.MultipartUploadInternalError().Error())
@@ -405,20 +404,20 @@ func TestUploadRemoteFileWithLimit(t *testing.T) {
testData: test.ObjectContent,
objectSize: -1,
maxSize: test.ObjectSize - 1,
- expectedErr: destination.ErrEntityTooLarge,
+ expectedErr: ErrEntityTooLarge,
},
{
name: "large object with unknown size with limit",
testData: string(make([]byte, 20000)),
objectSize: -1,
maxSize: 19000,
- expectedErr: destination.ErrEntityTooLarge,
+ expectedErr: ErrEntityTooLarge,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- var opts destination.UploadOpts
+ var opts UploadOpts
for _, remoteType := range remoteTypes {
osStub, ts := test.StartObjectStore()
@@ -454,7 +453,7 @@ func TestUploadRemoteFileWithLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- fh, err := destination.Upload(ctx, strings.NewReader(tc.testData), tc.objectSize, "upload", &opts)
+ fh, err := Upload(ctx, strings.NewReader(tc.testData), tc.objectSize, "upload", &opts)
if tc.expectedErr == nil {
require.NoError(t, err)
@@ -468,7 +467,7 @@ func TestUploadRemoteFileWithLimit(t *testing.T) {
}
}
-func checkFileHandlerWithFields(t *testing.T, fh *destination.FileHandler, fields map[string]string, prefix string) {
+func checkFileHandlerWithFields(t *testing.T, fh *FileHandler, fields map[string]string, prefix string) {
key := func(field string) string {
if prefix == "" {
return field
@@ -482,7 +481,7 @@ func checkFileHandlerWithFields(t *testing.T, fh *destination.FileHandler, field
require.Equal(t, fh.RemoteURL, fields[key("remote_url")])
require.Equal(t, fh.RemoteID, fields[key("remote_id")])
require.Equal(t, strconv.FormatInt(test.ObjectSize, 10), fields[key("size")])
- if destination.FIPSEnabled() {
+ if FIPSEnabled() {
require.Empty(t, fields[key("md5")])
} else {
require.Equal(t, test.ObjectMD5, fields[key("md5")])
diff --git a/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go b/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go
index 55d886087be..5a6a4b90b34 100644
--- a/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go
+++ b/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go
@@ -1,4 +1,4 @@
-package objectstore_test
+package objectstore
import (
"context"
@@ -10,7 +10,6 @@ import (
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/testhelper"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/test"
)
@@ -22,8 +21,8 @@ func TestGoCloudObjectUpload(t *testing.T) {
objectName := "test.png"
testURL := "azuretest://azure.example.com/test-container"
- p := &objectstore.GoCloudObjectParams{Ctx: ctx, Mux: mux, BucketURL: testURL, ObjectName: objectName}
- object, err := objectstore.NewGoCloudObject(p)
+ p := &GoCloudObjectParams{Ctx: ctx, Mux: mux, BucketURL: testURL, ObjectName: objectName}
+ object, err := NewGoCloudObject(p)
require.NotNil(t, object)
require.NoError(t, err)
@@ -48,8 +47,8 @@ func TestGoCloudObjectUpload(t *testing.T) {
if exists {
return fmt.Errorf("file %s is still present", objectName)
- } else {
- return nil
}
+
+ return nil
})
}
diff --git a/workhorse/internal/upload/destination/objectstore/multipart.go b/workhorse/internal/upload/destination/objectstore/multipart.go
index df336d2d901..900ca040dad 100644
--- a/workhorse/internal/upload/destination/objectstore/multipart.go
+++ b/workhorse/internal/upload/destination/objectstore/multipart.go
@@ -11,6 +11,8 @@ import (
"os"
"gitlab.com/gitlab-org/labkit/mask"
+
+ "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/s3api"
)
// ErrNotEnoughParts will be used when writing more than size * len(partURLs)
@@ -51,7 +53,7 @@ func NewMultipart(partURLs []string, completeURL, abortURL, deleteURL string, pu
}
func (m *Multipart) Upload(ctx context.Context, r io.Reader) error {
- cmu := &CompleteMultipartUpload{}
+ cmu := &s3api.CompleteMultipartUpload{}
for i, partURL := range m.PartURLs {
src := io.LimitReader(r, m.partSize)
part, err := m.readAndUploadOnePart(ctx, partURL, m.PutHeaders, src, i+1)
@@ -91,7 +93,7 @@ func (m *Multipart) Delete() {
deleteURL(m.DeleteURL)
}
-func (m *Multipart) readAndUploadOnePart(ctx context.Context, partURL string, putHeaders map[string]string, src io.Reader, partNumber int) (*completeMultipartUploadPart, error) {
+func (m *Multipart) readAndUploadOnePart(ctx context.Context, partURL string, putHeaders map[string]string, src io.Reader, partNumber int) (*s3api.CompleteMultipartUploadPart, error) {
file, err := os.CreateTemp("", "part-buffer")
if err != nil {
return nil, fmt.Errorf("create temporary buffer file: %v", err)
@@ -118,7 +120,7 @@ func (m *Multipart) readAndUploadOnePart(ctx context.Context, partURL string, pu
if err != nil {
return nil, fmt.Errorf("upload part %d: %v", partNumber, err)
}
- return &completeMultipartUploadPart{PartNumber: partNumber, ETag: etag}, nil
+ return &s3api.CompleteMultipartUploadPart{PartNumber: partNumber, ETag: etag}, nil
}
func (m *Multipart) uploadPart(ctx context.Context, url string, headers map[string]string, body io.Reader, size int64) (string, error) {
@@ -142,7 +144,7 @@ func (m *Multipart) uploadPart(ctx context.Context, url string, headers map[stri
return part.ETag(), nil
}
-func (m *Multipart) complete(ctx context.Context, cmu *CompleteMultipartUpload) error {
+func (m *Multipart) complete(ctx context.Context, cmu *s3api.CompleteMultipartUpload) error {
body, err := xml.Marshal(cmu)
if err != nil {
return fmt.Errorf("marshal CompleteMultipartUpload request: %v", err)
diff --git a/workhorse/internal/upload/destination/objectstore/multipart_test.go b/workhorse/internal/upload/destination/objectstore/multipart_test.go
index 2a5161e42e7..00244a5c50b 100644
--- a/workhorse/internal/upload/destination/objectstore/multipart_test.go
+++ b/workhorse/internal/upload/destination/objectstore/multipart_test.go
@@ -1,4 +1,4 @@
-package objectstore_test
+package objectstore
import (
"context"
@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/test"
)
@@ -48,7 +47,7 @@ func TestMultipartUploadWithUpcaseETags(t *testing.T) {
deadline := time.Now().Add(testTimeout)
- m, err := objectstore.NewMultipart(
+ m, err := NewMultipart(
[]string{ts.URL}, // a single presigned part URL
ts.URL, // the complete multipart upload URL
"", // no abort
diff --git a/workhorse/internal/upload/destination/objectstore/object_test.go b/workhorse/internal/upload/destination/objectstore/object_test.go
index 24117891b6d..2b94cd9e3b1 100644
--- a/workhorse/internal/upload/destination/objectstore/object_test.go
+++ b/workhorse/internal/upload/destination/objectstore/object_test.go
@@ -1,4 +1,4 @@
-package objectstore_test
+package objectstore
import (
"context"
@@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/test"
)
@@ -35,7 +34,7 @@ func testObjectUploadNoErrors(t *testing.T, startObjectStore osFactory, useDelet
defer cancel()
deadline := time.Now().Add(testTimeout)
- object, err := objectstore.NewObject(objectURL, deleteURL, putHeaders, test.ObjectSize)
+ object, err := NewObject(objectURL, deleteURL, putHeaders, test.ObjectSize)
require.NoError(t, err)
// copy data
@@ -97,12 +96,12 @@ func TestObjectUpload404(t *testing.T) {
deadline := time.Now().Add(testTimeout)
objectURL := ts.URL + test.ObjectPath
- object, err := objectstore.NewObject(objectURL, "", map[string]string{}, test.ObjectSize)
+ object, err := NewObject(objectURL, "", map[string]string{}, test.ObjectSize)
require.NoError(t, err)
_, err = object.Consume(ctx, strings.NewReader(test.ObjectContent), deadline)
require.Error(t, err)
- _, isStatusCodeError := err.(objectstore.StatusCodeError)
+ _, isStatusCodeError := err.(StatusCodeError)
require.True(t, isStatusCodeError, "Should fail with StatusCodeError")
require.Contains(t, err.Error(), "404")
}
@@ -140,7 +139,7 @@ func TestObjectUploadBrokenConnection(t *testing.T) {
deadline := time.Now().Add(testTimeout)
objectURL := ts.URL + test.ObjectPath
- object, err := objectstore.NewObject(objectURL, "", map[string]string{}, -1)
+ object, err := NewObject(objectURL, "", map[string]string{}, -1)
require.NoError(t, err)
_, copyErr := object.Consume(ctx, &endlessReader{}, deadline)
diff --git a/workhorse/internal/upload/destination/objectstore/s3_complete_multipart_api.go b/workhorse/internal/upload/destination/objectstore/s3_complete_multipart_api.go
index b84f5757f49..02799d0b9b0 100644
--- a/workhorse/internal/upload/destination/objectstore/s3_complete_multipart_api.go
+++ b/workhorse/internal/upload/destination/objectstore/s3_complete_multipart_api.go
@@ -2,45 +2,15 @@ package objectstore
import (
"encoding/xml"
- "fmt"
-)
-
-// CompleteMultipartUpload is the S3 CompleteMultipartUpload body
-type CompleteMultipartUpload struct {
- Part []*completeMultipartUploadPart
-}
-type completeMultipartUploadPart struct {
- PartNumber int
- ETag string
-}
-
-// CompleteMultipartUploadResult is the S3 answer to CompleteMultipartUpload request
-type CompleteMultipartUploadResult struct {
- Location string
- Bucket string
- Key string
- ETag string
-}
-
-// CompleteMultipartUploadError is the in-body error structure
-// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html#mpUploadComplete-examples
-// the answer contains other fields we are not using
-type CompleteMultipartUploadError struct {
- XMLName xml.Name `xml:"Error"`
- Code string
- Message string
-}
-
-func (c *CompleteMultipartUploadError) Error() string {
- return fmt.Sprintf("CompleteMultipartUpload remote error %q: %s", c.Code, c.Message)
-}
+ "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/s3api"
+)
// compoundCompleteMultipartUploadResult holds both CompleteMultipartUploadResult and CompleteMultipartUploadError
// this allows us to deserialize the response body where the root element can either be Error or CompleteMultipartUploadResult
type compoundCompleteMultipartUploadResult struct {
- *CompleteMultipartUploadResult
- *CompleteMultipartUploadError
+ *s3api.CompleteMultipartUploadResult
+ *s3api.CompleteMultipartUploadError
// XMLName this overrides CompleteMultipartUploadError.XMLName tags
XMLName xml.Name
diff --git a/workhorse/internal/upload/destination/objectstore/s3_object_test.go b/workhorse/internal/upload/destination/objectstore/s3_object_test.go
index 0ed14a2e844..c99712d18ad 100644
--- a/workhorse/internal/upload/destination/objectstore/s3_object_test.go
+++ b/workhorse/internal/upload/destination/objectstore/s3_object_test.go
@@ -1,4 +1,4 @@
-package objectstore_test
+package objectstore
import (
"context"
@@ -17,7 +17,6 @@ import (
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/testhelper"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/test"
)
@@ -50,7 +49,7 @@ func TestS3ObjectUpload(t *testing.T) {
objectName := filepath.Join(tmpDir, "s3-test-data")
ctx, cancel := context.WithCancel(context.Background())
- object, err := objectstore.NewS3Object(objectName, creds, config)
+ object, err := NewS3Object(objectName, creds, config)
require.NoError(t, err)
// copy data
@@ -107,7 +106,7 @@ func TestConcurrentS3ObjectUpload(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- object, err := objectstore.NewS3Object(objectName, creds, config)
+ object, err := NewS3Object(objectName, creds, config)
require.NoError(t, err)
// copy data
@@ -134,7 +133,7 @@ func TestS3ObjectUploadCancel(t *testing.T) {
objectName := filepath.Join(tmpDir, "s3-test-data")
- object, err := objectstore.NewS3Object(objectName, creds, config)
+ object, err := NewS3Object(objectName, creds, config)
require.NoError(t, err)
@@ -155,7 +154,7 @@ func TestS3ObjectUploadLimitReached(t *testing.T) {
tmpDir := t.TempDir()
objectName := filepath.Join(tmpDir, "s3-test-data")
- object, err := objectstore.NewS3Object(objectName, creds, config)
+ object, err := NewS3Object(objectName, creds, config)
require.NoError(t, err)
_, err = object.Consume(context.Background(), &failedReader{}, deadline)
diff --git a/workhorse/internal/upload/destination/objectstore/s3api/s3api.go b/workhorse/internal/upload/destination/objectstore/s3api/s3api.go
new file mode 100644
index 00000000000..49ab9347911
--- /dev/null
+++ b/workhorse/internal/upload/destination/objectstore/s3api/s3api.go
@@ -0,0 +1,37 @@
+package s3api
+
+import (
+ "encoding/xml"
+ "fmt"
+)
+
+// CompleteMultipartUploadError is the in-body error structure
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html#mpUploadComplete-examples
+// the answer contains other fields we are not using
+type CompleteMultipartUploadError struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string
+ Message string
+}
+
+func (c *CompleteMultipartUploadError) Error() string {
+ return fmt.Sprintf("CompleteMultipartUpload remote error %q: %s", c.Code, c.Message)
+}
+
+// CompleteMultipartUploadResult is the S3 answer to CompleteMultipartUpload request
+type CompleteMultipartUploadResult struct {
+ Location string
+ Bucket string
+ Key string
+ ETag string
+}
+
+// CompleteMultipartUpload is the S3 CompleteMultipartUpload body
+type CompleteMultipartUpload struct {
+ Part []*CompleteMultipartUploadPart
+}
+
+type CompleteMultipartUploadPart struct {
+ PartNumber int
+ ETag string
+}
diff --git a/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go b/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go
index 1a380bd5083..8fbb746d6ce 100644
--- a/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go
+++ b/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go
@@ -12,7 +12,7 @@ import (
"strings"
"sync"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore"
+ "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/s3api"
)
type partsEtagMap map[int]string
@@ -190,8 +190,8 @@ func (o *ObjectstoreStub) putObject(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
-func MultipartUploadInternalError() *objectstore.CompleteMultipartUploadError {
- return &objectstore.CompleteMultipartUploadError{Code: "InternalError", Message: "malformed object path"}
+func MultipartUploadInternalError() *s3api.CompleteMultipartUploadError {
+ return &s3api.CompleteMultipartUploadError{Code: "InternalError", Message: "malformed object path"}
}
func (o *ObjectstoreStub) completeMultipartUpload(w http.ResponseWriter, r *http.Request) {
@@ -212,7 +212,7 @@ func (o *ObjectstoreStub) completeMultipartUpload(w http.ResponseWriter, r *http
return
}
- var msg objectstore.CompleteMultipartUpload
+ var msg s3api.CompleteMultipartUpload
err = xml.Unmarshal(buf, &msg)
if err != nil {
http.Error(w, err.Error(), 400)
@@ -245,7 +245,7 @@ func (o *ObjectstoreStub) completeMultipartUpload(w http.ResponseWriter, r *http
bucket := split[0]
key := split[1]
- answer := objectstore.CompleteMultipartUploadResult{
+ answer := s3api.CompleteMultipartUploadResult{
Location: r.URL.String(),
Bucket: bucket,
Key: key,
diff --git a/workhorse/internal/upload/destination/upload_opts_test.go b/workhorse/internal/upload/destination/upload_opts_test.go
index fd9e56db194..a420e842e4d 100644
--- a/workhorse/internal/upload/destination/upload_opts_test.go
+++ b/workhorse/internal/upload/destination/upload_opts_test.go
@@ -1,4 +1,4 @@
-package destination_test
+package destination
import (
"testing"
@@ -8,7 +8,6 @@ import (
"gitlab.com/gitlab-org/gitlab/workhorse/internal/api"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upload/destination/objectstore/test"
)
@@ -43,7 +42,7 @@ func TestUploadOptsLocalAndRemote(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- opts := destination.UploadOpts{
+ opts := UploadOpts{
LocalTempPath: test.localTempPath,
PresignedPut: test.presignedPut,
PartSize: test.partSize,
@@ -106,7 +105,7 @@ func TestGetOpts(t *testing.T) {
},
}
deadline := time.Now().Add(time.Duration(apiResponse.RemoteObject.Timeout) * time.Second)
- opts, err := destination.GetOpts(apiResponse)
+ opts, err := GetOpts(apiResponse)
require.NoError(t, err)
require.Equal(t, apiResponse.TempPath, opts.LocalTempPath)
@@ -155,22 +154,22 @@ func TestGetOptsFail(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
- _, err := destination.GetOpts(tc.in)
+ _, err := GetOpts(tc.in)
require.Error(t, err, "expect input to be rejected")
})
}
}
func TestGetOptsDefaultTimeout(t *testing.T) {
- deadline := time.Now().Add(destination.DefaultObjectStoreTimeout)
- opts, err := destination.GetOpts(&api.Response{TempPath: "/foo/bar"})
+ deadline := time.Now().Add(DefaultObjectStoreTimeout)
+ opts, err := GetOpts(&api.Response{TempPath: "/foo/bar"})
require.NoError(t, err)
require.WithinDuration(t, deadline, opts.Deadline, time.Minute)
}
func TestUseWorkhorseClientEnabled(t *testing.T) {
- cfg := destination.ObjectStorageConfig{
+ cfg := ObjectStorageConfig{
Provider: "AWS",
S3Config: config.S3Config{
Bucket: "test-bucket",
@@ -195,7 +194,7 @@ func TestUseWorkhorseClientEnabled(t *testing.T) {
name string
UseWorkhorseClient bool
remoteTempObjectID string
- objectStorageConfig destination.ObjectStorageConfig
+ objectStorageConfig ObjectStorageConfig
expected bool
}{
{
@@ -243,7 +242,7 @@ func TestUseWorkhorseClientEnabled(t *testing.T) {
name: "missing S3 bucket",
UseWorkhorseClient: true,
remoteTempObjectID: "test-object",
- objectStorageConfig: destination.ObjectStorageConfig{
+ objectStorageConfig: ObjectStorageConfig{
Provider: "AWS",
S3Config: config.S3Config{},
},
@@ -269,7 +268,7 @@ func TestUseWorkhorseClientEnabled(t *testing.T) {
},
}
deadline := time.Now().Add(time.Duration(apiResponse.RemoteObject.Timeout) * time.Second)
- opts, err := destination.GetOpts(apiResponse)
+ opts, err := GetOpts(apiResponse)
require.NoError(t, err)
opts.ObjectStorageConfig = test.objectStorageConfig
@@ -322,7 +321,7 @@ func TestGoCloudConfig(t *testing.T) {
},
}
deadline := time.Now().Add(time.Duration(apiResponse.RemoteObject.Timeout) * time.Second)
- opts, err := destination.GetOpts(apiResponse)
+ opts, err := GetOpts(apiResponse)
require.NoError(t, err)
opts.ObjectStorageConfig.URLMux = mux
diff --git a/workhorse/internal/upload/object_storage_preparer_test.go b/workhorse/internal/upload/object_storage_preparer_test.go
index 56de6bbf7d6..b983d68f1ad 100644
--- a/workhorse/internal/upload/object_storage_preparer_test.go
+++ b/workhorse/internal/upload/object_storage_preparer_test.go
@@ -1,4 +1,4 @@
-package upload_test
+package upload
import (
"testing"
@@ -7,7 +7,6 @@ import (
"gitlab.com/gitlab-org/gitlab/workhorse/internal/api"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/upload"
"github.com/stretchr/testify/require"
)
@@ -38,7 +37,7 @@ func TestPrepareWithS3Config(t *testing.T) {
},
}
- p := upload.NewObjectStoragePreparer(c)
+ p := NewObjectStoragePreparer(c)
opts, err := p.Prepare(r)
require.NoError(t, err)
@@ -51,7 +50,7 @@ func TestPrepareWithS3Config(t *testing.T) {
func TestPrepareWithNoConfig(t *testing.T) {
c := config.Config{}
r := &api.Response{RemoteObject: api.RemoteObject{ID: "id"}}
- p := upload.NewObjectStoragePreparer(c)
+ p := NewObjectStoragePreparer(c)
opts, err := p.Prepare(r)
require.NoError(t, err)
diff --git a/workhorse/internal/zipartifacts/metadata_test.go b/workhorse/internal/zipartifacts/metadata_test.go
index e4799ba4a59..6bde56ef27d 100644
--- a/workhorse/internal/zipartifacts/metadata_test.go
+++ b/workhorse/internal/zipartifacts/metadata_test.go
@@ -1,4 +1,4 @@
-package zipartifacts_test
+package zipartifacts
import (
"bytes"
@@ -11,8 +11,6 @@ import (
"github.com/stretchr/testify/require"
zip "gitlab.com/gitlab-org/golang-archive-zip"
-
- "gitlab.com/gitlab-org/gitlab/workhorse/internal/zipartifacts"
)
func generateTestArchive(w io.Writer) error {
@@ -72,10 +70,10 @@ func TestGenerateZipMetadataFromFile(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- archive, err := zipartifacts.OpenArchive(ctx, f.Name())
+ archive, err := OpenArchive(ctx, f.Name())
require.NoError(t, err, "zipartifacts: OpenArchive failed")
- err = zipartifacts.GenerateZipMetadata(&metaBuffer, archive)
+ err = GenerateZipMetadata(&metaBuffer, archive)
require.NoError(t, err, "zipartifacts: GenerateZipMetadata failed")
err = validateMetadata(&metaBuffer)
@@ -96,6 +94,6 @@ func TestErrNotAZip(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- _, err = zipartifacts.OpenArchive(ctx, f.Name())
- require.Equal(t, zipartifacts.ErrorCode[zipartifacts.CodeNotZip], err, "OpenArchive requires a zip file")
+ _, err = OpenArchive(ctx, f.Name())
+ require.Equal(t, ErrorCode[CodeNotZip], err, "OpenArchive requires a zip file")
}