Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorGitLab Bot <gitlab-bot@gitlab.com>2022-06-20 14:10:13 +0300
committerGitLab Bot <gitlab-bot@gitlab.com>2022-06-20 14:10:13 +0300
commit0ea3fcec397b69815975647f5e2aa5fe944a8486 (patch)
tree7979381b89d26011bcf9bdc989a40fcc2f1ed4ff /workhorse/internal/upload/destination
parent72123183a20411a36d607d70b12d57c484394c8e (diff)
Add latest changes from gitlab-org/gitlab@15-1-stable-eev15.1.0-rc42
Diffstat (limited to 'workhorse/internal/upload/destination')
-rw-r--r--workhorse/internal/upload/destination/destination.go25
-rw-r--r--workhorse/internal/upload/destination/destination_test.go54
-rw-r--r--workhorse/internal/upload/destination/filestore/filestore_test.go5
-rw-r--r--workhorse/internal/upload/destination/objectstore/gocloud_object_test.go3
-rw-r--r--workhorse/internal/upload/destination/objectstore/multipart.go9
-rw-r--r--workhorse/internal/upload/destination/objectstore/multipart_test.go4
-rw-r--r--workhorse/internal/upload/destination/objectstore/object.go3
-rw-r--r--workhorse/internal/upload/destination/objectstore/s3_object_test.go18
-rw-r--r--workhorse/internal/upload/destination/objectstore/s3_session.go7
-rw-r--r--workhorse/internal/upload/destination/objectstore/test/gocloud_stub.go12
-rw-r--r--workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go24
-rw-r--r--workhorse/internal/upload/destination/objectstore/test/s3_stub.go10
-rw-r--r--workhorse/internal/upload/destination/reader_test.go4
-rw-r--r--workhorse/internal/upload/destination/upload_opts.go2
-rw-r--r--workhorse/internal/upload/destination/upload_opts_test.go3
15 files changed, 75 insertions, 108 deletions
diff --git a/workhorse/internal/upload/destination/destination.go b/workhorse/internal/upload/destination/destination.go
index b18b6e22a99..5e145e2cb2a 100644
--- a/workhorse/internal/upload/destination/destination.go
+++ b/workhorse/internal/upload/destination/destination.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"strconv"
"time"
@@ -54,7 +53,7 @@ type FileHandler struct {
type uploadClaims struct {
Upload map[string]string `json:"upload"`
- jwt.StandardClaims
+ jwt.RegisteredClaims
}
// SHA256 hash of the handled file
@@ -97,7 +96,7 @@ func (fh *FileHandler) GitLabFinalizeFields(prefix string) (map[string]string, e
signedData[hashName] = hash
}
- claims := uploadClaims{Upload: signedData, StandardClaims: secret.DefaultClaims}
+ claims := uploadClaims{Upload: signedData, RegisteredClaims: secret.DefaultClaims}
jwtData, err := secret.JWTTokenString(claims)
if err != nil {
return nil, err
@@ -113,9 +112,9 @@ type consumer interface {
// Upload persists the provided reader content to all the location specified in opts. A cleanup will be performed once ctx is Done
// Make sure the provided context will not expire before finalizing upload with GitLab Rails.
-func Upload(ctx context.Context, reader io.Reader, size int64, opts *UploadOpts) (*FileHandler, error) {
+func Upload(ctx context.Context, reader io.Reader, size int64, name string, opts *UploadOpts) (*FileHandler, error) {
fh := &FileHandler{
- Name: opts.TempFilePrefix,
+ Name: name,
RemoteID: opts.RemoteID,
RemoteURL: opts.RemoteURL,
}
@@ -199,13 +198,13 @@ func Upload(ctx context.Context, reader io.Reader, size int64, opts *UploadOpts)
}
logger := log.WithContextFields(ctx, log.Fields{
- "copied_bytes": fh.Size,
- "is_local": opts.IsLocalTempFile(),
- "is_multipart": opts.IsMultipart(),
- "is_remote": !opts.IsLocalTempFile(),
- "remote_id": opts.RemoteID,
- "temp_file_prefix": opts.TempFilePrefix,
- "client_mode": clientMode,
+ "copied_bytes": fh.Size,
+ "is_local": opts.IsLocalTempFile(),
+ "is_multipart": opts.IsMultipart(),
+ "is_remote": !opts.IsLocalTempFile(),
+ "remote_id": opts.RemoteID,
+ "client_mode": clientMode,
+ "filename": fh.Name,
})
if opts.IsLocalTempFile() {
@@ -226,7 +225,7 @@ func (fh *FileHandler) newLocalFile(ctx context.Context, opts *UploadOpts) (cons
return nil, fmt.Errorf("newLocalFile: mkdir %q: %v", opts.LocalTempPath, err)
}
- file, err := ioutil.TempFile(opts.LocalTempPath, opts.TempFilePrefix)
+ file, err := os.CreateTemp(opts.LocalTempPath, "gitlab-workhorse-upload")
if err != nil {
return nil, fmt.Errorf("newLocalFile: create file: %v", err)
}
diff --git a/workhorse/internal/upload/destination/destination_test.go b/workhorse/internal/upload/destination/destination_test.go
index ddf0ea24d60..6ebe163468b 100644
--- a/workhorse/internal/upload/destination/destination_test.go
+++ b/workhorse/internal/upload/destination/destination_test.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
"os"
"path"
"strconv"
@@ -43,12 +42,10 @@ func TestUploadWrongSize(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- tmpFolder, err := ioutil.TempDir("", "workhorse-test-tmp")
- require.NoError(t, err)
- defer os.RemoveAll(tmpFolder)
+ tmpFolder := t.TempDir()
- opts := &destination.UploadOpts{LocalTempPath: tmpFolder, TempFilePrefix: "test-file"}
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize+1, opts)
+ opts := &destination.UploadOpts{LocalTempPath: tmpFolder}
+ fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize+1, "upload", opts)
require.Error(t, err)
_, isSizeError := err.(destination.SizeError)
require.True(t, isSizeError, "Should fail with SizeError")
@@ -59,12 +56,10 @@ func TestUploadWithKnownSizeExceedLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- tmpFolder, err := ioutil.TempDir("", "workhorse-test-tmp")
- require.NoError(t, err)
- defer os.RemoveAll(tmpFolder)
+ tmpFolder := t.TempDir()
- opts := &destination.UploadOpts{LocalTempPath: tmpFolder, TempFilePrefix: "test-file", MaximumSize: test.ObjectSize - 1}
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, opts)
+ opts := &destination.UploadOpts{LocalTempPath: tmpFolder, MaximumSize: test.ObjectSize - 1}
+ fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", opts)
require.Error(t, err)
_, isSizeError := err.(destination.SizeError)
require.True(t, isSizeError, "Should fail with SizeError")
@@ -75,12 +70,10 @@ func TestUploadWithUnknownSizeExceedLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- tmpFolder, err := ioutil.TempDir("", "workhorse-test-tmp")
- require.NoError(t, err)
- defer os.RemoveAll(tmpFolder)
+ tmpFolder := t.TempDir()
- opts := &destination.UploadOpts{LocalTempPath: tmpFolder, TempFilePrefix: "test-file", MaximumSize: test.ObjectSize - 1}
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), -1, opts)
+ opts := &destination.UploadOpts{LocalTempPath: tmpFolder, MaximumSize: test.ObjectSize - 1}
+ fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), -1, "upload", opts)
require.Equal(t, err, destination.ErrEntityTooLarge)
require.Nil(t, fh)
}
@@ -117,7 +110,7 @@ func TestUploadWrongETag(t *testing.T) {
osStub.InitiateMultipartUpload(test.ObjectPath)
}
ctx, cancel := context.WithCancel(context.Background())
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, opts)
+ fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", opts)
require.Nil(t, fh)
require.Error(t, err)
require.Equal(t, 1, osStub.PutsCnt(), "File not uploaded")
@@ -139,9 +132,7 @@ func TestUpload(t *testing.T) {
remoteMultipart
)
- tmpFolder, err := ioutil.TempDir("", "workhorse-test-tmp")
- require.NoError(t, err)
- defer os.RemoveAll(tmpFolder)
+ tmpFolder := t.TempDir()
tests := []struct {
name string
@@ -191,13 +182,12 @@ func TestUpload(t *testing.T) {
if spec.local {
opts.LocalTempPath = tmpFolder
- opts.TempFilePrefix = "test-file"
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, &opts)
+ fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.NoError(t, err)
require.NotNil(t, fh)
@@ -211,9 +201,6 @@ func TestUpload(t *testing.T) {
dir := path.Dir(fh.LocalPath)
require.Equal(t, opts.LocalTempPath, dir)
- filename := path.Base(fh.LocalPath)
- beginsWithPrefix := strings.HasPrefix(filename, opts.TempFilePrefix)
- require.True(t, beginsWithPrefix, fmt.Sprintf("LocalPath filename %q do not begin with TempFilePrefix %q", filename, opts.TempFilePrefix))
} else {
require.Empty(t, fh.LocalPath, "LocalPath must be empty for non local uploads")
}
@@ -291,7 +278,7 @@ func TestUploadWithS3WorkhorseClient(t *testing.T) {
MaximumSize: tc.maxSize,
}
- _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), tc.objectSize, &opts)
+ _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), tc.objectSize, "upload", &opts)
if tc.expectedErr == nil {
require.NoError(t, err)
@@ -305,8 +292,7 @@ func TestUploadWithS3WorkhorseClient(t *testing.T) {
}
func TestUploadWithAzureWorkhorseClient(t *testing.T) {
- mux, bucketDir, cleanup := test.SetupGoCloudFileBucket(t, "azblob")
- defer cleanup()
+ mux, bucketDir := test.SetupGoCloudFileBucket(t, "azblob")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -324,7 +310,7 @@ func TestUploadWithAzureWorkhorseClient(t *testing.T) {
},
}
- _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, &opts)
+ _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.NoError(t, err)
test.GoCloudObjectExists(t, bucketDir, remoteObject)
@@ -349,7 +335,7 @@ func TestUploadWithUnknownGoCloudScheme(t *testing.T) {
},
}
- _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, &opts)
+ _, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.Error(t, err)
}
@@ -375,7 +361,7 @@ func TestUploadMultipartInBodyFailure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, &opts)
+ fh, err := destination.Upload(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, "upload", &opts)
require.Nil(t, fh)
require.Error(t, err)
require.EqualError(t, err, test.MultipartUploadInternalError().Error())
@@ -431,10 +417,6 @@ func TestUploadRemoteFileWithLimit(t *testing.T) {
var opts destination.UploadOpts
for _, remoteType := range remoteTypes {
- tmpFolder, err := ioutil.TempDir("", "workhorse-test-tmp")
- require.NoError(t, err)
- defer os.RemoveAll(tmpFolder)
-
osStub, ts := test.StartObjectStore()
defer ts.Close()
@@ -468,7 +450,7 @@ func TestUploadRemoteFileWithLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- fh, err := destination.Upload(ctx, strings.NewReader(tc.testData), tc.objectSize, &opts)
+ fh, err := destination.Upload(ctx, strings.NewReader(tc.testData), tc.objectSize, "upload", &opts)
if tc.expectedErr == nil {
require.NoError(t, err)
diff --git a/workhorse/internal/upload/destination/filestore/filestore_test.go b/workhorse/internal/upload/destination/filestore/filestore_test.go
index ec67eae96b9..6f0425809fe 100644
--- a/workhorse/internal/upload/destination/filestore/filestore_test.go
+++ b/workhorse/internal/upload/destination/filestore/filestore_test.go
@@ -2,7 +2,6 @@ package filestore
import (
"context"
- "io/ioutil"
"os"
"strings"
"testing"
@@ -12,7 +11,7 @@ import (
)
func TestConsume(t *testing.T) {
- f, err := ioutil.TempFile("", "filestore-local-file")
+ f, err := os.CreateTemp("", "filestore-local-file")
if f != nil {
defer os.Remove(f.Name())
}
@@ -32,7 +31,7 @@ func TestConsume(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(content)), n)
- consumedContent, err := ioutil.ReadFile(f.Name())
+ consumedContent, err := os.ReadFile(f.Name())
require.NoError(t, err)
require.Equal(t, content, string(consumedContent))
}
diff --git a/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go b/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go
index 57b3a35b41e..55d886087be 100644
--- a/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go
+++ b/workhorse/internal/upload/destination/objectstore/gocloud_object_test.go
@@ -15,8 +15,7 @@ import (
)
func TestGoCloudObjectUpload(t *testing.T) {
- mux, _, cleanup := test.SetupGoCloudFileBucket(t, "azuretest")
- defer cleanup()
+ mux, _ := test.SetupGoCloudFileBucket(t, "azuretest")
ctx, cancel := context.WithCancel(context.Background())
deadline := time.Now().Add(testTimeout)
diff --git a/workhorse/internal/upload/destination/objectstore/multipart.go b/workhorse/internal/upload/destination/objectstore/multipart.go
index 4c5b64b27ee..df336d2d901 100644
--- a/workhorse/internal/upload/destination/objectstore/multipart.go
+++ b/workhorse/internal/upload/destination/objectstore/multipart.go
@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"os"
@@ -66,7 +65,7 @@ func (m *Multipart) Upload(ctx context.Context, r io.Reader) error {
}
}
- n, err := io.Copy(ioutil.Discard, r)
+ n, err := io.Copy(io.Discard, r)
if err != nil {
return fmt.Errorf("drain pipe: %v", err)
}
@@ -93,19 +92,19 @@ func (m *Multipart) Delete() {
}
func (m *Multipart) readAndUploadOnePart(ctx context.Context, partURL string, putHeaders map[string]string, src io.Reader, partNumber int) (*completeMultipartUploadPart, error) {
- file, err := ioutil.TempFile("", "part-buffer")
+ file, err := os.CreateTemp("", "part-buffer")
if err != nil {
return nil, fmt.Errorf("create temporary buffer file: %v", err)
}
defer file.Close()
if err := os.Remove(file.Name()); err != nil {
- return nil, err
+ return nil, fmt.Errorf("remove temporary buffer file: %v", err)
}
n, err := io.Copy(file, src)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("copy to temporary buffer file: %v", err)
}
if n == 0 {
return nil, nil
diff --git a/workhorse/internal/upload/destination/objectstore/multipart_test.go b/workhorse/internal/upload/destination/objectstore/multipart_test.go
index 4aff3467e30..2a5161e42e7 100644
--- a/workhorse/internal/upload/destination/objectstore/multipart_test.go
+++ b/workhorse/internal/upload/destination/objectstore/multipart_test.go
@@ -2,7 +2,7 @@ package objectstore_test
import (
"context"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"strings"
@@ -22,7 +22,7 @@ func TestMultipartUploadWithUpcaseETags(t *testing.T) {
var putCnt, postCnt int
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- _, err := ioutil.ReadAll(r.Body)
+ _, err := io.ReadAll(r.Body)
require.NoError(t, err)
defer r.Body.Close()
diff --git a/workhorse/internal/upload/destination/objectstore/object.go b/workhorse/internal/upload/destination/objectstore/object.go
index b7c4f12f009..68c566861af 100644
--- a/workhorse/internal/upload/destination/objectstore/object.go
+++ b/workhorse/internal/upload/destination/objectstore/object.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"net/http"
"gitlab.com/gitlab-org/labkit/mask"
@@ -53,7 +52,7 @@ func newObject(putURL, deleteURL string, putHeaders map[string]string, size int6
func (o *Object) Upload(ctx context.Context, r io.Reader) error {
// we should prevent pr.Close() otherwise it may shadow error set with pr.CloseWithError(err)
- req, err := http.NewRequest(http.MethodPut, o.putURL, ioutil.NopCloser(r))
+ req, err := http.NewRequest(http.MethodPut, o.putURL, io.NopCloser(r))
if err != nil {
return fmt.Errorf("PUT %q: %v", mask.URL(o.putURL), err)
diff --git a/workhorse/internal/upload/destination/objectstore/s3_object_test.go b/workhorse/internal/upload/destination/objectstore/s3_object_test.go
index b81b0ae2024..0ed14a2e844 100644
--- a/workhorse/internal/upload/destination/objectstore/s3_object_test.go
+++ b/workhorse/internal/upload/destination/objectstore/s3_object_test.go
@@ -4,8 +4,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
- "os"
"path/filepath"
"strings"
"sync"
@@ -47,9 +45,7 @@ func TestS3ObjectUpload(t *testing.T) {
defer ts.Close()
deadline := time.Now().Add(testTimeout)
- tmpDir, err := ioutil.TempDir("", "workhorse-test-")
- require.NoError(t, err)
- defer os.Remove(tmpDir)
+ tmpDir := t.TempDir()
objectName := filepath.Join(tmpDir, "s3-test-data")
ctx, cancel := context.WithCancel(context.Background())
@@ -87,9 +83,7 @@ func TestConcurrentS3ObjectUpload(t *testing.T) {
defer artifactsServer.Close()
deadline := time.Now().Add(testTimeout)
- tmpDir, err := ioutil.TempDir("", "workhorse-test-")
- require.NoError(t, err)
- defer os.Remove(tmpDir)
+ tmpDir := t.TempDir()
var wg sync.WaitGroup
@@ -136,9 +130,7 @@ func TestS3ObjectUploadCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
deadline := time.Now().Add(testTimeout)
- tmpDir, err := ioutil.TempDir("", "workhorse-test-")
- require.NoError(t, err)
- defer os.Remove(tmpDir)
+ tmpDir := t.TempDir()
objectName := filepath.Join(tmpDir, "s3-test-data")
@@ -160,9 +152,7 @@ func TestS3ObjectUploadLimitReached(t *testing.T) {
defer ts.Close()
deadline := time.Now().Add(testTimeout)
- tmpDir, err := ioutil.TempDir("", "workhorse-test-")
- require.NoError(t, err)
- defer os.Remove(tmpDir)
+ tmpDir := t.TempDir()
objectName := filepath.Join(tmpDir, "s3-test-data")
object, err := objectstore.NewS3Object(objectName, creds, config)
diff --git a/workhorse/internal/upload/destination/objectstore/s3_session.go b/workhorse/internal/upload/destination/objectstore/s3_session.go
index aa38f18ed7a..d71b38eb22e 100644
--- a/workhorse/internal/upload/destination/objectstore/s3_session.go
+++ b/workhorse/internal/upload/destination/objectstore/s3_session.go
@@ -10,6 +10,8 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
+
+ "gitlab.com/gitlab-org/labkit/fips"
)
type s3Session struct {
@@ -61,8 +63,9 @@ func setupS3Session(s3Credentials config.S3Credentials, s3Config config.S3Config
}
cfg := &aws.Config{
- Region: aws.String(s3Config.Region),
- S3ForcePathStyle: aws.Bool(s3Config.PathStyle),
+ Region: aws.String(s3Config.Region),
+ S3ForcePathStyle: aws.Bool(s3Config.PathStyle),
+ S3DisableContentMD5Validation: aws.Bool(fips.Enabled()),
}
// In case IAM profiles aren't being used, use the static credentials
diff --git a/workhorse/internal/upload/destination/objectstore/test/gocloud_stub.go b/workhorse/internal/upload/destination/objectstore/test/gocloud_stub.go
index cf22075e407..bff0eabaee5 100644
--- a/workhorse/internal/upload/destination/objectstore/test/gocloud_stub.go
+++ b/workhorse/internal/upload/destination/objectstore/test/gocloud_stub.go
@@ -2,9 +2,7 @@ package test
import (
"context"
- "io/ioutil"
"net/url"
- "os"
"testing"
"github.com/stretchr/testify/require"
@@ -20,18 +18,14 @@ func (o *dirOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket
return fileblob.OpenBucket(o.tmpDir, nil)
}
-func SetupGoCloudFileBucket(t *testing.T, scheme string) (m *blob.URLMux, bucketDir string, cleanup func()) {
- tmpDir, err := ioutil.TempDir("", "")
- require.NoError(t, err)
+func SetupGoCloudFileBucket(t *testing.T, scheme string) (m *blob.URLMux, bucketDir string) {
+ tmpDir := t.TempDir()
mux := new(blob.URLMux)
fake := &dirOpener{tmpDir: tmpDir}
mux.RegisterBucket(scheme, fake)
- cleanup = func() {
- os.RemoveAll(tmpDir)
- }
- return mux, tmpDir, cleanup
+ return mux, tmpDir
}
func GoCloudObjectExists(t *testing.T, bucketDir string, objectName string) {
diff --git a/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go b/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go
index d51a2de7456..1a380bd5083 100644
--- a/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go
+++ b/workhorse/internal/upload/destination/objectstore/test/objectstore_stub.go
@@ -6,7 +6,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/http/httptest"
"strconv"
@@ -22,7 +21,8 @@ type partsEtagMap map[int]string
// Instead of storing objects it will just save md5sum.
type ObjectstoreStub struct {
// bucket contains md5sum of uploaded objects
- bucket map[string]string
+ bucket map[string]string
+ contents map[string][]byte
// overwriteMD5 contains overwrites for md5sum that should be return instead of the regular hash
overwriteMD5 map[string]string
// multipart is a map of MultipartUploads
@@ -48,6 +48,7 @@ func StartObjectStoreWithCustomMD5(md5Hashes map[string]string) (*ObjectstoreStu
multipart: make(map[string]partsEtagMap),
overwriteMD5: make(map[string]string),
headers: make(map[string]*http.Header),
+ contents: make(map[string][]byte),
}
for k, v := range md5Hashes {
@@ -82,6 +83,15 @@ func (o *ObjectstoreStub) GetObjectMD5(path string) string {
return o.bucket[path]
}
+// GetObject returns the contents of the uploaded object. The caller must
+// not modify the byte slice.
+func (o *ObjectstoreStub) GetObject(path string) []byte {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ return o.contents[path]
+}
+
// GetHeader returns a given HTTP header of the object uploaded to the path
func (o *ObjectstoreStub) GetHeader(path, key string) string {
o.m.Lock()
@@ -154,11 +164,11 @@ func (o *ObjectstoreStub) putObject(w http.ResponseWriter, r *http.Request) {
etag, overwritten := o.overwriteMD5[objectPath]
if !overwritten {
+ buf, _ := io.ReadAll(r.Body)
+ o.contents[objectPath] = buf
hasher := md5.New()
- io.Copy(hasher, r.Body)
-
- checksum := hasher.Sum(nil)
- etag = hex.EncodeToString(checksum)
+ hasher.Write(buf)
+ etag = hex.EncodeToString(hasher.Sum(nil))
}
o.headers[objectPath] = &r.Header
@@ -196,7 +206,7 @@ func (o *ObjectstoreStub) completeMultipartUpload(w http.ResponseWriter, r *http
return
}
- buf, err := ioutil.ReadAll(r.Body)
+ buf, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, err.Error(), 500)
return
diff --git a/workhorse/internal/upload/destination/objectstore/test/s3_stub.go b/workhorse/internal/upload/destination/objectstore/test/s3_stub.go
index 6b83426b852..6a6b4662904 100644
--- a/workhorse/internal/upload/destination/objectstore/test/s3_stub.go
+++ b/workhorse/internal/upload/destination/objectstore/test/s3_stub.go
@@ -1,7 +1,6 @@
package test
import (
- "io/ioutil"
"net/http/httptest"
"os"
"strings"
@@ -76,7 +75,7 @@ func S3ObjectExists(t *testing.T, sess *session.Session, config config.S3Config,
require.NoError(t, err)
require.Equal(t, int64(len(expectedBytes)), numBytes)
- output, err := ioutil.ReadFile(tmpfile.Name())
+ output, err := os.ReadFile(tmpfile.Name())
require.NoError(t, err)
require.Equal(t, []byte(expectedBytes), output)
@@ -124,13 +123,10 @@ func S3ObjectDoesNotExist(t *testing.T, sess *session.Session, config config.S3C
}
func downloadObject(t *testing.T, sess *session.Session, config config.S3Config, objectName string, handler func(tmpfile *os.File, numBytes int64, err error)) {
- tmpDir, err := ioutil.TempDir("", "workhorse-test-")
- require.NoError(t, err)
- defer os.Remove(tmpDir)
+ tmpDir := t.TempDir()
- tmpfile, err := ioutil.TempFile(tmpDir, "s3-output")
+ tmpfile, err := os.CreateTemp(tmpDir, "s3-output")
require.NoError(t, err)
- defer os.Remove(tmpfile.Name())
downloadSvc := s3manager.NewDownloader(sess)
numBytes, err := downloadSvc.Download(tmpfile, &s3.GetObjectInput{
diff --git a/workhorse/internal/upload/destination/reader_test.go b/workhorse/internal/upload/destination/reader_test.go
index a26f7746a13..40ff76d3866 100644
--- a/workhorse/internal/upload/destination/reader_test.go
+++ b/workhorse/internal/upload/destination/reader_test.go
@@ -2,7 +2,7 @@ package destination
import (
"fmt"
- "io/ioutil"
+ "io"
"strings"
"testing"
"testing/iotest"
@@ -19,7 +19,7 @@ func TestHardLimitReader(t *testing.T) {
},
)
- out, err := ioutil.ReadAll(r)
+ out, err := io.ReadAll(r)
require.NoError(t, err)
require.Equal(t, text, string(out))
}
diff --git a/workhorse/internal/upload/destination/upload_opts.go b/workhorse/internal/upload/destination/upload_opts.go
index 77a8927d34f..b2223fac912 100644
--- a/workhorse/internal/upload/destination/upload_opts.go
+++ b/workhorse/internal/upload/destination/upload_opts.go
@@ -29,8 +29,6 @@ type ObjectStorageConfig struct {
// UploadOpts represents all the options available for saving a file to object store
type UploadOpts struct {
- // TempFilePrefix is the prefix used to create temporary local file
- TempFilePrefix string
// LocalTempPath is the directory where to write a local copy of the file
LocalTempPath string
// RemoteID is the remote ObjectID provided by GitLab
diff --git a/workhorse/internal/upload/destination/upload_opts_test.go b/workhorse/internal/upload/destination/upload_opts_test.go
index 24a372495c6..fd9e56db194 100644
--- a/workhorse/internal/upload/destination/upload_opts_test.go
+++ b/workhorse/internal/upload/destination/upload_opts_test.go
@@ -283,8 +283,7 @@ func TestUseWorkhorseClientEnabled(t *testing.T) {
}
func TestGoCloudConfig(t *testing.T) {
- mux, _, cleanup := test.SetupGoCloudFileBucket(t, "azblob")
- defer cleanup()
+ mux, _ := test.SetupGoCloudFileBucket(t, "azblob")
tests := []struct {
name string