gitlab.com/gitlab-org/gitlab-foss.git
Diffstat (limited to 'workhorse/internal/objectstore/test')
-rw-r--r--  workhorse/internal/objectstore/test/consts.go                  19
-rw-r--r--  workhorse/internal/objectstore/test/gocloud_stub.go            47
-rw-r--r--  workhorse/internal/objectstore/test/objectstore_stub.go       278
-rw-r--r--  workhorse/internal/objectstore/test/objectstore_stub_test.go  167
-rw-r--r--  workhorse/internal/objectstore/test/s3_stub.go                 142
5 files changed, 653 insertions(+), 0 deletions(-)
diff --git a/workhorse/internal/objectstore/test/consts.go b/workhorse/internal/objectstore/test/consts.go
new file mode 100644
index 00000000000..7a1bcc28d45
--- /dev/null
+++ b/workhorse/internal/objectstore/test/consts.go
@@ -0,0 +1,19 @@
+package test
+
+// Useful constants for testing purposes
+const (
+ // ObjectContent is an example of textual object content
+ ObjectContent = "TEST OBJECT CONTENT"
+ // ObjectSize is the size of ObjectContent in bytes
+ ObjectSize = int64(len(ObjectContent))
+ // ObjectPath is an example remote object path (including the bucket name)
+ ObjectPath = "/bucket/object"
+ // ObjectMD5 is the MD5 hash of ObjectContent
+ ObjectMD5 = "42d000eea026ee0760677e506189cb33"
+ // ObjectSHA1 is the SHA1 hash of ObjectContent
+ ObjectSHA1 = "173cfd58c6b60cb910f68a26cbb77e3fc5017a6d"
+ // ObjectSHA256 is the SHA256 hash of ObjectContent
+ ObjectSHA256 = "b0257e9e657ef19b15eed4fbba975bd5238d651977564035ef91cb45693647aa"
+ // ObjectSHA512 is the SHA512 hash of ObjectContent
+ ObjectSHA512 = "51af8197db2047f7894652daa7437927bf831d5aa63f1b0b7277c4800b06f5e3057251f0e4c2d344ca8c2daf1ffc08a28dd3b2f5fe0e316d3fd6c3af58c34b97"
+)
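
These constants are mutually consistent by construction; a quick self-check (a hypothetical test, not part of this change) could look like:

package test

import (
	"crypto/md5"
	"encoding/hex"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestConstsMatchContent (hypothetical) verifies that the hash and size
// constants really describe ObjectContent.
func TestConstsMatchContent(t *testing.T) {
	sum := md5.Sum([]byte(ObjectContent))
	require.Equal(t, ObjectMD5, hex.EncodeToString(sum[:]))
	require.Equal(t, ObjectSize, int64(len(ObjectContent)))
}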
diff --git a/workhorse/internal/objectstore/test/gocloud_stub.go b/workhorse/internal/objectstore/test/gocloud_stub.go
new file mode 100644
index 00000000000..cf22075e407
--- /dev/null
+++ b/workhorse/internal/objectstore/test/gocloud_stub.go
@@ -0,0 +1,47 @@
+package test
+
+import (
+ "context"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "gocloud.dev/blob"
+ "gocloud.dev/blob/fileblob"
+)
+
+type dirOpener struct {
+ tmpDir string
+}
+
+func (o *dirOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
+ return fileblob.OpenBucket(o.tmpDir, nil)
+}
+
+func SetupGoCloudFileBucket(t *testing.T, scheme string) (m *blob.URLMux, bucketDir string, cleanup func()) {
+ tmpDir, err := ioutil.TempDir("", "")
+ require.NoError(t, err)
+
+ mux := new(blob.URLMux)
+ fake := &dirOpener{tmpDir: tmpDir}
+ mux.RegisterBucket(scheme, fake)
+ cleanup = func() {
+ os.RemoveAll(tmpDir)
+ }
+
+ return mux, tmpDir, cleanup
+}
+
+func GoCloudObjectExists(t *testing.T, bucketDir string, objectName string) {
+ bucket, err := fileblob.OpenBucket(bucketDir, nil)
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithCancel(context.Background()) // lint:allow context.Background
+ defer cancel()
+
+ exists, err := bucket.Exists(ctx, objectName)
+ require.NoError(t, err)
+ require.True(t, exists)
+}
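
For reference, a consumer test might wire these helpers together as sketched below; the "test" scheme and the object key "my-object" are arbitrary choices, and the write would normally happen inside the production code under test:

func TestGoCloudFileBucketExample(t *testing.T) {
	mux, bucketDir, cleanup := SetupGoCloudFileBucket(t, "test")
	defer cleanup()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Open the bucket through the registered scheme and write an object,
	// standing in for the code under test.
	bucket, err := mux.OpenBucket(ctx, "test://fake-bucket")
	require.NoError(t, err)
	defer bucket.Close()
	require.NoError(t, bucket.WriteAll(ctx, "my-object", []byte(ObjectContent), nil))

	GoCloudObjectExists(t, bucketDir, "my-object")
}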
diff --git a/workhorse/internal/objectstore/test/objectstore_stub.go b/workhorse/internal/objectstore/test/objectstore_stub.go
new file mode 100644
index 00000000000..31ef4913305
--- /dev/null
+++ b/workhorse/internal/objectstore/test/objectstore_stub.go
@@ -0,0 +1,278 @@
+package test
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "strings"
+ "sync"
+
+ "gitlab.com/gitlab-org/gitlab-workhorse/internal/objectstore"
+)
+
+type partsEtagMap map[int]string
+
+// ObjectstoreStub is a testing implementation of ObjectStore.
+// Instead of storing objects it only records their MD5 checksums.
+type ObjectstoreStub struct {
+ // bucket maps object paths to the MD5 checksums of uploaded objects
+ bucket map[string]string
+ // overwriteMD5 contains MD5 overrides that are returned instead of the computed hash
+ overwriteMD5 map[string]string
+ // multipart is a map of in-progress MultipartUploads
+ multipart map[string]partsEtagMap
+ // headers stores the HTTP headers sent with each upload request
+ headers map[string]*http.Header
+
+ puts int
+ deletes int
+
+ m sync.Mutex
+}
+
+// StartObjectStore will start an ObjectStore stub
+func StartObjectStore() (*ObjectstoreStub, *httptest.Server) {
+ return StartObjectStoreWithCustomMD5(make(map[string]string))
+}
+
+// StartObjectStoreWithCustomMD5 will start an ObjectStore stub; md5Hashes contains MD5 overrides that are returned on PutObject instead of the computed hash
+func StartObjectStoreWithCustomMD5(md5Hashes map[string]string) (*ObjectstoreStub, *httptest.Server) {
+ os := &ObjectstoreStub{
+ bucket: make(map[string]string),
+ multipart: make(map[string]partsEtagMap),
+ overwriteMD5: make(map[string]string),
+ headers: make(map[string]*http.Header),
+ }
+
+ for k, v := range md5Hashes {
+ os.overwriteMD5[k] = v
+ }
+
+ return os, httptest.NewServer(os)
+}
+
+// PutsCnt counts PutObject invocations
+func (o *ObjectstoreStub) PutsCnt() int {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ return o.puts
+}
+
+// DeletesCnt counts DeleteObject invocations on valid objects
+func (o *ObjectstoreStub) DeletesCnt() int {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ return o.deletes
+}
+
+// GetObjectMD5 returns the calculated MD5 of the object uploaded to path.
+// It returns an empty string if no object has been uploaded to that path.
+func (o *ObjectstoreStub) GetObjectMD5(path string) string {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ return o.bucket[path]
+}
+
+// GetHeader returns a given HTTP header of the object uploaded to the path
+func (o *ObjectstoreStub) GetHeader(path, key string) string {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ if val, ok := o.headers[path]; ok {
+ return val.Get(key)
+ }
+
+ return ""
+}
+
+// InitiateMultipartUpload prepares the ObjectstoreStub to receive a MultipartUpload on path.
+// It will return an error if a MultipartUpload is already in progress on that path
+// InitiateMultipartUpload is only used during test setup.
+// Workhorse's production code does not know how to initiate a multipart upload.
+//
+// Real S3 multipart uploads are more complicated than what we do here,
+// but this is enough to verify that workhorse's production code behaves as intended.
+func (o *ObjectstoreStub) InitiateMultipartUpload(path string) error {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ if o.multipart[path] != nil {
+ return fmt.Errorf("MultipartUpload for %q already in progress", path)
+ }
+
+ o.multipart[path] = make(partsEtagMap)
+ return nil
+}
+
+// IsMultipartUpload checks whether the given path has a MultipartUpload in progress
+func (o *ObjectstoreStub) IsMultipartUpload(path string) bool {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ return o.isMultipartUpload(path)
+}
+
+// isMultipartUpload is the lock-free version of IsMultipartUpload
+func (o *ObjectstoreStub) isMultipartUpload(path string) bool {
+ return o.multipart[path] != nil
+}
+
+func (o *ObjectstoreStub) removeObject(w http.ResponseWriter, r *http.Request) {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ objectPath := r.URL.Path
+ if o.isMultipartUpload(objectPath) {
+ o.deletes++
+ delete(o.multipart, objectPath)
+
+ w.WriteHeader(200)
+ } else if _, ok := o.bucket[objectPath]; ok {
+ o.deletes++
+ delete(o.bucket, objectPath)
+
+ w.WriteHeader(200)
+ } else {
+ w.WriteHeader(404)
+ }
+}
+
+func (o *ObjectstoreStub) putObject(w http.ResponseWriter, r *http.Request) {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ objectPath := r.URL.Path
+
+ etag, overwritten := o.overwriteMD5[objectPath]
+ if !overwritten {
+ hasher := md5.New()
+ if _, err := io.Copy(hasher, r.Body); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ checksum := hasher.Sum(nil)
+ etag = hex.EncodeToString(checksum)
+ }
+
+ o.headers[objectPath] = &r.Header
+ o.puts++
+ if o.isMultipartUpload(objectPath) {
+ pNumber := r.URL.Query().Get("partNumber")
+ idx, err := strconv.Atoi(pNumber)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("malformed partNumber: %v", err), 400)
+ return
+ }
+
+ o.multipart[objectPath][idx] = etag
+ } else {
+ o.bucket[objectPath] = etag
+ }
+
+ w.Header().Set("ETag", etag)
+ w.WriteHeader(200)
+}
+
+// MultipartUploadInternalError returns the error response the stub sends when the object path cannot be split into bucket and key
+func MultipartUploadInternalError() *objectstore.CompleteMultipartUploadError {
+ return &objectstore.CompleteMultipartUploadError{Code: "InternalError", Message: "malformed object path"}
+}
+
+func (o *ObjectstoreStub) completeMultipartUpload(w http.ResponseWriter, r *http.Request) {
+ o.m.Lock()
+ defer o.m.Unlock()
+
+ objectPath := r.URL.Path
+
+ multipart := o.multipart[objectPath]
+ if multipart == nil {
+ http.Error(w, "Unknown MultipartUpload", 404)
+ return
+ }
+
+ buf, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ var msg objectstore.CompleteMultipartUpload
+ err = xml.Unmarshal(buf, &msg)
+ if err != nil {
+ http.Error(w, err.Error(), 400)
+ return
+ }
+
+ for _, part := range msg.Part {
+ etag := multipart[part.PartNumber]
+ if etag != part.ETag {
+ msg := fmt.Sprintf("ETag mismatch on part %d. Expected %q got %q", part.PartNumber, etag, part.ETag)
+ http.Error(w, msg, 400)
+ return
+ }
+ }
+
+ etag, overwritten := o.overwriteMD5[objectPath]
+ if !overwritten {
+ etag = "CompleteMultipartUploadETag"
+ }
+
+ o.bucket[objectPath] = etag
+ delete(o.multipart, objectPath)
+
+ w.Header().Set("ETag", etag)
+ split := strings.SplitN(objectPath[1:], "/", 2)
+ if len(split) < 2 {
+ encodeXMLAnswer(w, MultipartUploadInternalError())
+ return
+ }
+
+ bucket := split[0]
+ key := split[1]
+ answer := objectstore.CompleteMultipartUploadResult{
+ Location: r.URL.String(),
+ Bucket: bucket,
+ Key: key,
+ ETag: etag,
+ }
+ encodeXMLAnswer(w, answer)
+}
+
+func encodeXMLAnswer(w http.ResponseWriter, answer interface{}) {
+ w.Header().Set("Content-Type", "text/xml")
+
+ enc := xml.NewEncoder(w)
+ if err := enc.Encode(answer); err != nil {
+ http.Error(w, err.Error(), 500)
+ }
+}
+
+func (o *ObjectstoreStub) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Body != nil {
+ defer r.Body.Close()
+ }
+
+ fmt.Println("ObjectStore Stub:", r.Method, r.URL.String())
+
+ if r.URL.Path == "" {
+ http.Error(w, "No path provided", 404)
+ return
+ }
+
+ switch r.Method {
+ case "DELETE":
+ o.removeObject(w, r)
+ case "PUT":
+ o.putObject(w, r)
+ case "POST":
+ o.completeMultipartUpload(w, r)
+ default:
+ w.WriteHeader(404)
+ }
+}
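
One subtlety: when the object path cannot be split into bucket and key, completeMultipartUpload answers with an S3-style InternalError document inside an HTTP 200 response, mirroring how real S3 can embed a CompleteMultipartUpload error in a 200 reply. A hypothetical sketch of exercising that branch:

func TestCompleteMultipartUploadMalformedPath(t *testing.T) {
	stub, ts := StartObjectStore()
	defer ts.Close()

	// "/no-bucket" has no bucket/key separator, triggering the InternalError answer.
	require.NoError(t, stub.InitiateMultipartUpload("/no-bucket"))

	body := strings.NewReader("<CompleteMultipartUpload></CompleteMultipartUpload>")
	resp, err := http.Post(ts.URL+"/no-bucket", "text/xml", body)
	require.NoError(t, err)
	defer resp.Body.Close()

	// The failure is reported in the body, not the status code.
	require.Equal(t, 200, resp.StatusCode)
}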
diff --git a/workhorse/internal/objectstore/test/objectstore_stub_test.go b/workhorse/internal/objectstore/test/objectstore_stub_test.go
new file mode 100644
index 00000000000..8c0d52a2d79
--- /dev/null
+++ b/workhorse/internal/objectstore/test/objectstore_stub_test.go
@@ -0,0 +1,167 @@
+package test
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func doRequest(method, url string, body io.Reader) error {
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ return err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+
+ return resp.Body.Close()
+}
+
+func TestObjectStoreStub(t *testing.T) {
+ stub, ts := StartObjectStore()
+ defer ts.Close()
+
+ require.Equal(t, 0, stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+
+ objectURL := ts.URL + ObjectPath
+
+ require.NoError(t, doRequest(http.MethodPut, objectURL, strings.NewReader(ObjectContent)))
+
+ require.Equal(t, 1, stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+ require.Equal(t, ObjectMD5, stub.GetObjectMD5(ObjectPath))
+
+ require.NoError(t, doRequest(http.MethodDelete, objectURL, nil))
+
+ require.Equal(t, 1, stub.PutsCnt())
+ require.Equal(t, 1, stub.DeletesCnt())
+}
+
+func TestObjectStoreStubDelete404(t *testing.T) {
+ stub, ts := StartObjectStore()
+ defer ts.Close()
+
+ objectURL := ts.URL + ObjectPath
+
+ req, err := http.NewRequest(http.MethodDelete, objectURL, nil)
+ require.NoError(t, err)
+
+ resp, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ require.Equal(t, 404, resp.StatusCode)
+
+ require.Equal(t, 0, stub.DeletesCnt())
+}
+
+func TestObjectStoreInitiateMultipartUpload(t *testing.T) {
+ stub, ts := StartObjectStore()
+ defer ts.Close()
+
+ path := "/my-multipart"
+ err := stub.InitiateMultipartUpload(path)
+ require.NoError(t, err)
+
+ err = stub.InitiateMultipartUpload(path)
+ require.Error(t, err, "second attempt to open the same MultipartUpload")
+}
+
+func TestObjectStoreCompleteMultipartUpload(t *testing.T) {
+ stub, ts := StartObjectStore()
+ defer ts.Close()
+
+ objectURL := ts.URL + ObjectPath
+ parts := []struct {
+ number int
+ content string
+ contentMD5 string
+ }{
+ {
+ number: 1,
+ content: "first part",
+ contentMD5: "550cf6b6e60f65a0e3104a26e70fea42",
+ }, {
+ number: 2,
+ content: "second part",
+ contentMD5: "920b914bca0a70780b40881b8f376135",
+ },
+ }
+
+ stub.InitiateMultipartUpload(ObjectPath)
+
+ require.True(t, stub.IsMultipartUpload(ObjectPath))
+ require.Equal(t, 0, stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+
+ // Workhorse knows nothing about S3 MultipartUpload: it receives a set of URLs
+ // from GitLab Rails and PUTs a chunk of data to each of them.
+ // Then it completes the upload with a final POST.
+ partPutURLs := []string{
+ fmt.Sprintf("%s?partNumber=%d", objectURL, 1),
+ fmt.Sprintf("%s?partNumber=%d", objectURL, 2),
+ }
+ completePostURL := objectURL
+
+ for i, partPutURL := range partPutURLs {
+ part := parts[i]
+
+ require.NoError(t, doRequest(http.MethodPut, partPutURL, strings.NewReader(part.content)))
+
+ require.Equal(t, i+1, stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+ require.Equal(t, part.contentMD5, stub.multipart[ObjectPath][part.number], "Part %d was not uploaded into ObjectStorage", part.number)
+ require.Empty(t, stub.GetObjectMD5(ObjectPath), "Part %d was mistakenly uploaded as a single object", part.number)
+ require.True(t, stub.IsMultipartUpload(ObjectPath), "MultipartUpload completed or aborted")
+ }
+
+ completeBody := fmt.Sprintf(`<CompleteMultipartUpload>
+ <Part>
+ <PartNumber>1</PartNumber>
+ <ETag>%s</ETag>
+ </Part>
+ <Part>
+ <PartNumber>2</PartNumber>
+ <ETag>%s</ETag>
+ </Part>
+ </CompleteMultipartUpload>`, parts[0].contentMD5, parts[1].contentMD5)
+ require.NoError(t, doRequest(http.MethodPost, completePostURL, strings.NewReader(completeBody)))
+
+ require.Equal(t, len(parts), stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+ require.False(t, stub.IsMultipartUpload(ObjectPath), "MultipartUpload is still in progress")
+}
+
+func TestObjectStoreAbortMultipartUpload(t *testing.T) {
+ stub, ts := StartObjectStore()
+ defer ts.Close()
+
+ stub.InitiateMultipartUpload(ObjectPath)
+
+ require.True(t, stub.IsMultipartUpload(ObjectPath))
+ require.Equal(t, 0, stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+
+ objectURL := ts.URL + ObjectPath
+ require.NoError(t, doRequest(http.MethodPut, fmt.Sprintf("%s?partNumber=%d", objectURL, 1), strings.NewReader(ObjectContent)))
+
+ require.Equal(t, 1, stub.PutsCnt())
+ require.Equal(t, 0, stub.DeletesCnt())
+ require.Equal(t, ObjectMD5, stub.multipart[ObjectPath][1], "Part was not uploaded into ObjectStorage")
+ require.Empty(t, stub.GetObjectMD5(ObjectPath), "Part was mistakenly uploaded as a single object")
+ require.True(t, stub.IsMultipartUpload(ObjectPath), "MultipartUpload completed or aborted")
+
+ require.NoError(t, doRequest(http.MethodDelete, objectURL, nil))
+
+ require.Equal(t, 1, stub.PutsCnt())
+ require.Equal(t, 1, stub.DeletesCnt())
+ require.Empty(t, stub.GetObjectMD5(ObjectPath), "MultipartUpload has been completed")
+ require.False(t, stub.IsMultipartUpload(ObjectPath), "MultipartUpload is still in progress")
+}
diff --git a/workhorse/internal/objectstore/test/s3_stub.go b/workhorse/internal/objectstore/test/s3_stub.go
new file mode 100644
index 00000000000..36514b3b887
--- /dev/null
+++ b/workhorse/internal/objectstore/test/s3_stub.go
@@ -0,0 +1,142 @@
+package test
+
+import (
+ "io/ioutil"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/stretchr/testify/require"
+
+ "gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
+
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+
+ "github.com/johannesboyne/gofakes3"
+ "github.com/johannesboyne/gofakes3/backend/s3mem"
+)
+
+func SetupS3(t *testing.T, encryption string) (config.S3Credentials, config.S3Config, *session.Session, *httptest.Server) {
+ return SetupS3WithBucket(t, "test-bucket", encryption)
+}
+
+func SetupS3WithBucket(t *testing.T, bucket string, encryption string) (config.S3Credentials, config.S3Config, *session.Session, *httptest.Server) {
+ backend := s3mem.New()
+ faker := gofakes3.New(backend)
+ ts := httptest.NewServer(faker.Server())
+
+ creds := config.S3Credentials{
+ AwsAccessKeyID: "YOUR-ACCESSKEYID",
+ AwsSecretAccessKey: "YOUR-SECRETACCESSKEY",
+ }
+
+ config := config.S3Config{
+ Bucket: bucket,
+ Endpoint: ts.URL,
+ Region: "eu-central-1",
+ PathStyle: true,
+ }
+
+ if encryption != "" {
+ config.ServerSideEncryption = encryption
+
+ if encryption == s3.ServerSideEncryptionAwsKms {
+ config.SSEKMSKeyID = "arn:aws:1234"
+ }
+ }
+
+ sess, err := session.NewSession(&aws.Config{
+ Credentials: credentials.NewStaticCredentials(creds.AwsAccessKeyID, creds.AwsSecretAccessKey, ""),
+ Endpoint: aws.String(ts.URL),
+ Region: aws.String(config.Region),
+ DisableSSL: aws.Bool(true),
+ S3ForcePathStyle: aws.Bool(true),
+ })
+ require.NoError(t, err)
+
+ // Create S3 service client
+ svc := s3.New(sess)
+
+ _, err = svc.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String(bucket),
+ })
+ require.NoError(t, err)
+
+ return creds, config, sess, ts
+}
+
+// S3ObjectExists fails the test if the object does not exist or its content does not match expectedBytes.
+func S3ObjectExists(t *testing.T, sess *session.Session, config config.S3Config, objectName string, expectedBytes string) {
+ downloadObject(t, sess, config, objectName, func(tmpfile *os.File, numBytes int64, err error) {
+ require.NoError(t, err)
+ require.Equal(t, int64(len(expectedBytes)), numBytes)
+
+ output, err := ioutil.ReadFile(tmpfile.Name())
+ require.NoError(t, err)
+
+ require.Equal(t, []byte(expectedBytes), output)
+ })
+}
+
+func CheckS3Metadata(t *testing.T, sess *session.Session, config config.S3Config, objectName string) {
+ // In a real S3 provider, s3crypto.NewDecryptionClient should probably be used
+ svc := s3.New(sess)
+ result, err := svc.GetObject(&s3.GetObjectInput{
+ Bucket: aws.String(config.Bucket),
+ Key: aws.String(objectName),
+ })
+ require.NoError(t, err)
+
+ if config.ServerSideEncryption != "" {
+ require.Equal(t, aws.String(config.ServerSideEncryption), result.ServerSideEncryption)
+
+ if config.ServerSideEncryption == s3.ServerSideEncryptionAwsKms {
+ require.Equal(t, aws.String(config.SSEKMSKeyID), result.SSEKMSKeyId)
+ } else {
+ require.Nil(t, result.SSEKMSKeyId)
+ }
+ } else {
+ require.Nil(t, result.ServerSideEncryption)
+ require.Nil(t, result.SSEKMSKeyId)
+ }
+}
+
+// S3ObjectDoesNotExist returns true if the object has been deleted,
+// false otherwise. The return signature is different from
+// S3ObjectExists because deletion may need to be retried: deferred
+// clean up calls may cause the actual deletion to happen after the
+// initial check.
+func S3ObjectDoesNotExist(t *testing.T, sess *session.Session, config config.S3Config, objectName string) bool {
+ deleted := false
+
+ downloadObject(t, sess, config, objectName, func(tmpfile *os.File, numBytes int64, err error) {
+ if err != nil && strings.Contains(err.Error(), "NoSuchKey") {
+ deleted = true
+ }
+ })
+
+ return deleted
+}
+
+func downloadObject(t *testing.T, sess *session.Session, config config.S3Config, objectName string, handler func(tmpfile *os.File, numBytes int64, err error)) {
+ tmpDir, err := ioutil.TempDir("", "workhorse-test-")
+ require.NoError(t, err)
+ defer os.Remove(tmpDir)
+
+ tmpfile, err := ioutil.TempFile(tmpDir, "s3-output")
+ require.NoError(t, err)
+ defer os.Remove(tmpfile.Name())
+
+ downloadSvc := s3manager.NewDownloader(sess)
+ numBytes, err := downloadSvc.Download(tmpfile, &s3.GetObjectInput{
+ Bucket: aws.String(config.Bucket),
+ Key: aws.String(objectName),
+ })
+
+ handler(tmpfile, numBytes, err)
+}
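
A sketch of how these S3 helpers compose in a test; the key "test-key" and the uploader round-trip are illustrative assumptions, not part of this change:

func TestS3StubRoundTrip(t *testing.T) {
	_, cfg, sess, ts := SetupS3(t, "")
	defer ts.Close()

	// Upload through the same faked endpoint the assertion will read from.
	uploader := s3manager.NewUploader(sess)
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(cfg.Bucket),
		Key:    aws.String("test-key"),
		Body:   strings.NewReader(ObjectContent),
	})
	require.NoError(t, err)

	S3ObjectExists(t, sess, cfg, "test-key", ObjectContent)
}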