Commit 824f842f authored by Alessio Caiazza

Merge branch 'sh-return-413-for-s3-artifact-uploads' into 'master'

Return 413 HTTP status for S3 uploads if max upload limit is reached

See merge request gitlab-org/gitlab-workhorse!655
parents 38a07d72 41bbe6ac
---
title: Return 413 HTTP status for S3 uploads if max upload limit is reached
merge_request: 655
author:
type: fixed
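Context for the diff below: the uploader now surfaces filestore.ErrEntityTooLarge when an S3 upload exceeds the configured limit, so the HTTP layer can answer with 413 instead of a generic 500. The handler wiring is not part of this diff; the following is a minimal, hypothetical sketch of that mapping (handleUploadError and errEntityTooLarge are illustrative names, not Workhorse's actual ones):

	package main

	import (
		"errors"
		"fmt"
		"net/http"
		"net/http/httptest"
	)

	// errEntityTooLarge stands in for filestore.ErrEntityTooLarge; the real
	// handler wiring is outside this merge request's diff.
	var errEntityTooLarge = errors.New("entity is too large")

	// handleUploadError shows the intended mapping: an upload exceeding the
	// configured limit becomes a 413 Request Entity Too Large.
	func handleUploadError(w http.ResponseWriter, err error) {
		if errors.Is(err, errEntityTooLarge) {
			http.Error(w, err.Error(), http.StatusRequestEntityTooLarge)
			return
		}
		http.Error(w, "upload failed", http.StatusInternalServerError)
	}

	func main() {
		rec := httptest.NewRecorder()
		handleUploadError(rec, errEntityTooLarge)
		fmt.Println(rec.Code) // 413
	}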
@@ -293,29 +293,62 @@ func TestSaveFile(t *testing.T) {
 }
 
 func TestSaveFileWithS3WorkhorseClient(t *testing.T) {
-	s3Creds, s3Config, sess, ts := test.SetupS3(t, "")
-	defer ts.Close()
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	remoteObject := "tmp/test-file/1"
-	opts := filestore.SaveFileOpts{
-		RemoteID:           "test-file",
-		Deadline:           testDeadline(),
-		UseWorkhorseClient: true,
-		RemoteTempObjectID: remoteObject,
-		ObjectStorageConfig: filestore.ObjectStorageConfig{
-			Provider:      "AWS",
-			S3Credentials: s3Creds,
-			S3Config:      s3Config,
-		},
-	}
-
-	_, err := filestore.SaveFileFromReader(ctx, strings.NewReader(test.ObjectContent), test.ObjectSize, &opts)
-	require.NoError(t, err)
-
-	test.S3ObjectExists(t, sess, s3Config, remoteObject, test.ObjectContent)
+	tests := []struct {
+		name        string
+		objectSize  int64
+		maxSize     int64
+		expectedErr error
+	}{
+		{
+			name:       "known size with no limit",
+			objectSize: test.ObjectSize,
+		},
+		{
+			name:       "unknown size with no limit",
+			objectSize: -1,
+		},
+		{
+			name:        "unknown object size with limit",
+			objectSize:  -1,
+			maxSize:     test.ObjectSize - 1,
+			expectedErr: filestore.ErrEntityTooLarge,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			s3Creds, s3Config, sess, ts := test.SetupS3(t, "")
+			defer ts.Close()
+
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			remoteObject := "tmp/test-file/1"
+			opts := filestore.SaveFileOpts{
+				RemoteID:           "test-file",
+				Deadline:           testDeadline(),
+				UseWorkhorseClient: true,
+				RemoteTempObjectID: remoteObject,
+				ObjectStorageConfig: filestore.ObjectStorageConfig{
+					Provider:      "AWS",
+					S3Credentials: s3Creds,
+					S3Config:      s3Config,
+				},
+				MaximumSize: tc.maxSize,
+			}
+
+			_, err := filestore.SaveFileFromReader(ctx, strings.NewReader(test.ObjectContent), tc.objectSize, &opts)
+
+			if tc.expectedErr == nil {
+				require.NoError(t, err)
+				test.S3ObjectExists(t, sess, s3Config, remoteObject, test.ObjectContent)
+			} else {
+				require.Equal(t, tc.expectedErr, err)
+				test.S3ObjectDoesNotExist(t, sess, s3Config, remoteObject)
+			}
+		})
+	}
 }
 
 func TestSaveFileWithAzureWorkhorseClient(t *testing.T) {
...
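The MaximumSize option exercised above depends on the upload pipeline detecting when a stream runs past the limit. The limit-enforcing reader itself is outside this diff; the following is a minimal sketch, assuming a wrapper reader that fails once more than n bytes have been read (limitReader and errEntityTooLarge are hypothetical names standing in for Workhorse's internals):

	package main

	import (
		"errors"
		"fmt"
		"io"
		"strings"
	)

	// errEntityTooLarge stands in for filestore.ErrEntityTooLarge.
	var errEntityTooLarge = errors.New("entity is too large")

	// limitReader fails once more than n bytes have been read; one plausible
	// way a MaximumSize check can surface the error mid-stream.
	type limitReader struct {
		r io.Reader
		n int64 // bytes remaining before the limit is exceeded
	}

	func (l *limitReader) Read(p []byte) (int, error) {
		n, err := l.r.Read(p)
		l.n -= int64(n)
		if l.n < 0 {
			return n, errEntityTooLarge
		}
		return n, err
	}

	func main() {
		r := &limitReader{r: strings.NewReader("0123456789"), n: 4}
		_, err := io.Copy(io.Discard, r)
		fmt.Println(err) // entity is too large
	}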
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"gitlab.com/gitlab-org/labkit/log"
@@ -63,7 +64,8 @@ func (s *S3Object) Upload(ctx context.Context, r io.Reader) error {
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {
 		log.WithError(err).Error("error uploading S3 session")
-		return err
+		// Get the root cause, such as ErrEntityTooLarge, so we can return the proper HTTP status code
+		return unwrapAWSError(err)
 	}
 
 	s.uploaded = true
@@ -106,3 +108,12 @@ func (s *S3Object) Delete() {
 		log.WithError(err).Error("error deleting S3 object", err)
 	}
 }
+
+// This is needed until https://github.com/aws/aws-sdk-go/issues/2820 is closed.
+func unwrapAWSError(e error) error {
+	if awsErr, ok := e.(awserr.Error); ok {
+		return unwrapAWSError(awsErr.OrigErr())
+	}
+
+	return e
+}
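To see why the recursion in unwrapAWSError matters: the AWS SDK can nest several awserr layers around the original error, so a single unwrap is not enough. A small self-contained demonstration (the error codes used here are illustrative, not the SDK's exact ones):

	package main

	import (
		"errors"
		"fmt"

		"github.com/aws/aws-sdk-go/aws/awserr"
	)

	// unwrapAWSError mirrors the helper added in the diff above.
	func unwrapAWSError(e error) error {
		if awsErr, ok := e.(awserr.Error); ok {
			return unwrapAWSError(awsErr.OrigErr())
		}
		return e
	}

	func main() {
		root := errors.New("entity is too large")
		// The SDK may wrap the original error in multiple awserr layers.
		wrapped := awserr.New("MultipartUpload", "upload multipart failed",
			awserr.New("ReadRequestBody", "read failed", root))

		fmt.Println(unwrapAWSError(wrapped)) // entity is too large
	}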
@@ -3,6 +3,7 @@ package objectstore_test
 import (
 	"context"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -11,6 +12,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/stretchr/testify/require"
@@ -21,6 +23,15 @@ import (
 	"gitlab.com/gitlab-org/gitlab-workhorse/internal/testhelper"
 )
 
+type failedReader struct {
+	io.Reader
+}
+
+func (r *failedReader) Read(p []byte) (int, error) {
+	origErr := fmt.Errorf("entity is too large")
+	return 0, awserr.New("Read", "read failed", origErr)
+}
+
 func TestS3ObjectUpload(t *testing.T) {
 	testCases := []struct {
 		encryption string
@@ -141,4 +152,23 @@ func TestS3ObjectUploadCancel(t *testing.T) {
 	_, err = object.Consume(ctx, strings.NewReader(test.ObjectContent), deadline)
 	require.Error(t, err)
 	require.Equal(t, "context canceled", err.Error())
 }
+
+func TestS3ObjectUploadLimitReached(t *testing.T) {
+	creds, config, _, ts := test.SetupS3(t, "")
+	defer ts.Close()
+
+	deadline := time.Now().Add(testTimeout)
+	tmpDir, err := ioutil.TempDir("", "workhorse-test-")
+	require.NoError(t, err)
+	defer os.Remove(tmpDir)
+
+	objectName := filepath.Join(tmpDir, "s3-test-data")
+	object, err := objectstore.NewS3Object(objectName, creds, config)
+	require.NoError(t, err)
+
+	_, err = object.Consume(context.Background(), &failedReader{}, deadline)
+	require.Error(t, err)
+	require.Equal(t, "entity is too large", err.Error())
+}
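Taken together: failedReader simulates the SDK behavior where a reader's failure reaches the uploader wrapped in awserr layers, and the assertion on err.Error() confirms that Consume now surfaces the root cause ("entity is too large") rather than the awserr wrapper. That is what lets callers match on filestore.ErrEntityTooLarge and answer with a 413.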