Commit 6ab17e66 authored by allison.browne's avatar allison.browne

Updates based on code review

Break apart services and functions into
smaller pieces for attachment uploads
parent 375e1bc3
# frozen_string_literal: true

# Finds and instantiates a FileUploader for a stored upload.
#
# A FileUploader can be opened via .open agnostic of storage type.
# Arguments correspond to Upload.secret, Upload.model_type and Upload.file_path.
class UploaderFinder
  def initialize(project, secret, file_path)
    @project = project
    @secret = secret
    @file_path = file_path
  end

  # Validates the path, loads the file into the uploader and returns a
  # FileUploader with the uploaded file retrieved into the object state.
  def execute
    prevent_path_traversal_attack!
    retrieve_file_state!

    uploader
  end

  # Raises if @file_path attempts to escape the uploads directory.
  def prevent_path_traversal_attack!
    Gitlab::Utils.check_path_traversal!(@file_path)
  end

  # Loads the file from its storage backend into the uploader.
  def retrieve_file_state!
    uploader.retrieve_from_store!(@file_path)
  end

  def uploader
    @uploader ||= FileUploader.new(@project, secret: @secret)
  end
end
......@@ -72,7 +72,7 @@ The incident detail page shows detailed information about a particular incident.
- Status on the incident, including when the incident was last updated.
- The incident title, including any emojis.
- The description of the incident, including emojis.
- Any file attachments that are of an image type. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/205166) in GitLab 13.0.
- Any file attachments provided in the incident description or comments with a valid image extension. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/205166) in GitLab 13.1.
- A chronologically ordered list of updates to the incident.
![Status Page detail](../img/status_page_detail_v12_10.png)
......@@ -112,9 +112,11 @@ To change the incident status from `open` to `closed`, close the incident issue
## Attachment storage
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/205166) in GitLab 13.0.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/205166) in GitLab 13.1.
Starting with GitLab 13.0, any file that is attached to incident issue descriptions or comments will be published and unpublished to the status page storage, as part of the publication flow described in the [How it works](#how-it-works) section.
Beginning with GitLab 13.1, files attached to incident issue descriptions or
comments are published and unpublished to the status page storage as part of
the [publication flow](#how-it-works).
### Limit
......
# frozen_string_literal: true

module StatusPage
  # Publishes file attachments referenced in an incident's description
  # and comments to the status page storage.
  class PublishAttachmentsService < PublishBaseService
    private

    def process(issue, user_notes)
      # Seed the running total with what is already stored so the
      # MAX_IMAGE_UPLOADS cap applies to the bucket contents as a whole.
      # The count is threaded through each publishing step; previously the
      # increments made while publishing the description were lost before
      # the user-note pass, allowing the cap to be exceeded.
      total_uploads = existing_keys(issue).size
      total_uploads = publish_description_attachments(issue, total_uploads)
      publish_user_note_attachments(issue, total_uploads, user_notes)

      success
    end

    # Publishes uploads found in the issue description.
    # Returns the updated running upload count.
    def publish_description_attachments(issue, total_uploads)
      publish_markdown_uploads(
        markdown_field: issue.description,
        issue_iid: issue.iid,
        total_uploads: total_uploads
      )
    end

    # Publishes uploads found in each user note, carrying the running
    # count across notes. Returns the updated running upload count.
    def publish_user_note_attachments(issue, total_uploads, user_notes)
      user_notes.each do |user_note|
        total_uploads = publish_markdown_uploads(
          markdown_field: user_note.note,
          issue_iid: issue.iid,
          total_uploads: total_uploads
        )
      end

      total_uploads
    end

    # Object keys currently present under the issue's uploads path.
    # Memoized: the first call (with +issue+) fetches; later calls reuse.
    def existing_keys(issue = nil)
      strong_memoize(:existing_keys) do
        storage_client.list_object_keys(
          uploads_path(issue)
        )
      end
    end

    def uploads_path(issue)
      StatusPage::Storage.uploads_path(issue.iid)
    end

    # Scans +markdown_field+ for FileUploader markdown references and
    # uploads any attachment not already in storage, stopping once
    # StatusPage::Storage::MAX_IMAGE_UPLOADS is reached.
    # Returns the updated running upload count.
    def publish_markdown_uploads(markdown_field:, issue_iid:, total_uploads:)
      markdown_field.scan(FileUploader::MARKDOWN_PATTERN).each do |secret, file_name|
        break if total_uploads >= StatusPage::Storage::MAX_IMAGE_UPLOADS

        key = StatusPage::Storage.upload_path(issue_iid, secret, file_name)

        next if existing_keys.include? key

        uploader = UploaderFinder.new(@project, secret, file_name).execute

        uploader.open do |open_file|
          storage_client.multipart_upload(key, open_file)
          total_uploads += 1
        end
      end

      total_uploads
    end
  end
end
......@@ -11,8 +11,8 @@ module StatusPage
private
def process(issue, user_notes)
publish_json_response = publish_json(issue, user_notes)
return publish_json_response if publish_json_response.error?
response = publish_json(issue, user_notes)
return response if response.error?
publish_images(issue, user_notes)
......@@ -40,12 +40,11 @@ module StatusPage
StatusPage::Storage.details_path(id)
end
# Publish Images
def publish_images(issue, user_notes)
existing_image_keys = storage_client.list_object_keys(StatusPage::Storage.uploads_path(issue.iid))
# Send all description images to s3
total_uploads = existing_image_keys.size
# Send all description file attachments to s3
publish_markdown_uploads(
markdown_field: issue.description,
issue_iid: issue.iid,
......@@ -53,7 +52,7 @@ module StatusPage
total_uploads: total_uploads
)
# Send all comment images to s3
# Send all comment file attachments to s3
user_notes.each do |user_note|
publish_markdown_uploads(
markdown_field: user_note.note,
......
......@@ -4,8 +4,7 @@ module StatusPage
module Storage
# Implements a minimal AWS S3 client.
class S3Client
# 5 megabytes is the minimum part size specified in the amazon SDK
MULTIPART_UPLOAD_PART_SIZE = 5.megabytes
include StatusPage::Storage::S3Helpers
def initialize(region:, bucket_name:, access_key_id:, secret_access_key:)
@bucket_name = bucket_name
......@@ -71,78 +70,18 @@ module StatusPage
# key: s3 key at which file is stored
# file: An open file or file-like io object
def multipart_upload(key, file)
# AWS sdk v2 has upload_file which supports multipart
# However Gitlab::HttpIO used when object storage is enabled
# cannot be used with upload_file
wrap_errors(key: key) do
upload_id = client.create_multipart_upload({ bucket: bucket_name, key: key }).to_h[:upload_id]
begin
parts = upload_in_parts(key, file, upload_id)
complete_multipart_upload(key, upload_id, parts)
# Rescue on Exception since even on keyboard inturrupt we want to abort the upload and re-raise
rescue
# Abort clears the already uploaded parts so that they do not cost the bucket owner
abort_multipart_upload(key, upload_id)
raise
end
end
StatusPage::Storage::S3Client.new(
client: client, bucket_name: bucket_name, key: key, open_file: file
).call
end
private
attr_reader :client, :bucket_name
# Uploads +file+ in MULTIPART_UPLOAD_PART_SIZE chunks as parts of the
# multipart upload identified by +upload_id+.
# Returns the part descriptors needed to complete the upload.
def upload_in_parts(key, file, upload_id)
  file.seek(0)

  uploaded_parts = []
  until file.eof?
    number = uploaded_parts.size + 1 # part numbers are 1-based (required by S3)
    response = client.upload_part(
      body: file.read(MULTIPART_UPLOAD_PART_SIZE),
      bucket: bucket_name,
      key: key,
      part_number: number,
      upload_id: upload_id
    )
    uploaded_parts << response.to_h.merge(part_number: number)
  end

  uploaded_parts
end
# Finalizes the multipart upload identified by +upload_id+ using the
# collected +parts+ descriptors.
def complete_multipart_upload(key, upload_id, parts)
  client.complete_multipart_upload(
    bucket: bucket_name,
    key: key,
    multipart_upload: { parts: parts },
    upload_id: upload_id
  )
end
# Aborts an in-progress multipart upload so already-uploaded parts are
# discarded and do not accrue storage cost for the bucket owner.
# No-op when +upload_id+ is nil (the upload was never created).
def abort_multipart_upload(key, upload_id)
  return unless upload_id

  client.abort_multipart_upload(
    bucket: bucket_name,
    key: key,
    upload_id: upload_id
  )
end
# Lists objects under +prefix+, paging by MAX_KEYS_PER_PAGE.
def list_objects(prefix)
  client.list_objects_v2(
    bucket: bucket_name,
    prefix: prefix,
    max_keys: StatusPage::Storage::MAX_KEYS_PER_PAGE
  )
end
# Runs the block, translating any AWS service error into a
# StatusPage::Storage::Error that carries the bucket plus +args+ context.
def wrap_errors(**args)
  yield
rescue Aws::Errors::ServiceError => service_error
  raise Error, bucket: bucket_name, error: service_error, **args
end
end
end
end
# frozen_string_literal: true

module StatusPage
  module Storage
    # Shared error handling for S3-backed storage classes.
    #
    # Including classes must expose +bucket_name+; errors are re-raised as
    # StatusPage::Storage::Error with request context attached.
    module S3Helpers
      # Runs the given block, translating AWS service errors.
      def wrap_errors(**args)
        begin
          yield
        rescue Aws::Errors::ServiceError => e
          raise Error, bucket: bucket_name, error: e, **args
        end
      end
    end
  end
end
# frozen_string_literal: true

module StatusPage
  module Storage
    # Implements multipart upload in S3.
    class S3MultipartUpload
      include StatusPage::Storage::S3Helpers

      # 5 megabytes is the minimum part size specified in the amazon SDK
      MULTIPART_UPLOAD_PART_SIZE = 5.megabytes

      # client: an Aws::S3::Client
      # bucket_name: target S3 bucket
      # key: s3 key at which file is stored
      # open_file: an open file or file-like io object
      def initialize(client:, bucket_name:, key:, open_file:)
        @client = client
        @bucket_name = bucket_name
        @key = key
        @file = open_file
      end

      # Stores +file+ as +key+ in storage using multipart upload.
      #
      # AWS sdk v2 has upload_file which supports multipart.
      # However Gitlab::HttpIO, used when object storage is enabled,
      # cannot be used with upload_file, so parts are managed manually.
      def call
        wrap_errors(key: key) do
          upload_id = create.to_h[:upload_id]

          begin
            parts = upload_part(upload_id)
            complete(upload_id, parts)
          # Rescue Exception since even on keyboard interrupt we want to
          # abort the upload and re-raise: aborting clears the already
          # uploaded parts so that they do not cost the bucket owner.
          rescue Exception # rubocop:disable Lint/RescueException
            abort(upload_id)
            raise
          end
        end
      end

      private

      attr_reader :key, :file, :client, :bucket_name

      # Initiates the upload session; the response's +upload_id+
      # identifies it in all subsequent calls.
      def create
        client.create_multipart_upload({ bucket: bucket_name, key: key })
      end

      # Uploads the file in MULTIPART_UPLOAD_PART_SIZE chunks.
      # Returns the part descriptors required by #complete.
      def upload_part(upload_id)
        parts = []
        part_number = 1
        file.seek(0)

        until file.eof?
          part = client.upload_part({
            body: file.read(MULTIPART_UPLOAD_PART_SIZE),
            bucket: bucket_name,
            key: key,
            part_number: part_number, # required
            upload_id: upload_id
          })
          parts << part.to_h.merge(part_number: part_number)
          part_number += 1
        end

        parts
      end

      # Finalizes the upload from the uploaded +parts+.
      def complete(upload_id, parts)
        client.complete_multipart_upload({
          bucket: bucket_name,
          key: key,
          multipart_upload: {
            parts: parts
          },
          upload_id: upload_id
        })
      end

      # Aborts the upload so partial parts are discarded. Guarded against
      # a nil +upload_id+ (session never established), which would
      # otherwise send an invalid abort request to AWS.
      def abort(upload_id)
        return unless upload_id

        client.abort_multipart_upload(
          bucket: bucket_name,
          key: key,
          upload_id: upload_id
        )
      end
    end
  end
end
......@@ -3,12 +3,12 @@
require 'spec_helper'
describe StatusPage::Storage::S3Client, :aws_s3 do
let!(:region) { 'eu-west-1' }
let!(:bucket_name) { 'bucket_name' }
let!(:access_key_id) { 'key_id' }
let!(:secret_access_key) { 'secret' }
let(:region) { 'eu-west-1' }
let(:bucket_name) { 'bucket_name' }
let(:access_key_id) { 'key_id' }
let(:secret_access_key) { 'secret' }
let!(:client) do
let(:client) do
described_class.new(
region: region, bucket_name: bucket_name, access_key_id: access_key_id,
secret_access_key: secret_access_key
......@@ -160,72 +160,6 @@ describe StatusPage::Storage::S3Client, :aws_s3 do
end
end
# Legacy spec for S3Client#multipart_upload (logic later extracted to
# StatusPage::Storage::S3MultipartUpload).
describe 'multipart_upload' do
  let(:key) { '123' }
  let(:file) { Tempfile.new('foo') }
  let(:upload_id) { '123456789' }
  # Reach into the wrapper to stub the underlying Aws::S3::Client.
  let(:s3_client) { client.instance_variable_get(:@client) }

  subject(:result) { client.multipart_upload(key, file) }

  before do
    file.open
    file.write('hello world')
    file.rewind
    # NOTE(review): the stub uses CompleteMultipartUploadOutput for the
    # create_multipart_upload response; CreateMultipartUploadOutput looks
    # intended — confirm (only #to_h is exercised, so the test still runs).
    allow(s3_client).to receive(:create_multipart_upload).and_return(
      instance_double(Aws::S3::Types::CompleteMultipartUploadOutput, { to_h: { upload_id: upload_id } })
    )
  end

  after do
    file.close
  end

  # NOTE(review): typo "sucessful" in the context description.
  context 'when sucessful' do
    before do
      stub_responses(
        :upload_part,
        instance_double(Aws::S3::Types::UploadPartOutput, to_h: {})
      )
    end

    it 'completes' do
      expect(s3_client).to receive(:complete_multipart_upload)

      result
    end

    context 'with more than one part' do
      before do
        # Shrink the part size so 'hello world' spans multiple parts.
        stub_const("#{described_class}::MULTIPART_UPLOAD_PART_SIZE", 1.byte)
      end

      it 'completes' do
        # Ensure size limit triggers more than one part upload
        expect(s3_client).to receive(:upload_part).at_least(:twice)
        expect(s3_client).to receive(:complete_multipart_upload)

        result
      end
    end
  end

  context 'when failed' do
    let(:aws_error) { 'SomeError' }

    before do
      stub_responses(:upload_part, aws_error)
    end

    it 'raises an error' do
      # The upload must be aborted so partial parts don't accrue cost.
      expect(s3_client).to receive(:abort_multipart_upload)

      msg = error_message(aws_error, key: key)
      expect { result }.to raise_error(StatusPage::Storage::Error, msg)
    end
  end
end
private
def stub_responses(*args)
......
# frozen_string_literal: true

require 'spec_helper'

describe StatusPage::Storage::S3MultipartUpload, :aws_s3 do
  let(:region) { 'eu-west-1' }
  let(:bucket_name) { 'bucket_name' }
  let(:access_key_id) { 'key_id' }
  let(:secret_access_key) { 'secret' }

  let(:s3_client) do
    Aws::S3::Client.new(
      region: region,
      credentials: Aws::Credentials.new(access_key_id, secret_access_key)
    )
  end

  describe 'multipart_upload' do
    let(:key) { '123' }
    let(:file) { Tempfile.new('foo') }
    let(:upload_id) { '123456789' }

    subject(:result) { described_class.new(client: s3_client, bucket_name: bucket_name, key: key, open_file: file).call }

    before do
      file.open
      file.write('hello world')
      file.rewind

      stub_responses(
        :create_multipart_upload,
        instance_double(Aws::S3::Types::CreateMultipartUploadOutput, { to_h: { upload_id: upload_id } })
      )
    end

    after do
      file.close
    end

    # Typo fixed: 'sucessful' -> 'successful' (display name only).
    context 'when successful' do
      before do
        stub_responses(
          :upload_part,
          instance_double(Aws::S3::Types::UploadPartOutput, to_h: {})
        )
      end

      it 'completes' do
        expect(s3_client).to receive(:complete_multipart_upload)

        result
      end

      context 'with more than one part' do
        before do
          # Shrink the part size so 'hello world' spans multiple parts.
          stub_const("#{described_class}::MULTIPART_UPLOAD_PART_SIZE", 1.byte)
        end

        it 'completes' do
          # Ensure size limit triggers more than one part upload
          expect(s3_client).to receive(:upload_part).at_least(:twice)
          expect(s3_client).to receive(:complete_multipart_upload)

          result
        end
      end
    end

    context 'when fails' do
      let(:aws_error) { 'SomeError' }

      context 'on upload part' do
        before do
          stub_responses(:upload_part, aws_error)
        end

        it 'raises an error' do
          # A failed part upload must abort the whole multipart upload.
          expect(s3_client).to receive(:abort_multipart_upload)

          msg = error_message(aws_error, key: key)
          expect { result }.to raise_error(StatusPage::Storage::Error, msg)
        end
      end

      context 'on complete_multipart_upload' do
        before do
          stub_responses(:upload_part, {})
          stub_responses(:complete_multipart_upload, aws_error)
        end

        it 'raises an error' do
          expect(s3_client).to receive(:abort_multipart_upload)

          msg = error_message(aws_error, key: key)
          expect { result }.to raise_error(StatusPage::Storage::Error, msg)
        end
      end
    end
  end

  private

  def stub_responses(*args)
    s3_client.stub_responses(*args)
  end

  # NOTE(review): "occured" appears to mirror the message raised by
  # StatusPage::Storage::Error — confirm, and fix both together if the
  # production message is ever corrected.
  def error_message(error_class, **args)
    %{Error occured "Aws::S3::Errors::#{error_class}" } \
      "for bucket #{bucket_name.inspect}. Arguments: #{args.inspect}"
  end
end
# frozen_string_literal: true

require 'spec_helper'

describe StatusPage::PublishAttachmentsService do
  let_it_be(:project, refind: true) { create(:project) }

  let(:markdown_field) { 'Hello World' }
  let(:user_notes) { [] }
  let(:incident_id) { 1 }
  let(:issue) { instance_double(Issue, notes: user_notes, description: markdown_field, iid: incident_id) }
  let(:key) { StatusPage::Storage.details_path(incident_id) }
  let(:content) { { id: incident_id } }
  let(:storage_client) { instance_double(StatusPage::Storage::S3Client) }
  let(:status_page_setting_enabled) { true }

  let(:status_page_setting) do
    instance_double(StatusPage::ProjectSetting, enabled?: status_page_setting_enabled,
                    storage_client: storage_client)
  end

  let(:service) { described_class.new(project: project) }

  subject(:result) { service.execute(issue, user_notes) }

  before do
    allow(project).to receive(:status_page_setting)
      .and_return(status_page_setting)
  end

  describe '#execute' do
    context 'publishes file attachments' do
      before do
        stub_licensed_features(status_page: true)
        allow(storage_client).to receive(:upload_object).with("data/incident/1.json", "{\"id\":1}")
        allow(storage_client).to receive(:list_object_keys).and_return(Set.new)
      end

      context 'when not in markdown' do
        it 'publishes no images' do
          expect(storage_client).not_to receive(:multipart_upload)

          expect(result.payload).to eq({})
          expect(result).to be_success
        end
      end

      context 'when in markdown' do
        let(:upload_secret) { '734b8524a16d44eb0ff28a2c2e4ff3c0' }
        let(:image_file_name) { 'tanuki.png' }
        let(:upload_path) { "/uploads/#{upload_secret}/#{image_file_name}" }
        let(:markdown_field) { "![tanuki](#{upload_path})" }
        let(:status_page_upload_path) { StatusPage::Storage.upload_path(issue.iid, upload_secret, image_file_name) }
        let(:user_notes) { [] }

        let(:open_file) { instance_double(File, read: 'stubbed read') }
        let(:uploader) { instance_double(FileUploader) }

        before do
          # `.twice` was dropped here: RSpec does not verify receive counts
          # declared on `allow`, so it only suggested a guarantee that never held.
          allow(uploader).to receive(:open).and_yield(open_file)

          allow_next_instance_of(UploaderFinder) do |finder|
            allow(finder).to receive(:execute).and_return(uploader)
          end

          allow(storage_client).to receive(:list_object_keys).and_return(Set[])
          allow(storage_client).to receive(:upload_object)
        end

        it 'publishes description images' do
          expect(storage_client).to receive(:multipart_upload).with(status_page_upload_path, open_file).once

          expect(result).to be_success
          expect(result.payload).to eq({})
        end

        context 'user notes uploads' do
          let(:user_note) { instance_double(Note, note: markdown_field) }
          let(:user_notes) { [user_note] }
          let(:issue) { instance_double(Issue, notes: user_notes, description: '', iid: incident_id) }

          it 'publishes images' do
            expect(storage_client).to receive(:multipart_upload).with(status_page_upload_path, open_file).once

            expect(result).to be_success
            expect(result.payload).to eq({})
          end
        end

        context 'when exceeds upload limit' do
          before do
            # With a cap of 1 and one key already stored, nothing may upload.
            stub_const("StatusPage::Storage::MAX_IMAGE_UPLOADS", 1)
            allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
          end

          it 'publishes no images' do
            expect(storage_client).not_to receive(:multipart_upload)

            expect(result).to be_success
            expect(result.payload).to eq({})
          end
        end

        context 'when all images are in s3' do
          before do
            allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
          end

          it 'publishes no images' do
            expect(storage_client).not_to receive(:multipart_upload)

            expect(result).to be_success
            expect(result.payload).to eq({})
          end
        end

        context 'when images are already in s3' do
          let(:upload_secret_2) { '9cb61a79ce884d5b681dd42728d3c159' }
          let(:image_file_name_2) { 'tanuki_2.png' }
          let(:upload_path_2) { "/uploads/#{upload_secret_2}/#{image_file_name_2}" }
          let(:markdown_field) { "![tanuki](#{upload_path}) and ![tanuki_2](#{upload_path_2})" }
          let(:status_page_upload_path_2) { StatusPage::Storage.upload_path(issue.iid, upload_secret_2, image_file_name_2) }

          before do
            allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
          end

          it 'publishes only new images' do
            expect(storage_client).to receive(:multipart_upload).with(status_page_upload_path_2, open_file).once
            expect(storage_client).not_to receive(:multipart_upload).with(status_page_upload_path, open_file)

            expect(result).to be_success
            expect(result.payload).to eq({})
          end
        end
      end
    end
  end
end
......@@ -26,8 +26,8 @@ describe StatusPage::PublishService do
describe 'publish details' do
context 'when upload succeeds' do
it 'uploads incident details and list' do
stub_service_called(StatusPage::PublishDetailsService, error?: false, success?: true)
stub_service_called(StatusPage::PublishListService, error?: false, success?: true)
expect_to_publish_details(error?: false, success?: true)
expect_to_publish_list(error?: false, success?: true)
expect(result).to be_success
end
......@@ -47,7 +47,7 @@ describe StatusPage::PublishService do
context 'when unpublish succeeds' do
it 'unpublishes incident details and uploads incident list' do
stub_service_called(StatusPage::UnpublishDetailsService, error?: false, success?: true)
expect_to_unpublish(error?: false, success?: true)
expect_to_upload_list
expect(result).to be_success
......@@ -56,7 +56,7 @@ describe StatusPage::PublishService do
context 'when unpublish service responses with error' do
it 'returns the response' do
response = stub_service_called(StatusPage::UnpublishDetailsService, error?: true)
response = expect_to_unpublish(error?: true)
expect(result).to be(response)
end
......@@ -66,8 +66,8 @@ describe StatusPage::PublishService do
describe 'publish list' do
context 'when upload fails' do
it 'returns error response' do
stub_service_called(StatusPage::PublishDetailsService, error?: false, success?: true)
stub_service_called(StatusPage::PublishListService, error?: true, success?: false)
expect_to_publish_details(error?: false, success?: true)
expect_to_publish_list(error?: true, success?: false)
expect(result.error?).to be(true)
end
......@@ -95,6 +95,18 @@ describe StatusPage::PublishService do
private
# Stubs StatusPage::UnpublishDetailsService and returns the stubbed response.
def expect_to_unpublish(**response_kwargs)
  stub_service_called(StatusPage::UnpublishDetailsService, **response_kwargs)
end

# Stubs StatusPage::PublishDetailsService and returns the stubbed response.
def expect_to_publish_details(**response_kwargs)
  stub_service_called(StatusPage::PublishDetailsService, **response_kwargs)
end

# Stubs StatusPage::PublishListService and returns the stubbed response.
def expect_to_publish_list(**response_kwargs)
  stub_service_called(StatusPage::PublishListService, **response_kwargs)
end
def stub_service_called(klass, **response_kwargs)
service_response = double(**response_kwargs)
expect_next_instance_of(klass) do |service|
......
......@@ -17,8 +17,9 @@ describe UploaderFinder do
it 'gets the uploader' do
allow_next_instance_of(FileUploader) do |uploader|
expect(uploader).to receive(:retrieve_from_store!).with(upload.path).and_return(uploader)
allow(uploader).to receive(:retrieve_from_store!).with(upload.path).and_return(uploader)
end
expect(subject).to be_an_instance_of(FileUploader)
expect(subject.model).to eq(project)
expect(subject.secret).to eq(secret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment