Commit c967cf3e authored by allison.browne

Updates based on code review

Break apart services and functions into
smaller pieces for attachment uploads
parent 6ab17e66
# frozen_string_literal: true
module StatusPage
# Publishes attachments from incident comments and descriptions to S3
# Should only be called from PublishDetailsService
class PublishAttachmentsService < PublishBaseService
private
@@ -44,7 +46,7 @@ module StatusPage
def publish_markdown_uploads(markdown_field:, issue_iid:, total_uploads:)
markdown_field.scan(FileUploader::MARKDOWN_PATTERN).map do |secret, file_name|
-      break if total_uploads >= StatusPage::Storage::MAX_IMAGE_UPLOADS
+      break if total_uploads >= StatusPage::Storage::MAX_UPLOADS
key = StatusPage::Storage.upload_path(issue_iid, secret, file_name)
......
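
As an aside on the hunk above: `scan` with a grouped regexp yields one `[secret, file_name]` pair per upload reference in the markdown, and each pair becomes an object key. A minimal runnable sketch, assuming a pattern only roughly shaped like GitLab's real `FileUploader::MARKDOWN_PATTERN` (the regexp and the simplified key format below are illustrative assumptions, not the real constants):

```ruby
# Hypothetical stand-in for FileUploader::MARKDOWN_PATTERN (assumption):
# matches ![alt](/uploads/<32-hex-secret>/<file>).
MARKDOWN_PATTERN = %r{!?\[.*?\]\(/uploads/(\h{32})/([^)]+)\)}

markdown = "![tanuki](/uploads/734b8524a16d44eb0ff28a2c2e4ff3c0/tanuki.png)"

markdown.scan(MARKDOWN_PATTERN).each do |secret, file_name|
  # The service builds the key via StatusPage::Storage.upload_path;
  # this simplified format is illustrative only.
  key = "uploads/#{secret}/#{file_name}"
  puts key # => uploads/734b8524a16d44eb0ff28a2c2e4ff3c0/tanuki.png
end
```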
@@ -14,13 +14,11 @@ module StatusPage
response = publish_json(issue, user_notes)
return response if response.error?
-      publish_images(issue, user_notes)
+      publish_attachments(issue, user_notes)
success
end
# Publish Json
def publish_json(issue, user_notes)
json = serialize(issue, user_notes)
key = json_object_key(json)
@@ -40,43 +38,8 @@ module StatusPage
StatusPage::Storage.details_path(id)
end
-    def publish_images(issue, user_notes)
-      existing_image_keys = storage_client.list_object_keys(StatusPage::Storage.uploads_path(issue.iid))
-      total_uploads = existing_image_keys.size
-
-      # Send all description file attachments to s3
-      publish_markdown_uploads(
-        markdown_field: issue.description,
-        issue_iid: issue.iid,
-        existing_image_keys: existing_image_keys,
-        total_uploads: total_uploads
-      )
-
-      # Send all comment file attachments to s3
-      user_notes.each do |user_note|
-        publish_markdown_uploads(
-          markdown_field: user_note.note,
-          issue_iid: issue.iid,
-          existing_image_keys: existing_image_keys,
-          total_uploads: total_uploads
-        )
-      end
-    end
-
-    def publish_markdown_uploads(markdown_field:, issue_iid:, existing_image_keys:, total_uploads:)
-      markdown_field.scan(FileUploader::MARKDOWN_PATTERN).map do |secret, file_name|
-        break if total_uploads >= StatusPage::Storage::MAX_IMAGE_UPLOADS
-
-        key = StatusPage::Storage.upload_path(issue_iid, secret, file_name)
-
-        next if existing_image_keys.include? key
-
-        uploader = UploaderFinder.new(@project, secret, file_name).execute
-
-        uploader.open do |open_file|
-          storage_client.multipart_upload(key, open_file)
-          total_uploads += 1
-        end
-      end
-    end
+    def publish_attachments(issue, user_notes)
+      StatusPage::PublishAttachmentsService.new(project: @project).execute(issue, user_notes)
+    end
end
end
@@ -11,7 +11,7 @@ module StatusPage
# Limit on paginated responses
MAX_KEYS_PER_PAGE = 1_000
MAX_PAGES = 5
-    MAX_IMAGE_UPLOADS = MAX_KEYS_PER_PAGE * MAX_PAGES
+    MAX_UPLOADS = MAX_KEYS_PER_PAGE * MAX_PAGES
def self.details_path(id)
"data/incident/#{id}.json"
......
@@ -58,7 +58,7 @@ module StatusPage
def list_object_keys(prefix)
wrap_errors(prefix: prefix) do
list_objects(prefix).reduce(Set.new) do |objects, (response, index)|
-          break objects if objects.size >= StatusPage::Storage::MAX_IMAGE_UPLOADS
+          break objects if objects.size >= StatusPage::Storage::MAX_UPLOADS
objects | response.contents.map(&:key)
end
......
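
One note on the `reduce` idiom above: `break objects` exits the fold early and returns the set accumulated so far, which is how the key listing is capped at `MAX_UPLOADS`. A self-contained sketch with fabricated pages (note the limit is checked before each merge, so the last page merged can overshoot it slightly):

```ruby
require 'set'

MAX_UPLOADS = 5 # stand-in for StatusPage::Storage::MAX_UPLOADS

# Fabricated stand-ins for paginated list_objects responses.
pages = [%w[a b c], %w[d e f], %w[g h i]]

keys = pages.each_with_index.reduce(Set.new) do |objects, (page, _index)|
  # Stop paginating once the cap is reached, keeping what we have.
  break objects if objects.size >= MAX_UPLOADS

  objects | page
end

puts keys.size # => 6: pages one and two merged, page three never read
```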
@@ -24,14 +24,14 @@ module StatusPage
# However, Gitlab::HttpIO, which is used when object storage is enabled,
# cannot be used with upload_file
wrap_errors(key: key) do
-        upload_id = create.to_h[:upload_id]
+        upload_id = create_upload.to_h[:upload_id]
begin
parts = upload_part(upload_id)
-          complete(upload_id, parts)
+          complete_upload(upload_id, parts)
# Rescue on Exception since even on keyboard interrupt we want to abort the upload and re-raise
# abort clears the already uploaded parts so that they do not cost the bucket owner
rescue Exception => e # rubocop:disable Lint/RescueException
-          abort(upload_id)
+          abort_upload(upload_id)
raise e
end
end
@@ -41,7 +41,7 @@ module StatusPage
attr_reader :key, :file, :client, :bucket_name
-    def create
+    def create_upload
client.create_multipart_upload({ bucket: bucket_name, key: key })
end
@@ -66,7 +66,7 @@ module StatusPage
parts
end
-    def complete(upload_id, parts)
+    def complete_upload(upload_id, parts)
client.complete_multipart_upload({
bucket: bucket_name,
key: key,
@@ -77,7 +77,7 @@ module StatusPage
})
end
-    def abort(upload_id)
+    def abort_upload(upload_id)
client.abort_multipart_upload(
bucket: bucket_name,
key: key,
......
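
For readers less familiar with S3 multipart uploads, the renamed `create_upload` / `complete_upload` / `abort_upload` methods wrap the standard aws-sdk-s3 lifecycle. A self-contained sketch of that lifecycle under assumed bucket, key, and file names (the 5 MB chunk size is S3's minimum part size):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')
bucket = 'example-bucket'                # placeholder
key    = 'data/incident/uploads/cat.png' # placeholder

# 1. Open the upload and remember its id.
upload_id = client.create_multipart_upload(bucket: bucket, key: key).upload_id

begin
  # 2. Upload the file in numbered parts, collecting each part's ETag.
  parts = []
  File.open('cat.png', 'rb') do |file|
    part_number = 1
    while (chunk = file.read(5 * 1024 * 1024))
      part = client.upload_part(
        bucket: bucket, key: key, upload_id: upload_id,
        part_number: part_number, body: chunk
      )
      parts << { etag: part.etag, part_number: part_number }
      part_number += 1
    end
  end

  # 3. Stitch the numbered parts together into the final object.
  client.complete_multipart_upload(
    bucket: bucket, key: key, upload_id: upload_id,
    multipart_upload: { parts: parts }
  )
rescue Exception => e # as in the service: abort even on interrupts
  # 4. On any failure, abort so the orphaned parts stop costing storage.
  client.abort_multipart_upload(bucket: bucket, key: key, upload_id: upload_id)
  raise e
end
```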
@@ -135,7 +135,7 @@ describe StatusPage::Storage::S3Client, :aws_s3 do
include_context 'oversized list_objects_v2 result'
it 'returns result at max size' do
-      expect(result.count).to eq(StatusPage::Storage::MAX_IMAGE_UPLOADS)
+      expect(result.count).to eq(StatusPage::Storage::MAX_UPLOADS)
end
end
......
@@ -29,7 +29,7 @@ describe StatusPage::Storage do
it 'MAX_KEYS_PER_PAGE times MAX_PAGES establishes upload limit' do
# spec intended to fail if page related MAX constants change
-    # In order to ensure change to documented MAX_IMAGE_UPLOADS is considered
+    # In order to ensure change to documented MAX_UPLOADS is considered
expect(StatusPage::Storage::MAX_KEYS_PER_PAGE * StatusPage::Storage::MAX_PAGES).to eq(5000)
end
end
@@ -86,7 +86,7 @@ describe StatusPage::PublishAttachmentsService do
context 'when exceeds upload limit' do
before do
stub_const("StatusPage::Storage::MAX_IMAGE_UPLOADS", 1)
stub_const("StatusPage::Storage::MAX_UPLOADS", 1)
allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
end
......
@@ -32,108 +32,15 @@ describe StatusPage::PublishDetailsService do
end
end
-    context 'publishes image uploads' do
-      before do
-        allow(storage_client).to receive(:upload_object).with("data/incident/1.json", "{\"id\":1}")
-        allow(storage_client).to receive(:list_object_keys).and_return(Set.new)
-      end
-
-      context 'when not in markdown' do
-        it 'publishes no images' do
-          expect(storage_client).not_to receive(:multipart_upload)
-          expect(result.payload).to eq({})
-          expect(result).to be_success
-        end
-      end
-
-      context 'when in markdown' do
-        let(:upload_secret) { '734b8524a16d44eb0ff28a2c2e4ff3c0' }
-        let(:image_file_name) { 'tanuki.png' }
-        let(:upload_path) { "/uploads/#{upload_secret}/#{image_file_name}" }
-        let(:markdown_field) { "![tanuki](#{upload_path})" }
-        let(:status_page_upload_path) { StatusPage::Storage.upload_path(issue.iid, upload_secret, image_file_name) }
-        let(:user_notes) { [] }
-
-        let(:open_file) { instance_double(File, read: 'stubbed read') }
-        let(:uploader) { instance_double(FileUploader) }
-
-        before do
-          allow(uploader).to receive(:open).and_yield(open_file).twice
-
-          allow_next_instance_of(UploaderFinder) do |finder|
-            allow(finder).to receive(:execute).and_return(uploader)
-          end
-
-          allow(storage_client).to receive(:list_object_keys).and_return(Set[])
-          allow(storage_client).to receive(:upload_object)
-        end
-
-        it 'publishes description images' do
-          expect(storage_client).to receive(:multipart_upload).with(status_page_upload_path, open_file).once
-
-          expect(result).to be_success
-          expect(result.payload).to eq({})
-        end
-
-        context 'user notes uploads' do
-          let(:user_note) { instance_double(Note, note: markdown_field) }
-          let(:user_notes) { [user_note] }
-          let(:issue) { instance_double(Issue, notes: user_notes, description: '', iid: incident_id) }
-
-          it 'publishes images' do
-            expect(storage_client).to receive(:multipart_upload).with(status_page_upload_path, open_file).once
-
-            expect(result).to be_success
-            expect(result.payload).to eq({})
-          end
-        end
-
-        context 'when exceeds upload limit' do
-          before do
-            stub_const("StatusPage::Storage::MAX_IMAGE_UPLOADS", 1)
-            allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
-          end
-
-          it 'publishes no images' do
-            expect(storage_client).not_to receive(:multipart_upload)
-
-            expect(result).to be_success
-            expect(result.payload).to eq({})
-          end
-        end
-
-        context 'when all images are in s3' do
-          before do
-            allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
-          end
-
-          it 'publishes no images' do
-            expect(storage_client).not_to receive(:multipart_upload)
-
-            expect(result).to be_success
-            expect(result.payload).to eq({})
-          end
-        end
-
-        context 'when images are already in s3' do
-          let(:upload_secret_2) { '9cb61a79ce884d5b681dd42728d3c159' }
-          let(:image_file_name_2) { 'tanuki_2.png' }
-          let(:upload_path_2) { "/uploads/#{upload_secret_2}/#{image_file_name_2}" }
-          let(:markdown_field) { "![tanuki](#{upload_path}) and ![tanuki_2](#{upload_path_2})" }
-          let(:status_page_upload_path_2) { StatusPage::Storage.upload_path(issue.iid, upload_secret_2, image_file_name_2) }
-
-          before do
-            allow(storage_client).to receive(:list_object_keys).and_return(Set[status_page_upload_path])
-          end
-
-          it 'publishes only new images' do
-            expect(storage_client).to receive(:multipart_upload).with(status_page_upload_path_2, open_file).once
-            expect(storage_client).not_to receive(:multipart_upload).with(status_page_upload_path, open_file)
-
-            expect(result).to be_success
-            expect(result.payload).to eq({})
-          end
-        end
-      end
-    end
+    context 'publishes attachments' do
+      before do
+        allow(storage_client).to receive(:upload_object)
+        allow(storage_client).to receive(:list_object_keys).and_return(Set[])
+      end
+
+      it 'sends attachments to s3' do
+        expect_next_instance_of(StatusPage::PublishAttachmentsService) do |service|
+          expect(service).to receive(:execute).with(issue, user_notes)
+        end
+
+        subject
+      end
+    end
end
......
@@ -22,7 +22,7 @@ RSpec.shared_context 'oversized list_objects_v2 result' do
before do
stub_const("StatusPage::Storage::MAX_KEYS_PER_PAGE", 2)
stub_const("StatusPage::Storage::MAX_PAGES", 1)
stub_const("StatusPage::Storage::MAX_IMAGE_UPLOADS", StatusPage::Storage::MAX_PAGES * StatusPage::Storage::MAX_KEYS_PER_PAGE)
stub_const("StatusPage::Storage::MAX_UPLOADS", StatusPage::Storage::MAX_PAGES * StatusPage::Storage::MAX_KEYS_PER_PAGE)
# AWS S3 client responses for list_objects are paginated
# stub_responses accepts multiple responses as arguments and returns them in sequence
stub_responses(
......
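
A quick illustration of the stubbing behavior relied on above: with `stub_responses: true`, the aws-sdk-s3 client returns the stubbed payloads in sequence, one per call, which is how the shared context simulates pagination. A minimal sketch with a fabricated bucket name and keys:

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(stub_responses: true)

# Two stubbed pages: the first is truncated, prompting a second call.
client.stub_responses(
  :list_objects_v2,
  { contents: [{ key: 'uploads/1.png' }, { key: 'uploads/2.png' }], is_truncated: true },
  { contents: [{ key: 'uploads/3.png' }], is_truncated: false }
)

first  = client.list_objects_v2(bucket: 'example', prefix: 'uploads/')
second = client.list_objects_v2(bucket: 'example', prefix: 'uploads/')

puts first.contents.map(&:key).inspect # => ["uploads/1.png", "uploads/2.png"]
puts second.is_truncated               # => false
```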