Commit 013ca207 authored by Allison Browne, committed by Douglas Barbosa Alexandre

Remove project destroy transaction behind flag

Remove the project destroy transaction so that, when the job is retried,
it can continue deleting the objects it did not get to.
parent 3cb408f1
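
The sketch below is illustrative only (plain Ruby, no GitLab APIs) and shows why dropping the wrapping transaction helps: with no transaction, each deleted batch stays deleted, so a retried job resumes from the failure point instead of starting over.

    records = (1..10).to_a

    destroy_batch = lambda do |batch|
      raise 'boom' if batch.include?(7) # simulate a failure partway through

      records -= batch
    end

    begin
      # With no surrounding transaction, every batch destroyed before the
      # failure stays destroyed...
      records.each_slice(3) { |batch| destroy_batch.call(batch) }
    rescue RuntimeError
      # ...so a retry only has to finish what is left.
    end

    puts records.inspect # => [7, 8, 9, 10]; a retry picks up from here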
@@ -28,7 +28,7 @@ module Projects
       Projects::UnlinkForkService.new(project, current_user).execute

-      attempt_destroy_transaction(project)
+      attempt_destroy(project)

       system_hook_service.execute_hooks_for(project, :destroy)

       log_info("Project \"#{project.full_path}\" was deleted")
@@ -98,29 +98,35 @@ module Projects
       log_error("Deletion failed on #{project.full_path} with the following message: #{message}")
     end

-    def attempt_destroy_transaction(project)
+    def attempt_destroy(project)
       unless remove_registry_tags
         raise_error(s_('DeleteProject|Failed to remove some tags in project container registry. Please try again or contact administrator.'))
       end

       project.leave_pool_repository

-      Project.transaction do
-        log_destroy_event
-        trash_relation_repositories!
-        trash_project_repositories!
-
-        # Rails attempts to load all related records into memory before
-        # destroying: https://github.com/rails/rails/issues/22510
-        # This ensures we delete records in batches.
-        #
-        # Exclude container repositories because its before_destroy would be
-        # called multiple times, and it doesn't destroy any database records.
-        project.destroy_dependent_associations_in_batches(exclude: [:container_repositories, :snippets])
-        project.destroy!
+      if Gitlab::Ci::Features.project_transactionless_destroy?(project)
+        destroy_project_related_records(project)
+      else
+        Project.transaction { destroy_project_related_records(project) }
       end
     end

+    def destroy_project_related_records(project)
+      log_destroy_event
+      trash_relation_repositories!
+      trash_project_repositories!
+
+      # Rails attempts to load all related records into memory before
+      # destroying: https://github.com/rails/rails/issues/22510
+      # This ensures we delete records in batches.
+      #
+      # Exclude container repositories because its before_destroy would be
+      # called multiple times, and it doesn't destroy any database records.
+      project.destroy_dependent_associations_in_batches(exclude: [:container_repositories, :snippets])
+      project.destroy!
+    end
+
     def log_destroy_event
       log_info("Attempting to destroy #{project.full_path} (#{project.id})")
     end
@@ -17,10 +17,30 @@ module EE
         end
       end

-      override :log_destroy_event
-      def log_destroy_event
-        super
+      # Removes physical repository in a Geo replicated secondary node
+      # There is no need to do any database operation as it will be
+      # replicated by itself.
+      def geo_replicate
+        return unless ::Gitlab::Geo.secondary?
+
+        # Flush the cache for both repositories. This has to be done _before_
+        # removing the physical repositories as some expiration code depends on
+        # Git data (e.g. a list of branch names).
+        flush_caches(project)
+
+        trash_project_repositories!
+
+        log_info("Project \"#{project.name}\" was removed")
+      end
+
+      private
+
+      override :destroy_project_related_records
+      def destroy_project_related_records(project)
+        super && log_destroy_events
+      end

+      def log_destroy_events
         log_geo_event(project)
         log_audit_event(project)
       end
@@ -39,24 +59,6 @@ module EE
         ).create!
       end

-      # Removes physical repository in a Geo replicated secondary node
-      # There is no need to do any database operation as it will be
-      # replicated by itself.
-      def geo_replicate
-        return unless ::Gitlab::Geo.secondary?
-
-        # Flush the cache for both repositories. This has to be done _before_
-        # removing the physical repositories as some expiration code depends on
-        # Git data (e.g. a list of branch names).
-        flush_caches(project)
-
-        trash_project_repositories!
-
-        log_info("Project \"#{project.name}\" was removed")
-      end
-
-      private
-
       def log_audit_event(project)
         ::AuditEventService.new(
           current_user,
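
For context, the EE module layers behavior onto the CE service via module prepending, and the `super && log_destroy_events` line above means Geo and audit events are only logged when the core destroy step returns a truthy value. A minimal plain-Ruby sketch of that pattern follows; the class and module names here are illustrative, not GitLab's:

    class CoreDestroyService
      def destroy_project_related_records(project)
        puts "destroying related records for #{project}"
        true # pretend the core step succeeded
      end
    end

    module EEDestroyOverrides
      def destroy_project_related_records(project)
        # Run the core (CE) implementation first; EE logging happens only on success.
        super && log_destroy_events(project)
      end

      def log_destroy_events(project)
        puts "logging Geo and audit events for #{project}"
        true
      end
    end

    CoreDestroyService.prepend(EEDestroyOverrides)
    CoreDestroyService.new.destroy_project_related_records('gitlab-org/gitlab')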
@@ -20,79 +20,93 @@ RSpec.describe Projects::DestroyService do
     stub_container_registry_tags(repository: :any, tags: [])
   end

+  shared_examples 'project destroy ee' do
     context 'when project is a mirror' do
-      it 'decrements capacity if mirror was scheduled' do
-        max_capacity = Gitlab::CurrentSettings.mirror_max_capacity
-        project_mirror = create(:project, :mirror, :repository, :import_scheduled)
-
-        Gitlab::Mirror.increment_capacity(project_mirror.id)
-
-        expect do
-          described_class.new(project_mirror, project_mirror.owner, {}).execute
-        end.to change { Gitlab::Mirror.available_capacity }.from(max_capacity - 1).to(max_capacity)
+      let(:max_capacity) { Gitlab::CurrentSettings.mirror_max_capacity }
+      let_it_be(:project_mirror) { create(:project, :mirror, :repository, :import_scheduled) }
+      let(:result) { described_class.new(project_mirror, project_mirror.owner, {}).execute }
+
+      before do
+        Gitlab::Mirror.increment_capacity(project_mirror.id)
+      end
+
+      it 'decrements capacity if mirror was scheduled' do
+        expect { result }.to change { Gitlab::Mirror.available_capacity }.from(max_capacity - 1).to(max_capacity)
       end
     end

     context 'when running on a primary node' do
       let_it_be(:primary) { create(:geo_node, :primary) }
       let_it_be(:secondary) { create(:geo_node) }

       before do
         stub_current_geo_node(primary)
       end

       it 'logs an event to the Geo event log' do
         # Run Sidekiq immediately to check that renamed repository will be removed
         Sidekiq::Testing.inline! do
-          expect(subject).to receive(:log_destroy_event).and_call_original
+          expect(subject).to receive(:log_destroy_events).and_call_original
           expect { subject.execute }.to change(Geo::RepositoryDeletedEvent, :count).by(1)
         end
       end

       it 'does not log event to the Geo log if project deletion fails' do
         expect(subject).to receive(:log_destroy_event).and_call_original
-        expect_any_instance_of(Project)
-          .to receive(:destroy!).and_raise(StandardError.new('Other error message'))
+        expect(project).to receive(:destroy!).and_raise(StandardError.new('Other error message'))

         Sidekiq::Testing.inline! do
           expect { subject.execute }.not_to change(Geo::RepositoryDeletedEvent, :count)
         end
       end
     end

     context 'audit events' do
       include_examples 'audit event logging' do
         let(:operation) { subject.execute }

         let(:fail_condition!) do
-          expect_any_instance_of(Project)
-            .to receive(:destroy!).and_raise(StandardError.new('Other error message'))
+          expect(project).to receive(:destroy!).and_raise(StandardError.new('Other error message'))
         end

         let(:attributes) do
           {
             author_id: user.id,
             entity_id: project.id,
             entity_type: 'Project',
             details: {
               remove: 'project',
               author_name: user.name,
               target_id: project.full_path,
               target_type: 'Project',
               target_details: project.full_path
             }
           }
         end
       end
     end

     context 'system hooks exception' do
       before do
         allow_any_instance_of(SystemHooksService).to receive(:execute_hooks_for).and_raise('something went wrong')
       end

       it 'logs an audit event' do
         expect(subject).to receive(:log_destroy_event).and_call_original
         expect { subject.execute }.to change(AuditEvent, :count)
       end
     end
+  end
+
+  context 'when project_transactionless_destroy enabled' do
+    it_behaves_like 'project destroy ee'
+  end
+
+  context 'when project_transactionless_destroy disabled', :sidekiq_inline do
+    before do
+      stub_feature_flags(project_transactionless_destroy: false)
+    end
+
+    it_behaves_like 'project destroy ee'
+  end
@@ -79,6 +79,10 @@ module Gitlab
         def self.expand_names_for_cross_pipeline_artifacts?(project)
           ::Feature.enabled?(:ci_expand_names_for_cross_pipeline_artifacts, project)
         end
+
+        def self.project_transactionless_destroy?(project)
+          Feature.enabled?(:project_transactionless_destroy, project, default_enabled: false)
+        end
       end
     end
   end
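
For reference, a flag defined this way is read per project and defaults to off. It would typically be toggled from a Rails console roughly as below, and forced off in specs via `stub_feature_flags`, as the spec changes in this commit do. This is a sketch assuming GitLab's standard `Feature` helpers; the project path is only an example and none of this is part of the commit:

    # From a Rails console: enable the new code path for one project, then globally.
    project = Project.find_by_full_path('gitlab-org/gitlab')
    Feature.enable(:project_transactionless_destroy, project)
    Feature.enable(:project_transactionless_destroy)

    # Roll back if deletions start misbehaving.
    Feature.disable(:project_transactionless_destroy)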
@@ -2,7 +2,7 @@
 require 'spec_helper'

-RSpec.describe Projects::DestroyService do
+RSpec.describe Projects::DestroyService, :aggregate_failures do
   include ProjectForksHelper

   let_it_be(:user) { create(:user) }
@@ -60,317 +60,353 @@ RSpec.describe Projects::DestroyService do
     end
   end

+  shared_examples 'project destroy' do
     it_behaves_like 'deleting the project'

     it 'invalidates personal_project_count cache' do
       expect(user).to receive(:invalidate_personal_projects_count)

       destroy_project(project, user, {})
     end

     context 'when project has remote mirrors' do
       let!(:project) do
         create(:project, :repository, namespace: user.namespace).tap do |project|
           project.remote_mirrors.create(url: 'http://test.com')
         end
       end

       it 'destroys them' do
         expect(RemoteMirror.count).to eq(1)

         destroy_project(project, user, {})

         expect(RemoteMirror.count).to eq(0)
       end
     end

     context 'when project has exports' do
       let!(:project_with_export) do
         create(:project, :repository, namespace: user.namespace).tap do |project|
           create(:import_export_upload,
                  project: project,
                  export_file: fixture_file_upload('spec/fixtures/project_export.tar.gz'))
         end
       end

       it 'destroys project and export' do
         expect do
           destroy_project(project_with_export, user, {})
         end.to change(ImportExportUpload, :count).by(-1)

         expect(Project.all).not_to include(project_with_export)
       end
     end

     context 'Sidekiq fake' do
       before do
         # Dont run sidekiq to check if renamed repository exists
         Sidekiq::Testing.fake! { destroy_project(project, user, {}) }
       end

       it { expect(Project.all).not_to include(project) }

       it do
         expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_falsey
       end

       it do
         expect(project.gitlab_shell.repository_exists?(project.repository_storage, remove_path + '.git')).to be_truthy
       end
     end

     context 'when flushing caches fail due to Git errors' do
       before do
         allow(project.repository).to receive(:before_delete).and_raise(::Gitlab::Git::CommandError)
         allow(Gitlab::GitLogger).to receive(:warn).with(
           class: Repositories::DestroyService.name,
           container_id: project.id,
           disk_path: project.disk_path,
           message: 'Gitlab::Git::CommandError').and_call_original
       end

       it_behaves_like 'deleting the project'
     end

     context 'when flushing caches fail due to Redis' do
       before do
         new_user = create(:user)
         project.team.add_user(new_user, Gitlab::Access::DEVELOPER)
         allow_any_instance_of(described_class).to receive(:flush_caches).and_raise(::Redis::CannotConnectError)
       end

       it 'keeps project team intact upon an error' do
         perform_enqueued_jobs do
           destroy_project(project, user, {})
         rescue ::Redis::CannotConnectError
         end

         expect(project.team.members.count).to eq 2
       end
     end

     context 'with async_execute', :sidekiq_inline do
       let(:async) { true }

       context 'async delete of project with private issue visibility' do
         before do
           project.project_feature.update_attribute("issues_access_level", ProjectFeature::PRIVATE)
         end

         it_behaves_like 'deleting the project'
       end

       it_behaves_like 'deleting the project with pipeline and build'

       context 'errors' do
         context 'when `remove_legacy_registry_tags` fails' do
           before do
             expect_any_instance_of(described_class)
               .to receive(:remove_legacy_registry_tags).and_return(false)
           end

           it_behaves_like 'handles errors thrown during async destroy', "Failed to remove some tags"
         end

         context 'when `remove_repository` fails' do
           before do
             expect_any_instance_of(described_class)
               .to receive(:remove_repository).and_return(false)
           end

           it_behaves_like 'handles errors thrown during async destroy', "Failed to remove project repository"
         end

         context 'when `execute` raises expected error' do
           before do
             expect_any_instance_of(Project)
               .to receive(:destroy!).and_raise(StandardError.new("Other error message"))
           end

           it_behaves_like 'handles errors thrown during async destroy', "Other error message"
         end

         context 'when `execute` raises unexpected error' do
           before do
             expect_any_instance_of(Project)
               .to receive(:destroy!).and_raise(Exception.new('Other error message'))
           end

           it 'allows error to bubble up and rolls back project deletion' do
             expect do
               destroy_project(project, user, {})
             end.to raise_error(Exception, 'Other error message')

             expect(project.reload.pending_delete).to be(false)
             expect(project.delete_error).to include("Other error message")
           end
         end
       end
     end

     describe 'container registry' do
       context 'when there are regular container repositories' do
         let(:container_repository) { create(:container_repository) }

         before do
           stub_container_registry_tags(repository: project.full_path + '/image',
                                        tags: ['tag'])
           project.container_repositories << container_repository
         end

         context 'when image repository deletion succeeds' do
           it 'removes tags' do
             expect_any_instance_of(ContainerRepository)
               .to receive(:delete_tags!).and_return(true)

             destroy_project(project, user)
           end
         end

         context 'when image repository deletion fails' do
           it 'raises an exception' do
             expect_any_instance_of(ContainerRepository)
               .to receive(:delete_tags!).and_raise(RuntimeError)

             expect(destroy_project(project, user)).to be false
           end
         end

         context 'when registry is disabled' do
           before do
             stub_container_registry_config(enabled: false)
           end

           it 'does not attempting to remove any tags' do
             expect(Projects::ContainerRepository::DestroyService).not_to receive(:new)

             destroy_project(project, user)
           end
         end
       end

       context 'when there are tags for legacy root repository' do
         before do
           stub_container_registry_tags(repository: project.full_path,
                                        tags: ['tag'])
         end

         context 'when image repository tags deletion succeeds' do
           it 'removes tags' do
             expect_any_instance_of(ContainerRepository)
               .to receive(:delete_tags!).and_return(true)

             destroy_project(project, user)
           end
         end

         context 'when image repository tags deletion fails' do
           it 'raises an exception' do
             expect_any_instance_of(ContainerRepository)
               .to receive(:delete_tags!).and_return(false)

             expect(destroy_project(project, user)).to be false
           end
         end
       end
     end

     context 'for a forked project with LFS objects' do
       let(:forked_project) { fork_project(project, user) }

       before do
         project.lfs_objects << create(:lfs_object)
         forked_project.reload
       end

       it 'destroys the fork' do
         expect { destroy_project(forked_project, user) }
           .not_to raise_error
       end
     end

     context 'as the root of a fork network' do
       let!(:fork_1) { fork_project(project, user) }
       let!(:fork_2) { fork_project(project, user) }

       it 'updates the fork network with the project name' do
         fork_network = project.fork_network

         destroy_project(project, user)

         fork_network.reload

         expect(fork_network.deleted_root_project_name).to eq(project.full_name)
         expect(fork_network.root_project).to be_nil
       end
     end

     context 'repository +deleted path removal' do
       context 'regular phase' do
         it 'schedules +deleted removal of existing repos' do
           service = described_class.new(project, user, {})
           allow(service).to receive(:schedule_stale_repos_removal)

           expect(Repositories::ShellDestroyService).to receive(:new).and_call_original
           expect(GitlabShellWorker).to receive(:perform_in)
             .with(5.minutes, :remove_repository, project.repository_storage, removal_path(project.disk_path))

           service.execute
         end
       end

       context 'stale cleanup' do
         let(:async) { true }

         it 'schedules +deleted wiki and repo removal' do
           allow(ProjectDestroyWorker).to receive(:perform_async)

           expect(Repositories::ShellDestroyService).to receive(:new).with(project.repository).and_call_original
           expect(GitlabShellWorker).to receive(:perform_in)
             .with(10.minutes, :remove_repository, project.repository_storage, removal_path(project.disk_path))

           expect(Repositories::ShellDestroyService).to receive(:new).with(project.wiki.repository).and_call_original
           expect(GitlabShellWorker).to receive(:perform_in)
             .with(10.minutes, :remove_repository, project.repository_storage, removal_path(project.wiki.disk_path))

           destroy_project(project, user, {})
         end
       end
     end

     context 'snippets' do
       let!(:snippet1) { create(:project_snippet, project: project, author: user) }
       let!(:snippet2) { create(:project_snippet, project: project, author: user) }

       it 'does not include snippets when deleting in batches' do
         expect(project).to receive(:destroy_dependent_associations_in_batches).with({ exclude: [:container_repositories, :snippets] })

         destroy_project(project, user)
       end

       it 'calls the bulk snippet destroy service' do
         expect(project.snippets.count).to eq 2

         expect(Snippets::BulkDestroyService).to receive(:new)
           .with(user, project.snippets).and_call_original

         expect do
           destroy_project(project, user)
         end.to change(Snippet, :count).by(-2)
       end

       context 'when an error is raised deleting snippets' do
         it 'does not delete project' do
           allow_next_instance_of(Snippets::BulkDestroyService) do |instance|
             allow(instance).to receive(:execute).and_return(ServiceResponse.error(message: 'foo'))
           end

           expect(destroy_project(project, user)).to be_falsey
           expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_truthy
         end
       end
     end

+    context 'error while destroying', :sidekiq_inline do
+      let!(:pipeline) { create(:ci_pipeline, project: project) }
+      let!(:builds) { create_list(:ci_build, 2, :artifacts, pipeline: pipeline) }
+      let!(:build_trace) { create(:ci_build_trace_chunk, build: builds[0]) }
+
+      it 'deletes on retry' do
+        # We can expect this to timeout for very large projects
+        # TODO: remove allow_next_instance_of: https://gitlab.com/gitlab-org/gitlab/-/issues/220440
+        allow_any_instance_of(Ci::Build).to receive(:destroy).and_raise('boom')
+        destroy_project(project, user, {})
+
+        allow_any_instance_of(Ci::Build).to receive(:destroy).and_call_original
+        destroy_project(project, user, {})
+
+        expect(Project.unscoped.all).not_to include(project)
+        expect(project.gitlab_shell.repository_exists?(project.repository_storage, path + '.git')).to be_falsey
+        expect(project.gitlab_shell.repository_exists?(project.repository_storage, remove_path + '.git')).to be_falsey
+        expect(project.all_pipelines).to be_empty
+        expect(project.builds).to be_empty
+      end
+    end
+  end
+
+  context 'when project_transactionless_destroy enabled' do
+    it_behaves_like 'project destroy'
+  end
+
+  context 'when project_transactionless_destroy disabled', :sidekiq_inline do
+    before do
+      stub_feature_flags(project_transactionless_destroy: false)
+    end
+
+    it_behaves_like 'project destroy'
+  end

   def destroy_project(project, user, params = {})
     described_class.new(project, user, params).public_send(async ? :async_execute : :execute)
   end