Commit ec76b503 authored by Brett Walker, committed by Stan Hu

Limit number of times a migration is rescheduled

due to inability to obtain a lease. Solves an
infinite loop problem.
parent 10c4720a
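
For orientation, here is a minimal, self-contained Ruby sketch of the bounded-reschedule pattern the diff below implements. FakeLease and the direct recursive call are illustrative stand-ins for Gitlab::ExclusiveLease and Sidekiq's perform_in, not real GitLab APIs:

    # Sketch only: FakeLease stands in for Gitlab::ExclusiveLease, and the
    # recursive call stands in for rescheduling via Sidekiq's perform_in.
    class FakeLease
      def try_obtain
        false # simulate the lease always being held by another process
      end
    end

    def perform(lease, lease_attempts = 5)
      attempts_left = lease_attempts - 1

      if lease.try_obtain
        puts 'lease obtained, running migration'
      elsif attempts_left < 0
        # The fix: once the attempts are exhausted, warn and stop
        # instead of rescheduling forever.
        puts 'could not obtain an exclusive lease after several tries, giving up'
      else
        puts "lease busy, rescheduling with #{attempts_left} attempts left"
        perform(lease, attempts_left)
      end
    end

    perform(FakeLease.new) # prints five reschedules, then gives up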
@@ -24,10 +24,14 @@ class BackgroundMigrationWorker # rubocop:disable Scalability/IdempotentWorker
   # class_name - The class name of the background migration to run.
   # arguments - The arguments to pass to the migration class.
   # lease_attempts - The number of times we will try to obtain an exclusive
-  #                  lease on the class before running anyway. Pass 0 to always run.
+  #                  lease on the class before giving up. See MR for more discussion.
+  #                  https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45298#note_434304956
   def perform(class_name, arguments = [], lease_attempts = 5)
     with_context(caller_id: class_name.to_s) do
-      should_perform, ttl = perform_and_ttl(class_name)
+      attempts_left = lease_attempts - 1
+      should_perform, ttl = perform_and_ttl(class_name, attempts_left)
+
+      break if should_perform.nil?
 
       if should_perform
         Gitlab::BackgroundMigration.perform(class_name, arguments)
@@ -37,32 +41,39 @@ class BackgroundMigrationWorker # rubocop:disable Scalability/IdempotentWorker
         # we'll reschedule the job in such a way that it is picked up again around
         # the time the lease expires.
         self.class
-          .perform_in(ttl || self.class.minimum_interval, class_name, arguments)
+          .perform_in(ttl || self.class.minimum_interval, class_name, arguments, attempts_left)
       end
     end
   end
 
-  def perform_and_ttl(class_name)
-    if always_perform?
-      # In test environments `perform_in` will run right away. This can then
-      # lead to stack level errors in the above `#perform`. To work around this
-      # we'll just perform the migration right away in the test environment.
-      [true, nil]
-    else
-      lease = lease_for(class_name)
-      perform = !!lease.try_obtain
-
-      # If we managed to acquire the lease but the DB is not healthy, then we
-      # want to simply reschedule our job and try again _after_ the lease
-      # expires.
-      if perform && !healthy_database?
-        database_unhealthy_counter.increment
-        perform = false
-      end
-
-      [perform, lease.ttl]
-    end
-  end
+  def perform_and_ttl(class_name, attempts_left)
+    # In test environments `perform_in` will run right away. This can then
+    # lead to stack level errors in the above `#perform`. To work around this
+    # we'll just perform the migration right away in the test environment.
+    return [true, nil] if always_perform?
+
+    lease = lease_for(class_name)
+    lease_obtained = !!lease.try_obtain
+    healthy_db = healthy_database?
+    perform = lease_obtained && healthy_db
+
+    database_unhealthy_counter.increment if lease_obtained && !healthy_db
+
+    # If we've tried several times to get a lease with a healthy DB without success, just give up.
+    # Otherwise we could end up in an infinite rescheduling loop.
+    if !perform && attempts_left < 0
+      msg = if !lease_obtained
+              'Job could not get an exclusive lease after several tries. Giving up.'
+            else
+              'Database was unhealthy after several tries. Giving up.'
+            end
+
+      Sidekiq.logger.warn(class: class_name, message: msg, job_id: jid)
+
+      return [nil, nil]
+    end
+
+    [perform, lease.ttl]
+  end
 
   def lease_for(class_name)
...
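
One way to read the new perform_and_ttl above: it now returns a tri-state result that #perform branches on. A tiny runnable illustration with hard-coded result pairs (sketch only, not GitLab code):

    # Illustrative only: hard-coded pairs mirroring the three shapes
    # perform_and_ttl can now return.
    [[true, nil], [false, 30], [nil, nil]].each do |should_perform, ttl|
      if should_perform.nil?
        puts 'attempts exhausted: break without rescheduling' # new give-up path
      elsif should_perform
        puts 'lease held and DB healthy: run the migration now'
      else
        puts "lease busy or DB unhealthy: perform_in(#{ttl || 'minimum_interval'}, ...)"
      end
    end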
---
title: Limit number of times a background migration is rescheduled
merge_request: 45298
author:
type: fixed
@@ -12,45 +12,91 @@ RSpec.describe BackgroundMigrationWorker, :clean_gitlab_redis_shared_state do
   end
 
   describe '#perform' do
-    it 'performs a background migration' do
-      expect(Gitlab::BackgroundMigration)
-        .to receive(:perform)
-        .with('Foo', [10, 20])
-
-      worker.perform('Foo', [10, 20])
+    before do
+      allow(worker).to receive(:jid).and_return(1)
+      expect(worker).to receive(:always_perform?).and_return(false)
+    end
+
+    context 'when lease can be obtained' do
+      before do
+        expect(Gitlab::BackgroundMigration)
+          .to receive(:perform)
+          .with('Foo', [10, 20])
+      end
+
+      it 'performs a background migration' do
+        worker.perform('Foo', [10, 20])
+      end
+
+      context 'when lease_attempts is 1' do
+        it 'performs a background migration' do
+          worker.perform('Foo', [10, 20], 1)
+        end
+      end
     end
 
-    it 'reschedules a migration if it was performed recently' do
-      expect(worker)
-        .to receive(:always_perform?)
-        .and_return(false)
-
-      worker.lease_for('Foo').try_obtain
-
-      expect(Gitlab::BackgroundMigration)
-        .not_to receive(:perform)
-
-      expect(described_class)
-        .to receive(:perform_in)
-        .with(a_kind_of(Numeric), 'Foo', [10, 20])
-
-      worker.perform('Foo', [10, 20])
+    context 'when lease not obtained (migration of same class was performed recently)' do
+      before do
+        expect(Gitlab::BackgroundMigration).not_to receive(:perform)
+
+        worker.lease_for('Foo').try_obtain
+      end
+
+      it 'reschedules the migration and decrements the lease_attempts' do
+        expect(described_class)
+          .to receive(:perform_in)
+          .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
+
+        worker.perform('Foo', [10, 20], 5)
+      end
+
+      context 'when lease_attempts is 1' do
+        it 'reschedules the migration and decrements the lease_attempts' do
+          expect(described_class)
+            .to receive(:perform_in)
+            .with(a_kind_of(Numeric), 'Foo', [10, 20], 0)
+
+          worker.perform('Foo', [10, 20], 1)
+        end
+      end
+
+      context 'when lease_attempts is 0' do
+        it 'gives up performing the migration' do
+          expect(described_class).not_to receive(:perform_in)
+          expect(Sidekiq.logger).to receive(:warn).with(
+            class: 'Foo',
+            message: 'Job could not get an exclusive lease after several tries. Giving up.',
+            job_id: 1)
+
+          worker.perform('Foo', [10, 20], 0)
+        end
+      end
     end
 
-    it 'reschedules a migration if the database is not healthy' do
-      allow(worker)
-        .to receive(:always_perform?)
-        .and_return(false)
-      allow(worker)
-        .to receive(:healthy_database?)
-        .and_return(false)
-
-      expect(described_class)
-        .to receive(:perform_in)
-        .with(a_kind_of(Numeric), 'Foo', [10, 20])
-
-      worker.perform('Foo', [10, 20])
+    context 'when database is not healthy' do
+      before do
+        allow(worker).to receive(:healthy_database?).and_return(false)
+      end
+
+      it 'reschedules a migration if the database is not healthy' do
+        expect(described_class)
+          .to receive(:perform_in)
+          .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
+
+        worker.perform('Foo', [10, 20])
+      end
+
+      context 'when lease_attempts is 0' do
+        it 'gives up performing the migration' do
+          expect(described_class).not_to receive(:perform_in)
+          expect(Sidekiq.logger).to receive(:warn).with(
+            class: 'Foo',
+            message: 'Database was unhealthy after several tries. Giving up.',
+            job_id: 1)
+
+          worker.perform('Foo', [10, 20], 0)
+        end
+      end
     end
 
     it 'sets the class that will be executed as the caller_id' do
...