Commit 67529808 authored by Bob Van Landuyt

Merge branch '330609-sidekiq-server-middleware-improve-caught-up-host-search' into 'master'

Improve finding caught up replica for SidekiqServerMW [RUN ALL RSPEC] [RUN AS-IF-FOSS]

See merge request gitlab-org/gitlab!63413
parents 08b7f421 78381ca1
---
name: sidekiq_load_balancing_rotate_up_to_date_replica
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/63413/
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/333153
milestone: '14.0'
type: development
group: group::memory
default_enabled: false
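This development flag defaults to off, so the new lookup path in the middleware below only runs once the flag is enabled. As a rough sketch, the flag can be toggled from a Rails console with the standard GitLab Feature API (assuming console access to the instance):

# Rails console sketch (standard GitLab Feature API assumed).
Feature.enable(:sidekiq_load_balancing_rotate_up_to_date_replica)   # switch to the new replica selection
Feature.disable(:sidekiq_load_balancing_rotate_up_to_date_replica)  # fall back to the previous single-host check
Feature.enabled?(:sidekiq_load_balancing_rotate_up_to_date_replica) # the check the middleware performs below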
@@ -176,6 +176,20 @@ module Gitlab
true
end
# Returns true if at least one host has caught up with the given transaction.
# Similar to `#select_caught_up_hosts`, it picks a random host so that the replicas we use are rotated.
# Unlike `#select_caught_up_hosts`, it stops iterating as soon as it finds a caught-up host.
def select_up_to_date_host(location)
all_hosts = @host_list.hosts.shuffle
host = all_hosts.find { |host| host.caught_up?(location) }
return false unless host
RequestStore[CACHE_KEY] = host
true
end
def set_consistent_hosts_for_request(hosts)
RequestStore[VALID_HOSTS_CACHE_KEY] = hosts
end
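The rotation described in the comment on #select_up_to_date_host comes entirely from the shuffle before find: every call scans the hosts in a fresh random order and stops at the first caught-up one, so repeated calls spread the pinned replica across the host list. A standalone sketch of that pattern, using a hypothetical FakeHost struct rather than GitLab's Host class:

# Illustration only: FakeHost is hypothetical, not GitLab code.
FakeHost = Struct.new(:name, :caught_up) do
  def caught_up?(_location)
    caught_up
  end
end

hosts = [
  FakeHost.new('replica-1', true),
  FakeHost.new('replica-2', true),
  FakeHost.new('replica-3', false)
]

# Shuffling first means either replica-1 or replica-2 can be picked;
# hosts after the first caught-up one are never checked.
picked = hosts.shuffle.find { |host| host.caught_up?('AB/12345') }
puts picked.name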
@@ -59,9 +59,13 @@ module Gitlab
end
def replica_caught_up?(location)
if Feature.enabled?(:sidekiq_load_balancing_rotate_up_to_date_replica)
load_balancer.select_up_to_date_host(location)
else
load_balancer.host.caught_up?(location)
end
end
end
end
end
end
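With the flag enabled, replica_caught_up? delegates to the new #select_up_to_date_host, which pins the chosen replica in RequestStore[CACHE_KEY]; with the flag disabled, it keeps checking only the single host returned by load_balancer.host. A rough console sketch of driving the new path directly, assuming database load balancing is configured (the 'AB/12345' WAL location string is just the placeholder used in the specs below):

# Console sketch; requires load balancing to be configured on the instance.
lb = ::Gitlab::Database::LoadBalancing.proxy.load_balancer

# Returns true and pins the replica in RequestStore if any replica has
# replayed past the given WAL location, false otherwise.
lb.select_up_to_date_host('AB/12345')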
@@ -459,7 +459,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::LoadBalancer, :request_store do
expect(hosts).to all(receive(:caught_up?).with(location).and_return(false))
end
- it 'returns true and has does not set the valid hosts' do
+ it 'returns false and does not set the valid hosts' do
expect(subject).to be false
expect(valid_host_list).to be_nil
end
@@ -487,4 +487,36 @@ RSpec.describe Gitlab::Database::LoadBalancing::LoadBalancer, :request_store do
end
end
end
describe '#select_up_to_date_host' do
let(:location) { 'AB/12345' }
let(:hosts) { lb.host_list.hosts }
let(:set_host) { RequestStore[described_class::CACHE_KEY] }
subject { lb.select_up_to_date_host(location) }
context 'when none of the replicas are caught up' do
before do
expect(hosts).to all(receive(:caught_up?).with(location).and_return(false))
end
it 'returns false and does not update the host thread-local variable' do
expect(subject).to be false
expect(set_host).to be_nil
end
end
context 'when any of the replicas is caught up' do
before do
# Use `allow` for the host that is not caught up, because it may never be checked if the caught-up host is found first.
allow(hosts[0]).to receive(:caught_up?).with(location).and_return(false)
expect(hosts[1]).to receive(:caught_up?).with(location).and_return(true)
end
it 'returns true and sets host thread-local variable' do
expect(subject).to be true
expect(set_host).to eq(hosts[1])
end
end
end
end
@@ -126,7 +126,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do
context 'when replica is not up to date' do
before do
allow(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :release_host)
- allow(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :host, :caught_up?).and_return(false)
+ allow(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :select_up_to_date_host).and_return(false)
end
around do |example|
@@ -157,6 +157,20 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do
expect(job[:database_chosen]).to eq('primary')
end
end
context 'replica selection mechanism feature flag rollout' do
before do
stub_feature_flags(sidekiq_load_balancing_rotate_up_to_date_replica: false)
end
it 'uses a different implementation' do
expect(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :host, :caught_up?).and_return(false)
expect do
process_job(job)
end.to raise_error(Sidekiq::JobRetry::Skip)
end
end
end
end