Commit 723c116b authored by Heinrich Lee Yu

Switch ThreadMemoryCache to ProcessMemoryCache

A thread-local cache is ineffective when using Puma because unused threads
are reaped until the thread count reaches the min threads setting.

We already tested ProcessMemoryCache for feature flags, so this enables it
for the other classes where we're still using ThreadMemoryCache.
parent ff10699e
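The change effectively swaps a per-thread ActiveSupport::Cache::MemoryStore (see the deleted Gitlab::ThreadMemoryCache class further down) for a single store shared by the whole process. A minimal sketch of what a process-wide backend can look like, assuming the real Gitlab::ProcessMemoryCache follows the same memoization pattern as the class it replaces:

    # Sketch only: the actual Gitlab::ProcessMemoryCache implementation may differ.
    require 'active_support'
    require 'active_support/cache'

    module Gitlab
      class ProcessMemoryCache
        # One thread-safe MemoryStore per Ruby process, shared by every Puma
        # thread, instead of one store hidden in Thread.current. Warm entries
        # survive Puma reaping idle threads because nothing is tied to a thread.
        def self.cache_backend
          @cache_backend ||= ActiveSupport::Cache::MemoryStore.new
        end
      end
    end

Callers keep the same API as before, e.g. Gitlab::ProcessMemoryCache.cache_backend.fetch(key) { compute }.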
@@ -5,7 +5,7 @@ class Admin::ApplicationSettingsController < Admin::ApplicationController
   # NOTE: Use @application_setting in this controller when you need to access
   # application_settings after it has been modified. This is because the
-  # ApplicationSetting model uses Gitlab::ThreadMemoryCache for caching and the
+  # ApplicationSetting model uses Gitlab::ProcessMemoryCache for caching and the
   # cache might be stale immediately after an update.
   # https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/30233

   before_action :set_application_setting, except: :integrations
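The NOTE that hunk updates is about cache staleness: ApplicationSetting.current reads through the in-memory backend, and a write only expires the copy the current process has. A hypothetical sketch of the pitfall it warns about (the attribute and call sequence are illustrative, not code from this MR):

    # Hypothetical illustration of the staleness the NOTE describes.
    setting = ApplicationSetting.current            # served from the memory cache
    setting.update!(signup_enabled: false)          # persisted to the database
    ApplicationSetting.current.signup_enabled?      # may still be true here: another
                                                    # process (or a not-yet-expired local
                                                    # cache) can serve the old value, so
                                                    # keep using `setting` after an update.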
@@ -421,7 +421,7 @@ class ApplicationSetting < ApplicationRecord
   # can cause a significant amount of load on Redis, let's cache it in
   # memory.
   def self.cache_backend
-    Gitlab::ThreadMemoryCache.cache_backend
+    Gitlab::ProcessMemoryCache.cache_backend
   end

   def recaptcha_or_login_protection_enabled
+---
+title: Use process-wide cache for application settings and performance bar
+merge_request: 31135
+author:
+type: performance
 require 'gitlab/testing/request_blocker_middleware'
 require 'gitlab/testing/request_inspector_middleware'
-require 'gitlab/testing/clear_thread_memory_cache_middleware'
+require 'gitlab/testing/clear_process_memory_cache_middleware'

 Rails.application.configure do
   # Make sure the middleware is inserted first in middleware chain
   config.middleware.insert_before(ActionDispatch::Static, Gitlab::Testing::RequestBlockerMiddleware)
   config.middleware.insert_before(ActionDispatch::Static, Gitlab::Testing::RequestInspectorMiddleware)
-  config.middleware.insert_before(ActionDispatch::Static, Gitlab::Testing::ClearThreadMemoryCacheMiddleware)
+  config.middleware.insert_before(ActionDispatch::Static, Gitlab::Testing::ClearProcessMemoryCacheMiddleware)

   # Settings specified here will take precedence over those in config/application.rb
-# frozen_string_literal: true
-
-Gitlab::ThreadMemoryCache.cache_backend
@@ -87,7 +87,7 @@ module Gitlab
     def self.l1_cache
       SafeRequestStore[:geo_l1_cache] ||=
-        Gitlab::JsonCache.new(namespace: :geo, backend: ::Gitlab::ThreadMemoryCache.cache_backend)
+        Gitlab::JsonCache.new(namespace: :geo, backend: ::Gitlab::ProcessMemoryCache.cache_backend)
     end

     def self.l2_cache
@@ -13,7 +13,7 @@ describe Gitlab::Geo, :geo, :request_store do
     it 'includes GitLab version and Rails.version in the cache key' do
       expanded_key = "geo:#{key}:#{Gitlab::VERSION}:#{Rails.version}"

-      expect(Gitlab::ThreadMemoryCache.cache_backend).to receive(:write)
+      expect(Gitlab::ProcessMemoryCache.cache_backend).to receive(:write)
        .with(expanded_key, an_instance_of(String), expires_in: 1.minute).and_call_original
       expect(Rails.cache).to receive(:write)
         .with(expanded_key, an_instance_of(String), expires_in: 2.minutes)
@@ -21,6 +21,8 @@ describe 'view audit events' do
   end

   it 'avoids N+1 DB queries', :request_store do
+    send_request
+
     control = ActiveRecord::QueryRecorder.new(skip_cached: false) { send_request }

     create_list(:project_audit_event, 2, entity_id: project.id)
@@ -134,12 +134,8 @@ class Feature
     end

     def l1_cache_backend
-      if Gitlab::Utils.to_boolean(ENV['USE_THREAD_MEMORY_CACHE'])
-        Gitlab::ThreadMemoryCache.cache_backend
-      else
-        Gitlab::ProcessMemoryCache.cache_backend
-      end
+      Gitlab::ProcessMemoryCache.cache_backend
     end

     def l2_cache_backend
       Rails.cache
@@ -44,7 +44,7 @@ module Gitlab
     end

     def self.l1_cache_backend
-      Gitlab::ThreadMemoryCache.cache_backend
+      Gitlab::ProcessMemoryCache.cache_backend
     end

     def self.l2_cache_backend
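Both Feature and Gitlab::PerformanceBar keep the same two-tier layout after this change: the process memory cache is the L1 in front of Rails.cache (Redis) as the L2. A hedged sketch of how such a layered read is typically wired up; the expiry values and the expensive_source_of_truth helper are assumptions for illustration, not the exact GitLab code:

    # Illustrative two-tier lookup; not the exact Feature/PerformanceBar code.
    def cached_lookup(key)
      # L1: per-process memory cache -- cheapest, short TTL to bound staleness
      l1_cache_backend.fetch(key, expires_in: 1.minute) do
        # L2: Rails.cache (Redis) -- shared across processes and hosts
        l2_cache_backend.fetch(key, expires_in: 1.hour) do
          expensive_source_of_truth(key) # e.g. a database read
        end
      end
    end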
@@ -2,13 +2,13 @@

 module Gitlab
   module Testing
-    class ClearThreadMemoryCacheMiddleware
+    class ClearProcessMemoryCacheMiddleware
       def initialize(app)
         @app = app
       end

       def call(env)
-        Gitlab::ThreadMemoryCache.cache_backend.clear
+        Gitlab::ProcessMemoryCache.cache_backend.clear

         @app.call(env)
       end
-# frozen_string_literal: true
-
-module Gitlab
-  class ThreadMemoryCache
-    THREAD_KEY = :thread_memory_cache
-
-    def self.cache_backend
-      # Note ActiveSupport::Cache::MemoryStore is thread-safe. Since
-      # each backend is local per thread we probably don't need to worry
-      # about synchronizing access, but this is a drop-in replacement
-      # for ActiveSupport::Cache::RedisStore.
-      Thread.current[THREAD_KEY] ||= ActiveSupport::Cache::MemoryStore.new
-    end
-  end
-end
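The removed class above keyed its MemoryStore off Thread.current, which is why the commit message calls it ineffective under Puma: every new thread starts with an empty store, and a reaped idle thread takes its warm entries with it. A small standalone demonstration in plain Ruby (not GitLab code):

    require 'active_support'
    require 'active_support/cache'

    thread_local = -> { Thread.current[:cache] ||= ActiveSupport::Cache::MemoryStore.new }
    process_wide = ActiveSupport::Cache::MemoryStore.new

    Thread.new { thread_local.call.write('answer', 42) }.join
    Thread.new { process_wide.write('answer', 42) }.join

    # A fresh thread (e.g. spawned by Puma after reaping an idle one) sees:
    Thread.new do
      p thread_local.call.read('answer') # => nil, a new thread means a new empty store
      p process_wide.read('answer')      # => 42, shared by every thread in the process
    end.join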
@@ -146,14 +146,6 @@ describe Feature do
       expect(described_class.enabled?(:enabled_feature_flag)).to be_truthy
     end

-    context 'with USE_THREAD_MEMORY_CACHE defined' do
-      before do
-        stub_env('USE_THREAD_MEMORY_CACHE', '1')
-      end
-
-      it { expect(described_class.l1_cache_backend).to eq(Gitlab::ThreadMemoryCache.cache_backend) }
-    end
-
     it { expect(described_class.l1_cache_backend).to eq(Gitlab::ProcessMemoryCache.cache_backend) }
     it { expect(described_class.l2_cache_backend).to eq(Rails.cache) }
@@ -38,7 +38,7 @@ describe Gitlab::PerformanceBar do
     end
   end

-  it { expect(described_class.l1_cache_backend).to eq(Gitlab::ThreadMemoryCache.cache_backend) }
+  it { expect(described_class.l1_cache_backend).to eq(Gitlab::ProcessMemoryCache.cache_backend) }
   it { expect(described_class.l2_cache_backend).to eq(Rails.cache) }

   describe '.enabled_for_user?' do
@@ -205,11 +205,11 @@ describe CacheableAttributes do
       end
     end

-    it 'uses RequestStore in addition to Thread memory cache', :request_store do
+    it 'uses RequestStore in addition to process memory cache', :request_store do
      # Warm up the cache
       create(:application_setting).cache!

-      expect(ApplicationSetting.cache_backend).to eq(Gitlab::ThreadMemoryCache.cache_backend)
+      expect(ApplicationSetting.cache_backend).to eq(Gitlab::ProcessMemoryCache.cache_backend)
       expect(ApplicationSetting.cache_backend).to receive(:read).with(ApplicationSetting.cache_key).once.and_call_original

       2.times { ApplicationSetting.current }
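The spec above pins down the layering for application settings: within one request, the request store memoizes the settings object, so the process memory cache is read at most once per request. A rough sketch of that read path, assuming a simplified database fallback (the real CacheableAttributes logic has more steps):

    # Rough sketch of the layered read the spec exercises; simplified on purpose.
    def current_application_setting
      Gitlab::SafeRequestStore.fetch(:current_application_setting) do         # per-request memo
        ApplicationSetting.cache_backend.fetch(ApplicationSetting.cache_key) do # per-process cache
          ApplicationSetting.last                                              # simplified DB fallback
        end
      end
    end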
@@ -209,9 +209,7 @@ RSpec.configure do |config|
     # expect(Gitlab::Git::KeepAround).to receive(:execute).and_call_original
     allow(Gitlab::Git::KeepAround).to receive(:execute)

-    [Gitlab::ThreadMemoryCache, Gitlab::ProcessMemoryCache].each do |cache|
-      cache.cache_backend.clear
-    end
+    Gitlab::ProcessMemoryCache.cache_backend.clear

     Sidekiq::Worker.clear_all