Commit fa464083 authored by Eugenia Grieff's avatar Eugenia Grieff

Add ability to refresh group issues count cache

- Add update_group_issues_counter_cache method
to Group model
- Refresh cached issues count when bulk closing
and reopening issues
- Add specs
parent aff5ebaf
......@@ -702,6 +702,12 @@ class Group < Namespace
Gitlab::ServiceDesk.supported? && all_projects.service_desk_enabled.exists?
end
# Refreshes this group's cached open issues count for the given user.
# Delegates to Groups::OpenIssuesCountService, which only re-caches the
# count when it is over the caching threshold.
# rubocop: disable CodeReuse/ServiceClass
def update_group_issues_counter_cache(current_user)
  count_service = Groups::OpenIssuesCountService.new(self, current_user)
  count_service.refresh_cache_over_threshold
end
# rubocop: enable CodeReuse/ServiceClass
private
def update_two_factor_requirement
......
......@@ -5,6 +5,44 @@ module Groups
class OpenIssuesCountService < Groups::CountService
# Cache key names. NOTE(review): judging by the names, the public key is for
# counts visible without elevated access and the total key includes all open
# issues — confirm against cache_key_name in the parent Groups::CountService.
PUBLIC_COUNT_KEY = 'group_public_open_issues_count'
TOTAL_COUNT_KEY = 'group_total_open_issues_count'
# Counts at or below this threshold are recomputed on every call rather than
# cached (see #refresh_cache_over_threshold and #count below).
CACHED_COUNT_THRESHOLD = 1000
# NOTE(review): not referenced in this visible chunk; presumably the TTL for
# the cached count — confirm where the expiration is applied.
EXPIRATION_TIME = 24.hours
attr_reader :group, :user
# Builds the counter service for a group, optionally scoped to a user.
# A nil user means counts are limited to what is publicly countable.
def initialize(group, user = nil)
  @group, @user = group, user
end
# Reads the count value from the cache and returns it if present.
# If the cache is empty or expired, #uncached_count calculates the issues count for the
# group and compares it with the threshold. If it is greater, it is written to the cache
# and returned; if it is below, it is returned without being cached.
# This results in only caching large counts and calculating the rest on every call, to
# maintain accuracy.
def count
  cached_count = Rails.cache.read(cache_key)
  return cached_count if cached_count.present?

  # Cache miss (or blank entry): recompute, caching only large counts.
  refresh_cache_over_threshold(reset_cache: false)
end
# Cache key for this group's open issues count.
#
# The argument is accepted for signature compatibility (presumably with the
# parent Groups::CountService — confirm) but is intentionally ignored: the
# key is derived from the group and the user-dependent cache_key_name.
# Renamed to `_key` to mark it as deliberately unused (Ruby convention).
def cache_key(_key = nil)
  ['groups', 'open_issues_count_service', VERSION, group.id, cache_key_name]
end
# Recalculates the open issues count and caches it only when it exceeds
# CACHED_COUNT_THRESHOLD. Smaller counts are cheap enough to recompute on
# demand, so any existing cached value is dropped instead — unless the
# caller opts out with reset_cache: false. Returns the fresh count.
def refresh_cache_over_threshold(reset_cache: true)
  uncached_count.tap do |fresh_count|
    if fresh_count > CACHED_COUNT_THRESHOLD
      update_cache_for_key(cache_key) { fresh_count }
    elsif reset_cache
      delete_cache
    end
  end
end
private
......
......@@ -15,9 +15,12 @@ module Issuable
# Applies a bulk update to the issuables identified by params[:issuable_ids]
# (a comma-separated ID string). `type` is the issuable type name, e.g.
# 'issue' or 'merge_request'.
#
# NOTE: the original chunk carried diff residue — the pre-change lines
# (`items = update_issuables(...)` and the `items.size` payload) were left
# interleaved with their replacements, which would have called
# update_issuables twice. Only the post-change version is kept here.
#
# When at least one issue changed state, the owning group's cached open
# issues count is refreshed so it does not go stale.
#
# Returns a success ServiceResponse whose payload carries the number of
# updated issuables, or an error response (HTTP 422) on invalid arguments.
def execute(type)
  ids = params.delete(:issuable_ids).split(",")
  set_update_params(type)
  updated_issues_count = update_issuables(type, ids)

  if updated_issues_count > 0 && requires_issues_count_cache_refresh?(type)
    update_group_cached_counts
  end

  response_success(payload: { count: updated_issues_count })
rescue ArgumentError => e
  response_error(e.message, 422)
end
......@@ -56,7 +59,7 @@ module Issuable
update_class.new(issuable.issuing_parent, current_user, params).execute(issuable)
end
items
items.size
end
def find_issuables(parent, model_class, ids)
......@@ -81,6 +84,18 @@ module Issuable
# Wraps an error message and HTTP status into a ServiceResponse error.
def response_error(message, http_status)
  error_attributes = { message: message, http_status: http_status }
  ServiceResponse.error(**error_attributes)
end
# Only bulk updates that change issue state can invalidate the group's
# cached open issues count; everything else skips the refresh.
def requires_issues_count_cache_refresh?(type)
  return false unless type == 'issue'

  params.include?(:state_event)
end
# Resolves the group that owns the issuables being updated: the parent
# itself when it is a Group, otherwise the parent's group (nil-safe for
# a missing parent).
def group
  return parent if parent.is_a?(Group)

  parent&.group
end
# Asks the owning group — when there is one — to refresh its cached open
# issues count for the current user; a group-less parent is a no-op.
def update_group_cached_counts
  issuable_group = group
  issuable_group.update_group_issues_counter_cache(current_user) unless issuable_group.nil?
end
end
end
......
---
title: Refresh group open issues count cache when bulk updating issue state
merge_request: 56386
author:
type: added
......@@ -2390,4 +2390,17 @@ RSpec.describe Group do
it { is_expected.to eq(Set.new([child_1.id])) }
end
describe '#update_group_issues_counter_cache' do
  let_it_be(:group) { create(:group) }
  let_it_be(:user) { create(:user) }

  it 'updates group issues count cache if over threshold' do
    # expect_any_instance_of is discouraged (brittle, partial-double based);
    # expect_next_instance_of also verifies the service is built for this
    # exact group and user.
    expect_next_instance_of(Groups::OpenIssuesCountService, group, user) do |service|
      expect(service).to receive(:refresh_cache_over_threshold).and_call_original
    end

    group.update_group_issues_counter_cache(user)
  end
end
end
......@@ -280,6 +280,65 @@ RSpec.describe Issuable::BulkUpdateService do
expect(issue2.reload.assignees).to be_empty
end
end
# Bulk state changes on project-level issues must refresh the owning
# group's cached open issues count (Groups::OpenIssuesCountService).
context 'when project belongs to a group' do
let_it_be(:project2) { create(:project, :repository, group: create(:group)) }
let_it_be(:open_issues) { create_list(:issue, 2, project: project2) }
let(:parent) { project2 }
let(:count_service) { Groups::OpenIssuesCountService }
# update_cache_for_key is stubbed for every service instance so no real
# cache write happens; individual examples set expectations on it.
before do
project2.add_reporter(user)
allow(Rails.cache).to receive(:delete).and_call_original
allow_next_instance_of(count_service) do |instance|
allow(instance).to receive(:update_cache_for_key)
end
end
# Parameterized over the target state event ('closed' / 'reopened').
shared_examples 'refreshing cached open issues count with state updates' do |state|
context 'when issues count is over cache threshold' do
before do
# Force the threshold below the actual count so caching kicks in.
stub_const("#{count_service}::CACHED_COUNT_THRESHOLD", (issues.size - 1))
end
it 'updates issues counts cache' do
expect_next_instance_of(count_service) do |service|
expect(service).to receive(:update_cache_for_key).and_return(true)
end
expect(Rails.cache).not_to receive(:delete)
bulk_update(issues, state_event: state)
end
end
context 'when issues count is under cache threshold' do
before do
# Force the threshold above the actual count so caching is skipped.
stub_const("#{count_service}::CACHED_COUNT_THRESHOLD", (issues.size + 2))
end
it 'does not update issues counts cache and delete cache' do
expect_next_instance_of(count_service) do |service|
expect(service).not_to receive(:update_cache_for_key)
end
# Under the threshold any stale cached value must be dropped instead.
expect(Rails.cache).to receive(:delete)
bulk_update(issues, state_event: state)
end
end
end
context 'when closing issues' do
let_it_be(:issues) { create_list(:issue, 2, project: project2) }
it_behaves_like 'refreshing cached open issues count with state updates', 'closed'
end
context 'when reopening issues' do
let_it_be(:issues) { create_list(:issue, 2, :closed, project: project2) }
it_behaves_like 'refreshing cached open issues count with state updates', 'reopened'
end
end
end
context 'with issuables at a group level' do
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment