Commit 4a290307 authored by GitLab Bot's avatar GitLab Bot

Automatic merge of gitlab-org/gitlab master

parents ee6d4468 5a98e5e1
......@@ -102,7 +102,6 @@ module Ci
end
scope :unstarted, -> { where(runner_id: nil) }
scope :ignore_failures, -> { where(allow_failure: false) }
scope :with_downloadable_artifacts, -> do
where('EXISTS (?)',
Ci::JobArtifact.select(1)
......
......@@ -37,6 +37,7 @@ module Namespaces
module Traversal
module Linear
extend ActiveSupport::Concern
include LinearScopes
UnboundedSearch = Class.new(StandardError)
......@@ -44,14 +45,6 @@ module Namespaces
before_update :lock_both_roots, if: -> { sync_traversal_ids? && parent_id_changed? }
after_create :sync_traversal_ids, if: -> { sync_traversal_ids? }
after_update :sync_traversal_ids, if: -> { sync_traversal_ids? && saved_change_to_parent_id? }
scope :traversal_ids_contains, ->(ids) { where("traversal_ids @> (?)", ids) }
# When filtering namespaces by the traversal_ids column to compile a
# list of namespace IDs, it's much faster to reference the ID in
# traversal_ids than the primary key ID column.
# WARNING This scope must be used behind a linear query feature flag
# such as `use_traversal_ids`.
scope :as_ids, -> { select('traversal_ids[array_length(traversal_ids, 1)] AS id') }
end
def sync_traversal_ids?
......@@ -164,20 +157,14 @@ module Namespaces
Namespace.lock.select(:id).where(id: roots).order(id: :asc).load
end
# Make sure we drop the STI `type = 'Group'` condition for better performance.
# Logically equivalent so long as hierarchies remain homogeneous.
def without_sti_condition
self.class.unscope(where: :type)
end
# Search this namespace's lineage. Bound inclusively by top node.
def lineage(top: nil, bottom: nil, hierarchy_order: nil)
raise UnboundedSearch, 'Must bound search by either top or bottom' unless top || bottom
skope = without_sti_condition
skope = self.class.without_sti_condition
if top
skope = skope.traversal_ids_contains("{#{top.id}}")
skope = skope.where("traversal_ids @> ('{?}')", top.id)
end
if bottom
......
# frozen_string_literal: true

module Namespaces
  module Traversal
    # Hierarchy scopes implemented against the denormalized
    # `namespaces.traversal_ids` array column (each row stores its full
    # ancestry, ending with its own ID) instead of recursive CTE queries.
    #
    # Each public scope calls `super` when the :use_traversal_ids feature
    # flag is disabled, so this concern relies on a fallback implementation
    # (e.g. Traversal::RecursiveScopes) appearing earlier in the ancestor
    # chain of the including class.
    module LinearScopes
      extend ActiveSupport::Concern

      class_methods do
        # When filtering namespaces by the traversal_ids column to compile a
        # list of namespace IDs, it can be faster to reference the ID in
        # traversal_ids than the primary key ID column.
        #
        # The last element of traversal_ids is the record's own ID, so this
        # projects it under the alias `id`.
        def as_ids
          return super unless use_traversal_ids?

          select('namespaces.traversal_ids[array_length(namespaces.traversal_ids, 1)] AS id')
        end

        # All namespaces in the current relation plus every descendant,
        # de-duplicated (a namespace may descend from multiple base rows).
        def self_and_descendants
          return super unless use_traversal_ids?

          without_dups = self_and_descendants_with_duplicates
            .select('DISTINCT on(namespaces.id) namespaces.*')

          # Wrap the `SELECT DISTINCT on(....)` with a normal query so we
          # retain expected Rails behavior. Otherwise count and other
          # aggregates won't work.
          unscoped.without_sti_condition.from(without_dups, :namespaces)
        end

        # ID-only variant of #self_and_descendants; plain DISTINCT suffices
        # since only the id column is projected.
        def self_and_descendant_ids
          return super unless use_traversal_ids?

          self_and_descendants_with_duplicates.select('DISTINCT namespaces.id')
        end

        # Make sure we drop the STI `type = 'Group'` condition for better performance.
        # Logically equivalent so long as hierarchies remain homogeneous.
        def without_sti_condition
          unscope(where: :type)
        end

        private

        def use_traversal_ids?
          Feature.enabled?(:use_traversal_ids, default_enabled: :yaml)
        end

        # Joins every namespace whose traversal_ids array contains the ID of
        # any row in the current relation (PostgreSQL `@>` containment).
        # May yield duplicate namespaces when base rows share descendants;
        # callers are responsible for de-duplication.
        def self_and_descendants_with_duplicates
          base_ids = select(:id)

          unscoped
            .without_sti_condition
            .from("namespaces, (#{base_ids.to_sql}) base")
            .where('namespaces.traversal_ids @> ARRAY[base.id]')
        end
      end
    end
  end
end
......@@ -4,6 +4,7 @@ module Namespaces
module Traversal
module Recursive
extend ActiveSupport::Concern
include RecursiveScopes
def root_ancestor
return self if parent.nil?
......
# frozen_string_literal: true

module Namespaces
  module Traversal
    # Hierarchy scopes backed by recursive CTE queries via
    # Gitlab::ObjectHierarchy. Acts as the fallback implementation that
    # Traversal::LinearScopes delegates to (via `super`) when the linear
    # traversal_ids path is disabled.
    module RecursiveScopes
      extend ActiveSupport::Concern

      class_methods do
        # Plain primary-key projection, mirroring LinearScopes#as_ids.
        def as_ids
          select('id')
        end

        # Current relation plus all descendants, resolved recursively.
        def self_and_descendants
          Gitlab::ObjectHierarchy.new(all).base_and_descendants
        end
        # Explicitly-named alias so callers can request the recursive
        # implementation even when a linear override is also included.
        alias_method :recursive_self_and_descendants, :self_and_descendants

        # ID-only variant of #self_and_descendants.
        def self_and_descendant_ids
          self_and_descendants.as_ids
        end
        alias_method :recursive_self_and_descendant_ids, :self_and_descendant_ids
      end
    end
  end
end
# frozen_string_literal: true

# Creates the postgres_async_indexes table, which queues index
# definitions for asynchronous (out-of-band) creation.
class CreatePostgresAsyncIndexesTable < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  def change
    create_table_with_constraints :postgres_async_indexes do |t|
      t.timestamps_with_timezone null: false

      t.text :name, null: false
      t.text :definition, null: false
      t.text :table_name, null: false

      # 63 matches PostgreSQL's maximum identifier length.
      t.text_limit :name, 63
      t.text_limit :definition, 2048
      t.text_limit :table_name, 63

      # Each queued index is identified uniquely by name.
      t.index :name, unique: true
    end
  end
end
# frozen_string_literal: true

# Relaxes the NOT NULL constraint on oncall_schedule_id so an escalation
# rule can target a user instead of an on-call schedule.
class RemoveNullConstraintOnScheduleFromEscalationRules < ActiveRecord::Migration[6.1]
  def up
    change_column_null :incident_management_escalation_rules, :oncall_schedule_id, true
  end

  def down
    # Rows without a schedule cannot satisfy the restored NOT NULL
    # constraint, so they are deleted before re-adding it.
    exec_query 'DELETE FROM incident_management_escalation_rules WHERE oncall_schedule_id IS NULL'
    change_column_null :incident_management_escalation_rules, :oncall_schedule_id, false
  end
end
# frozen_string_literal: true

# Adds a nullable user_id column to escalation rules, allowing a rule to
# notify a user as an alternative to an on-call schedule.
class AddUserToEscalationRules < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  def up
    with_lock_retries do
      add_column :incident_management_escalation_rules, :user_id, :bigint, null: true
    end
  end

  def down
    with_lock_retries do
      remove_column :incident_management_escalation_rules, :user_id
    end
  end
end
# frozen_string_literal: true

# Replaces the escalation-rule uniqueness index with one that includes the
# new user_id column, and adds a lookup index on user_id.
class AddUserIndexToEscalationRules < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  # Concurrent index operations cannot run inside a transaction.
  disable_ddl_transaction!

  USER_INDEX_NAME = 'index_escalation_rules_on_user'
  OLD_UNIQUE_INDEX_NAME = 'index_on_policy_schedule_status_elapsed_time_escalation_rules'
  NEW_UNIQUE_INDEX_NAME = 'index_escalation_rules_on_all_attributes'

  def up
    remove_concurrent_index_by_name :incident_management_escalation_rules, OLD_UNIQUE_INDEX_NAME

    add_concurrent_index :incident_management_escalation_rules, :user_id, name: USER_INDEX_NAME
    add_concurrent_index :incident_management_escalation_rules,
      [:policy_id, :oncall_schedule_id, :status, :elapsed_time_seconds, :user_id],
      unique: true,
      name: NEW_UNIQUE_INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :incident_management_escalation_rules, USER_INDEX_NAME
    remove_concurrent_index_by_name :incident_management_escalation_rules, NEW_UNIQUE_INDEX_NAME

    # User-based rules (oncall_schedule_id IS NULL) are not representable
    # under the old schedule-only unique index, so remove them first.
    exec_query 'DELETE FROM incident_management_escalation_rules WHERE oncall_schedule_id IS NULL'
    add_concurrent_index :incident_management_escalation_rules,
      [:policy_id, :oncall_schedule_id, :status, :elapsed_time_seconds],
      unique: true,
      name: OLD_UNIQUE_INDEX_NAME
  end
end
# frozen_string_literal: true

# Adds a foreign key from escalation rules to users; deleting a user
# cascades to the rules that notify them.
class AddUserFkToEscalationRules < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  # add_concurrent_foreign_key cannot run inside a transaction.
  disable_ddl_transaction!

  def up
    add_concurrent_foreign_key :incident_management_escalation_rules, :users, column: :user_id, on_delete: :cascade
  end

  def down
    with_lock_retries do
      remove_foreign_key_if_exists :incident_management_escalation_rules, column: :user_id
    end
  end
end
# frozen_string_literal: true

# Enforces that each escalation rule targets exactly one of an on-call
# schedule or a user (XOR via num_nonnulls = 1).
class AddXorCheckConstraintForEscalationRules < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  disable_ddl_transaction!

  CONSTRAINT_NAME = 'escalation_rules_one_of_oncall_schedule_or_user'

  def up
    add_check_constraint :incident_management_escalation_rules, 'num_nonnulls(oncall_schedule_id, user_id) = 1', CONSTRAINT_NAME
  end

  def down
    remove_check_constraint :incident_management_escalation_rules, CONSTRAINT_NAME
  end
end
# frozen_string_literal: true
class ReScheduleLatestPipelineIdPopulationWithLogging < ActiveRecord::Migration[6.1]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
DELAY_INTERVAL = 2.minutes.to_i
BATCH_SIZE = 100
MIGRATION = 'PopulateLatestPipelineIds'
disable_ddl_transaction!
def up
return unless Gitlab.ee?
Gitlab::BackgroundMigration.steal(MIGRATION)
queue_background_migration_jobs_by_range_at_intervals(
Gitlab::BackgroundMigration::PopulateLatestPipelineIds::ProjectSetting.has_vulnerabilities_without_latest_pipeline_set,
MIGRATION,
DELAY_INTERVAL,
batch_size: BATCH_SIZE,
primary_column_name: 'project_id'
)
# no-op: The content of the migration has been moved to
# `ReScheduleLatestPipelineIdPopulationWithAllSecurityRelatedArtifactTypes`.
end
def down
......
# frozen_string_literal: true

# Re-schedules the PopulateLatestPipelineIds background migration for
# project settings that have vulnerabilities but no latest pipeline ID set.
# EE-only data; no-op on non-EE installations.
class ReScheduleLatestPipelineIdPopulationWithAllSecurityRelatedArtifactTypes < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false
  DELAY_INTERVAL = 2.minutes.to_i
  BATCH_SIZE = 100
  MIGRATION = 'PopulateLatestPipelineIds'

  disable_ddl_transaction!

  def up
    return unless Gitlab.ee?

    # Drain any jobs still pending from the earlier scheduling of this
    # background migration before enqueueing a fresh batch set.
    Gitlab::BackgroundMigration.steal(MIGRATION)

    queue_background_migration_jobs_by_range_at_intervals(
      Gitlab::BackgroundMigration::PopulateLatestPipelineIds::ProjectSetting.has_vulnerabilities_without_latest_pipeline_set,
      MIGRATION,
      DELAY_INTERVAL,
      batch_size: BATCH_SIZE,
      primary_column_name: 'project_id'
    )
  end

  def down
    # no-op: scheduled background jobs are not unscheduled on rollback.
  end
end
1ef66bdf4a1c61d9a1e0e632d8728f86769ac727d43971e897284272e9f53581
\ No newline at end of file
cf276b9aa97fc7857499e1b103a8e09eda329a4db92d0e653cc6f7128987be39
\ No newline at end of file
5c6aff5b43a1e81e84a42f008a8a1ab90c77ee450884aa1ecc86bce551424f43
\ No newline at end of file
d49b1f48c2fa1cac8d7793f8bb025792f4bb85eed787ba3abdbaa4647523b70a
\ No newline at end of file
eab0f8488b0122ec6c5625c66ebcbd221579bdd9cc2cf670d1f26181709f23b7
\ No newline at end of file
a7a6697d86b71d59104af35a9d7d6f3caebf4ee1252e4f3e52133afb3f642e48
\ No newline at end of file
94978b93b1590cb2cfd9536a44a8817aa485a35d5372dfed31041261f5e12406
\ No newline at end of file
......@@ -13941,10 +13941,12 @@ ALTER SEQUENCE incident_management_escalation_policies_id_seq OWNED BY incident_
CREATE TABLE incident_management_escalation_rules (
id bigint NOT NULL,
policy_id bigint NOT NULL,
oncall_schedule_id bigint NOT NULL,
oncall_schedule_id bigint,
status smallint NOT NULL,
elapsed_time_seconds integer NOT NULL,
is_removed boolean DEFAULT false NOT NULL
is_removed boolean DEFAULT false NOT NULL,
user_id bigint,
CONSTRAINT escalation_rules_one_of_oncall_schedule_or_user CHECK ((num_nonnulls(oncall_schedule_id, user_id) = 1))
);
CREATE SEQUENCE incident_management_escalation_rules_id_seq
......@@ -16547,6 +16549,27 @@ CREATE SEQUENCE pool_repositories_id_seq
ALTER SEQUENCE pool_repositories_id_seq OWNED BY pool_repositories.id;
CREATE TABLE postgres_async_indexes (
id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
name text NOT NULL,
definition text NOT NULL,
table_name text NOT NULL,
CONSTRAINT check_083b21157b CHECK ((char_length(definition) <= 2048)),
CONSTRAINT check_b732c6cd1d CHECK ((char_length(name) <= 63)),
CONSTRAINT check_e64ff4359e CHECK ((char_length(table_name) <= 63))
);
CREATE SEQUENCE postgres_async_indexes_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE postgres_async_indexes_id_seq OWNED BY postgres_async_indexes.id;
CREATE VIEW postgres_foreign_keys AS
SELECT pg_constraint.oid,
pg_constraint.conname AS name,
......@@ -20511,6 +20534,8 @@ ALTER TABLE ONLY plans ALTER COLUMN id SET DEFAULT nextval('plans_id_seq'::regcl
ALTER TABLE ONLY pool_repositories ALTER COLUMN id SET DEFAULT nextval('pool_repositories_id_seq'::regclass);
ALTER TABLE ONLY postgres_async_indexes ALTER COLUMN id SET DEFAULT nextval('postgres_async_indexes_id_seq'::regclass);
ALTER TABLE ONLY postgres_reindex_actions ALTER COLUMN id SET DEFAULT nextval('postgres_reindex_actions_id_seq'::regclass);
ALTER TABLE ONLY product_analytics_events_experimental ALTER COLUMN id SET DEFAULT nextval('product_analytics_events_experimental_id_seq'::regclass);
......@@ -22075,6 +22100,9 @@ ALTER TABLE ONLY plans
ALTER TABLE ONLY pool_repositories
ADD CONSTRAINT pool_repositories_pkey PRIMARY KEY (id);
ALTER TABLE ONLY postgres_async_indexes
ADD CONSTRAINT postgres_async_indexes_pkey PRIMARY KEY (id);
ALTER TABLE ONLY postgres_reindex_actions
ADD CONSTRAINT postgres_reindex_actions_pkey PRIMARY KEY (id);
......@@ -23723,6 +23751,10 @@ CREATE INDEX index_esc_protected_branches_on_external_status_check_id ON externa
CREATE INDEX index_esc_protected_branches_on_protected_branch_id ON external_status_checks_protected_branches USING btree (protected_branch_id);
CREATE UNIQUE INDEX index_escalation_rules_on_all_attributes ON incident_management_escalation_rules USING btree (policy_id, oncall_schedule_id, status, elapsed_time_seconds, user_id);
CREATE INDEX index_escalation_rules_on_user ON incident_management_escalation_rules USING btree (user_id);
CREATE INDEX index_events_on_action ON events USING btree (action);
CREATE INDEX index_events_on_author_id_and_created_at ON events USING btree (author_id, created_at);
......@@ -24441,8 +24473,6 @@ CREATE INDEX index_on_oncall_schedule_escalation_rule ON incident_management_esc
CREATE INDEX index_on_pages_metadata_not_migrated ON project_pages_metadata USING btree (project_id) WHERE ((deployed = true) AND (pages_deployment_id IS NULL));
CREATE UNIQUE INDEX index_on_policy_schedule_status_elapsed_time_escalation_rules ON incident_management_escalation_rules USING btree (policy_id, oncall_schedule_id, status, elapsed_time_seconds);
CREATE UNIQUE INDEX index_on_project_id_escalation_policy_name_unique ON incident_management_escalation_policies USING btree (project_id, name);
CREATE INDEX index_on_projects_lower_path ON projects USING btree (lower((path)::text));
......@@ -24631,6 +24661,8 @@ CREATE INDEX index_pool_repositories_on_shard_id ON pool_repositories USING btre
CREATE UNIQUE INDEX index_pool_repositories_on_source_project_id_and_shard_id ON pool_repositories USING btree (source_project_id, shard_id);
CREATE UNIQUE INDEX index_postgres_async_indexes_on_name ON postgres_async_indexes USING btree (name);
CREATE INDEX index_postgres_reindex_actions_on_index_identifier ON postgres_reindex_actions USING btree (index_identifier);
CREATE UNIQUE INDEX index_programming_languages_on_name ON programming_languages USING btree (name);
......@@ -25895,6 +25927,9 @@ ALTER TABLE ONLY epics
ALTER TABLE ONLY clusters_applications_runners
ADD CONSTRAINT fk_02de2ded36 FOREIGN KEY (runner_id) REFERENCES ci_runners(id) ON DELETE SET NULL;
ALTER TABLE ONLY incident_management_escalation_rules
ADD CONSTRAINT fk_0314ee86eb FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE ONLY design_management_designs_versions
ADD CONSTRAINT fk_03c671965c FOREIGN KEY (design_id) REFERENCES design_management_designs(id) ON DELETE CASCADE;
......@@ -9219,6 +9219,7 @@ Represents an escalation rule for an escalation policy.
| <a id="escalationruletypeid"></a>`id` | [`IncidentManagementEscalationRuleID`](#incidentmanagementescalationruleid) | ID of the escalation rule. |
| <a id="escalationruletypeoncallschedule"></a>`oncallSchedule` | [`IncidentManagementOncallSchedule`](#incidentmanagementoncallschedule) | The on-call schedule to notify. |
| <a id="escalationruletypestatus"></a>`status` | [`EscalationRuleStatus`](#escalationrulestatus) | The status required to prevent the rule from activating. |
| <a id="escalationruletypeuser"></a>`user` | [`UserCore`](#usercore) | The user to notify. |
### `Event`
......@@ -16737,8 +16738,9 @@ Represents an escalation rule.
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="escalationruleinputelapsedtimeseconds"></a>`elapsedTimeSeconds` | [`Int!`](#int) | The time in seconds before the rule is activated. |
| <a id="escalationruleinputoncallscheduleiid"></a>`oncallScheduleIid` | [`ID!`](#id) | The on-call schedule to notify. |
| <a id="escalationruleinputoncallscheduleiid"></a>`oncallScheduleIid` | [`ID`](#id) | The on-call schedule to notify. |
| <a id="escalationruleinputstatus"></a>`status` | [`EscalationRuleStatus!`](#escalationrulestatus) | The status required to prevent the rule from activating. |
| <a id="escalationruleinputusername"></a>`username` | [`String`](#string) | The username of the user to notify. |
### `JiraUsersMappingInputType`
......
......@@ -226,3 +226,88 @@ def down
remove_concurrent_index_by_name :projects, INDEX_NAME
end
```
## Create indexes asynchronously
For very large tables, index creation can be a challenge to manage.
While `add_concurrent_index` creates indexes in a way that does not block
normal traffic, it can still be problematic when index creation runs for
many hours. Necessary database operations like `autovacuum` cannot run, and
on GitLab.com, the deployment process is blocked waiting for index
creation to finish.
To limit impact on GitLab.com, a process exists to create indexes
asynchronously during weekend hours. Due to generally lower levels of
traffic and lack of regular deployments, this process allows the
creation of indexes to proceed with a lower level of risk. The below
sections describe the steps required to use these features:
1. [Schedule the index to be created](#schedule-the-index-to-be-created).
1. [Verify the MR was deployed and the index exists in production](#verify-the-mr-was-deployed-and-the-index-exists-in-production).
1. [Add a migration to create the index synchronously](#add-a-migration-to-create-the-index-synchronously).
### Schedule the index to be created
Create an MR with a post-deployment migration which prepares the index
for asynchronous creation. An example of creating an index using
the asynchronous index helpers can be seen in the block below. This migration
enters the index name and definition into the `postgres_async_indexes`
table. The process that runs on weekends pulls indexes from this
table and attempts to create them.
```ruby
# in db/post_migrate/
INDEX_NAME = 'index_ci_builds_on_some_column'
def up
prepare_async_index :ci_builds, :some_column, name: INDEX_NAME
end
def down
unprepare_async_index :ci_builds, :some_column, name: INDEX_NAME
end
```
### Verify the MR was deployed and the index exists in production
You can verify if the MR was deployed to GitLab.com by executing
`/chatops run auto_deploy status <merge_sha>`. To verify existence of
the index, you can:
- Use a meta-command in #database-lab, such as: `\di <index_name>`
- Ask someone in #database to check if the index exists
- With proper access, you can also verify directly on production or in a
production clone
### Add a migration to create the index synchronously
After the index is verified to exist on the production database, create a second
merge request that adds the index synchronously. The synchronous
migration results in a no-op on GitLab.com, but you should still add the
migration as expected for other installations. The below block
demonstrates how to create the second migration for the previous
asynchronous example.
WARNING:
The responsibility lies on the individual writing the migrations to verify
the index exists in production before merging a second migration that
adds the index using `add_concurrent_index`. If the second migration is
deployed and the index has not yet been created, the index is created
synchronously when the second migration executes.
```ruby
# in db/post_migrate/
INDEX_NAME = 'index_ci_builds_on_some_column'
disable_ddl_transaction!
def up
add_concurrent_index :ci_builds, :some_column, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :ci_builds, INDEX_NAME
end
```
......@@ -48,6 +48,7 @@ feature is available.
> - DAST and SAST metrics [added](https://gitlab.com/gitlab-org/gitlab/-/issues/328033) in GitLab 14.1.
> - Fuzz Testing metrics [added](https://gitlab.com/gitlab-org/gitlab/-/issues/330398) in GitLab 14.2.
> - Dependency Scanning metrics [added](https://gitlab.com/gitlab-org/gitlab/-/issues/328034) in GitLab 14.2.
> - Multiselect [added](https://gitlab.com/gitlab-org/gitlab/-/issues/333586) in GitLab 14.2.
DevOps Adoption shows you which groups in your organization are using the most essential features of GitLab:
......@@ -66,15 +67,15 @@ DevOps Adoption shows you which groups in your organization are using the most e
- Pipelines
- Runners
To add your groups, in the top right-hand section the page, select **Add group to table**.
To add or remove your groups, in the top right-hand section of the page, select **Add or remove groups**.
DevOps Adoption allows you to:
- Verify whether you are getting the return on investment that you expected from GitLab.
- Identify specific groups that are lagging in their adoption of GitLab so you can help them along in their DevOps journey.
- Find the groups that have adopted certain features and can provide guidance to other groups on how to use those features.
- Identify specific groups that are lagging in their adoption of GitLab, so you can help them along in their DevOps journey.
- Find the groups that have adopted certain features, and can provide guidance to other groups on how to use those features.
![DevOps Report](img/admin_devops_adoption_v14_1.png)
![DevOps Report](img/admin_devops_adoption_v14_2.png)
### Disable or enable DevOps Adoption
......
......@@ -12,6 +12,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - DAST and SAST metrics [added](https://gitlab.com/gitlab-org/gitlab/-/issues/328033) in GitLab 14.1.
> - Fuzz Testing metrics [added](https://gitlab.com/gitlab-org/gitlab/-/issues/330398) in GitLab 14.2.
> - Dependency Scanning metrics [added](https://gitlab.com/gitlab-org/gitlab/-/issues/328034) in GitLab 14.2.
> - Multiselect [added](https://gitlab.com/gitlab-org/gitlab/-/issues/333586) in GitLab 14.2.
Prerequisites:
......@@ -19,7 +20,7 @@ Prerequisites:
To access Group DevOps Adoption, go to your group and select **Analytics > DevOps Adoption**.
Group DevOps Adoption shows you how individual groups and sub-groups within your organization use the following features:
Group DevOps Adoption shows you how individual groups and subgroups within your organization use the following features:
- Dev
- Approvals
......@@ -36,16 +37,16 @@ Group DevOps Adoption shows you how individual groups and sub-groups within your
- Pipelines
- Runners
When managing groups in the UI, you can add your sub-groups with the **Add sub-group to table**
When managing groups in the UI, you can add or remove your subgroups with the **Add or remove subgroups**
button, in the top right hand section of your Groups pages.
With DevOps Adoption you can:
- Verify whether you are getting the return on investment that you expected from GitLab.
- Identify specific sub-groups that are lagging in their adoption of GitLab so you can help them along in their DevOps journey.
- Find the sub-groups that have adopted certain features and can provide guidance to other sub-groups on how to use those features.
- Identify specific subgroups that are lagging in their adoption of GitLab, so you can help them along in their DevOps journey.
- Find the subgroups that have adopted certain features, and can provide guidance to other subgroups on how to use those features.
![DevOps Report](img/group_devops_adoption_v14_1.png)
![DevOps Report](img/group_devops_adoption_v14_2.png)
## Enable data processing
......@@ -59,10 +60,10 @@ GitLab requires around a minute to process it.
## What is displayed
DevOps Adoption displays feature adoption data for the given group
and any added sub-groups for the current calendar month.
and any added subgroups for the current calendar month.
Each group appears as a separate row in the table.
For each row, a feature is considered "adopted" if it has been used in a project in the given group
during the time period (including projects in any sub-groups of the given group).
during the time period (including projects in any subgroups of the given group).
## When is a feature considered adopted
......@@ -83,14 +84,14 @@ Following this guideline, GitLab doesn't penalize for:
over time, so we should not consider adoption to have decreased if GitLab adds features.
This means we should not measure adoption by percentages, only total counts.
## Add a sub-group
## Add a subgroup
DevOps Adoption can also display data for sub-groups within the given group,
DevOps Adoption can also display data for subgroups within the given group,
to show you differences in adoption across the group.
To add a sub-group to your Group DevOps Adoption report:
To add subgroups to your Group DevOps Adoption report:
1. Select **Add/remove sub-groups**.
1. Select the sub-group you want to add and select **Save changes**.
1. Select **Add or remove subgroups**.
1. Select the subgroups you want to add and select **Save changes**.
The sub-group data might not appear immediately, because GitLab requires around a minute to collect
The subgroup data might not appear immediately, because GitLab requires around a minute to collect
the data.
......@@ -410,7 +410,7 @@ To create a task list, follow the format of an ordered or unordered list:
A table of contents is an unordered list that links to subheadings in the document.
To add a table of contents to a Markdown file, wiki page, issue request, or merge request
description, add the `[[_TOC_]]` tag on its own line.
description, add either the `[[_TOC_]]` or `[TOC]` tag on its own line.
NOTE:
You can add a table of contents to issues and merge requests, but you can't add one
......
......@@ -8,7 +8,7 @@ import {
} from '@gitlab/ui';
import * as Sentry from '@sentry/browser';
import { TYPE_GROUP } from '~/graphql_shared/constants';
import { convertToGraphQLId } from '~/graphql_shared/utils';
import { convertToGraphQLId, getIdFromGraphQLId } from '~/graphql_shared/utils';
import {
DEBOUNCE_DELAY,
I18N_GROUP_DROPDOWN_TEXT,
......@@ -19,6 +19,7 @@ import {
I18N_NO_SUB_GROUPS,
} from '../constants';
import bulkEnableDevopsAdoptionNamespacesMutation from '../graphql/mutations/bulk_enable_devops_adoption_namespaces.mutation.graphql';
import disableDevopsAdoptionNamespaceMutation from '../graphql/mutations/disable_devops_adoption_namespace.mutation.graphql';
export default {
name: 'DevopsAdoptionAddDropdown',
......@@ -63,6 +64,11 @@ export default {
required: false,
default: false,
},
enabledNamespaces: {
type: Object,
required: false,
default: () => ({ nodes: [] }),
},
},
computed: {
filteredGroupsLength() {
......@@ -77,12 +83,31 @@ export default {
tooltipText() {
return this.isLoadingGroups || this.hasSubgroups ? false : I18N_NO_SUB_GROUPS;
},
enabledNamespaceIds() {
return this.enabledNamespaces.nodes.map((enabledNamespace) =>
getIdFromGraphQLId(enabledNamespace.namespace.id),
);
},
},
beforeDestroy() {
clearTimeout(this.timeout);
this.timeout = null;
},
methods: {
namespaceIdByGroupId(groupId) {
return this.enabledNamespaces.nodes?.find(
(enabledNamespace) => getIdFromGraphQLId(enabledNamespace.namespace.id) === groupId,
).id;
},
handleGroupSelect(id) {
const groupEnabled = this.isGroupEnabled(id);
if (groupEnabled) {
this.disableGroup(id);
} else {
this.enableGroup(id);
}
},
enableGroup(id) {
this.$apollo
.mutate({
......@@ -103,6 +128,28 @@ export default {
Sentry.captureException(error);
});
},
disableGroup(id) {
const gid = this.namespaceIdByGroupId(id);
this.$apollo
.mutate({
mutation: disableDevopsAdoptionNamespaceMutation,
variables: {
id: gid,
},
update: () => {
this.$emit('enabledNamespacesRemoved', gid);
},
})
.catch((error) => {
Sentry.captureException(error);
});
},
isGroupEnabled(groupId) {
return this.enabledNamespaceIds.some((namespaceId) => {
return namespaceId === groupId;
});
},
},
};
</script>
......@@ -126,8 +173,10 @@ export default {
<gl-dropdown-item
v-for="group in groups"
:key="group.id"
:is-check-item="true"
:is-checked="isGroupEnabled(group.id)"
data-testid="group-row"
@click="enableGroup(group.id)"
@click.native.capture.stop="handleGroupSelect(group.id)"
>
{{ group.full_name }}
</gl-dropdown-item>
......
......@@ -4,7 +4,6 @@ import * as Sentry from '@sentry/browser';
import dateformat from 'dateformat';
import DevopsScore from '~/analytics/devops_report/components/devops_score.vue';
import API from '~/api';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
import { mergeUrlParams, updateHistory, getParameterValues } from '~/lib/utils/url_utility';
import {
I18N_GROUPS_QUERY_ERROR,
......@@ -150,13 +149,6 @@ export default {
enabledNamespaces() {
return this.devopsAdoptionEnabledNamespaces?.nodes || [];
},
disabledGroupNodes() {
const enabledNamespaceIds = this.enabledNamespaces.map((group) =>
getIdFromGraphQLId(group.namespace.id),
);
return this.availableGroups.filter((group) => !enabledNamespaceIds.includes(group.id));
},
},
created() {
this.fetchGroups();
......@@ -328,7 +320,7 @@ export default {
:cols="tab.cols"
:enabled-namespaces="devopsAdoptionEnabledNamespaces"
:search-term="searchTerm"
:disabled-group-nodes="disabledGroupNodes"
:groups="availableGroups"
:is-loading-groups="isLoadingGroups"
:has-subgroups="hasSubgroups"
@enabledNamespacesRemoved="deleteEnabledNamespacesFromCache"
......@@ -350,11 +342,13 @@ export default {
>
<devops-adoption-add-dropdown
:search-term="searchTerm"
:groups="disabledGroupNodes"
:groups="availableGroups"
:enabled-namespaces="devopsAdoptionEnabledNamespaces"
:is-loading-groups="isLoadingGroups"
:has-subgroups="hasSubgroups"
@fetchGroups="fetchGroups"
@enabledNamespacesAdded="addEnabledNamespacesToCache"
@enabledNamespacesRemoved="deleteEnabledNamespacesFromCache"
/>
</span>
</template>
......
......@@ -42,7 +42,7 @@ export default {
required: false,
default: () => {},
},
disabledGroupNodes: {
groups: {
type: Array,
required: true,
},
......@@ -75,11 +75,13 @@ export default {
<devops-adoption-add-dropdown
class="gl-mb-3 gl-md-display-none"
:search-term="searchTerm"
:groups="disabledGroupNodes"
:groups="groups"
:enabled-namespaces="enabledNamespaces"
:is-loading-groups="isLoadingGroups"
:has-subgroups="hasSubgroups"
@fetchGroups="$emit('fetchGroups', $event)"
@enabledNamespacesAdded="$emit('enabledNamespacesAdded', $event)"
@enabledNamespacesRemoved="$emit('enabledNamespacesRemoved', $event)"
@trackModalOpenState="$emit('trackModalOpenState', $event)"
/>
</div>
......
......@@ -34,13 +34,13 @@ export const I18N_TABLE_REMOVE_BUTTON_DISABLED = s__(
'DevopsAdoption|You cannot remove the group you are currently in.',
);
export const I18N_GROUP_DROPDOWN_TEXT = s__('DevopsAdoption|Add sub-group to table');
export const I18N_GROUP_DROPDOWN_HEADER = s__('DevopsAdoption|Add sub-group');
export const I18N_ADMIN_DROPDOWN_TEXT = s__('DevopsAdoption|Add group to table');
export const I18N_ADMIN_DROPDOWN_HEADER = s__('DevopsAdoption|Add group');
export const I18N_GROUP_DROPDOWN_TEXT = s__('DevopsAdoption|Add or remove subgroups');
export const I18N_GROUP_DROPDOWN_HEADER = s__('DevopsAdoption|Edit subgroups');
export const I18N_ADMIN_DROPDOWN_TEXT = s__('DevopsAdoption|Add or remove groups');
export const I18N_ADMIN_DROPDOWN_HEADER = s__('DevopsAdoption|Edit groups');
export const I18N_NO_RESULTS = s__('DevopsAdoption|No results…');
export const I18N_NO_SUB_GROUPS = s__('DevopsAdoption|This group has no sub-groups');
export const I18N_NO_SUB_GROUPS = s__('DevopsAdoption|This group has no subgroups');
export const I18N_FEATURES_ADOPTED_TEXT = s__(
'DevopsAdoption|%{adoptedCount}/%{featuresCount} %{title} features adopted',
......
......@@ -105,7 +105,7 @@ export default {
axios({ method, url, data })
.then(({ data: responseData }) => {
this.isEditingComment = false;
this.$emit(emitName, responseData, this.comment);
this.$emit(emitName, { response: responseData, comment: this.comment });
})
.catch(() => {
createFlash({
......
......@@ -36,11 +36,11 @@ export default {
addComment(comment) {
this.notes.push(comment);
},
updateComment(data, comment) {
updateComment({ response, comment }) {
const index = this.notes.indexOf(comment);
if (index > -1) {
this.notes.splice(index, 1, { ...comment, ...data });
this.notes.splice(index, 1, { ...comment, ...response });
}
},
removeComment(comment) {
......
......@@ -43,23 +43,42 @@ module Mutations
def prepare_rules_attributes(project, args)
return args unless rules = args.delete(:rules)
iids = rules.collect { |rule| rule[:oncall_schedule_iid] }
found_schedules = schedules_for_iids(project, iids)
rules_attributes = rules.map { |rule| prepare_rule(found_schedules, rule.to_h) }
schedules = find_schedules(project, rules)
users = find_users(rules)
rules_attributes = rules.map { |rule| prepare_rule(rule.to_h, schedules, users) }
args.merge(rules_attributes: rules_attributes)
end
def prepare_rule(schedules, rule)
def prepare_rule(rule, schedules, users)
iid = rule.delete(:oncall_schedule_iid).to_i
username = rule.delete(:username)
rule.merge(oncall_schedule: schedules[iid])
rule.merge(
oncall_schedule: schedules[iid],
user: users[username]
)
end
def schedules_for_iids(project, iids)
schedules = ::IncidentManagement::OncallSchedulesFinder.new(current_user, project, iid: iids).execute
def find_schedules(project, rules)
find_resource(rules, :oncall_schedule_iid) do |iids|
::IncidentManagement::OncallSchedulesFinder.new(current_user, project, iid: iids).execute.index_by(&:iid)
end
end
def find_users(rules)
find_resource(rules, :username) do |usernames|
UsersFinder.new(current_user, username: usernames).execute.index_by(&:username)
end
end
def find_resource(rules, attribute)
identifiers = rules.collect { |rule| rule[attribute] }.uniq.compact
resources = yield(identifiers)
return resources if resources.length == identifiers.length
schedules.index_by(&:iid)
raise_resource_not_available_error!
end
end
end
......
......@@ -8,7 +8,11 @@ module Types
argument :oncall_schedule_iid, GraphQL::Types::ID, # rubocop: disable Graphql/IDType
description: 'The on-call schedule to notify.',
required: true
required: false
argument :username, GraphQL::Types::String,
description: 'The username of the user to notify.',
required: false
argument :elapsed_time_seconds, GraphQL::Types::Int,
description: 'The time in seconds before the rule is activated.',
......@@ -17,6 +21,18 @@ module Types
argument :status, Types::IncidentManagement::EscalationRuleStatusEnum,
description: 'The status required to prevent the rule from activating.',
required: true
def prepare
unless schedule_iid_or_username
raise Gitlab::Graphql::Errors::ArgumentError, 'One of oncall_schedule_iid or username must be provided'
end
super
end
def schedule_iid_or_username
oncall_schedule_iid.present? ^ username.present?
end
end
end
end
......@@ -15,6 +15,10 @@ module Types
null: true,
description: 'The on-call schedule to notify.'
field :user, Types::UserType,
null: true,
description: 'The user to notify.'
field :elapsed_time_seconds, GraphQL::Types::Int,
null: true,
description: 'The time in seconds before the rule is activated.'
......
......@@ -11,7 +11,6 @@ module IncidentManagement
validates :project_id, uniqueness: { message: _('can only have one escalation policy') }, on: :create
validates :name, presence: true, uniqueness: { scope: [:project_id] }, length: { maximum: 72 }
validates :description, length: { maximum: 160 }
validates :rules, presence: true
accepts_nested_attributes_for :rules
end
......
......@@ -5,19 +5,35 @@ module IncidentManagement
self.table_name = 'incident_management_escalation_rules'
belongs_to :policy, class_name: 'EscalationPolicy', inverse_of: 'rules', foreign_key: 'policy_id'
belongs_to :oncall_schedule, class_name: 'OncallSchedule', inverse_of: 'rotations', foreign_key: 'oncall_schedule_id'
belongs_to :oncall_schedule, class_name: 'OncallSchedule', foreign_key: 'oncall_schedule_id', optional: true
belongs_to :user, optional: true
enum status: AlertManagement::Alert::STATUSES.slice(:acknowledged, :resolved)
validates :status, presence: true
validates :oncall_schedule, presence: true
validates :elapsed_time_seconds,
presence: true,
numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 24.hours }
validates :policy_id, uniqueness: { scope: [:oncall_schedule_id, :status, :elapsed_time_seconds], message: _('must have a unique schedule, status, and elapsed time') }
validate :schedule_or_rule_present
validates :oncall_schedule_id,
uniqueness: { scope: [:policy_id, :status, :elapsed_time_seconds],
message: _('must be unique by status and elapsed time within a policy') },
allow_nil: true
validates :user_id,
uniqueness: { scope: [:policy_id, :status, :elapsed_time_seconds],
message: _('must be unique by status and elapsed time within a policy') },
allow_nil: true
scope :not_removed, -> { where(is_removed: false) }
scope :removed, -> { where(is_removed: true) }
private
def schedule_or_rule_present
unless oncall_schedule.present? ^ user.present?
errors.add(:base, 'must have either an on-call schedule or user')
end
end
end
end
......@@ -14,7 +14,13 @@ module IncidentManagement
end
def invalid_schedules?
params[:rules_attributes]&.any? { |attrs| attrs[:oncall_schedule]&.project != project }
params[:rules_attributes]&.any? { |attrs| attrs[:oncall_schedule] && attrs[:oncall_schedule].project != project }
end
def users_without_permissions?
DeclarativePolicy.subject_scope do
params[:rules_attributes]&.any? { |attrs| attrs[:user] && !attrs[:user].can?(:read_project, project) }
end
end
def error(message)
......@@ -38,7 +44,11 @@ module IncidentManagement
end
def error_bad_schedules
error(_('All escalations rules must have a schedule in the same project as the policy'))
error(_('Schedule-based escalation rules must have a schedule in the same project as the policy'))
end
def error_user_without_permission
error(_('User-based escalation rules must have a user with access to the project'))
end
def error_in_save(policy)
......
......@@ -23,6 +23,7 @@ module IncidentManagement
return error_no_rules if params[:rules_attributes].blank?
return error_too_many_rules if too_many_rules?
return error_bad_schedules if invalid_schedules?
return error_user_without_permission if users_without_permissions?
escalation_policy = project.incident_management_escalation_policies.create(params)
......
......@@ -28,6 +28,7 @@ module IncidentManagement
return error_no_rules if empty_rules?
return error_too_many_rules if too_many_rules?
return error_bad_schedules if invalid_schedules?
return error_user_without_permission if users_without_permissions?
reconcile_rules!
......@@ -85,7 +86,7 @@ module IncidentManagement
end
def unique_id(rule)
rule.slice(:oncall_schedule_id, :elapsed_time_seconds, :status)
rule.slice(:oncall_schedule_id, :user_id, :elapsed_time_seconds, :status)
end
end
end
......
......@@ -55,10 +55,14 @@ module IncidentManagement
def oncall_notification_recipients
strong_memoize(:oncall_notification_recipients) do
::IncidentManagement::OncallUsersFinder.new(project, schedule: rule.oncall_schedule).execute.to_a
rule.user_id ? [rule.user] : schedule_recipients
end
end
def schedule_recipients
::IncidentManagement::OncallUsersFinder.new(project, schedule: rule.oncall_schedule).execute.to_a
end
def destroy_escalation!
escalation.destroy!
end
......
......@@ -10,4 +10,9 @@
%span.gl-mr-3.gl-text-green-400= sprite_icon 'check-circle', css_class: 'gl-icon'
= value_prop
%p.gl-mb-3.gl-text-gray-400.gl-font-sm.gl-line-height-24= s_('InProductMarketing|Used by more than 100,000 organizations from around the globe.')
%p.gl-mb-3.gl-text-gray-400.gl-font-sm.gl-line-height-24= s_('InProductMarketing|Used by more than 100,000 organizations from around the globe:')
%ul.gl-display-flex.gl-flex-wrap.gl-pl-0.gl-font-sm.gl-font-weight-bold.gl-opacity-5
- ['Siemens', 'Chorus', 'KnowBe4', 'Wish', 'Hotjar'].each do |company|
%li.gl-display-flex.gl-align-items-center.gl-min-w-8.gl-h-8.gl-mr-6
= company
......@@ -66,7 +66,9 @@ module EE
# dast: 8
# secret_detection: 21
# coverage_fuzzing: 23
FILE_TYPES = [5, 6, 7, 8, 21, 23].freeze
# api_fuzzing: 26
# cluster_image_scanning: 27
FILE_TYPES = [5, 6, 7, 8, 21, 23, 26, 27].freeze
LATEST_PIPELINE_WITH_REPORTS_SQL = <<~SQL
SELECT
"ci_pipelines"."id"
......
......@@ -2,7 +2,7 @@
FactoryBot.define do
factory :incident_management_escalation_rule, class: 'IncidentManagement::EscalationRule' do
policy { association :incident_management_escalation_policy }
policy { association :incident_management_escalation_policy, rule_count: 0 }
oncall_schedule { association :incident_management_oncall_schedule, project: policy.project }
status { IncidentManagement::EscalationRule.statuses[:acknowledged] }
elapsed_time_seconds { 5.minutes }
......@@ -15,5 +15,10 @@ FactoryBot.define do
trait :removed do
is_removed { true }
end
trait :with_user do
oncall_schedule {}
user { association :user, developer_projects: [policy.project] }
end
end
end
......@@ -4,7 +4,15 @@ import { createLocalVue } from '@vue/test-utils';
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import DevopsAdoptionAddDropdown from 'ee/analytics/devops_report/devops_adoption/components/devops_adoption_add_dropdown.vue';
import {
I18N_GROUP_DROPDOWN_TEXT,
I18N_GROUP_DROPDOWN_HEADER,
I18N_ADMIN_DROPDOWN_TEXT,
I18N_ADMIN_DROPDOWN_HEADER,
I18N_NO_SUB_GROUPS,
} from 'ee/analytics/devops_report/devops_adoption/constants';
import bulkEnableDevopsAdoptionNamespacesMutation from 'ee/analytics/devops_report/devops_adoption/graphql/mutations/bulk_enable_devops_adoption_namespaces.mutation.graphql';
import disableDevopsAdoptionNamespaceMutation from 'ee/analytics/devops_report/devops_adoption/graphql/mutations/disable_devops_adoption_namespace.mutation.graphql';
import createMockApollo from 'helpers/mock_apollo_helper';
import { createMockDirective, getBinding } from 'helpers/vue_mock_directive';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
......@@ -18,7 +26,7 @@ import {
const localVue = createLocalVue();
Vue.use(VueApollo);
const mutate = jest.fn().mockResolvedValue({
const mutateAdd = jest.fn().mockResolvedValue({
data: {
bulkEnableDevopsAdoptionNamespaces: {
enabledNamespaces: [devopsAdoptionNamespaceData.nodes[0]],
......@@ -26,14 +34,28 @@ const mutate = jest.fn().mockResolvedValue({
},
},
});
const mutateDisable = jest.fn().mockResolvedValue({
data: {
disableDevopsAdoptionNamespace: {
errors: [],
},
},
});
const mutateWithErrors = jest.fn().mockRejectedValue(genericDeleteErrorMessage);
describe('DevopsAdoptionAddDropdown', () => {
let wrapper;
const createComponent = ({ enableNamespaceSpy = mutate, provide = {}, props = {} } = {}) => {
const createComponent = ({
enableNamespaceSpy = mutateAdd,
disableNamespaceSpy = mutateDisable,
provide = {},
props = {},
} = {}) => {
const mockApollo = createMockApollo([
[bulkEnableDevopsAdoptionNamespacesMutation, enableNamespaceSpy],
[disableDevopsAdoptionNamespaceMutation, disableNamespaceSpy],
]);
wrapper = shallowMountExtended(DevopsAdoptionAddDropdown, {
......@@ -55,7 +77,7 @@ describe('DevopsAdoptionAddDropdown', () => {
const findDropdown = () => wrapper.findComponent(GlDropdown);
const clickFirstRow = () => wrapper.findByTestId('group-row').vm.$emit('click');
const clickFirstRow = () => wrapper.findByTestId('group-row').trigger('click');
describe('default behaviour', () => {
beforeEach(() => {
......@@ -69,8 +91,8 @@ describe('DevopsAdoptionAddDropdown', () => {
it('displays the correct text', () => {
const dropdown = findDropdown();
expect(dropdown.props('text')).toBe('Add group to table');
expect(dropdown.props('headerText')).toBe('Add group');
expect(dropdown.props('text')).toBe(I18N_ADMIN_DROPDOWN_TEXT);
expect(dropdown.props('headerText')).toBe(I18N_ADMIN_DROPDOWN_HEADER);
});
it('is disabled', () => {
......@@ -81,7 +103,7 @@ describe('DevopsAdoptionAddDropdown', () => {
const tooltip = getBinding(findDropdown().element, 'gl-tooltip');
expect(tooltip).toBeDefined();
expect(tooltip.value).toBe('This group has no sub-groups');
expect(tooltip.value).toBe(I18N_NO_SUB_GROUPS);
});
});
......@@ -91,8 +113,8 @@ describe('DevopsAdoptionAddDropdown', () => {
const dropdown = findDropdown();
expect(dropdown.props('text')).toBe('Add sub-group to table');
expect(dropdown.props('headerText')).toBe('Add sub-group');
expect(dropdown.props('text')).toBe(I18N_GROUP_DROPDOWN_TEXT);
expect(dropdown.props('headerText')).toBe(I18N_GROUP_DROPDOWN_HEADER);
});
});
......@@ -129,31 +151,47 @@ describe('DevopsAdoptionAddDropdown', () => {
describe('on row click', () => {
describe.each`
level | groupGid
${'group'} | ${groupGids[0]}
${'admin'} | ${null}
`('$level level sucessful request', ({ groupGid }) => {
level | groupGid | enabledNamespaces
${'group'} | ${groupGids[0]} | ${undefined}
${'group'} | ${groupGids[0]} | ${devopsAdoptionNamespaceData}
${'admin'} | ${null} | ${undefined}
${'admin'} | ${null} | ${devopsAdoptionNamespaceData}
`('$level level sucessful request', ({ groupGid, enabledNamespaces }) => {
beforeEach(() => {
createComponent({
props: { hasSubgroups: true, groups: groupNodes },
props: { hasSubgroups: true, groups: groupNodes, enabledNamespaces },
provide: { groupGid },
});
clickFirstRow();
});
it('makes a request to enable the selected group', () => {
expect(mutate).toHaveBeenCalledWith({
displayNamespaceId: groupGid,
namespaceIds: ['gid://gitlab/Group/1'],
if (!enabledNamespaces) {
it('makes a request to enable the selected group', () => {
expect(mutateAdd).toHaveBeenCalledWith({
displayNamespaceId: groupGid,
namespaceIds: ['gid://gitlab/Group/1'],
});
});
});
it('emits the enabledNamespacesAdded event', () => {
const [params] = wrapper.emitted().enabledNamespacesAdded[0];
it('emits the enabledNamespacesAdded event', () => {
const [params] = wrapper.emitted().enabledNamespacesAdded[0];
expect(params).toStrictEqual([devopsAdoptionNamespaceData.nodes[0]]);
});
expect(params).toStrictEqual([devopsAdoptionNamespaceData.nodes[0]]);
});
} else {
it('makes a request to disable the selected group', () => {
expect(mutateDisable).toHaveBeenCalledWith({
id: devopsAdoptionNamespaceData.nodes[0].id,
});
});
it('emits the enabledNamespacesRemoved event', () => {
const [params] = wrapper.emitted().enabledNamespacesRemoved[0];
expect(params).toBe(devopsAdoptionNamespaceData.nodes[0].id);
});
}
});
describe('on error', () => {
......
......@@ -22,7 +22,7 @@ describe('DevopsAdoptionSection', () => {
hasGroupData: true,
cols: DEVOPS_ADOPTION_TABLE_CONFIGURATION[0].cols,
enabledNamespaces: devopsAdoptionNamespaceData,
disabledGroupNodes: groupNodes,
groups: groupNodes,
searchTerm: '',
isLoadingGroups: false,
hasSubgroups: true,
......
export const groupData = [
{ id: '1', full_name: 'Foo' },
{ id: '2', full_name: 'Bar' },
{ id: 1, full_name: 'Foo' },
{ id: 2, full_name: 'Bar' },
];
export const groupNodes = [
{
__typename: 'Group',
full_name: 'Foo',
id: '1',
id: 1,
},
{
__typename: 'Group',
full_name: 'Bar',
id: '2',
id: 2,
},
];
export const groupNodeLabelValues = [
{ label: 'Foo', value: '1' },
{ label: 'Bar', value: '2' },
{ label: 'Foo', value: 1 },
{ label: 'Bar', value: 2 },
];
export const groupIds = [1, 2];
......@@ -28,7 +28,7 @@ export const groupGids = ['gid://gitlab/Group/1', 'gid://gitlab/Group/2'];
export const devopsAdoptionNamespaceData = {
nodes: [
{
id: 1,
id: 'gid://gitlab/EnabledNamespace/1',
namespace: {
fullName: 'Group 1',
id: 'gid://gitlab/Group/1',
......@@ -51,7 +51,7 @@ export const devopsAdoptionNamespaceData = {
__typename: 'devopsAdoptionEnabledNamespace',
},
{
id: 2,
id: 'gid://gitlab/EnabledNamespace/2',
namespace: {
fullName: 'Group 2',
id: 'gid://gitlab/Group/2',
......
......@@ -115,8 +115,9 @@ describe('History Comment', () => {
})
.then(() => {
expect(mockAxios.history.post).toHaveLength(1);
expect(wrapper.emitted().onCommentAdded).toBeTruthy();
expect(wrapper.emitted().onCommentAdded[0][0]).toEqual(comment);
expect(wrapper.emitted().onCommentAdded[0]).toEqual([
{ response: comment, comment: undefined },
]);
});
});
......@@ -239,9 +240,9 @@ describe('History Comment', () => {
})
.then(() => {
expect(mockAxios.history.put).toHaveLength(1);
expect(wrapper.emitted().onCommentUpdated).toBeTruthy();
expect(wrapper.emitted().onCommentUpdated[0][0]).toEqual(responseData);
expect(wrapper.emitted().onCommentUpdated[0][1]).toEqual(comment);
expect(wrapper.emitted().onCommentUpdated[0]).toEqual([
{ response: responseData, comment },
]);
});
});
......
......@@ -39,9 +39,9 @@ describe('History Entry', () => {
});
};
const eventItem = () => wrapper.find(EventItem);
const newComment = () => wrapper.find({ ref: 'newComment' });
const existingComments = () => wrapper.findAll({ ref: 'existingComment' });
const eventItem = () => wrapper.findComponent(EventItem);
const newComment = () => wrapper.findComponent({ ref: 'newComment' });
const existingComments = () => wrapper.findAllComponents({ ref: 'existingComment' });
const commentAt = (index) => existingComments().at(index);
afterEach(() => wrapper.destroy());
......@@ -92,23 +92,19 @@ describe('History Entry', () => {
});
});
it('updates an existing comment correctly', () => {
const note = 'new note';
it('updates an existing comment correctly', async () => {
const response = { note: 'new note' };
createWrapper(systemNote, commentNote);
commentAt(0).vm.$emit('onCommentUpdated', { note }, commentNote);
await commentAt(0).vm.$emit('onCommentUpdated', { response, comment: commentNote });
return wrapper.vm.$nextTick().then(() => {
expect(commentAt(0).props('comment').note).toBe(note);
});
expect(commentAt(0).props('comment').note).toBe(response.note);
});
it('deletes an existing comment correctly', () => {
it('deletes an existing comment correctly', async () => {
createWrapper(systemNote, commentNote);
commentAt(0).vm.$emit('onCommentDeleted', commentNote);
await commentAt(0).vm.$emit('onCommentDeleted', commentNote);
return wrapper.vm.$nextTick().then(() => {
expect(newComment().exists()).toBe(true);
expect(existingComments()).toHaveLength(0);
});
expect(newComment().exists()).toBe(true);
expect(existingComments()).toHaveLength(0);
});
});
......@@ -19,7 +19,7 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Create do
status: ::IncidentManagement::EscalationRule.statuses[:acknowledged]
},
{
oncall_schedule_iid: oncall_schedule.iid,
username: current_user&.username,
elapsed_time_seconds: 600,
status: ::IncidentManagement::EscalationRule.statuses[:resolved]
}
......@@ -71,8 +71,8 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Create do
expect(rules.size).to eq(2)
expect(rules).to match_array([
have_attributes(oncall_schedule_id: oncall_schedule.id, elapsed_time_seconds: 300, status: 'acknowledged'),
have_attributes(oncall_schedule_id: oncall_schedule.id, elapsed_time_seconds: 600, status: 'resolved')
have_attributes(oncall_schedule_id: oncall_schedule.id, user: nil, elapsed_time_seconds: 300, status: 'acknowledged'),
have_attributes(oncall_schedule_id: nil, user: current_user, elapsed_time_seconds: 600, status: 'resolved')
])
end
......@@ -91,7 +91,7 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Create do
args[:rules][0][:oncall_schedule_iid] = other_schedule.iid
end
it_behaves_like 'returns a GraphQL error', 'All escalations rules must have a schedule in the same project as the policy'
it_behaves_like 'raises a resource not available error'
context 'user does not have permission for project' do
before do
......@@ -101,6 +101,14 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Create do
it_behaves_like 'raises a resource not available error'
end
end
context 'user for rule does not exist' do
before do
args[:rules][1][:username] = 'junk-username'
end
it_behaves_like 'raises a resource not available error'
end
end
context 'user does not have permission for project' do
......
......@@ -110,6 +110,7 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Update do
context 'with rule updates' do
let(:oncall_schedule_iid) { oncall_schedule.iid }
let(:username) { reporter.username }
let(:rule_args) do
[
{
......@@ -119,6 +120,13 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Update do
},
{
oncall_schedule_iid: oncall_schedule_iid,
username: nil,
elapsed_time_seconds: 800,
status: :acknowledged
},
{
oncall_schedule_iid: nil,
username: username,
elapsed_time_seconds: 800,
status: :acknowledged
}
......@@ -128,22 +136,17 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Update do
let(:expected_rules) do
[
first_rule,
have_attributes(oncall_schedule_id: oncall_schedule.id, elapsed_time_seconds: 800, status: 'acknowledged')
have_attributes(oncall_schedule_id: oncall_schedule.id, user: nil, elapsed_time_seconds: 800, status: 'acknowledged'),
have_attributes(oncall_schedule_id: nil, user: reporter, elapsed_time_seconds: 800, status: 'acknowledged')
]
end
it_behaves_like 'successful update with no errors'
context 'when schedule does not exist' do
let(:error_message) { eq("The oncall schedule for iid #{non_existing_record_iid} could not be found") }
let(:oncall_schedule_iid) { non_existing_record_iid }
it 'returns errors in the body of the response' do
expect(resolve).to eq(
escalation_policy: nil,
errors: ['All escalations rules must have a schedule in the same project as the policy']
)
end
it_behaves_like 'failed update with a top-level access error'
context 'the user does not have permission to update policies regardless' do
let(:current_user) { reporter }
......@@ -151,6 +154,12 @@ RSpec.describe Mutations::IncidentManagement::EscalationPolicy::Update do
it_behaves_like 'failed update with a top-level access error'
end
end
context "when rule's user does not exist" do
let(:username) { 'invalid-username' }
it_behaves_like 'failed update with a top-level access error'
end
end
end
......
# frozen_string_literal: true
require 'spec_helper'
# Specs for the GraphQL input type backing escalation rules
# (GitlabSchema.types['EscalationRuleInput']). The input type's
# `prepare` hook raises unless exactly one of `oncall_schedule_iid`
# or `username` is supplied (XOR), so both the mutual-exclusion
# failure paths and the happy-path coercion are exercised here.
RSpec.describe GitlabSchema.types['EscalationRuleInput'] do
context 'mutually exclusive arguments' do
# Raw argument hash as a client would submit it; the schedule IID and
# username are injected per-context via the `let`s below.
let(:input) do
{
oncall_schedule_iid: schedule_iid,
username: username,
elapsed_time_seconds: 0,
status: 'RESOLVED'
}
end
# Expected coerced output: the status enum value is downcased and the
# schedule IID is stringified (GraphQL ID coercion); `&.to_s` keeps a
# nil IID as nil rather than turning it into "".
let(:output) { input.merge(status: 'resolved', oncall_schedule_iid: schedule_iid&.to_s) }
# Both identifiers default to nil so each context opts in to exactly
# the one(s) it is testing.
let(:schedule_iid) {}
let(:username) {}
# Coerce the input in isolation (outside a full query) and convert to a
# plain hash for comparison against `output`.
subject { described_class.coerce_isolated_input(input).to_h }
context 'with neither username nor schedule provided' do
# Zero identifiers fails the XOR check in the type's `prepare` hook.
specify { expect { subject }.to raise_error(Gitlab::Graphql::Errors::ArgumentError, 'One of oncall_schedule_iid or username must be provided') }
end
context 'with both username and schedule provided' do
let(:schedule_iid) { 3 }
let(:username) { 'username' }
# Two identifiers also fails the XOR check — same error as none.
specify { expect { subject }.to raise_error(Gitlab::Graphql::Errors::ArgumentError, 'One of oncall_schedule_iid or username must be provided') }
end
context 'with only on-call schedule provided' do
let(:schedule_iid) { 3 }
# Exactly one identifier (schedule) passes validation and coerces cleanly.
it { is_expected.to eq(output) }
end
context 'with only user schedule provided' do
let(:username) { 'username' }
# Exactly one identifier (user) passes validation and coerces cleanly.
it { is_expected.to eq(output) }
end
end
it 'has specific fields' do
# Argument names are exposed in camelCase on the GraphQL schema.
allowed_args = %w(oncallScheduleIid username elapsedTimeSeconds status)
expect(described_class.arguments.keys).to include(*allowed_args)
end
end
......@@ -9,6 +9,7 @@ RSpec.describe GitlabSchema.types['EscalationRuleType'] do
expected_fields = %i[
id
oncall_schedule
user
elapsed_time_seconds
status
]
......
......@@ -25,7 +25,6 @@ RSpec.describe IncidentManagement::EscalationPolicy do
describe 'validations' do
it { is_expected.to validate_presence_of(:name) }
it { is_expected.to validate_presence_of(:rules) }
it { is_expected.to validate_uniqueness_of(:project_id).with_message(/can only have one escalation policy/).on(:create) }
it { is_expected.to validate_uniqueness_of(:name).scoped_to(:project_id) }
it { is_expected.to validate_length_of(:name).is_at_most(72) }
......
......@@ -3,27 +3,54 @@
require 'spec_helper'
RSpec.describe IncidentManagement::EscalationRule do
let_it_be(:policy) { create(:incident_management_escalation_policy) }
subject { build(:incident_management_escalation_rule, policy: policy) }
it { is_expected.to be_valid }
subject { build(:incident_management_escalation_rule) }
describe 'associations' do
it { is_expected.to belong_to(:policy) }
it { is_expected.to belong_to(:oncall_schedule) }
it { is_expected.to belong_to(:oncall_schedule).optional }
it { is_expected.to belong_to(:user).optional }
end
describe 'validations' do
it { is_expected.to be_valid }
it { is_expected.to validate_presence_of(:status) }
it { is_expected.to validate_presence_of(:elapsed_time_seconds) }
it { is_expected.to validate_numericality_of(:elapsed_time_seconds).is_greater_than_or_equal_to(0).is_less_than_or_equal_to(24.hours) }
it { is_expected.to validate_uniqueness_of(:policy_id).scoped_to([:oncall_schedule_id, :status, :elapsed_time_seconds] ).with_message('must have a unique schedule, status, and elapsed time') }
it { is_expected.to validate_uniqueness_of(:oncall_schedule_id).scoped_to([:policy_id, :status, :elapsed_time_seconds] ).with_message('must be unique by status and elapsed time within a policy') }
context 'user-based rules' do
subject { build(:incident_management_escalation_rule, :with_user) }
it { is_expected.to be_valid }
it { is_expected.to validate_uniqueness_of(:user_id).scoped_to([:policy_id, :status, :elapsed_time_seconds] ).with_message('must be unique by status and elapsed time within a policy') }
end
context 'mutually exclusive attributes' do
context 'when user and schedule are both provided' do
let_it_be(:schedule) { create(:incident_management_oncall_schedule) }
subject { build(:incident_management_escalation_rule, :with_user, oncall_schedule: schedule) }
specify do
expect(subject).to be_invalid
expect(subject.errors.messages[:base]).to eq(['must have either an on-call schedule or user'])
end
end
context 'neither user nor schedule are provided' do
subject { build(:incident_management_escalation_rule, oncall_schedule: nil) }
specify do
expect(subject).to be_invalid
expect(subject.errors.messages[:base]).to eq(['must have either an on-call schedule or user'])
end
end
end
end
describe 'scopes' do
let_it_be(:rule) { policy.rules.first }
let_it_be(:removed_rule) { create(:incident_management_escalation_rule, :removed, policy: policy) }
let_it_be(:rule) { create(:incident_management_escalation_rule) }
let_it_be(:removed_rule) { create(:incident_management_escalation_rule, :removed, policy: rule.policy) }
describe '.not_removed' do
subject { described_class.not_removed }
......
......@@ -118,7 +118,8 @@ RSpec.describe 'getting Incident Management escalation policies' do
'name' => last_policy_rule.oncall_schedule.name,
'description' => last_policy_rule.oncall_schedule.description,
'timezone' => last_policy_rule.oncall_schedule.timezone
}
},
'user' => nil
}
]
)
......
......@@ -10,6 +10,7 @@ RSpec.describe 'getting Incident Management escalation policies' do
let_it_be(:policy) { create(:incident_management_escalation_policy, project: project) }
let_it_be(:rule) { policy.rules.first }
let_it_be(:schedule) { rule.oncall_schedule }
let_it_be(:user_rule) { create(:incident_management_escalation_rule, :with_user, policy: policy) }
let(:params) { {} }
......@@ -25,6 +26,9 @@ RSpec.describe 'getting Incident Management escalation policies' do
iid
name
}
user {
username
}
}
}
QUERY
......@@ -49,15 +53,25 @@ RSpec.describe 'getting Incident Management escalation policies' do
it 'includes expected data' do
post_graphql(query, current_user: current_user)
expect(escalation_rules_response).to eq([{
'id' => global_id(rule),
'elapsedTimeSeconds' => rule.elapsed_time_seconds, # 5 min
'status' => rule.status.upcase, # 'ACKNOWLEDGED'
'oncallSchedule' => {
'iid' => schedule.iid.to_s,
'name' => schedule.name
expect(escalation_rules_response).to eq([
{
'id' => global_id(rule),
'elapsedTimeSeconds' => rule.elapsed_time_seconds, # 5 min
'status' => rule.status.upcase, # 'ACKNOWLEDGED'
'oncallSchedule' => {
'iid' => schedule.iid.to_s,
'name' => schedule.name
},
'user' => nil
},
{
'id' => global_id(user_rule),
'elapsedTimeSeconds' => user_rule.elapsed_time_seconds, # 5 min
'status' => user_rule.status.upcase, # 'ACKNOWLEDGED'
'oncallSchedule' => nil,
'user' => { 'username' => user_rule.user.username }
}
}])
])
end
context 'with multiple rules' do
......@@ -68,10 +82,11 @@ RSpec.describe 'getting Incident Management escalation policies' do
it 'orders rules by time and status' do
post_graphql(query, current_user: current_user)
expect(escalation_rules_response.length).to eq(4)
expect(escalation_rules_response.length).to eq(5)
expect(escalation_rules_response.map { |rule| rule['id'] }).to eq([
global_id(earlier_resolved_rule),
global_id(rule),
global_id(user_rule),
global_id(equivalent_resolved_rule),
global_id(later_acknowledged_rule)
])
......
......@@ -84,20 +84,26 @@ RSpec.describe IncidentManagement::EscalationPolicies::CreateService do
it_behaves_like 'error response', 'Escalation policies may not have more than 10 rules'
end
context 'oncall schedule is blank' do
context 'oncall schedule is on the wrong project' do
before do
rule_params[0][:oncall_schedule] = nil
rule_params[0][:oncall_schedule] = create(:incident_management_oncall_schedule)
end
it_behaves_like 'error response', 'All escalations rules must have a schedule in the same project as the policy'
it_behaves_like 'error response', 'Schedule-based escalation rules must have a schedule in the same project as the policy'
end
context 'oncall schedule is on the wrong project' do
before do
rule_params[0][:oncall_schedule] = create(:incident_management_oncall_schedule)
context 'user for rule does not have project access' do
let(:rule_params) do
[
{
user: create(:user),
elapsed_time_seconds: 60,
status: :resolved
}
]
end
it_behaves_like 'error response', 'All escalations rules must have a schedule in the same project as the policy'
it_behaves_like 'error response', 'User-based escalation rules must have a user with access to the project'
end
context 'project has an existing escalation policy' do
......@@ -115,6 +121,39 @@ RSpec.describe IncidentManagement::EscalationPolicies::CreateService do
policy = execute.payload[:escalation_policy]
expect(policy).to be_a(::IncidentManagement::EscalationPolicy)
expect(policy.rules.length).to eq(1)
expect(policy.rules.first).to have_attributes(
oncall_schedule: oncall_schedule,
user: nil,
elapsed_time_seconds: 60,
status: 'resolved'
)
end
context 'for a user-based escalation rule' do
let(:rule_params) do
[
{
user: user_with_permissions,
elapsed_time_seconds: 60,
status: :resolved
}
]
end
it 'creates the policy and rules' do
expect(execute).to be_success
policy = execute.payload[:escalation_policy]
expect(policy).to be_a(::IncidentManagement::EscalationPolicy)
expect(policy.rules.length).to eq(1)
expect(policy.rules.first).to have_attributes(
oncall_schedule: nil,
user: user_with_permissions,
elapsed_time_seconds: 60,
status: 'resolved'
)
end
end
end
end
......
......@@ -7,8 +7,11 @@ RSpec.describe IncidentManagement::EscalationPolicies::UpdateService do
let_it_be(:user_without_permissions) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:oncall_schedule) { create(:incident_management_oncall_schedule, project: project) }
let_it_be_with_reload(:escalation_policy) { create(:incident_management_escalation_policy, project: project, rule_count: 2) }
let_it_be_with_reload(:escalation_rules) { escalation_policy.rules }
let_it_be_with_reload(:escalation_policy) { create(:incident_management_escalation_policy, project: project) }
let_it_be_with_reload(:schedule_escalation_rule) { escalation_policy.rules.first }
let_it_be_with_reload(:user_escalation_rule) { create(:incident_management_escalation_rule, :with_user, policy: escalation_policy) }
let_it_be_with_reload(:escalation_rules) { escalation_policy.reload.rules }
let(:service) { described_class.new(escalation_policy, current_user, params) }
let(:current_user) { user_with_permissions }
......@@ -24,14 +27,16 @@ RSpec.describe IncidentManagement::EscalationPolicies::UpdateService do
let(:rule_params) { [*existing_rules_params, new_rule_params] }
let(:existing_rules_params) do
escalation_rules.map do |rule|
rule.slice(:oncall_schedule, :elapsed_time_seconds)
rule.slice(:oncall_schedule, :user, :elapsed_time_seconds)
.merge(status: rule.status.to_sym)
end
end
let(:user_for_rule) {}
let(:new_rule_params) do
{
oncall_schedule: oncall_schedule,
user: user_for_rule,
elapsed_time_seconds: 800,
status: :acknowledged
}
......@@ -94,6 +99,13 @@ RSpec.describe IncidentManagement::EscalationPolicies::UpdateService do
let(:expected_rules) { [*escalation_rules, new_rule] }
it_behaves_like 'successful update with no errors'
context 'with a user-based rule' do
let(:oncall_schedule) { nil }
let(:user_for_rule) { user_with_permissions }
it_behaves_like 'successful update with no errors'
end
end
context 'when all old rules are replaced' do
......@@ -166,17 +178,18 @@ RSpec.describe IncidentManagement::EscalationPolicies::UpdateService do
it_behaves_like 'error response', 'Escalation policies may not have more than 10 rules'
end
context 'when the on-call schedule is not present on the rule' do
let(:rule_params) { [new_rule_params.except(:oncall_schedule)] }
it_behaves_like 'error response', 'All escalations rules must have a schedule in the same project as the policy'
end
context 'when the on-call schedule is not on the project' do
let(:other_schedule) { create(:incident_management_oncall_schedule) }
let(:rule_params) { [new_rule_params.merge(oncall_schedule: other_schedule)] }
it_behaves_like 'error response', 'All escalations rules must have a schedule in the same project as the policy'
it_behaves_like 'error response', 'Schedule-based escalation rules must have a schedule in the same project as the policy'
end
context "when the rule's user does not have access to the project" do
let(:oncall_schedule) { nil }
let(:user_for_rule) { user_without_permissions }
it_behaves_like 'error response', 'User-based escalation rules must have a user with access to the project'
end
context 'when an error occurs during update' do
......
......@@ -7,7 +7,7 @@ RSpec.describe IncidentManagement::PendingEscalations::ProcessService do
let_it_be(:schedule_1) { create(:incident_management_oncall_schedule, :with_rotation, project: project) }
let_it_be(:schedule_1_users) { schedule_1.participants.map(&:user) }
let(:escalation_rule) { build(:incident_management_escalation_rule, oncall_schedule: schedule_1 ) }
let(:escalation_rule) { build(:incident_management_escalation_rule, oncall_schedule: schedule_1) }
let!(:escalation_policy) { create(:incident_management_escalation_policy, project: project, rules: [escalation_rule]) }
let(:alert) { create(:alert_management_alert, project: project, **alert_params) }
......@@ -55,6 +55,14 @@ RSpec.describe IncidentManagement::PendingEscalations::ProcessService do
expect { execute }.to change(Note, :count).by(1)
end
context 'when escalation rule is for a user' do
let(:escalation_rule) { build(:incident_management_escalation_rule, :with_user) }
let(:users) { [escalation_rule.user] }
it_behaves_like 'sends on-call notification'
it_behaves_like 'deletes the escalation'
end
end
context 'target is already resolved' do
......
......@@ -2,26 +2,31 @@
module Banzai
module Filter
# Using `[[_TOC_]]`, inserts a Table of Contents list.
# This syntax is based on the Gollum syntax. This way we have
# some consistency between with wiki and normal markdown.
# If there ever emerges a markdown standard, we can implement
# that here.
# Using `[[_TOC_]]` or `[TOC]` (both case insensitive), inserts a Table of Contents list.
#
# `[[_TOC_]]` is based on the Gollum syntax. This way we have
# some consistency between the wiki and normal markdown.
# The support for this has been removed from GollumTagsFilter
#
# `[toc]` is a generally accepted form, used by Typora for example.
#
# Based on Banzai::Filter::GollumTagsFilter
class TableOfContentsTagFilter < HTML::Pipeline::Filter
TEXT_QUERY = %q(descendant-or-self::text()[ancestor::p and contains(., 'TOC')])
TEXT_QUERY = %q(descendant-or-self::text()[ancestor::p and contains(translate(., 'TOC', 'toc'), 'toc')])
def call
return doc if context[:no_header_anchors]
doc.xpath(TEXT_QUERY).each do |node|
# A Gollum ToC tag is `[[_TOC_]]`, but due to MarkdownFilter running
# before this one, it will be converted into `[[<em>TOC</em>]]`, so it
# needs special-case handling
process_toc_tag(node) if toc_tag?(node)
if toc_tag?(node)
# Support [TOC] / [toc] tags, which don't have a wrapping <em>-tag
process_toc_tag(node)
elsif toc_tag_em?(node)
# Support Gollum like ToC tag (`[[_TOC_]]` / `[[_toc_]]`), which will be converted
# into `[[<em>TOC</em>]]` by the markdown filter, so it
# needs special-case handling
process_toc_tag_em(node)
end
end
doc
......@@ -31,14 +36,25 @@ module Banzai
# Replace an entire `[[<em>TOC</em>]]` node with the result generated by
# TableOfContentsFilter
def process_toc_tag_em(node)
process_toc_tag(node.parent)
end
# Replace an entire `[TOC]` node with the result generated by
# TableOfContentsFilter
def process_toc_tag(node)
node.parent.parent.replace(result[:toc].presence || '')
# we still need to go one step up to also replace the surrounding <p></p>
node.parent.replace(result[:toc].presence || '')
end
def toc_tag?(node)
node.content == 'TOC' &&
def toc_tag_em?(node)
node.content.casecmp?('toc') &&
node.parent.name == 'em' &&
node.parent.parent.text == '[[TOC]]'
node.parent.parent.text.casecmp?('[[toc]]')
end
def toc_tag?(node)
node.parent.text.casecmp?('[toc]')
end
end
end
......
# frozen_string_literal: true

module Gitlab
  module Database
    module AsyncIndexes
      DEFAULT_INDEXES_PER_INVOCATION = 2

      # Picks the oldest pending index definitions (ordered by primary key)
      # and creates each of them via IndexCreator.
      #
      # @param how_many [Integer] maximum number of indexes to create per call
      def self.create_pending_indexes!(how_many: DEFAULT_INDEXES_PER_INVOCATION)
        pending_indexes = PostgresAsyncIndex.order(:id).limit(how_many)

        pending_indexes.each { |index| IndexCreator.new(index).perform }
      end
    end
  end
end
# frozen_string_literal: true
module Gitlab
  module Database
    module AsyncIndexes
      # Executes a single queued CREATE INDEX statement stored in a
      # PostgresAsyncIndex record, then removes the record.
      #
      # The whole operation is guarded by an exclusive lease so that
      # concurrent invocations cannot run the same CREATE INDEX twice.
      class IndexCreator
        include ExclusiveLeaseGuard
        # Lease TTL; consumed by ExclusiveLeaseGuard via #lease_timeout below.
        TIMEOUT_PER_ACTION = 1.day
        # Upper bound on how long the CREATE INDEX statement may run.
        STATEMENT_TIMEOUT = 9.hours
        # @param async_index [Gitlab::Database::AsyncIndexes::PostgresAsyncIndex]
        #   the queued index definition to execute
        def initialize(async_index)
          @async_index = async_index
        end
        # Creates the index (unless one with that name already exists on the
        # table) and deletes the queuing record. Does nothing when the
        # exclusive lease cannot be obtained.
        def perform
          try_obtain_lease do
            if index_exists?
              log_index_info('Skipping index creation as the index exists')
            else
              log_index_info('Creating async index')
              set_statement_timeout do
                connection.execute(async_index.definition)
              end
            end
            # The record is removed whether we created the index or it was
            # already present - either way it is no longer pending.
            async_index.destroy
          end
        end
        private
        attr_reader :async_index
        # True if an index with the queued name already exists on the table.
        def index_exists?
          connection.indexes(async_index.table_name).any? { |index| index.name == async_index.name }
        end
        def connection
          @connection ||= ApplicationRecord.connection
        end
        # Required by ExclusiveLeaseGuard.
        def lease_timeout
          TIMEOUT_PER_ACTION
        end
        # Runs the block under a session-level statement timeout, resetting
        # it afterwards even if the block raises.
        def set_statement_timeout
          connection.execute("SET statement_timeout TO '%ds'" % STATEMENT_TIMEOUT)
          yield
        ensure
          connection.execute('RESET statement_timeout')
        end
        def log_index_info(message)
          Gitlab::AppLogger.info(message: message, table_name: async_index.table_name, index_name: async_index.name)
        end
      end
    end
  end
end
# frozen_string_literal: true
module Gitlab
  module Database
    module AsyncIndexes
      # Migration helpers to queue and dequeue indexes for asynchronous
      # creation (executed later by AsyncIndexes.create_pending_indexes!).
      module MigrationHelpers
        # Removes the queued async-creation entry for the index on
        # table_name/column_name. The index name is taken from
        # options[:name] or derived via the Rails naming convention.
        # No-op when async index creation is not available.
        def unprepare_async_index(table_name, column_name, **options)
          return unless async_index_creation_available?
          index_name = options[:name] || index_name(table_name, column_name)
          raise 'Specifying index name is mandatory - specify name: argument' unless index_name
          unprepare_async_index_by_name(table_name, index_name)
        end
        # Removes the queued async-creation entry with the given index name,
        # if one exists. No-op when async index creation is not available.
        def unprepare_async_index_by_name(table_name, index_name, **options)
          return unless async_index_creation_available?
          PostgresAsyncIndex.find_by(name: index_name).try do |async_index|
            async_index.destroy
          end
        end
        # Prepares an index for asynchronous creation.
        #
        # Stores the index information in the postgres_async_indexes table to be created later. The
        # index will always be created CONCURRENTLY, so that option does not need to be given.
        # If a queued entry with the same name already exists, it is returned
        # unchanged (the stored definition is not overwritten).
        #
        # If the requested index has already been created, it is not stored in the table for
        # asynchronous creation.
        #
        # Returns the PostgresAsyncIndex record, or nil when skipped (feature
        # unavailable or index already exists).
        def prepare_async_index(table_name, column_name, **options)
          return unless async_index_creation_available?
          index_name = options[:name] || index_name(table_name, column_name)
          raise 'Specifying index name is mandatory - specify name: argument' unless index_name
          # Async indexes are always created concurrently to avoid write locks.
          options = options.merge({ algorithm: :concurrently })
          if index_exists?(table_name, column_name, **options)
            Gitlab::AppLogger.warn(
              message: 'Index not prepared because it already exists',
              table_name: table_name,
              index_name: index_name)
            return
          end
          # Render the CREATE INDEX statement exactly as Rails would, so the
          # stored definition can be replayed verbatim later.
          index, algorithm, if_not_exists = add_index_options(table_name, column_name, **options)
          create_index = ActiveRecord::ConnectionAdapters::CreateIndexDefinition.new(index, algorithm, if_not_exists)
          schema_creation = ActiveRecord::ConnectionAdapters::PostgreSQL::SchemaCreation.new(ApplicationRecord.connection)
          definition = schema_creation.accept(create_index)
          async_index = PostgresAsyncIndex.safe_find_or_create_by!(name: index_name) do |rec|
            rec.table_name = table_name
            rec.definition = definition
          end
          Gitlab::AppLogger.info(
            message: 'Prepared index for async creation',
            table_name: async_index.table_name,
            index_name: async_index.name)
          async_index
        end
        private
        # Available only when the backing table exists (it may not during
        # earlier migrations) and the ops feature flag is enabled.
        def async_index_creation_available?
          ApplicationRecord.connection.table_exists?(:postgres_async_indexes) &&
            Feature.enabled?(:database_async_index_creation, type: :ops)
        end
      end
    end
  end
end
# frozen_string_literal: true

module Gitlab
  module Database
    module AsyncIndexes
      # Stores queued index definitions awaiting asynchronous creation.
      class PostgresAsyncIndex < ApplicationRecord
        self.table_name = 'postgres_async_indexes'

        MAX_IDENTIFIER_LENGTH = Gitlab::Database::MigrationHelpers::MAX_IDENTIFIER_NAME_LENGTH
        MAX_DEFINITION_LENGTH = 2048

        # Index and table names are bounded by PostgreSQL's identifier limit.
        validates :name, :table_name, presence: true, length: { maximum: MAX_IDENTIFIER_LENGTH }
        validates :definition, presence: true, length: { maximum: MAX_DEFINITION_LENGTH }

        def to_s
          definition
        end
      end
    end
  end
end
......@@ -6,6 +6,7 @@ module Gitlab
include Migrations::BackgroundMigrationHelpers
include DynamicModelHelpers
include RenameTableHelpers
include AsyncIndexes::MigrationHelpers
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
MAX_IDENTIFIER_NAME_LENGTH = 63
......@@ -152,6 +153,9 @@ module Gitlab
disable_statement_timeout do
add_index(table_name, column_name, **options)
end
# We created this index. Now let's remove the queuing entry for async creation in case it's still there.
unprepare_async_index(table_name, column_name, **options)
end
# Removes an existing index, concurrently
......@@ -178,6 +182,9 @@ module Gitlab
disable_statement_timeout do
remove_index(table_name, **options.merge({ column: column_name }))
end
# We removed this index. Now let's make sure it's not queued for async creation.
unprepare_async_index(table_name, column_name, **options)
end
# Removes an existing index, concurrently
......@@ -208,6 +215,9 @@ module Gitlab
disable_statement_timeout do
remove_index(table_name, **options.merge({ name: index_name }))
end
# We removed this index. Now let's make sure it's not queued for async creation.
unprepare_async_index_by_name(table_name, index_name, **options)
end
# Adds a foreign key with only minimal locking on the tables involved.
......
......@@ -176,6 +176,9 @@ namespace :gitlab do
# Cleanup leftover temporary indexes from previous, possibly aborted runs (if any)
Gitlab::Database::Reindexing.cleanup_leftovers!
# Hack: Before we do actual reindexing work, create async indexes
Gitlab::Database::AsyncIndexes.create_pending_indexes! if Feature.enabled?(:database_async_index_creation, type: :ops)
Gitlab::Database::Reindexing.perform(indexes)
rescue StandardError => e
Gitlab::AppLogger.error(e)
......
......@@ -3252,9 +3252,6 @@ msgstr ""
msgid "All epics"
msgstr ""
msgid "All escalations rules must have a schedule in the same project as the policy"
msgstr ""
msgid "All groups and projects"
msgstr ""
......@@ -11337,16 +11334,10 @@ msgstr ""
msgid "DevopsAdoption|Add a group to get started"
msgstr ""
msgid "DevopsAdoption|Add group"
msgstr ""
msgid "DevopsAdoption|Add group to table"
msgid "DevopsAdoption|Add or remove groups"
msgstr ""
msgid "DevopsAdoption|Add sub-group"
msgstr ""
msgid "DevopsAdoption|Add sub-group to table"
msgid "DevopsAdoption|Add or remove subgroups"
msgstr ""
msgid "DevopsAdoption|Adopted"
......@@ -11406,6 +11397,12 @@ msgstr ""
msgid "DevopsAdoption|DevOps adoption tracks the use of key features across your favorite groups. Add a group to the table to begin."
msgstr ""
msgid "DevopsAdoption|Edit groups"
msgstr ""
msgid "DevopsAdoption|Edit subgroups"
msgstr ""
msgid "DevopsAdoption|Feature adoption is based on usage in the previous calendar month. Last updated: %{timestamp}."
msgstr ""
......@@ -11466,7 +11463,7 @@ msgstr ""
msgid "DevopsAdoption|There was an error fetching Groups. Please refresh the page."
msgstr ""
msgid "DevopsAdoption|This group has no sub-groups"
msgid "DevopsAdoption|This group has no subgroups"
msgstr ""
msgid "DevopsAdoption|You cannot remove the group you are currently in."
......@@ -17205,7 +17202,7 @@ msgstr ""
msgid "InProductMarketing|Use GitLab CI/CD"
msgstr ""
msgid "InProductMarketing|Used by more than 100,000 organizations from around the globe."
msgid "InProductMarketing|Used by more than 100,000 organizations from around the globe:"
msgstr ""
msgid "InProductMarketing|Very difficult"
......@@ -28823,6 +28820,9 @@ msgstr ""
msgid "Schedule a new pipeline"
msgstr ""
msgid "Schedule-based escalation rules must have a schedule in the same project as the policy"
msgstr ""
msgid "Scheduled"
msgstr ""
......@@ -35992,6 +35992,9 @@ msgstr ""
msgid "User was successfully updated."
msgstr ""
msgid "User-based escalation rules must have a user with access to the project"
msgstr ""
msgid "UserAvailability|%{author} %{spanStart}(Busy)%{spanEnd}"
msgstr ""
......@@ -39604,7 +39607,7 @@ msgstr ""
msgid "must be inside the fork network"
msgstr ""
msgid "must have a unique schedule, status, and elapsed time"
msgid "must be unique by status and elapsed time within a policy"
msgstr ""
msgid "my-awesome-group"
......
# frozen_string_literal: true

FactoryBot.define do
  # Queued index definition for Gitlab::Database::AsyncIndexes.
  factory :postgres_async_index, class: 'Gitlab::Database::AsyncIndexes::PostgresAsyncIndex' do
    table_name { 'users' }

    sequence(:name) { |n| "users_id_#{n}" }

    definition { "CREATE INDEX #{name} ON #{table_name} (id)" }
  end
end
......@@ -6,18 +6,42 @@ RSpec.describe Banzai::Filter::TableOfContentsTagFilter do
include FilterSpecHelper
context 'table of contents' do
let(:html) { '<p>[[<em>TOC</em>]]</p>' }
shared_examples 'table of contents tag' do
it 'replaces toc tag with ToC result' do
doc = filter(html, {}, { toc: "FOO" })
it 'replaces [[<em>TOC</em>]] with ToC result' do
doc = filter(html, {}, { toc: "FOO" })
expect(doc.to_html).to eq("FOO")
end
expect(doc.to_html).to eq("FOO")
it 'handles an empty ToC result' do
doc = filter(html)
expect(doc.to_html).to eq ''
end
end
context '[[_TOC_]] as tag' do
it_behaves_like 'table of contents tag' do
let(:html) { '<p>[[<em>TOC</em>]]</p>' }
end
end
it 'handles an empty ToC result' do
doc = filter(html)
context '[[_toc_]] as tag' do
it_behaves_like 'table of contents tag' do
let(:html) { '<p>[[<em>toc</em>]]</p>' }
end
end
context '[TOC] as tag' do
it_behaves_like 'table of contents tag' do
let(:html) { '<p>[TOC]</p>' }
end
end
expect(doc.to_html).to eq ''
context '[toc] as tag' do
it_behaves_like 'table of contents tag' do
let(:html) { '<p>[toc]</p>' }
end
end
end
end
......@@ -102,33 +102,45 @@ RSpec.describe Banzai::Pipeline::FullPipeline do
describe 'table of contents' do
let(:project) { create(:project, :public) }
let(:markdown) do
<<-MARKDOWN.strip_heredoc
[[_TOC_]]
shared_examples 'table of contents tag' do |tag, tag_html|
let(:markdown) do
<<-MARKDOWN.strip_heredoc
#{tag}
# Header
MARKDOWN
end
MARKDOWN
end
let(:invalid_markdown) do
<<-MARKDOWN.strip_heredoc
test [[_TOC_]]
let(:invalid_markdown) do
<<-MARKDOWN.strip_heredoc
test #{tag}
# Header
MARKDOWN
end
MARKDOWN
end
it 'inserts a table of contents' do
output = described_class.to_html(markdown, project: project)
it 'inserts a table of contents' do
output = described_class.to_html(markdown, project: project)
expect(output).to include("<ul class=\"section-nav\">")
expect(output).to include("<li><a href=\"#header\">Header</a></li>")
expect(output).to include("<ul class=\"section-nav\">")
expect(output).to include("<li><a href=\"#header\">Header</a></li>")
end
it 'does not insert a table of contents' do
output = described_class.to_html(invalid_markdown, project: project)
expect(output).to include("test #{tag_html}")
end
end
it 'does not insert a table of contents' do
output = described_class.to_html(invalid_markdown, project: project)
context 'with [[_TOC_]] as tag' do
it_behaves_like 'table of contents tag', '[[_TOC_]]', '[[<em>TOC</em>]]'
end
expect(output).to include("test [[<em>TOC</em>]]")
context 'with [toc] as tag' do
it_behaves_like 'table of contents tag', '[toc]', '[toc]'
it_behaves_like 'table of contents tag', '[TOC]', '[TOC]'
end
end
......
......@@ -27,7 +27,7 @@ RSpec.describe Banzai::Pipeline::WikiPipeline do
end
end
it 'is case-sensitive' do
it 'is not case-sensitive' do
markdown = <<-MD.strip_heredoc
[[_toc_]]
......@@ -36,9 +36,22 @@ RSpec.describe Banzai::Pipeline::WikiPipeline do
Foo
MD
output = described_class.to_html(markdown, project: project, wiki: wiki)
result = described_class.call(markdown, project: project, wiki: wiki)
expect(result[:output].to_html).to include(result[:toc])
end
it 'works with alternative [toc] tag' do
markdown = <<-MD.strip_heredoc
[toc]
expect(output).to include('[[<em>toc</em>]]')
# Header 1
Foo
MD
result = described_class.call(markdown, project: project, wiki: wiki)
expect(result[:output].to_html).to include(result[:toc])
end
it 'handles an empty pipeline result' do
......
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Database::AsyncIndexes::IndexCreator do
  describe '#perform' do
    subject { described_class.new(async_index) }

    let(:async_index) { create(:postgres_async_index) }
    let(:index_model) { Gitlab::Database::AsyncIndexes::PostgresAsyncIndex }
    let(:connection) { ApplicationRecord.connection }

    context 'when the index already exists' do
      before do
        # Execute the queued definition up front so the index is present.
        connection.execute(async_index.definition)
      end

      it 'skips index creation' do
        expect(connection).not_to receive(:execute).with(/CREATE INDEX/)

        subject.perform
      end
    end

    it 'creates the index while controlling statement timeout' do
      allow(connection).to receive(:execute).and_call_original

      # 32400s corresponds to IndexCreator::STATEMENT_TIMEOUT (9 hours).
      expect(connection).to receive(:execute).with("SET statement_timeout TO '32400s'").ordered.and_call_original
      expect(connection).to receive(:execute).with(async_index.definition).ordered.and_call_original
      expect(connection).to receive(:execute).with("RESET statement_timeout").ordered.and_call_original

      subject.perform
    end

    it 'removes the index preparation record from postgres_async_indexes' do
      expect(async_index).to receive(:destroy).and_call_original

      expect { subject.perform }.to change { index_model.count }.by(-1)
    end

    it 'skips logic if not able to acquire exclusive lease' do
      expect(subject).to receive(:try_obtain_lease).and_return(false)
      expect(connection).not_to receive(:execute).with(/CREATE INDEX/)
      expect(async_index).not_to receive(:destroy)

      expect { subject.perform }.not_to change { index_model.count }
    end
  end
end
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Database::AsyncIndexes::MigrationHelpers do
  # Plain migration instance extended with the helpers under test.
  let(:migration) { ActiveRecord::Migration.new.extend(described_class) }
  let(:index_model) { Gitlab::Database::AsyncIndexes::PostgresAsyncIndex }
  let(:connection) { ApplicationRecord.connection }
  let(:table_name) { '_test_async_indexes' }
  let(:index_name) { "index_#{table_name}_on_id" }

  before do
    # Silence migration output in test logs.
    allow(migration).to receive(:puts)
  end

  describe '#unprepare_async_index' do
    let!(:async_index) { create(:postgres_async_index, name: index_name) }

    context 'when the flag is enabled' do
      before do
        stub_feature_flags(database_async_index_creation: true)
      end

      it 'destroys the record' do
        expect do
          migration.unprepare_async_index(table_name, 'id')
        end.to change { index_model.where(name: index_name).count }.by(-1)
      end

      context 'when an explicit name is given' do
        let(:index_name) { 'my_test_async_index' }

        it 'destroys the record' do
          expect do
            migration.unprepare_async_index(table_name, 'id', name: index_name)
          end.to change { index_model.where(name: index_name).count }.by(-1)
        end
      end

      context 'when the async index table does not exist' do
        it 'does not raise an error' do
          connection.drop_table(:postgres_async_indexes)

          expect(index_model).not_to receive(:find_by)

          expect { migration.unprepare_async_index(table_name, 'id') }.not_to raise_error
        end
      end
    end

    context 'when the feature flag is disabled' do
      it 'does not destroy the record' do
        stub_feature_flags(database_async_index_creation: false)

        expect do
          migration.unprepare_async_index(table_name, 'id')
        end.not_to change { index_model.where(name: index_name).count }
      end
    end
  end

  describe '#unprepare_async_index_by_name' do
    let(:index_name) { "index_#{table_name}_on_id" }
    let!(:async_index) { create(:postgres_async_index, name: index_name) }

    context 'when the flag is enabled' do
      before do
        stub_feature_flags(database_async_index_creation: true)
      end

      it 'destroys the record' do
        expect do
          migration.unprepare_async_index_by_name(table_name, index_name)
        end.to change { index_model.where(name: index_name).count }.by(-1)
      end

      context 'when the async index table does not exist' do
        it 'does not raise an error' do
          connection.drop_table(:postgres_async_indexes)

          expect(index_model).not_to receive(:find_by)

          expect { migration.unprepare_async_index_by_name(table_name, index_name) }.not_to raise_error
        end
      end
    end

    context 'when the feature flag is disabled' do
      it 'does not destroy the record' do
        stub_feature_flags(database_async_index_creation: false)

        expect do
          migration.unprepare_async_index_by_name(table_name, index_name)
        end.not_to change { index_model.where(name: index_name).count }
      end
    end
  end

  describe '#prepare_async_index' do
    before do
      connection.create_table(table_name)
    end

    context 'when the feature flag is enabled' do
      before do
        stub_feature_flags(database_async_index_creation: true)
      end

      it 'creates the record for the async index' do
        expect do
          migration.prepare_async_index(table_name, 'id')
        end.to change { index_model.where(name: index_name).count }.by(1)

        record = index_model.find_by(name: index_name)

        expect(record.table_name).to eq(table_name)
        expect(record.definition).to match(/CREATE INDEX CONCURRENTLY "#{index_name}"/)
      end

      context 'when an explicit name is given' do
        let(:index_name) { 'my_async_index_name' }

        it 'creates the record with the given name' do
          expect do
            migration.prepare_async_index(table_name, 'id', name: index_name)
          end.to change { index_model.where(name: index_name).count }.by(1)

          record = index_model.find_by(name: index_name)

          expect(record.table_name).to eq(table_name)
          expect(record.definition).to match(/CREATE INDEX CONCURRENTLY "#{index_name}"/)
        end
      end

      context 'when the index already exists' do
        it 'does not create the record' do
          connection.add_index(table_name, 'id', name: index_name)

          expect do
            migration.prepare_async_index(table_name, 'id')
          end.not_to change { index_model.where(name: index_name).count }
        end
      end

      context 'when the record already exists' do
        # Description fixed: the expectation below asserts the count does NOT
        # change, i.e. no duplicate entry is queued.
        it 'does not create a new record' do
          create(:postgres_async_index, table_name: table_name, name: index_name)

          expect do
            migration.prepare_async_index(table_name, 'id')
          end.not_to change { index_model.where(name: index_name).count }
        end
      end

      context 'when the async index table does not exist' do
        it 'does not raise an error' do
          connection.drop_table(:postgres_async_indexes)

          expect(index_model).not_to receive(:safe_find_or_create_by!)

          expect { migration.prepare_async_index(table_name, 'id') }.not_to raise_error
        end
      end
    end

    context 'when the feature flag is disabled' do
      it 'does not create the record' do
        stub_feature_flags(database_async_index_creation: false)

        expect do
          migration.prepare_async_index(table_name, 'id')
        end.not_to change { index_model.where(name: index_name).count }
      end
    end
  end
end
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Database::AsyncIndexes::PostgresAsyncIndex, type: :model do
  describe 'validations' do
    let(:identifier_limit) { described_class::MAX_IDENTIFIER_LENGTH }
    let(:definition_limit) { described_class::MAX_DEFINITION_LENGTH }

    # Both identifier columns share presence and PostgreSQL length limits.
    %i[name table_name].each do |attribute|
      it { is_expected.to validate_presence_of(attribute) }
      it { is_expected.to validate_length_of(attribute).is_at_most(identifier_limit) }
    end

    it { is_expected.to validate_presence_of(:definition) }
    it { is_expected.to validate_length_of(:definition).is_at_most(definition_limit) }
  end
end
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Database::AsyncIndexes do
  describe '.create_pending_indexes!' do
    subject { described_class.create_pending_indexes! }

    before do
      create_list(:postgres_async_index, 4)
    end

    it 'takes 2 pending indexes and creates those' do
      # Only the two oldest queued entries (by id) should be processed.
      described_class::PostgresAsyncIndex.order(:id).limit(2).each do |queued_index|
        index_creator = double('index creator')

        expect(described_class::IndexCreator).to receive(:new).with(queued_index).and_return(index_creator)
        expect(index_creator).to receive(:perform)
      end

      subject
    end
  end
end
......@@ -278,6 +278,16 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
model.add_concurrent_index(:users, :foo, unique: true)
end
it 'unprepares the async index creation' do
expect(model).to receive(:add_index)
.with(:users, :foo, algorithm: :concurrently)
expect(model).to receive(:unprepare_async_index)
.with(:users, :foo, algorithm: :concurrently)
model.add_concurrent_index(:users, :foo)
end
end
context 'inside a transaction' do
......@@ -314,6 +324,16 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
model.remove_concurrent_index(:users, :foo, unique: true)
end
it 'unprepares the async index creation' do
expect(model).to receive(:remove_index)
.with(:users, { algorithm: :concurrently, column: :foo })
expect(model).to receive(:unprepare_async_index)
.with(:users, :foo, { algorithm: :concurrently })
model.remove_concurrent_index(:users, :foo)
end
describe 'by index name' do
before do
allow(model).to receive(:index_exists_by_name?).with(:users, "index_x_by_y").and_return(true)
......@@ -345,6 +365,16 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
model.remove_concurrent_index_by_name(:users, wrong_key: "index_x_by_y")
end.to raise_error 'remove_concurrent_index_by_name must get an index name as the second argument'
end
it 'unprepares the async index creation' do
expect(model).to receive(:remove_index)
.with(:users, { algorithm: :concurrently, name: "index_x_by_y" })
expect(model).to receive(:unprepare_async_index_by_name)
.with(:users, "index_x_by_y", { algorithm: :concurrently })
model.remove_concurrent_index_by_name(:users, "index_x_by_y")
end
end
end
end
......
......@@ -3,7 +3,7 @@
require 'spec_helper'
require_migration!
RSpec.describe ReScheduleLatestPipelineIdPopulationWithLogging do
RSpec.describe ReScheduleLatestPipelineIdPopulationWithAllSecurityRelatedArtifactTypes do
let(:namespaces) { table(:namespaces) }
let(:pipelines) { table(:ci_pipelines) }
let(:projects) { table(:projects) }
......
......@@ -207,9 +207,23 @@ RSpec.describe Namespace do
it { is_expected.to include_module(Gitlab::VisibilityLevel) }
it { is_expected.to include_module(Namespaces::Traversal::Recursive) }
it { is_expected.to include_module(Namespaces::Traversal::Linear) }
it { is_expected.to include_module(Namespaces::Traversal::RecursiveScopes) }
it { is_expected.to include_module(Namespaces::Traversal::LinearScopes) }
end
it_behaves_like 'linear namespace traversal'
context 'traversal scopes' do
context 'recursive' do
before do
stub_feature_flags(use_traversal_ids: false)
end
it_behaves_like 'namespace traversal scopes'
end
context 'linear' do
it_behaves_like 'namespace traversal scopes'
end
end
context 'traversal_ids on create' do
context 'default traversal_ids' do
......
# frozen_string_literal: true

# Traversal examples common to linear and recursive methods are in
# spec/support/shared_examples/namespaces/traversal_examples.rb
RSpec.shared_examples 'linear namespace traversal' do
  context 'when use_traversal_ids feature flag is enabled' do
    before do
      stub_feature_flags(use_traversal_ids: true)
    end

    context 'scopes' do
      describe '.as_ids' do
        let_it_be(:group_a) { create(:group) }
        let_it_be(:group_b) { create(:group) }

        subject { Namespace.where(id: [group_a, group_b]).as_ids.pluck(:id) }

        it { is_expected.to contain_exactly(group_a.id, group_b.id) }
      end
    end
  end
end
# frozen_string_literal: true
# Shared examples for the namespace traversal query scopes. The including
# spec runs them against both the linear (traversal_ids) and recursive
# implementations by toggling the use_traversal_ids feature flag.
RSpec.shared_examples 'namespace traversal scopes' do
  # Hierarchy 1: group_1 -> nested_group_1 -> deep_nested_group_1
  let_it_be(:group_1) { create(:group) }
  let_it_be(:nested_group_1) { create(:group, parent: group_1) }
  let_it_be(:deep_nested_group_1) { create(:group, parent: nested_group_1) }

  # Hierarchy 2: group_2 -> nested_group_2 -> deep_nested_group_2
  let_it_be(:group_2) { create(:group) }
  let_it_be(:nested_group_2) { create(:group, parent: group_2) }
  let_it_be(:deep_nested_group_2) { create(:group, parent: nested_group_2) }

  # All groups
  let_it_be(:groups) do
    [
      group_1, nested_group_1, deep_nested_group_1,
      group_2, nested_group_2, deep_nested_group_2
    ]
  end

  describe '.as_ids' do
    subject { described_class.where(id: [group_1, group_2]).as_ids.pluck(:id) }

    it { is_expected.to contain_exactly(group_1.id, group_2.id) }
  end

  describe '.without_sti_condition' do
    subject { described_class.without_sti_condition }

    # The scope must drop the STI `type` condition from the relation.
    it { expect(subject.where_values_hash).not_to have_key(:type) }
  end

  describe '.self_and_descendants' do
    subject { described_class.where(id: [nested_group_1, nested_group_2]).self_and_descendants }

    it { is_expected.to contain_exactly(nested_group_1, deep_nested_group_1, nested_group_2, deep_nested_group_2) }

    # Overlapping subtrees must not produce duplicate rows.
    context 'with duplicate descendants' do
      subject { described_class.where(id: [group_1, group_2, nested_group_1]).self_and_descendants }

      it { is_expected.to match_array(groups) }
    end
  end

  describe '.self_and_descendant_ids' do
    subject { described_class.where(id: [nested_group_1, nested_group_2]).self_and_descendant_ids.pluck(:id) }

    it { is_expected.to contain_exactly(nested_group_1.id, deep_nested_group_1.id, nested_group_2.id, deep_nested_group_2.id) }
  end
end
......@@ -207,6 +207,27 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do
run_rake_task('gitlab:db:reindex')
end
context 'when async index creation is enabled' do
it 'executes async index creation prior to any reindexing actions' do
stub_feature_flags(database_async_index_creation: true)
expect(Gitlab::Database::AsyncIndexes).to receive(:create_pending_indexes!).ordered
expect(Gitlab::Database::Reindexing).to receive(:perform).ordered
run_rake_task('gitlab:db:reindex')
end
end
context 'when async index creation is disabled' do
it 'does not execute async index creation' do
stub_feature_flags(database_async_index_creation: false)
expect(Gitlab::Database::AsyncIndexes).not_to receive(:create_pending_indexes!)
run_rake_task('gitlab:db:reindex')
end
end
context 'when no index_name is given' do
it 'uses all candidate indexes' do
expect(Gitlab::Database::PostgresIndex).to receive(:reindexing_support).and_return(indexes)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment