Commit c2060944 authored by GitLab Bot

Automatic merge of gitlab-org/gitlab master

parents 596dfee5 5cc0b39e
<script>
- import { __, sprintf } from '~/locale';
+ import { GlSprintf, GlLink } from '@gitlab/ui';
+ import { __ } from '~/locale';
+
+ const alertMessage = __(
+   'Someone edited the issue at the same time you did. Please check out %{linkStart}the issue%{linkEnd} and make sure your changes will not unintentionally remove theirs.',
+ );

export default {
+ alertMessage,
+ components: {
+   GlSprintf,
+   GlLink,
+ },
  computed: {
    currentPath() {
      return window.location.pathname;
    },
-   alertMessage() {
-     return sprintf(
-       __(
-         'Someone edited the issue at the same time you did. Please check out %{linkStart}the issue%{linkEnd} and make sure your changes will not unintentionally remove theirs.',
-       ),
-       {
-         linkStart: `<a href="${this.currentPath}" target="_blank" rel="nofollow">`,
-         linkEnd: `</a>`,
-       },
-       false,
-     );
-   },
  },
};
</script>
<template>
- <div
-   class="alert alert-danger"
-   v-html="alertMessage /* eslint-disable-line vue/no-v-html */"
- ></div>
+ <div class="alert alert-danger">
+   <gl-sprintf :message="$options.alertMessage">
+     <template #link="{ content }">
+       <gl-link :href="currentPath" target="_blank" rel="nofollow">
+         {{ content }}
+       </gl-link>
+     </template>
+   </gl-sprintf>
+ </div>
</template>
......@@ -76,6 +76,7 @@
}
.dropdown-toggle,
.dropdown-menu-toggle,
.confidential-merge-request-fork-group .dropdown-toggle {
padding: 6px 8px 6px 10px;
background-color: $white;
......@@ -131,7 +132,6 @@
// This is double classed to solve a specificity issue with the gitlab ui buttons
.dropdown-menu-toggle.dropdown-menu-toggle {
@extend .dropdown-toggle;
justify-content: flex-start;
overflow: hidden;
padding-right: 25px;
......
......@@ -198,22 +198,6 @@ h1 {
.dropdown {
position: relative;
}
.dropdown-menu-toggle {
white-space: nowrap;
}
.dropdown-menu-toggle::after {
display: inline-block;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid;
border-right: 0.3em solid transparent;
border-bottom: 0;
border-left: 0.3em solid transparent;
}
.dropdown-menu-toggle:empty::after {
margin-left: 0;
}
.dropdown-menu {
position: absolute;
top: 100%;
......@@ -466,9 +450,6 @@ a {
.hide {
display: none;
}
.dropdown-menu-toggle::after {
display: none;
}
.badge:not(.gl-badge) {
padding: 4px 5px;
font-size: 12px;
......@@ -548,7 +529,7 @@ body {
border-radius: 0.25rem;
white-space: nowrap;
}
.no-outline.dropdown-menu-toggle {
.dropdown-menu-toggle.no-outline {
outline: 0;
}
.dropdown-menu-toggle.dropdown-menu-toggle {
......
......@@ -178,22 +178,6 @@ h1 {
.dropdown {
position: relative;
}
.dropdown-menu-toggle {
white-space: nowrap;
}
.dropdown-menu-toggle::after {
display: inline-block;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid;
border-right: 0.3em solid transparent;
border-bottom: 0;
border-left: 0.3em solid transparent;
}
.dropdown-menu-toggle:empty::after {
margin-left: 0;
}
.dropdown-menu {
position: absolute;
top: 100%;
......@@ -446,9 +430,6 @@ a {
.hide {
display: none;
}
.dropdown-menu-toggle::after {
display: none;
}
.badge:not(.gl-badge) {
padding: 4px 5px;
font-size: 12px;
......@@ -528,7 +509,7 @@ body {
border-radius: 0.25rem;
white-space: nowrap;
}
.no-outline.dropdown-menu-toggle {
.dropdown-menu-toggle.no-outline {
outline: 0;
}
.dropdown-menu-toggle.dropdown-menu-toggle {
......
......@@ -36,7 +36,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController
# rubocop: disable CodeReuse/ActiveRecord
def starred
@projects = load_projects(params.merge(starred: true))
.includes(:forked_from_project, :topics, :topics_acts_as_taggable)
.includes(:forked_from_project, :topics)
@groups = []
......
......@@ -182,8 +182,8 @@ class ProjectsFinder < UnionFinder
def by_topics(items)
return items unless params[:topic].present?
topics = params[:topic].instance_of?(String) ? params[:topic].strip.split(/\s*,\s*/) : params[:topic]
topics.each do |topic|
topics = params[:topic].instance_of?(String) ? params[:topic].split(',') : params[:topic]
topics.map(&:strip).uniq.reject(&:empty?).each do |topic|
items = items.with_topic(topic)
end
......
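For illustration, the new normalization in `by_topics` handles messy input like this — a plain-Ruby sketch with a hypothetical parameter value:

```ruby
# Hypothetical ?topic= value: split first, then strip, de-duplicate,
# and drop empty entries before querying.
raw = ' ruby , rails,, ruby '
raw.split(',').map(&:strip).uniq.reject(&:empty?)
# => ["ruby", "rails"]
```

Unlike the old `strip.split(/\s*,\s*/)`, duplicates and empty segments no longer reach `with_topic`.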
......@@ -400,7 +400,8 @@ module ApplicationSettingsHelper
:user_deactivation_emails_enabled,
:sidekiq_job_limiter_mode,
:sidekiq_job_limiter_compression_threshold_bytes,
:sidekiq_job_limiter_limit_bytes
:sidekiq_job_limiter_limit_bytes,
:suggest_pipeline_enabled
].tap do |settings|
settings << :deactivate_dormant_users unless Gitlab.com?
end
......
......@@ -159,6 +159,7 @@ module ApplicationSettingImplementation
spam_check_endpoint_enabled: false,
spam_check_endpoint_url: nil,
spam_check_api_key: nil,
suggest_pipeline_enabled: true,
terminal_max_session_time: 0,
throttle_authenticated_api_enabled: false,
throttle_authenticated_api_period_in_seconds: 3600,
......
......@@ -128,26 +128,9 @@ class Project < ApplicationRecord
after_initialize :use_hashed_storage
after_create :check_repository_absence!
# Required during the `ActsAsTaggableOn::Tag -> Topic` migration
# TODO: remove 'acts_as_ordered_taggable_on' and ':topics_acts_as_taggable' in the further process of the migration
# https://gitlab.com/gitlab-org/gitlab/-/issues/335946
acts_as_ordered_taggable_on :topics
has_many :topics_acts_as_taggable, -> { order("#{ActsAsTaggableOn::Tagging.table_name}.id") },
class_name: 'ActsAsTaggableOn::Tag',
through: :topic_taggings,
source: :tag
has_many :project_topics, -> { order(:id) }, class_name: 'Projects::ProjectTopic'
has_many :topics, through: :project_topics, class_name: 'Projects::Topic'
# Required during the `ActsAsTaggableOn::Tag -> Topic` migration
# TODO: remove 'topics' in the further process of the migration
# https://gitlab.com/gitlab-org/gitlab/-/issues/335946
alias_method :topics_new, :topics
def topics
self.topics_acts_as_taggable + self.topics_new
end
attr_accessor :old_path_with_namespace
attr_accessor :template_name
attr_writer :pipeline_status
......@@ -653,15 +636,8 @@ class Project < ApplicationRecord
scope :with_topic, ->(topic_name) do
topic = Projects::Topic.find_by_name(topic_name)
acts_as_taggable_on_topic = ActsAsTaggableOn::Tag.find_by_name(topic_name)
return none unless topic || acts_as_taggable_on_topic
relations = []
relations << where(id: topic.project_topics.select(:project_id)) if topic
relations << where(id: acts_as_taggable_on_topic.taggings.select(:taggable_id)) if acts_as_taggable_on_topic
Project.from_union(relations)
topic ? where(id: topic.project_topics.select(:project_id)) : none
end
enum auto_cancel_pending_pipelines: { disabled: 0, enabled: 1 }
......@@ -679,7 +655,7 @@ class Project < ApplicationRecord
mount_uploader :bfg_object_map, AttachmentUploader
def self.with_api_entity_associations
preload(:project_feature, :route, :topics, :topics_acts_as_taggable, :group, :timelogs, namespace: [:route, :owner])
preload(:project_feature, :route, :topics, :group, :timelogs, namespace: [:route, :owner])
end
def self.with_web_entity_associations
......@@ -2735,15 +2711,9 @@ class Project < ApplicationRecord
@topic_list = @topic_list.split(',') if @topic_list.instance_of?(String)
@topic_list = @topic_list.map(&:strip).uniq.reject(&:empty?)
if @topic_list != self.topic_list || self.topics_acts_as_taggable.any?
self.topics_new.delete_all
if @topic_list != self.topic_list
self.topics.delete_all
self.topics = @topic_list.map { |topic| Projects::Topic.find_or_create_by(name: topic) }
# Remove old topics (ActsAsTaggableOn::Tag)
# Required during the `ActsAsTaggableOn::Tag -> Topic` migration
# TODO: remove in the further process of the migration
# https://gitlab.com/gitlab-org/gitlab/-/issues/335946
self.topic_taggings.clear
end
@topic_list = nil
......
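Taken together, the simplified scope and setter behave roughly as below (a sketch; the project and topic names are hypothetical):

```ruby
# Assigning topics now touches only Projects::Topic records.
project.topic_list = 'ruby, rails, ruby'
project.save!
project.reload.topics.map(&:name)    # => ["ruby", "rails"]

# The scope resolves one topic name to its projects via project_topics,
# or returns an empty relation when the topic does not exist.
Project.with_topic('ruby')
Project.with_topic('does-not-exist') # => Project.none
```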
......@@ -24,7 +24,7 @@ module Search
# rubocop: disable CodeReuse/ActiveRecord
def projects
@projects ||= ProjectsFinder.new(params: { non_archived: true }, current_user: current_user).execute.preload(:topics, :taggings)
@projects ||= ProjectsFinder.new(params: { non_archived: true }, current_user: current_user).execute.preload(:topics, :project_topics)
end
def allowed_scopes
......
......@@ -69,5 +69,12 @@
%p.form-text.text-muted
= _("The default CI/CD configuration file and path for new projects.").html_safe
= link_to sprite_icon('question-o'), help_page_path('ci/pipelines/settings', anchor: 'specify-a-custom-cicd-configuration-file'), target: '_blank'
.form-group
.form-check
= f.check_box :suggest_pipeline_enabled, class: 'form-check-input'
= f.label :suggest_pipeline_enabled, class: 'form-check-label' do
= s_('AdminSettings|Enable pipeline suggestion banner')
.form-text.text-muted
= s_('AdminSettings|Display a banner on merge requests in projects with no pipelines to initiate steps to add a .gitlab-ci.yml file.')
= f.submit _('Save changes'), class: "gl-button btn btn-confirm"
# frozen_string_literal: true
class RemoveTemporaryIndexForProjectTopicsOnTaggings < Gitlab::Database::Migration[1.0]
MIGRATION = 'ExtractProjectTopicsIntoSeparateTable'
INDEX_NAME = 'tmp_index_taggings_on_id_where_taggable_type_project'
INDEX_CONDITION = "taggable_type = 'Project'"
disable_ddl_transaction!
def up
# Ensure that no background jobs of 20210730104800_schedule_extract_project_topics_into_separate_table remain
finalize_background_migration MIGRATION
# this index was used in 20210730104800_schedule_extract_project_topics_into_separate_table
remove_concurrent_index_by_name :taggings, INDEX_NAME
end
def down
add_concurrent_index :taggings, :id, where: INDEX_CONDITION, name: INDEX_NAME # rubocop:disable Migration/PreventIndexCreation
end
end
a0ba9fb9e2f7f738926a2273f9ff644c43acb999f4d27adf192e5006582a2a0e
\ No newline at end of file
......@@ -26916,8 +26916,6 @@ CREATE INDEX tmp_index_namespaces_empty_traversal_ids_with_root_namespaces ON na
CREATE INDEX tmp_index_on_vulnerabilities_non_dismissed ON vulnerabilities USING btree (id) WHERE (state <> 2);
CREATE INDEX tmp_index_taggings_on_id_where_taggable_type_project ON taggings USING btree (id) WHERE ((taggable_type)::text = 'Project'::text);
CREATE UNIQUE INDEX uniq_pkgs_deb_grp_architectures_on_distribution_id_and_name ON packages_debian_group_architectures USING btree (distribution_id, name);
CREATE UNIQUE INDEX uniq_pkgs_deb_grp_components_on_distribution_id_and_name ON packages_debian_group_components USING btree (distribution_id, name);
......@@ -23,10 +23,7 @@ replication and failover requires:
- A minimum of three database nodes.
- A minimum of three `Consul` server nodes.
- A minimum of one `pgbouncer` service node, but it's recommended to have one
per database node.
- An internal load balancer (TCP) is required when there is more than one
`pgbouncer` service node.
- A minimum of one `pgbouncer` service node, but it's recommended to have one per database node. An internal load balancer (TCP) is required when there is more than one `pgbouncer` service node.
![PostgreSQL HA Architecture](img/pg_ha_architecture.png)
......@@ -35,40 +32,31 @@ sure you have redundant connectivity between all Database and GitLab instances
to avoid the network becoming a single point of failure.
NOTE:
As of GitLab 13.3, PostgreSQL 12 is shipped with Omnibus GitLab. Clustering for PostgreSQL 12 is only supported with
As of GitLab 13.3, PostgreSQL 12 is shipped with Omnibus GitLab. Clustering for PostgreSQL 12 is supported only with
Patroni. See the [Patroni](#patroni) section for further details. Starting with GitLab 14.0, only PostgreSQL 12 is
shipped with Omnibus GitLab and thus Patroni becomes mandatory for replication and failover.
shipped with Omnibus GitLab, and thus Patroni becomes mandatory for replication and failover.
### Database node
Each database node runs three services:
`PostgreSQL` - The database itself.
`Patroni` - Communicates with other Patroni services in the cluster and handles
failover when issues with the leader server occurs. The failover procedure
consists of:
- Selecting a new leader for the cluster.
- Promoting the new node to leader.
- Instructing remaining servers to follow the new leader node.
`Consul` agent - To communicate with Consul cluster which stores the current Patroni state. The agent monitors the status of each node in the database cluster and tracks its health in a service definition on the Consul cluster.
- `PostgreSQL`: The database itself.
- `Patroni`: Communicates with other Patroni services in the cluster and handles failover when issues with the leader server occur. The failover procedure consists of:
- Selecting a new leader for the cluster.
- Promoting the new node to leader.
- Instructing remaining servers to follow the new leader node.
- `Consul` agent: Communicates with the Consul cluster, which stores the current Patroni state. The agent monitors the status of each node in the database cluster and tracks its health in a service definition on the Consul cluster.
### Consul server node
The Consul server node runs the Consul server service. These nodes must have reached the quorum and elected a leader _before_ Patroni cluster bootstrap otherwise database nodes wait until such Consul leader is elected.
The Consul server node runs the Consul server service. These nodes must have reached the quorum and elected a leader _before_ Patroni cluster bootstrap; otherwise, database nodes wait until such Consul leader is elected.
### PgBouncer node
Each PgBouncer node runs two services:
`PgBouncer` - The database connection pooler itself.
`Consul` agent - Watches the status of the PostgreSQL service definition on the
Consul cluster. If that status changes, Consul runs a script which updates the
PgBouncer configuration to point to the new PostgreSQL leader node and reloads
the PgBouncer service.
- `PgBouncer`: The database connection pooler itself.
- `Consul` agent: Watches the status of the PostgreSQL service definition on the Consul cluster. If that status changes, Consul runs a script which updates the PgBouncer configuration to point to the new PostgreSQL leader node and reloads the PgBouncer service.
### Connection flow
......@@ -106,8 +94,7 @@ When using default setup, minimum configuration requires:
- `CONSUL_USERNAME`. The default user for Omnibus GitLab is `gitlab-consul`
- `CONSUL_DATABASE_PASSWORD`. Password for the database user.
- `CONSUL_PASSWORD_HASH`. This is a hash generated out of Consul username/password pair.
Can be generated with:
- `CONSUL_PASSWORD_HASH`. This is a hash generated out of Consul username/password pair. It can be generated with:
```shell
sudo gitlab-ctl pg-password-md5 CONSUL_USERNAME
......@@ -118,8 +105,7 @@ When using default setup, minimum configuration requires:
A few notes on the service itself:
- The service runs under a system account, by default `gitlab-consul`.
- If you are using a different username, you have to specify it through the
`CONSUL_USERNAME` variable.
- If you are using a different username, you have to specify it through the `CONSUL_USERNAME` variable.
- Passwords are stored in the following locations:
- `/etc/gitlab/gitlab.rb`: hashed
- `/var/opt/gitlab/pgbouncer/pg_auth`: hashed
......@@ -129,10 +115,8 @@ Few notes on the service itself:
When configuring PostgreSQL, we do the following:
- Set `max_replication_slots` to double the number of database nodes.
Patroni uses one extra slot per node when initiating the replication.
- Set `max_wal_senders` to one more than the allocated number of replication slots in the cluster.
This prevents replication from using up all of the available database connections.
- Set `max_replication_slots` to double the number of database nodes. Patroni uses one extra slot per node when initiating the replication.
- Set `max_wal_senders` to one more than the allocated number of replication slots in the cluster. This prevents replication from using up all of the available database connections.
In this document, we assume 3 database nodes, which makes this configuration:
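The concrete values fall outside this hunk; for the assumed 3 nodes, the arithmetic gives the following `gitlab.rb` sketch (the `patroni['postgresql'][...]` nesting follows the Patroni section later on this page):

```ruby
# 3 nodes x 2 = 6 replication slots; senders = slots + 1 = 7.
patroni['postgresql']['max_replication_slots'] = 6
patroni['postgresql']['max_wal_senders'] = 7
```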
......@@ -151,7 +135,7 @@ You need the following password information for the application's database user:
- `POSTGRESQL_USERNAME`. The default user for Omnibus GitLab is `gitlab`
- `POSTGRESQL_USER_PASSWORD`. The password for the database user
- `POSTGRESQL_PASSWORD_HASH`. This is a hash generated out of the username/password pair.
Can be generated with:
It can be generated with:
```shell
sudo gitlab-ctl pg-password-md5 POSTGRESQL_USERNAME
......@@ -170,8 +154,7 @@ When using a default setup, the minimum configuration requires:
- `PGBOUNCER_USERNAME`. The default user for Omnibus GitLab is `pgbouncer`
- `PGBOUNCER_PASSWORD`. This is a password for PgBouncer service.
- `PGBOUNCER_PASSWORD_HASH`. This is a hash generated out of PgBouncer username/password pair.
Can be generated with:
- `PGBOUNCER_PASSWORD_HASH`. This is a hash generated out of PgBouncer username/password pair. It can be generated with:
```shell
sudo gitlab-ctl pg-password-md5 PGBOUNCER_USERNAME
......@@ -181,8 +164,7 @@ When using a default setup, the minimum configuration requires:
A few things to remember about the service itself:
- The service runs as the same system account as the database
- In the package, this is by default `gitlab-psql`
- The service runs as the same system account as the database. In the package, this is by default `gitlab-psql`
- If you use a non-default user account for PgBouncer service (by default `pgbouncer`), you need to specify this username.
- Passwords are stored in the following locations:
- `/etc/gitlab/gitlab.rb`: hashed, and in plain text
......@@ -206,7 +188,7 @@ When installing the GitLab package, do not supply `EXTERNAL_URL` value.
You must enable Patroni explicitly to be able to use it (with `patroni['enable'] = true`).
Any PostgreSQL configuration item that controls replication, for example `wal_level`, `max_wal_senders`, etc, are strictly
Any PostgreSQL configuration item that controls replication, for example `wal_level`, `max_wal_senders`, or others, is strictly
controlled by Patroni. These configurations override the original settings that you make with the `postgresql[...]` configuration key.
Hence, they are all separated and placed under `patroni['postgresql'][...]`. This behavior is limited to replication.
Patroni honours any other PostgreSQL configuration that was made with the `postgresql[...]` configuration key. For example,
......@@ -215,7 +197,7 @@ configuration key.
NOTE:
The configuration of a Patroni node is very similar to that of a repmgr node, but shorter. When Patroni is enabled, first you can ignore
any replication setting of PostgreSQL (it is overwritten anyway). Then you can remove any `repmgr[...]` or
any replication setting of PostgreSQL (which is overwritten). Then, you can remove any `repmgr[...]` or
repmgr-specific configuration as well. In particular, make sure that you remove `postgresql['shared_preload_libraries'] = 'repmgr_funcs'`.
Here is an example:
......@@ -282,7 +264,7 @@ on each node for the changes to take effect.
Generally, when the Consul cluster is ready, the first node that [reconfigures](../restart_gitlab.md#omnibus-gitlab-reconfigure)
becomes the leader. You do not need to sequence the nodes' reconfiguration. You can run them in parallel or in any order.
If you choose an arbitrary order you do not have any predetermined leader.
If you choose an arbitrary order, you do not have any predetermined leader.
#### Enable Monitoring
......@@ -296,7 +278,7 @@ If you enable Monitoring, it must be enabled on **all** database servers.
# Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on
# Set the network addresses that the exporters must listen on
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
```
......@@ -340,9 +322,9 @@ patroni['tls_ca_file'] = '/path/to/ca.pem'
When TLS is enabled, mutual authentication of the API server and client is possible for all endpoints, the extent of which depends on
the `patroni['tls_client_mode']` attribute:
- `none` (default): the API will not check for any client certificates.
- `optional`: client certificates are required for all [unsafe](https://patroni.readthedocs.io/en/latest/security.html#protecting-the-rest-api) API calls.
- `required`: client certificates are required for all API calls.
- `none` (default): The API does not check for any client certificates.
- `optional`: Client certificates are required for all [unsafe](https://patroni.readthedocs.io/en/latest/security.html#protecting-the-rest-api) API calls.
- `required`: Client certificates are required for all API calls.
The client certificates are verified against the CA certificate that is specified with the `patroni['tls_ca_file']` attribute. Therefore,
this attribute is required for mutual TLS authentication. You also need to specify PEM-formatted client certificate and private key files.
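Putting these attributes together, a minimal `gitlab.rb` sketch for strict mutual TLS (both attribute names and the `required` value are taken from this page; the path is a placeholder):

```ruby
# Verify a client certificate against this CA on every REST API call.
patroni['tls_ca_file'] = '/path/to/ca.pem'
patroni['tls_client_mode'] = 'required'
```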
......@@ -450,9 +432,9 @@ authentication mode (`patroni['tls_client_mode']`), must each have the same valu
#### Configure the internal load balancer
If you're running more than one PgBouncer node as recommended, then you need to set up a TCP internal load balancer to serve each correctly. This can be accomplished with any reputable TCP load balancer.
If you're running more than one PgBouncer node as recommended, you must set up a TCP internal load balancer to serve each correctly. This can be accomplished with any reputable TCP load balancer.
As an example here's how you could do it with [HAProxy](https://www.haproxy.org/):
As an example, here's how you could do it with [HAProxy](https://www.haproxy.org/):
```plaintext
global
......@@ -554,7 +536,7 @@ Here is a list and description of each machine and the assigned IP:
- `10.6.0.33`: PostgreSQL 3
- `10.6.0.41`: GitLab application
All passwords are set to `toomanysecrets`, please do not use this password or derived hashes and the `external_url` for GitLab is `http://gitlab.example.com`.
All passwords are set to `toomanysecrets`. Please do not use this password or derived hashes. The `external_url` for GitLab is `http://gitlab.example.com`.
After the initial configuration, if a failover occurs, the PostgreSQL leader node changes to one of the available secondaries until it is failed back.
......@@ -675,7 +657,7 @@ This example uses 3 PostgreSQL servers, and 1 application node (with PgBouncer s
It differs from the [recommended setup](#example-recommended-setup) by moving the Consul servers into the same servers we use for PostgreSQL.
The trade-off is between reducing server counts and the increased operational complexity of needing to deal with PostgreSQL [failover](#manual-failover-procedure-for-patroni) procedures in addition to [Consul outage recovery](../consul.md#outage-recovery) on the same set of machines.
In this example we start with all servers on the same 10.6.0.0/16 private network range, they can connect to each freely other on those addresses.
In this example, we start with all servers on the same 10.6.0.0/16 private network range; they can connect to each other freely on those addresses.
Here is a list and description of each machine and the assigned IP:
......@@ -684,7 +666,7 @@ Here is a list and description of each machine and the assigned IP:
- `10.6.0.23`: PostgreSQL 3
- `10.6.0.31`: GitLab application
All passwords are set to `toomanysecrets`, please do not use this password or derived hashes.
All passwords are set to `toomanysecrets`. Please do not use this password or derived hashes.
The `external_url` for GitLab is `http://gitlab.example.com`
......@@ -787,7 +769,7 @@ Patroni is an opinionated solution for PostgreSQL high-availability. It takes th
The fundamental [architecture](#example-recommended-setup-manual-steps) (mentioned above) does not change for Patroni.
You do not need any special consideration for Patroni while provisioning your database nodes. Patroni heavily relies on Consul to store the state of the cluster and elect a leader. Any failure in the Consul cluster and its leader election propagates to the Patroni cluster as well.
Patroni monitors the cluster and handles any failover. When the primary node fails it works with Consul to notify PgBouncer. On failure, Patroni handles the transitioning of the old primary to a replica and rejoins it to the cluster automatically.
Patroni monitors the cluster and handles any failover. When the primary node fails, it works with Consul to notify PgBouncer. On failure, Patroni handles the transitioning of the old primary to a replica and rejoins it to the cluster automatically.
With Patroni, the connection flow is slightly different. Patroni on each node connects to the Consul agent to join the cluster. Only after this point does it decide whether the node is the primary or a replica. Based on this decision, it configures and starts PostgreSQL, which it communicates with directly over a Unix socket. This means that if the Consul cluster is not functional or does not have a leader, Patroni, and by extension PostgreSQL, does not start. Patroni also exposes a REST API which can be accessed via its [default port](../package_information/defaults.md)
on each node.
......@@ -918,7 +900,7 @@ patroni['remove_data_directory_on_diverged_timelines'] = false
|-|-|
|`use_pg_rewind`|Try running `pg_rewind` on the former cluster leader before it rejoins the database cluster.|
|`remove_data_directory_on_rewind_failure`|If `pg_rewind` fails, remove the local PostgreSQL data directory and re-replicate from the current cluster leader.|
|`remove_data_directory_on_diverged_timelines`|If `pg_rewind` cannot be used and the former leader's timeline has diverged from the current one, then delete the local data directory and re-replicate from the current cluster leader.|
|`remove_data_directory_on_diverged_timelines`|If `pg_rewind` cannot be used and the former leader's timeline has diverged from the current one, delete the local data directory and re-replicate from the current cluster leader.|
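As a sketch, enabling all three behaviors from the table in `gitlab.rb` (the attribute names follow the same `patroni[...]` form as `remove_data_directory_on_diverged_timelines` shown above):

```ruby
# Try pg_rewind first; fall back to wiping the local data directory
# and re-replicating from the current leader.
patroni['use_pg_rewind'] = true
patroni['remove_data_directory_on_rewind_failure'] = true
patroni['remove_data_directory_on_diverged_timelines'] = true
```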
### Database authorization for Patroni
......@@ -936,7 +918,7 @@ You can use `gitlab-ctl patroni members` to check the status of the cluster memb
is the primary or a replica.
When Patroni is enabled, it exclusively controls PostgreSQL's startup,
shutdown, and restart. This means, to shut down PostgreSQL on a certain node you must shutdown Patroni on the same node with:
shutdown, and restart. This means that, to shut down PostgreSQL on a certain node, you must shut down Patroni on the same node with:
```shell
sudo gitlab-ctl stop patroni
......@@ -974,7 +956,7 @@ When a Geo secondary site is replicating from a primary site that uses `Patroni`
sudo gitlab-ctl replicate-geo-database --host=<new_leader_ip> --replication-slot=<slot_name>
```
Otherwise, the replication will not happen, even if the original node gets re-added as a follower node. This re-syncs your secondary site database and may take a long time depending on the amount of data to sync. You may also need to run `gitlab-ctl reconfigure` if replication is still not working after re-syncing.
Otherwise, the replication does not happen, even if the original node gets re-added as a follower node. This re-syncs your secondary site database and may take a long time depending on the amount of data to sync. You may also need to run `gitlab-ctl reconfigure` if replication is still not working after re-syncing.
### Recovering the Patroni cluster
......@@ -1097,7 +1079,7 @@ Considering these, you should carefully plan your PostgreSQL upgrade:
sudo gitlab-ctl pg-upgrade -V 12
```
1. Check the status of the leader and cluster. You can only proceed if you have a healthy leader:
1. Check the status of the leader and cluster. You can proceed only if you have a healthy leader:
```shell
gitlab-ctl patroni check-leader
......@@ -1192,7 +1174,7 @@ If replication is not occurring, it may be necessary to reinitialize a replica.
WARNING:
This is a destructive process and may lead the cluster into a bad state. Make sure that you have a healthy backup before running this process.
As a last resort, if your Patroni cluster is in an unknown/bad state and no node can start, you can
As a last resort, if your Patroni cluster is in an unknown or bad state and no node can start, you can
reset the Patroni state in Consul completely, resulting in a reinitialized Patroni cluster when
the first Patroni node starts.
......
......@@ -14,7 +14,8 @@ Even though Git is very resilient and tries to prevent data integrity issues,
there are times when things go wrong. The following Rake tasks intend to
help GitLab administrators diagnose problem repositories so they can be fixed.
There are 3 things that are checked to determine integrity.
These Rake tasks use three different methods to determine the integrity of Git
repositories.
1. Git repository file system check ([`git fsck`](https://git-scm.com/docs/git-fsck)).
This step verifies the connectivity and validity of objects in the repository.
......@@ -37,7 +38,7 @@ exactly which repositories are causing the trouble.
### Check project code repositories
This task loops through the project code repositories and runs the integrity check
described previously. If a project uses a pool repository, that will also be checked.
described previously. If a project uses a pool repository, that is also checked.
Other types of Git repositories [are not checked](https://gitlab.com/gitlab-org/gitaly/-/issues/3643).
**Omnibus Installation**
......@@ -67,7 +68,7 @@ source repository.
This task loops through all repositories on the GitLab server and outputs
checksums in the format `<PROJECT ID>,<CHECKSUM>`.
- If a repository doesn't exist, the project ID will have a blank checksum.
- If a repository doesn't exist, its project ID has a blank checksum.
- If a repository exists but is empty, the output checksum is `0000000000000000000000000000000000000000`.
- Projects which don't exist are skipped.
......@@ -85,9 +86,9 @@ sudo -u git -H bundle exec rake gitlab:git:checksum_projects RAILS_ENV=productio
For example, if:
- Project with ID#2 doesn't exist, it will be skipped.
- Project with ID#4 doesn't have a repository, its checksum will be blank.
- Project with ID#5 has an empty repository, its checksum will be `0000000000000000000000000000000000000000`.
- Project with ID#2 doesn't exist, it is skipped.
- Project with ID#4 doesn't have a repository, its checksum is blank.
- Project with ID#5 has an empty repository, its checksum is `0000000000000000000000000000000000000000`.
The output would then look something like:
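The sample output itself is cut off by the hunk; based on the rules above, it would look something like this (the non-special checksums are illustrative):

```plaintext
1,3f00112259b85cb33cab7a1a35f0a9bdd2b1d2c3
4,
5,0000000000000000000000000000000000000000
6,6c6b242e2acfe7fb9aa93659574f9c63327a0e37
```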
......@@ -115,7 +116,7 @@ These integrity checks can detect missing files. Additionally, for locally
stored files, checksums are generated and stored in the database upon upload,
and these checks verify them against current files.
Currently, integrity checks are supported for the following types of file:
Integrity checks are supported for the following types of file:
- CI artifacts (Available from version 10.7.0)
- LFS objects (Available from version 10.6.0)
......
......@@ -216,6 +216,20 @@ of your GitLab instance (`.gitlab-ci.yml` if not set):
It is also possible to specify a [custom CI/CD configuration file for a specific project](../../../ci/pipelines/settings.md#specify-a-custom-cicd-configuration-file).
## Enable or disable the pipeline suggestion banner
By default, a banner displays in merge requests with no pipeline, suggesting a
walkthrough on how to add one.
![Suggest pipeline banner](img/suggest_pipeline_banner.png)
To enable or disable the banner:
1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Settings > CI/CD**.
1. Select or clear the **Enable pipeline suggestion banner** checkbox.
1. Select **Save changes**.
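The banner is backed by the `suggest_pipeline_enabled` application setting added in this commit, so it can also be toggled from a Rails console — a sketch, assuming a console session on an instance with this change:

```ruby
# Disable the banner instance-wide without the admin UI.
ApplicationSetting.current.update!(suggest_pipeline_enabled: false)
Gitlab::CurrentSettings.suggest_pipeline_enabled # => false
```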
## Required pipeline configuration **(PREMIUM SELF)**
WARNING:
......
......@@ -16,7 +16,7 @@ export default function initTestCaseShow({ mountPointSelector }) {
}
const apolloProvider = new VueApollo({
defaultClient: createDefaultClient(),
defaultClient: createDefaultClient({}, { assumeImmutableResults: true }),
});
const sidebarOptions = JSON.parse(el.dataset.sidebarOptions);
......
......@@ -198,22 +198,6 @@ h1 {
.dropdown {
position: relative;
}
.dropdown-menu-toggle {
white-space: nowrap;
}
.dropdown-menu-toggle::after {
display: inline-block;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid;
border-right: 0.3em solid transparent;
border-bottom: 0;
border-left: 0.3em solid transparent;
}
.dropdown-menu-toggle:empty::after {
margin-left: 0;
}
.dropdown-menu {
position: absolute;
top: 100%;
......@@ -466,9 +450,6 @@ a {
.hide {
display: none;
}
.dropdown-menu-toggle::after {
display: none;
}
.badge:not(.gl-badge) {
padding: 4px 5px;
font-size: 12px;
......@@ -548,7 +529,7 @@ body {
border-radius: 0.25rem;
white-space: nowrap;
}
.no-outline.dropdown-menu-toggle {
.dropdown-menu-toggle.no-outline {
outline: 0;
}
.dropdown-menu-toggle.dropdown-menu-toggle {
......
......@@ -178,22 +178,6 @@ h1 {
.dropdown {
position: relative;
}
.dropdown-menu-toggle {
white-space: nowrap;
}
.dropdown-menu-toggle::after {
display: inline-block;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid;
border-right: 0.3em solid transparent;
border-bottom: 0;
border-left: 0.3em solid transparent;
}
.dropdown-menu-toggle:empty::after {
margin-left: 0;
}
.dropdown-menu {
position: absolute;
top: 100%;
......@@ -446,9 +430,6 @@ a {
.hide {
display: none;
}
.dropdown-menu-toggle::after {
display: none;
}
.badge:not(.gl-badge) {
padding: 4px 5px;
font-size: 12px;
......@@ -528,7 +509,7 @@ body {
border-radius: 0.25rem;
white-space: nowrap;
}
.no-outline.dropdown-menu-toggle {
.dropdown-menu-toggle.no-outline {
outline: 0;
}
.dropdown-menu-toggle.dropdown-menu-toggle {
......
......@@ -11,8 +11,6 @@ module Ci
idempotent!
def perform(from_id, to_id)
return unless Feature.enabled?(:ci_parallel_minutes_reset, default_enabled: true)
::Ci::Minutes::BatchResetService.new.execute!(ids_range: (from_id..to_id))
end
end
......
......@@ -17,34 +17,21 @@ class ClearSharedRunnersMinutesWorker # rubocop:disable Scalability/IdempotentWo
BATCH_SIZE = 100_000
  def perform
-   if Feature.enabled?(:ci_parallel_minutes_reset, default_enabled: true)
-     start_id = Namespace.minimum(:id)
-     last_id = Namespace.maximum(:id)
+   start_id = Namespace.minimum(:id)
+   last_id = Namespace.maximum(:id)

-     batches = [(last_id - start_id) / BATCH_SIZE, 1].max
-     execution_offset = (TIME_SPREAD / batches).to_i
+   batches = [(last_id - start_id) / BATCH_SIZE, 1].max
+   execution_offset = (TIME_SPREAD / batches).to_i

-     (start_id..last_id).step(BATCH_SIZE).with_index do |batch_start_id, batch_index|
-       batch_end_id = batch_start_id + BATCH_SIZE - 1
+   (start_id..last_id).step(BATCH_SIZE).with_index do |batch_start_id, batch_index|
+     batch_end_id = batch_start_id + BATCH_SIZE - 1

-       delay = execution_offset * batch_index
+     delay = execution_offset * batch_index

-       # #perform_in is used instead of #perform_async to spread the load
-       # evenly across the first three hours of the month to avoid stressing
-       # the database.
-       Ci::BatchResetMinutesWorker.perform_in(delay, batch_start_id, batch_end_id)
-     end
-   else
-     return unless try_obtain_lease
-
-     Ci::Minutes::BatchResetService.new.execute!
+     # #perform_in is used instead of #perform_async to spread the load
+     # evenly across the first three hours of the month to avoid stressing
+     # the database.
+     Ci::BatchResetMinutesWorker.perform_in(delay, batch_start_id, batch_end_id)
    end
  end
-
- private
-
- def try_obtain_lease
-   Gitlab::ExclusiveLease.new('gitlab_clear_shared_runners_minutes_worker',
-     timeout: LEASE_TIMEOUT).try_obtain
- end
end
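The spread arithmetic in `#perform` is easy to sanity-check in isolation. A sketch with hypothetical IDs; `TIME_SPREAD` is not shown in this hunk, so 8 hours is assumed from the specs below:

```ruby
BATCH_SIZE  = 100_000
TIME_SPREAD = 28_800                                  # assumed: 8 hours, per the specs
start_id, last_id = 2, 350_000                        # hypothetical namespace ID range

batches = [(last_id - start_id) / BATCH_SIZE, 1].max  # => 3
execution_offset = (TIME_SPREAD / batches).to_i       # => 9600
# Batches start at IDs 2, 100_002, 200_002, 300_002,
# enqueued with delays 0, 9600, 19200, and 28800 seconds.
```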
---
name: ci_parallel_minutes_reset
introduced_by_url:
rollout_issue_url:
milestone: '12.10'
type: development
group: group::pipeline execution
default_enabled: true
......@@ -30,7 +30,7 @@ module Gitlab
case scope
when 'projects'
eager_load(projects, page, per_page, preload_method, [:route, :namespace, :topics, :topics_acts_as_taggable])
eager_load(projects, page, per_page, preload_method, [:route, :namespace, :topics])
when 'issues'
eager_load(issues, page, per_page, preload_method, project: [:route, :namespace], labels: [], timelogs: [], assignees: [])
when 'merge_requests'
......
......@@ -78,17 +78,5 @@ RSpec.describe Ci::BatchResetMinutesWorker do
expect(last_namespace.reset.extra_shared_runners_minutes_limit).to eq 50
end
end
context 'when feature flag ci_parallel_minutes_reset is disabled' do
before do
stub_feature_flags(ci_parallel_minutes_reset: false)
end
it 'does not call Ci::Minutes::BatchResetService' do
expect(Ci::Minutes::BatchResetService).not_to receive(:new)
worker.perform(first_namespace.id, last_namespace.id)
end
end
end
end
......@@ -8,167 +8,36 @@ RSpec.describe ClearSharedRunnersMinutesWorker do
describe '#perform' do
subject { worker.perform }
context 'when ci_parallel_minutes_reset feature flag is disabled' do
let(:namespace) { create(:namespace) }
before do
stub_feature_flags(ci_parallel_minutes_reset: false)
expect_next_instance_of(described_class) do |instance|
expect(instance).to receive(:try_obtain_lease).and_return(true)
end
end
context 'when project statistics are defined' do
let(:project) { create(:project, namespace: namespace) }
let(:statistics) { project.statistics }
before do
statistics.update!(shared_runners_seconds: 100)
end
it 'clears counters' do
subject
expect(statistics.reload.shared_runners_seconds).to be_zero
end
it 'resets timer' do
subject
expect(statistics.reload.shared_runners_seconds_last_reset).to be_like_time(Time.current)
end
context 'when there are namespaces that were not reset after the reset steps' do
let(:namespace_ids) { [namespace.id] }
before do
allow(Namespace).to receive(:each_batch).and_yield(Namespace.all)
allow(Namespace).to receive(:transaction).and_raise(ActiveRecord::ActiveRecordError)
end
it 'raises an exception' do
expect { worker.perform }.to raise_error(
Ci::Minutes::BatchResetService::BatchNotResetError,
'Some namespace shared runner minutes were not reset'
)
end
end
end
context 'when namespace statistics are defined' do
let!(:statistics) { create(:namespace_statistics, namespace: namespace, shared_runners_seconds: 100) }
it 'clears counters' do
subject
expect(statistics.reload.shared_runners_seconds).to be_zero
end
it 'resets timer' do
subject
expect(statistics.reload.shared_runners_seconds_last_reset).to be_like_time(Time.current)
end
end
context 'when namespace has extra shared runner minutes' do
let!(:namespace) do
create(:namespace, shared_runners_minutes_limit: 100, extra_shared_runners_minutes_limit: 10 )
end
let!(:statistics) do
create(:namespace_statistics, namespace: namespace, shared_runners_seconds: minutes_used * 60)
end
let(:minutes_used) { 0 }
context 'when consumption is below the default quota' do
let(:minutes_used) { 50 }
it 'does not modify the extra minutes quota' do
subject
expect(namespace.reload.extra_shared_runners_minutes_limit).to eq(10)
end
end
context 'when consumption is above the default quota' do
context 'when all extra minutes are used' do
let(:minutes_used) { 115 }
it 'sets extra minutes to 0' do
subject
expect(namespace.reload.extra_shared_runners_minutes_limit).to eq(0)
end
end
context 'when some extra minutes are used' do
let(:minutes_used) { 105 }
it 'discounts the extra minutes used' do
subject
expect(namespace.reload.extra_shared_runners_minutes_limit).to eq(5)
end
end
end
[:last_ci_minutes_notification_at, :last_ci_minutes_usage_notification_level].each do |attr|
context "when #{attr} is present" do
before do
namespace.update_attribute(attr, Time.current)
end
it 'nullifies the field' do
expect(namespace.send(attr)).to be_present
subject
expect(namespace.reload.send(attr)).not_to be_present
end
end
end
before do
[2, 3, 4, 5, 7, 8, 10, 14].each do |id|
create(:namespace, id: id)
end
end
context 'when ci_parallel_minutes_reset feature flag is enabled' do
subject { worker.perform }
context 'with batch size lower than count of namespaces' do
before do
stub_feature_flags(ci_parallel_minutes_reset: true)
[2, 3, 4, 5, 7, 8, 10, 14].each do |id|
create(:namespace, id: id)
end
stub_const("#{described_class}::BATCH_SIZE", 3)
end
context 'with batch size lower than count of namespaces' do
before do
stub_const("#{described_class}::BATCH_SIZE", 3)
end
it 'runs a worker per batch', :aggregate_failures do
# Spreads evenly across 8 hours (28,800 seconds)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(0.seconds, 2, 4)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(7200.seconds, 5, 7)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(14400.seconds, 8, 10)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(21600.seconds, 11, 13)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(28800.seconds, 14, 16)
it 'runs a worker per batch', :aggregate_failures do
# Spreads evenly across 8 hours (28,800 seconds)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(0.seconds, 2, 4)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(7200.seconds, 5, 7)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(14400.seconds, 8, 10)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(21600.seconds, 11, 13)
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(28800.seconds, 14, 16)
subject
end
subject
end
end
context 'with batch size higher than count of namespaces' do
# Uses default BATCH_SIZE
it 'runs the worker in a single batch', :aggregate_failures do
# Runs a single batch, immediately
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(0.seconds, 2, 100001)
context 'with batch size higher than count of namespaces' do
# Uses default BATCH_SIZE
it 'runs the worker in a single batch', :aggregate_failures do
# Runs a single batch, immediately
expect(Ci::BatchResetMinutesWorker).to receive(:perform_in).with(0.seconds, 2, 100001)
subject
end
subject
end
end
end
......
......@@ -39,11 +39,11 @@ module API
# rubocop: disable CodeReuse/ActiveRecord
def self.preload_relation(projects_relation, options = {})
# Preloading topics should be done using only `:topics`,
# as `:topics` are defined as: `has_many :topics, through: :taggings`
# as `:topics` are defined as: `has_many :topics, through: :project_topics`
# N+1 is solved then by using `subject.topics.map(&:name)`
# MR describing the solution: https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/20555
projects_relation.preload(:project_feature, :route)
.preload(:import_state, :topics, :topics_acts_as_taggable)
.preload(:import_state, :topics)
.preload(:auto_devops)
.preload(namespace: [:route, :owner])
end
......
......@@ -132,7 +132,7 @@ module API
def self.preload_relation(projects_relation, options = {})
# Preloading topics should be done using only `:topics`,
# as `:topics` are defined as: `has_many :topics, through: :taggings`
# as `:topics` are defined as: `has_many :topics, through: :project_topics`
# N+1 is solved then by using `subject.topics.map(&:name)`
# MR describing the solution: https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/20555
super(projects_relation).preload(group: :namespace_settings)
......@@ -144,7 +144,7 @@ module API
.preload(project_group_links: { group: :route },
fork_network: :root_project,
fork_network_member: :forked_from_project,
forked_from_project: [:route, :topics, :topics_acts_as_taggable, :group, :project_feature, namespace: [:route, :owner]])
forked_from_project: [:route, :topics, :group, :project_feature, namespace: [:route, :owner]])
end
# rubocop: enable CodeReuse/ActiveRecord
......
......@@ -2396,9 +2396,15 @@ msgstr ""
msgid "AdminSettings|Disable public access to Pages sites"
msgstr ""
msgid "AdminSettings|Display a banner on merge requests in projects with no pipelines to initiate steps to add a .gitlab-ci.yml file."
msgstr ""
msgid "AdminSettings|Domain verification is an essential security measure for public GitLab sites. Users are required to demonstrate they control a domain before it is enabled. %{link_start}Learn more.%{link_end}"
msgstr ""
msgid "AdminSettings|Enable pipeline suggestion banner"
msgstr ""
msgid "AdminSettings|Enable shared runners for new projects"
msgstr ""
......
......@@ -314,12 +314,14 @@ RSpec.describe 'Admin updates settings' do
check 'Default to Auto DevOps pipeline for all projects'
fill_in 'application_setting_auto_devops_domain', with: 'domain.com'
uncheck 'Keep the latest artifacts for all jobs in the latest successful pipelines'
uncheck 'Enable pipeline suggestion banner'
click_button 'Save changes'
end
expect(current_settings.auto_devops_enabled?).to be true
expect(current_settings.auto_devops_domain).to eq('domain.com')
expect(current_settings.keep_latest_artifact).to be false
expect(current_settings.suggest_pipeline_enabled).to be false
expect(page).to have_content "Application settings saved successfully"
end
......
......@@ -355,10 +355,7 @@ container_repositories:
- name
project:
- external_status_checks
- taggings
- base_tags
- topic_taggings
- topics_acts_as_taggable
- project_topics
- topics
- chat_services
......
......@@ -7240,35 +7240,6 @@ RSpec.describe Project, factory_default: :keep do
expect(project.reload.topics.map(&:name)).to eq(%w[topic1 topic2 topic3])
end
end
context 'during ExtractProjectTopicsIntoSeparateTable migration' do
before do
topic_a = ActsAsTaggableOn::Tag.find_or_create_by!(name: 'topicA')
topic_b = ActsAsTaggableOn::Tag.find_or_create_by!(name: 'topicB')
project.reload.topics_acts_as_taggable = [topic_a, topic_b]
project.save!
project.reload
end
it 'topic_list returns correct string array' do
expect(project.topic_list).to eq(%w[topicA topicB topic1 topic2 topic3])
end
it 'topics returns correct topic records' do
expect(project.topics.map(&:class)).to eq([ActsAsTaggableOn::Tag, ActsAsTaggableOn::Tag, Projects::Topic, Projects::Topic, Projects::Topic])
expect(project.topics.map(&:name)).to eq(%w[topicA topicB topic1 topic2 topic3])
end
it 'topic_list= sets new topics and removes old topics' do
project.topic_list = 'new-topic1, new-topic2'
project.save!
project.reload
expect(project.topics.map(&:class)).to eq([Projects::Topic, Projects::Topic])
expect(project.topics.map(&:name)).to eq(%w[new-topic1 new-topic2])
end
end
end
shared_examples 'all_runners' do
......