Commit be2f4c57 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 2711c26b
# frozen_string_literal: true

module Clusters
  module Management
    class CreateProjectService
      CreateError = Class.new(StandardError)

      attr_reader :cluster, :current_user

      def initialize(cluster, current_user:)
        @cluster = cluster
        @current_user = current_user
      end

      def execute
        return unless management_project_required?

        ActiveRecord::Base.transaction do
          project = create_management_project!
          update_cluster!(project)
        end
      end

      private

      def management_project_required?
        Feature.enabled?(:auto_create_cluster_management_project) && cluster.management_project.nil?
      end

      def project_params
        {
          name: project_name,
          description: project_description,
          namespace_id: namespace.id,
          visibility_level: Gitlab::VisibilityLevel::PRIVATE
        }
      end

      def project_name
        "#{cluster.name} Cluster Management"
      end

      def project_description
        "This project is automatically generated and will be used to manage your Kubernetes cluster. [More information](#{docs_path})"
      end

      def docs_path
        Rails.application.routes.url_helpers.help_page_path('user/clusters/management_project')
      end

      def create_management_project!
        ::Projects::CreateService.new(current_user, project_params).execute.tap do |project|
          errors = project.errors.full_messages

          if errors.any?
            raise CreateError.new("Failed to create project: #{errors}")
          end
        end
      end

      def update_cluster!(project)
        unless cluster.update(management_project: project)
          raise CreateError.new("Failed to update cluster: #{cluster.errors.full_messages}")
        end
      end

      def namespace
        case cluster.cluster_type
        when 'project_type'
          cluster.project.namespace
        when 'group_type'
          cluster.group
        when 'instance_type'
          instance_administrators_group
        else
          raise NotImplementedError
        end
      end

      def instance_administrators_group
        Gitlab::CurrentSettings.instance_administrators_group ||
          raise(CreateError.new('Instance administrators group not found'))
      end
    end
  end
end
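For context, a minimal usage sketch of the service above (not part of the commit; the cluster and user lookups are placeholders):

```ruby
# Hypothetical invocation, assuming an existing cluster record and an acting user.
cluster = Clusters::Cluster.find(1)              # placeholder ID
user    = User.find_by(username: 'admin')        # placeholder lookup

# Creates the "<cluster name> Cluster Management" project and links it to the
# cluster, or raises Clusters::Management::CreateProjectService::CreateError.
Clusters::Management::CreateProjectService.new(cluster, current_user: user).execute
```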
---
title: Optimize projects_enforcing_code_owner_approval counter query performance for usage ping
merge_request: 27526
author:
type: performance
---
title: Add atomic and cleanup-on-fail parameters for Helm
merge_request: 27721
author:
type: changed
# frozen_string_literal: true

class AddIndexOnIdAndArchivedAndPendingDeleteToProjects < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false
  INDEX_NAME = 'index_projects_on_id_and_archived_and_pending_delete'

  disable_ddl_transaction!

  def up
    add_concurrent_index :projects, :id, where: "archived = FALSE AND pending_delete = FALSE", name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :projects, INDEX_NAME
  end
end
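The partial index matters because counters over "live" projects can then be answered from the index alone. A rough sketch of the kind of query it serves (an assumption about usage, not code from this commit):

```ruby
# Counting non-archived, non-pending-delete projects — the WHERE clause matches
# the partial index condition, so PostgreSQL can satisfy it with an index scan
# on projects(id) instead of filtering the whole table.
Project.where(archived: false, pending_delete: false).count
```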
...@@ -9627,6 +9627,8 @@ CREATE INDEX index_projects_on_creator_id_and_created_at ON public.projects USIN
CREATE INDEX index_projects_on_description_trigram ON public.projects USING gin (description public.gin_trgm_ops);
CREATE INDEX index_projects_on_id_and_archived_and_pending_delete ON public.projects USING btree (id) WHERE ((archived = false) AND (pending_delete = false));
CREATE UNIQUE INDEX index_projects_on_id_partial_for_visibility ON public.projects USING btree (id) WHERE (visibility_level = ANY (ARRAY[10, 20]));
CREATE INDEX index_projects_on_id_service_desk_enabled ON public.projects USING btree (id) WHERE (service_desk_enabled = true);
...@@ -12710,6 +12712,7 @@ INSERT INTO "schema_migrations" (version) VALUES
('20200318163148'),
('20200318164448'),
('20200318165448'),
('20200318175008'),
('20200319203901'),
('20200323075043');
...@@ -3,7 +3,7 @@
#
# For a list of all options, see https://errata-ai.github.io/vale/styles/
extends: substitution
message: 'Use "%s" instead of "%s".'
link: https://about.gitlab.com/handbook/communication/#top-misused-terms
level: error
ignorecase: true
...@@ -11,3 +11,4 @@ swap:
GitLabber: GitLab team member
self hosted: self-managed
self-hosted: self-managed
postgres: PostgreSQL
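Vale applies these swaps itself; the following Ruby snippet is only an illustration of what the `swap` map expresses, not how Vale is implemented:

```ruby
# Illustration only: replace the flagged terms with their preferred forms.
swaps = {
  'GitLabber'   => 'GitLab team member',
  'self hosted' => 'self-managed',
  'postgres'    => 'PostgreSQL'
}

text = 'Our postgres instance is self hosted.'
swaps.each { |from, to| text = text.gsub(/\b#{Regexp.escape(from)}\b/i, to) }
puts text # => "Our PostgreSQL instance is self-managed."
```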
---
# Checks that version text is formatted correctly.
#
# Specifically looks for either of the following that is immediately followed on the next line
# by content, which will break rendering:
#
# - `> Introduced` (version text without a link)
# - `> [Introduced` (version text with a link)
#
# Because it excludes `-`, it doesn't look for multi-line version text, for which content
# immediately on the next line is ok. However, this will often highlight where multi-line version
# text is attempted without `-` characters.
#
# For a list of all options, see https://errata-ai.github.io/vale/styles/
extends: existence
message: '"%s" is not formatted correctly.'
link: https://docs.gitlab.com/ee/development/documentation/styleguide.html#text-for-documentation-requiring-version-text
level: error
scope: raw
raw:
- '> (- ){0}\[?Introduced.+\n.+'
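As a quick illustration of what the `raw` pattern above flags (a sketch, not part of the Vale rule itself):

```ruby
pattern = Regexp.new('> (- ){0}\[?Introduced.+\n.+')

bad  = "> Introduced in GitLab 11.3.\nContent on the very next line breaks rendering."
good = "> - Introduced in GitLab 10.6.\n> - Until GitLab 12.8, the feature name was Plugins."

puts pattern.match?(bad)  # => true  (single-line version text followed by content)
puts pattern.match?(good) # => false (multi-line `-` style is not matched)
```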
...@@ -134,7 +134,7 @@ on adding these events into GitLab:
The current architecture of audit events is not prepared to receive a very high amount of records.
It may make the user interface for your project or audit logs very busy, and the disk space consumed by the
`audit_events` PostgreSQL table will increase considerably. It's disabled by default
to prevent performance degradations on GitLab instances with very high Git write traffic.

In an upcoming release, Audit Logs for Git push events will be enabled
......
...@@ -25,10 +25,11 @@ GitLab supports two authentication methods:
### Authentication against a local database with X.509 certificates

> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/726) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.6 as an experimental feature.

CAUTION: **Caution:**
Smartcard authentication against local databases may change or be removed completely in future
releases.

Smartcards with X.509 certificates can be used to authenticate with GitLab.
......
# File hooks

> - Introduced in GitLab 10.6.
> - Until GitLab 12.8, the feature name was Plugins.

With custom file hooks, GitLab administrators can introduce custom integrations
without modifying GitLab's source code.
......
...@@ -21,9 +21,9 @@ verification methods:
| Database | Application data in PostgreSQL | Native | Native |
| Database | Redis | _N/A_ (*1*) | _N/A_ |
| Database | Elasticsearch | Native | Native |
| Database | Personal snippets | PostgreSQL Replication | PostgreSQL Replication |
| Database | Project snippets | PostgreSQL Replication | PostgreSQL Replication |
| Database | SSH public keys | PostgreSQL Replication | PostgreSQL Replication |
| Git | Project repository | Geo with Gitaly | Gitaly Checksum |
| Git | Project wiki repository | Geo with Gitaly | Gitaly Checksum |
| Git | Project designs repository | Geo with Gitaly | Gitaly Checksum |
......
...@@ -242,7 +242,7 @@ sudo gitlab-rake gitlab:geo:check
Checking Geo ... Finished
```

When performing a PostgreSQL major version (9 > 10) update this is expected. Follow:

- [initiate-the-replication-process](database.md#step-3-initiate-the-replication-process)
- [Geo database has an outdated FDW remote schema](troubleshooting.md#geo-database-has-an-outdated-fdw-remote-schema-error)
......
...@@ -146,7 +146,7 @@ Each service in the package comes with a set of [default ports](https://docs.git
- Application servers connect to either PgBouncer directly via its [default port](https://docs.gitlab.com/omnibus/package-information/defaults.html#pgbouncer) or via a configured Internal Load Balancer (TCP) that serves multiple PgBouncers.
- PgBouncer connects to the primary database servers [PostgreSQL default port](https://docs.gitlab.com/omnibus/package-information/defaults.html#postgresql)
- Repmgr connects to the database servers [PostgreSQL default port](https://docs.gitlab.com/omnibus/package-information/defaults.html#postgresql)
- PostgreSQL secondaries connect to the primary database servers [PostgreSQL default port](https://docs.gitlab.com/omnibus/package-information/defaults.html#postgresql)
- Consul servers and agents connect to each other's [Consul default ports](https://docs.gitlab.com/omnibus/package-information/defaults.html#consul)

#### Required information

...@@ -899,7 +899,7 @@ after it has been restored to service.
```shell
gitlab-ctl repmgr standby unregister --node=959789412
```

##### Add a node as a standby server

From the standby node, run:

...@@ -916,24 +916,24 @@ after it has been restored to service.
scratch by performing a `gitlab-ctl repmgr standby setup NEW_MASTER`.

##### Add a failed master back into the cluster as a standby node

Once `repmgrd` and PostgreSQL are running, the node will need to follow the new master
as a standby node.

```
gitlab-ctl repmgr standby follow NEW_MASTER
```

Once the node is following the new master as a standby, the node needs to be
[unregistered from the cluster on the new master node](#remove-a-standby-from-the-cluster).

Once the old master node has been unregistered from the cluster, it will need
to be set up as a new standby:

```
gitlab-ctl repmgr standby setup NEW_MASTER
```

Failure to unregister and re-add the old master node can lead to subsequent failovers
not working.
......
...@@ -132,7 +132,7 @@ across NFS. The GitLab support team will not be able to assist on performance is
this configuration.

Additionally, this configuration is specifically warned against in the
[PostgreSQL Documentation](https://www.postgresql.org/docs/current/creating-cluster.html#CREATING-CLUSTER-NFS):

>PostgreSQL does nothing special for NFS file systems, meaning it assumes NFS behaves exactly like
>locally-connected drives. If the client or server NFS implementation does not provide standard file
......
...@@ -55,7 +55,7 @@ you want using steps 1 and 2 from the GitLab downloads page.
gitlab_rails['gitaly_token'] = 'YOUR_TOKEN'
```

1. Set up Sidekiq's connection to PostgreSQL:

```ruby
gitlab_rails['db_host'] = '10.10.1.30'
...@@ -66,7 +66,7 @@ you want using steps 1 and 2 from the GitLab downloads page.
gitlab_rails['auto_migrate'] = false
```

Remember to add the Sidekiq nodes to the PostgreSQL whitelist:

```ruby
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.10.1.30/32 10.10.1.31/32 10.10.1.32/32 10.10.1.33/32 10.10.1.38/32)
......
# Web terminals

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/7690) in GitLab 8.15.

NOTE: **Note:**
Only project maintainers and owners can access web terminals.

With the introduction of the [Kubernetes integration](../../user/project/clusters/index.md),
GitLab gained the ability to store and use credentials for a Kubernetes cluster.
...@@ -92,8 +94,7 @@ they will receive a `Connection failed` message.
## Limiting WebSocket connection time

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/8413) in GitLab 8.17.

Terminal sessions use long-lived connections; by default, these may last
forever. You can configure a maximum session time in the Admin Area of your
......
...@@ -13,7 +13,7 @@ This guide talks about how to read and use these system log files.
This file lives in `/var/log/gitlab/gitlab-rails/production_json.log` for
Omnibus GitLab packages or in `/home/git/gitlab/log/production_json.log` for
installations from source. When GitLab is running in an environment
other than production, the corresponding log file is shown here.

It contains a structured log for Rails controller requests received from
GitLab, thanks to [Lograge](https://github.com/roidrage/lograge/). Note that
...@@ -105,7 +105,7 @@ NOTE: **Note:** Starting with GitLab 12.5, if an error occurs, an
This file lives in `/var/log/gitlab/gitlab-rails/production.log` for
Omnibus GitLab packages or in `/home/git/gitlab/log/production.log` for
installations from source. (When GitLab is running in an environment
other than production, the corresponding log file is shown here.)

It contains information about all performed requests. You can see the
URL and type of request, IP address, and what parts of code were
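Because the structured log is one JSON object per line, it can be consumed programmatically; a small illustration (the sample entry below is made up):

```ruby
require 'json'

# Made-up sample line in the shape of production_json.log entries.
line  = '{"method":"GET","path":"/gitlab/gitlab-foss/issues/1234","status":200,"duration":229.03}'
entry = JSON.parse(line)

puts "#{entry['method']} #{entry['path']} -> #{entry['status']} (duration #{entry['duration']})"
```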
......
...@@ -39,8 +39,8 @@ Test Connection to ensure the configuration is correct.
- **Name**: `InfluxDB`
- **Default**: Checked
- **Type**: `InfluxDB 0.9.x` (Even if you're using InfluxDB 0.10.x)
- For the URL, use `https://localhost:8086`, or provide the remote URL if you've installed InfluxDB
  on a separate server
- **Access**: `proxy`
- **Database**: `gitlab`
- **User**: `admin` (Or the username configured when setting up InfluxDB)
...@@ -52,7 +52,7 @@ Test Connection to ensure the configuration is correct.
If you intend to import the GitLab provided Grafana dashboards, you will need to
set up the right retention policies and continuous queries. The easiest way of
doing this is by using the [InfluxDB Management](https://gitlab.com/gitlab-org/influxdb-management)
repository.

To use this repository you must first clone it:

...@@ -74,7 +74,7 @@ and then editing the `.env` file to contain the correct InfluxDB settings. Once
configured you can simply run `bundle exec rake` and the InfluxDB database will
be configured for you.

For more information see the [InfluxDB Management README](https://gitlab.com/gitlab-org/influxdb-management/blob/master/README.md).

## Import Dashboards
......
...@@ -33,7 +33,7 @@ page was open. Only the first two requests per unique URL are captured.
## Request warnings

For requests exceeding predefined limits, a warning icon will be shown
next to the failing metric, along with an explanation. In this example,
the Gitaly call duration exceeded the threshold:
......
...@@ -201,7 +201,7 @@ When Puma is used instead of Unicorn, the following metrics are available:
| `puma_running_workers` | Gauge | 12.0 | Number of booted workers |
| `puma_stale_workers` | Gauge | 12.0 | Number of old workers |
| `puma_running` | Gauge | 12.0 | Number of running threads |
| `puma_queued_connections` | Gauge | 12.0 | Number of connections in that worker's "to do" set waiting for a worker thread |
| `puma_active_connections` | Gauge | 12.0 | Number of threads processing a request |
| `puma_pool_capacity` | Gauge | 12.0 | Number of requests the worker is capable of taking right now |
| `puma_max_threads` | Gauge | 12.0 | Maximum number of worker threads |
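These gauges correspond to counts Puma itself reports; a rough sketch of where such numbers come from (an assumption for illustration, not code from this commit):

```ruby
require 'json'

# Illustrative only: assumes it runs inside a Puma process in single mode,
# where Puma.stats returns a JSON payload with fields like the gauges above.
stats = JSON.parse(Puma.stats)
puts stats['running']       # running threads
puts stats['pool_capacity'] # requests the worker can take right now
```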
......
...@@ -268,42 +268,42 @@ export Prometheus metrics.
The node exporter allows you to measure various machine resources, such as
memory, disk, and CPU utilization.

[Read more about the node exporter](node_exporter.md).

### Redis exporter

The Redis exporter allows you to measure various Redis metrics.

[Read more about the Redis exporter](redis_exporter.md).

### PostgreSQL exporter

The PostgreSQL exporter allows you to measure various PostgreSQL metrics.

[Read more about the PostgreSQL exporter](postgres_exporter.md).

### PgBouncer exporter

The PgBouncer exporter allows you to measure various PgBouncer metrics.

[Read more about the PgBouncer exporter](pgbouncer_exporter.md).

### Registry exporter

The Registry exporter allows you to measure various Registry metrics.

[Read more about the Registry exporter](registry_exporter.md).

### GitLab exporter

The GitLab exporter allows you to measure various GitLab metrics, pulled from Redis and the database.

[Read more about the GitLab exporter](gitlab_exporter.md).

## Configuring Prometheus to monitor Kubernetes

> - Introduced in GitLab 9.0.
> - Pod monitoring introduced in GitLab 9.4.

If your GitLab server is running within Kubernetes, Prometheus will collect metrics from the Nodes and [annotated Pods](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) in the cluster, including performance data on each container. This is particularly helpful if your CI/CD environments run in the same cluster, as you can use the [Prometheus project integration][prometheus integration] to monitor them.
......
...@@ -22,8 +22,8 @@ To enable the PgBouncer exporter:
Prometheus will now automatically begin collecting performance data from
the PgBouncer exporter exposed under `localhost:9188`.

The PgBouncer exporter will also be enabled by default if the [`pgbouncer_role`][postgres roles]
role is enabled.

[← Back to the main Prometheus page](index.md)
......
# Fast lookup of authorized SSH keys in the database

> - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/1631) in [GitLab Starter](https://about.gitlab.com/pricing/) 9.3.
> - [Available in](https://gitlab.com/gitlab-org/gitlab/issues/3953) GitLab Community Edition 10.4.

NOTE: **Note:** This document describes a drop-in replacement for the
`authorized_keys` file for normal (non-deploy key) users. Consider
using [SSH certificates](ssh_certificates.md), they are even faster,
but are not a drop-in replacement.
Regular SSH operations become slow as the number of users grows because OpenSSH
searches for a key to authorize a user via a linear search. In the worst case,
such as when the user is not authorized to access GitLab, OpenSSH will scan the
...@@ -101,7 +98,7 @@ This is a brief overview. Please refer to the above instructions for more contex
1. [Rebuild the `authorized_keys` file](../raketasks/maintenance.md#rebuild-authorized_keys-file)
1. Enable writes to the `authorized_keys` file in Application Settings
1. Remove the `AuthorizedKeysCommand` lines from `/etc/ssh/sshd_config` or from `/assets/sshd_config` if you are using Omnibus Docker.
1. Reload `sshd`: `sudo service sshd reload`
1. Remove the `/opt/gitlab-shell/authorized_keys` file

## Compiling a custom version of OpenSSH for CentOS 6

...@@ -187,7 +184,7 @@ the database. The following instructions can be used to build OpenSSH 7.5:
You should see a line that reads: "debug1: Remote protocol version 2.0, remote software version OpenSSH_7.5"

If not, you may need to restart `sshd` (e.g. `systemctl restart sshd.service`).

1. *IMPORTANT!* Open a new SSH session to your server before exiting to make
sure everything is working! If you need to downgrade, simply install the
......
...@@ -31,7 +31,7 @@ If you want to see progress, replace `-xf` with `-xvf`.
### Tar pipe to another server

You can also use a tar pipe to copy data to another server. If your
`git` user has SSH access to the new server as `git@newserver`, you
can pipe the data through SSH.

```shell
......
...@@ -33,7 +33,7 @@ uploading user SSH keys to GitLab entirely.
How to fully set up SSH certificates is outside the scope of this
document. See [OpenSSH's
`PROTOCOL.certkeys`](https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?annotate=HEAD)
for how it works, and e.g. [RedHat's documentation about
it](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-using_openssh_certificate_authentication).
......
...@@ -57,7 +57,7 @@ local location or even use object storage.
The packages for Omnibus GitLab installations are stored under
`/var/opt/gitlab/gitlab-rails/shared/packages/` and for source
installations under `shared/packages/` (relative to the Git home directory).
To change the local storage path:

**Omnibus GitLab installations**
......
...@@ -38,7 +38,7 @@ which you can set it up:
the Pages daemon is installed, so you will have to share it via network.
- Run the Pages daemon in the same server as GitLab, listening on the same IP
but on different ports. In that case, you will have to proxy the traffic with
a load balancer. If you choose that route note that you should use TCP load
balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing) the
pages will not be able to be served with user provided certificates. For
HTTP it's OK to use HTTP or TCP load balancing.
...@@ -256,7 +256,7 @@ GitLab supports [custom domain verification](../../user/project/pages/custom_dom
When adding a custom domain, users will be required to prove they own it by
adding a GitLab-controlled verification code to the DNS records for that domain.

If your user base is private or otherwise trusted, you can disable the
verification requirement. Navigate to **Admin Area > Settings > Preferences** and
uncheck **Require users to prove ownership of custom domains** in the **Pages** section.
This setting is enabled by default.
...@@ -358,7 +358,7 @@ For Omnibus, normally this would be fixed by [installing a custom CA in GitLab O
but a [bug](https://gitlab.com/gitlab-org/gitlab/issues/25411) is currently preventing
that method from working. Use the following workaround:

1. Append your GitLab server TLS/SSL certificate to `/opt/gitlab/embedded/ssl/certs/cacert.pem` where `gitlab-domain-example.com` is your GitLab application URL

```shell
printf "\ngitlab-domain-example.com\n===========================\n" | sudo tee --append /opt/gitlab/embedded/ssl/certs/cacert.pem
...@@ -582,7 +582,7 @@ but commented out to help encourage others to add to it in the future. -->
### `open /etc/ssl/ca-bundle.pem: permission denied`

GitLab Pages runs inside a chroot jail, usually in a uniquely numbered directory like
`/tmp/gitlab-pages-*`.

Within the jail, a bundle of trusted certificates is
...@@ -592,7 +592,7 @@ from `/opt/gitlab/embedded/ssl/certs/cacert.pem`
as part of starting up Pages.

If the permissions on the source file are incorrect (they should be `0644`) then
the file inside the chroot jail will also be wrong.

Pages will log errors in `/var/log/gitlab/gitlab-pages/current` like:

...@@ -601,7 +601,7 @@ x509: failed to load system roots and no roots provided
open /etc/ssl/ca-bundle.pem: permission denied
```

The use of a chroot jail makes this error misleading, as it is not
referring to `/etc/ssl` on the root filesystem.

The fix is to correct the source file permissions and restart Pages:
......
...@@ -35,7 +35,7 @@ which you can set it up:
the Pages daemon is installed, so you will have to share it via network.
1. Run the Pages daemon in the same server as GitLab, listening on the same IP
but on different ports. In that case, you will have to proxy the traffic with
a load balancer. If you choose that route note that you should use TCP load
balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing) the
pages will not be able to be served with user provided certificates. For
HTTP it's OK to use HTTP or TCP load balancing.
...@@ -51,7 +51,7 @@ Before proceeding with the Pages configuration, make sure that:
this document we assume that to be `example.io`.
1. You have configured a **wildcard DNS record** for that domain.
1. You have installed the `zip` and `unzip` packages in the same server that
GitLab is installed since they are needed to compress and decompress the
Pages artifacts.
1. (Optional) You have a **wildcard certificate** for the Pages domain if you
decide to serve Pages (`*.example.io`) under HTTPS.
...@@ -388,7 +388,7 @@ Each request to view a resource in a private site is authenticated by Pages
using that token. For each request it receives, it makes a request to the GitLab
API to check that the user is authorized to read that site.

From [GitLab 12.8](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/3689) onward,
Access Control parameters for Pages are set in a configuration file, which
by convention is named `gitlab-pages-config`. The configuration file is passed to
pages using the `-config flag` or CONFIG environment variable.
......
...@@ -129,7 +129,7 @@ Done!
## LDAP Check

The LDAP check Rake task will test the bind DN and password credentials
(if configured) and will list a sample of LDAP users. This task is also
executed as part of the `gitlab:check` task, but can run independently.
See [LDAP Rake Tasks - LDAP Check](ldap.md#check) for details.
......
...@@ -184,13 +184,13 @@ Courier, which we will install later to add IMAP authentication, requires mailbo
imapd start
```

1. The `courier-authdaemon` isn't started after installation. Without it, IMAP authentication will fail:

```shell
sudo service courier-authdaemon start
```

You can also configure `courier-authdaemon` to start on boot:

```shell
sudo systemctl enable courier-authdaemon
......
# Repository checks

> [Introduced][ce-3232] in GitLab 8.7.

Git has a built-in mechanism, [`git fsck`][git-fsck], to verify the
integrity of all data committed to a repository. GitLab administrators
...@@ -11,6 +10,9 @@ before the check result is visible on the project admin page. If the
checks failed you can see their output on the admin log page under
'repocheck.log'.

NOTE: **Note:**
It is OFF by default because it still causes too many false alarms.

## Periodic checks

When enabled, GitLab periodically runs a repository check on all project
......
...@@ -37,7 +37,7 @@ sudo gitlab-ctl restart
The output should be similar to this:

```plaintext
ok: run: gitlab-workhorse: (pid 11291) 1s
ok: run: logrotate: (pid 11299) 0s
ok: run: mailroom: (pid 11306) 0s
...@@ -103,13 +103,13 @@ depend on those files.
If you have followed the official installation guide to [install GitLab from
source][install], run the following command to restart GitLab:

```shell
sudo service gitlab restart
```

The output should be similar to this:

```plaintext
Shutting down GitLab Unicorn
Shutting down GitLab Sidekiq
Shutting down GitLab Workhorse
......
...@@ -118,7 +118,7 @@ downtime. Otherwise skip to the next section.
```

1. This forces the process to generate a Ruby backtrace. Check
`/var/log/gitlab/unicorn/unicorn_stderr.log` for the backtrace. For example, you may see:

```plaintext
from /opt/gitlab/embedded/service/gitlab-rails/lib/gitlab/metrics/sampler.rb:33:in `block in start'
......
...@@ -19,7 +19,7 @@ running on.
## strace-parser

[strace-parser](https://gitlab.com/wchandler/strace-parser) is a small tool to analyze
and summarize raw `strace` data.

## Pritaly
......
...@@ -8,7 +8,7 @@ Troubleshooting Elasticsearch requires:
## Common terminology

- **Lucene**: A full-text search library written in Java.
- **Near real time (NRT)**: Refers to the slight latency from the time to index a
document to the time when it becomes searchable.
- **Cluster**: A collection of one or more nodes that work together to hold all
the data, providing indexing and search capabilities.
...@@ -271,7 +271,7 @@ Generally speaking, ensure:
- The Elasticsearch server has enough RAM and CPU cores.
- That sharding **is** being used.

Going into some more detail here, if Elasticsearch is running on the same server as GitLab, resource contention is **very** likely to occur. Ideally, Elasticsearch, which requires ample resources, should be running on its own server (maybe coupled with Logstash and Kibana).

When it comes to Elasticsearch, RAM is the key resource. Elasticsearch themselves recommend:
......
...@@ -162,7 +162,7 @@ and they will assist you with any issues you are having.
kubectl get secret <secret-name> -ojsonpath={.data.password} | base64 --decode ; echo
```

- How to connect to a GitLab PostgreSQL database:

```shell
kubectl exec -it <task-runner-pod-name> -- /srv/gitlab/bin/rails dbconsole -p
......
...@@ -25,7 +25,7 @@ the `SIDEKIQ_LOG_ARGUMENTS` [environment variable](https://docs.gitlab.com/omnib
Example:

```ruby
gitlab_rails['env'] = {"SIDEKIQ_LOG_ARGUMENTS" => "1"}
```
...@@ -43,7 +43,7 @@ single argument containing the string `"..."`.
Send the Sidekiq process ID the `TTIN` signal and it will output thread
backtraces in the log file.

```shell
kill -TTIN <sidekiq_pid>
```
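The same signal can also be sent from Ruby, for example from a console on the Sidekiq host (illustrative only; the PID is a placeholder):

```ruby
# Equivalent to `kill -TTIN <sidekiq_pid>`: Sidekiq dumps thread backtraces
# to its log file when it receives TTIN.
sidekiq_pid = 12345 # placeholder — use the real Sidekiq process ID
Process.kill('TTIN', sidekiq_pid)
```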
...@@ -95,7 +95,7 @@ sudo perf record -p <sidekiq_pid>
Let this run for 30-60 seconds and then press Ctrl-C. Then view the perf report:

```shell
$ sudo perf report

# Sample output
Samples: 348K of event 'cycles', Event count (approx.): 280908431073
......
...@@ -181,8 +181,7 @@ Impersonation tokens are used exactly like regular personal access tokens, and c
#### Disable impersonation

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/40385) in GitLab 11.6.

By default, impersonation is enabled. To disable impersonation:
......
# Group badges API

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/17082) in GitLab 10.6.

## Placeholder tokens
......
# Group clusters API

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/30213) in GitLab 12.1.

NOTE: **Note:**
User will need at least maintainer access for the group to use these endpoints.
......
# Project badges API

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/17082) in GitLab 10.6.

## Placeholder tokens
......
...@@ -350,8 +350,7 @@ Alias support for the Kubernetes executor was [introduced](https://gitlab.com/gi
### Starting multiple services from the same image

> Introduced in GitLab and GitLab Runner 9.4. Read more about the [extended configuration options](#extended-docker-configuration-options).

Before the new extended Docker configuration options, the following configuration
would not work properly:
...@@ -384,8 +383,7 @@ in `.gitlab-ci.yml` file.
### Setting a command for the service

> Introduced in GitLab and GitLab Runner 9.4. Read more about the [extended configuration options](#extended-docker-configuration-options).

Let's assume you have a `super/sql:latest` image with some SQL database
inside it and you would like to use it as a service for your job. Let's also
...@@ -426,8 +424,7 @@ As you can see, the syntax of `command` is similar to [Dockerfile's `CMD`][cmd].
### Overriding the entrypoint of an image

> Introduced in GitLab and GitLab Runner 9.4. Read more about the [extended configuration options](#extended-docker-configuration-options).

Before showing the available entrypoint override methods, let's describe shortly
how the Runner starts and uses a Docker image for the containers used in the
......
...@@ -4,8 +4,7 @@ type: howto
# Building images with kaniko and GitLab CI/CD

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/45512) in GitLab 11.2. Requires GitLab Runner 11.2 and above.

[kaniko](https://github.com/GoogleContainerTools/kaniko) is a tool to build
container images from a Dockerfile, inside a container or Kubernetes cluster.
......
...@@ -77,6 +77,6 @@ gitlab-runner register \
--docker-services latest
```

With the command above, you create a Runner that uses the [ruby:2.6](https://hub.docker.com/_/ruby) image and uses a [PostgreSQL](https://hub.docker.com/_/postgres) database.

To access the PostgreSQL database, connect to `host: postgres` as user `postgres` with no password.
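Inside such a job, that connection could be exercised with the `pg` gem, for example (a sketch under the assumption that the gem is available in the job's image):

```ruby
require 'pg'

# Connects to the `postgres` service container using the defaults described above.
conn = PG.connect(host: 'postgres', user: 'postgres', password: '', dbname: 'postgres')
puts conn.exec('SELECT version();').first['version']
conn.close
```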
...@@ -4,8 +4,7 @@ type: reference
# JUnit test reports

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/45318) in GitLab 11.2. Requires GitLab Runner 11.2 and above.

## Overview
......
...@@ -5,8 +5,8 @@ last_update: 2019-07-03
# Merge Trains **(PREMIUM)**

> - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/9186) in [GitLab Premium](https://about.gitlab.com/pricing/) 12.0.
> - [Squash and merge](../../../../user/project/merge_requests/squash_and_merge.md) support [introduced](https://gitlab.com/gitlab-org/gitlab/issues/13001) in [GitLab Premium](https://about.gitlab.com/pricing/) 12.6.

[Pipelines for merged results](../index.md#pipelines-for-merged-results-premium) introduces
running a build on the result of the merged code prior to merging, as a way to keep master green.
......
...@@ -4,8 +4,7 @@ type: reference ...@@ -4,8 +4,7 @@ type: reference
# Metrics Reports **(PREMIUM)** # Metrics Reports **(PREMIUM)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/9788) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.10. > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/9788) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.10. Requires GitLab Runner 11.10 and above.
Requires GitLab Runner 11.10 and above.
## Overview ## Overview
......
...@@ -110,8 +110,7 @@ For example: ...@@ -110,8 +110,7 @@ For example:
### Expanding and collapsing job log sections ### Expanding and collapsing job log sections
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/14664) in GitLab > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/14664) in GitLab 12.0.
> 12.0.
Job logs are divided into sections that can be collapsed or expanded. Each section will display Job logs are divided into sections that can be collapsed or expanded. Each section will display
the duration. the duration.
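The markers are emitted from the job script itself. The sketch below assumes a Bash-compatible shell (`echo -e` interprets the escapes); the section name is arbitrary, and the trailing `\r\e[0K` hides the raw marker text in the rendered log:
```yaml
job:
  script:
    - echo -e "section_start:$(date +%s):prepare_env\r\e[0KPreparing the environment"
    - ./prepare-environment.sh   # placeholder for the commands that belong to the section
    - echo -e "section_end:$(date +%s):prepare_env\r\e[0K"
```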
......
...@@ -2218,8 +2218,7 @@ job: ...@@ -2218,8 +2218,7 @@ job:
#### `artifacts:reports` #### `artifacts:reports`
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/20390) in > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/20390) in GitLab 11.2. Requires GitLab Runner 11.2 and above.
GitLab 11.2. Requires GitLab Runner 11.2 and above.
The `reports` keyword is used for collecting test reports, code quality reports, and security reports from jobs. The `reports` keyword is used for collecting test reports, code quality reports, and security reports from jobs.
It also exposes these reports in GitLab's UI (merge requests, pipeline views, and security dashboards). It also exposes these reports in GitLab's UI (merge requests, pipeline views, and security dashboards).
...@@ -2235,8 +2234,7 @@ If you also want the ability to browse the report output files, include the ...@@ -2235,8 +2234,7 @@ If you also want the ability to browse the report output files, include the
##### `artifacts:reports:junit` ##### `artifacts:reports:junit`
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/20390) in > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/20390) in GitLab 11.2. Requires GitLab Runner 11.2 and above.
GitLab 11.2. Requires GitLab Runner 11.2 and above.
The `junit` report collects [JUnit XML files](https://www.ibm.com/support/knowledgecenter/en/SSQ2R2_14.1.0/com.ibm.rsar.analysis.codereview.cobol.doc/topics/cac_useresults_junit.html) The `junit` report collects [JUnit XML files](https://www.ibm.com/support/knowledgecenter/en/SSQ2R2_14.1.0/com.ibm.rsar.analysis.codereview.cobol.doc/topics/cac_useresults_junit.html)
as artifacts. Although JUnit was originally developed in Java, there are many as artifacts. Although JUnit was originally developed in Java, there are many
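For example, a Ruby job could produce and attach such a file roughly as follows; the sketch assumes the `rspec_junit_formatter` gem is available, and the output file name is arbitrary:
```yaml
rspec:
  stage: test
  script:
    - bundle exec rspec --format RspecJunitFormatter --out rspec.xml
  artifacts:
    reports:
      junit: rspec.xml
```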
...@@ -2288,8 +2286,7 @@ There are a couple of limitations on top of the [original dotenv rules](https:// ...@@ -2288,8 +2286,7 @@ There are a couple of limitations on top of the [original dotenv rules](https://
##### `artifacts:reports:cobertura` ##### `artifacts:reports:cobertura`
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/3708) in GitLab 12.9. > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/3708) in GitLab 12.9. Requires [GitLab Runner](https://docs.gitlab.com/runner/) 11.5 and above.
> Requires [GitLab Runner](https://docs.gitlab.com/runner/) 11.5 and above.
The `cobertura` report collects [Cobertura coverage XML files](../../user/project/merge_requests/test_coverage_visualization.md). The `cobertura` report collects [Cobertura coverage XML files](../../user/project/merge_requests/test_coverage_visualization.md).
The collected Cobertura coverage reports will be uploaded to GitLab as an artifact The collected Cobertura coverage reports will be uploaded to GitLab as an artifact
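A job only needs to point the report at wherever its coverage tool writes the Cobertura XML; the command and path below are placeholders:
```yaml
test:
  script:
    - run-test-suite-with-coverage   # placeholder: any tool that emits Cobertura XML
  artifacts:
    reports:
      cobertura: coverage/cobertura-coverage.xml
```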
...@@ -3620,9 +3617,11 @@ Learn more about [variables and their priority][variables]. ...@@ -3620,9 +3617,11 @@ Learn more about [variables and their priority][variables].
#### Git strategy #### Git strategy
> Introduced in GitLab 8.9 as an experimental feature. May change or be removed > - Introduced in GitLab 8.9 as an experimental feature.
> completely in future releases. `GIT_STRATEGY=none` requires GitLab Runner > - `GIT_STRATEGY=none` requires GitLab Runner v1.7+.
> v1.7+.
CAUTION: **Caution:**
May change or be removed completely in future releases.
You can set the `GIT_STRATEGY` used for getting recent application code, either You can set the `GIT_STRATEGY` used for getting recent application code, either
globally or per-job in the [`variables`](#variables) section. If left globally or per-job in the [`variables`](#variables) section. If left
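As a minimal sketch, setting it globally looks like this (placing it under a single job's `variables` works the same way):
```yaml
variables:
  GIT_STRATEGY: clone   # other accepted values are `fetch` and `none`
```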
...@@ -3783,8 +3782,10 @@ You can set them globally or per-job in the [`variables`](#variables) section. ...@@ -3783,8 +3782,10 @@ You can set them globally or per-job in the [`variables`](#variables) section.
#### Shallow cloning #### Shallow cloning
> Introduced in GitLab 8.9 as an experimental feature. May change in future > Introduced in GitLab 8.9 as an experimental feature.
releases or be removed completely.
CAUTION: **Caution:**
May change in future releases or be removed completely.
You can specify the depth of fetching and cloning using `GIT_DEPTH`. This allows You can specify the depth of fetching and cloning using `GIT_DEPTH`. This allows
shallow cloning of the repository which can significantly speed up cloning for shallow cloning of the repository which can significantly speed up cloning for
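For example, a sketch that limits history to the last few commits (the exact depth is up to you):
```yaml
variables:
  GIT_DEPTH: "10"   # only the last 10 commits are fetched or cloned
```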
......
...@@ -144,7 +144,7 @@ Component statuses are linked to configuration documentation for each component. ...@@ -144,7 +144,7 @@ Component statuses are linked to configuration documentation for each component.
| [GitLab self-monitoring: Sentry](#sentry) | Track errors generated by the GitLab instance | [][sentry-omnibus] | [][sentry-charts] | [][sentry-charts] | [](https://about.gitlab.com/handbook/support/workflows/500_errors.html#searching-sentry) | [][gitlab-yml] | [][gitlab-yml] | CE & EE | | [GitLab self-monitoring: Sentry](#sentry) | Track errors generated by the GitLab instance | [][sentry-omnibus] | [][sentry-charts] | [][sentry-charts] | [](https://about.gitlab.com/handbook/support/workflows/500_errors.html#searching-sentry) | [][gitlab-yml] | [][gitlab-yml] | CE & EE |
| [GitLab self-monitoring: Jaeger](#jaeger) | View traces generated by the GitLab instance | [][jaeger-omnibus] | [][jaeger-charts] | [][jaeger-charts] | [](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/4104) | [][jaeger-source] | [][jaeger-gdk] | CE & EE | | [GitLab self-monitoring: Jaeger](#jaeger) | View traces generated by the GitLab instance | [][jaeger-omnibus] | [][jaeger-charts] | [][jaeger-charts] | [](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/4104) | [][jaeger-source] | [][jaeger-gdk] | CE & EE |
| [Redis Exporter](#redis-exporter) | Prometheus endpoint with Redis metrics | [][redis-exporter-omnibus] | [][redis-exporter-charts] | [][redis-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE | | [Redis Exporter](#redis-exporter) | Prometheus endpoint with Redis metrics | [][redis-exporter-omnibus] | [][redis-exporter-charts] | [][redis-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE |
| [Postgres Exporter](#postgres-exporter) | Prometheus endpoint with PostgreSQL metrics | [][postgres-exporter-omnibus] | [][postgres-exporter-charts] | [][postgres-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE | | [PostgreSQL Exporter](#postgresql-exporter) | Prometheus endpoint with PostgreSQL metrics | [][postgres-exporter-omnibus] | [][postgres-exporter-charts] | [][postgres-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE |
| [PgBouncer Exporter](#pgbouncer-exporter) | Prometheus endpoint with PgBouncer metrics | [][pgbouncer-exporter-omnibus] | [][pgbouncer-exporter-charts] | [][pgbouncer-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE | | [PgBouncer Exporter](#pgbouncer-exporter) | Prometheus endpoint with PgBouncer metrics | [][pgbouncer-exporter-omnibus] | [][pgbouncer-exporter-charts] | [][pgbouncer-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE |
| [GitLab Exporter](#gitlab-exporter) | Generates a variety of GitLab metrics | [][gitlab-exporter-omnibus] | [][gitlab-exporter-charts] | [][gitlab-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE | | [GitLab Exporter](#gitlab-exporter) | Generates a variety of GitLab metrics | [][gitlab-exporter-omnibus] | [][gitlab-exporter-charts] | [][gitlab-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE |
| [Node Exporter](#node-exporter) | Prometheus endpoint with system metrics | [][node-exporter-omnibus] | [N/A][node-exporter-charts] | [N/A][node-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE | | [Node Exporter](#node-exporter) | Prometheus endpoint with system metrics | [][node-exporter-omnibus] | [N/A][node-exporter-charts] | [N/A][node-exporter-charts] | [](https://about.gitlab.com/handbook/engineering/monitoring/) | ❌ | ❌ | CE & EE |
...@@ -366,14 +366,14 @@ Prometheus exporter for PgBouncer. Exports metrics at 9127/metrics. ...@@ -366,14 +366,14 @@ Prometheus exporter for PgBouncer. Exports metrics at 9127/metrics.
GitLab packages the popular PostgreSQL database to provide storage for application metadata and user information. GitLab packages the popular PostgreSQL database to provide storage for application metadata and user information.
#### Postgres Exporter #### PostgreSQL Exporter
- [Project page](https://github.com/wrouesnel/postgres_exporter/blob/master/README.md) - [Project page](https://github.com/wrouesnel/postgres_exporter/blob/master/README.md)
- Configuration: [Omnibus][postgres-exporter-omnibus], [Charts][postgres-exporter-charts] - Configuration: [Omnibus][postgres-exporter-omnibus], [Charts][postgres-exporter-charts]
- Layer: Monitoring - Layer: Monitoring
- Process: `postgres-exporter` - Process: `postgres-exporter`
[Postgres-exporter](https://github.com/wrouesnel/postgres_exporter) is the community provided Prometheus exporter that will deliver data about Postgres to Prometheus for use in Grafana Dashboards. [`postgres_exporter`](https://github.com/wrouesnel/postgres_exporter) is the community provided Prometheus exporter that will deliver data about PostgreSQL to Prometheus for use in Grafana Dashboards.
#### Prometheus #### Prometheus
...@@ -486,7 +486,7 @@ When making a request to an HTTP Endpoint (think `/users/sign_in`) the request w ...@@ -486,7 +486,7 @@ When making a request to an HTTP Endpoint (think `/users/sign_in`) the request w
- NGINX - Acts as our first line reverse proxy. - NGINX - Acts as our first line reverse proxy.
- GitLab Workhorse - This determines if it needs to go to the Rails application or somewhere else to reduce load on Unicorn. - GitLab Workhorse - This determines if it needs to go to the Rails application or somewhere else to reduce load on Unicorn.
- Unicorn - Since this is a web request, and it needs to access the application it will go to Unicorn. - Unicorn - Since this is a web request, and it needs to access the application it will go to Unicorn.
- Postgres/Gitaly/Redis - Depending on the type of request, it may hit these services to store or retrieve data. - PostgreSQL/Gitaly/Redis - Depending on the type of request, it may hit these services to store or retrieve data.
### GitLab Git Request Cycle ### GitLab Git Request Cycle
......
...@@ -26,7 +26,7 @@ The diffs fetching process _limits_ single file diff sizes and the overall size ...@@ -26,7 +26,7 @@ The diffs fetching process _limits_ single file diff sizes and the overall size
then persisted on `merge_request_diff_files` table. then persisted on `merge_request_diff_files` table.
Even though diffs larger than 10% of the value of `ApplicationSettings#diff_max_patch_bytes` are collapsed, Even though diffs larger than 10% of the value of `ApplicationSettings#diff_max_patch_bytes` are collapsed,
we still keep them on Postgres. However, diff files larger than defined _safety limits_ we still keep them on PostgreSQL. However, diff files larger than defined _safety limits_
(see the [Diff limits section](#diff-limits)) are _not_ persisted in the database. (see the [Diff limits section](#diff-limits)) are _not_ persisted in the database.
In order to present diffs information on the Merge Request diffs page, we: In order to present diffs information on the Merge Request diffs page, we:
......
...@@ -1132,26 +1132,28 @@ a helpful link back to how the feature was developed. ...@@ -1132,26 +1132,28 @@ a helpful link back to how the feature was developed.
- For features that need to declare the GitLab version in which the feature was introduced. Text similar - For features that need to declare the GitLab version in which the feature was introduced. Text similar
to the following should be added immediately below the heading as a blockquote: to the following should be added immediately below the heading as a blockquote:
- `> Introduced in GitLab 11.3.`.
```md
> Introduced in GitLab 11.3.
```
- Whenever possible, version text should have a link to the issue, merge request, or epic that introduced the feature. - Whenever possible, version text should have a link to the issue, merge request, or epic that introduced the feature.
An issue is preferred over a merge request, and a merge request is preferred over an epic. For example: An issue is preferred over a merge request, and a merge request is preferred over an epic. For example:
- `> [Introduced](<link-to-issue>) in GitLab 11.3.`.
```md
> [Introduced](<link-to-issue>) in GitLab 11.3.
```
- If the feature is only available in GitLab Enterprise Edition, mention - If the feature is only available in GitLab Enterprise Edition, mention
the [paid tier](https://about.gitlab.com/handbook/marketing/product-marketing/#tiers) the [paid tier](https://about.gitlab.com/handbook/marketing/product-marketing/#tiers)
the feature is available in: the feature is available in:
- `> [Introduced](<link-to-issue>) in [GitLab Starter](https://about.gitlab.com/pricing/) 11.3.`.
- If listing information for multiple versions as a feature evolves, add the information to a
block-quoted bullet list. For example:
```md ```md
> [Introduced](<link-to-issue>) in [GitLab Starter](https://about.gitlab.com/pricing/) 11.3. > - [Introduced](<link-to-issue>) in GitLab 11.3.
> - Enabled by default in GitLab 11.4.
``` ```
NOTE: **Note:**
Version text must be on its own line and surrounded by blank lines to render correctly.
### Importance of referencing GitLab versions and tiers ### Importance of referencing GitLab versions and tiers
Mentioning GitLab versions and tiers is important to all users and contributors Mentioning GitLab versions and tiers is important to all users and contributors
......
...@@ -80,7 +80,7 @@ it did not improve query performance. ...@@ -80,7 +80,7 @@ it did not improve query performance.
## Attempt B: Denormalize using an array column ## Attempt B: Denormalize using an array column
Having [removed MySQL support in GitLab 12.1](https://about.gitlab.com/blog/2019/06/27/removing-mysql-support/), Having [removed MySQL support in GitLab 12.1](https://about.gitlab.com/blog/2019/06/27/removing-mysql-support/),
using [Postgres's arrays](https://www.postgresql.org/docs/9.6/arrays.html) became more using [PostgreSQL's arrays](https://www.postgresql.org/docs/9.6/arrays.html) became more
tractable as we didn't have to support two databases. We discussed denormalizing tractable as we didn't have to support two databases. We discussed denormalizing
the `label_links` table for querying in the `label_links` table for querying in
[issue #49651](https://gitlab.com/gitlab-org/gitlab-foss/issues/49651), [issue #49651](https://gitlab.com/gitlab-org/gitlab-foss/issues/49651),
......
...@@ -205,7 +205,7 @@ Using [`ReactiveCaching`](utilities.md#reactivecaching) is one of the best solut ...@@ -205,7 +205,7 @@ Using [`ReactiveCaching`](utilities.md#reactivecaching) is one of the best solut
**Summary:** You should avoid accessing external services like Gitaly during database **Summary:** You should avoid accessing external services like Gitaly during database
transactions; otherwise, it leads to severe contention problems transactions; otherwise, it leads to severe contention problems
as an open transaction basically blocks the release of a Postgres backend connection. as an open transaction basically blocks the release of a PostgreSQL backend connection.
To keep transactions as minimal as possible, please consider using `AfterCommitQueue` To keep transactions as minimal as possible, please consider using `AfterCommitQueue`
module or `after_commit` AR hook. module or `after_commit` AR hook.
......
...@@ -181,7 +181,7 @@ execution latency requirements (but also has lower scheduling targets). ...@@ -181,7 +181,7 @@ execution latency requirements (but also has lower scheduling targets).
## Jobs with External Dependencies ## Jobs with External Dependencies
Most background jobs in the GitLab application communicate with other GitLab Most background jobs in the GitLab application communicate with other GitLab
services. For example, Postgres, Redis, Gitaly, and Object Storage. These are considered services. For example, PostgreSQL, Redis, Gitaly, and Object Storage. These are considered
to be "internal" dependencies for a job. to be "internal" dependencies for a job.
However, some jobs will be dependent on external services in order to complete However, some jobs will be dependent on external services in order to complete
...@@ -224,7 +224,7 @@ Workers that are constrained by CPU or memory resource limitations should be ...@@ -224,7 +224,7 @@ Workers that are constrained by CPU or memory resource limitations should be
annotated with the `worker_resource_boundary` method. annotated with the `worker_resource_boundary` method.
Most workers tend to spend most of their time blocked, waiting on network responses Most workers tend to spend most of their time blocked, waiting on network responses
from other services such as Redis, Postgres and Gitaly. Since Sidekiq is a from other services such as Redis, PostgreSQL, and Gitaly. Since Sidekiq is a
multithreaded environment, these jobs can be scheduled with high concurrency. multithreaded environment, these jobs can be scheduled with high concurrency.
Some workers, however, spend large amounts of time _on-CPU_ running logic in Some workers, however, spend large amounts of time _on-CPU_ running logic in
......
...@@ -651,8 +651,8 @@ different queries. The only _rule_ is that you _must always measure_ your query ...@@ -651,8 +651,8 @@ different queries. The only _rule_ is that you _must always measure_ your query
(preferably using a production-like database) using `EXPLAIN (ANALYZE, BUFFERS)` (preferably using a production-like database) using `EXPLAIN (ANALYZE, BUFFERS)`
and related tools such as: and related tools such as:
- <https://explain.depesz.com/> - [`explain.depesz.com`](https://explain.depesz.com/).
- <http://tatiyants.com/postgres-query-plan-visualization/> - [Pev](http://tatiyants.com/postgres-query-plan-visualization/).
## Producing query plans ## Producing query plans
...@@ -707,7 +707,13 @@ For more information about the available options, run: ...@@ -707,7 +707,13 @@ For more information about the available options, run:
### `#database-lab` ### `#database-lab`
Another tool GitLab employees can use is a chatbot powered by [Joe](https://gitlab.com/postgres-ai/joe) which uses [Database Lab](https://gitlab.com/postgres-ai/database-lab) to instantly provide developers with their own clone of the production database. Joe is available in the [`#database-lab`](https://gitlab.slack.com/archives/CLJMDRD8C) channel on Slack. Another tool GitLab employees can use is a chatbot powered by [Joe](https://gitlab.com/postgres-ai/joe)
which uses [Database Lab](https://gitlab.com/postgres-ai/database-lab) to instantly provide developers
with their own clone of the production database.
Joe is available in the
[`#database-lab`](https://gitlab.slack.com/archives/CLJMDRD8C) channel on Slack.
Unlike chatops, it gives you a way to execute DDL statements (like creating indexes and tables) and get query plans not only for `SELECT` but also for `UPDATE` and `DELETE`. Unlike chatops, it gives you a way to execute DDL statements (like creating indexes and tables) and get query plans not only for `SELECT` but also for `UPDATE` and `DELETE`.
For example, in order to test a new index you can do the following: For example, in order to test a new index you can do the following:
......
...@@ -21,7 +21,7 @@ The `JenkinsService` and `GithubService` classes are only available in the Enter ...@@ -21,7 +21,7 @@ The `JenkinsService` and `GithubService` classes are only available in the Enter
so if you downgrade to the Community Edition, you'll come across the following so if you downgrade to the Community Edition, you'll come across the following
error: error:
``` ```plaintext
Completed 500 Internal Server Error in 497ms (ActiveRecord: 32.2ms) Completed 500 Internal Server Error in 497ms (ActiveRecord: 32.2ms)
ActionView::Template::Error (The single-table inheritance mechanism failed to locate the subclass: 'JenkinsService'. This ActionView::Template::Error (The single-table inheritance mechanism failed to locate the subclass: 'JenkinsService'. This
...@@ -32,7 +32,7 @@ use another column for that information.) ...@@ -32,7 +32,7 @@ use another column for that information.)
or or
``` ```plaintext
Completed 500 Internal Server Error in 497ms (ActiveRecord: 32.2ms) Completed 500 Internal Server Error in 497ms (ActiveRecord: 32.2ms)
ActionView::Template::Error (The single-table inheritance mechanism failed to locate the subclass: 'GithubService'. This ActionView::Template::Error (The single-table inheritance mechanism failed to locate the subclass: 'GithubService'. This
......
...@@ -43,7 +43,7 @@ The list below is not exhaustive, but contains many of the most commonly used co ...@@ -43,7 +43,7 @@ The list below is not exhaustive, but contains many of the most commonly used co
To create a text file from the command line, for example `README.md`, follow these To create a text file from the command line, for example `README.md`, follow these
steps: steps:
``` ```shell
touch README.md touch README.md
nano README.md nano README.md
#### ADD YOUR INFORMATION #### ADD YOUR INFORMATION
...@@ -59,14 +59,14 @@ It is easy to delete (remove) a file or directory, but be careful: ...@@ -59,14 +59,14 @@ It is easy to delete (remove) a file or directory, but be careful:
DANGER: **Danger:** DANGER: **Danger:**
This will **permanently** delete a file. This will **permanently** delete a file.
``` ```shell
rm NAME-OF-FILE rm NAME-OF-FILE
``` ```
DANGER: **Danger:** DANGER: **Danger:**
This will **permanently** delete a directory and **all** of its contents. This will **permanently** delete a directory and **all** of its contents.
``` ```shell
rm -r NAME-OF-DIRECTORY rm -r NAME-OF-DIRECTORY
``` ```
...@@ -77,14 +77,14 @@ and then execute any of them again, if needed. ...@@ -77,14 +77,14 @@ and then execute any of them again, if needed.
First, list the commands you executed previously: First, list the commands you executed previously:
``` ```shell
history history
``` ```
Then, choose a command from the list and check the number next to the command (`123`, Then, choose a command from the list and check the number next to the command (`123`,
for example). Execute the same full command with: for example). Execute the same full command with:
``` ```shell
!123 !123
``` ```
...@@ -95,7 +95,7 @@ need administrator's rights to execute commands that affect the system, or try t ...@@ -95,7 +95,7 @@ need administrator's rights to execute commands that affect the system, or try t
protected data, for example. You can use `sudo` to execute these commands, but you protected data, for example. You can use `sudo` to execute these commands, but you
will likely be asked for an administrator password. will likely be asked for an administrator password.
``` ```shell
sudo RESTRICTED-COMMAND sudo RESTRICTED-COMMAND
``` ```
......
...@@ -87,8 +87,7 @@ You can improve the existing built-in templates or contribute new ones in the ...@@ -87,8 +87,7 @@ You can improve the existing built-in templates or contribute new ones in the
#### Custom project templates **(PREMIUM)** #### Custom project templates **(PREMIUM)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/6860) in > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/6860) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.2.
[GitLab Premium](https://about.gitlab.com/pricing/) 11.2.
Creating new projects based on custom project templates is a convenient option for Creating new projects based on custom project templates is a convenient option for
quickly starting projects. quickly starting projects.
......
...@@ -487,7 +487,7 @@ gitlab=# \q ...@@ -487,7 +487,7 @@ gitlab=# \q
--- ---
### Configuring GitLab to connect with postgres and Redis ### Configuring GitLab to connect with PostgreSQL and Redis
Edit the `gitlab.rb` file at `/etc/gitlab/gitlab.rb`, Edit the `gitlab.rb` file at `/etc/gitlab/gitlab.rb`,
find the `external_url 'http://gitlab.example.com'` option and change it find the `external_url 'http://gitlab.example.com'` option and change it
......
...@@ -376,7 +376,7 @@ terminal window: ...@@ -376,7 +376,7 @@ terminal window:
Once the update process has completed, you'll see a message like this: Once the update process has completed, you'll see a message like this:
``` ```plaintext
Upgrade complete! If your GitLab server is misbehaving try running Upgrade complete! If your GitLab server is misbehaving try running
sudo gitlab-ctl restart sudo gitlab-ctl restart
......
...@@ -90,14 +90,14 @@ here's how you configure GitLab to be aware of the change: ...@@ -90,14 +90,14 @@ here's how you configure GitLab to be aware of the change:
1. Edit the config file of Omnibus GitLab using your favorite text editor: 1. Edit the config file of Omnibus GitLab using your favorite text editor:
``` ```shell
sudo vim /etc/gitlab/gitlab.rb sudo vim /etc/gitlab/gitlab.rb
``` ```
1. Set the `external_url` value to the domain name you wish GitLab to have 1. Set the `external_url` value to the domain name you wish GitLab to have
**without** `https`: **without** `https`:
``` ```ruby
external_url 'http://gitlab.example.com' external_url 'http://gitlab.example.com'
``` ```
...@@ -105,7 +105,7 @@ here's how you configure GitLab to be aware of the change: ...@@ -105,7 +105,7 @@ here's how you configure GitLab to be aware of the change:
1. Reconfigure GitLab for the changes to take effect: 1. Reconfigure GitLab for the changes to take effect:
``` ```shell
sudo gitlab-ctl reconfigure sudo gitlab-ctl reconfigure
``` ```
......
...@@ -47,7 +47,7 @@ If the highest number stable branch is unclear, check the [GitLab blog](https:// ...@@ -47,7 +47,7 @@ If the highest number stable branch is unclear, check the [GitLab blog](https://
This is the main directory structure you will end up with following the instructions This is the main directory structure you will end up with following the instructions
of this page: of this page:
``` ```plaintext
|-- home |-- home
| |-- git | |-- git
| |-- .ssh | |-- .ssh
...@@ -147,7 +147,7 @@ ldd /usr/local/bin/git | grep pcre2 ...@@ -147,7 +147,7 @@ ldd /usr/local/bin/git | grep pcre2
The output should be similar to: The output should be similar to:
``` ```plaintext
libpcre2-8.so.0 => /usr/lib/libpcre2-8.so.0 (0x00007f08461c3000) libpcre2-8.so.0 => /usr/lib/libpcre2-8.so.0 (0x00007f08461c3000)
``` ```
...@@ -904,7 +904,7 @@ for the changes to take effect. ...@@ -904,7 +904,7 @@ for the changes to take effect.
If you'd like to connect to a Redis server on a non-standard port or a different host, you can configure its connection string via the `config/resque.yml` file. If you'd like to connect to a Redis server on a non-standard port or a different host, you can configure its connection string via the `config/resque.yml` file.
``` ```yaml
# example # example
production: production:
url: redis://redis.example.tld:6379 url: redis://redis.example.tld:6379
...@@ -912,7 +912,7 @@ production: ...@@ -912,7 +912,7 @@ production:
If you want to connect the Redis server via socket, use the "unix:" URL scheme and the path to the Redis socket file in the `config/resque.yml` file. If you want to connect the Redis server via socket, use the "unix:" URL scheme and the path to the Redis socket file in the `config/resque.yml` file.
``` ```yaml
# example # example
production: production:
url: unix:/path/to/redis/socket url: unix:/path/to/redis/socket
...@@ -920,7 +920,7 @@ production: ...@@ -920,7 +920,7 @@ production:
Also, you can use environment variables in the `config/resque.yml` file: Also, you can use environment variables in the `config/resque.yml` file:
``` ```yaml
# example # example
production: production:
url: <%= ENV.fetch('GITLAB_REDIS_URL') %> url: <%= ENV.fetch('GITLAB_REDIS_URL') %>
...@@ -930,7 +930,7 @@ production: ...@@ -930,7 +930,7 @@ production:
If you are running SSH on a non-standard port, you must change the GitLab user's SSH config. If you are running SSH on a non-standard port, you must change the GitLab user's SSH config.
``` ```plaintext
# Add to /home/git/.ssh/config # Add to /home/git/.ssh/config
host localhost # Give your setup a name (here: override localhost) host localhost # Give your setup a name (here: override localhost)
user git # Your remote git user user git # Your remote git user
......
...@@ -184,7 +184,7 @@ Sometimes though, you may encounter some issues, like OpenShift not running ...@@ -184,7 +184,7 @@ Sometimes though, you may encounter some issues, like OpenShift not running
when booting up the VM. The web UI may not be responding or you may see issues when booting up the VM. The web UI may not be responding or you may see issues
when trying to log in with `oc`, like: when trying to log in with `oc`, like:
``` ```plaintext
The connection to the server 10.2.2.2:8443 was refused - did you specify the right host or port? The connection to the server 10.2.2.2:8443 was refused - did you specify the right host or port?
``` ```
...@@ -403,7 +403,7 @@ Let's see how to do that using the following steps. ...@@ -403,7 +403,7 @@ Let's see how to do that using the following steps.
The output will be similar to: The output will be similar to:
``` ```plaintext
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
gitlab-ce 172.30.243.177 <none> 22/TCP,80/TCP 5d gitlab-ce 172.30.243.177 <none> 22/TCP,80/TCP 5d
gitlab-ce-postgresql 172.30.116.75 <none> 5432/TCP 5d gitlab-ce-postgresql 172.30.116.75 <none> 5432/TCP 5d
...@@ -436,7 +436,7 @@ Let's see how to do that using the following steps. ...@@ -436,7 +436,7 @@ Let's see how to do that using the following steps.
which will return something like: which will return something like:
``` ```plaintext
NAME DESIRED CURRENT AGE NAME DESIRED CURRENT AGE
gitlab-ce-2 2 2 5d gitlab-ce-2 2 2 5d
``` ```
......
...@@ -147,7 +147,7 @@ Users using PostgreSQL must ensure the `pg_trgm` extension is loaded into every ...@@ -147,7 +147,7 @@ Users using PostgreSQL must ensure the `pg_trgm` extension is loaded into every
GitLab database. This extension can be enabled (using a PostgreSQL super user) GitLab database. This extension can be enabled (using a PostgreSQL super user)
by running the following query for every database: by running the following query for every database:
``` ```sql
CREATE EXTENSION pg_trgm; CREATE EXTENSION pg_trgm;
``` ```
...@@ -170,7 +170,7 @@ If you are using [GitLab Geo](../development/geo.md): ...@@ -170,7 +170,7 @@ If you are using [GitLab Geo](../development/geo.md):
[postgres_fdw](https://www.postgresql.org/docs/9.6/postgres-fdw.html) [postgres_fdw](https://www.postgresql.org/docs/9.6/postgres-fdw.html)
extension. extension.
``` ```sql
CREATE EXTENSION postgres_fdw; CREATE EXTENSION postgres_fdw;
``` ```
......
...@@ -67,7 +67,7 @@ To enable the Microsoft Azure OAuth2 OmniAuth provider you must register your ap ...@@ -67,7 +67,7 @@ To enable the Microsoft Azure OAuth2 OmniAuth provider you must register your ap
For installations from source: For installations from source:
``` ```yaml
- { name: 'azure_oauth2', - { name: 'azure_oauth2',
args: { client_id: "CLIENT ID", args: { client_id: "CLIENT ID",
client_secret: "CLIENT SECRET", client_secret: "CLIENT SECRET",
......
...@@ -57,7 +57,7 @@ you to use. ...@@ -57,7 +57,7 @@ you to use.
And grant at least the following permissions: And grant at least the following permissions:
``` ```plaintext
Account: Email, Read Account: Email, Read
Projects: Read Projects: Read
Repositories: Read Repositories: Read
...@@ -77,7 +77,7 @@ you to use. ...@@ -77,7 +77,7 @@ you to use.
1. On your GitLab server, open the configuration file: 1. On your GitLab server, open the configuration file:
``` ```shell
# For Omnibus packages # For Omnibus packages
sudo editor /etc/gitlab/gitlab.rb sudo editor /etc/gitlab/gitlab.rb
......
...@@ -41,7 +41,7 @@ To enable the CAS OmniAuth provider you must register your application with your ...@@ -41,7 +41,7 @@ To enable the CAS OmniAuth provider you must register your application with your
For installations from source: For installations from source:
``` ```yaml
- { name: 'cas3', - { name: 'cas3',
label: 'cas', label: 'cas',
args: { args: {
......
# Elasticsearch integration **(STARTER ONLY)** # Elasticsearch integration **(STARTER ONLY)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/109 "Elasticsearch Merge Request") in GitLab [Starter](https://about.gitlab.com/pricing/) 8.4. Support > - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/109 "Elasticsearch Merge Request") in GitLab [Starter](https://about.gitlab.com/pricing/) 8.4.
> for [Amazon Elasticsearch](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-gsg.html) was [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1305) in GitLab > - Support for [Amazon Elasticsearch](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-gsg.html) was [introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1305) in GitLab [Starter](https://about.gitlab.com/pricing/) 9.0.
> [Starter](https://about.gitlab.com/pricing/) 9.0.
This document describes how to set up Elasticsearch with GitLab. Once enabled, This document describes how to set up Elasticsearch with GitLab. Once enabled,
you'll have the benefit of fast search response times and the advantage of two you'll have the benefit of fast search response times and the advantage of two
...@@ -563,7 +562,7 @@ Here are some common pitfalls and how to overcome them: ...@@ -563,7 +562,7 @@ Here are some common pitfalls and how to overcome them:
If you enabled Elasticsearch before GitLab 8.12 and have not rebuilt indexes, you will get If you enabled Elasticsearch before GitLab 8.12 and have not rebuilt indexes, you will get
exceptions in lots of different cases: exceptions in lots of different cases:
```text ```plaintext
Elasticsearch::Transport::Transport::Errors::BadRequest([400] { Elasticsearch::Transport::Transport::Errors::BadRequest([400] {
"error": { "error": {
"root_cause": [{ "root_cause": [{
...@@ -587,7 +586,7 @@ Here are some common pitfalls and how to overcome them: ...@@ -587,7 +586,7 @@ Here are some common pitfalls and how to overcome them:
- Exception `Elasticsearch::Transport::Transport::Errors::RequestEntityTooLarge` - Exception `Elasticsearch::Transport::Transport::Errors::RequestEntityTooLarge`
```text ```plaintext
[413] {"Message":"Request size exceeded 10485760 bytes"} [413] {"Message":"Request size exceeded 10485760 bytes"}
``` ```
...@@ -619,7 +618,7 @@ Here are some common pitfalls and how to overcome them: ...@@ -619,7 +618,7 @@ Here are some common pitfalls and how to overcome them:
- **I'm getting a `health check timeout: no Elasticsearch node available` error in Sidekiq during the indexing process** - **I'm getting a `health check timeout: no Elasticsearch node available` error in Sidekiq during the indexing process**
``` ```plaintext
Gitlab::Elastic::Indexer::Error: time="2020-01-23T09:13:00Z" level=fatal msg="health check timeout: no Elasticsearch node available" Gitlab::Elastic::Indexer::Error: time="2020-01-23T09:13:00Z" level=fatal msg="health check timeout: no Elasticsearch node available"
``` ```
...@@ -632,5 +631,5 @@ Sometimes there may be issues with your Elasticsearch index data and as such ...@@ -632,5 +631,5 @@ Sometimes there may be issues with your Elasticsearch index data and as such
GitLab will allow you to revert to "basic search" when there are no search GitLab will allow you to revert to "basic search" when there are no search
results and assuming that basic search is supported in that scope. This "basic results and assuming that basic search is supported in that scope. This "basic
search" will behave as though you don't have Elasticsearch enabled at all for search" will behave as though you don't have Elasticsearch enabled at all for
your instance and search using other data sources (ie. Postgres data and Git your instance and search using other data sources (ie. PostgreSQL data and Git
data). data).
...@@ -81,7 +81,7 @@ To enable the Facebook OmniAuth provider you must register your application with ...@@ -81,7 +81,7 @@ To enable the Facebook OmniAuth provider you must register your application with
For installations from source: For installations from source:
``` ```yaml
- { name: 'facebook', app_id: 'YOUR_APP_ID', - { name: 'facebook', app_id: 'YOUR_APP_ID',
app_secret: 'YOUR_APP_SECRET' } app_secret: 'YOUR_APP_SECRET' }
``` ```
......
...@@ -69,7 +69,7 @@ Follow these steps to incorporate the GitHub OAuth 2 app in your GitLab server: ...@@ -69,7 +69,7 @@ Follow these steps to incorporate the GitHub OAuth 2 app in your GitLab server:
For GitHub.com: For GitHub.com:
``` ```yaml
- { name: 'github', app_id: 'YOUR_APP_ID', - { name: 'github', app_id: 'YOUR_APP_ID',
app_secret: 'YOUR_APP_SECRET', app_secret: 'YOUR_APP_SECRET',
args: { scope: 'user:email' } } args: { scope: 'user:email' } }
...@@ -77,7 +77,7 @@ Follow these steps to incorporate the GitHub OAuth 2 app in your GitLab server: ...@@ -77,7 +77,7 @@ Follow these steps to incorporate the GitHub OAuth 2 app in your GitLab server:
For GitHub Enterprise: For GitHub Enterprise:
``` ```yaml
- { name: 'github', app_id: 'YOUR_APP_ID', - { name: 'github', app_id: 'YOUR_APP_ID',
app_secret: 'YOUR_APP_SECRET', app_secret: 'YOUR_APP_SECRET',
url: "https://github.example.com/", url: "https://github.example.com/",
...@@ -124,7 +124,7 @@ omnibus_gitconfig['system'] = { "http" => ["sslVerify = false"] } ...@@ -124,7 +124,7 @@ omnibus_gitconfig['system'] = { "http" => ["sslVerify = false"] }
For installation from source: For installation from source:
``` ```yaml
- { name: 'github', app_id: 'YOUR_APP_ID', - { name: 'github', app_id: 'YOUR_APP_ID',
app_secret: 'YOUR_APP_SECRET', app_secret: 'YOUR_APP_SECRET',
url: "https://github.example.com/", url: "https://github.example.com/",
...@@ -134,7 +134,7 @@ For installation from source: ...@@ -134,7 +134,7 @@ For installation from source:
You will also need to disable Git SSL verification on the server hosting GitLab. You will also need to disable Git SSL verification on the server hosting GitLab.
``` ```shell
git config --global http.sslVerify false git config --global http.sslVerify false
``` ```
......
...@@ -850,7 +850,7 @@ Example: Amazon EBS ...@@ -850,7 +850,7 @@ Example: Amazon EBS
> A GitLab server using Omnibus GitLab hosted on Amazon AWS. > A GitLab server using Omnibus GitLab hosted on Amazon AWS.
> An EBS drive containing an ext4 filesystem is mounted at `/var/opt/gitlab`. > An EBS drive containing an ext4 filesystem is mounted at `/var/opt/gitlab`.
> In this case you could make an application backup by taking an EBS snapshot. > In this case you could make an application backup by taking an EBS snapshot.
> The backup includes all repositories, uploads and Postgres data. > The backup includes all repositories, uploads and PostgreSQL data.
Example: LVM snapshots + rsync Example: LVM snapshots + rsync
...@@ -858,7 +858,7 @@ Example: LVM snapshots + rsync ...@@ -858,7 +858,7 @@ Example: LVM snapshots + rsync
> Replicating the `/var/opt/gitlab` directory using rsync would not be reliable because too many files would change while rsync is running. > Replicating the `/var/opt/gitlab` directory using rsync would not be reliable because too many files would change while rsync is running.
> Instead of rsync-ing `/var/opt/gitlab`, we create a temporary LVM snapshot, which we mount as a read-only filesystem at `/mnt/gitlab_backup`. > Instead of rsync-ing `/var/opt/gitlab`, we create a temporary LVM snapshot, which we mount as a read-only filesystem at `/mnt/gitlab_backup`.
> Now we can have a longer running rsync job which will create a consistent replica on the remote server. > Now we can have a longer running rsync job which will create a consistent replica on the remote server.
> The replica includes all repositories, uploads and Postgres data. > The replica includes all repositories, uploads and PostgreSQL data.
If you are running GitLab on a virtualized server you can possibly also create VM snapshots of the entire GitLab server. If you are running GitLab on a virtualized server you can possibly also create VM snapshots of the entire GitLab server.
It is not uncommon, however, for a VM snapshot to require you to power down the server, so this approach is probably of limited practical use. It is not uncommon, however, for a VM snapshot to require you to power down the server, so this approach is probably of limited practical use.
......
...@@ -373,7 +373,7 @@ Either way, the resulting Docker image is automatically pushed to the ...@@ -373,7 +373,7 @@ Either way, the resulting Docker image is automatically pushed to the
#### Auto Build using a Dockerfile #### Auto Build using a Dockerfile
If a project's repository contains a `Dockerfile`, Auto Build will use If a project's repository contains a `Dockerfile` at its root, Auto Build will use
`docker build` to create a Docker image. `docker build` to create a Docker image.
If you are also using Auto Review Apps and Auto Deploy and choose to provide If you are also using Auto Review Apps and Auto Deploy and choose to provide
...@@ -1351,8 +1351,7 @@ service: ...@@ -1351,8 +1351,7 @@ service:
#### Deploy policy for staging and production environments #### Deploy policy for staging and production environments
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ci-yml/-/merge_requests/160) > [Introduced](https://gitlab.com/gitlab-org/gitlab-ci-yml/-/merge_requests/160) in GitLab 10.8.
in GitLab 10.8.
TIP: **Tip:** TIP: **Tip:**
You can also set this inside your [project's settings](#deployment-strategy). You can also set this inside your [project's settings](#deployment-strategy).
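As a sketch, and assuming the Auto DevOps template is in use, defining the `STAGING_ENABLED` variable enables the staging-first policy so that production becomes a manual deployment:
```yaml
variables:
  STAGING_ENABLED: "1"
```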
...@@ -1370,8 +1369,7 @@ you when you're ready to manually deploy to production. ...@@ -1370,8 +1369,7 @@ you when you're ready to manually deploy to production.
#### Deploy policy for canary environments **(PREMIUM)** #### Deploy policy for canary environments **(PREMIUM)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ci-yml/-/merge_requests/171) > [Introduced](https://gitlab.com/gitlab-org/gitlab-ci-yml/-/merge_requests/171) in GitLab 11.0.
in GitLab 11.0.
A [canary environment](../../user/project/canary_deployments.md) can be used A [canary environment](../../user/project/canary_deployments.md) can be used
before any changes are deployed to production. before any changes are deployed to production.
......
...@@ -212,7 +212,7 @@ specific environments, e.g. `staging`. ...@@ -212,7 +212,7 @@ specific environments, e.g. `staging`.
1. Once connected to the pod, run the following command to restore the database. 1. Once connected to the pod, run the following command to restore the database.
- You will be asked for the database password, the default is `testing-password`. - You will be asked for the database password, the default is `testing-password`.
- `USERNAME` is the username you have configured for postgres. The default is `user`. - `USERNAME` is the username you have configured for PostgreSQL. The default is `user`.
- `DATABASE_NAME` is usually the environment name. - `DATABASE_NAME` is usually the environment name.
```sh ```sh
......
...@@ -18,7 +18,6 @@ details. ...@@ -18,7 +18,6 @@ details.
## Repository size limit **(STARTER ONLY)** ## Repository size limit **(STARTER ONLY)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/740) in [GitLab Enterprise Edition 8.12](https://about.gitlab.com/releases/2016/09/22/gitlab-8-12-released/#limit-project-size-ee). > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/740) in [GitLab Enterprise Edition 8.12](https://about.gitlab.com/releases/2016/09/22/gitlab-8-12-released/#limit-project-size-ee).
> Available in [GitLab Starter](https://about.gitlab.com/pricing/).
Repositories within your GitLab instance can grow quickly, especially if you are Repositories within your GitLab instance can grow quickly, especially if you are
using LFS. Their size can grow exponentially, rapidly consuming available storage. using LFS. Their size can grow exponentially, rapidly consuming available storage.
......
...@@ -86,8 +86,7 @@ artifacts, as described in the [troubleshooting documentation](../../../administ ...@@ -86,8 +86,7 @@ artifacts, as described in the [troubleshooting documentation](../../../administ
## Shared Runners pipeline minutes quota **(STARTER ONLY)** ## Shared Runners pipeline minutes quota **(STARTER ONLY)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1078) > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1078) in GitLab Starter 8.16.
in GitLab Starter 8.16.
If you have enabled shared Runners for your GitLab instance, you can limit their If you have enabled shared Runners for your GitLab instance, you can limit their
usage by setting a maximum number of pipeline minutes that a group can use on usage by setting a maximum number of pipeline minutes that a group can use on
......
...@@ -4,10 +4,8 @@ type: reference ...@@ -4,10 +4,8 @@ type: reference
# External authorization control **(CORE ONLY)** # External authorization control **(CORE ONLY)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4216) in > - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4216) in [GitLab Premium](https://about.gitlab.com/pricing/) 10.6.
> [GitLab Premium](https://about.gitlab.com/pricing/) 10.6. > - [Moved](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/27056) to [GitLab Core](https://about.gitlab.com/pricing/) in 11.10.
> [Moved](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/27056) to
> [GitLab Core](https://about.gitlab.com/pricing/) in 11.10.
In highly controlled environments, it may be necessary for access policy to be In highly controlled environments, it may be necessary for access policy to be
controlled by an external service that permits access based on project controlled by an external service that permits access based on project
......
...@@ -4,8 +4,7 @@ type: reference ...@@ -4,8 +4,7 @@ type: reference
# Instance template repository **(PREMIUM ONLY)** # Instance template repository **(PREMIUM ONLY)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/5986) in > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/5986) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.3.
> [GitLab Premium](https://about.gitlab.com/pricing/) 11.3.
## Overview ## Overview
......
...@@ -55,10 +55,10 @@ sequenceDiagram ...@@ -55,10 +55,10 @@ sequenceDiagram
## Usage Ping **(CORE ONLY)** ## Usage Ping **(CORE ONLY)**
> [Introduced][ee-557] in GitLab Enterprise Edition 8.10. More statistics > - [Introduced][ee-557] in GitLab Enterprise Edition 8.10.
[were added][ee-735] in GitLab Enterprise Edition > - More statistics [were added][ee-735] in GitLab Enterprise Edition 8.12.
8.12. [Moved to GitLab Core][ce-23361] in 9.1. More statistics > - [Moved to GitLab Core][ce-23361] in 9.1.
[were added][ee-6602] in GitLab Ultimate 11.2. > - More statistics [were added][ee-6602] in GitLab Ultimate 11.2.
GitLab sends a weekly payload containing usage data to GitLab Inc. The usage GitLab sends a weekly payload containing usage data to GitLab Inc. The usage
ping uses high-level data to help our product, support, and sales teams. It does ping uses high-level data to help our product, support, and sales teams. It does
......
...@@ -4,8 +4,7 @@ type: reference, howto ...@@ -4,8 +4,7 @@ type: reference, howto
# Container Scanning **(ULTIMATE)** # Container Scanning **(ULTIMATE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/3672) > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/3672) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.4.
in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.4.
## Overview ## Overview
...@@ -176,12 +175,12 @@ using environment variables. ...@@ -176,12 +175,12 @@ using environment variables.
| `CLAIR_OUTPUT` | Severity level threshold. Vulnerabilities with severity level higher than or equal to this threshold will be outputted. Supported levels are `Unknown`, `Negligible`, `Low`, `Medium`, `High`, `Critical` and `Defcon1`. | `Unknown` | | `CLAIR_OUTPUT` | Severity level threshold. Vulnerabilities with severity level higher than or equal to this threshold will be outputted. Supported levels are `Unknown`, `Negligible`, `Low`, `Medium`, `High`, `Critical` and `Defcon1`. | `Unknown` |
| `REGISTRY_INSECURE` | Allow [Klar](https://github.com/optiopay/klar) to access insecure registries (HTTP only). Should only be set to `true` when testing the image locally. | `"false"` | | `REGISTRY_INSECURE` | Allow [Klar](https://github.com/optiopay/klar) to access insecure registries (HTTP only). Should only be set to `true` when testing the image locally. | `"false"` |
| `DOCKER_INSECURE` | Allow [Klar](https://github.com/optiopay/klar) to access secure Docker registries using HTTPS with bad (or self-signed) SSL certificates. | `"false"` | | `DOCKER_INSECURE` | Allow [Klar](https://github.com/optiopay/klar) to access secure Docker registries using HTTPS with bad (or self-signed) SSL certificates. | `"false"` |
| `CLAIR_VULNERABILITIES_DB_URL` | (**DEPRECATED - use `CLAIR_DB_CONNECTION_STRING` instead**) This variable is explicitly set in the [services section](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) of the `Container-Scanning.gitlab-ci.yml` file and defaults to `clair-vulnerabilities-db`. This value represents the address that the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) is running on and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. | `clair-vulnerabilities-db` | | `CLAIR_VULNERABILITIES_DB_URL` | (**DEPRECATED - use `CLAIR_DB_CONNECTION_STRING` instead**) This variable is explicitly set in the [services section](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) of the `Container-Scanning.gitlab-ci.yml` file and defaults to `clair-vulnerabilities-db`. This value represents the address that the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) is running on and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. | `clair-vulnerabilities-db` |
| `CLAIR_DB_CONNECTION_STRING` | This variable represents the [connection string](https://www.postgresql.org/docs/9.3/libpq-connect.html#AEN39692) to the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) database and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. The host value for the connection string must match the [alias](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) value of the `Container-Scanning.gitlab-ci.yml` template file, which defaults to `clair-vulnerabilities-db`. | `postgresql://postgres:password@clair-vulnerabilities-db:5432/postgres?sslmode=disable&statement_timeout=60000` | | `CLAIR_DB_CONNECTION_STRING` | This variable represents the [connection string](https://www.postgresql.org/docs/9.3/libpq-connect.html#AEN39692) to the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) database and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. The host value for the connection string must match the [alias](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) value of the `Container-Scanning.gitlab-ci.yml` template file, which defaults to `clair-vulnerabilities-db`. | `postgresql://postgres:password@clair-vulnerabilities-db:5432/postgres?sslmode=disable&statement_timeout=60000` |
| `CI_APPLICATION_REPOSITORY` | Docker repository URL for the image to be scanned. | `$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG` | | `CI_APPLICATION_REPOSITORY` | Docker repository URL for the image to be scanned. | `$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG` |
| `CI_APPLICATION_TAG` | Docker repository tag for the image to be scanned. | `$CI_COMMIT_SHA` | | `CI_APPLICATION_TAG` | Docker repository tag for the image to be scanned. | `$CI_COMMIT_SHA` |
| `CLAIR_DB_IMAGE` | The Docker image name and tag for the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes, or to refer to a locally hosted vulnerabilities database for an on-premise air-gapped installation. | `arminc/clair-db:latest` | | `CLAIR_DB_IMAGE` | The Docker image name and tag for the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes, or to refer to a locally hosted vulnerabilities database for an on-premise air-gapped installation. | `arminc/clair-db:latest` |
| `CLAIR_DB_IMAGE_TAG` | (**DEPRECATED - use `CLAIR_DB_IMAGE` instead**) The Docker image tag for the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes. | `latest` | | `CLAIR_DB_IMAGE_TAG` | (**DEPRECATED - use `CLAIR_DB_IMAGE` instead**) The Docker image tag for the [PostgreSQL server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes. | `latest` |
| `DOCKERFILE_PATH` | The path to the `Dockerfile` to be used for generating remediations. By default, the scanner will look for a file named `Dockerfile` in the root directory of the project, so this variable should only be configured if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. | `Dockerfile` | | `DOCKERFILE_PATH` | The path to the `Dockerfile` to be used for generating remediations. By default, the scanner will look for a file named `Dockerfile` in the root directory of the project, so this variable should only be configured if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. | `Dockerfile` |
| `ADDITIONAL_CA_CERT_BUNDLE` | Bundle of CA certs that you want to trust. | "" | | `ADDITIONAL_CA_CERT_BUNDLE` | Bundle of CA certs that you want to trust. | "" |
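As a minimal sketch (the pinned database tag and the Dockerfile location below are hypothetical), these variables can be overridden in the project's `.gitlab-ci.yml` after including the Container Scanning template:

```yaml
include:
  - template: Container-Scanning.gitlab-ci.yml

container_scanning:
  variables:
    # Hypothetical pinned tag, useful for a reproducible vulnerabilities database.
    CLAIR_DB_IMAGE: "arminc/clair-db:2019-10-22"
    # Hypothetical non-standard Dockerfile location, used when generating remediations.
    DOCKERFILE_PATH: "docker/Dockerfile"
```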
......
...@@ -4,8 +4,7 @@ type: reference, howto ...@@ -4,8 +4,7 @@ type: reference, howto
# Dynamic Application Security Testing (DAST) **(ULTIMATE)** # Dynamic Application Security Testing (DAST) **(ULTIMATE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4348) > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4348) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.4.
in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.4.
NOTE: **4 of the top 6 attacks were application based.** NOTE: **4 of the top 6 attacks were application based.**
Download our whitepaper, Download our whitepaper,
......
...@@ -4,8 +4,7 @@ type: reference, howto ...@@ -4,8 +4,7 @@ type: reference, howto
# Static Application Security Testing (SAST) **(ULTIMATE)** # Static Application Security Testing (SAST) **(ULTIMATE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/3775) > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/3775) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.3.
in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.3.
NOTE: **4 of the top 6 attacks were application based.** NOTE: **4 of the top 6 attacks were application based.**
Download our whitepaper, Download our whitepaper,
......
...@@ -351,8 +351,8 @@ file. ...@@ -351,8 +351,8 @@ file.
#### Jupyter Git Integration #### Jupyter Git Integration
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28783) in GitLab 12.0 for project-level clusters. > - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28783) in GitLab 12.0 for project-level clusters.
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/32512) in GitLab 12.3 for group and instance-level clusters. > - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/32512) in GitLab 12.3 for group and instance-level clusters.
When installing JupyterHub onto your Kubernetes cluster, [JupyterLab's Git extension](https://github.com/jupyterlab/jupyterlab-git) When installing JupyterHub onto your Kubernetes cluster, [JupyterLab's Git extension](https://github.com/jupyterlab/jupyterlab-git)
is automatically provisioned and configured using the authenticated user's: is automatically provisioned and configured using the authenticated user's:
......
...@@ -143,9 +143,9 @@ kubectl describe globaladdress.compute.gcp.crossplane.io gitlab-ad-globaladdress ...@@ -143,9 +143,9 @@ kubectl describe globaladdress.compute.gcp.crossplane.io gitlab-ad-globaladdress
## Setting up Resource classes ## Setting up Resource classes
Resource classes are a way of defining a configuration for the required managed service. We will define the Postgres Resource class Resource classes are a way of defining a configuration for the required managed service. We will define the PostgreSQL Resource class
- Define a gcp-postgres-standard.yaml resourceclass which contains - Define a `gcp-postgres-standard.yaml` resourceclass which contains
1. A default CloudSQLInstanceClass. 1. A default CloudSQLInstanceClass.
1. A CloudSQLInstanceClass with labels. 1. A CloudSQLInstanceClass with labels.
...@@ -285,4 +285,4 @@ serverCACertificateSha1Fingerprint: 40 bytes ...@@ -285,4 +285,4 @@ serverCACertificateSha1Fingerprint: 40 bytes
## Connect to the PostgreSQL instance ## Connect to the PostgreSQL instance
Follow this [GCP guide](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) if you Follow this [GCP guide](https://cloud.google.com/sql/docs/postgres/connect-kubernetes-engine) if you
would like to connect to the newly provisioned Postgres database instance on CloudSQL. would like to connect to the newly provisioned PostgreSQL database instance on CloudSQL.
...@@ -95,8 +95,8 @@ The options are: ...@@ -95,8 +95,8 @@ The options are:
##### View logs ##### View logs
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/201846) in GitLab Ultimate 12.8. > - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/201846) in GitLab Ultimate 12.8.
> [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25455) to [GitLab Core](https://about.gitlab.com/pricing/) 12.9. > - [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25455) to [GitLab Core](https://about.gitlab.com/pricing/) 12.9.
This can be useful if you are triaging an application incident and need to This can be useful if you are triaging an application incident and need to
[explore logs](../project/integrations/prometheus.md#view-logs-ultimate) [explore logs](../project/integrations/prometheus.md#view-logs-ultimate)
......
# DevOps Score # DevOps Score
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/30469) in GitLab 9.3. > - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/30469) in GitLab 9.3.
> [Renamed from Conversational Development Index](https://gitlab.com/gitlab-org/gitlab/issues/20976) in GitLab 12.6. > - [Renamed from Conversational Development Index](https://gitlab.com/gitlab-org/gitlab/issues/20976) in GitLab 12.6.
NOTE: **Note:** NOTE: **Note:**
Your GitLab instance's [usage ping](../admin_area/settings/usage_statistics.md#usage-ping-core-only) must be activated in order to use this feature. Your GitLab instance's [usage ping](../admin_area/settings/usage_statistics.md#usage-ping-core-only) must be activated in order to use this feature.
......
# Instance statistics # Instance statistics
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/41416) > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/41416) in GitLab 11.2.
in GitLab 11.2.
Instance statistics gives users or admins access to instance-wide analytics. Instance statistics gives users or admins access to instance-wide analytics.
They are accessible to all users by default (GitLab admins can restrict its They are accessible to all users by default (GitLab admins can restrict its
......
# Cohorts # Cohorts
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/23361) > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/23361) in GitLab 9.1.
in GitLab 9.1.
As a benefit of having the [usage ping active](../admin_area/settings/usage_statistics.md), As a benefit of having the [usage ping active](../admin_area/settings/usage_statistics.md),
GitLab lets you analyze your users' activities in your GitLab installation over time. GitLab lets you analyze your users' activities in your GitLab installation over time.
......
...@@ -158,8 +158,7 @@ It's possible to generate diagrams and flowcharts from text in GitLab using [Mer ...@@ -158,8 +158,7 @@ It's possible to generate diagrams and flowcharts from text in GitLab using [Mer
#### Mermaid #### Mermaid
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/15107) in > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/15107) in GitLab 10.3.
GitLab 10.3.
Visit the [official page](https://mermaidjs.github.io/) for more details. If you're new to using Mermaid or need help identifying issues in your Mermaid code, the [Mermaid Live Editor](https://mermaid-js.github.io/mermaid-live-editor/) is a helpful tool for creating and resolving issues within Mermaid diagrams. Visit the [official page](https://mermaidjs.github.io/) for more details. If you're new to using Mermaid or need help identifying issues in your Mermaid code, the [Mermaid Live Editor](https://mermaid-js.github.io/mermaid-live-editor/) is a helpful tool for creating and resolving issues within Mermaid diagrams.
......
# Badges # Badges
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/41174) > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/41174) in GitLab 10.7.
in GitLab 10.7.
Badges are a unified way to present condensed pieces of information about your Badges are a unified way to present condensed pieces of information about your
projects. They consist of a small image and additionally a URL that the image projects. They consist of a small image and additionally a URL that the image
......
...@@ -660,14 +660,14 @@ kubectl create clusterrolebinding permissive-binding \ ...@@ -660,14 +660,14 @@ kubectl create clusterrolebinding permissive-binding \
Amazon EKS doesn't have a default Storage Class out of the box, which means Amazon EKS doesn't have a default Storage Class out of the box, which means
requests for persistent volumes will not be automatically fulfilled. As part requests for persistent volumes will not be automatically fulfilled. As part
of Auto DevOps, the deployed Postgres instance requests persistent storage, of Auto DevOps, the deployed PostgreSQL instance requests persistent storage,
and without a default storage class it will fail to start. and without a default storage class it will fail to start.
If you want a default Storage Class but one doesn't already exist, follow Amazon's If you want a default Storage Class but one doesn't already exist, follow Amazon's
[guide on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html) [guide on storage classes](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html)
to create one. to create one.
Alternatively, disable Postgres by setting the project variable Alternatively, disable PostgreSQL by setting the project variable
[`POSTGRES_ENABLED`](../../../topics/autodevops/#environment-variables) to `false`. [`POSTGRES_ENABLED`](../../../topics/autodevops/#environment-variables) to `false`.
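A minimal sketch of the second option, assuming the project runs the stock Auto DevOps pipeline through the template include rather than through the project-level toggle:

```yaml
include:
  - template: Auto-DevOps.gitlab-ci.yml

variables:
  # Skip the in-cluster PostgreSQL instance so no persistent volume is requested
  # on clusters that lack a default Storage Class.
  POSTGRES_ENABLED: "false"
```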
#### Deploy the app to EKS #### Deploy the app to EKS
......
# Kubernetes Logs # Kubernetes Logs
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4752) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 11.0. > - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4752) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 11.0.
> [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25455) to [GitLab Core](https://about.gitlab.com/pricing/) 12.9. > - [Moved](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25455) to [GitLab Core](https://about.gitlab.com/pricing/) 12.9.
GitLab makes it easy to view the logs of running pods in [connected Kubernetes clusters](index.md). GitLab makes it easy to view the logs of running pods in [connected Kubernetes clusters](index.md).
By displaying the logs directly in GitLab, developers can avoid having to manage console tools or jump to a different interface. By displaying the logs directly in GitLab, developers can avoid having to manage console tools or jump to a different interface.
......
...@@ -120,7 +120,7 @@ VARIABLE_VALUE = project.variables.get('PRIVATE_TOKEN').value ...@@ -120,7 +120,7 @@ VARIABLE_VALUE = project.variables.get('PRIVATE_TOKEN').value
### 5. Configure an operation ### 5. Configure an operation
For this example we'll use the "**Run SQL queries in Notebook**" section in the sample runbook to query For this example we'll use the "**Run SQL queries in Notebook**" section in the sample runbook to query
a postgres database. The first 4 lines of the section define the variables that are required for this query to function. a PostgreSQL database. The first 4 lines of the section define the variables that are required for this query to function.
```sql ```sql
%env DB_USER={project.variables.get('DB_USER').value} %env DB_USER={project.variables.get('DB_USER').value}
...@@ -136,7 +136,7 @@ Create the matching variables in your project's **Settings >> CI/CD >> Variables ...@@ -136,7 +136,7 @@ Create the matching variables in your project's **Settings >> CI/CD >> Variables
Back in Jupyter, click the "Run SQL queries in Notebook" heading and then click *Run*. The results will be Back in Jupyter, click the "Run SQL queries in Notebook" heading and then click *Run*. The results will be
displayed in-line as follows: displayed in-line as follows:
![postgres query](img/postgres-query.png) ![PostgreSQL query](img/postgres-query.png)
You can try other operations such as running shell scripts or interacting with a Kubernetes cluster. Visit the You can try other operations such as running shell scripts or interacting with a Kubernetes cluster. Visit the
[Nurtch Documentation](http://docs.nurtch.com/) for more information. [Nurtch Documentation](http://docs.nurtch.com/) for more information.
# Import your project from Bitbucket Server to GitLab # Import your project from Bitbucket Server to GitLab
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/20164) > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/20164) in GitLab 11.2.
in GitLab 11.2.
NOTE: **Note:** NOTE: **Note:**
The Bitbucket Server importer does not work with [Bitbucket Cloud](https://bitbucket.org). The Bitbucket Server importer does not work with [Bitbucket Cloud](https://bitbucket.org).
......
# Import Phabricator tasks into a GitLab project # Import Phabricator tasks into a GitLab project
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/60562) in > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/60562) in GitLab 12.0.
GitLab 12.0.
GitLab allows you to import all tasks from a Phabricator instance into GitLab allows you to import all tasks from a Phabricator instance into
GitLab issues. The import creates a single project with the GitLab issues. The import creates a single project with the
......
...@@ -6,6 +6,8 @@ description: "Automatic Let's Encrypt SSL certificates for GitLab Pages." ...@@ -6,6 +6,8 @@ description: "Automatic Let's Encrypt SSL certificates for GitLab Pages."
# GitLab Pages integration with Let's Encrypt # GitLab Pages integration with Let's Encrypt
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/28996) in GitLab 12.1. For versions earlier than GitLab 12.1, see the [manual Let's Encrypt instructions](../lets_encrypt_for_gitlab_pages.md). > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/28996) in GitLab 12.1. For versions earlier than GitLab 12.1, see the [manual Let's Encrypt instructions](../lets_encrypt_for_gitlab_pages.md).
NOTE: **Note:**
This feature is in **beta** and may still have bugs. See all the related issues linked from this [issue's description](https://gitlab.com/gitlab-org/gitlab-foss/issues/28996) for more information. This feature is in **beta** and may still have bugs. See all the related issues linked from this [issue's description](https://gitlab.com/gitlab-org/gitlab-foss/issues/28996) for more information.
The GitLab Pages integration with Let's Encrypt (LE) allows you The GitLab Pages integration with Let's Encrypt (LE) allows you
......
...@@ -4,8 +4,7 @@ type: reference, howto ...@@ -4,8 +4,7 @@ type: reference, howto
# New Pages website from a bundled template # New Pages website from a bundled template
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/47857) > [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/47857) in GitLab 11.8.
in GitLab 11.8.
The simplest way to create a GitLab Pages site is to use one of the most The simplest way to create a GitLab Pages site is to use one of the most
popular templates, which come already bundled with GitLab and are ready to go. popular templates, which come already bundled with GitLab and are ready to go.
......
...@@ -161,8 +161,7 @@ Release tag. Once the `released_at` date and time has passed, the badge is autom ...@@ -161,8 +161,7 @@ Release tag. Once the `released_at` date and time has passed, the badge is autom
## Creating a Release ## Creating a Release
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32812) in GitLab > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32812) in GitLab 12.9, Releases can be created directly through the GitLab Releases UI.
12.9, Releases can be created directly through the GitLab Releases UI.
NOTE: **Note:** NOTE: **Note:**
Only users with Developer permissions or higher can create Releases. Only users with Developer permissions or higher can create Releases.
......
# Web IDE # Web IDE
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4539) in [GitLab Ultimate][ee] 10.4. > - [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/4539) in [GitLab Ultimate][ee] 10.4.
> [Brought to GitLab Core](https://gitlab.com/gitlab-org/gitlab-foss/issues/44157) in 10.7. > - [Brought to GitLab Core](https://gitlab.com/gitlab-org/gitlab-foss/issues/44157) in 10.7.
The Web IDE editor makes it faster and easier to contribute changes to your The Web IDE editor makes it faster and easier to contribute changes to your
projects by providing an advanced editor with commit staging. projects by providing an advanced editor with commit staging.
......
...@@ -44,6 +44,7 @@ module Gitlab ...@@ -44,6 +44,7 @@ module Gitlab
def install_command def install_command
command = ['helm', 'upgrade', name, chart] + command = ['helm', 'upgrade', name, chart] +
install_flag + install_flag +
rollback_support_flag +
reset_values_flag + reset_values_flag +
tls_flags_if_remote_tiller + tls_flags_if_remote_tiller +
optional_version_flag + optional_version_flag +
...@@ -83,6 +84,10 @@ module Gitlab ...@@ -83,6 +84,10 @@ module Gitlab
['--version', version] ['--version', version]
end end
def rollback_support_flag
['--atomic', '--cleanup-on-fail']
end
end end
end end
end end
......
...@@ -11,9 +11,9 @@ describe('Boards blank state', () => { ...@@ -11,9 +11,9 @@ describe('Boards blank state', () => {
boardsStore.create(); boardsStore.create();
spyOn(boardsStore, 'addList').and.stub(); jest.spyOn(boardsStore, 'addList').mockImplementation();
spyOn(boardsStore, 'removeList').and.stub(); jest.spyOn(boardsStore, 'removeList').mockImplementation();
spyOn(boardsStore, 'generateDefaultLists').and.callFake( jest.spyOn(boardsStore, 'generateDefaultLists').mockImplementation(
() => () =>
new Promise((resolve, reject) => { new Promise((resolve, reject) => {
if (fail) { if (fail) {
...@@ -39,7 +39,7 @@ describe('Boards blank state', () => { ...@@ -39,7 +39,7 @@ describe('Boards blank state', () => {
vm = new Comp(); vm = new Comp();
setTimeout(() => { setImmediate(() => {
vm.$mount(); vm.$mount();
done(); done();
}); });
...@@ -60,7 +60,7 @@ describe('Boards blank state', () => { ...@@ -60,7 +60,7 @@ describe('Boards blank state', () => {
it('clears blank state', done => { it('clears blank state', done => {
vm.$el.querySelector('.btn-default').click(); vm.$el.querySelector('.btn-default').click();
setTimeout(() => { setImmediate(() => {
expect(boardsStore.welcomeIsHidden()).toBeTruthy(); expect(boardsStore.welcomeIsHidden()).toBeTruthy();
done(); done();
...@@ -70,15 +70,11 @@ describe('Boards blank state', () => { ...@@ -70,15 +70,11 @@ describe('Boards blank state', () => {
it('creates pre-defined labels', done => { it('creates pre-defined labels', done => {
vm.$el.querySelector('.btn-success').click(); vm.$el.querySelector('.btn-success').click();
setTimeout(() => { setImmediate(() => {
expect(boardsStore.addList).toHaveBeenCalledTimes(2); expect(boardsStore.addList).toHaveBeenCalledTimes(2);
expect(boardsStore.addList).toHaveBeenCalledWith( expect(boardsStore.addList).toHaveBeenCalledWith(expect.objectContaining({ title: 'To Do' }));
jasmine.objectContaining({ title: 'To Do' }),
);
expect(boardsStore.addList).toHaveBeenCalledWith( expect(boardsStore.addList).toHaveBeenCalledWith(expect.objectContaining({ title: 'Doing' }));
jasmine.objectContaining({ title: 'Doing' }),
);
done(); done();
}); });
...@@ -89,7 +85,7 @@ describe('Boards blank state', () => { ...@@ -89,7 +85,7 @@ describe('Boards blank state', () => {
vm.$el.querySelector('.btn-success').click(); vm.$el.querySelector('.btn-success').click();
setTimeout(() => { setImmediate(() => {
expect(boardsStore.welcomeIsHidden()).toBeFalsy(); expect(boardsStore.welcomeIsHidden()).toBeFalsy();
expect(boardsStore.removeList).toHaveBeenCalledWith(undefined, 'label'); expect(boardsStore.removeList).toHaveBeenCalledWith(undefined, 'label');
......
/* global List */ /* global List */
import $ from 'jquery';
import Vue from 'vue'; import Vue from 'vue';
import MockAdapter from 'axios-mock-adapter'; import MockAdapter from 'axios-mock-adapter';
import axios from '~/lib/utils/axios_utils'; import axios from '~/lib/utils/axios_utils';
...@@ -14,6 +15,9 @@ describe('Issue boards new issue form', () => { ...@@ -14,6 +15,9 @@ describe('Issue boards new issue form', () => {
let list; let list;
let mock; let mock;
let newIssueMock; let newIssueMock;
const jQueryMock = {
enable: jest.fn(),
};
const promiseReturn = { const promiseReturn = {
data: { data: {
iid: 100, iid: 100,
...@@ -28,7 +32,7 @@ describe('Issue boards new issue form', () => { ...@@ -28,7 +32,7 @@ describe('Issue boards new issue form', () => {
return vm.submit(dummySubmitEvent); return vm.submit(dummySubmitEvent);
}; };
beforeEach(done => { beforeEach(() => {
setFixtures('<div class="test-container"></div>'); setFixtures('<div class="test-container"></div>');
const BoardNewIssueComp = Vue.extend(boardNewIssue); const BoardNewIssueComp = Vue.extend(boardNewIssue);
...@@ -41,7 +45,7 @@ describe('Issue boards new issue form', () => { ...@@ -41,7 +45,7 @@ describe('Issue boards new issue form', () => {
list = new List(listObj); list = new List(listObj);
newIssueMock = Promise.resolve(promiseReturn); newIssueMock = Promise.resolve(promiseReturn);
spyOn(list, 'newIssue').and.callFake(() => newIssueMock); jest.spyOn(list, 'newIssue').mockImplementation(() => newIssueMock);
vm = new BoardNewIssueComp({ vm = new BoardNewIssueComp({
propsData: { propsData: {
...@@ -49,9 +53,9 @@ describe('Issue boards new issue form', () => { ...@@ -49,9 +53,9 @@ describe('Issue boards new issue form', () => {
}, },
}).$mount(document.querySelector('.test-container')); }).$mount(document.querySelector('.test-container'));
Vue.nextTick() $.fn.extend(jQueryMock);
.then(done)
.catch(done.fail); return Vue.nextTick();
}); });
afterEach(() => { afterEach(() => {
...@@ -59,142 +63,116 @@ describe('Issue boards new issue form', () => { ...@@ -59,142 +63,116 @@ describe('Issue boards new issue form', () => {
mock.restore(); mock.restore();
}); });
it('calls submit if submit button is clicked', done => { it('calls submit if submit button is clicked', () => {
spyOn(vm, 'submit').and.callFake(e => e.preventDefault()); jest.spyOn(vm, 'submit').mockImplementation(e => e.preventDefault());
vm.title = 'Testing Title'; vm.title = 'Testing Title';
Vue.nextTick() return Vue.nextTick().then(() => {
.then(() => { vm.$el.querySelector('.btn-success').click();
vm.$el.querySelector('.btn-success').click();
expect(vm.submit.calls.count()).toBe(1); expect(vm.submit.mock.calls.length).toBe(1);
}) });
.then(done)
.catch(done.fail);
}); });
it('disables submit button if title is empty', () => { it('disables submit button if title is empty', () => {
expect(vm.$el.querySelector('.btn-success').disabled).toBe(true); expect(vm.$el.querySelector('.btn-success').disabled).toBe(true);
}); });
it('enables submit button if title is not empty', done => { it('enables submit button if title is not empty', () => {
vm.title = 'Testing Title'; vm.title = 'Testing Title';
Vue.nextTick() return Vue.nextTick().then(() => {
.then(() => { expect(vm.$el.querySelector('.form-control').value).toBe('Testing Title');
expect(vm.$el.querySelector('.form-control').value).toBe('Testing Title'); expect(vm.$el.querySelector('.btn-success').disabled).not.toBe(true);
expect(vm.$el.querySelector('.btn-success').disabled).not.toBe(true); });
})
.then(done)
.catch(done.fail);
}); });
it('clears title after clicking cancel', done => { it('clears title after clicking cancel', () => {
vm.$el.querySelector('.btn-default').click(); vm.$el.querySelector('.btn-default').click();
Vue.nextTick() return Vue.nextTick().then(() => {
.then(() => { expect(vm.title).toBe('');
expect(vm.title).toBe(''); });
})
.then(done)
.catch(done.fail);
}); });
it('does not create new issue if title is empty', done => { it('does not create new issue if title is empty', () => {
submitIssue() return submitIssue().then(() => {
.then(() => { expect(list.newIssue).not.toHaveBeenCalled();
expect(list.newIssue).not.toHaveBeenCalled(); });
})
.then(done)
.catch(done.fail);
}); });
describe('submit success', () => { describe('submit success', () => {
it('creates new issue', done => { it('creates new issue', () => {
vm.title = 'submit title'; vm.title = 'submit title';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(list.newIssue).toHaveBeenCalled(); expect(list.newIssue).toHaveBeenCalled();
}) });
.then(done)
.catch(done.fail);
}); });
it('enables button after submit', done => { it('enables button after submit', () => {
vm.title = 'submit issue'; vm.title = 'submit issue';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(vm.$el.querySelector('.btn-success').disabled).toBe(false); expect(jQueryMock.enable).toHaveBeenCalled();
}) });
.then(done)
.catch(done.fail);
}); });
it('clears title after submit', done => { it('clears title after submit', () => {
vm.title = 'submit issue'; vm.title = 'submit issue';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(vm.title).toBe(''); expect(vm.title).toBe('');
}) });
.then(done)
.catch(done.fail);
}); });
it('sets detail issue after submit', done => { it('sets detail issue after submit', () => {
expect(boardsStore.detail.issue.title).toBe(undefined); expect(boardsStore.detail.issue.title).toBe(undefined);
vm.title = 'submit issue'; vm.title = 'submit issue';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(boardsStore.detail.issue.title).toBe('submit issue'); expect(boardsStore.detail.issue.title).toBe('submit issue');
}) });
.then(done)
.catch(done.fail);
}); });
it('sets detail list after submit', done => { it('sets detail list after submit', () => {
vm.title = 'submit issue'; vm.title = 'submit issue';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(boardsStore.detail.list.id).toBe(list.id); expect(boardsStore.detail.list.id).toBe(list.id);
}) });
.then(done)
.catch(done.fail);
}); });
it('sets detail weight after submit', done => { it('sets detail weight after submit', () => {
boardsStore.weightFeatureAvailable = true; boardsStore.weightFeatureAvailable = true;
vm.title = 'submit issue'; vm.title = 'submit issue';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(boardsStore.detail.list.weight).toBe(list.weight); expect(boardsStore.detail.list.weight).toBe(list.weight);
}) });
.then(done)
.catch(done.fail);
}); });
it('does not set detail weight after submit', done => { it('does not set detail weight after submit', () => {
boardsStore.weightFeatureAvailable = false; boardsStore.weightFeatureAvailable = false;
vm.title = 'submit issue'; vm.title = 'submit issue';
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(boardsStore.detail.list.weight).toBe(list.weight); expect(boardsStore.detail.list.weight).toBe(list.weight);
}) });
.then(done)
.catch(done.fail);
}); });
}); });
...@@ -204,24 +182,21 @@ describe('Issue boards new issue form', () => { ...@@ -204,24 +182,21 @@ describe('Issue boards new issue form', () => {
vm.title = 'error'; vm.title = 'error';
}); });
it('removes issue', done => { it('removes issue', () => {
Vue.nextTick() const lengthBefore = list.issues.length;
return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(list.issues.length).toBe(1); expect(list.issues.length).toBe(lengthBefore);
}) });
.then(done)
.catch(done.fail);
}); });
it('shows error', done => { it('shows error', () => {
Vue.nextTick() return Vue.nextTick()
.then(submitIssue) .then(submitIssue)
.then(() => { .then(() => {
expect(vm.error).toBe(true); expect(vm.error).toBe(true);
}) });
.then(done)
.catch(done.fail);
}); });
}); });
}); });
import $ from 'jquery'; import { mount } from '@vue/test-utils';
import Vue from 'vue';
import mountComponent from 'spec/helpers/vue_mount_component_helper';
import boardsStore from '~/boards/stores/boards_store'; import boardsStore from '~/boards/stores/boards_store';
import boardForm from '~/boards/components/board_form.vue'; import boardForm from '~/boards/components/board_form.vue';
import DeprecatedModal from '~/vue_shared/components/deprecated_modal.vue';
describe('board_form.vue', () => { describe('board_form.vue', () => {
const props = { let wrapper;
const propsData = {
canAdminBoard: false, canAdminBoard: false,
labelsPath: `${gl.TEST_HOST}/labels/path`, labelsPath: `${gl.TEST_HOST}/labels/path`,
milestonePath: `${gl.TEST_HOST}/milestone/path`, milestonePath: `${gl.TEST_HOST}/milestone/path`,
}; };
let vm;
const findModal = () => wrapper.find(DeprecatedModal);
beforeEach(() => { beforeEach(() => {
spyOn($, 'ajax');
boardsStore.state.currentPage = 'edit'; boardsStore.state.currentPage = 'edit';
const Component = Vue.extend(boardForm); wrapper = mount(boardForm, { propsData });
vm = mountComponent(Component, props);
}); });
afterEach(() => { afterEach(() => {
vm.$destroy(); wrapper.destroy();
wrapper = null;
}); });
describe('methods', () => { describe('methods', () => {
describe('cancel', () => { describe('cancel', () => {
it('resets currentPage', done => { it('resets currentPage', () => {
vm.cancel(); wrapper.vm.cancel();
expect(boardsStore.state.currentPage).toBe('');
Vue.nextTick()
.then(() => {
expect(boardsStore.state.currentPage).toBe('');
})
.then(done)
.catch(done.fail);
}); });
}); });
}); });
describe('buttons', () => { describe('buttons', () => {
it('cancel button triggers cancel()', done => { it('cancel button triggers cancel()', () => {
spyOn(vm, 'cancel'); wrapper.setMethods({ cancel: jest.fn() });
findModal().vm.$emit('cancel');
Vue.nextTick()
.then(() => { return wrapper.vm.$nextTick().then(() => {
const cancelButton = vm.$el.querySelector('button[data-dismiss="modal"]'); expect(wrapper.vm.cancel).toHaveBeenCalled();
cancelButton.click(); });
expect(vm.cancel).toHaveBeenCalled();
})
.then(done)
.catch(done.fail);
}); });
}); });
}); });
...@@ -174,7 +174,7 @@ describe('Issue model', () => { ...@@ -174,7 +174,7 @@ describe('Issue model', () => {
describe('update', () => { describe('update', () => {
it('passes assignee ids when there are assignees', done => { it('passes assignee ids when there are assignees', done => {
spyOn(axios, 'patch').and.callFake((url, data) => { jest.spyOn(axios, 'patch').mockImplementation((url, data) => {
expect(data.issue.assignee_ids).toEqual([1]); expect(data.issue.assignee_ids).toEqual([1]);
done(); done();
return Promise.resolve(); return Promise.resolve();
...@@ -184,7 +184,7 @@ describe('Issue model', () => { ...@@ -184,7 +184,7 @@ describe('Issue model', () => {
}); });
it('passes assignee ids of [0] when there are no assignees', done => { it('passes assignee ids of [0] when there are no assignees', done => {
spyOn(axios, 'patch').and.callFake((url, data) => { jest.spyOn(axios, 'patch').mockImplementation((url, data) => {
expect(data.issue.assignee_ids).toEqual([0]); expect(data.issue.assignee_ids).toEqual([0]);
done(); done();
return Promise.resolve(); return Promise.resolve();
......
import boardsStore from '~/boards/stores/boards_store';
export const boardObj = { export const boardObj = {
id: 1, id: 1,
name: 'test', name: 'test',
...@@ -89,3 +91,54 @@ export const mockMilestone = { ...@@ -89,3 +91,54 @@ export const mockMilestone = {
start_date: '2018-01-01', start_date: '2018-01-01',
due_date: '2019-12-31', due_date: '2019-12-31',
}; };
export const BoardsMockData = {
GET: {
'/test/-/boards/1/lists/300/issues?id=300&page=1': {
issues: [
{
title: 'Testing',
id: 1,
iid: 1,
confidential: false,
labels: [],
assignees: [],
},
],
},
'/test/issue-boards/-/milestones.json': [
{
id: 1,
title: 'test',
},
],
},
POST: {
'/test/-/boards/1/lists': listObj,
},
PUT: {
'/test/issue-boards/-/board/1/lists{/id}': {},
},
DELETE: {
'/test/issue-boards/-/board/1/lists{/id}': {},
},
};
export const boardsMockInterceptor = config => {
const body = BoardsMockData[config.method.toUpperCase()][config.url];
return [200, body];
};
export const setMockEndpoints = (opts = {}) => {
const boardsEndpoint = opts.boardsEndpoint || '/test/issue-boards/-/boards.json';
const listsEndpoint = opts.listsEndpoint || '/test/-/boards/1/lists';
const bulkUpdatePath = opts.bulkUpdatePath || '';
const boardId = opts.boardId || '1';
boardsStore.setEndpoints({
boardsEndpoint,
listsEndpoint,
bulkUpdatePath,
boardId,
});
};
import boardsStore from '~/boards/stores/boards_store';
import { listObj } from '../../frontend/boards/mock_data';
export * from '../../frontend/boards/mock_data'; export * from '../../frontend/boards/mock_data';
export const BoardsMockData = {
GET: {
'/test/-/boards/1/lists/300/issues?id=300&page=1': {
issues: [
{
title: 'Testing',
id: 1,
iid: 1,
confidential: false,
labels: [],
assignees: [],
},
],
},
'/test/issue-boards/-/milestones.json': [
{
id: 1,
title: 'test',
},
],
},
POST: {
'/test/-/boards/1/lists': listObj,
},
PUT: {
'/test/issue-boards/-/board/1/lists{/id}': {},
},
DELETE: {
'/test/issue-boards/-/board/1/lists{/id}': {},
},
};
export const boardsMockInterceptor = config => {
const body = BoardsMockData[config.method.toUpperCase()][config.url];
return [200, body];
};
export const setMockEndpoints = (opts = {}) => {
const boardsEndpoint = opts.boardsEndpoint || '/test/issue-boards/-/boards.json';
const listsEndpoint = opts.listsEndpoint || '/test/-/boards/1/lists';
const bulkUpdatePath = opts.bulkUpdatePath || '';
const boardId = opts.boardId || '1';
boardsStore.setEndpoints({
boardsEndpoint,
listsEndpoint,
bulkUpdatePath,
boardId,
});
};
...@@ -41,6 +41,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -41,6 +41,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
--version 1.2.3 --version 1.2.3
--set rbac.create\\=false,rbac.enabled\\=false --set rbac.create\\=false,rbac.enabled\\=false
...@@ -79,6 +81,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -79,6 +81,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
#{tls_flags} #{tls_flags}
--version 1.2.3 --version 1.2.3
...@@ -109,6 +113,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -109,6 +113,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
--version 1.2.3 --version 1.2.3
--set rbac.create\\=true,rbac.enabled\\=true --set rbac.create\\=true,rbac.enabled\\=true
...@@ -140,6 +146,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -140,6 +146,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
--version 1.2.3 --version 1.2.3
--set rbac.create\\=false,rbac.enabled\\=false --set rbac.create\\=false,rbac.enabled\\=false
...@@ -171,6 +179,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -171,6 +179,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
--version 1.2.3 --version 1.2.3
--set rbac.create\\=false,rbac.enabled\\=false --set rbac.create\\=false,rbac.enabled\\=false
...@@ -200,6 +210,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -200,6 +210,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
--version 1.2.3 --version 1.2.3
--set rbac.create\\=false,rbac.enabled\\=false --set rbac.create\\=false,rbac.enabled\\=false
...@@ -229,6 +241,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do ...@@ -229,6 +241,8 @@ describe Gitlab::Kubernetes::Helm::InstallCommand do
<<~EOS.squish <<~EOS.squish
helm upgrade app-name chart-name helm upgrade app-name chart-name
--install --install
--atomic
--cleanup-on-fail
--reset-values --reset-values
--set rbac.create\\=false,rbac.enabled\\=false --set rbac.create\\=false,rbac.enabled\\=false
--namespace gitlab-managed-apps --namespace gitlab-managed-apps
......
# frozen_string_literal: true
require 'spec_helper'
describe Clusters::Management::CreateProjectService do
let(:cluster) { create(:cluster, :project) }
let(:current_user) { create(:user) }
subject { described_class.new(cluster, current_user: current_user).execute }
shared_examples 'management project is not required' do
it 'does not create a project' do
expect { subject }.not_to change(cluster, :management_project)
end
end
context ':auto_create_cluster_management_project feature flag is disabled' do
before do
stub_feature_flags(auto_create_cluster_management_project: false)
end
include_examples 'management project is not required'
end
context 'cluster already has a management project' do
let(:cluster) { create(:cluster, :management_project) }
include_examples 'management project is not required'
end
shared_examples 'creates a management project' do
let(:project_params) do
{
name: "#{cluster.name} Cluster Management",
description: 'This project is automatically generated and will be used to manage your Kubernetes cluster. [More information](/help/user/clusters/management_project)',
namespace_id: namespace&.id,
visibility_level: Gitlab::VisibilityLevel::PRIVATE
}
end
it 'creates a management project' do
expect(Projects::CreateService).to receive(:new)
.with(current_user, project_params)
.and_call_original
subject
management_project = cluster.management_project
expect(management_project).to be_present
expect(management_project).to be_private
expect(management_project.name).to eq "#{cluster.name} Cluster Management"
expect(management_project.namespace).to eq namespace
end
end
context 'project cluster' do
let(:cluster) { create(:cluster, projects: [project]) }
let(:project) { create(:project, namespace: current_user.namespace) }
let(:namespace) { project.namespace }
include_examples 'creates a management project'
end
context 'group cluster' do
let(:cluster) { create(:cluster, :group, user: current_user) }
let(:namespace) { cluster.group }
before do
namespace.add_user(current_user, Gitlab::Access::MAINTAINER)
end
include_examples 'creates a management project'
end
context 'instance cluster' do
let(:cluster) { create(:cluster, :instance, user: current_user) }
let(:namespace) { create(:group) }
before do
stub_application_setting(instance_administrators_group: namespace)
namespace.add_user(current_user, Gitlab::Access::MAINTAINER)
end
include_examples 'creates a management project'
end
describe 'error handling' do
let(:project) { cluster.project }
before do
allow(Projects::CreateService).to receive(:new)
.and_return(double(execute: project))
end
context 'project is invalid' do
let(:errors) { double(full_messages: ["Error message"]) }
let(:project) { instance_double(Project, errors: errors) }
it { expect { subject }.to raise_error(described_class::CreateError, /Failed to create project/) }
end
context 'instance administrators group is missing' do
let(:cluster) { create(:cluster, :instance) }
it { expect { subject }.to raise_error(described_class::CreateError, /Instance administrators group not found/) }
end
context 'cluster is invalid' do
before do
allow(cluster).to receive(:update).and_return(false)
end
it { expect { subject }.to raise_error(described_class::CreateError, /Failed to update cluster/) }
end
context 'unknown cluster type' do
before do
allow(cluster).to receive(:cluster_type).and_return("unknown_type")
end
it { expect { subject }.to raise_error(NotImplementedError) }
end
end
end