Commit e50050a8 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 1dffba3b
@@ -96,6 +96,7 @@ gitlab:assets:compile pull-cache:
       - node_modules
       - public/assets
       - assets-compile.log
+    when: always
 
 compile-assets pull-push-cache:
   extends:
...
@@ -83,6 +83,7 @@
   "markdownlint",
   "Mattermost",
   "Microsoft",
+  "Minikube",
   "MinIO",
   "NGINX Ingress",
   "NGINX",
...
@@ -120,6 +120,7 @@
     &:hover:not(.tree-truncated-warning) {
       td {
         background-color: $blue-50;
+        background-clip: padding-box;
         border-top: 1px solid $blue-200;
         border-bottom: 1px solid $blue-200;
         cursor: pointer;
...
@@ -65,10 +65,6 @@ module NavHelper
     %w(groups#issues labels#index milestones#index boards#index boards#show)
   end
 
-  def show_user_notification_dot?
-    experiment_enabled?(:ci_notification_dot)
-  end
-
   private
 
   def get_header_links
...
@@ -26,7 +26,8 @@ module SystemNoteHelper
     'duplicate' => 'duplicate',
     'locked' => 'lock',
     'unlocked' => 'lock-open',
-    'due_date' => 'calendar'
+    'due_date' => 'calendar',
+    'health_status' => 'status-health'
   }.freeze
 
   def system_note_icon_name(note)
...
@@ -27,6 +27,7 @@ module MergeRequests
           success
         end
       end
+
       log_info("Merge process finished on JID #{merge_jid} with state #{state}")
     rescue MergeError => e
       handle_merge_error(log_message: e.message, save_message_on_model: true)
@@ -54,7 +55,7 @@ module MergeRequests
       error =
         if @merge_request.should_be_rebased?
          'Only fast-forward merge is allowed for your project. Please update your source branch'
-        elsif !@merge_request.mergeable?
+        elsif !@merge_request.merged? && !@merge_request.mergeable?
          'Merge request is not mergeable'
        end
...
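The added `merged?` check is what lets a retried merge job pass validation instead of aborting with "Merge request is not mergeable". A minimal sketch of that behaviour, using stand-in objects rather than GitLab's actual classes:

```ruby
# Stand-in for the relevant bits of a merge request; in GitLab this state
# comes from the MergeRequest model.
MR = Struct.new(:merged, :mergeable, :should_be_rebased, keyword_init: true)

def merge_validation_error(mr)
  if mr.should_be_rebased
    'Only fast-forward merge is allowed for your project. Please update your source branch'
  elsif !mr.merged && !mr.mergeable
    'Merge request is not mergeable'
  end
end

# A retried job sees an already-merged MR: no error is returned, so the retry
# is a harmless no-op rather than a spurious failure.
merge_validation_error(MR.new(merged: true, mergeable: false, should_be_rebased: false)) # => nil
```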
@@ -8,17 +8,28 @@ module MergeRequests
   #
   class PostMergeService < MergeRequests::BaseService
     def execute(merge_request)
-      merge_request.mark_as_merged
-      close_issues(merge_request)
-      todo_service.merge_merge_request(merge_request, current_user)
-      create_event(merge_request)
-      create_note(merge_request)
+      # These operations need to happen transactionally
+      ActiveRecord::Base.transaction(requires_new: true) do
+        merge_request.mark_as_merged
+
+        # These options do not call external services and should be
+        # relatively quick enough to put in a Transaction
+        create_event(merge_request)
+        todo_service.merge_merge_request(merge_request, current_user)
+      end
+
+      # These operations are idempotent so can be safely run multiple times
       notification_service.merge_mr(merge_request, current_user)
-      execute_hooks(merge_request, 'merge')
+      create_note(merge_request)
+      close_issues(merge_request)
       invalidate_cache_counts(merge_request, users: merge_request.assignees)
       merge_request.update_project_counter_caches
       delete_non_latest_diffs(merge_request)
       cleanup_environments(merge_request)
+
+      # Anything after this point will be executed at-most-once. Less important activity only
+      # TODO: make all the work in here a separate sidekiq job so it can go in the transaction
+      execute_hooks(merge_request, 'merge')
     end
 
     private
...
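In isolation, the ordering the new `execute` applies looks roughly like the sketch below. The helper names are hypothetical; the point is the structure: non-idempotent state changes go inside one transaction so a crash leaves nothing half-applied, safely repeatable work runs next, and at-most-once side effects (webhooks) run last.

```ruby
# Sketch of the pattern, assuming an ActiveRecord setup.
def finalize_merge(merge_request)
  ActiveRecord::Base.transaction(requires_new: true) do
    merge_request.mark_as_merged        # the state transition that must not be half-applied
    record_merge_event(merge_request)   # fast, DB-only bookkeeping (hypothetical helper)
  end

  notify_participants(merge_request)    # idempotent: duplicate notifications are harmless
  close_related_issues(merge_request)   # idempotent: already-closed issues stay closed

  fire_merge_webhooks(merge_request)    # at-most-once: nothing after this point should retry
end
```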
@@ -4,12 +4,14 @@ module MergeRequests
   class SquashService < MergeRequests::BaseService
     include Git::Logger
 
+    def idempotent?
+      true
+    end
+
     def execute
       # If performing a squash would result in no change, then
       # immediately return a success message without performing a squash
-      if merge_request.commits_count < 2 && message.nil?
-        return success(squash_sha: merge_request.diff_head_sha)
-      end
+      return success(squash_sha: merge_request.diff_head_sha) if squash_redundant?
 
       if merge_request.squash_in_progress?
         return error(s_('MergeRequests|Squash task canceled: another squash is already in progress.'))
@@ -20,6 +22,12 @@ module MergeRequests
 
     private
 
+    def squash_redundant?
+      return true if merge_request.merged?
+
+      merge_request.commits_count < 2 && message.nil?
+    end
+
     def squash!
       squash_sha = repository.squash(current_user, merge_request, message || merge_request.default_squash_commit_message)
...
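Together with `idempotent?`, the `squash_redundant?` guard means a duplicated or retried squash reports the existing head SHA instead of rewriting history again. Roughly, as a simplified sketch (stand-in methods, not the real service API):

```ruby
# A squash is redundant once the merge request is merged, or when there is
# nothing to squash (fewer than two commits and no custom commit message).
def squash_redundant?(merge_request, message)
  return true if merge_request.merged?

  merge_request.commits_count < 2 && message.nil?
end

def squash(merge_request, message)
  # A repeat run short-circuits here and just reports the current head SHA.
  return { status: :success, squash_sha: merge_request.diff_head_sha } if squash_redundant?(merge_request, message)

  # ... perform the actual squash against the repository ...
end
```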
@@ -68,8 +68,7 @@
       %li.nav-item.header-user.dropdown{ data: { track_label: "profile_dropdown", track_event: "click_dropdown", track_value: "", qa_selector: 'user_menu' }, class: ('mr-0' if has_impersonation_link) }
         = link_to current_user, class: user_dropdown_class, data: { toggle: "dropdown" } do
           = image_tag avatar_icon_for_user(current_user, 23), width: 23, height: 23, class: "header-user-avatar qa-user-avatar"
-          - if show_user_notification_dot?
-            %span.header-user-notification-dot.rounded-circle.position-relative
+          = render_if_exists 'layouts/header/user_notification_dot', project: project, namespace: group
           = sprite_icon('angle-down', css_class: 'caret-down')
     .dropdown-menu.dropdown-menu-right
       = render 'layouts/header/current_user_dropdown'
...
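`render_if_exists` renders a partial only when a matching template can be found, which is how CE views can reference EE-only partials (such as the notification dot) without breaking. A rough sketch of such a helper, assuming a Rails view context (this is not the exact GitLab implementation):

```ruby
# Sketch of a "render if exists" view helper: look the partial up through the
# configured view paths and render it only if a template exists, otherwise
# emit nothing.
module RenderIfExistsHelper
  def render_if_exists(partial, **locals)
    return unless lookup_context.exists?(partial, [], true)

    render(partial, **locals)
  end
end
```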
---
title: Make MergeService idempotent
merge_request: 24708
author:
type: changed
@@ -18,7 +18,7 @@ If verification succeeds on the **primary** node but fails on the **secondary**
 this indicates that the object was corrupted during the replication process.
 Geo actively try to correct verification failures marking the repository to
 be resynced with a back-off period. If you want to reset the verification for
-these failures, so you should follow [these instructions][reset-verification].
+these failures, so you should follow [these instructions](background_verification.md#reset-verification-for-projects-where-verification-has-failed).
 
 If verification is lagging significantly behind replication, consider giving
 the node more time before scheduling a planned failover.
@@ -172,8 +172,10 @@ If the **primary** and **secondary** nodes have a checksum verification mismatch
 Automatic background verification doesn't cover attachments, LFS objects,
 job artifacts, and user uploads in file storage. You can keep track of the
-progress to include them in [ee-1430]. For now, you can verify their integrity
-manually by following [these instructions][foreground-verification] on both
+progress to include them in [Geo: Verify all replicated data](https://gitlab.com/groups/gitlab-org/-/epics/1430).
+
+For now, you can verify their integrity
+manually by following [these instructions](../../raketasks/check.md) on both
 nodes, and comparing the output between them.
 
 In GitLab EE 12.1, Geo calculates checksums for attachments, LFS objects, and
@@ -184,7 +186,3 @@ been synced before GitLab EE 12.1.
 Data in object storage is **not verified**, as the object store is responsible
 for ensuring the integrity of the data.
-
-[reset-verification]: background_verification.md#reset-verification-for-projects-where-verification-has-failed
-[foreground-verification]: ../../raketasks/check.md
-[ee-1430]: https://gitlab.com/groups/gitlab-org/-/epics/1430
@@ -14,7 +14,7 @@ If you have any doubts about the consistency of the data on this node, we recomm
 Since the former **primary** node will be out of sync with the current **primary** node, the first step is to bring the former **primary** node up to date. Note, deletion of data stored on disk like
 repositories and uploads will not be replayed when bringing the former **primary** node back
 into sync, which may result in increased disk usage.
-Alternatively, you can [set up a new **secondary** GitLab instance][setup-geo] to avoid this.
+Alternatively, you can [set up a new **secondary** GitLab instance](../replication/index.md#setup-instructions) to avoid this.
 
 To bring the former **primary** node up to date:
@@ -25,28 +25,28 @@ To bring the former **primary** node up to date:
    sudo gitlab-ctl start
    ```
 
-   NOTE: **Note:** If you [disabled the **primary** node permanently][disaster-recovery-disable-primary],
+   NOTE: **Note:** If you [disabled the **primary** node permanently](index.md#step-2-permanently-disable-the-primary-node),
    you need to undo those steps now. For Debian/Ubuntu you just need to run
    `sudo systemctl enable gitlab-runsvdir`. For CentOS 6, you need to install
    the GitLab instance from scratch and set it up as a **secondary** node by
-   following [Setup instructions][setup-geo]. In this case, you don't need to follow the next step.
+   following [Setup instructions](../replication/index.md#setup-instructions). In this case, you don't need to follow the next step.
 
    NOTE: **Note:** If you [changed the DNS records](index.md#step-4-optional-updating-the-primary-domain-dns-record)
    for this node during disaster recovery procedure you may need to [block
    all the writes to this node](planned_failover.md#prevent-updates-to-the-primary-node)
    during this procedure.
 
-1. [Setup database replication][database-replication]. Note that in this
+1. [Setup database replication](../replication/database.md). Note that in this
    case, **primary** node refers to the current **primary** node, and **secondary** node refers to the
    former **primary** node.
 
    If you have lost your original **primary** node, follow the
-   [setup instructions][setup-geo] to set up a new **secondary** node.
+   [setup instructions](../replication/index.md#setup-instructions) to set up a new **secondary** node.
 
 ## Promote the **secondary** node to **primary** node
 
 When the initial replication is complete and the **primary** node and **secondary** node are
-closely in sync, you can do a [planned failover].
+closely in sync, you can do a [planned failover](planned_failover.md).
 
 ## Restore the **secondary** node
@@ -54,8 +54,3 @@ If your objective is to have two nodes again, you need to bring your **secondary
 node back online as well by repeating the first step
 ([configure the former **primary** node to be a **secondary** node](#configure-the-former-primary-node-to-be-a-secondary-node))
 for the **secondary** node.
-
-[setup-geo]: ../replication/index.md#setup-instructions
-[database-replication]: ../replication/database.md
-[disaster-recovery-disable-primary]: index.md#step-2-permanently-disable-the-primary-node
-[planned failover]: planned_failover.md
@@ -4,11 +4,11 @@ Geo replicates your database, your Git repositories, and few other assets.
 We will support and replicate more data in the future, that will enable you to
 failover with minimal effort, in a disaster situation.
 
-See [Geo current limitations][geo-limitations] for more information.
+See [Geo current limitations](../replication/index.md#current-limitations) for more information.
 
 CAUTION: **Warning:**
 Disaster recovery for multi-secondary configurations is in **Alpha**.
-For the latest updates, check the multi-secondary [Disaster Recovery epic][gitlab-org&65].
+For the latest updates, check the multi-secondary [Disaster Recovery epic](https://gitlab.com/groups/gitlab-org/-/epics/65).
 
 ## Promoting a **secondary** Geo node in single-secondary configurations
@@ -22,7 +22,7 @@ immediately after following these instructions.
 ### Step 1. Allow replication to finish if possible
 
 If the **secondary** node is still replicating data from the **primary** node, follow
-[the planned failover docs][planned-failover] as closely as possible in
+[the planned failover docs](planned_failover.md) as closely as possible in
 order to avoid unnecessary data loss.
 
 ### Step 2. Permanently disable the **primary** node
@@ -235,7 +235,7 @@ secondary domain, like changing Git remotes and API URLs.
 Promoting a **secondary** node to **primary** node using the process above does not enable
 Geo on the new **primary** node.
 
-To bring a new **secondary** node online, follow the [Geo setup instructions][setup-geo].
+To bring a new **secondary** node online, follow the [Geo setup instructions](../replication/index.md#setup-instructions).
 
 ### Step 6. (Optional) Removing the secondary's tracking database
@@ -284,7 +284,7 @@ and after that you also need two extra steps.
    gitlab_rails['auto_migrate'] = false
   ```
 
-   (For more details about these settings you can read [Configure the primary server][configure-the-primary-server])
+   (For more details about these settings you can read [Configure the primary server](../replication/database.md#step-1-configure-the-primary-server))
 
 1. Save the file and reconfigure GitLab for the database listen changes and
    the replication slot changes to be applied.
@@ -317,7 +317,7 @@ and after that you also need two extra steps.
 ### Step 2. Initiate the replication process
 
 Now we need to make each **secondary** node listen to changes on the new **primary** node. To do that you need
-to [initiate the replication process][initiate-the-replication-process] again but this time
+to [initiate the replication process](../replication/database.md#step-3-initiate-the-replication-process) again but this time
 for another **primary** node. All the old replication settings will be overwritten.
 
 ## Troubleshooting
@@ -332,15 +332,6 @@ after a failover.
 If you still have access to the old **primary** node, you can follow the
 instructions in the
-[Upgrading to GitLab 10.5][updating-geo]
+[Upgrading to GitLab 10.5](../replication/version_specific_updates.md#updating-to-gitlab-105)
 section to resolve the error. Otherwise, the secret is lost and you'll need to
-[reset two-factor authentication for all users][sec-tfa].
+[reset two-factor authentication for all users](../../../security/two_factor_authentication.md#disabling-2fa-for-everyone).
-
-[gitlab-org&65]: https://gitlab.com/groups/gitlab-org/-/epics/65
-[geo-limitations]: ../replication/index.md#current-limitations
-[planned-failover]: planned_failover.md
-[setup-geo]: ../replication/index.md#setup-instructions
-[updating-geo]: ../replication/version_specific_updates.md#updating-to-gitlab-105
-[sec-tfa]: ../../../security/two_factor_authentication.md#disabling-2fa-for-everyone
-[initiate-the-replication-process]: ../replication/database.html#step-3-initiate-the-replication-process
-[configure-the-primary-server]: ../replication/database.html#step-1-configure-the-primary-server
@@ -12,7 +12,7 @@ length of this window is determined by your replication capacity - once the
 data loss.
 
 This document assumes you already have a fully configured, working Geo setup.
-Please read it and the [Disaster Recovery][disaster-recovery] failover
+Please read it and the [Disaster Recovery](index.md) failover
 documentation in full before proceeding. Planned failover is a major operation,
 and if performed incorrectly, there is a high risk of data loss. Consider
 rehearsing the procedure until you are comfortable with the necessary steps and
@@ -20,7 +20,7 @@ have a high degree of confidence in being able to perform them accurately.
 
 ## Not all data is automatically replicated
 
-If you are using any GitLab features that Geo [doesn't support][limitations],
+If you are using any GitLab features that Geo [doesn't support](../replication/index.md#current-limitations),
 you must make separate provisions to ensure that the **secondary** node has an
 up-to-date copy of any data associated with that feature. This may extend the
 required scheduled maintenance period significantly.
@@ -32,7 +32,7 @@ final transfer inside the maintenance window) will then transfer only the
 *changes* between the **primary** node and the **secondary** nodes.
 
 Repository-centric strategies for using `rsync` effectively can be found in the
-[moving repositories][moving-repositories] documentation; these strategies can
+[moving repositories](../../operations/moving_repositories.md) documentation; these strategies can
 be adapted for use with any other file-based data, such as GitLab Pages (to
 be found in `/var/opt/gitlab/gitlab-rails/shared/pages` if using Omnibus).
@@ -44,12 +44,12 @@ will go smoothly.
 ### Object storage
 
 If you have a large GitLab installation or cannot tolerate downtime, consider
-[migrating to Object Storage][os-conf] **before** scheduling a planned failover.
+[migrating to Object Storage](../replication/object_storage.md) **before** scheduling a planned failover.
 Doing so reduces both the length of the maintenance window, and the risk of data
 loss as a result of a poorly executed planned failover.
 
 In GitLab 12.4, you can optionally allow GitLab to manage replication of Object Storage for
-**secondary** nodes. For more information, see [Object Storage replication][os-conf].
+**secondary** nodes. For more information, see [Object Storage replication](../replication/object_storage.md).
 
 ### Review the configuration of each **secondary** node
@@ -113,7 +113,7 @@ or removing references to the missing data.
 ### Verify the integrity of replicated data
 
-This [content was moved to another location][background-verification].
+This [content was moved to another location](background_verification.md).
 
 ### Notify users of scheduled maintenance
@@ -126,7 +126,7 @@ will take to finish syncing. An example message would be:
 
 ## Prevent updates to the **primary** node
 
-Until a [read-only mode][ce-19739] is implemented, updates must be prevented
+Until a [read-only mode](https://gitlab.com/gitlab-org/gitlab-foss/issues/19739) is implemented, updates must be prevented
 from happening manually. Note that your **secondary** node still needs read-only
 access to the **primary** node during the maintenance window.
@@ -186,7 +186,7 @@ access to the **primary** node during the maintenance window.
 1. On the **secondary** node, navigate to **{admin}** **Admin Area >** **{monitor}** **Monitoring > Background Jobs > Queues**
    and wait for all the `geo` queues to drop to 0 queued and 0 running jobs.
-1. On the **secondary** node, use [these instructions][foreground-verification]
+1. On the **secondary** node, use [these instructions](../../raketasks/check.md)
    to verify the integrity of CI artifacts, LFS objects, and uploads in file
    storage.
@@ -195,24 +195,12 @@ At this point, your **secondary** node will contain an up-to-date copy of everyt
 
 ## Promote the **secondary** node
 
-Finally, follow the [Disaster Recovery docs][disaster-recovery] to promote the
+Finally, follow the [Disaster Recovery docs](index.md) to promote the
 **secondary** node to a **primary** node. This process will cause a brief outage on the **secondary** node, and users may need to log in again.
 
 Once it is completed, the maintenance window is over! Your new **primary** node will now
 begin to diverge from the old one. If problems do arise at this point, failing
-back to the old **primary** node [is possible][bring-primary-back], but likely to result
+back to the old **primary** node [is possible](bring_primary_back.md), but likely to result
 in the loss of any data uploaded to the new **primary** in the meantime.
 
 Don't forget to remove the broadcast message after failover is complete.
-
-[bring-primary-back]: bring_primary_back.md
-[ce-19739]: https://gitlab.com/gitlab-org/gitlab-foss/issues/19739
-[container-registry]: ../replication/container_registry.md
-[disaster-recovery]: index.md
-[ee-4930]: https://gitlab.com/gitlab-org/gitlab/issues/4930
-[ee-5064]: https://gitlab.com/gitlab-org/gitlab/issues/5064
-[foreground-verification]: ../../raketasks/check.md
-[background-verification]: background_verification.md
-[limitations]: ../replication/index.md#current-limitations
-[moving-repositories]: ../../operations/moving_repositories.md
-[os-conf]: ../replication/object_storage.md
@@ -5,7 +5,7 @@
 NOTE: **Note:**
 This is the final step in setting up a **secondary** Geo node. Stages of the
 setup process must be completed in the documented order.
-Before attempting the steps in this stage, [complete all prior stages][setup-geo-omnibus].
+Before attempting the steps in this stage, [complete all prior stages](index.md#using-omnibus-gitlab).
 
 The basic steps of configuring a **secondary** node are to:
@@ -77,7 +77,7 @@ they must be manually replicated to the **secondary** node.
 GitLab integrates with the system-installed SSH daemon, designating a user
 (typically named `git`) through which all access requests are handled.
 
-In a [Disaster Recovery] situation, GitLab system
+In a [Disaster Recovery](../disaster_recovery/index.md) situation, GitLab system
 administrators will promote a **secondary** node to the **primary** node. DNS records for the
 **primary** domain should also be updated to point to the new **primary** node
 (previously a **secondary** node). Doing so will avoid the need to update Git remotes and API URLs.
@@ -242,7 +242,7 @@ You can safely skip this step if your **primary** node uses a CA-issued HTTPS ce
 If your **primary** node is using a self-signed certificate for *HTTPS* support, you will
 need to add that certificate to the **secondary** node's trust store. Retrieve the
 certificate from the **primary** node and follow
-[these instructions][omnibus-ssl]
+[these instructions](https://docs.gitlab.com/omnibus/settings/ssl.html)
 on the **secondary** node.
 
 ### Step 6. Enable Git access over HTTP/HTTPS
@@ -283,7 +283,7 @@ Please note that disabling a **secondary** node will stop the synchronization pr
 Please note that if `git_data_dirs` is customized on the **primary** node for multiple
 repository shards you must duplicate the same configuration on each **secondary** node.
 
-Point your users to the ["Using a Geo Server" guide][using-geo].
+Point your users to the ["Using a Geo Server" guide](using_a_geo_server.md).
 
 Currently, this is what is synced:
@@ -334,10 +334,3 @@ See the [updating the Geo nodes document](updating_the_geo_nodes.md).
 ## Troubleshooting
 
 See the [troubleshooting document](troubleshooting.md).
-
-[setup-geo-omnibus]: index.md#using-omnibus-gitlab
-[Hashed Storage]: ../../repository_storage_types.md
-[Disaster Recovery]: ../disaster_recovery/index.md
-[gitlab-com/infrastructure#2821]: https://gitlab.com/gitlab-com/infrastructure/issues/2821
-[omnibus-ssl]: https://docs.gitlab.com/omnibus/settings/ssl.html
-[using-geo]: using_a_geo_server.md
@@ -8,7 +8,7 @@ configuration steps. In this case,
 NOTE: **Note:**
 The stages of the setup process must be completed in the documented order.
-Before attempting the steps in this stage, [complete all prior stages][toc].
+Before attempting the steps in this stage, [complete all prior stages](index.md#using-omnibus-gitlab).
 
 This document describes the minimal steps you have to take in order to
 replicate your **primary** GitLab database to a **secondary** node's database. You may
@@ -27,7 +27,7 @@ NOTE: **Note:**
 In database documentation, you may see "**primary**" being referenced as "master"
 and "**secondary**" as either "slave" or "standby" server (read-only).
 
-We recommend using [PostgreSQL replication slots][replication-slots-article]
+We recommend using [PostgreSQL replication slots](https://medium.com/@tk512/replication-slots-in-postgresql-b4b03d277c75)
 to ensure that the **primary** node retains all the data necessary for the **secondary** nodes to
 recover. See below for more details.
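For illustration only, on an Omnibus install the slot-related settings live in `/etc/gitlab/gitlab.rb` on the **primary** node; the snippet below is a sketch using the usual Geo attribute names, so check your own `gitlab.rb` and the linked guide for the authoritative values:

```ruby
# /etc/gitlab/gitlab.rb on the primary node (sketch)
postgresql['max_replication_slots'] = 1   # one replication slot per secondary node
postgresql['max_wal_senders'] = 10        # WAL sender processes available to secondaries
```

After editing, run `sudo gitlab-ctl reconfigure` so PostgreSQL picks up the new settings.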
@@ -97,7 +97,7 @@ There is an [issue where support is being discussed](https://gitlab.com/gitlab-o
    gitlab_rails['db_password'] = '<your_password_here>'
    ```
 
-1. Omnibus GitLab already has a [replication user]
+1. Omnibus GitLab already has a [replication user](https://wiki.postgresql.org/wiki/Streaming_Replication)
    called `gitlab_replicator`. You must set the password for this user manually.
    You will be prompted to enter a password:
@@ -280,7 +280,7 @@ There is an [issue where support is being discussed](https://gitlab.com/gitlab-o
    NOTE: **Note:**
    This step is important so we don't try to execute anything before the node is fully configured.
 
-1. [Check TCP connectivity][rake-maintenance] to the **primary** node's PostgreSQL server:
+1. [Check TCP connectivity](../../raketasks/maintenance.md) to the **primary** node's PostgreSQL server:
 
    ```shell
    gitlab-rake gitlab:tcp_check[<primary_node_ip>,5432]
@@ -508,8 +508,3 @@ work:
 ## Troubleshooting
 
 Read the [troubleshooting document](troubleshooting.md).
-
-[replication-slots-article]: https://medium.com/@tk512/replication-slots-in-postgresql-b4b03d277c75
-[replication user]:https://wiki.postgresql.org/wiki/Streaming_Replication
-[toc]: index.md#using-omnibus-gitlab
-[rake-maintenance]: ../../raketasks/maintenance.md
@@ -8,7 +8,7 @@ described, it is possible to adapt these instructions to your needs.
 ![Geo HA Diagram](../../high_availability/img/geo-ha-diagram.png)
 
-_[diagram source - GitLab employees only][diagram-source]_
+_[diagram source - GitLab employees only](https://docs.google.com/drawings/d/1z0VlizKiLNXVVVaERFwgsIOuEgjcUqDTWPdQYsE7Z4c/edit)_
 
 The topology above assumes that the **primary** and **secondary** Geo clusters
 are located in two separate locations, on their own virtual network
@@ -81,7 +81,7 @@ The following steps enable a GitLab cluster to serve as the **primary** node.
    gitlab_rails['auto_migrate'] = false
    ```
 
-   After making these changes, [reconfigure GitLab][gitlab-reconfigure] so the changes take effect.
+   After making these changes, [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.
 
 NOTE: **Note:** PostgreSQL and Redis should have already been disabled on the
 application servers, and connections from the application servers to those
@@ -193,7 +193,7 @@ the **primary** database. Use the following as a guide.
    geo_logcursor['enable'] = false
    ```
 
-   After making these changes, [reconfigure GitLab][gitlab-reconfigure] so the changes take effect.
+   After making these changes, [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.
 
 If using an external PostgreSQL instance, refer also to
 [Geo with external PostgreSQL instances](external_database.md).
@@ -264,7 +264,7 @@ Configure the tracking database.
    unicorn['enable'] = false
    ```
 
-   After making these changes, [reconfigure GitLab][gitlab-reconfigure] so the changes take effect.
+   After making these changes, [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.
 
 If using an external PostgreSQL instance, refer also to
 [Geo with external PostgreSQL instances](external_database.md).
@@ -342,7 +342,7 @@ servers connect to the databases.
 NOTE: **Note:**
 Make sure that current node IP is listed in `postgresql['md5_auth_cidr_addresses']` setting of your remote database.
 
-After making these changes [Reconfigure GitLab][gitlab-reconfigure] so the changes take effect.
+After making these changes [Reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.
 
 On the secondary the following GitLab frontend services will be enabled:
@@ -458,6 +458,3 @@ application servers above, with some changes to run only the `sidekiq` service:
   `sidekiq['enable'] = false`.
 
 These servers do not need to be attached to the load balancer.
-
-[diagram-source]: https://docs.google.com/drawings/d/1z0VlizKiLNXVVVaERFwgsIOuEgjcUqDTWPdQYsE7Z4c/edit
-[gitlab-reconfigure]: ../../restart_gitlab.md#omnibus-gitlab-reconfigure
@@ -261,7 +261,7 @@ default to 1. You may need to increase this value if you have more
 Be sure to restart PostgreSQL for this to take
 effect. See the [PostgreSQL replication
-setup][database-pg-replication] guide for more details.
+setup](database.md#postgresql-replication) guide for more details.
 
 ### Message: `FATAL: could not start WAL streaming: ERROR: replication slot "geo_secondary_my_domain_com" does not exist`?
@@ -273,7 +273,7 @@ process](database.md) on the **secondary** node .
 ### Message: "Command exceeded allowed execution time" when setting up replication?
 
-This may happen while [initiating the replication process][database-start-replication] on the **secondary** node,
+This may happen while [initiating the replication process](database.md#step-3-initiate-the-replication-process) on the **secondary** node,
 and indicates that your initial dataset is too large to be replicated in the default timeout (30 minutes).
 
 Re-run `gitlab-ctl replicate-geo-database`, but include a larger value for
@@ -767,9 +767,6 @@ reload of the FDW schema. To manually reload the FDW schema:
    SELECT * FROM gitlab_secondary.projects limit 1;
    ```
 
-[database-start-replication]: database.md#step-3-initiate-the-replication-process
-[database-pg-replication]: database.md#postgresql-replication
-
 ### "Geo database has an outdated FDW remote schema" error
 
 GitLab can error with a `Geo database has an outdated FDW remote schema` message.
...
@@ -2,7 +2,7 @@
 
 # Using a Geo Server **(PREMIUM ONLY)**
 
-After you set up the [database replication and configure the Geo nodes][req], use your closest GitLab node as you would a normal standalone GitLab instance.
+After you set up the [database replication and configure the Geo nodes](index.md#setup-instructions), use your closest GitLab node as you would a normal standalone GitLab instance.
 
 Pushing directly to a **secondary** node (for both HTTP, SSH including Git LFS) was [introduced](https://about.gitlab.com/releases/2018/09/22/gitlab-11-3-released/) in [GitLab Premium](https://about.gitlab.com/pricing/#self-managed) 11.3.
@@ -18,5 +18,3 @@ remote: ssh://git@primary.geo/user/repo.git
 remote:
 Everything up-to-date
 ```
-
-[req]: index.md#setup-instructions
@@ -83,7 +83,7 @@ deploy the bundled PostgreSQL.
 
    NOTE: **Note:** The role `postgres_role` was introduced with GitLab 10.3
 
-1. [Reconfigure GitLab] for the changes to take effect.
+1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 1. Note the PostgreSQL node's IP address or hostname, port, and
    plain text password. These will be necessary when configuring the GitLab
    application servers later.
@@ -338,7 +338,7 @@ When installing the GitLab package, do not supply `EXTERNAL_URL` value.
    repmgr['master_on_initialization'] = false
    ```
 
-1. [Reconfigure GitLab] for the changes to take effect.
+1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 1. [Enable Monitoring](#enable-monitoring)
 
 > Please note:
@@ -487,7 +487,7 @@ attributes set, but the following need to be set.
    gitlab_rails['auto_migrate'] = false
    ```
 
-1. [Reconfigure GitLab] for the changes to take effect.
+1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ##### Application node post-configuration
@@ -554,7 +554,7 @@ consul['configuration'] = {
 consul['monitoring_service_discovery'] = true
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ##### Example recommended setup for PgBouncer servers
@@ -584,7 +584,7 @@ consul['configuration'] = {
 consul['monitoring_service_discovery'] = true
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ##### Internal load balancer setup
@@ -625,7 +625,7 @@ consul['configuration'] = {
 consul['monitoring_service_discovery'] = true
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ###### Secondary nodes
@@ -638,7 +638,7 @@ configuration:
 repmgr['master_on_initialization'] = false
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ##### Example recommended setup for application server
@@ -673,7 +673,7 @@ consul['configuration'] = {
 }
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ##### Example recommended setup manual steps
@@ -778,7 +778,7 @@ consul['configuration'] = {
 }
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ###### Secondary nodes
@@ -826,7 +826,7 @@ consul['configuration'] = {
 }
 ```
 
-[Reconfigure Omnibus GitLab][reconfigure GitLab] for the changes to take effect.
+[Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ##### Example minimal setup manual steps
@@ -1005,7 +1005,7 @@ the previous section:
 1. On each PgBouncer node, edit `/etc/gitlab/gitlab.rb`:
    1. Ensure `gitlab_rails['db_password']` is set to the plaintext password for
       the `gitlab` database user
-1. [Reconfigure GitLab] for the changes to take effect
+1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect
 
 ## Enable Monitoring
@@ -1047,7 +1047,7 @@ steps to fix the problem:
 1. On the master database node, connect to the database prompt - `gitlab-psql -d template1`
 1. Delete the `gitlab-consul` user - `DROP USER "gitlab-consul";`
 1. Exit the database prompt - `\q`
-1. [Reconfigure GitLab] and the user will be re-added with the proper permissions.
+1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) and the user will be re-added with the proper permissions.
 1. Change to the `gitlab-consul` user - `su - gitlab-consul`
 1. Try the check command again - `gitlab-ctl repmgr-check-master`.
@@ -1079,7 +1079,7 @@ To fix the problem, add the IP address to `/etc/gitlab/gitlab.rb`.
 postgresql['trust_auth_cidr_addresses'] = %w(123.123.123.123/32 <other_cidrs>)
 ```
 
-[Reconfigure GitLab] for the changes to take effect.
+[Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
 
 ### Issues with other components
@@ -1101,5 +1101,3 @@ Read more on high-availability configuration:
 1. [Configure the GitLab application servers](gitlab.md)
 1. [Configure the load balancers](load_balancer.md)
 1. [Manage the bundled Consul cluster](consul.md)
-
-[reconfigure GitLab]: ../restart_gitlab.md#omnibus-gitlab-reconfigure
@@ -229,7 +229,7 @@ following are the 4 locations need to be shared:
 Other GitLab directories should not be shared between nodes. They contain
 node-specific files and GitLab code that does not need to be shared. To ship
 logs to a central location consider using remote syslog. GitLab Omnibus packages
-provide configuration for [UDP log shipping][udp-log-shipping].
+provide configuration for [UDP log shipping](https://docs.gitlab.com/omnibus/settings/logs.html#udp-log-shipping-gitlab-enterprise-edition-only).
 
 Having multiple NFS mounts will require manually making sure the data directories
 are empty before attempting a restore. Read more about the
@@ -244,8 +244,6 @@ Read more on high-availability configuration:
 1. [Configure the GitLab application servers](gitlab.md)
 1. [Configure the load balancers](load_balancer.md)
 
-[udp-log-shipping]: https://docs.gitlab.com/omnibus/settings/logs.html#udp-log-shipping-gitlab-enterprise-edition-only "UDP log shipping"
-
 <!-- ## Troubleshooting
 
 Include any troubleshooting steps that you can foresee. If you know beforehand what issues
...
...@@ -74,7 +74,7 @@ Omnibus: ...@@ -74,7 +74,7 @@ Omnibus:
gitlab_rails['enable'] = false gitlab_rails['enable'] = false
``` ```
1. [Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. 1. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
1. Note the Redis node's IP address or hostname, port, and 1. Note the Redis node's IP address or hostname, port, and
Redis password. These will be necessary when configuring the GitLab Redis password. These will be necessary when configuring the GitLab
application servers later. application servers later.
...@@ -88,13 +88,13 @@ Continue configuration of other components by going back to the ...@@ -88,13 +88,13 @@ Continue configuration of other components by going back to the
### High Availability with GitLab Omnibus **(PREMIUM ONLY)** ### High Availability with GitLab Omnibus **(PREMIUM ONLY)**
> Experimental Redis Sentinel support was [introduced in GitLab 8.11][ce-1877]. > Experimental Redis Sentinel support was [introduced in GitLab 8.11](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/1877).
Starting with 8.14, Redis Sentinel is no longer experimental. Starting with 8.14, Redis Sentinel is no longer experimental.
If you've used it with versions `< 8.14` before, please check the updated If you've used it with versions `< 8.14` before, please check the updated
documentation here. documentation here.
High Availability with [Redis] is possible using a **Master** x **Slave** High Availability with [Redis](https://redis.io/) is possible using a **Master** x **Slave**
topology with a [Redis Sentinel][sentinel] service to watch and automatically topology with a [Redis Sentinel](https://redis.io/topics/sentinel) service to watch and automatically
start the failover procedure. start the failover procedure.
You can choose to install and manage Redis and Sentinel yourself, use You can choose to install and manage Redis and Sentinel yourself, use
...@@ -107,7 +107,7 @@ Omnibus GitLab packages. ...@@ -107,7 +107,7 @@ Omnibus GitLab packages.
> [Redis Security](https://redis.io/topics/security) documentation for more > [Redis Security](https://redis.io/topics/security) documentation for more
> information. We recommend using a combination of a Redis password and tight > information. We recommend using a combination of a Redis password and tight
> firewall rules to secure your Redis service. > firewall rules to secure your Redis service.
> - You are highly encouraged to read the [Redis Sentinel][sentinel] documentation > - You are highly encouraged to read the [Redis Sentinel](https://redis.io/topics/sentinel) documentation
> before configuring Redis HA with GitLab to fully understand the topology and > before configuring Redis HA with GitLab to fully understand the topology and
> architecture. > architecture.
> - This is the documentation for the Omnibus GitLab packages. For installations > - This is the documentation for the Omnibus GitLab packages. For installations
...@@ -296,7 +296,7 @@ multiple ways to configure Redis HA. Omnibus GitLab packages have Redis and/or ...@@ -296,7 +296,7 @@ multiple ways to configure Redis HA. Omnibus GitLab packages have Redis and/or
Redis Sentinel bundled with them so you only need to focus on configuration. Redis Sentinel bundled with them so you only need to focus on configuration.
Pick the one that suits your needs. Pick the one that suits your needs.
- [Installations from source][source]: You need to install Redis and Sentinel - [Installations from source](../../install/installation.md): You need to install Redis and Sentinel
yourself. Use the [Redis HA installation from source](redis_source.md) yourself. Use the [Redis HA installation from source](redis_source.md)
documentation. documentation.
- [Omnibus GitLab **Community Edition** (CE) package](https://about.gitlab.com/install/?version=ce): Redis is bundled, so you - [Omnibus GitLab **Community Edition** (CE) package](https://about.gitlab.com/install/?version=ce): Redis is bundled, so you
...@@ -341,7 +341,7 @@ The prerequisites for a HA Redis setup are the following: ...@@ -341,7 +341,7 @@ The prerequisites for a HA Redis setup are the following:
change the default ones). change the default ones).
1. The server that hosts the GitLab application must be able to access the 1. The server that hosts the GitLab application must be able to access the
Redis nodes. Redis nodes.
1. Protect the nodes from access from external networks ([Internet][it]), using 1. Protect the nodes from access from external networks ([Internet](https://gitlab.com/gitlab-org/gitlab-foss/uploads/c4cc8cd353604bd80315f9384035ff9e/The_Internet_IT_Crowd.png)), using
a firewall. a firewall.
### Step 1. Configuring the master Redis instance ### Step 1. Configuring the master Redis instance
...@@ -381,7 +381,7 @@ The prerequisites for a HA Redis setup are the following: ...@@ -381,7 +381,7 @@ The prerequisites for a HA Redis setup are the following:
gitlab_rails['auto_migrate'] = false gitlab_rails['auto_migrate'] = false
``` ```
1. [Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. 1. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
> Note: You can specify multiple roles like sentinel and Redis as: > Note: You can specify multiple roles like sentinel and Redis as:
> `roles ['redis_sentinel_role', 'redis_master_role']`. Read more about high > `roles ['redis_sentinel_role', 'redis_master_role']`. Read more about high
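For illustration, a minimal `/etc/gitlab/gitlab.rb` sketch for the master node, combining the role above with the usual bind address, port, and password settings (the IP and password are placeholders, not values from this commit):

```ruby
# Master Redis node -- illustrative values only
redis_master_role['enable'] = true
redis['bind'] = '10.0.0.1'            # IP the master listens on
redis['port'] = 6379
redis['password'] = 'redis-password-goes-here'
gitlab_rails['auto_migrate'] = false  # only the primary application node runs migrations
```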
...@@ -429,7 +429,7 @@ The prerequisites for a HA Redis setup are the following: ...@@ -429,7 +429,7 @@ The prerequisites for a HA Redis setup are the following:
sudo touch /etc/gitlab/skip-auto-reconfigure sudo touch /etc/gitlab/skip-auto-reconfigure
``` ```
1. [Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. 1. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
1. Go through the steps again for all the other slave nodes. 1. Go through the steps again for all the other slave nodes.
> Note: You can specify multiple roles like sentinel and Redis as: > Note: You can specify multiple roles like sentinel and Redis as:
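As a sketch, a slave node's `/etc/gitlab/gitlab.rb` typically mirrors the master's password and points back at the master's IP and port (all values below are placeholders):

```ruby
# Slave (replica) Redis node -- illustrative values only
redis_slave_role['enable'] = true
redis['bind'] = '10.0.0.2'
redis['port'] = 6379
redis['password'] = 'redis-password-goes-here'
redis['master_ip'] = '10.0.0.1'       # IP of the current master node
redis['master_port'] = 6379
redis['master_password'] = 'redis-password-goes-here'
gitlab_rails['auto_migrate'] = false
```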
...@@ -561,7 +561,7 @@ multiple machines with the Sentinel daemon. ...@@ -561,7 +561,7 @@ multiple machines with the Sentinel daemon.
Only the primary GitLab application server should handle migrations. Only the primary GitLab application server should handle migrations.
1. [Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. 1. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
1. Go through the steps again for all the other Sentinel nodes. 1. Go through the steps again for all the other Sentinel nodes.
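For reference, a hedged sketch of the Sentinel-specific settings in `/etc/gitlab/gitlab.rb` on each Sentinel node; the master name, IPs, and quorum are placeholders and must be identical across all Sentinel nodes:

```ruby
# Sentinel node -- illustrative values only
redis_sentinel_role['enable'] = true
redis['master_name'] = 'gitlab-redis'            # logical name the Sentinels agree on
redis['master_ip'] = '10.0.0.1'
redis['master_password'] = 'redis-password-goes-here'
sentinel['bind'] = '10.0.0.1'
sentinel['port'] = 26379
sentinel['quorum'] = 2                           # Sentinels that must agree on a failover
gitlab_rails['auto_migrate'] = false
```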
### Step 4. Configuring the GitLab application ### Step 4. Configuring the GitLab application
...@@ -598,7 +598,7 @@ which ideally should not have Redis or Sentinels on it for a HA setup. ...@@ -598,7 +598,7 @@ which ideally should not have Redis or Sentinels on it for a HA setup.
] ]
``` ```
1. [Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. 1. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
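Putting it together, the application node ends up with something along these lines; hosts and ports are placeholders, and the master name and password must match what the Sentinels advertise:

```ruby
# GitLab application node -- illustrative values only
redis['master_name'] = 'gitlab-redis'
redis['master_password'] = 'redis-password-goes-here'
gitlab_rails['redis_sentinels'] = [
  { 'host' => '10.0.0.1', 'port' => 26379 },
  { 'host' => '10.0.0.2', 'port' => 26379 },
  { 'host' => '10.0.0.3', 'port' => 26379 }
]
```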
## Switching from an existing single-machine installation to Redis HA ## Switching from an existing single-machine installation to Redis HA
...@@ -677,7 +677,7 @@ sentinel['quorum'] = 2 ...@@ -677,7 +677,7 @@ sentinel['quorum'] = 2
# sentinel['failover_timeout'] = 60000 # sentinel['failover_timeout'] = 60000
``` ```
[Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
### Example configuration for Redis slave 1 and Sentinel 2 ### Example configuration for Redis slave 1 and Sentinel 2
...@@ -699,7 +699,7 @@ sentinel['quorum'] = 2 ...@@ -699,7 +699,7 @@ sentinel['quorum'] = 2
# sentinel['failover_timeout'] = 60000 # sentinel['failover_timeout'] = 60000
``` ```
[Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
### Example configuration for Redis slave 2 and Sentinel 3 ### Example configuration for Redis slave 2 and Sentinel 3
...@@ -721,7 +721,7 @@ sentinel['quorum'] = 2 ...@@ -721,7 +721,7 @@ sentinel['quorum'] = 2
# sentinel['failover_timeout'] = 60000 # sentinel['failover_timeout'] = 60000
``` ```
[Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
### Example configuration for the GitLab application ### Example configuration for the GitLab application
...@@ -737,7 +737,7 @@ gitlab_rails['redis_sentinels'] = [ ...@@ -737,7 +737,7 @@ gitlab_rails['redis_sentinels'] = [
] ]
``` ```
[Reconfigure Omnibus GitLab][reconfigure] for the changes to take effect. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
## Enable Monitoring ## Enable Monitoring
...@@ -862,7 +862,7 @@ mailroom['enable'] = false ...@@ -862,7 +862,7 @@ mailroom['enable'] = false
redis['master'] = false redis['master'] = false
``` ```
You can find the relevant attributes defined in [`gitlab_rails.rb`][omnifile]. You can find the relevant attributes defined in [`gitlab_rails.rb`](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/libraries/gitlab_rails.rb).
## Troubleshooting ## Troubleshooting
...@@ -929,7 +929,7 @@ repl_backlog_histlen:0 ...@@ -929,7 +929,7 @@ repl_backlog_histlen:0
If you get an error like: `Redis::CannotConnectError: No sentinels available.`, If you get an error like: `Redis::CannotConnectError: No sentinels available.`,
there may be something wrong with your configuration files or it can be related there may be something wrong with your configuration files or it can be related
to [this issue][gh-531]. to [this issue](https://github.com/redis/redis-rb/issues/531).
You must make sure you are defining the same value in `redis['master_name']` You must make sure you are defining the same value in `redis['master_name']`
and `redis['master_password']` as you defined for your sentinel node. and `redis['master_password']` as you defined for your sentinel node.
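As a quick, hedged sanity check from a Rails console on the application node, you can ask the resolved connection for its replication role; `Gitlab::Redis::SharedState` is assumed here as the Sentinel-backed connection wrapper:

```ruby
# gitlab-rails console -- a healthy Sentinel setup resolves to the elected master
Gitlab::Redis::SharedState.with do |redis|
  puts redis.info('replication')['role'] # expected: "master"
end
```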
...@@ -1001,14 +1001,3 @@ Read more on High Availability: ...@@ -1001,14 +1001,3 @@ Read more on High Availability:
1. [Configure NFS](nfs.md) 1. [Configure NFS](nfs.md)
1. [Configure the GitLab application servers](gitlab.md) 1. [Configure the GitLab application servers](gitlab.md)
1. [Configure the load balancers](load_balancer.md) 1. [Configure the load balancers](load_balancer.md)
[ce-1877]: https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/1877
[restart]: ../restart_gitlab.md#installations-from-source
[reconfigure]: ../restart_gitlab.md#omnibus-gitlab-reconfigure
[gh-531]: https://github.com/redis/redis-rb/issues/531
[gh-534]: https://github.com/redis/redis-rb/issues/534
[redis]: https://redis.io/
[sentinel]: https://redis.io/topics/sentinel
[omnifile]: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-cookbooks/gitlab/libraries/gitlab_rails.rb
[source]: ../../install/installation.md
[it]: https://gitlab.com/gitlab-org/gitlab-foss/uploads/c4cc8cd353604bd80315f9384035ff9e/The_Internet_IT_Crowd.png
...@@ -182,7 +182,7 @@ which ideally should not have Redis or Sentinels in the same machine for a HA ...@@ -182,7 +182,7 @@ which ideally should not have Redis or Sentinels in the same machine for a HA
setup: setup:
1. Edit `/home/git/gitlab/config/resque.yml` following the example in 1. Edit `/home/git/gitlab/config/resque.yml` following the example in
[resque.yml.example][resque], and uncomment the Sentinel lines, pointing to [resque.yml.example](https://gitlab.com/gitlab-org/gitlab/blob/master/config/resque.yml.example), and uncomment the Sentinel lines, pointing to
the correct server credentials: the correct server credentials:
```yaml ```yaml
...@@ -201,7 +201,7 @@ setup: ...@@ -201,7 +201,7 @@ setup:
port: 26379 # point to sentinel, not to redis port port: 26379 # point to sentinel, not to redis port
``` ```
1. [Restart GitLab][restart] for the changes to take effect. 1. [Restart GitLab](../restart_gitlab.md#installations-from-source) for the changes to take effect.
## Example of minimal configuration with 1 master, 2 slaves and 3 Sentinels ## Example of minimal configuration with 1 master, 2 slaves and 3 Sentinels
...@@ -211,7 +211,7 @@ to each other using these IPs. ...@@ -211,7 +211,7 @@ to each other using these IPs.
In real-world usage, you would also set up firewall rules to prevent In real-world usage, you would also set up firewall rules to prevent
unauthorized access from other machines, and block traffic from the unauthorized access from other machines, and block traffic from the
outside ([Internet][it]). outside ([Internet](https://gitlab.com/gitlab-org/gitlab-foss/uploads/c4cc8cd353604bd80315f9384035ff9e/The_Internet_IT_Crowd.png)).
For this example, **Sentinel 1** will be configured in the same machine as the For this example, **Sentinel 1** will be configured in the same machine as the
**Redis Master**, **Sentinel 2** and **Sentinel 3** in the same machines as the **Redis Master**, **Sentinel 2** and **Sentinel 3** in the same machines as the
...@@ -326,7 +326,7 @@ or a failover promotes a different **Master** node. ...@@ -326,7 +326,7 @@ or a failover promotes a different **Master** node.
port: 26379 # point to sentinel, not to redis port port: 26379 # point to sentinel, not to redis port
``` ```
1. [Restart GitLab][restart] for the changes to take effect. 1. [Restart GitLab](../restart_gitlab.md#installations-from-source) for the changes to take effect.
## Troubleshooting ## Troubleshooting
...@@ -336,7 +336,7 @@ the things that are specific to a source installation. ...@@ -336,7 +336,7 @@ the things that are specific to a source installation.
If you get an error in GitLab like `Redis::CannotConnectError: No sentinels available.`, If you get an error in GitLab like `Redis::CannotConnectError: No sentinels available.`,
there may be something wrong with your configuration files or it can be related there may be something wrong with your configuration files or it can be related
to [this upstream issue][gh-531]. to [this upstream issue](https://github.com/redis/redis-rb/issues/531).
You must make sure that `resque.yml` and `sentinel.conf` are configured correctly, You must make sure that `resque.yml` and `sentinel.conf` are configured correctly,
otherwise `redis-rb` will not work properly. otherwise `redis-rb` will not work properly.
...@@ -369,9 +369,3 @@ production: ...@@ -369,9 +369,3 @@ production:
``` ```
When in doubt, please read [Redis Sentinel documentation](https://redis.io/topics/sentinel). When in doubt, please read [Redis Sentinel documentation](https://redis.io/topics/sentinel).
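If you want to rule out the application layer entirely, here is a hedged sketch using `redis-rb`'s Sentinel support directly; the master name, password, and Sentinel addresses are the example values used throughout this guide, not required ones:

```ruby
# Resolve the current master through Sentinel with redis-rb.
require 'redis'

redis = Redis.new(
  url: 'redis://:redis-password-goes-here@gitlab-redis', # host part is the master name
  sentinels: [
    { host: '10.0.0.1', port: 26379 },
    { host: '10.0.0.2', port: 26379 },
    { host: '10.0.0.3', port: 26379 }
  ],
  role: :master
)

puts redis.info('replication')['role'] # expected: "master"
```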
[gh-531]: https://github.com/redis/redis-rb/issues/531
[downloads]: https://about.gitlab.com/downloads
[restart]: ../restart_gitlab.md#installations-from-source
[it]: https://gitlab.com/gitlab-org/gitlab-foss/uploads/c4cc8cd353604bd80315f9384035ff9e/The_Internet_IT_Crowd.png
[resque]: https://gitlab.com/gitlab-org/gitlab/blob/master/config/resque.yml.example
# GraphQL API # GraphQL API
> - [Introduced][ce-19008] in GitLab 11.0 (enabled by feature flag `graphql`). > - [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/19008) in GitLab 11.0 (enabled by feature flag `graphql`).
> - [Always enabled](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/30444) > - [Always enabled](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/30444) in GitLab 12.1.
in GitLab 12.1.
## Getting Started ## Getting Started
...@@ -87,6 +86,3 @@ Machine-readable versions are also available: ...@@ -87,6 +86,3 @@ Machine-readable versions are also available:
- [JSON format](reference/gitlab_schema.json) - [JSON format](reference/gitlab_schema.json)
- [IDL format](reference/gitlab_schema.graphql) - [IDL format](reference/gitlab_schema.graphql)
[ce-19008]: https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/19008
[features-api]: ../features.md
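As a sketch of a first call, any HTTP client can POST a query to the single `/api/graphql` endpoint; the host, the token placeholder, and the `currentUser` example field are illustrative assumptions rather than part of this change:

```ruby
require 'net/http'
require 'json'
require 'uri'

uri = URI('https://gitlab.example.com/api/graphql')

request = Net::HTTP::Post.new(uri)
request['Content-Type']  = 'application/json'
request['Authorization'] = 'Bearer <personal-access-token>'
request.body = { query: '{ currentUser { name username } }' }.to_json

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  http.request(request)
end

puts JSON.parse(response.body)
```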
...@@ -3506,6 +3506,52 @@ type Group { ...@@ -3506,6 +3506,52 @@ type Group {
""" """
visibility: String visibility: String
"""
Vulnerabilities reported on the projects in the group and its subgroups.
Available only when feature flag `first_class_vulnerabilities` is enabled
"""
vulnerabilities(
"""
Returns the elements in the list that come after the specified cursor.
"""
after: String
"""
Returns the elements in the list that come before the specified cursor.
"""
before: String
"""
Returns the first _n_ elements from the list.
"""
first: Int
"""
Returns the last _n_ elements from the list.
"""
last: Int
"""
Filter vulnerabilities by project
"""
projectId: [ID!]
"""
Filter vulnerabilities by report type
"""
reportType: [VulnerabilityReportType!]
"""
Filter vulnerabilities by severity
"""
severity: [VulnerabilitySeverity!]
"""
Filter vulnerabilities by state
"""
state: [VulnerabilityState!]
): VulnerabilityConnection
""" """
Web URL of the group Web URL of the group
""" """
......
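A hedged sketch of how a client could exercise the new field once the `first_class_vulnerabilities` flag is enabled; the group path, the selected vulnerability fields, and the enum values are illustrative assumptions:

```ruby
# Illustrative GraphQL query string for Group.vulnerabilities,
# filtered by severity and paginated with `first`.
VULNERABILITIES_QUERY = <<~GRAPHQL
  {
    group(fullPath: "my-group") {
      vulnerabilities(severity: [CRITICAL, HIGH], first: 20) {
        nodes {
          title
          severity
          state
        }
      }
    }
  }
GRAPHQL
```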
...@@ -9932,6 +9932,131 @@ ...@@ -9932,6 +9932,131 @@
"isDeprecated": false, "isDeprecated": false,
"deprecationReason": null "deprecationReason": null
}, },
{
"name": "vulnerabilities",
"description": "Vulnerabilities reported on the projects in the group and its subgroups. Available only when feature flag `first_class_vulnerabilities` is enabled",
"args": [
{
"name": "projectId",
"description": "Filter vulnerabilities by project",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "SCALAR",
"name": "ID",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "reportType",
"description": "Filter vulnerabilities by report type",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "ENUM",
"name": "VulnerabilityReportType",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "severity",
"description": "Filter vulnerabilities by severity",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "ENUM",
"name": "VulnerabilitySeverity",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "state",
"description": "Filter vulnerabilities by state",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "ENUM",
"name": "VulnerabilityState",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "after",
"description": "Returns the elements in the list that come after the specified cursor.",
"type": {
"kind": "SCALAR",
"name": "String",
"ofType": null
},
"defaultValue": null
},
{
"name": "before",
"description": "Returns the elements in the list that come before the specified cursor.",
"type": {
"kind": "SCALAR",
"name": "String",
"ofType": null
},
"defaultValue": null
},
{
"name": "first",
"description": "Returns the first _n_ elements from the list.",
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"defaultValue": null
},
{
"name": "last",
"description": "Returns the last _n_ elements from the list.",
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"defaultValue": null
}
],
"type": {
"kind": "OBJECT",
"name": "VulnerabilityConnection",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{ {
"name": "webUrl", "name": "webUrl",
"description": "Web URL of the group", "description": "Web URL of the group",
......
...@@ -36,9 +36,9 @@ In the rare case that you need the feature flag to be on automatically, use ...@@ -36,9 +36,9 @@ In the rare case that you need the feature flag to be on automatically, use
Feature.enabled?(:feature_flag, project, default_enabled: true) Feature.enabled?(:feature_flag, project, default_enabled: true)
``` ```
The [`Project#feature_available?`][project-fa], The [`Project#feature_available?`](https://gitlab.com/gitlab-org/gitlab/blob/4cc1c62918aa4c31750cb21dfb1a6c3492d71080/app/models/project_feature.rb#L63-68),
[`Namespace#feature_available?`][namespace-fa] (EE), and [`Namespace#feature_available?`](https://gitlab.com/gitlab-org/gitlab/blob/4cc1c62918aa4c31750cb21dfb1a6c3492d71080/ee/app/models/ee/namespace.rb#L71-85) (EE), and
[`License.feature_available?`][license-fa] (EE) methods all implicitly check for [`License.feature_available?`](https://gitlab.com/gitlab-org/gitlab/blob/4cc1c62918aa4c31750cb21dfb1a6c3492d71080/ee/app/models/license.rb#L293-300) (EE) methods all implicitly check for
a by default enabled feature flag with the same name as the provided argument. a by default enabled feature flag with the same name as the provided argument.
For example if a feature is license-gated, there's no need to add an additional For example if a feature is license-gated, there's no need to add an additional
...@@ -49,10 +49,6 @@ feature flag once the feature has reached general availability. ...@@ -49,10 +49,6 @@ feature flag once the feature has reached general availability.
You'd still want to use an explicit `Feature.enabled?` check if your new feature You'd still want to use an explicit `Feature.enabled?` check if your new feature
isn't gated by a License or Plan. isn't gated by a License or Plan.
[project-fa]: https://gitlab.com/gitlab-org/gitlab/blob/4cc1c62918aa4c31750cb21dfb1a6c3492d71080/app/models/project_feature.rb#L63-68
[namespace-fa]: https://gitlab.com/gitlab-org/gitlab/blob/4cc1c62918aa4c31750cb21dfb1a6c3492d71080/ee/app/models/ee/namespace.rb#L71-85
[license-fa]: https://gitlab.com/gitlab-org/gitlab/blob/4cc1c62918aa4c31750cb21dfb1a6c3492d71080/ee/app/models/license.rb#L293-300
**An important side-effect of the implicit feature flags mentioned above is that **An important side-effect of the implicit feature flags mentioned above is that
unless the feature is explicitly disabled or limited to a percentage of users, unless the feature is explicitly disabled or limited to a percentage of users,
the feature flag check will default to `true`.** the feature flag check will default to `true`.**
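A short sketch of the difference, using hypothetical feature names:

```ruby
# License-gated feature (EE): feature_available? already consults a
# default-enabled feature flag of the same name, so no extra check is needed.
if project.feature_available?(:my_licensed_feature)
  # run the licensed feature
end

# Feature that is not gated by a License or Plan: an explicit,
# disabled-by-default feature flag check is still required.
if Feature.enabled?(:my_unlicensed_feature, project)
  # run the experimental feature
end
```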
......
...@@ -5,13 +5,17 @@ Workhorse and GitLab-Shell. ...@@ -5,13 +5,17 @@ Workhorse and GitLab-Shell.
## Deep Dive ## Deep Dive
In May 2019, Bob Van Landuyt hosted a [Deep Dive] on GitLab's [Gitaly project] and how to contribute to it as a Ruby developer, to share his domain specific knowledge with anyone who may work in this part of the code base in the future. You can find the [recording on YouTube], and the slides on [Google Slides] and in [PDF]. Everything covered in this deep dive was accurate as of GitLab 11.11, and while specific details may have changed since then, it should still serve as a good introduction. In May 2019, Bob Van Landuyt hosted a [Deep Dive](https://gitlab.com/gitlab-org/create-stage/issues/1)
on GitLab's [Gitaly project](https://gitlab.com/gitlab-org/gitaly) and how to contribute to it as a
Ruby developer, to share his domain specific knowledge with anyone who may work in this part of the
code base in the future.
[Deep Dive]: https://gitlab.com/gitlab-org/create-stage/issues/1 You can find the [recording on YouTube](https://www.youtube.com/watch?v=BmlEWFS8ORo), and the slides
[Gitaly project]: https://gitlab.com/gitlab-org/gitaly on [Google Slides](https://docs.google.com/presentation/d/1VgRbiYih9ODhcPnL8dS0W98EwFYpJ7GXMPpX-1TM6YE/edit)
[recording on YouTube]: https://www.youtube.com/watch?v=BmlEWFS8ORo and in [PDF](https://gitlab.com/gitlab-org/create-stage/uploads/a4fdb1026278bda5c1c5bb574379cf80/Create_Deep_Dive__Gitaly_for_Create_Ruby_Devs.pdf).
[Google Slides]: https://docs.google.com/presentation/d/1VgRbiYih9ODhcPnL8dS0W98EwFYpJ7GXMPpX-1TM6YE/edit
[PDF]: https://gitlab.com/gitlab-org/create-stage/uploads/a4fdb1026278bda5c1c5bb574379cf80/Create_Deep_Dive__Gitaly_for_Create_Ruby_Devs.pdf Everything covered in this deep dive was accurate as of GitLab 11.11, and while specific details may
have changed since then, it should still serve as a good introduction.
## Beginner's guide ## Beginner's guide
......
...@@ -8,8 +8,7 @@ The version of the chart used to provision PostgreSQL: ...@@ -8,8 +8,7 @@ The version of the chart used to provision PostgreSQL:
- Is 0.7.1 in GitLab 12.8 and earlier. - Is 0.7.1 in GitLab 12.8 and earlier.
- Can be set to any version from 0.7.1 to 8.2.1 in GitLab 12.9 and later. - Can be set to any version from 0.7.1 to 8.2.1 in GitLab 12.9 and later.
GitLab encourages users to migrate their database to the GitLab encourages users to migrate their database to the newer PostgreSQL chart.
newer PostgreSQL chart.
This guide provides instructions on how to migrate your PostgreSQL database, which This guide provides instructions on how to migrate your PostgreSQL database, which
involves: involves:
......
...@@ -117,24 +117,4 @@ describe NavHelper, :do_not_mock_admin_mode do ...@@ -117,24 +117,4 @@ describe NavHelper, :do_not_mock_admin_mode do
it { is_expected.to all(be_a(String)) } it { is_expected.to all(be_a(String)) }
end end
describe '.show_user_notification_dot?' do
subject { helper.show_user_notification_dot? }
context 'when experiment is disabled' do
before do
allow(helper).to receive(:experiment_enabled?).with(:ci_notification_dot).and_return(false)
end
it { is_expected.to be_falsey }
end
context 'when experiment is enabled' do
before do
allow(helper).to receive(:experiment_enabled?).with(:ci_notification_dot).and_return(true)
end
it { is_expected.to be_truthy }
end
end
end end
# frozen_string_literal: true
require 'spec_helper'
describe Ci::GenerateCoverageReportsService do
let(:service) { described_class.new(project) }
let(:project) { create(:project, :repository) }
describe '#execute' do
subject { service.execute(base_pipeline, head_pipeline) }
context 'when head pipeline has coverage reports' do
let!(:merge_request) { create(:merge_request, :with_coverage_reports, source_project: project) }
let!(:service) { described_class.new(project, nil, id: merge_request.id) }
let!(:head_pipeline) { merge_request.head_pipeline }
let!(:base_pipeline) { nil }
it 'returns status and data' do
expect(subject[:status]).to eq(:parsed)
expect(subject[:data]).to eq(files: {})
end
end
context 'when head pipeline has corrupted coverage reports' do
let!(:merge_request) { create(:merge_request, :with_coverage_reports, source_project: project) }
let!(:service) { described_class.new(project, nil, id: merge_request.id) }
let!(:head_pipeline) { merge_request.head_pipeline }
let!(:base_pipeline) { nil }
before do
build = create(:ci_build, pipeline: head_pipeline, project: head_pipeline.project)
create(:ci_job_artifact, :coverage_with_corrupted_data, job: build, project: project)
end
it 'returns status and error message' do
expect(subject[:status]).to eq(:error)
expect(subject[:status_reason]).to include('An error occurred while fetching coverage reports.')
end
end
context 'when head pipeline has coverage reports and no merge request associated' do
let!(:head_pipeline) { create(:ci_pipeline, :with_coverage_reports, project: project) }
let!(:base_pipeline) { nil }
it 'returns status and error message' do
expect(subject[:status]).to eq(:error)
expect(subject[:status_reason]).to include('An error occurred while fetching coverage reports.')
end
end
end
describe '#latest?' do
subject { service.latest?(base_pipeline, head_pipeline, data) }
let!(:base_pipeline) { nil }
let!(:head_pipeline) { create(:ci_pipeline, :with_test_reports, project: project) }
let!(:key) { service.send(:key, base_pipeline, head_pipeline) }
context 'when cache key is latest' do
let(:data) { { key: key } }
it { is_expected.to be_truthy }
end
context 'when cache key is outdated' do
before do
head_pipeline.update_column(:updated_at, 10.minutes.ago)
end
let(:data) { { key: key } }
it { is_expected.to be_falsy }
end
context 'when cache key is empty' do
let(:data) { { key: nil } }
it { is_expected.to be_falsy }
end
end
end
...@@ -47,6 +47,23 @@ describe MergeRequests::MergeService do ...@@ -47,6 +47,23 @@ describe MergeRequests::MergeService do
expect(note.note).to include 'merged' expect(note.note).to include 'merged'
end end
it 'is idempotent' do
repository = project.repository
commit_count = repository.commit_count
merge_commit = merge_request.merge_commit.id
# a first invocation of execute is performed on the before block
service.execute(merge_request)
expect(merge_request.merge_error).to be_falsey
expect(merge_request).to be_valid
expect(merge_request).to be_merged
expect(repository.commits_by(oids: [merge_commit]).size).to eq(1)
expect(repository.commit_count).to eq(commit_count)
expect(merge_request.in_progress_merge_commit_sha).to be_nil
end
context 'when squashing' do context 'when squashing' do
let(:merge_params) do let(:merge_params) do
{ commit_message: 'Merge commit message', { commit_message: 'Merge commit message',
......
...@@ -17,7 +17,6 @@ describe MergeRequests::PostMergeService do ...@@ -17,7 +17,6 @@ describe MergeRequests::PostMergeService do
it 'refreshes the number of open merge requests for a valid MR', :use_clean_rails_memory_store_caching do it 'refreshes the number of open merge requests for a valid MR', :use_clean_rails_memory_store_caching do
# Cache the counter before the MR changed state. # Cache the counter before the MR changed state.
project.open_merge_requests_count project.open_merge_requests_count
merge_request.update!(state: 'merged')
service = described_class.new(project, user, {}) service = described_class.new(project, user, {})
......
...@@ -137,6 +137,24 @@ describe MergeRequests::SquashService do ...@@ -137,6 +137,24 @@ describe MergeRequests::SquashService do
include_examples 'the squash succeeds' include_examples 'the squash succeeds'
end end
context 'when the merge request has already been merged' do
let(:merge_request) { merge_request_with_one_commit }
it 'checks the side-effects for multiple calls' do
merge_request.mark_as_merged
expect(service).to be_idempotent
expect { IdempotentWorkerHelper::WORKER_EXEC_TIMES.times { service.execute } }.not_to raise_error
end
it 'idempotently returns a success' do
merge_request.mark_as_merged
result = service.execute
expect(result).to match(status: :success, squash_sha: merge_request.diff_head_sha)
end
end
context 'git errors' do context 'git errors' do
let(:merge_request) { merge_request_with_only_new_files } let(:merge_request) { merge_request_with_only_new_files }
let(:error) { 'A test error' } let(:error) { 'A test error' }
......