Commit 5c275490 authored by Amit Rathi

Merge branch 'master' into certmanager-temp

parents f6b20cc2 078fab66
image: "dev.gitlab.org:5005/gitlab/gitlab-build-images:ruby-2.4.5-golang-1.9-git-2.18-chrome-69.0-node-8.x-yarn-1.2-postgresql-9.6-graphicsmagick-1.3.29" image: "dev.gitlab.org:5005/gitlab/gitlab-build-images:ruby-2.5.3-golang-1.9-git-2.18-chrome-69.0-node-10.x-yarn-1.12-postgresql-9.6-graphicsmagick-1.3.29"
.dedicated-runner: &dedicated-runner .dedicated-runner: &dedicated-runner
retry: 1 retry: 1
...@@ -6,7 +6,7 @@ image: "dev.gitlab.org:5005/gitlab/gitlab-build-images:ruby-2.4.5-golang-1.9-git ...@@ -6,7 +6,7 @@ image: "dev.gitlab.org:5005/gitlab/gitlab-build-images:ruby-2.4.5-golang-1.9-git
- gitlab-org - gitlab-org
.default-cache: &default-cache .default-cache: &default-cache
key: "ruby-2.4.5-debian-stretch-with-yarn" key: "debian-stretch-ruby-2.5.3-node-10.x"
paths: paths:
- vendor/ruby - vendor/ruby
- .yarn-cache/ - .yarn-cache/
...@@ -121,7 +121,7 @@ stages: ...@@ -121,7 +121,7 @@ stages:
<<: *except-docs-and-qa <<: *except-docs-and-qa
.single-script-job: &single-script-job .single-script-job: &single-script-job
image: ruby:2.4-alpine image: ruby:2.5-alpine
stage: test stage: test
cache: {} cache: {}
dependencies: [] dependencies: []
...@@ -227,6 +227,8 @@ stages: ...@@ -227,6 +227,8 @@ stages:
script: script:
- git fetch https://gitlab.com/gitlab-org/gitlab-ce.git v9.3.0 - git fetch https://gitlab.com/gitlab-org/gitlab-ce.git v9.3.0
- git checkout -f FETCH_HEAD - git checkout -f FETCH_HEAD
- sed -i "s/gem 'oj', '~> 2.17.4'//" Gemfile
- bundle update google-protobuf grpc
- bundle install $BUNDLE_INSTALL_FLAGS - bundle install $BUNDLE_INSTALL_FLAGS
- date - date
- cp config/gitlab.yml.example config/gitlab.yml - cp config/gitlab.yml.example config/gitlab.yml
...@@ -316,7 +318,7 @@ review-docs-cleanup: ...@@ -316,7 +318,7 @@ review-docs-cleanup:
# Trigger a docker image build in CNG (Cloud Native GitLab) repository # Trigger a docker image build in CNG (Cloud Native GitLab) repository
# #
cloud-native-image: cloud-native-image:
image: ruby:2.4-alpine image: ruby:2.5-alpine
before_script: [] before_script: []
dependencies: [] dependencies: []
stage: test stage: test
...@@ -369,7 +371,7 @@ update-tests-metadata: ...@@ -369,7 +371,7 @@ update-tests-metadata:
flaky-examples-check: flaky-examples-check:
<<: *dedicated-runner <<: *dedicated-runner
image: ruby:2.4-alpine image: ruby:2.5-alpine
services: [] services: []
before_script: [] before_script: []
variables: variables:
...@@ -589,7 +591,7 @@ static-analysis: ...@@ -589,7 +591,7 @@ static-analysis:
script: script:
- scripts/static-analysis - scripts/static-analysis
cache: cache:
key: "ruby-2.4.5-debian-stretch-with-yarn-and-rubocop" key: "debian-stretch-ruby-2.5.3-node-10.x-and-rubocop"
paths: paths:
- vendor/ruby - vendor/ruby
- .yarn-cache/ - .yarn-cache/
...@@ -696,7 +698,7 @@ gitlab:setup-mysql: ...@@ -696,7 +698,7 @@ gitlab:setup-mysql:
# Frontend-related jobs # Frontend-related jobs
gitlab:assets:compile: gitlab:assets:compile:
<<: *dedicated-no-docs-and-no-qa-pull-cache-job <<: *dedicated-no-docs-and-no-qa-pull-cache-job
image: dev.gitlab.org:5005/gitlab/gitlab-build-images:ruby-2.4.4-git-2.18-chrome-69.0-node-8.x-yarn-1.2-graphicsmagick-1.3.29-docker-18.06.1 image: dev.gitlab.org:5005/gitlab/gitlab-build-images:ruby-2.5.3-git-2.18-chrome-69.0-node-8.x-yarn-1.2-graphicsmagick-1.3.29-docker-18.06.1
dependencies: [] dependencies: []
services: services:
- docker:stable-dind - docker:stable-dind
......
@@ -44,8 +44,11 @@ export default {
     isNew() {
       return this.diffMode === diffModes.new;
     },
+    isRenamed() {
+      return this.diffMode === diffModes.renamed;
+    },
     imagePath() {
-      return this.isNew ? this.newPath : this.oldPath;
+      return this.isNew || this.isRenamed ? this.newPath : this.oldPath;
     },
   },
   methods: {
@@ -114,7 +117,7 @@ export default {
       }]"
     >
       <slot
-        v-if="isNew"
+        v-if="isNew || isRenamed"
         slot="image-overlay"
         name="image-overlay"
       >
...
 <script>
+import { GlTooltipDirective, GlLink, GlButton } from '@gitlab-org/gitlab-ui';
 import CiIconBadge from './ci_badge_link.vue';
 import TimeagoTooltip from './time_ago_tooltip.vue';
-import tooltip from '../directives/tooltip';
 import UserAvatarImage from './user_avatar/user_avatar_image.vue';
+import LoadingButton from '~/vue_shared/components/loading_button.vue';
 /**
  * Renders header component for job and pipeline page based on UI mockups
@@ -16,9 +17,12 @@ export default {
     CiIconBadge,
     TimeagoTooltip,
     UserAvatarImage,
+    GlLink,
+    GlButton,
+    LoadingButton,
   },
   directives: {
-    tooltip,
+    GlTooltip: GlTooltipDirective,
   },
   props: {
     status: {
@@ -98,8 +102,8 @@ export default {
       by
       <template v-if="user">
-        <a
-          v-tooltip
+        <gl-link
+          v-gl-tooltip
           :href="user.path"
           :title="user.email"
           class="js-user-link commit-committer-link"
@@ -113,7 +117,7 @@ export default {
          />
          {{ user.name }}
-        </a>
+        </gl-link>
        <span
          v-if="user.status_tooltip_html"
          v-html="user.status_tooltip_html"></span>
@@ -127,16 +131,16 @@ export default {
      <template
        v-for="(action, i) in actions"
      >
-        <a
+        <gl-link
          v-if="action.type === 'link'"
          :key="i"
          :href="action.path"
          :class="action.cssClass"
        >
          {{ action.label }}
-        </a>
-        <a
+        </gl-link>
+        <gl-link
          v-else-if="action.type === 'ujs-link'"
          :key="i"
          :href="action.path"
@@ -145,31 +149,24 @@ export default {
          rel="nofollow"
        >
          {{ action.label }}
-        </a>
-        <button
+        </gl-link>
+        <loading-button
          v-else-if="action.type === 'button'"
          :key="i"
+          :loading="action.isLoading"
          :disabled="action.isLoading"
          :class="action.cssClass"
-          type="button"
+          container-class="d-inline"
+          :label="action.label"
          @click="onClickAction(action)"
-        >
-          {{ action.label }}
-          <i
-            v-show="action.isLoading"
-            class="fa fa-spin fa-spinner"
-            aria-hidden="true"
-          >
-          </i>
-        </button>
+        />
      </template>
    </section>
-    <button
+    <gl-button
      v-if="hasSidebarButton"
      id="toggleSidebar"
-      type="button"
-      class="btn btn-default d-block d-sm-none
+      class="d-block d-sm-none
sidebar-toggle-btn js-sidebar-build-toggle js-sidebar-build-toggle-header"
      @click="onClickSidebarButton"
    >
@@ -179,6 +176,6 @@ sidebar-toggle-btn js-sidebar-build-toggle js-sidebar-build-toggle-header"
        aria-labelledby="toggleSidebar"
      >
      </i>
-    </button>
+    </gl-button>
  </header>
</template>
@@ -47,12 +47,6 @@
       @extend .fixed-width-container;
     }
   }
-
-  .diffs {
-    .mr-version-controls {
-      @extend .fixed-width-container;
-    }
-  }
 }
 .issuable-details {
...
@@ -176,8 +176,10 @@
     background-color: $white-light;
   }
-  .discussion-form-container {
-    padding: $gl-padding-top $gl-padding $gl-padding;
+  table {
+    .discussion-form-container {
+      padding: $gl-padding-top $gl-padding $gl-padding;
+    }
   }
 .discussion-notes .disabled-comment {
...
@@ -13,12 +13,32 @@ $note-form-margin-left: 72px;
   }
 }
+@mixin outline-comment() {
+  margin: $gl-padding;
+  border: 1px solid $border-color;
+  border-radius: $border-radius-default;
+}
 .note-wrapper {
   padding: $gl-padding;
+
+  &.outlined {
+    @include outline-comment();
+  }
 }
-.issuable-discussion {
-  .notes.timeline > .timeline-entry {
+.main-notes-list {
+  @include vertical-line(39px);
+}
+
+.notes {
+  display: block;
+  list-style: none;
+  margin: 0;
+  padding: 0;
+  position: relative;
+
+  &.timeline > .timeline-entry {
     border: 1px solid $border-color;
     border-radius: $border-radius-default;
     margin: $gl-padding 0;
@@ -51,18 +71,6 @@ $note-form-margin-left: 72px;
       border-top: 1px solid $border-color;
     }
   }
-}
-
-.main-notes-list {
-  @include vertical-line(36px);
-}
-
-.notes {
-  display: block;
-  list-style: none;
-  margin: 0;
-  padding: 0;
-  position: relative;
   > .note-discussion {
     .card {
@@ -71,10 +79,6 @@ $note-form-margin-left: 72px;
     li.note {
       border-bottom: 1px solid $border-color;
-
-      &:first-child {
-        border-radius: $border-radius-default $border-radius-default 0 0;
-      }
     }
   }
@@ -387,6 +391,7 @@ $note-form-margin-left: 72px;
     line-height: 42px;
     padding: 0 $gl-padding;
     border-top: 1px solid $border-color;
+    border-radius: 0;
     &:hover {
       background-color: $gray-light;
@@ -479,9 +484,7 @@ $note-form-margin-left: 72px;
   }
   .note-wrapper {
-    margin: $gl-padding;
-    border: 1px solid $border-color;
-    border-radius: $border-radius-default;
+    @include outline-comment();
   }
   .discussion-reply-holder {
@@ -491,6 +494,16 @@ $note-form-margin-left: 72px;
   }
 }
+.commit-diff {
+  .notes {
+    @include vertical-line(52px);
+  }
+
+  .discussion-reply-holder {
+    border-top: 1px solid $border-color;
+  }
+}
 .discussion-header,
 .note-header-info {
   a {
...
@@ -173,9 +173,7 @@ module Clusters
       kubeclient = build_kube_client!
       kubeclient.get_pods(namespace: actual_namespace).as_json
-    rescue Kubeclient::HttpError => err
-      raise err unless err.error_code == 404
-
+    rescue Kubeclient::ResourceNotFoundError
       []
     end
...
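The rescue change above (repeated later in this diff for `KubernetesService` and the service-account token fetcher) leans on newer kubeclient releases raising `Kubeclient::ResourceNotFoundError`, a subclass of `Kubeclient::HttpError`, for 404 responses, so the manual status-code check becomes unnecessary. A minimal sketch of the before/after shapes, assuming a kubeclient version that defines that subclass:

```ruby
require 'kubeclient'

# Before: rescue the generic HTTP error and inspect the status code manually.
def pods_before(kubeclient, namespace)
  kubeclient.get_pods(namespace: namespace).as_json
rescue Kubeclient::HttpError => err
  raise err unless err.error_code == 404
  []
end

# After: let kubeclient classify the 404; other HTTP errors still propagate.
def pods_after(kubeclient, namespace)
  kubeclient.get_pods(namespace: namespace).as_json
rescue Kubeclient::ResourceNotFoundError
  []
end
```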
@@ -97,9 +97,9 @@ module Mentionable
   # Allows heavy processing to be skipped
   def matches_cross_reference_regex?
     reference_pattern = if !project || project.default_issues_tracker?
-                          ReferenceRegexes::DEFAULT_PATTERN
+                          ReferenceRegexes.default_pattern
                         else
-                          ReferenceRegexes::EXTERNAL_PATTERN
+                          ReferenceRegexes.external_pattern
                         end
     self.class.mentionable_attrs.any? do |attr, _|
...
@@ -2,6 +2,8 @@
 module Mentionable
   module ReferenceRegexes
+    extend Gitlab::Utils::StrongMemoize
+
     def self.reference_pattern(link_patterns, issue_pattern)
       Regexp.union(link_patterns,
                    issue_pattern,
@@ -15,16 +17,20 @@ module Mentionable
       ]
     end
-    DEFAULT_PATTERN = begin
-      issue_pattern = Issue.reference_pattern
-      link_patterns = Regexp.union([Issue, Commit, MergeRequest, Epic].map(&:link_reference_pattern).compact)
-      reference_pattern(link_patterns, issue_pattern)
+    def self.default_pattern
+      strong_memoize(:default_pattern) do
+        issue_pattern = Issue.reference_pattern
+        link_patterns = Regexp.union([Issue, Commit, MergeRequest, Epic].map(&:link_reference_pattern).compact)
+        reference_pattern(link_patterns, issue_pattern)
+      end
     end
-    EXTERNAL_PATTERN = begin
-      issue_pattern = IssueTrackerService.reference_pattern
-      link_patterns = URI.regexp(%w(http https))
-      reference_pattern(link_patterns, issue_pattern)
+    def self.external_pattern
+      strong_memoize(:external_pattern) do
+        issue_pattern = IssueTrackerService.reference_pattern
+        link_patterns = URI.regexp(%w(http https))
+        reference_pattern(link_patterns, issue_pattern)
+      end
     end
   end
 end
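Replacing the eagerly evaluated `DEFAULT_PATTERN`/`EXTERNAL_PATTERN` constants with memoized methods defers building these regexes until first use while still computing them only once. A rough standalone sketch of the `strong_memoize` behavior this relies on, assuming GitLab's implementation caches the block result in an instance variable (so even `nil`/`false` results are memoized):

```ruby
module StrongMemoize
  def strong_memoize(name)
    ivar = :"@#{name}"
    if instance_variable_defined?(ivar)
      instance_variable_get(ivar)
    else
      instance_variable_set(ivar, yield)
    end
  end
end

class PatternStore
  extend StrongMemoize

  def self.default_pattern
    strong_memoize(:default_pattern) do
      puts 'building...' # printed only on the first call
      /expensive|pattern/
    end
  end
end

PatternStore.default_pattern # builds and caches the regexp
PatternStore.default_pattern # returns the cached regexp
```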
 # frozen_string_literal: true
 class Identity < ActiveRecord::Base
-  def self.uniqueness_scope
-    :provider
-  end
   include Sortable
   include CaseSensitivity
   belongs_to :user
   validates :provider, presence: true
-  validates :extern_uid, allow_blank: true, uniqueness: { scope: uniqueness_scope, case_sensitive: false }
-  validates :user_id, uniqueness: { scope: uniqueness_scope }
+  validates :extern_uid, allow_blank: true, uniqueness: { scope: UniquenessScopes.scopes, case_sensitive: false }
+  validates :user_id, uniqueness: { scope: UniquenessScopes.scopes }
   before_save :ensure_normalized_extern_uid, if: :extern_uid_changed?
   after_destroy :clear_user_synced_attributes, if: :user_synced_attributes_metadata_from_provider?
...
# frozen_string_literal: true

class Identity < ActiveRecord::Base
  # This module and method are defined in a separate file to allow EE to
  # redefine the `scopes` method before it is used in the `Identity` model.
  module UniquenessScopes
    def self.scopes
      [:provider]
    end
  end
end
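Because `UniquenessScopes` lives in its own file, GitLab EE can load a wider definition of `scopes` before the `Identity` class body runs its `validates` calls. A hypothetical sketch of such an override (the extra column name is purely illustrative):

```ruby
# Hypothetical EE file, loaded before app/models/identity.rb:
class Identity < ActiveRecord::Base
  module UniquenessScopes
    def self.scopes
      [:provider, :saml_provider_id] # illustrative extra scope column
    end
  end
end

# When identity.rb is evaluated afterwards, its validations pick this up:
#   validates :extern_uid, uniqueness: { scope: UniquenessScopes.scopes, ... }
```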
@@ -9,7 +9,7 @@ class IssueTrackerService < Service
   # Override this method on services that uses different patterns
   # This pattern does not support cross-project references
   # The other code assumes that this pattern is a superset of all
-  # overridden patterns. See ReferenceRegexes::EXTERNAL_PATTERN
+  # overridden patterns. See ReferenceRegexes.external_pattern
   def self.reference_pattern(only_long: false)
     if only_long
       /(\b[A-Z][A-Z0-9_]*-)(?<issue>\d+)/
...
@@ -203,9 +203,7 @@ class KubernetesService < DeploymentService
       kubeclient = build_kube_client!
       kubeclient.get_pods(namespace: actual_namespace).as_json
-    rescue Kubeclient::HttpError => err
-      raise err unless err.error_code == 404
-
+    rescue Kubeclient::ResourceNotFoundError
       []
     end
...
@@ -9,13 +9,12 @@ class Shard < ActiveRecord::Base
     # The GitLab config does not change for the lifecycle of the process
     in_config = Gitlab.config.repositories.storages.keys.map(&:to_s)
-    transaction do
-      in_db = all.pluck(:name)
-      missing = in_config - in_db
-
-      missing.map { |name| by_name(name) }
-    end
+    in_db = all.pluck(:name)
+
+    # This may race with other processes creating shards at the same time, but
+    # `by_name` will handle that correctly
+    missing = in_config - in_db
+
+    missing.map { |name| by_name(name) }
   end
   def self.by_name(name)
...
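With the transaction gone, two processes can both compute the same `missing` list and race to create a shard; the new comment defers to `by_name` to resolve that. A sketch of a race-tolerant `by_name`, assuming a unique database index on `shards.name`:

```ruby
def self.by_name(name)
  find_or_create_by(name: name)
rescue ActiveRecord::RecordNotUnique
  # Another process inserted the same row between our SELECT and INSERT;
  # retrying finds the now-existing record instead of creating it.
  retry
end
```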
@@ -15,8 +15,8 @@ module Clusters
       check_timeout
     end
   rescue Kubeclient::HttpError => e
-    Rails.logger.error "Kubernetes error: #{e.class.name} #{e.message}"
-    app.make_errored!("Kubernetes error") unless app.errored?
+    Rails.logger.error("Kubernetes error: #{e.error_code} #{e.message}")
+    app.make_errored!("Kubernetes error: #{e.error_code}") unless app.errored?
   end
   private
@@ -53,7 +53,7 @@ module Clusters
   def remove_installation_pod
     helm_api.delete_pod!(install_command.pod_name)
   rescue => e
-    Rails.logger.error "Kubernetes error: #{e.class.name} #{e.message}"
+    Rails.logger.error("Kubernetes error: #{e.class.name} #{e.message}")
     # no-op
   end
...
@@ -13,8 +13,8 @@ module Clusters
     ClusterWaitForAppInstallationWorker.perform_in(
       ClusterWaitForAppInstallationWorker::INTERVAL, app.name, app.id)
   rescue Kubeclient::HttpError => e
-    Rails.logger.error "Kubernetes error: #{e.class.name} #{e.message}"
-    app.make_errored!("Kubernetes error.")
+    Rails.logger.error("Kubernetes error: #{e.error_code} #{e.message}")
+    app.make_errored!("Kubernetes error: #{e.error_code}")
   rescue StandardError => e
     Rails.logger.error "Can't start installation process: #{e.class.name} #{e.message}"
     app.make_errored!("Can't start installation process.")
...
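Logging `e.error_code` rather than the exception class surfaces the HTTP status both in the Rails log and in the error recorded on the application row. `Kubeclient::HttpError` carries the status code; a small illustration (the constructor arguments are just sample values):

```ruby
require 'kubeclient'

err = Kubeclient::HttpError.new(403, 'services is forbidden', nil)

err.error_code # => 403
err.message    # => "services is forbidden"

# which makes messages like "Kubernetes error: 403 services is forbidden"
# possible in the rescue blocks above.
```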
@@ -21,10 +21,7 @@ module Clusters
     def get_secret
       kubeclient.get_secret(service_account_token_name, namespace).as_json
-    rescue Kubeclient::HttpError => err
-      raise err unless err.error_code == 404
-
-      nil
+    rescue Kubeclient::ResourceNotFoundError
     end
   end
 end
...
@@ -42,12 +42,12 @@
           <code>4 mins 2 sec</code>, <code>2h42min</code>.
           = link_to icon('question-circle'), help_page_path('user/admin_area/settings/continuous_integration', anchor: 'default-artifacts-expiration')
       .form-group
-        = f.label :archive_builds_in_human_readable, 'Archive builds in', class: 'label-bold'
+        = f.label :archive_builds_in_human_readable, 'Archive jobs', class: 'label-bold'
         = f.text_field :archive_builds_in_human_readable, class: 'form-control', placeholder: 'never'
         .form-text.text-muted
-          Set the duration when build gonna be considered old. Archived builds cannot be retried.
-          Make it empty to never expire builds. It has to be larger than 1 day.
-          The default unit is in seconds, but you can define an alternative. For example:
-          <code>4 mins 2 sec</code>, <code>2h42min</code>.
+          Set the duration for which the jobs will be considered as old and expired.
+          Once that time passes, the jobs will be archived and no longer able to be
+          retried. Make it empty to never expire jobs. It has to be no less than 1 day,
+          for example: <code>15 days</code>, <code>1 month</code>, <code>2 years</code>.
   = f.submit 'Save changes', class: "btn btn-success"
@@ -3,7 +3,7 @@
 .suppressed-container
   %a.show-suppressed-diff.js-show-suppressed-diff Changes suppressed. Click to show.
-%table.text-file.diff-wrap-lines.code.js-syntax-highlight{ data: diff_view_data, class: too_big ? 'hide' : '' }
+%table.text-file.diff-wrap-lines.code.js-syntax-highlight.commit-diff{ data: diff_view_data, class: too_big ? 'hide' : '' }
   = render partial: "projects/diffs/line",
     collection: diff_file.highlighted_diff_lines,
     as: :line,
...
@@ -37,6 +37,6 @@
       = link_to 'Reopen', merge_request_path(@merge_request, merge_request: { state_event: :reopen }), method: :put, class: 'reopen-mr-link', title: 'Reopen merge request'
   - if can_update_merge_request
-    = link_to 'Edit', edit_project_merge_request_path(@project, @merge_request), class: "d-none d-sm-none d-md-block btn btn-grouped js-issuable-edit"
+    = link_to 'Edit', edit_project_merge_request_path(@project, @merge_request), class: "d-none d-sm-none d-md-block btn btn-grouped js-issuable-edit qa-edit-button"
   = render 'shared/issuable/close_reopen_button', issuable: @merge_request, can_update: can_update_merge_request, can_reopen: can_update_merge_request
@@ -5,7 +5,7 @@
 - note_editable = can?(current_user, :admin_note, note)
 - note_counter = local_assigns.fetch(:note_counter, 0)
-%li.timeline-entry{ id: dom_id(note),
+%li.timeline-entry.note-wrapper.outlined{ id: dom_id(note),
   class: ["note", "note-row-#{note.id}", ('system-note' if note.system)],
   data: { author_id: note.author.id,
     editable: note_editable,
...
@@ -8,7 +8,7 @@
 - if can_create_note?
   .notes.notes-form.timeline
-    .timeline-entry
+    .timeline-entry.note-form
       .timeline-entry-inner
         .flash-container.timeline-content
...
---
title: Fix a race condition intermittently breaking GitLab startup
merge_request: 23028
author:
type: fixed
---
title: Show HTTP response code for Kubernetes errors
merge_request: 22964
author:
type: other
---
title: Upgrade to Ruby 2.5.3
merge_request: 2806
author:
type: performance
@@ -6,7 +6,7 @@ Authentiq will generate a Client ID and the accompanying Client Secret for you t
 1. Get your Client credentials (Client ID and Client Secret) at [Authentiq](https://www.authentiq.com/developers).
-2. On your GitLab server, open the configuration file:
+1. On your GitLab server, open the configuration file:
    For omnibus installation
    ```sh
@@ -18,11 +18,11 @@ Authentiq will generate a Client ID and the accompanying Client Secret for you t
    ```sh
    sudo -u git -H editor /home/git/gitlab/config/gitlab.yml
    ```
-3. See [Initial OmniAuth Configuration](../../integration/omniauth.md#initial-omniauth-configuration) for initial settings to enable single sign-on and add Authentiq as an OAuth provider.
-4. Add the provider configuration for Authentiq:
+1. See [Initial OmniAuth Configuration](../../integration/omniauth.md#initial-omniauth-configuration) for initial settings to enable single sign-on and add Authentiq as an OAuth provider.
+1. Add the provider configuration for Authentiq:
    For Omnibus packages:
    ```ruby
@@ -31,15 +31,15 @@ Authentiq will generate a Client ID and the accompanying Client Secret for you t
        "name" => "authentiq",
        "app_id" => "YOUR_CLIENT_ID",
        "app_secret" => "YOUR_CLIENT_SECRET",
        "args" => {
          "scope": 'aq:name email~rs address aq:push'
        }
      }
    ]
    ```
    For installations from source:
    ```yaml
    - { name: 'authentiq',
        app_id: 'YOUR_CLIENT_ID',
@@ -49,20 +49,20 @@ Authentiq will generate a Client ID and the accompanying Client Secret for you t
      }
    }
    ```
-5. The `scope` is set to request the user's name, email (required and signed), and permission to send push notifications to sign in on subsequent visits.
+1. The `scope` is set to request the user's name, email (required and signed), and permission to send push notifications to sign in on subsequent visits.
    See [OmniAuth Authentiq strategy](https://github.com/AuthentiqID/omniauth-authentiq/wiki/Scopes,-callback-url-configuration-and-responses) for more information on scopes and modifiers.
-6. Change `YOUR_CLIENT_ID` and `YOUR_CLIENT_SECRET` to the Client credentials you received in step 1.
-7. Save the configuration file.
-8. [Reconfigure](../restart_gitlab.md#omnibus-gitlab-reconfigure) or [restart GitLab](../restart_gitlab.md#installations-from-source) for the changes to take effect if you installed GitLab via Omnibus or from source respectively.
+1. Change `YOUR_CLIENT_ID` and `YOUR_CLIENT_SECRET` to the Client credentials you received in step 1.
+1. Save the configuration file.
+1. [Reconfigure](../restart_gitlab.md#omnibus-gitlab-reconfigure) or [restart GitLab](../restart_gitlab.md#installations-from-source) for the changes to take effect if you installed GitLab via Omnibus or from source respectively.
 On the sign in page there should now be an Authentiq icon below the regular sign in form. Click the icon to begin the authentication process.
 - If the user has the Authentiq ID app installed in their iOS or Android device, they can scan the QR code, decide what personal details to share and sign in to your GitLab installation.
 - If not they will be prompted to download the app and then follow the procedure above.
 If everything goes right, the user will be returned to GitLab and will be signed in.
\ No newline at end of file
@@ -59,7 +59,7 @@ on an Linux NFS server, do the following:
    sysctl -w fs.leases-enable=0
    ```
-2. Restart the NFS server process. For example, on CentOS run `service nfs restart`.
+1. Restart the NFS server process. For example, on CentOS run `service nfs restart`.
 ## Avoid using AWS's Elastic File System (EFS)
@@ -87,12 +87,12 @@ this configuration.
 Additionally, this configuration is specifically warned against in the
 [Postgres Documentation](https://www.postgresql.org/docs/current/static/creating-cluster.html#CREATING-CLUSTER-NFS):
 >PostgreSQL does nothing special for NFS file systems, meaning it assumes NFS behaves exactly like
 >locally-connected drives. If the client or server NFS implementation does not provide standard file
 >system semantics, this can cause reliability problems. Specifically, delayed (asynchronous) writes
 >to the NFS server can cause data corruption problems.
 For supported database architecture, please see our documentation on
 [Configuring a Database for GitLab HA](https://docs.gitlab.com/ee/administration/high_availability/database.html).
 ## NFS Client mount options
...
@@ -665,7 +665,7 @@ cache, queues, and shared_state. To make this work with Sentinel:
 **Note**: Redis URLs should be in the format: `redis://:PASSWORD@SENTINEL_MASTER_NAME`
 1. PASSWORD is the plaintext password for the Redis instance
-2. SENTINEL_MASTER_NAME is the Sentinel master name (e.g. `gitlab-redis-cache`)
+1. SENTINEL_MASTER_NAME is the Sentinel master name (e.g. `gitlab-redis-cache`)
 1. Include an array of hashes with host/port combinations, such as the following:
    ```ruby
...
@@ -29,9 +29,9 @@ Each line contains a JSON line that can be ingested by Elasticsearch, Splunk, et
 In this example, you can see this was a GET request for a specific issue. Notice each line also contains performance data:
 1. `duration`: the total time taken to retrieve the request
-2. `view`: total time taken inside the Rails views
-3. `db`: total time to retrieve data from the database
-4. `gitaly_calls`: total number of calls made to Gitaly
+1. `view`: total time taken inside the Rails views
+1. `db`: total time to retrieve data from the database
+1. `gitaly_calls`: total number of calls made to Gitaly
 User clone/fetch activity using http transport appears in this log as `action: git_upload_pack`.
@@ -119,7 +119,7 @@ This file lives in `/var/log/gitlab/gitlab-rails/integrations_json.log` for
 Omnibus GitLab packages or in `/home/git/gitlab/log/integrations_json.log` for
 installations from source.
 It contains information about [integrations](../user/project/integrations/project_services.md) activities such as JIRA, Asana and Irker services. It uses JSON format like the example below:
 ``` json
 {"severity":"ERROR","time":"2018-09-06T14:56:20.439Z","service_class":"JiraService","project_id":8,"project_path":"h5bp/html5-boilerplate","message":"Error sending message","client_url":"http://jira.gitlap.com:8080","error":"execution expired"}
@@ -257,8 +257,8 @@ importer. Future importers may use this file.
 ## Reconfigure Logs
 Reconfigure log files live in `/var/log/gitlab/reconfigure` for Omnibus GitLab
 packages. Installations from source don't have reconfigure logs. A reconfigure log
 is populated whenever `gitlab-ctl reconfigure` is run manually or as part of an upgrade.
 Reconfigure logs files are named according to the UNIX timestamp of when the reconfigure
...
@@ -95,10 +95,10 @@ UDP can be done using the following settings:
 This does the following:
 1. Enable UDP and bind it to port 8089 for all addresses.
-2. Store any data received in the "gitlab" database.
-3. Define a batch of points to be 1000 points in size and allow a maximum of
+1. Store any data received in the "gitlab" database.
+1. Define a batch of points to be 1000 points in size and allow a maximum of
    5 batches _or_ flush them automatically after 1 second.
-4. Define a UDP read buffer size of 200 MB.
+1. Define a UDP read buffer size of 200 MB.
 One of the most important settings here is the UDP read buffer size as if this
 value is set too low, packets will be dropped. You must also make sure the OS
...
@@ -5,7 +5,7 @@ NOTE: **Note:** This document describes a drop-in replacement for the
 using [ssh certificates](ssh_certificates.md), they are even faster,
 but are not a drop-in replacement.
 > [Introduced](https://gitlab.com/gitlab-org/gitlab-ee/issues/1631) in
 > [GitLab Starter](https://about.gitlab.com/gitlab-ee) 9.3.
 >
 > [Available in](https://gitlab.com/gitlab-org/gitlab-ee/issues/3953) GitLab
@@ -109,7 +109,7 @@ the database. The following instructions can be used to build OpenSSH 7.5:
    yum install rpm-build gcc make wget openssl-devel krb5-devel pam-devel libX11-devel xmkmf libXt-devel
    ```
-3. Prepare the build by copying files to the right place:
+1. Prepare the build by copying files to the right place:
    ```
    mkdir -p /root/rpmbuild/{SOURCES,SPECS}
@@ -118,7 +118,7 @@ the database. The following instructions can be used to build OpenSSH 7.5:
    cd /root/rpmbuild/SPECS
    ```
-3. Next, set the spec settings properly:
+1. Next, set the spec settings properly:
    ```
    sed -i -e "s/%define no_gnome_askpass 0/%define no_gnome_askpass 1/g" openssh.spec
@@ -126,13 +126,13 @@ the database. The following instructions can be used to build OpenSSH 7.5:
    sed -i -e "s/BuildPreReq/BuildRequires/g" openssh.spec
    ```
-3. Build the RPMs:
+1. Build the RPMs:
    ```
    rpmbuild -bb openssh.spec
    ```
-4. Ensure the RPMs were built:
+1. Ensure the RPMs were built:
    ```
    ls -al /root/rpmbuild/RPMS/x86_64/
@@ -150,7 +150,7 @@ the database. The following instructions can be used to build OpenSSH 7.5:
    -rw-r--r--. 1 root root 367516 Jun 20 19:37 openssh-server-7.5p1-1.x86_64.rpm
    ```
-5. Install the packages. OpenSSH packages will replace `/etc/pam.d/sshd`
+1. Install the packages. OpenSSH packages will replace `/etc/pam.d/sshd`
    with its own version, which may prevent users from logging in, so be sure
    that the file is backed up and restored after installation:
@@ -161,7 +161,7 @@ the database. The following instructions can be used to build OpenSSH 7.5:
    yes | cp pam-ssh-conf-$timestamp /etc/pam.d/sshd
    ```
-6. Verify the installed version. In another window, attempt to login to the server:
+1. Verify the installed version. In another window, attempt to login to the server:
    ```
    ssh -v <your-centos-machine>
@@ -171,7 +171,7 @@ the database. The following instructions can be used to build OpenSSH 7.5:
    If not, you may need to restart sshd (e.g. `systemctl restart sshd.service`).
-7. *IMPORTANT!* Open a new SSH session to your server before exiting to make
+1. *IMPORTANT!* Open a new SSH session to your server before exiting to make
    sure everything is working! If you need to downgrade, simple install the
    older package:
...
@@ -8,7 +8,7 @@ The instructions make the assumption that you will be using the email address `i
 ## Configure your server firewall
 1. Open up port 25 on your server so that people can send email into the server over SMTP.
-2. If the mail server is different from the server running GitLab, open up port 143 on your server so that GitLab can read email from the server over IMAP.
+1. If the mail server is different from the server running GitLab, open up port 143 on your server so that GitLab can read email from the server over IMAP.
 ## Install packages
...
@@ -20,7 +20,7 @@ an SMTP server, but you're not seeing mail delivered. Here's how to check the se
    bundle exec rails console production
    ```
-2. Look at the ActionMailer `delivery_method` to make sure it matches what you
+1. Look at the ActionMailer `delivery_method` to make sure it matches what you
    intended. If you configured SMTP, it should say `:smtp`. If you're using
    Sendmail, it should say `:sendmail`:
@@ -29,7 +29,7 @@ an SMTP server, but you're not seeing mail delivered. Here's how to check the se
    => :smtp
    ```
-3. If you're using SMTP, check the mail settings:
+1. If you're using SMTP, check the mail settings:
    ```ruby
    irb(main):002:0> ActionMailer::Base.smtp_settings
@@ -39,7 +39,7 @@ an SMTP server, but you're not seeing mail delivered. Here's how to check the se
    In the example above, the SMTP server is configured for the local machine. If this is intended, you may need to check your local mail
    logs (e.g. `/var/log/mail.log`) for more details.
-4. Send a test message via the console.
+1. Send a test message via the console.
    ```ruby
    irb(main):003:0> Notify.test_email('youremail@email.com', 'Hello World', 'This is a test message').deliver_now
...
@@ -23,9 +23,9 @@ You need to have the Google Cloud SDK installed. e.g.
 On OSX, install [homebrew](https://brew.sh):
 1. Install Brew Caskroom: `brew install caskroom/cask/brew-cask`
-2. Install Google Cloud SDK: `brew cask install google-cloud-sdk`
-3. Add `kubectl`: `gcloud components install kubectl`
-4. Log in: `gcloud auth login`
+1. Install Google Cloud SDK: `brew cask install google-cloud-sdk`
+1. Add `kubectl`: `gcloud components install kubectl`
+1. Log in: `gcloud auth login`
 Now go back to the Google interface, find your cluster, and follow the instructions under `Connect to the cluster` and open the Kubernetes Dashboard. It will look something like `gcloud container clusters get-credentials ruby-autodeploy \ --zone europe-west2-c --project api-project-XXXXXXX` and then `kubectl proxy`.
...
@@ -46,18 +46,18 @@ GitLab Runner then executes job scripts as the `gitlab-runner` user.
      --description "My Runner"
    ```
-2. Install Docker Engine on server.
+1. Install Docker Engine on server.
    For more information how to install Docker Engine on different systems
    checkout the [Supported installations](https://docs.docker.com/engine/installation/).
-3. Add `gitlab-runner` user to `docker` group:
+1. Add `gitlab-runner` user to `docker` group:
    ```bash
    sudo usermod -aG docker gitlab-runner
    ```
-4. Verify that `gitlab-runner` has access to Docker:
+1. Verify that `gitlab-runner` has access to Docker:
    ```bash
    sudo -u gitlab-runner -H docker info
@@ -75,7 +75,7 @@ GitLab Runner then executes job scripts as the `gitlab-runner` user.
    - docker run my-docker-image /script/to/run/tests
    ```
-5. You can now use `docker` command and install `docker-compose` if needed.
+1. You can now use `docker` command and install `docker-compose` if needed.
 NOTE: **Note:**
 By adding `gitlab-runner` to the `docker` group you are effectively granting `gitlab-runner` full root permissions.
...
 # Browser Performance Testing with the Sitespeed.io container
+
+CAUTION: **Caution:**
+The job definition shown below is supported on GitLab 11.5 and later versions.
+It also requires the GitLab Runner 11.5 or later.
+For earlier versions, use the [previous job definitions](#previous-job-definitions).
 This example shows how to run the
 [Sitespeed.io container](https://hub.docker.com/r/sitespeedio/sitespeed.io/) on
 your code by using GitLab CI/CD and [Sitespeed.io](https://www.sitespeed.io)
 using Docker-in-Docker.
-First, you need a GitLab Runner with the
+First, you need GitLab Runner with
 [docker-in-docker executor](../docker/using_docker_build.md#use-docker-in-docker-executor).
-Once you set up the Runner, add a new job to `.gitlab-ci.yml`, called
-`performance`:
+Once you set up the Runner, add a new job to `.gitlab-ci.yml` that
+generates the expected report:
 ```yaml
 performance:
@@ -26,19 +32,22 @@ performance:
     - mv sitespeed-results/data/performance.json performance.json
   artifacts:
     paths:
-      - performance.json
       - sitespeed-results/
+    reports:
+      performance: performance.json
 ```
-The above example will:
-
-1. Create a `performance` job in your CI/CD pipeline and will run
-   Sitespeed.io against the webpage you defined in `URL`.
-1. The [GitLab plugin](https://gitlab.com/gitlab-org/gl-performance) for
-   Sitespeed.io is downloaded in order to export key metrics to JSON. The full
-   HTML Sitespeed.io report will also be saved as an artifact, and if you have
-   [GitLab Pages](../../user/project/pages/index.md) enabled, it can be viewed
-   directly in your browser.
+The above example will create a `performance` job in your CI/CD pipeline and will run
+Sitespeed.io against the webpage you defined in `URL` to gather key metrics.
+The [GitLab plugin](https://gitlab.com/gitlab-org/gl-performance) for
+Sitespeed.io is downloaded in order to save the report as a
+[Performance report artifact](https://docs.gitlab.com/ee//ci/yaml/README.html#artifactsreportsperformance)
+that you can later download and analyze.
+Due to implementation limitations we always take the latest Performance artifact available.
+
+The full HTML Sitespeed.io report will also be saved as an artifact, and if you have
+[GitLab Pages](../../user/project/pages/index.md) enabled, it can be viewed
+directly in your browser.
 For further customization options of Sitespeed.io, including the ability to
 provide a list of URLs to test, please consult
@@ -46,8 +55,8 @@ provide a list of URLs to test, please consult
 TIP: **Tip:**
 For [GitLab Premium](https://about.gitlab.com/pricing/) users, key metrics are automatically
-extracted and shown right in the merge request widget. Learn more about
-[Browser Performance Testing](https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html).
+extracted and shown right in the merge request widget.
+[Learn more on Browser Performance Testing in merge requests](https://docs.gitlab.com/ee//user/project/merge_requests/browser_performance_testing.html).
 ## Performance testing on Review Apps
@@ -106,8 +115,40 @@ performance:
     - mv sitespeed-results/data/performance.json performance.json
   artifacts:
     paths:
-      - performance.json
       - sitespeed-results/
+    reports:
+      performance: performance.json
 ```
 A complete example can be found in our [Auto DevOps CI YML](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml).
+
+## Previous job definitions
+
+CAUTION: **Caution:**
+Before GitLab 11.5, Performance job and artifact had to be named specifically
+to automatically extract report data and show it in the merge request widget.
+While these old job definitions are still maintained they have been deprecated
+and may be removed in next major release, GitLab 12.0.
+You are advised to update your current `.gitlab-ci.yml` configuration to reflect that change.
+
+For GitLab 11.4 and earlier, the job should look like:
+
+```yaml
+performance:
+  stage: performance
+  image: docker:git
+  variables:
+    URL: https://example.com
+  services:
+    - docker:stable-dind
+  script:
+    - mkdir gitlab-exporter
+    - wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
+    - mkdir sitespeed-results
+    - docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL
+    - mv sitespeed-results/data/performance.json performance.json
+  artifacts:
+    paths:
+      - performance.json
+      - sitespeed-results/
+```
\ No newline at end of file
 # Analyze your project's Code Quality
+
+CAUTION: **Caution:**
+The job definition shown below is supported on GitLab 11.5 and later versions.
+It also requires the GitLab Runner 11.5 or later.
+For earlier versions, use the [previous job definitions](#previous-job-definitions).
 This example shows how to run Code Quality on your code by using GitLab CI/CD
 and Docker.
-First, you need GitLab Runner with [docker-in-docker executor][dind].
+First, you need GitLab Runner with
+[docker-in-docker executor](../docker/using_docker_build.md#use-docker-in-docker-executor).
-Once you set up the Runner, add a new job to `.gitlab-ci.yml`, called `code_quality`:
+Once you set up the Runner, add a new job to `.gitlab-ci.yml` that
+generates the expected report:
 ```yaml
 code_quality:
@@ -23,27 +30,72 @@ code_quality:
     --volume /var/run/docker.sock:/var/run/docker.sock
     "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
   artifacts:
-    paths: [gl-code-quality-report.json]
+    reports:
+      codequality: gl-code-quality-report.json
 ```
 The above example will create a `code_quality` job in your CI/CD pipeline which
-will scan your source code for code quality issues. The report will be saved
-as an artifact that you can later download and analyze.
+will scan your source code for code quality issues. The report will be saved as a
+[Code Quality report artifact](../../ci/yaml/README.md#artifactsreportscodequality)
+that you can later download and analyze.
+Due to implementation limitations we always take the latest Code Quality artifact available.
 TIP: **Tip:**
-Starting with [GitLab Starter][ee] 9.3, this information will
-be automatically extracted and shown right in the merge request widget. To do
-so, the CI/CD job must be named `code_quality` and the artifact path must be
-`gl-code-quality-report.json`.
+For [GitLab Starter][ee] users, this information will be automatically
+extracted and shown right in the merge request widget.
 [Learn more on Code Quality in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html).
+
+## Previous job definitions
+
 CAUTION: **Caution:**
-Code Quality was previously using `codeclimate` and `codequality` for job name and
-`codeclimate.json` for the artifact name. While these old names
-are still maintained they have been deprecated with GitLab 11.0 and may be removed
-in next major release, GitLab 12.0. You are advised to update your current `.gitlab-ci.yml`
-configuration to reflect that change.
+Before GitLab 11.5, Code Quality job and artifact had to be named specifically
+to automatically extract report data and show it in the merge request widget.
+While these old job definitions are still maintained they have been deprecated
+and may be removed in next major release, GitLab 12.0.
+You are advised to update your current `.gitlab-ci.yml` configuration to reflect that change.
+
+For GitLab 11.4 and earlier, the job should look like:
+
+```yaml
+code_quality:
+  image: docker:stable
+  variables:
+    DOCKER_DRIVER: overlay2
+  allow_failure: true
+  services:
+    - docker:stable-dind
+  script:
+    - export SP_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
+    - docker run
+        --env SOURCE_CODE="$PWD"
+        --volume "$PWD":/code
+        --volume /var/run/docker.sock:/var/run/docker.sock
+        "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
+  artifacts:
+    paths: [gl-code-quality-report.json]
+```
+
+Alternatively the job name could be `codeclimate` or `codequality`
+and the artifact name could be `codeclimate.json`.
+These names have been deprecated with GitLab 11.0
+and may be removed in next major release, GitLab 12.0.
+
+For GitLab 10.3 and earlier, the job should look like:
+
+```yaml
+codequality:
+  image: docker:latest
+  variables:
+    DOCKER_DRIVER: overlay
+  services:
+    - docker:dind
+  script:
+    - docker pull codeclimate/codeclimate:0.69.0
+    - docker run --env CODECLIMATE_CODE="$PWD" --volume "$PWD":/code --volume /var/run/docker.sock:/var/run/docker.sock --volume /tmp/cc:/tmp/cc codeclimate/codeclimate:0.69.0 init
+    - docker run --env CODECLIMATE_CODE="$PWD" --volume "$PWD":/code --volume /var/run/docker.sock:/var/run/docker.sock --volume /tmp/cc:/tmp/cc codeclimate/codeclimate:0.69.0 analyze -f json > codeclimate.json || true
+  artifacts:
+    paths: [codeclimate.json]
+```
 [cli]: https://github.com/codeclimate/codeclimate
-[dind]: ../docker/using_docker_build.md#use-docker-in-docker-executor
 [ee]: https://about.gitlab.com/pricing/
# Container Scanning with GitLab CI/CD # Container Scanning with GitLab CI/CD
CAUTION: **Caution:**
The job definition shown below is supported on GitLab 11.5 and later versions.
It also requires the GitLab Runner 11.5 or later.
For earlier versions, use the [previous job definitions](#previous-job-definitions).
You can check your Docker images (or more precisely the containers) for known
vulnerabilities by using [Clair](https://github.com/coreos/clair) and
[clair-scanner](https://github.com/arminc/clair-scanner), two open source tools
for Vulnerability Static Analysis for containers.

First, you need a GitLab Runner with the
[docker-in-docker executor](../docker/using_docker_build.md#use-docker-in-docker-executor).

Once you set up the Runner, add a new job to `.gitlab-ci.yml` that
generates the expected report:

```yaml
container_scanning:
  # ...
    - while( ! wget -T 10 -q -O /dev/null http://docker:6060/v1/namespaces ) ; do sleep 1 ; echo -n "." ; if [ $retries -eq 10 ] ; then echo " Timeout, aborting." ; exit 1 ; fi ; retries=$(($retries+1)) ; done
    - ./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-container-scanning-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
  artifacts:
    reports:
      container_scanning: gl-container-scanning-report.json
```

The above example will create a `container_scanning` job in your CI/CD pipeline, pull
the image from the [Container Registry](../../user/project/container_registry.md)
(whose name is defined from the two `CI_APPLICATION_` variables) and scan it
for possible vulnerabilities. The report will be saved as a
[Container Scanning report artifact](https://docs.gitlab.com/ee/ci/yaml/README.html#artifactsreportscontainer_scanning)
that you can later download and analyze.

Due to implementation limitations, we always take the latest Container Scanning artifact available.

If you want to whitelist some specific vulnerabilities, you can do so by defining
them in a [YAML file](https://github.com/arminc/clair-scanner/blob/master/README.md#example-whitelist-yaml-file),
in our case named `clair-whitelist.yml`.

TIP: **Tip:**
For [GitLab Ultimate][ee] users, this information will
be automatically extracted and shown right in the merge request widget.
[Learn more on Container Scanning in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/container_scanning.html).

CAUTION: **Caution:**
Starting with GitLab 11.5, the Container Scanning feature is licensed under the name `container_scanning`.
While the old name `sast_container` is still maintained, it has been deprecated and
may be removed in the next major release, GitLab 12.0. You are advised to update your current `.gitlab-ci.yml`
configuration to reflect that change if you are using the `$GITLAB_FEATURES` environment variable.
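
If your configuration gates jobs on licensed features, this rename is the part to update. A minimal sketch of such a gate, assuming a pipeline that uses `only:variables` to match against `$GITLAB_FEATURES` (the job shown and the exact regex are illustrative, not from the original):

```yaml
container_scanning:
  only:
    variables:
      # match the new license flag name; `sast_container` is deprecated
      - $GITLAB_FEATURES =~ /container_scanning/
```
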
## Previous job definitions
CAUTION: **Caution:**
Before GitLab 11.5, the Container Scanning job and artifact had to be named specifically
to automatically extract report data and show it in the merge request widget.
While these old job definitions are still maintained, they have been deprecated
and may be removed in the next major release, GitLab 12.0.
You are advised to update your current `.gitlab-ci.yml` configuration to reflect that change.

For GitLab 11.4 and earlier, the job should look like:
```yaml
container_scanning:
image: docker:stable
variables:
DOCKER_DRIVER: overlay2
## Define two new variables based on GitLab's CI/CD predefined variables
## https://docs.gitlab.com/ee/ci/variables/#predefined-variables-environment-variables
CI_APPLICATION_REPOSITORY: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
CI_APPLICATION_TAG: $CI_COMMIT_SHA
allow_failure: true
services:
- docker:stable-dind
script:
- docker run -d --name db arminc/clair-db:latest
- docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:v2.0.1
- apk add -U wget ca-certificates
- docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
- wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
- mv clair-scanner_linux_amd64 clair-scanner
- chmod +x clair-scanner
- touch clair-whitelist.yml
- retries=0
- echo "Waiting for clair daemon to start"
- while( ! wget -T 10 -q -O /dev/null http://docker:6060/v1/namespaces ) ; do sleep 1 ; echo -n "." ; if [ $retries -eq 10 ] ; then echo " Timeout, aborting." ; exit 1 ; fi ; retries=$(($retries+1)) ; done
- ./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-container-scanning-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
artifacts:
paths: [gl-container-scanning-report.json]
```

Alternatively, the job name could be `sast:container`
and the artifact name could be `gl-sast-container-report.json`.
These names have been deprecated with GitLab 11.0
and may be removed in the next major release, GitLab 12.0.

[ee]: https://about.gitlab.com/pricing/

# Dynamic Application Security Testing with GitLab CI/CD

CAUTION: **Caution:**
The job definition shown below is supported on GitLab 11.5 and later versions.
It also requires GitLab Runner 11.5 or later.
For earlier versions, use the [previous job definitions](#previous-job-definitions).

[Dynamic Application Security Testing (DAST)](https://en.wikipedia.org/wiki/Dynamic_program_analysis)
uses the popular open source tool [OWASP ZAProxy](https://github.com/zaproxy/zaproxy)
to perform an analysis on your running web application.

Since it is based on [ZAP Baseline](https://github.com/zaproxy/zaproxy/wiki/ZAP-Baseline-Scan),
DAST will perform passive scanning only;
it will not actively attack your application.

It can be very useful combined with [Review Apps](../review_apps/index.md).

## Example

First, you need a GitLab Runner with the
[docker-in-docker executor](../docker/using_docker_build.md#use-docker-in-docker-executor).

Once you set up the Runner, add a new job to `.gitlab-ci.yml` that
generates the expected report:

```yaml
dast:
  # ...
    - /zap/zap-baseline.py -J gl-dast-report.json -t $website || true
    - cp /zap/wrk/gl-dast-report.json .
  artifacts:
    reports:
      dast: gl-dast-report.json
```

The above example will create a `dast` job in your CI/CD pipeline which will run
the tests on the URL defined in the `website` variable (change it to use your
own) and scan it for possible vulnerabilities. The report will be saved as a
[DAST report artifact](https://docs.gitlab.com/ee/ci/yaml/README.html#artifactsreportsdast)
that you can later download and analyze.

Due to implementation limitations, we always take the latest DAST artifact available.

It's also possible to authenticate the user before performing DAST checks:

```yaml
dast:
  # ...
  variables:
    website: "https://example.com"
    login_url: "https://example.com/sign-in"
    username: "john.doe@example.com"
    password: "john-doe-password"
  allow_failure: true
  script:
    - mkdir /zap/wrk/
    - /zap/zap-baseline.py -J gl-dast-report.json -t $website
        --auth-url $login_url
        --auth-username $username
        --auth-password $password || true
    - cp /zap/wrk/gl-dast-report.json .
  artifacts:
    reports:
      dast: gl-dast-report.json
```

See the [zaproxy documentation](https://gitlab.com/gitlab-org/security-products/zaproxy)
to learn more about authentication settings.

TIP: **Tip:**
For [GitLab Ultimate][ee] users, this information will
be automatically extracted and shown right in the merge request widget.
[Learn more on DAST in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/dast.html).

## Previous job definitions

CAUTION: **Caution:**
Before GitLab 11.5, the DAST job and artifact had to be named specifically
to automatically extract report data and show it in the merge request widget.
While these old job definitions are still maintained, they have been deprecated
and may be removed in the next major release, GitLab 12.0.
You are advised to update your current `.gitlab-ci.yml` configuration to reflect that change.

For GitLab 11.4 and earlier, the job should look like:
```yaml
dast:
image: registry.gitlab.com/gitlab-org/security-products/zaproxy
variables:
website: "https://example.com"
allow_failure: true
script:
- mkdir /zap/wrk/
- /zap/zap-baseline.py -J gl-dast-report.json -t $website || true
- cp /zap/wrk/gl-dast-report.json .
artifacts:
paths: [gl-dast-report.json]
```

[ee]: https://about.gitlab.com/pricing/

@@ -33,9 +33,9 @@ before_script:

In this particular case, the `npm deploy` script is a Gulp script that does the following:

1. Compile CSS & JS
1. Create sprites
1. Copy various assets (images, fonts) around
1. Replace some strings

All these operations will put all files into a `build` folder, which is ready to be deployed to a live server.

@@ -62,10 +62,10 @@ before_script:

In order, this means that:

1. We check if the `ssh-agent` is available and we install it if it's not.
1. We create the `~/.ssh` folder.
1. We make sure we're running bash.
1. We disable host checking (we don't ask for user confirmation when we first connect to a server, and since every job will equal a first connect, we kind of need this).

And this is basically all you need in the `before_script` section.
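
As a sketch, such a `before_script` could look like the following; the Debian-based image, the Docker executor, and the exact commands are assumptions, not taken verbatim from the original:

```yaml
before_script:
  # install ssh-agent if it's missing (assumes a Debian-based image)
  - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'
  - eval $(ssh-agent -s)
  # create the ~/.ssh folder
  - mkdir -p ~/.ssh
  # the [[ ]] test needs bash; write an ssh config that disables host checking
  - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config'
```
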
@@ -91,11 +91,11 @@ stage_deploy:

Here's the breakdown:

1. `only:dev` means that this build will run only when something is pushed to the `dev` branch. You can remove this block completely and have everything run on every push (but probably this is something you don't want).
1. `ssh-add ...` we will add that private key you added on the web UI to the docker container.
1. We will connect via `ssh` and create a new `_tmp` folder.
1. We will connect via `scp` and upload the `build` folder (which was generated by an `npm` script) to our previously created `_tmp` folder.
1. We will connect again via `ssh` and move the `live` folder to an `_old` folder, then move `_tmp` to `live`.
1. We connect via `ssh` and remove the `_old` folder.

What's the deal with the artifacts? We just tell GitLab CI to keep the `build` directory (later on, you can download that as needed).
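
Putting those steps together, the job could be sketched like this; the host, user, remote paths, and the `$STAGING_PRIVATE_KEY` variable name are illustrative assumptions:

```yaml
stage_deploy:
  stage: deploy
  only:
    - dev
  script:
    # add the private key stored in the project's CI/CD variables to the agent
    - ssh-add <(echo "$STAGING_PRIVATE_KEY")
    # upload the generated build folder through a temporary directory
    - ssh user@staging.example.com "mkdir -p /var/www/_tmp"
    - scp -r build/* user@staging.example.com:/var/www/_tmp
    # swap the new release in, then clean up the old one
    - ssh user@staging.example.com "mv /var/www/live /var/www/_old && mv /var/www/_tmp /var/www/live"
    - ssh user@staging.example.com "rm -rf /var/www/_old"
  artifacts:
    paths:
      - build/
```
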
@@ -40,15 +40,17 @@ production:

This project has three jobs:

- `test` - used to test the Django application.
- `staging` - used to automatically deploy the staging environment on every push to the `master` branch.
- `production` - used to automatically deploy the production environment for every created tag.

## Store API keys

You'll need to create two variables in `Settings > CI/CD > Variables` on your GitLab project settings:

- `HEROKU_STAGING_API_KEY` - Heroku API key used to deploy the staging app.
- `HEROKU_PRODUCTION_API_KEY` - Heroku API key used to deploy the production app.

Find your Heroku API key in [Manage Account](https://dashboard.heroku.com/account).
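
As an illustration of how one of these variables is consumed, the `staging` job might use the [`dpl`](https://github.com/travis-ci/dpl) deploy tool; this is a sketch, with the Heroku app name assumed:

```yaml
staging:
  stage: deploy
  script:
    # dpl needs ruby; skip the install steps if your image already ships it
    - apt-get update -qy
    - apt-get install -y ruby-dev
    - gem install dpl
    # deploy to the staging app with the key stored in HEROKU_STAGING_API_KEY
    - dpl --provider=heroku --app=my-app-staging --api-key=$HEROKU_STAGING_API_KEY
  only:
    - master
```
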
@@ -36,16 +36,17 @@ production:

This project has three jobs:

- `test` - used to test the Rails application.
- `staging` - used to automatically deploy the staging environment on every push to the `master` branch.
- `production` - used to automatically deploy the production environment for every created tag.

## Store API keys

You'll need to create two variables in your project's **Settings > CI/CD > Variables**:

- `HEROKU_STAGING_API_KEY` - Heroku API key used to deploy the staging app.
- `HEROKU_PRODUCTION_API_KEY` - Heroku API key used to deploy the production app.

Find your Heroku API key in [Manage Account](https://dashboard.heroku.com/account).

@@ -168,7 +168,7 @@
can be found at <https://docs.gitlab.com/runner/>.

In order to have a functional Runner you need to follow two steps:

1. [Install it][runner-install]
1. [Configure it](../runners/README.md#registering-a-specific-runner)

Follow the links above to set up your own Runner or use a Shared Runner as
described in the next section.

@@ -138,9 +138,9 @@
project without requiring your authorization, so use it with caution.

An admin can enable/disable a specific Runner for projects:

1. Navigate to **Admin > Runners**
1. Find the Runner you wish to enable/disable
1. Click edit on the Runner
1. Click **Enable** or **Disable** on the project

## Protected Runners

@@ -103,7 +103,7 @@ rspec:

In the example above, the `rspec` job inherits from the `.tests` template job.
GitLab will perform a reverse deep merge based on the keys. GitLab will:

- Merge the `rspec` contents into `.tests` recursively.

@@ -1337,6 +1337,81 @@
concatenated into a single file. Use a filename pattern (`junit: rspec-*.xml`),
an array of filenames (`junit: [rspec-1.xml, rspec-2.xml, rspec-3.xml]`), or a
combination thereof (`junit: [rspec.xml, test-results/TEST-*.xml]`).
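
For instance, an `rspec` job whose test run writes several XML files could collect them all into a single JUnit report; a minimal sketch reusing the patterns named above:

```yaml
rspec:
  script:
    - bundle exec rspec
  artifacts:
    reports:
      # a plain filename and a pattern are merged into one report
      junit: [rspec.xml, test-results/TEST-*.xml]
```
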
#### `artifacts:reports:codequality` **[STARTER]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `codequality` report collects [Code Quality issues](https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html)
as artifacts.
The collected Code Quality report will be uploaded to GitLab as an artifact and will
be automatically shown in merge requests.
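
As a sketch of how a job declares this report (the `script` command is a placeholder; `gl-code-quality-report.json` is the filename this document uses elsewhere):

```yaml
code_quality:
  script:
    - run-code-quality-analysis  # placeholder for the actual analysis command
  artifacts:
    reports:
      codequality: gl-code-quality-report.json
```

The other `artifacts:reports` types below follow the same shape, differing only in the report key and the file the job produces.
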
#### `artifacts:reports:sast` **[ULTIMATE]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `sast` report collects [SAST vulnerabilities](https://docs.gitlab.com/ee/user/project/merge_requests/sast.html)
as artifacts.
The collected SAST report will be uploaded to GitLab as an artifact, will be
automatically shown in merge requests and the pipeline view, and will provide
data for security dashboards.
#### `artifacts:reports:dependency_scanning` **[ULTIMATE]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `dependency_scanning` report collects [Dependency Scanning vulnerabilities](https://docs.gitlab.com/ee/user/project/merge_requests/dependency_scanning.html)
as artifacts.
The collected Dependency Scanning report will be uploaded to GitLab as an artifact, will be
automatically shown in merge requests and the pipeline view, and will provide
data for security dashboards.
#### `artifacts:reports:container_scanning` **[ULTIMATE]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `container_scanning` report collects [Container Scanning vulnerabilities](https://docs.gitlab.com/ee/user/project/merge_requests/container_scanning.html)
as artifacts.
The collected Container Scanning report will be uploaded to GitLab as an artifact, will be
automatically shown in merge requests and the pipeline view, and will provide
data for security dashboards.
#### `artifacts:reports:dast` **[ULTIMATE]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `dast` report collects [DAST vulnerabilities](https://docs.gitlab.com/ee/user/project/merge_requests/dast.html)
as artifacts.
The collected DAST report will be uploaded to GitLab as an artifact, will be
automatically shown in merge requests and the pipeline view, and will provide
data for security dashboards.
#### `artifacts:reports:license_management` **[ULTIMATE]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `license_management` report collects [Licenses](https://docs.gitlab.com/ee/user/project/merge_requests/license_management.html)
as artifacts.
The collected License Management report will be uploaded to GitLab as an artifact, will be
automatically shown in merge requests and the pipeline view, and will provide
data for security dashboards.
#### `artifacts:reports:performance` **[PREMIUM]**
> Introduced in GitLab 11.5. Requires GitLab Runner 11.5 and above.
The `performance` report collects [Performance metrics](https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html)
as artifacts.
The collected Performance report will be uploaded to GitLab as an artifact and will
be automatically shown in merge requests.
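
A sketch of a job declaring this report, mirroring the shape shown for `codequality` above; the job name, script, and `performance.json` filename are assumptions:

```yaml
performance:
  script:
    - run-browser-performance-tests  # placeholder for the actual metrics command
  artifacts:
    reports:
      performance: performance.json
```
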
## `dependencies`

> Introduced in GitLab 8.6 and GitLab Runner v1.1.1.

@@ -28,9 +28,9 @@
to filter data by. Instead one should ask themselves the following questions:

1. Can I write my query in such a way that it re-uses as many existing indexes
   as possible?
1. Is the data going to be large enough that using an index will actually be
   faster than just iterating over the rows in the table?
1. Is the overhead of maintaining the index worth the reduction in query
   timings?

We'll explore every question in detail below.

@@ -62,7 +62,7 @@
In short:

1. Try to write your query in such a way that it re-uses as many existing
   indexes as possible.
1. Run the query using `EXPLAIN ANALYZE` and study the output to find the most
   ideal query.

## Data Size

@@ -4,12 +4,13 @@
While developing a new feature or modifying an existing one, it is helpful if an
installable package (or a docker image) containing those changes is available
for testing. For this very purpose, a manual job is provided in the GitLab CI/CD
pipeline that can be used to trigger a pipeline in the omnibus-gitlab repository
that will create:

- A deb package for Ubuntu 16.04, available as a build artifact, and
- A docker image, which is pushed to [Omnibus GitLab's container
  registry](https://gitlab.com/gitlab-org/omnibus-gitlab/container_registry)
  (images titled `gitlab-ce` and `gitlab-ee` respectively, with the image tag being
  the commit which triggered the pipeline).

When you push a commit to either the gitlab-ce or gitlab-ee project, the
pipeline for that commit will have a `build-package` manual action you can

@@ -9,11 +9,11 @@
code is effective, understandable, and maintainable.

## Getting your merge request reviewed, approved, and merged

You are strongly encouraged to get your code **reviewed** by a
[reviewer](https://about.gitlab.com/handbook/engineering/#reviewer) as soon as
there is any code to review, to get a second opinion on the chosen solution and
implementation, and an extra pair of eyes looking for bugs, logic problems, or
uncovered edge cases. The reviewer can be from a different team, but it is
recommended to pick someone who knows the domain well. You can read more about the
importance of involving reviewer(s) in the section on the responsibility of the author below.

@@ -23,7 +23,7 @@
one of the [Merge request coaches][team].

Depending on the areas your merge request touches, it must be **approved** by one
or more [maintainers](https://about.gitlab.com/handbook/engineering/#maintainer):

For approvals, we use the approval functionality found in the merge request
widget. Reviewers can add their approval by [approving additionally](https://docs.gitlab.com/ee/user/project/merge_requests/merge_request_approvals.html#adding-or-removing-an-approval).

1. If your merge request includes backend changes [^1], it must be

@@ -42,43 +42,43 @@

Getting your merge request **merged** also requires a maintainer. If it requires
more than one approval, the last maintainer to review and approve it will also merge it.

As described in the section on the responsibility of the maintainer below, you
are recommended to get your merge request approved and merged by maintainer(s)
from other teams than your own.

### The responsibility of the merge request author

The responsibility to find the best solution and implement it lies with the
merge request author.

Before assigning a merge request to a maintainer for approval and merge, they
should be confident that it actually solves the problem it was meant to solve,
that it does so in the most appropriate way, that it satisfies all requirements,
and that there are no remaining bugs, logical problems, or uncovered edge cases.
The merge request should also have a completed task list in its description and
a passing CI pipeline to avoid unnecessary back and forth.

To reach the required level of confidence in their solution, an author is expected
to involve other people in the investigation and implementation processes as
appropriate.

They are encouraged to reach out to domain experts to discuss different solutions
or get an implementation reviewed, to product managers and UX designers to clear
up confusion or verify that the end result matches what they had in mind, to
database specialists to get input on the data model or specific queries, or to
any other developer to get an in-depth review of the solution.

If an author is unsure if a merge request needs a domain expert's opinion, that's
usually a pretty good sign that it does, since without it the required level of
confidence in their solution will not have been reached.

### The responsibility of the maintainer

Maintainers are responsible for the overall health, quality, and consistency of
the GitLab codebase, across domains and product areas.

Consequently, their reviews will focus primarily on things like overall
architecture, code organization, separation of concerns, tests, DRYness,
consistency, and readability.

Since a maintainer's job only depends on their knowledge of the overall GitLab
@@ -87,12 +87,12 @@
merge requests from any team and in any product area.

In fact, authors are recommended to get their merge requests merged by maintainers
from other teams than their own, to ensure that all code across GitLab is consistent
and can be easily understood by all contributors, from both inside and outside the
company, without requiring team-specific expertise.

Maintainers will do their best to also review the specifics of the chosen solution
before merging, but as they are not necessarily domain experts, they may be poorly
placed to do so without an unreasonable investment of time. In those cases, they
will defer to the judgment of the author and earlier reviewers and involved domain
experts, in favor of focusing on their primary responsibilities.

@@ -100,7 +100,7 @@
If a developer who happens to also be a maintainer was involved in a merge request
as a domain expert and/or reviewer, it is recommended that they are not also picked
as the maintainer to ultimately approve and merge it.

Maintainers should check before merging if the merge request is approved by the
required approvers.

## Best practices

@@ -230,41 +230,41 @@
Enterprise Edition instance. This has some implications:

1. **Query changes** should be tested to ensure that they don't result in worse
   performance at the scale of GitLab.com:
   1. Generating large quantities of data locally can help.
   1. Asking for query plans from GitLab.com is the most reliable way to validate
      these.
1. **Database migrations** must be:
   1. Reversible.
   1. Performant at the scale of GitLab.com - ask a maintainer to test the
      migration on the staging environment if you aren't sure.
   1. Categorised correctly:
      - Regular migrations run before the new code is running on the instance.
      - [Post-deployment migrations](post_deployment_migrations.md) run _after_
        the new code is deployed, when the instance is configured to do that.
      - [Background migrations](background_migrations.md) run in Sidekiq, and
        should only be done for migrations that would take an extreme amount of
        time at GitLab.com scale.
1. **Sidekiq workers**
   [cannot change in a backwards-incompatible way](sidekiq_style_guide.md#removing-or-renaming-queues):
   1. Sidekiq queues are not drained before a deploy happens, so there will be
      workers in the queue from the previous version of GitLab.
   1. If you need to change a method signature, try to do so across two releases,
      and accept both the old and new arguments in the first of those.
   1. Similarly, if you need to remove a worker, stop it from being scheduled in
      one release, then remove it in the next. This will allow existing jobs to
      execute.
   1. Don't forget, not every instance will upgrade to every intermediate version
      (some people may go from X.1.0 to X.10.0, or even try bigger upgrades!), so
      try to be liberal in accepting the old format if it is cheap to do so.
1. **Cached values** may persist across releases. If you are changing the type a
   cached value returns (say, from a string or nil to an array), change the
   cache key at the same time.
1. **Settings** should be added as a
   [last resort](https://about.gitlab.com/handbook/product/#convention-over-configuration).
   If you're adding a new setting in `gitlab.yml`:
   1. Try to avoid that, and add to `ApplicationSetting` instead.
   1. Ensure that it is also
      [added to Omnibus](https://docs.gitlab.com/omnibus/settings/gitlab.yml.html#adding-a-new-setting-to-gitlab-yml).
1. **Filesystem access** can be slow, so try to avoid
   [shared files](shared_files.md) when an alternative solution is available.

### Credits

@@ -17,20 +17,20 @@
The diffs fetching process _limits_ single file diff sizes and the overall size
then persisted on the `merge_request_diff_files` table.

Even though diffs larger than 10% of the value of `ApplicationSettings#diff_max_patch_bytes` are collapsed,
we still keep them on Postgres. However, diff files larger than defined _safety limits_
(see the [Diff limits section](#diff-limits)) are _not_ persisted in the database.

In order to present diffs information on the Merge Request diffs page, we:

1. Fetch all diff files from database `merge_request_diff_files`
1. Fetch the _old_ and _new_ file blobs in batch to:
   - Highlight old and new file content
   - Know which viewer it should use for each file (text, image, deleted, etc)
   - Know if the file content changed
   - Know if it was stored externally
   - Know if it had storage errors
1. If the diff file is cacheable (text-based), it's cached on Redis
   using `Gitlab::Diff::FileCollection::MergeRequestDiff`

### Note diffs

@@ -39,9 +39,9 @@
on `NoteDiffFile` (which is associated with the actual `DiffNote`). So instead
of hitting the repository every time we need the diff of the file, we:

1. Check whether we have the `NoteDiffFile#diff` persisted and use it
1. Otherwise, if it's a current MR revision, use the persisted
   `MergeRequestDiffFile#diff`
1. In the last scenario, go to the repository and fetch the diff

## Diff limits

@@ -119,10 +119,20 @@
This also applies to views.

### EE features based on CE features

For features that build on existing CE features, write a module in the `EE`
namespace and `prepend` it in the CE class, on the last line of the file that
the class resides in. This makes conflicts less likely to happen during CE to EE
merges because only one line is added to the CE class - the `prepend` line. For
example, to prepend a module into the `User` class you would use the following
approach:

```ruby
class User < ActiveRecord::Base
# ... lots of code here ...
end
User.prepend(EE::User)
```
Since the module would require an `EE` namespace, the file should also be
put in an `ee/` sub-directory. For example, we want to extend the user model

@@ -231,7 +241,6 @@
the existing file:

```ruby
class ApplicationController < ActionController::Base
  # ...

  def after_sign_out_path_for(resource)
    # ...
  end

  # ...
end

ApplicationController.prepend(EE::ApplicationController)
```

And create a new file in the `ee/` sub-directory with the altered implementation:

```ruby
module API
  class MergeRequests < Grape::API
    # ...

    params :optional_params do
      # CE specific params go here...
      # ...
    end
  end
end

API::MergeRequests.prepend(EE::API::MergeRequests)
```

And then we could override it in an EE module:

```ruby
module API
  class JobArtifacts < Grape::API
    # ...
        authorize_read_builds!
      end
    end
  end
end

API::JobArtifacts.prepend(EE::API::JobArtifacts)
```

And then we can follow regular object-oriented practices to override it:

```ruby
module API
  class MergeRequests < Grape::API
    # ...

    put ':id/merge_requests/:merge_request_iid/merge' do
      merge_request = find_project_merge_request(params[:merge_request_iid])
      # ...
    end
  end
end

API::MergeRequests.prepend(EE::API::MergeRequests)
```

Note that `update_merge_request_ee` doesn't do anything in CE, but

@@ -676,27 +687,37 @@
or not we really need to extend it from EE. For now we're not using it much.

Sometimes we need to use different arguments for a particular API route, and we
can't easily extend it with an EE module because Grape has different context in
different blocks. In order to overcome this, we need to move the data to a class
method that resides in a separate module or class. This allows us to extend that
module or class before its data is used, without having to place a `prepend` in
the middle of CE code.

For example, in one place we need to pass an extra argument to
`at_least_one_of` so that the API could consider an EE-only argument as the
least argument. We would approach this as follows:

```ruby
# api/merge_requests/parameters.rb
module API
  class MergeRequests < Grape::API
    module Parameters
      def self.update_params_at_least_one_of
        %i[
          assignee_id
          description
        ]
      end
    end
  end
end

API::MergeRequests::Parameters.prepend(EE::API::MergeRequests::Parameters)

# api/merge_requests.rb
module API
  class MergeRequests < Grape::API
    params do
      at_least_one_of(*Parameters.update_params_at_least_one_of)
    end
  end
end
```

@@ -708,16 +729,18 @@
And then we could easily extend that argument in the EE class method:

```ruby
module EE
  module API
    module MergeRequests
      module Parameters
        extend ActiveSupport::Concern

        class_methods do
          extend ::Gitlab::Utils::Override

          override :update_params_at_least_one_of
          def update_params_at_least_one_of
            super.push(*%i[
              squash
            ])
          end
        end
      end
    end
  end
end
```

@@ -728,6 +751,78 @@

It could be annoying if we need this for a lot of routes, but it might be the
simplest solution right now.

This approach can also be used when models define validations that depend on
class methods. For example:
```ruby
# app/models/identity.rb
class Identity < ActiveRecord::Base
def self.uniqueness_scope
[:provider]
end
prepend EE::Identity
validates :extern_uid,
allow_blank: true,
uniqueness: { scope: uniqueness_scope, case_sensitive: false }
end
# ee/app/models/ee/identity.rb
module EE
module Identity
extend ActiveSupport::Concern
class_methods do
extend ::Gitlab::Utils::Override
def uniqueness_scope
[*super, :saml_provider_id]
end
end
end
end
```
Instead of taking this approach, we would refactor our code into the following:
```ruby
# ee/app/models/ee/identity/uniqueness_scopes.rb
module EE
module Identity
module UniquenessScopes
extend ActiveSupport::Concern
class_methods do
extend ::Gitlab::Utils::Override
def uniqueness_scope
[*super, :saml_provider_id]
end
end
end
end
end
# app/models/identity/uniqueness_scopes.rb
class Identity < ActiveRecord::Base
module UniquenessScopes
def self.uniqueness_scope
[:provider]
end
end
end
Identity::UniquenessScopes.prepend(EE::Identity::UniquenessScopes)
# app/models/identity.rb
class Identity < ActiveRecord::Base
validates :extern_uid,
allow_blank: true,
    uniqueness: { scope: Identity::UniquenessScopes.uniqueness_scope, case_sensitive: false }
end
```
### Code in `spec/`

When you're testing EE-only features, avoid adding examples to the

@@ -183,9 +183,9 @@
Don't use ID selectors in CSS.

### Variables

Before adding a new variable for a color or a size, guarantee:

- There isn't already one.
- There isn't a similar one we can use instead.

## Linting

@@ -221,6 +221,14 @@ export default {
const vm = mountComponent(Component, data);

The main return value of a Vue component is the rendered output. In order to test the component we
need to test the rendered output. The [Vue][vue-test] guide to unit testing shows us exactly that:

## Vue.js Expert Role

One should apply to be a Vue.js expert by opening an MR when the merge requests they create and review show:

- Deep understanding of Vue and Vuex reactivity
- Vue and Vuex code structured according to both official and our guidelines
- Full understanding of testing a Vue and Vuex application
- Vuex code following the [documented pattern](./vuex.md#actions-pattern-request-and-receive-namespaces)
- Knowledge about the existing Vue and Vuex applications and existing reusable components

[vue-docs]: http://vuejs.org/guide/index.html
[issue-boards]: https://gitlab.com/gitlab-org/gitlab-ce/tree/master/app/assets/javascripts/boards

@@ -114,19 +114,21 @@
When a request is made we often want to show a loading state to the user.

Instead of creating an action to toggle the loading state and dispatch it in the component,
create:

1. An action `requestSomething`, to toggle the loading state
1. An action `receiveSomethingSuccess`, to handle the success callback
1. An action `receiveSomethingError`, to handle the error callback
1. An action `fetchSomething` to make the request.
1. In case your application does more than a `GET` request you can use these as examples:
   - `POST`: `createSomething`
   - `PUT`: `updateSomething`
   - `DELETE`: `deleteSomething`

The component MUST only dispatch the `fetchNamespace` action. Actions namespaced with `request` or `receive` should not be called from the component.
The `fetch` action will be responsible for dispatching `requestNamespace`, `receiveNamespaceSuccess` and `receiveNamespaceError`.

By following this pattern we guarantee:

1. All applications follow the same pattern, making it easier for anyone to maintain the code
1. All data in the application follows the same lifecycle pattern
1. Actions are contained and human friendly

@@ -297,12 +299,12 @@ export default {

```javascript
// component.vue

// bad
created() {
  this.$store.commit('mutation');
}

// good
created() {
  this.$store.dispatch('action');
}
```

@@ -99,8 +99,8 @@
This worker will wrap up the import process by performing some housekeeping

Advancing stages is done in one of two ways: Advancing stages is done in one of two ways:
1. Scheduling the worker for the next stage directly. - Scheduling the worker for the next stage directly.
2. Scheduling a job for `Gitlab::GithubImport::AdvanceStageWorker` which will - Scheduling a job for `Gitlab::GithubImport::AdvanceStageWorker` which will
advance the stage when all work of the current stage has been completed. advance the stage when all work of the current stage has been completed.
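A hedged illustration of the second way; the waiter key, job count, and stage name are assumptions for this sketch, not the worker's verified signature:

```ruby
# Illustrative only: hand off to AdvanceStageWorker, telling it which jobs
# the current stage is still waiting on and which stage comes next.
# `waiter` stands in for a hypothetical JobWaiter tracking the pending jobs.
Gitlab::GithubImport::AdvanceStageWorker.perform_async(
  project.id,
  { waiter.key => jobs_remaining }, # waiter key => number of pending jobs
  :notes                            # hypothetical next stage
)
```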
The first approach should only be used by workers that perform all their work in
@@ -147,7 +147,7 @@ We handle this by doing the following:

1. Once we hit the rate limit all jobs will automatically reschedule themselves
   in such a way that they are not executed until the rate limit has been reset.
1. We cache the mapping of GitHub users to GitLab users in Redis.

More information on user caching can be found below.
@@ -157,21 +157,21 @@ When mapping GitHub users to GitLab users we need to (in the worst case)
perform:

1. One API call to get the user's email address.
1. Two database queries to see if a corresponding GitLab user exists. One query
   will try to find the user based on the GitHub user ID, while the second query
   is used to find the user using their GitHub email address.

Because this process is quite expensive we cache the result of these lookups in
Redis. For every user looked up we store three keys:

- A Redis key mapping GitHub usernames to their email addresses.
- A Redis key mapping a GitHub email address to a GitLab user ID.
- A Redis key mapping a GitHub user ID to a GitLab user ID.

There are two types of lookups we cache:

- A positive lookup, meaning we found a GitLab user ID.
- A negative lookup, meaning we didn't find a GitLab user ID. Caching this
  prevents us from performing the same work for users that we know don't exist
  in our GitLab database.
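A hedged sketch of such a cached lookup; the key layout, TTL, and the `find_gitlab_user_id` helper are illustrative, not GitLab's actual implementation:

```ruby
require 'redis'

def cached_gitlab_user_id(redis, github_email)
  key = "github-import/gitlab-user-id/#{github_email}" # illustrative key name

  cached = redis.get(key)
  # An empty string records a negative lookup (no GitLab user was found).
  return cached.empty? ? nil : Integer(cached) unless cached.nil?

  user_id = find_gitlab_user_id(github_email) # hypothetical expensive lookup
  redis.set(key, user_id.to_s, ex: 24 * 60 * 60) # nil.to_s == '' caches a miss
  user_id
end
```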
...
@@ -117,11 +117,11 @@ The block is executed and the execution time is stored as a set of fields in the
currently running transaction. If no transaction is present the block is yielded
without measuring anything.

Three values are measured for a block:

- The real time elapsed, stored in NAME_real_time.
- The CPU time elapsed, stored in NAME_cpu_time.
- The call count, stored in NAME_call_count.

Both the real and CPU timings are measured in milliseconds.
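For example, assuming the measuring helper described here is `Gitlab::Metrics.measure`, instrumenting a block with `NAME = :find_user` might look like:

```ruby
# Stores find_user_real_time, find_user_cpu_time and find_user_call_count
# in the currently running transaction (a sketch under the assumption above).
user = Gitlab::Metrics.measure(:find_user) do
  User.find_by(username: 'alice')
end
```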
...
@@ -2,7 +2,7 @@

## Monitoring

We have a performance dashboard available in one of our [Grafana instances](https://dashboards.gitlab.net/d/1EBTz3Dmz/sitespeed-page-summary?orgId=1). This dashboard automatically aggregates metric data from [sitespeed.io](https://sitespeed.io) every 6 hours. These changes are displayed after a set number of pages are aggregated.

These pages can be found inside a text file in the gitlab-build-images [repository](https://gitlab.com/gitlab-org/gitlab-build-images) called [gitlab.txt](https://gitlab.com/gitlab-org/gitlab-build-images/blob/master/scripts/gitlab.txt).

Any frontend engineer can contribute to this dashboard. They can contribute by adding or removing URLs of pages from this text file. Please have a [frontend monitoring expert](https://about.gitlab.com/team) review your changes before assigning to a maintainer of the `gitlab-build-images` project. The changes will go live on the next scheduled run after the changes are merged into `master`.
...
@@ -9,17 +9,17 @@ The process of solving performance problems is roughly as follows:

1. Make sure there's an issue open somewhere (e.g., on the GitLab CE issue
   tracker), create one if there isn't. See [#15607][#15607] for an example.
1. Measure the performance of the code in a production environment such as
   GitLab.com (see the [Tooling](#tooling) section below). Performance should be
   measured over a period of _at least_ 24 hours.
1. Add your findings based on the measurement period (screenshots of graphs,
   timings, etc) to the issue mentioned in step 1.
1. Solve the problem.
1. Create a merge request, assign the "Performance" label and assign it to
   [@yorickpeterse][yorickpeterse] for reviewing.
1. Once a change has been deployed make sure to _again_ measure for at least 24
   hours to see if your changes have any impact on the production environment.
1. Repeat until you're done.

When providing timings make sure to provide:
@@ -94,14 +94,14 @@ result of this should be used instead of the `Benchmark` module.

In short:

- Don't trust benchmarks you find on the internet.
- Never make claims based on just benchmarks, always measure in production to
  confirm your findings.
- X being N times faster than Y is meaningless if you don't know what impact it
  will actually have on your production environment.
- A production environment is the _only_ benchmark that always tells the truth
  (unless your performance monitoring systems are not set up correctly).
- If you must write a benchmark use the benchmark-ips Gem instead of Ruby's
  `Benchmark` module.
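If you do have to write one, a minimal benchmark-ips sketch looks like this (the two variants compared are arbitrary examples):

```ruby
require 'benchmark/ips'

Benchmark.ips do |x|
  # Two arbitrary variants to compare; substitute the code you care about.
  x.report('equality')  { 'foo' == 'foo' }
  x.report('eql check') { 'foo'.eql?('foo') }

  # Prints iterations per second for each report, plus a comparison.
  x.compare!
end
```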
## Profiling

...
@@ -57,13 +57,13 @@ depends on this column being present while it's running. Normally you'd follow
these steps in such a case:

1. Stop the GitLab instance
1. Run the migration removing the column
1. Start the GitLab instance again

Using post deployment migrations we can instead follow these steps:

1. Deploy a new version of GitLab while ignoring post deployment migrations
1. Re-run `rake db:migrate` but without the environment variable set

Here we don't need any downtime as the migration takes place _after_ a new
version (which doesn't depend on the column anymore) has been deployed.
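A sketch of those two steps from the command line, assuming the skip flag is the `SKIP_POST_DEPLOYMENT_MIGRATIONS` environment variable:

```sh
# Step 1: deploy the new version, running regular migrations only.
SKIP_POST_DEPLOYMENT_MIGRATIONS=true bundle exec rake db:migrate

# Step 2: once the new version is live, run the remaining
# post deployment migrations.
bundle exec rake db:migrate
```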
...
@@ -8,8 +8,8 @@ in test environments we'll raise an error when this threshold is exceeded.

When a test fails because it executes more than 100 SQL queries there are two
solutions to this problem:

- Reduce the number of SQL queries that are executed.
- Whitelist the controller or API endpoint (see the sketch below).
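A hedged sketch of the second option, assuming the helper is `Gitlab::QueryLimiting.whitelist` and that it takes the URL of an issue tracking the problem:

```ruby
class ProjectsController < ApplicationController
  def show
    # The issue URL is a hypothetical placeholder documenting the debt.
    Gitlab::QueryLimiting.whitelist('https://gitlab.com/gitlab-org/gitlab-ce/issues/12345')

    # ... the action that currently needs more than 100 queries ...
  end
end
```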
You should only resort to whitelisting when an existing controller or endpoint
is to blame as in this case reducing the number of SQL queries can take a lot of
...
@@ -8,8 +8,8 @@ Let's say you want to swap the table "events" with "events_for_migration". In
this case you need to follow 3 steps:

1. Rename "events" to "events_temporary"
1. Rename "events_for_migration" to "events"
1. Rename "events_temporary" to "events_for_migration"

Rails allows you to do this using the `rename_table` method:
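A minimal sketch of such a migration (the class name is illustrative):

```ruby
class SwapEventsTables < ActiveRecord::Migration[5.0]
  def up
    rename_table :events, :events_temporary
    rename_table :events_for_migration, :events
    rename_table :events_temporary, :events_for_migration
  end

  def down
    # The swap is symmetric, so rolling back repeats the same renames.
    up
  end
end
```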
...
@@ -54,12 +54,12 @@ information from database or file system

When exporting SVGs, be sure to follow these guidelines:

- Convert all strokes to outlines.
- Use pathfinder tools to combine overlapping paths and create compound paths.
- SVGs that are limited to one color should be exported without a fill color so the color can be set using CSS.
- Ensure that exported SVGs have been run through an [SVG cleaner](https://github.com/RazrFalcon/SVGCleaner) to remove unused elements and attributes.

You can open your SVG in a text editor to ensure that it is clean.

Incorrect files will look like this:

```xml
...
```
@@ -101,7 +101,7 @@ GitLab's interface initially attracted Nazim when he was comparing version contr

### Demographics

**Age**

42 years old

**Location**
@@ -148,11 +148,11 @@ Matthieu describes GitLab as:

>"the only tool that offers the real feeling of having everything you need in one place."

He credits himself as being entirely responsible for moving his company to GitLab.

### Frustrations

#### Updating to the latest release

Matthieu introduced his company to GitLab. He is responsible for maintaining and managing the company's installation in addition to his day job. He feels updates are too frequent and he doesn't always have sufficient time to update GitLab. As a result, he's not up to date with releases.

Matthieu tried to set up automatic updates, however, as he isn't a Systems Administrator, he wasn't confident in his setup. He feels he should be able to "upgrade without users even noticing" but hasn't figured out how to do this yet. Matthieu would like the "update process to be triggered from the Admin Panel, perhaps accompanied with a changelog and the option to skip updates."
@@ -173,11 +173,11 @@ It's Matthieu's responsibility to get teams across his organization up and runni

He states that there has been: "a sluggishness of others to adapt" and it's "a low-effort adaptation at that."

### Goals

* To save time. One of the reasons Matthieu moved his company to GitLab was to reduce the effort it took him to manage and configure multiple tools, thus saving him time. He has to balance his day job in addition to managing the company's GitLab installation and onboarding new teams to GitLab.
* To use a platform which is easy to manage. Matthieu isn't a Systems Administrator, and when updating GitLab, creating backups, etc., he would prefer to work within GitLab's UI. Explanations / guided instructions when configuring settings in GitLab's interface would really help Matthieu. He needs reassurance that what he is about to change:
  - Is the right setting.
  - Will provide him with the desired result.
* Matthieu needs to educate his colleagues about GitLab. Matthieu's colleagues won't adopt GitLab as they're unaware of its capabilities and the positive impact it could have on their work. Matthieu needs support in getting this message across to them.
@@ -307,4 +307,4 @@ Karolina has an interest in UX and therefore has strong opinions about how GitLa

### Goals

* To develop her programming experience and to learn from other developers.
* To contribute to both her own and other open source projects.
* To use a fast and intuitive version control platform.
@@ -300,7 +300,7 @@ The same applies to `rename_column_using_background_migration`:

1. Create a migration using the helper, which will schedule background
   migrations to spread the writes over a longer period of time.
1. In the next monthly release, create a clean-up migration to steal from the
   Sidekiq queues, migrate any missing rows, and clean up the rename. This
   migration should skip the steps after stealing from the Sidekiq queues if the
   column has already been renamed.

...
@@ -12,7 +12,7 @@

![SSH Keys](img/profile_settings_ssh_keys.png)

1. Paste your **public** key that you generated in the first step in the 'Key'
   box.

   ![Paste SSH public key](img/profile_settings_ssh_keys_paste_pub.png)

...
@@ -132,9 +132,9 @@ Remove the old Ruby 1.8 if present:

Download Ruby and compile it:

    mkdir /tmp/ruby && cd /tmp/ruby
    curl --remote-name --progress https://cache.ruby-lang.org/pub/ruby/2.5/ruby-2.5.3.tar.gz
    echo 'f919a9fbcdb7abecd887157b49833663c5c15fda  ruby-2.5.3.tar.gz' | shasum -c - && tar xzf ruby-2.5.3.tar.gz
    cd ruby-2.5.3
    ./configure --disable-install-rdoc
    make

...
@@ -20,17 +20,17 @@ To use Akismet:

1. Go to the URL: https://akismet.com/account/
1. Sign in or create a new account.
1. Click on **Show** to reveal the API key.
1. Go to Applications Settings on Admin Area (`admin/application_settings`).
1. Check the **Enable Akismet** checkbox.
1. Fill in the API key from step 3.
1. Save the configuration.

![Screenshot of Akismet settings](img/akismet_settings.png)
@@ -42,9 +42,9 @@ To use Akismet:

As a way to better distinguish between spam and ham, you can train the Akismet
filter whenever there is a false positive or false negative.

When an entry is recognized as spam, it is rejected and added to the Spam Logs.
From here you can review if they are really spam. If one of them is not really
spam, you can use the **Submit as ham** button to tell Akismet that it falsely
recognized an entry as spam.

![Screenshot of Spam Logs](img/spam_log.png)

...
@@ -8,19 +8,13 @@ to confirm that a real user, not a bot, is attempting to create an account.

To use reCAPTCHA, first you must create a site and private key.

1. Go to the URL: <https://www.google.com/recaptcha/admin>.
1. Fill out the form necessary to obtain reCAPTCHA keys.
1. Log in to your GitLab server, with administrator credentials.
1. Go to Applications Settings on Admin Area (`admin/application_settings`).
1. Fill all reCAPTCHA fields with keys from previous steps.
1. Check the `Enable reCAPTCHA` checkbox.
1. Save the configuration.

## Enabling reCAPTCHA for user logins via passwords
...
@@ -10,7 +10,7 @@ Rack Attack offers IP whitelisting, blacklisting, Fail2ban style filtering and
tracking.

**Note:** Starting with 11.2, Rack Attack is disabled by default. To continue
using this feature, please enable it in your `gitlab.rb` by setting
`gitlab_rails['rack_attack_git_basic_auth'] = true`.

By default, user sign-in, user sign-up (if enabled), and user password reset is

@@ -41,7 +41,7 @@ For more information on how to use these options check out

   ```
   }
   ```

1. Reconfigure GitLab:

   ```
   sudo gitlab-ctl reconfigure
   ```
@@ -98,26 +98,26 @@ In case you want to remove a blocked IP, follow these steps:

   ```sh
   grep "Rack_Attack" /var/log/gitlab/gitlab-rails/production.log
   ```

1. Since the blacklist is stored in Redis, you need to open up `redis-cli`:

   ```sh
   /opt/gitlab/embedded/bin/redis-cli -s /var/opt/gitlab/redis/redis.socket
   ```

1. You can remove the block using the following syntax, replacing `<ip>` with
   the actual IP that is blacklisted:

   ```
   del cache:gitlab:rack::attack:allow2ban:ban:<ip>
   ```

1. Confirm that the key with the IP no longer shows up:

   ```
   keys *rack::attack*
   ```

1. Optionally, add the IP to the whitelist to prevent it from being blacklisted
   again (see [settings](#settings)).

## Troubleshooting
@@ -129,11 +129,11 @@ the load balancer. In that case, you will need to:

1. [Configure `nginx[real_ip_trusted_addresses]`](https://docs.gitlab.com/omnibus/settings/nginx.html#configuring-gitlab-trusted_proxies-and-the-nginx-real_ip-module).
   This will keep users' IPs from being listed as the load balancer IPs.
1. Whitelist the load balancer's IP address(es) in the Rack Attack [settings](#settings).
1. Reconfigure GitLab:

   ```
   sudo gitlab-ctl reconfigure
   ```

1. [Remove the block via Redis.](#remove-blocked-ips-from-rack-attack-via-redis)
@@ -13,8 +13,8 @@ You can read more about it here:

Users on GitLab can enable it without any admin's intervention. If you want to
enforce everyone to set up 2FA, you can choose from two different ways:

- Enforce on next login.
- Suggest on next login, but allow a grace period before enforcing.

In the Admin area under **Settings** (`/admin/application_settings`), look for
the "Sign-in Restrictions" area, where you can configure both.

...
@@ -78,7 +78,7 @@ Workshop Time!

```bash
git config --global user.name "Your Name"
git config --global user.email you@example.com
```

- If you don't use the global flag you can set up a different author for
  each project

@@ -107,14 +107,14 @@ cd ~/development

```bash
-or-

mkdir ~/workspace
cd ~/workspace
```

---

## Git Basics

---

### Git Workflow
@@ -136,7 +136,7 @@ cd ~/workspace

  issue tracking, Merge Requests, and other features.
- The hosted version of GitLab is gitlab.com

---

### New Project

@@ -150,12 +150,12 @@ cd ~/workspace

### Git and GitLab basics

1. Edit `edit_this_file.rb` in `training-examples`
1. See it listed as a changed file (working area)
1. View the differences
1. Stage the file
1. Commit
1. Push the commit to the remote
1. View the git log

---
@@ -169,14 +169,14 @@ git push origin master

```bash
git log
```

---
### Feature Branching

1. Create a new feature branch called `squash_some_bugs`
1. Edit `bugs.rb` and remove all the bugs.
1. Commit
1. Push
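One possible command sequence for these steps:

```bash
git checkout -b squash_some_bugs
# ... edit bugs.rb and remove all the bugs ...
git add bugs.rb
git commit -m 'Squash some bugs'
git push origin squash_some_bugs
```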
---
@@ -250,16 +250,17 @@ git push origin squash_some_bugs

---

### Example Plan

1. Checkout a new branch and edit conflicts.rb. Add 'Line4' and 'Line5'.
1. Commit and push
1. Checkout master and edit conflicts.rb. Add 'Line6' and 'Line7' below 'Line3'.
1. Commit and push to master
1. Create a merge request and watch it fail
1. Rebase our new branch with master
1. Fix conflicts on the conflicts.rb file.
1. Stage the file and continue rebasing
1. Force push the changes
1. Finally continue with the Merge Request
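Steps 6-9 on the command line might look like this (the branch name is a placeholder):

```bash
git checkout <branch-name>
git rebase master
# ... resolve the conflict markers in conflicts.rb ...
git add conflicts.rb
git rebase --continue
git push --force-with-lease origin <branch-name>
```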
---
@@ -362,15 +363,15 @@ Don't reset after pushing

### Reset Workflow

1. Edit file again 'edit_this_file.rb'
1. Check status
1. Add and commit with wrong message
1. Check log
1. Amend commit
1. Check log
1. Soft reset
1. Check log
1. Pull for updates
1. Push changes
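One possible command sequence (the commit messages are placeholders):

```bash
git status
git add edit_this_file.rb
git commit -m 'Wrong message'
git log
git commit --amend -m 'A better message'
git log
git reset --soft HEAD^
git log
git pull origin master
git push origin master
```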
----
@@ -389,9 +390,9 @@ Don't reset after pushing

### Note

git revert vs git reset

Reset removes the commit while revert removes the changes but leaves the commit.
Revert is safer considering we can revert a revert.

# Changed file

...
@@ -2,7 +2,7 @@

comments: false
---

# Bisect

----------

@@ -17,11 +17,11 @@ comments: false

## Bisect

1. Start the bisect process
1. Enter the bad revision (usually latest commit)
1. Enter a known good revision (commit/branch)
1. Run code to see if bug still exists
1. Tell bisect the result
1. Repeat the previous 2 items until you find the offending commit
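A typical session might look like this (the revisions are placeholders):

```bash
git bisect start
git bisect bad HEAD      # the latest commit is known to be broken
git bisect good v1.0     # a revision known to be good
# ... run your code or test, then report the result:
git bisect good          # or: git bisect bad
# Repeat until git names the offending commit, then clean up:
git bisect reset
```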
----------

...
@@ -35,11 +35,10 @@ comments: false

## Instantiate workflow with clone

1. Create a project in your user namespace.
   - Choose to import from 'Any Repo by URL' and use <https://gitlab.com/gitlab-org/training-examples.git>.
1. Create a '`Workspace`' directory in your home directory.
1. Clone the '`training-examples`' project.

----------

...
@@ -46,11 +46,11 @@ Git log lists commit history. It allows searching and filtering.

## Git Log Workflow

1. Change to workspace directory
1. Clone the multi runner projects
1. Change to project dir
1. Search by author
1. Search by date
1. Combine
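One way to run through the workflow (the project URL, author, and dates are examples):

```bash
cd ~/workspace
git clone git@gitlab.com:gitlab-org/gitlab-runner.git
cd gitlab-runner
git log --author="Travis"
git log --since=2018-01-01 --until=2018-03-01
git log --author="Travis" --since=2018-01-01 --until=2018-03-01
```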
----------

...
@@ -16,15 +16,15 @@ comments: false

## Merge conflicts

1. Checkout a new branch and edit `conflicts.rb`. Add 'Line4' and 'Line5'.
1. Commit and push.
1. Checkout master and edit `conflicts.rb`. Add 'Line6' and 'Line7' below 'Line3'.
1. Commit and push to master.
1. Create a merge request and watch it fail.
1. Rebase our new branch with master.
1. Fix conflicts on the `conflicts.rb` file.
1. Stage the file and continue rebasing.
1. Force push the changes.
1. Finally continue with the Merge Request.

----------

...
@@ -41,15 +41,15 @@ comments: false

## Reset Workflow

1. Edit file again 'edit_this_file.rb'
1. Check status
1. Add and commit with wrong message
1. Check log
1. Amend commit
1. Check log
1. Soft reset
1. Check log
1. Pull for updates
1. Push changes

----------

...
@@ -66,12 +66,12 @@ stashes.

## Git Stash

1. Modify a file
1. Stage file
1. Stash it
1. View our stash list
1. Confirm no pending changes through status
1. Apply with pop
1. View list to confirm changes
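The corresponding commands might be:

```bash
git add edit_this_file.rb   # stage a modified file
git stash                   # stash it away
git stash list              # view the stash list
git status                  # confirm no pending changes
git stash pop               # re-apply and drop the most recent stash
git stash list              # confirm the stash is gone
```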
----------

...
@@ -38,12 +38,12 @@ Starting with GitLab 9.1.0 it's possible to upgrade to a newer major, minor, or
patch version of GitLab without having to take your GitLab instance offline.
However, for this to work there are the following requirements:

- You can only upgrade 1 minor release at a time. So from 9.1 to 9.2, not to
  9.3.
- You have to use [post-deployment
  migrations](../development/post_deployment_migrations.md) (included in
  zero downtime update steps below).
- You are using PostgreSQL. If you are using MySQL please look at the release
  post to see if downtime is required.

Most of the time you can safely upgrade from a patch release to the next minor

...
@@ -49,3 +49,19 @@ and the default value is `30 days`. On GitLab.com they

This setting is set per job and can be overridden in
[`.gitlab-ci.yml`](../../../ci/yaml/README.md#artifacts-expire_in).
To disable the expiration, set it to `0`. The default unit is in seconds.
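For example, a per-job override in `.gitlab-ci.yml` might look like this (the job name and duration are illustrative):

```yaml
build:
  script: make build
  artifacts:
    paths:
      - dist/
    expire_in: 1 week
```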
## Archive jobs **[CORE ONLY]**

Archiving jobs is useful for reducing the CI/CD footprint on the system by
removing some of the capabilities of the jobs (metadata needed to run the job),
but persisting the traces and artifacts for auditing purposes.

To set the duration for which the jobs will be considered as old and expired:

1. Go to **Admin area > Settings > CI/CD > Continuous Integration and Deployment**.
1. Change the value of "Archive jobs".
1. Hit **Save changes** for the changes to take effect.

Once that time passes, the jobs will be archived and no longer able to be
retried. Leave the value empty to never expire jobs. It has to be no less than 1 day,
for example: <code>15 days</code>, <code>1 month</code>, <code>2 years</code>.
@@ -158,7 +158,7 @@ authentication. If an SSH key is added to your GitLab account, you can generate
a new set of recovery codes with SSH.

1. Run `ssh git@gitlab.example.com 2fa_recovery_codes`.
1. You are prompted to confirm that you want to generate new codes. Continuing this process invalidates previously saved codes.

   ```bash
   $ ssh git@gitlab.example.com 2fa_recovery_codes

@@ -185,7 +185,7 @@ a new set of recovery codes with SSH.

   so you do not lose access to your account again.
   ```

1. Go to the GitLab sign-in page and enter your username/email and password.
   When prompted for a two-factor code, enter one of the recovery codes obtained
   from the command-line output.

...
@@ -97,13 +97,13 @@ You and GitLab admins can see your the abovementioned information on your profil

> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/issues/14078) in GitLab 11.3.

Enabling private contributions will include contributions to private projects in the user contribution calendar graph and user recent activity.

To enable private contributions:

1. Navigate to your personal [profile settings](#profile-settings).
1. Check the "Private contributions" option.
1. Hit **Update profile settings**.

## Current status

...
---
author: Joshua Lambert
author_gitlab: joshlambert
level: intermediate
article_type: tutorial
date: 2018-06-05
---
# Connecting and deploying to an Amazon EKS cluster

## Introduction

In this tutorial, we will show how to integrate an [Amazon EKS](https://aws.amazon.com/eks/) cluster with GitLab, and begin deploying applications.

For an end-to-end walkthrough we will:
@@ -21,7 +13,7 @@ For an end-to-end walkthrough we will:

You will need:

1. An account on GitLab, like [GitLab.com](https://gitlab.com)
1. An Amazon EKS cluster (with worker nodes properly configured)
1. `kubectl` [installed and configured for access to the EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#get-started-kubectl)

If you don't have an Amazon EKS cluster, one can be created by following [the EKS getting started guide](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html).
@@ -38,26 +30,103 @@ Give the project a name, and then select `Create project`.

![Create Project](img/create_project.png)

## Configuring and connecting the EKS cluster

From the left side bar, hover over `Operations` and select `Kubernetes`, then click on `Add Kubernetes cluster`, and finally `Add an existing Kubernetes cluster`.

A few details from the EKS cluster will be required to connect it to GitLab.

1. **Retrieve the certificate**: A valid Kubernetes certificate is needed to authenticate to the EKS cluster. We will use the certificate created by default. Open a shell and use `kubectl` to retrieve it:
   - List the secrets with `kubectl get secrets`, and one should be named similar to `default-token-xxxxx`. Copy that token name for use below.
   - Get the certificate with `kubectl get secret <secret name> -o jsonpath="{['data']['ca\.crt']}" | base64 -D`
1. **Create admin token**: A `cluster-admin` token is required to install and manage Helm Tiller. GitLab establishes mutual SSL auth with Helm Tiller and creates limited service accounts for each application. To create the token we will create an admin service account as follows:

   1. Create a file called `eks-admin-service-account.yaml` with the text below:

      ```yaml
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: eks-admin
        namespace: kube-system
      ```

   1. Apply the service account to your cluster:

      ```bash
      kubectl apply -f eks-admin-service-account.yaml
      ```

      Output:

      ```bash
      serviceaccount "eks-admin" created
      ```

   1. Create a file called `eks-admin-cluster-role-binding.yaml` with the text below:

      ```yaml
      apiVersion: rbac.authorization.k8s.io/v1beta1
      kind: ClusterRoleBinding
      metadata:
        name: eks-admin
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: cluster-admin
      subjects:
      - kind: ServiceAccount
        name: eks-admin
        namespace: kube-system
      ```

   1. Apply the cluster role binding to your cluster:

      ```bash
      kubectl apply -f eks-admin-cluster-role-binding.yaml
      ```

      Output:

      ```bash
      clusterrolebinding "eks-admin" created
      ```

   1. Retrieve the token for the `eks-admin` service account, and copy the `<authentication_token>` value from the output:

      ```bash
      kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}')
      ```

      Output:

      ```yaml
      Name:         eks-admin-token-b5zv4
      Namespace:    kube-system
      Labels:       <none>
      Annotations:  kubernetes.io/service-account.name=eks-admin
                    kubernetes.io/service-account.uid=bcfe66ac-39be-11e8-97e8-026dce96b6e8

      Type:         kubernetes.io/service-account-token

      Data
      ====
      ca.crt:     1025 bytes
      namespace:  11 bytes
      token:      <authentication_token>
      ```

1. The API server endpoint is also required, so GitLab can connect to the cluster. This is displayed on the AWS EKS console when viewing the EKS cluster details.
You now have all the information needed to connect the EKS cluster:

- Kubernetes cluster name: Provide a name for the cluster to identify it within GitLab.
- Environment scope: Leave this as `*` for now, since we are only connecting a single cluster.
- API URL: Paste in the API server endpoint retrieved above.
- CA Certificate: Paste the certificate data from the earlier step, as-is.
- Token: Paste the admin token value from the earlier step.
- Project namespace: This can be left blank to accept the default namespace, based on the project name.
...@@ -65,9 +134,11 @@ Click on `Add Kubernetes cluster`, the cluster is now connected to GitLab. At th ...@@ -65,9 +134,11 @@ Click on `Add Kubernetes cluster`, the cluster is now connected to GitLab. At th
If you would like to utilize your own CI/CD scripts to deploy to the cluster, you can stop here. If you would like to utilize your own CI/CD scripts to deploy to the cluster, you can stop here.
## Disable Role-Based Access Control (RBAC) ## Disable Role-Based Access Control (RBAC) - Optional
When connecting a cluster via GitLab integration, you may specify whether the cluster is RBAC-enabled or not. This will affect how GitLab interacts with the cluster for certain operations. If you **did not** check the "RBAC-enabled cluster" checkbox at creation time, GitLab will assume RBAC is disabled for your cluster when interacting with it. If so, you must disable RBAC on your cluster for the integration to work properly.
Presently, Auto DevOps and one-click app installs do not support [Kubernetes role-based access control](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Support is [being worked on](https://gitlab.com/groups/gitlab-org/-/epics/136), but in the interim RBAC must be disabled to utilize for these features. ![rbac](img/rbac.png)
> **Note**: Disabling RBAC means that any application running in the cluster, or user who can authenticate to the cluster, has full API access. This is a [security concern](https://docs.gitlab.com/ee/user/project/clusters/#security-implications), and may not be desirable. > **Note**: Disabling RBAC means that any application running in the cluster, or user who can authenticate to the cluster, has full API access. This is a [security concern](https://docs.gitlab.com/ee/user/project/clusters/#security-implications), and may not be desirable.
......
@@ -139,12 +139,12 @@ docker login registry.example.com -u <username> -p <token>

1. Check to make sure that the system clock on your Docker client and GitLab server have
   been synchronized (e.g. via NTP).
1. If you are using an S3-backed Registry, double check that the IAM
   permissions and the S3 credentials (including region) are correct. See [the
   sample IAM policy](https://docs.docker.com/registry/storage-drivers/s3/)
   for more details.
1. Check the Registry logs (e.g. `/var/log/gitlab/registry/current`) and the GitLab production logs
   for errors (e.g. `/var/log/gitlab/gitlab-rails/production.log`). You may be able to find clues
   there.

...
@@ -13,8 +13,8 @@ You can create as many deploy tokens as you like from the settings of your proje

1. Log in to your GitLab account.
1. Go to the project you want to create Deploy Tokens for.
1. Go to **Settings** > **Repository**.
1. Click on "Expand" in the **Deploy Tokens** section.
1. Choose a name and optionally an expiry date for the token.
1. Choose the [desired scopes](#limiting-scopes-of-a-deploy-token).
1. Click on **Create deploy token**.

@@ -46,8 +46,8 @@ the following table.

To download a repository using a Deploy Token, you just need to:

1. Create a Deploy Token with `read_repository` as a scope.
1. Take note of your `username` and `token`.
1. `git clone` the project using the Deploy Token:

   ```sh
   git clone http://<username>:<deploy_token>@gitlab.example.com/tanuki/awesome_project.git
   ```

@@ -60,8 +60,8 @@ Replace `<username>` and `<deploy_token>` with the proper values.

To read the container registry images, you'll need to:

1. Create a Deploy Token with `read_registry` as a scope.
1. Take note of your `username` and `token`.
1. Log in to GitLab’s Container Registry using the deploy token:

   ```sh
   docker login registry.example.com -u <username> -p <deploy_token>
   ```

...
@@ -65,9 +65,9 @@ developer documentation.

Before you begin, ensure that any GitHub users who you want to map to GitLab users have either:

- A GitLab account that has logged in using the GitHub icon

\- or -

- A GitLab account with an email address that matches the [public email address](https://help.github.com/articles/setting-your-commit-email-address-on-github/) of the GitHub user

User-matching attempts occur in that order, and if a user is not identified either way, the activity is associated with
the user account that is performing the import.
@@ -77,10 +77,10 @@ If you are using a self-hosted GitLab instance, this process requires that you h
[GitHub integration][gh-import].

1. From the top navigation bar, click **+** and select **New project**.
1. Select the **Import project** tab and then select **GitHub**.
1. Select the first button to **List your GitHub repositories**. You are redirected to a page on github.com to authorize the GitLab application.
1. Click **Authorize gitlabhq**. You are redirected back to GitLab's Import page and all of your GitHub repositories are listed.
1. Continue on to [selecting which repositories to import](#selecting-which-repositories-to-import).

### Using a GitHub token
...@@ -92,12 +92,12 @@ integration enabled, that should be the preferred method to import your reposito ...@@ -92,12 +92,12 @@ integration enabled, that should be the preferred method to import your reposito
If you are not using the GitHub integration, you can still perform an authorization with GitHub to grant GitLab access your repositories: If you are not using the GitHub integration, you can still perform an authorization with GitHub to grant GitLab access your repositories:
1. Go to https://github.com/settings/tokens/new 1. Go to https://github.com/settings/tokens/new
2. Enter a token description. 1. Enter a token description.
3. Select the repo scope. 1. Select the repo scope.
4. Click **Generate token**. 1. Click **Generate token**.
5. Copy the token hash. 1. Copy the token hash.
6. Go back to GitLab and provide the token to the GitHub importer. 1. Go back to GitLab and provide the token to the GitHub importer.
7. Hit the **List Your GitHub Repositories** button and wait while GitLab reads your repositories' information. 1. Hit the **List Your GitHub Repositories** button and wait while GitLab reads your repositories' information.
Once done, you'll be taken to the importer page to select the repositories to import. Once done, you'll be taken to the importer page to select the repositories to import.
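Behind the **List Your GitHub Repositories** button, GitLab essentially asks the GitHub API which repositories the token can see. A rough equivalent of that request (this is not GitLab's importer code; pagination is omitted):

```ruby
require 'net/http'
require 'json'

# List the repositories visible to a personal access token via the GitHub API.
token = ENV.fetch('GITHUB_TOKEN')
uri = URI('https://api.github.com/user/repos?per_page=100')

request = Net::HTTP::Get.new(uri)
request['Authorization'] = "token #{token}"

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(request) }
JSON.parse(response.body).each { |repo| puts repo['full_name'] }
```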
### Selecting which repositories to import ### Selecting which repositories to import
...@@ -107,10 +107,10 @@ your GitHub repositories are listed. ...@@ -107,10 +107,10 @@ your GitHub repositories are listed.
1. By default, the proposed repository namespaces match the names as they exist in GitHub, but based on your permissions, 1. By default, the proposed repository namespaces match the names as they exist in GitHub, but based on your permissions,
you can choose to edit these names before you proceed to import any of them. you can choose to edit these names before you proceed to import any of them.
2. Select the **Import** button next to any number of repositories, or select **Import all repositories**. 1. Select the **Import** button next to any number of repositories, or select **Import all repositories**.
3. The **Status** column shows the import status of each repository. You can choose to leave the page open and it will 1. The **Status** column shows the import status of each repository. You can choose to leave the page open and it will
update in real time, or you can return to it later. update in real time, or you can return to it later.
4. Once a repository has been imported, click its GitLab path to open its GitLab URL. 1. Once a repository has been imported, click its GitLab path to open its GitLab URL.
## Mirroring and pipeline status sharing ## Mirroring and pipeline status sharing
......
...@@ -16,8 +16,9 @@ Once you have configured and enabled Bugzilla you'll see the Bugzilla link on th ...@@ -16,8 +16,9 @@ Once you have configured and enabled Bugzilla you'll see the Bugzilla link on th
## Referencing issues in Bugzilla ## Referencing issues in Bugzilla
Issues in Bugzilla can be referenced in two alternative ways: Issues in Bugzilla can be referenced in two alternative ways:
1. `#<ID>` where `<ID>` is a number (example `#143`).
2. `<PROJECT>-<ID>` where `<PROJECT>` starts with a capital letter which is - `#<ID>` where `<ID>` is a number (example `#143`).
- `<PROJECT>-<ID>` where `<PROJECT>` starts with a capital letter which is
then followed by capital letters, numbers or underscores, and `<ID>` is then followed by capital letters, numbers or underscores, and `<ID>` is
a number (example `API_32-143`). a number (example `API_32-143`).
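A rough Ruby approximation of the two reference formats (GitLab's exact pattern lives in its source and may differ):

```ruby
# Approximation of the two documented formats; not GitLab's exact pattern.
SHORT_REF   = /#\d+/                # e.g. #143
PROJECT_REF = /[A-Z][A-Z0-9_]*-\d+/ # e.g. API_32-143

text = 'Fixed in API_32-143, see also #143'
text.scan(PROJECT_REF) # => ["API_32-143"]
text.scan(SHORT_REF)   # => ["#143"]
```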
......
...@@ -4,16 +4,15 @@ An API token is needed when integrating with JIRA Cloud, follow the steps ...@@ -4,16 +4,15 @@ An API token is needed when integrating with JIRA Cloud, follow the steps
below to create one: below to create one:
1. Log in to https://id.atlassian.com with your email. 1. Log in to https://id.atlassian.com with your email.
2. **Click API tokens**, then **Create API token**. 1. **Click API tokens**, then **Create API token**.
![JIRA API token](img/jira_api_token_menu.png) ![JIRA API token](img/jira_api_token_menu.png)
![JIRA API token](img/jira_api_token.png) ![JIRA API token](img/jira_api_token.png)
3. Make sure to write down your new API token as you will need it in the next [steps](jira.md#configuring-gitlab). 1. Make sure to write down your new API token as you will need it in the next [steps](jira.md#configuring-gitlab).
NOTE: **Note** NOTE: **Note**
It is important that the user associated with this email has 'write' access to projects in JIRA. It is important that the user associated with this email has 'write' access to projects in JIRA.
The JIRA configuration is complete. You will need this newly created token and the email you used to log in when [configuring GitLab in the next section](jira.md#configuring-gitlab). The JIRA configuration is complete. You will need this newly created token and the email you used to log in when [configuring GitLab in the next section](jira.md#configuring-gitlab).
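Before wiring the token into GitLab, you can sanity-check it against JIRA Cloud's REST API using basic auth (login email plus token). A minimal sketch, assuming your site is `yoursite.atlassian.net`:

```ruby
require 'net/http'
require 'json'

# Minimal token check against JIRA Cloud; replace the host and email with yours.
uri = URI('https://yoursite.atlassian.net/rest/api/2/myself')

request = Net::HTTP::Get.new(uri)
request.basic_auth('you@example.com', ENV.fetch('JIRA_API_TOKEN'))

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(request) }
puts JSON.parse(response.body)['displayName'] # prints your name when the token works
```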
...@@ -17,17 +17,17 @@ We have split this stage in steps so it is easier to follow. ...@@ -17,17 +17,17 @@ We have split this stage in steps so it is easier to follow.
![Jira user management link](img/jira_user_management_link.png) ![Jira user management link](img/jira_user_management_link.png)
2. The next step is to create a new user (e.g., `gitlab`) who has write access 1. The next step is to create a new user (e.g., `gitlab`) who has write access
to projects in Jira. Enter the user's name and a _valid_ e-mail address to projects in Jira. Enter the user's name and a _valid_ e-mail address
since Jira sends a verification e-mail to set up the password. since Jira sends a verification e-mail to set up the password.
_**Note:** Jira creates the username automatically by using the e-mail _**Note:** Jira creates the username automatically by using the e-mail
prefix. You can change it later, if needed. Our integration does not support SSO (such as SAML). You will need to create prefix. You can change it later, if needed. Our integration does not support SSO (such as SAML). You will need to create
an HTTP basic authentication password. You can do this by visiting the user an HTTP basic authentication password. You can do this by visiting the user
profile, looking up the username, and setting a password._ profile, looking up the username, and setting a password._
![Jira create new user](img/jira_create_new_user.png) ![Jira create new user](img/jira_create_new_user.png)
3. Create a `gitlab-developers` group which will have write access 1. Create a `gitlab-developers` group which will have write access
to projects in Jira. Go to the **Groups** tab and select **Create group**. to projects in Jira. Go to the **Groups** tab and select **Create group**.
![Jira create new group](img/jira_create_new_group.png) ![Jira create new group](img/jira_create_new_group.png)
...@@ -36,13 +36,13 @@ We have split this stage in steps so it is easier to follow. ...@@ -36,13 +36,13 @@ We have split this stage in steps so it is easier to follow.
![Jira create new group](img/jira_create_new_group_name.png) ![Jira create new group](img/jira_create_new_group_name.png)
4. To give the newly-created group 'write' access, go to 1. To give the newly-created group 'write' access, go to
**Application access > View configuration** and add the `gitlab-developers` **Application access > View configuration** and add the `gitlab-developers`
group to Jira Core. group to Jira Core.
![Jira group access](img/jira_group_access.png) ![Jira group access](img/jira_group_access.png)
5. Add the `gitlab` user to the `gitlab-developers` group by going to 1. Add the `gitlab` user to the `gitlab-developers` group by going to
**Users > GitLab user > Add group** and selecting the `gitlab-developers` **Users > GitLab user > Add group** and selecting the `gitlab-developers`
group from the dropdown menu. Notice that the group says _Access_, which is group from the dropdown menu. Notice that the group says _Access_, which is
intended as part of this process. intended as part of this process.
......
...@@ -18,15 +18,16 @@ in the table below. ...@@ -18,15 +18,16 @@ in the table below.
![Redmine configuration](img/redmine_configuration.png) ![Redmine configuration](img/redmine_configuration.png)
2. To disable the internal issue tracking system in a project, navigate to the General page, expand [Permissions](../settings/index.md#sharing-and-permissions), and slide the Issues switch to the disabled position. 1. To disable the internal issue tracking system in a project, navigate to the General page, expand [Permissions](../settings/index.md#sharing-and-permissions), and slide the Issues switch to the disabled position.
![Issue configuration](img/issue_configuration.png) ![Issue configuration](img/issue_configuration.png)
## Referencing issues in Redmine ## Referencing issues in Redmine
Issues in Redmine can be referenced in two alternative ways: Issues in Redmine can be referenced in two alternative ways:
1. `#<ID>` where `<ID>` is a number (example `#143`)
2. `<PROJECT>-<ID>` where `<PROJECT>` starts with a capital letter which is - `#<ID>` where `<ID>` is a number (example `#143`).
- `<PROJECT>-<ID>` where `<PROJECT>` starts with a capital letter which is
then followed by capital letters, numbers or underscores, and `<ID>` is then followed by capital letters, numbers or underscores, and `<ID>` is
a number (example `API_32-143`). a number (example `API_32-143`).
......
...@@ -338,10 +338,10 @@ payload will also include information about the target of the comment. For examp ...@@ -338,10 +338,10 @@ payload will also include information about the target of the comment. For examp
a comment on an issue will include the specific issue information under the `issue` key. a comment on an issue will include the specific issue information under the `issue` key.
Valid target types: Valid target types:
1. `commit` - `commit`
2. `merge_request` - `merge_request`
3. `issue` - `issue`
4. `snippet` - `snippet`
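A receiver can use the target type to pick the extra payload key. A hedged sketch (the key names follow the list above; the helper itself is illustrative, not part of GitLab):

```ruby
require 'json'

# Illustrative dispatch on the comment's target type.
def comment_target(payload)
  case payload.dig('object_attributes', 'noteable_type')
  when 'Commit'       then payload['commit']
  when 'MergeRequest' then payload['merge_request']
  when 'Issue'        then payload['issue']
  when 'Snippet'      then payload['snippet']
  end
end

payload = JSON.parse('{"object_attributes":{"noteable_type":"Issue"},"issue":{"iid":7}}')
comment_target(payload) # => {"iid"=>7}
```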
#### Comment on commit #### Comment on commit
......
...@@ -27,10 +27,11 @@ used: ...@@ -27,10 +27,11 @@ used:
Note that `%{issue_ref}` is a complex regular expression defined inside GitLab's Note that `%{issue_ref}` is a complex regular expression defined inside GitLab's
source code that can match references to: source code that can match references to:
1. a local issue (`#123`),
2. a cross-project issue (`group/project#123`) - A local issue (`#123`).
3. a link to an issue - A cross-project issue (`group/project#123`).
(`https://gitlab.example.com/group/project/issues/123`). - A link to an issue
(`https://gitlab.example.com/group/project/issues/123`).
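For illustration only, a rough Ruby stand-in that matches the three documented forms; the real `%{issue_ref}` in GitLab's source is stricter:

```ruby
# Rough stand-in for %{issue_ref}; GitLab's real pattern is stricter.
ISSUE_REF = %r{
    https?://[^\s/]+/\S+/issues/\d+   # a link to an issue
  | (?:[\w.-]+/[\w.-]+)?\#\d+         # #123 or group/project#123
}x

'Fixes #123, group/project#456 and https://gitlab.example.com/group/project/issues/789'
  .scan(ISSUE_REF)
# => ["#123", "group/project#456", "https://gitlab.example.com/group/project/issues/789"]
```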
--- ---
......
...@@ -60,7 +60,7 @@ Let's consider the following scenario: ...@@ -60,7 +60,7 @@ Let's consider the following scenario:
hosted in private repositories and you have multiple CI jobs that make use hosted in private repositories and you have multiple CI jobs that make use
of these repositories. of these repositories.
2. You invite a new [external user][ext]. CI jobs created by that user do not 1. You invite a new [external user][ext]. CI jobs created by that user do not
have access to internal repositories, because the user also doesn't have the have access to internal repositories, because the user also doesn't have the
access from within GitLab. You as an employee have to grant explicit access access from within GitLab. You as an employee have to grant explicit access
for this user. This allows us to prevent accidental data leakage. for this user. This allows us to prevent accidental data leakage.
......
...@@ -89,22 +89,14 @@ module Gitlab ...@@ -89,22 +89,14 @@ module Gitlab
end end
def service_account_exists?(resource) def service_account_exists?(resource)
resource_exists? do kubeclient.get_service_account(resource.metadata.name, resource.metadata.namespace)
kubeclient.get_service_account(resource.metadata.name, resource.metadata.namespace) rescue ::Kubeclient::ResourceNotFoundError
end false
end end
def cluster_role_binding_exists?(resource) def cluster_role_binding_exists?(resource)
resource_exists? do kubeclient.get_cluster_role_binding(resource.metadata.name)
kubeclient.get_cluster_role_binding(resource.metadata.name) rescue ::Kubeclient::ResourceNotFoundError
end
end
def resource_exists?
yield
rescue ::Kubeclient::HttpError => e
raise e unless e.error_code == 404
false false
end end
end end
......
...@@ -10,9 +10,7 @@ module Gitlab ...@@ -10,9 +10,7 @@ module Gitlab
def exists? def exists?
@client.get_namespace(name) @client.get_namespace(name)
rescue ::Kubeclient::HttpError => ke rescue ::Kubeclient::ResourceNotFoundError
raise ke unless ke.error_code == 404
false false
end end
......
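Both hunks above replace the generic `HttpError`-plus-404 plumbing with kubeclient's dedicated `ResourceNotFoundError`, which subclasses `HttpError`, so other HTTP failures still propagate. The pattern in isolation, as a sketch:

```ruby
require 'kubeclient'

# The pattern used above: a 404 from the API server means "does not exist";
# any other Kubeclient::HttpError still propagates to the caller.
def namespace_exists?(client, name)
  client.get_namespace(name)
  true
rescue Kubeclient::ResourceNotFoundError
  false
end
```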
...@@ -55,6 +55,10 @@ module QA ...@@ -55,6 +55,10 @@ module QA
element :labels_block element :labels_block
end end
view 'app/views/projects/merge_requests/_mr_title.html.haml' do
element :edit_button
end
def fast_forward_possible? def fast_forward_possible?
!has_text?('Fast-forward merge is not possible') !has_text?('Fast-forward merge is not possible')
end end
...@@ -163,6 +167,10 @@ module QA ...@@ -163,6 +167,10 @@ module QA
all_elements(:discussion_reply).last.click all_elements(:discussion_reply).last.click
fill_element :reply_input, reply_text fill_element :reply_input, reply_text
end end
def edit!
click_element :edit_button
end
end end
end end
end end
......
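With the new `:edit_button` element and `edit!` action, a scenario can open the edit form from the merge request page. A hedged usage sketch; the `perform` invocation style is assumed from the QA framework's conventions:

```ruby
# Hypothetical QA scenario snippet; assumes the merge request Show page
# is already displayed and the framework's page-object conventions apply.
Page::MergeRequest::Show.perform do |show|
  show.edit! # clicks the :edit_button element added above
end
```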
...@@ -11,7 +11,9 @@ module QA ...@@ -11,7 +11,9 @@ module QA
:target_branch, :target_branch,
:assignee, :assignee,
:milestone, :milestone,
:labels :labels,
:file_name,
:file_content
attribute :project do attribute :project do
Project.fabricate! do |resource| Project.fabricate! do |resource|
...@@ -35,8 +37,8 @@ module QA ...@@ -35,8 +37,8 @@ module QA
resource.branch_name = target_branch resource.branch_name = target_branch
resource.remote_branch = source_branch resource.remote_branch = source_branch
resource.new_branch = false resource.new_branch = false
resource.file_name = "added_file.txt" resource.file_name = file_name
resource.file_content = "File Added" resource.file_content = file_content
end end
end end
...@@ -48,6 +50,8 @@ module QA ...@@ -48,6 +50,8 @@ module QA
@assignee = nil @assignee = nil
@milestone = nil @milestone = nil
@labels = [] @labels = []
@file_name = "added_file.txt"
@file_content = "File Added"
end end
def fabricate! def fabricate!
......
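Because the constructor now seeds `file_name` and `file_content` with the old hard-coded values, existing fabrications keep working unchanged while new ones can override the committed file. A sketch, assuming the usual `fabricate!` block style (the constant path is illustrative):

```ruby
# Illustrative override of the new defaults; the constant path may differ.
Resource::MergeRequest.fabricate! do |mr|
  mr.file_name    = 'custom_file.txt' # default: "added_file.txt"
  mr.file_content = 'Custom content'  # default: "File Added"
end
```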
...@@ -179,21 +179,35 @@ function delete() { ...@@ -179,21 +179,35 @@ function delete() {
track="${1-stable}" track="${1-stable}"
name="$CI_ENVIRONMENT_SLUG" name="$CI_ENVIRONMENT_SLUG"
if [ -z "$CI_ENVIRONMENT_SLUG" ]; then
echo "No release given, aborting the delete!"
return
fi
if [[ "$track" != "stable" ]]; then if [[ "$track" != "stable" ]]; then
name="$name-$track" name="$name-$track"
fi fi
if ! deployExists "${KUBE_NAMESPACE}" "${name}"; then
echo "The release $name doesn't exist, aborting the cleanup!"
return
fi
echo "Deleting release '$name'..." echo "Deleting release '$name'..."
helm delete --purge "$name" || true helm delete --purge "$name" || true
} }
function cleanup() { function cleanup() {
echo "Cleaning up $CI_ENVIRONMENT_SLUG..." if [ -z "$CI_ENVIRONMENT_SLUG" ]; then
kubectl -n "$KUBE_NAMESPACE" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa 2>&1 \ echo "No release given, aborting the delete!"
| grep "$CI_ENVIRONMENT_SLUG" \ return
| awk '{print $1}' \ fi
| xargs kubectl -n "$KUBE_NAMESPACE" delete \
|| true echo "Cleaning up '$CI_ENVIRONMENT_SLUG'..."
kubectl -n "$KUBE_NAMESPACE" delete \
ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa \
-l release="$CI_ENVIRONMENT_SLUG" \
|| true
} }
function install_external_dns() { function install_external_dns() {
......