Commit 98f5c00e authored by Grzegorz Bizon's avatar Grzegorz Bizon

Merge branch 'master' into backstage/gb/build-stage-id-ref-bg-migration-cleanup

* master: (59 commits)
  Resolve "Clarify k8s service keys"
  Add Portuguese Brazil translations of Commits Page & Pipeline Charts
  Add Japanese Translation to i18n
  Update Prometheus gem to version that explicitly calls `munmap`
  Simplify width for dropdown-menu on mobile
  Update CHANGELOG.md for 9.3.7
  Remove developer documentation about not describing symbols
  Incorporate Gitaly's Commits#between RPC
  Adapt to new Gitaly commit message format
  Remove transitions on nav link hover
  Provide option to trigger build only for official CE and EE repos in .com
  Fix queries duration sorting in Performance Bar
  Rename Project nav items
  Add structured logging for Rails processes
  Disable Rails logging in CI test environments
  Fix download artifacts button alignment
  Update avatar border to be opaque for better stacking
  Fixed typos
  Fix typos
  Fix external issue trackers redirect
  ...

Conflicts:
	db/schema.rb
parents a6d1e92d 2ddcb1f1
......@@ -160,6 +160,9 @@ build-package:
when: manual
script:
- scripts/trigger-build
only:
- //@gitlab-org/gitlab-ce
- //@gitlab-org/gitlab-ee
# Prepare and merge knapsack tests
knapsack:
......@@ -180,6 +183,7 @@ update-knapsack:
<<: *only-canonical-masters
stage: post-test
script:
- retry gem install fog-aws mime-types
- scripts/merge-reports ${KNAPSACK_RSPEC_SUITE_REPORT_PATH} knapsack/${CI_PROJECT_NAME}/rspec-pg_node_*.json
- scripts/merge-reports ${KNAPSACK_SPINACH_SUITE_REPORT_PATH} knapsack/${CI_PROJECT_NAME}/spinach-pg_node_*.json
- '[[ -z ${KNAPSACK_S3_BUCKET} ]] || scripts/sync-reports put $KNAPSACK_S3_BUCKET $KNAPSACK_RSPEC_SUITE_REPORT_PATH $KNAPSACK_SPINACH_SUITE_REPORT_PATH'
......
......@@ -2,6 +2,11 @@
documentation](doc/development/changelog.md) for instructions on adding your own
entry.
## 9.3.7 (2017-07-18)
- Prevent bad data being added to application settings when Redis is unavailable. !12750
- Return `is_admin` attribute in the GET /user endpoint for admins. !12811
## 9.3.6 (2017-07-12)
- Fix API Scoping. !12300
......
......@@ -268,7 +268,7 @@ gem 'peek', '~> 1.0.1'
gem 'peek-gc', '~> 0.0.2'
gem 'peek-host', '~> 1.0.0'
gem 'peek-mysql2', '~> 1.1.0', group: :mysql
gem 'peek-performance_bar', '~> 1.2.1'
gem 'peek-performance_bar', '~> 1.3.0'
gem 'peek-pg', '~> 1.3.0', group: :postgres
gem 'peek-rblineprof', '~> 0.2.0'
gem 'peek-redis', '~> 1.2.0'
......@@ -281,7 +281,7 @@ group :metrics do
gem 'influxdb', '~> 0.2', require: false
# Prometheus
gem 'prometheus-client-mmap', '~>0.7.0.beta5'
gem 'prometheus-client-mmap', '~>0.7.0.beta9'
gem 'raindrops', '~> 0.18'
end
......@@ -390,3 +390,6 @@ gem 'toml-rb', '~> 0.3.15', require: false
# Feature toggles
gem 'flipper', '~> 0.10.2'
gem 'flipper-active_record', '~> 0.10.2'
# Structured logging
gem 'lograge', '~> 0.5'
......@@ -443,6 +443,10 @@ GEM
logging (2.2.2)
little-plugger (~> 1.1)
multi_json (~> 1.10)
lograge (0.5.1)
actionpack (>= 4, < 5.2)
activesupport (>= 4, < 5.2)
railties (>= 4, < 5.2)
loofah (2.0.3)
nokogiri (>= 1.5.9)
mail (2.6.5)
......@@ -553,7 +557,7 @@ GEM
atomic (>= 1.0.0)
mysql2
peek
peek-performance_bar (1.2.1)
peek-performance_bar (1.3.0)
peek (>= 0.1.0)
peek-pg (1.3.0)
concurrent-ruby
......@@ -588,7 +592,7 @@ GEM
premailer-rails (1.9.7)
actionmailer (>= 3, < 6)
premailer (~> 1.7, >= 1.7.9)
prometheus-client-mmap (0.7.0.beta8)
prometheus-client-mmap (0.7.0.beta9)
mmap2 (~> 2.2, >= 2.2.7)
pry (0.10.4)
coderay (~> 1.1.0)
......@@ -998,6 +1002,7 @@ DEPENDENCIES
letter_opener_web (~> 1.3.0)
license_finder (~> 2.1.0)
licensee (~> 8.7.0)
lograge (~> 0.5)
loofah (~> 2.0.3)
mail_room (~> 0.9.1)
method_source (~> 0.8)
......@@ -1029,7 +1034,7 @@ DEPENDENCIES
peek-gc (~> 0.0.2)
peek-host (~> 1.0.0)
peek-mysql2 (~> 1.1.0)
peek-performance_bar (~> 1.2.1)
peek-performance_bar (~> 1.3.0)
peek-pg (~> 1.3.0)
peek-rblineprof (~> 0.2.0)
peek-redis (~> 1.2.0)
......@@ -1037,7 +1042,7 @@ DEPENDENCIES
pg (~> 0.18.2)
poltergeist (~> 1.9.0)
premailer-rails (~> 1.9.7)
prometheus-client-mmap (~> 0.7.0.beta5)
prometheus-client-mmap (~> 0.7.0.beta9)
pry-byebug (~> 3.4.1)
pry-rails (~> 0.3.4)
rack-attack (~> 4.4.1)
......
......@@ -3,6 +3,7 @@
*/
export default {
ABORTED: 0,
NO_CONTENT: 204,
OK: 200,
};
......@@ -81,6 +81,9 @@ export default class Poll {
})
.catch((error) => {
notificationCallback(false);
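// Requests aborted client-side report status 0 (httpStatusCodes.ABORTED);
// treat them as a no-op instead of surfacing an error.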
if (error.status === httpStatusCodes.ABORTED) {
return;
}
errorCallback(error);
});
}
......
......@@ -60,7 +60,7 @@
}
&:not([href]):hover {
border-color: rgba($avatar-border, .2);
border-color: darken($avatar-border, 10%);
}
}
......@@ -99,7 +99,7 @@
.avatar-counter {
background-color: $gray-darkest;
color: $white-light;
border: 1px solid $border-color;
border: 1px solid $avatar-border;
border-radius: 1em;
font-family: $regular_font;
font-size: 9px;
......
......@@ -205,6 +205,7 @@
@media (max-width: $screen-sm-min) {
width: 100%;
min-width: 180px;
}
&.dropdown-open-left {
......@@ -288,12 +289,6 @@
padding: 5px 8px;
color: $gl-text-color-secondary;
}
.badge {
position: absolute;
right: 8px;
top: 5px;
}
}
.droplab-dropdown {
......@@ -466,10 +461,6 @@
left: auto;
right: 0;
margin-top: -5px;
@media (max-width: $screen-xs-max) {
left: 0;
}
}
.dropdown-menu-selectable {
......
......@@ -236,6 +236,8 @@ ul.content-list {
ul.controls {
float: right;
list-style: none;
display: flex;
align-items: center;
.btn {
padding: 10px 14px;
......@@ -259,6 +261,12 @@ ul.controls {
}
}
}
.issuable-pipeline-broken a,
.issuable-pipeline-status a,
.author_link {
display: flex;
}
}
ul.indent-list {
......
......@@ -325,7 +325,7 @@
position: absolute;
top: 7px;
right: 15px;
z-index: 2;
z-index: 300;
li.active {
font-weight: bold;
......
......@@ -2,6 +2,10 @@
color: $gl-text-color;
word-wrap: break-word;
[dir="auto"] {
text-align: initial;
}
a {
color: $md-link-color;
}
......
......@@ -379,7 +379,7 @@ $issue-boards-card-shadow: rgba(186, 186, 186, 0.5);
* Avatar
*/
$avatar_radius: 50%;
$avatar-border: rgba(0, 0, 0, .1);
$avatar-border: $border-color;
$gl-avatar-size: 40px;
/*
......
......@@ -65,7 +65,6 @@ $new-sidebar-width: 220px;
.settings-avatar {
background-color: $white-light;
transition: background-color 100ms linear;
i {
font-size: 20px;
......@@ -73,7 +72,6 @@ $new-sidebar-width: 220px;
color: $gl-text-color-secondary;
text-align: center;
align-self: center;
transition: color 100ms linear;
}
}
......@@ -90,6 +88,7 @@ $new-sidebar-width: 220px;
box-shadow: inset -2px 0 0 $border-color;
a {
transition: none;
text-decoration: none;
}
......@@ -177,7 +176,6 @@ $new-sidebar-width: 220px;
color: $hover-color;
.badge {
transition: background-color 100ms linear, color 100ms linear;
background-color: $indigo-500;
color: $hover-color;
}
......
......@@ -346,13 +346,9 @@
display: none;
}
.avatar:hover,
.avatar-counter:hover {
border-color: $issuable-sidebar-color;
}
.avatar-counter:hover {
color: $issuable-sidebar-color;
border-color: $issuable-sidebar-color;
}
.btn-clipboard {
......
......@@ -266,7 +266,7 @@ class Projects::IssuesController < Projects::ApplicationController
if action_name == 'new'
redirect_to external.new_issue_path
else
redirect_to external.project_path
redirect_to external.issue_tracker_path
end
end
......
......@@ -195,7 +195,7 @@ module ProjectsHelper
controller.controller_name,
controller.action_name,
current_application_settings.cache_key,
'v2.4'
'v2.5'
]
key << pipeline_status_cache_key(project.pipeline_status) if project.pipeline_status.has_status?
......
class Commit
extend ActiveModel::Naming
extend Gitlab::Cache::RequestCache
include ActiveModel::Conversion
include Noteable
......@@ -169,19 +170,9 @@ class Commit
end
def author
if RequestStore.active?
key = "commit_author:#{author_email.downcase}"
# nil is a valid value since no author may exist in the system
if RequestStore.store.key?(key)
@author = RequestStore.store[key]
else
@author = find_author_by_any_email
RequestStore.store[key] = @author
end
else
@author ||= find_author_by_any_email
end
User.find_by_any_email(author_email.downcase)
end
request_cache(:author) { author_email.downcase }
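# request_cache (introduced in this merge) memoizes the author lookup above for
# the duration of the request, keyed by the downcased author email, replacing
# the hand-rolled RequestStore logic removed here.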
def committer
@committer ||= User.find_by_any_email(committer_email.downcase)
......@@ -368,10 +359,6 @@ class Commit
end
end
def find_author_by_any_email
User.find_by_any_email(author_email.downcase)
end
def repo_changes
changes = { added: [], modified: [], removed: [] }
......
......@@ -190,7 +190,7 @@ class Note < ActiveRecord::Base
# override to return commits, which are not active record
def noteable
if for_commit?
project.commit(commit_id)
@commit ||= project.commit(commit_id)
else
super
end
......
......@@ -23,7 +23,7 @@ class GitlabIssueTrackerService < IssueTrackerService
project_issue_url(project, id: iid)
end
def project_path
def issue_tracker_path
project_issues_path(project)
end
......
......@@ -20,8 +20,8 @@ class IssueTrackerService < Service
self.issues_url.gsub(':id', iid.to_s)
end
def project_path
read_attribute(:project_url)
def issue_tracker_path
project_url
end
def new_issue_path
......
......@@ -58,22 +58,22 @@ class KubernetesService < DeploymentService
def fields
[
{ type: 'text',
name: 'namespace',
title: 'Kubernetes namespace',
placeholder: namespace_placeholder },
{ type: 'text',
name: 'api_url',
title: 'API URL',
placeholder: 'Kubernetes API URL, like https://kube.example.com/' },
{ type: 'text',
name: 'token',
title: 'Service token',
placeholder: 'Service token' },
{ type: 'textarea',
name: 'ca_pem',
title: 'Custom CA bundle',
placeholder: 'Certificate Authority bundle (PEM format)' }
title: 'CA Certificate',
placeholder: 'Certificate Authority bundle (PEM format)' },
{ type: 'text',
name: 'namespace',
title: 'Project namespace (optional/unique)',
placeholder: namespace_placeholder },
{ type: 'text',
name: 'token',
title: 'Token',
placeholder: 'Service token' }
]
end
......
......@@ -3,9 +3,13 @@ module Ci
condition(:protected_action) do
next false unless @subject.action?
!::Gitlab::UserAccess
.new(@user, project: @subject.project)
.can_merge_to_branch?(@subject.ref)
access = ::Gitlab::UserAccess.new(@user, project: @subject.project)
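# Manual actions on tags are checked against tag-creation access; actions on
# branches keep the existing protected-branch merge check.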
if @subject.tag?
!access.can_create_tag?(@subject.ref)
else
!access.can_merge_to_branch?(@subject.ref)
end
end
rule { protected_action }.prevent :update_build
......
......@@ -31,6 +31,6 @@ class MetricsService
end
def multiprocess_metrics_path
@multiprocess_metrics_path ||= Rails.root.join(ENV['prometheus_multiproc_dir']).freeze
::Prometheus::Client.configuration.multiprocess_files_dir
end
end
......@@ -16,7 +16,7 @@ class GitlabUploader < CarrierWave::Uploader::Base
def self.base_dir
return root_dir unless file_storage?
File.join(root_dir, 'system')
File.join(root_dir, '-', 'system')
end
def self.file_storage?
......
......@@ -42,18 +42,18 @@
.key
= icon('arrow-up', 'aria-label' => 'hidden')
I
%span.badge.pull-right= number_with_delimiter(assigned_issuables_count(:issues))
%span
Issues
.badge= number_with_delimiter(assigned_issuables_count(:issues))
= nav_link(path: 'dashboard#merge_requests') do
= link_to assigned_mrs_dashboard_path, title: 'Merge Requests', class: 'dashboard-shortcuts-merge_requests' do
.shortcut-mappings
.key
= icon('arrow-up', 'aria-label' => 'hidden')
M
%span.badge.pull-right= number_with_delimiter(assigned_issuables_count(:merge_requests))
%span
Merge Requests
.badge= number_with_delimiter(assigned_issuables_count(:merge_requests))
= nav_link(controller: 'dashboard/snippets') do
= link_to dashboard_snippets_path, class: 'dashboard-shortcuts-snippets', title: 'Snippets' do
.shortcut-mappings
......
......@@ -6,15 +6,15 @@
= @group.name
%ul.sidebar-top-level-items
= nav_link(path: ['groups#show', 'groups#activity', 'groups#subgroups'], html_options: { class: 'home' }) do
= link_to group_path(@group), title: 'Home' do
= link_to group_path(@group), title: 'About group' do
%span
Group
About
%ul.sidebar-sub-level-items
= nav_link(path: ['groups#show', 'groups#subgroups'], html_options: { class: 'home' }) do
= link_to group_path(@group), title: 'Group Home' do
= link_to group_path(@group), title: 'Group details' do
%span
Home
Details
= nav_link(path: 'groups#activity') do
= link_to activity_group_path(@group), title: 'Activity' do
......
......@@ -7,14 +7,14 @@
= @project.name
%ul.sidebar-top-level-items
= nav_link(path: ['projects#show', 'projects#activity', 'cycle_analytics#show'], html_options: { class: 'home' }) do
= link_to project_path(@project), title: 'Project', class: 'shortcuts-project' do
= link_to project_path(@project), title: 'About project', class: 'shortcuts-project' do
%span
Project
About
%ul.sidebar-sub-level-items
= nav_link(path: 'projects#show') do
= link_to project_path(@project), title: _('Project home'), class: 'shortcuts-project' do
%span= _('Home')
= link_to project_path(@project), title: _('Project details'), class: 'shortcuts-project' do
%span= _('Details')
= nav_link(path: 'projects#activity') do
= link_to activity_project_path(@project), title: _('Activity'), class: 'shortcuts-project-activity' do
......
......@@ -5,12 +5,6 @@
.tree-holder
.nav-block
.tree-controls
= link_to download_project_job_artifacts_path(@project, @build),
rel: 'nofollow', download: '', class: 'btn btn-default download' do
= icon('download')
Download artifacts archive
%ul.breadcrumb.repo-breadcrumb
%li
= link_to 'Artifacts', browse_project_job_artifacts_path(@project, @build)
......@@ -18,6 +12,12 @@
%li
= link_to truncate(title, length: 40), browse_project_job_artifacts_path(@project, @build, path)
.tree-controls
= link_to download_project_job_artifacts_path(@project, @build),
rel: 'nofollow', download: '', class: 'btn btn-default download' do
= icon('download')
Download artifacts archive
.tree-content-holder
%table.table.tree-table
%thead
......
---
title: "reset text-align to initial to let elements with dir="auto" align texts to right in RTL languages ( default is left )"
merge_request: 12892
author: goshhob
---
title: Clarify and rearrange the input variables on the Kubernetes integration
  page and adjust the docs slightly to match the same order
merge_request: !12188
author:
---
title: Add Portuguese Brazil translations of Commits Page
merge_request: 12408
author: Huang Tao
---
title: Return `is_admin` attribute in the GET /user endpoint for admins
merge_request: 12811
author:
---
title: Use smaller min-width for dropdown-menu-nav only on mobile
merge_request: 12528
author: Takuya Noguchi
---
title: Prevent bad data being added to application settings when Redis is unavailable
merge_request: 12750
author:
---
title: Add Japanese translations for Cycle Analytics & Project pages & Repository pages & Commits pages & Pipeline Charts.
merge_request: 12693
author: Huang Tao
---
title: Add Russian translations for Cycle Analytics & Project pages & Repository pages & Commits pages & Pipeline Charts.
merge_request: 12743
author: Huang Tao
---
title: Protect manual actions against protected tag too
merge_request: 12908
author:
---
title: Fix alignment of controls in mr issuable list
merge_request:
author:
---
title: Fix transient JS error in RSpec tests
merge_request:
author:
---
title: "Move uploads from `uploads/system` to `uploads/-/system` to free up `system` as a group name"
merge_request: 11713
author:
---
title: Add RequestCache which makes caching with RequestStore easier
merge_request: 12920
author:
---
title: Add structured logging for Rails processes
merge_request:
author:
......@@ -4,8 +4,3 @@ require 'rubygems'
ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])
# set default directory for multiprocess metrics gathering
if ENV['RAILS_ENV'] == 'development' || ENV['RAILS_ENV'] == 'test'
ENV['prometheus_multiproc_dir'] ||= 'tmp/prometheus_multiproc_dir'
end
......@@ -43,4 +43,9 @@ Rails.application.configure do
config.cache_store = :null_store
config.active_job.queue_adapter = :test
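# Rails logging slows down CI runs considerably, so it is disabled unless
# RAILS_ENABLE_TEST_LOG is explicitly set.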
if ENV['CI'] && !ENV['RAILS_ENABLE_TEST_LOG']
config.logger = Logger.new(nil)
config.log_level = :fatal
end
end
require 'prometheus/client'
Prometheus::Client.configure do |config|
config.logger = Rails.logger
config.initial_mmap_file_size = 4 * 1024
config.multiprocess_files_dir = ENV['prometheus_multiproc_dir']
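# Fall back to a local tmp directory when prometheus_multiproc_dir is not set.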
if Rails.env.development? || Rails.env.test?
config.multiprocess_files_dir ||= Rails.root.join('tmp/prometheus_multiproc_dir')
end
end
# Only use Lograge for Rails
unless Sidekiq.server?
filename = File.join(Rails.root, 'log', "#{Rails.env}_json.log")
Rails.application.configure do
config.lograge.enabled = true
# Keep the original (unstructured) Rails log alongside the Lograge JSON log
config.lograge.keep_original_rails_log = true
# Don't use the Logstash formatter since this requires logstash-event, an
# unmaintained gem that monkey patches `Time`
config.lograge.formatter = Lograge::Formatters::Json.new
config.lograge.logger = ActiveSupport::Logger.new(filename)
# Add request parameters to log output
config.lograge.custom_options = lambda do |event|
{
time: event.time,
params: event.payload[:params].except(*%w(controller action format))
}
end
end
end
......@@ -26,7 +26,3 @@ class PEEK_DB_CLIENT
end
PEEK_DB_VIEW.prepend ::Gitlab::PerformanceBar::PeekQueryTracker
class Peek::Views::PerformanceBar::ProcessUtilization
prepend ::Gitlab::PerformanceBar::PeekPerformanceBarWithRackBody
end
scope path: :uploads do
# Note attachments and User/Group/Project avatars
get "system/:model/:mounted_as/:id/:filename",
get "-/system/:model/:mounted_as/:id/:filename",
to: "uploads#show",
constraints: { model: /note|user|group|project/, mounted_as: /avatar|attachment/, filename: /[^\/]+/ }
......@@ -15,7 +15,7 @@ scope path: :uploads do
constraints: { filename: /[^\/]+/ }
# Appearance
get "system/:model/:mounted_as/:id/:filename",
get "-/system/:model/:mounted_as/:id/:filename",
to: "uploads#show",
constraints: { model: /appearance/, mounted_as: /logo|header_logo/, filename: /.+/ }
......
class AddForeignKeyToMergeRequests < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
class MergeRequest < ActiveRecord::Base
self.table_name = 'merge_requests'
include ::EachBatch
end
def up
scope = <<-SQL.strip_heredoc
head_pipeline_id IS NOT NULL
AND NOT EXISTS (
SELECT 1 FROM ci_pipelines
WHERE ci_pipelines.id = merge_requests.head_pipeline_id
)
SQL
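# Null out head_pipeline_id values that point at pipelines which no longer
# exist, so the foreign key added below is valid for every remaining row.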
MergeRequest.where(scope).each_batch(of: 1000) do |merge_requests|
merge_requests.update_all(head_pipeline_id: nil)
end
unless foreign_key_exists?(:merge_requests, :head_pipeline_id)
add_concurrent_foreign_key(:merge_requests, :ci_pipelines,
column: :head_pipeline_id, on_delete: :nullify)
end
end
def down
if foreign_key_exists?(:merge_requests, :head_pipeline_id)
remove_foreign_key(:merge_requests, column: :head_pipeline_id)
end
end
private
def foreign_key_exists?(table, column)
foreign_keys(table).any? do |key|
key.options[:column] == column.to_s
end
end
end
class MoveSystemUploadFolder < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
disable_ddl_transaction!
DOWNTIME = false
def up
unless file_storage?
say 'Using object storage, no need to move.'
return
end
unless File.directory?(old_directory)
say "#{old_directory} doesn't exist, no need to move it."
return
end
FileUtils.mkdir_p(File.join(base_directory, '-'))
say "Moving #{old_directory} -> #{new_directory}"
FileUtils.mv(old_directory, new_directory)
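# Leave a symlink at the old location so existing paths keep resolving until
# the follow-up cleanup migration removes it.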
FileUtils.ln_s(new_directory, old_directory)
end
def down
unless file_storage?
say 'Using object storage, no need to move.'
return
end
unless File.directory?(new_directory)
say "#{new_directory} doesn't exist, no need to move it."
return
end
if File.symlink?(old_directory)
say "Removing #{old_directory} -> #{new_directory} symlink"
FileUtils.rm(old_directory)
end
say "Moving #{new_directory} -> #{old_directory}"
FileUtils.mv(new_directory, old_directory)
end
def new_directory
File.join(base_directory, '-', 'system')
end
def old_directory
File.join(base_directory, 'system')
end
def base_directory
File.join(Rails.root, 'public', 'uploads')
end
def file_storage?
CarrierWave::Uploader::Base.storage == CarrierWave::Storage::File
end
end
# See http://doc.gitlab.com/ce/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class CleanupMoveSystemUploadFolderSymlink < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
if File.symlink?(old_directory)
say "Removing #{old_directory} -> #{new_directory} symlink"
FileUtils.rm(old_directory)
else
say "Symlink #{old_directory} non existant, nothing to do."
end
end
def down
if File.directory?(new_directory)
say "Symlinking #{old_directory} -> #{new_directory}"
FileUtils.ln_s(new_directory, old_directory)
else
say "#{new_directory} doesn't exist, skipping."
end
end
def new_directory
File.join(base_directory, '-', 'system')
end
def old_directory
File.join(base_directory, 'system')
end
def base_directory
File.join(Rails.root, 'public', 'uploads')
end
end
class EnqueueMigrateSystemUploadsToNewFolder < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
OLD_FOLDER = 'uploads/system/'
NEW_FOLDER = 'uploads/-/system/'
disable_ddl_transaction!
def up
BackgroundMigrationWorker.perform_async('MigrateSystemUploadsToNewFolder',
[OLD_FOLDER, NEW_FOLDER])
end
def down
BackgroundMigrationWorker.perform_async('MigrateSystemUploadsToNewFolder',
[NEW_FOLDER, OLD_FOLDER])
end
end
......@@ -11,7 +11,7 @@
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20170710083355) do
ActiveRecord::Schema.define(version: 20170717150329) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
......@@ -1615,6 +1615,7 @@ ActiveRecord::Schema.define(version: 20170710083355) do
add_foreign_key "merge_request_diffs", "merge_requests", name: "fk_8483f3258f", on_delete: :cascade
add_foreign_key "merge_request_metrics", "ci_pipelines", column: "pipeline_id", on_delete: :cascade
add_foreign_key "merge_request_metrics", "merge_requests", on_delete: :cascade
add_foreign_key "merge_requests", "ci_pipelines", column: "head_pipeline_id", name: "fk_fd82eae0b9", on_delete: :nullify
add_foreign_key "merge_requests", "projects", column: "target_project_id", name: "fk_a6963e8447", on_delete: :cascade
add_foreign_key "merge_requests_closing_issues", "issues", on_delete: :cascade
add_foreign_key "merge_requests_closing_issues", "merge_requests", on_delete: :cascade
......
......@@ -95,8 +95,9 @@ Sample Prometheus queries:
## Configuring Prometheus to monitor Kubernetes
> Introduced in GitLab 9.0.
> Pod monitoring introduced in GitLab 9.4.
If your GitLab server is running within Kubernetes, Prometheus will collect metrics from the Nodes in the cluster including performance data on each container. This is particularly helpful if your CI/CD environments run in the same cluster, as you can use the [Prometheus project integration][] to monitor them.
If your GitLab server is running within Kubernetes, Prometheus will collect metrics from the Nodes and [annotated Pods](https://prometheus.io/docs/operating/configuration/#<kubernetes_sd_config>) in the cluster, including performance data on each container. This is particularly helpful if your CI/CD environments run in the same cluster, as you can use the [Prometheus project integration][] to monitor them.
To disable the monitoring of Kubernetes:
......
......@@ -602,9 +602,8 @@ exist, you should see something like:
>**Notes:**
>
- For the monitor dashboard to appear, you need to:
- Have enabled the [Kubernetes integration][kube]
- Have your app deployed on Kubernetes
- Have enabled the [Prometheus integration][prom]
- Have configured Prometheus to collect at least one [supported metric](../user/project/integrations/prometheus_library/metrics.md)
- With GitLab 9.2, all deployments to an environment are shown directly on the
monitoring dashboard
......
......@@ -3,35 +3,6 @@
The purpose of this guide is to document potential "gotchas" that contributors
might encounter or should avoid during development of GitLab CE and EE.
## Do not `describe` symbols
Consider the following model spec:
```ruby
require 'rails_helper'
describe User do
describe :to_param do
it 'converts the username to a param' do
user = described_class.new(username: 'John Smith')
expect(user.to_param).to eq 'john-smith'
end
end
end
```
When run, this spec doesn't do what we might expect:
```sh
spec/models/user_spec.rb|6 error| Failure/Error: u = described_class.new NoMethodError: undefined method `new' for :to_param:Symbol
```
### Solution
Except for the top-level `describe` block, always provide a String argument to
`describe`.
## Do not assert against the absolute value of a sequence-generated attribute
Consider the following factory:
......
......@@ -195,7 +195,6 @@ Please consult the [dedicated "Frontend testing" guide](./fe_guide/testing.md).
- Use `context` to test branching logic.
- Use multi-line `do...end` blocks for `before` and `after`, even when it would
fit on a single line.
- Don't `describe` symbols (see [Gotchas](gotchas.md#dont-describe-symbols)).
- Don't assert against the absolute value of a sequence-generated attribute (see [Gotchas](gotchas.md#dont-assert-against-the-absolute-value-of-a-sequence-generated-attribute)).
- Don't supply the `:each` argument to hooks since it's the default.
- Prefer `not_to` to `to_not` (_this is enforced by RuboCop_).
......@@ -479,6 +478,11 @@ slowest test files and try to improve them.
run the suite against MySQL for tags, `master`, and any branch that includes
`mysql` in the name.
- On EE, the test suite always runs both PostgreSQL and MySQL.
- Rails logging to `log/test.log` is disabled by default in CI [for
performance reasons][logging]. To override this setting, provide the
`RAILS_ENABLE_TEST_LOG` environment variable.
[logging]: https://jtway.co/speed-up-your-rails-test-suite-by-6-in-1-line-13fedb869ec4
## Spinach (feature) tests
......
......@@ -19,10 +19,10 @@ of your project and select the **Kubernetes** service to configure it.
The Kubernetes service takes the following arguments:
1. Kubernetes namespace
1. API URL
1. Service token
1. Custom CA bundle
1. Kubernetes namespace
1. Service token
The API URL is the URL that GitLab uses to access the Kubernetes API. Kubernetes
exposes several APIs - we want the "base" URL that is common to all of them,
......
......@@ -17,35 +17,30 @@ the settings page with a default template. To configure the template, see the
Integration with Prometheus requires the following:
1. GitLab 9.0 or higher
1. The [Kubernetes integration must be enabled][kube] on your project
1. Your app must be deployed on [Kubernetes][]
1. Prometheus must be configured to collect Kubernetes metrics
1. Prometheus must be configured to collect one of the [supported metrics](prometheus_library/metrics.md)
1. Each metric must have a label to indicate the environment
1. GitLab must have network connectivity to the Prometheus sever
1. GitLab must have network connectivity to the Prometheus server
There are a few steps necessary to set up integration between Prometheus and
GitLab.
## Getting started with Prometheus monitoring
## Configuring Prometheus to collect Kubernetes metrics
Depending on your deployment and where you have located your GitLab server, there are a few options to get started with Prometheus monitoring.
In order for Prometheus to collect Kubernetes metrics, you first must have a
Prometheus server up and running. You have two options here:
* If both GitLab and your applications are installed in the same Kubernetes cluster, you can leverage the [bundled Prometheus server within GitLab](#configuring-omnibus-gitlab-prometheus-to-monitor-kubernetes).
* If your applications are deployed on Kubernetes, but GitLab is not in the same cluster, then you can [configure a Prometheus server in your Kubernetes cluster](#configuring-your-own-prometheus-server-within-kubernetes).
* If your applications are not running in Kubernetes, [get started with Prometheus](#getting-started-with-prometheus-outside-of-kubernetes).
- If you installed Omnibus GitLab inside of Kubernetes, you can simply use the
[bundled version of Prometheus][promgldocs]. In that case, follow the info in the
[Omnibus GitLab section](#configuring-omnibus-gitlab-prometheus-to-monitor-kubernetes)
below.
- If you are using GitLab.com or installed GitLab outside of Kubernetes, you
will likely need to run a Prometheus server within the Kubernetes cluster.
Once installed, the easiest way to monitor Kubernetes is to simply use
Prometheus' support for [Kubernetes Service Discovery][prometheus-k8s-sd].
In that case, follow the instructions on
[configuring your own Prometheus server within Kubernetes](#configuring-your-own-prometheus-server-within-kubernetes).
### Getting started with Prometheus outside of Kubernetes
### Configuring Omnibus GitLab Prometheus to monitor Kubernetes
Installing and configuring Prometheus to monitor applications is fairly straightforward.
1. [Install Prometheus](https://prometheus.io/docs/introduction/install/)
1. Set up one of the [supported monitoring targets](prometheus_library/metrics.md)
1. Configure the Prometheus server to [collect their metrics](https://prometheus.io/docs/operating/configuration/#scrape_config)
### Configuring Omnibus GitLab Prometheus to monitor Kubernetes deployments
With Omnibus GitLab running inside of Kubernetes, you can leverage the bundled
version of Prometheus to collect the required metrics.
version of Prometheus to collect the supported metrics. Once enabled, Prometheus will automatically begin monitoring Kubernetes Nodes and any [annotated Pods](https://prometheus.io/docs/operating/configuration/#<kubernetes_sd_config>).
1. Read how to configure the bundled Prometheus server in the
[Administration guide][gitlab-prometheus-k8s-monitor].
......@@ -74,7 +69,7 @@ kubectl apply -f path/to/prometheus.yml
Once deployed, you should see the Prometheus service, deployment, and
pod start within the `prometheus` namespace. The server will begin to collect
metrics from each Kubernetes Node in the cluster, based on the configuration
provided in the template.
provided in the template. It will also attempt to collect metrics from any Kubernetes Pods that have been [annotated for Prometheus](https://prometheus.io/docs/operating/configuration/#pod).
Since GitLab is not running within Kubernetes, the template provides external
network access via a `NodePort` running on `30090`. This method allows access
......@@ -133,30 +128,6 @@ to integrate with.
![Configure Prometheus Service](img/prometheus_service_configuration.png)
## Metrics and Labels
GitLab retrieves performance data from two metrics, `container_cpu_usage_seconds_total`
and `container_memory_usage_bytes`. These metrics are collected from the
Kubernetes pods via Prometheus, and report CPU and Memory utilization of each
container or Pod running in the cluster.
In order to isolate and only display relevant metrics for a given environment
however, GitLab needs a method to detect which pods are associated. To do that,
GitLab will specifically request metrics that have an `environment` tag that
matches the [$CI_ENVIRONMENT_SLUG][ci-environment-slug].
If you are using [GitLab Auto-Deploy][autodeploy] and one of the methods of
configuring Prometheus above, the `environment` will be automatically added.
### GitLab Prometheus queries
The queries utilized by GitLab are shown in the following table.
| Metric | Query |
| ------ | ----- |
| Average Memory (MB) | `(sum(container_memory_usage_bytes{container_name!="POD",environment="$CI_ENVIRONMENT_SLUG"}) / count(container_memory_usage_bytes{container_name!="POD",environment="$CI_ENVIRONMENT_SLUG"})) /1024/1024` |
| Average CPU Utilization (%) | `sum(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="$CI_ENVIRONMENT_SLUG"}[2m])) / count(container_cpu_usage_seconds_total{container_name!="POD",environment="$CI_ENVIRONMENT_SLUG"}) * 100` |
## Monitoring CI/CD Environments
Once configured, GitLab will attempt to retrieve performance metrics for any
......@@ -168,8 +139,9 @@ environment which has had a successful deployment.
> [Introduced][ce-10408] in GitLab 9.2.
> GitLab 9.3 added the [numeric comparison](https://gitlab.com/gitlab-org/gitlab-ce/issues/27439) of the 30 minute averages.
> Requires [Kubernetes](prometheus_library/kubernetes.md) metrics
Developers can view the performance impact of their changes within the merge
Developers can view the performance impact of their changes within the merge
request workflow. When a source branch has been deployed to an environment, a sparkline and numeric comparison of the average memory consumption will appear. On the sparkline, a dot
indicates when the current changes were deployed, with up to 30 minutes of
performance data displayed before and after. The comparison shows the difference between the 30 minute average before and after the deployment. This information is updated after
......
# Monitoring AWS Resources
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12621) in GitLab 9.4
GitLab has support for automatically detecting and monitoring AWS resources, starting with the [Elastic Load Balancer](https://aws.amazon.com/elasticloadbalancing/). This is provided by leveraging the official [Cloudwatch exporter](https://github.com/prometheus/cloudwatch_exporter), which translates [Cloudwatch metrics](https://aws.amazon.com/cloudwatch/) into a Prometheus readable form.
## Metrics supported
| Name | Query |
| ---- | ----- |
| Throughput (req/sec) | sum(aws_elb_request_count_sum{%{environment_filter}}) / 60 |
| Latency (ms) | avg(aws_elb_latency_average{%{environment_filter}}) * 1000 |
| HTTP Error Rate (%) | sum(aws_elb_httpcode_backend_5_xx_sum{%{environment_filter}}) / sum(aws_elb_request_count_sum{%{environment_filter}}) |
## Configuring Prometheus to monitor for Cloudwatch metrics
To get started with Cloudwatch monitoring, you should install and configure the [Cloudwatch exporter](https://github.com/prometheus/cloudwatch_exporter) which retrieves and parses the specified Cloudwatch metrics and translates them into a Prometheus monitoring endpoint.
Right now, the only AWS resource supported is the Elastic Load Balancer, whose Cloudwatch metrics can be found [here](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-cloudwatch-metrics.html).
A sample Cloudwatch Exporter configuration file, configured for basic AWS ELB monitoring, is [available for download](../samples/cloudwatch.yml).
## Specifying the Environment label
In order to isolate and display only the relevant metrics for a given environment, GitLab needs a method to detect which labels are associated. To do this, GitLab will [look for an `environment` label](metrics.md#identifying-environments).
# Monitoring Kubernetes
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8935) in GitLab 9.0
GitLab has support for automatically detecting and monitoring Kubernetes metrics. Kubernetes exposes Node level metrics out of the box via the built-in [Prometheus metrics support in cAdvisor](https://github.com/google/cadvisor). No additional services or exporters are needed.
## Metrics supported
| Name | Query |
| ---- | ----- |
| Average Memory Usage (MB) | (sum(container_memory_usage_bytes{container_name!="POD",%{environment_filter}}) / count(container_memory_usage_bytes{container_name!="POD",%{environment_filter}})) /1024/1024 |
| Average CPU Utilization (%) | sum(rate(container_cpu_usage_seconds_total{container_name!="POD",%{environment_filter}}[2m])) / count(container_cpu_usage_seconds_total{container_name!="POD",%{environment_filter}}) * 100 |
## Configuring Prometheus to monitor for Kubernetes node metrics
In order for Prometheus to collect Kubernetes metrics, you first must have a
Prometheus server up and running. You have two options here:
- If you have an Omnibus based GitLab installation within your Kubernetes cluster, you can leverage the bundled Prometheus server to [monitor Kubernetes](../../../../administration/monitoring/prometheus/index.md#configuring-prometheus-to-monitor-kubernetes).
- To configure your own Prometheus server, you can follow the [Prometheus documentation](https://prometheus.io/docs/introduction/overview/) or [our guide](../../../../administration/monitoring/prometheus/index.md#configuring-your-own-prometheus-server-within-kubernetes).
## Specifying the Environment label
In order to isolate and display only the relevant metrics for a given environment, GitLab needs a method to detect which labels are associated. To do this, GitLab will [look for an `environment` label](metrics.md#identifying-environments).
If you are using [GitLab Auto-Deploy][autodeploy] and one of the two [provided Kubernetes monitoring solutions](../prometheus.md#getting-started-with-prometheus-monitoring), the `environment` label will be automatically added.
# Prometheus Metrics library
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8935) in GitLab 9.0
GitLab offers automatic detection of select [Prometheus exporters](https://prometheus.io/docs/instrumenting/exporters/). Currently supported exporters are:
* [Kubernetes](kubernetes.md)
* [NGINX](nginx.md)
* [Amazon CloudWatch](cloudwatch.md)
We have tried to surface the most important metrics for each exporter, and will be continuing to add support for additional exporters in future releases. If you would like to add support for other official exporters, [contributions](#adding-to-the-library) are welcome.
## Identifying Environments
GitLab retrieves performance data from the configured Prometheus server, and attempts to identify the presence of known metrics. Once identified, GitLab then needs to be able to map the data to a particular environment.
In order to isolate and display only the relevant metrics for a given environment, GitLab needs a method to detect which labels are associated. To do that,
GitLab will look for the required metrics which have a label that
matches the [$CI_ENVIRONMENT_SLUG][ci-environment-slug].
For example if you are deploying to an environment named `production`, there must be a label for the metric with the value of `production`.
## Adding to the library
We strive to support the 2-4 most important metrics for each common system service that supports Prometheus. If you are looking for support for a particular exporter which has not yet been added to the library, additions can be made [to the `additional_metrics.yml`](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/prometheus/additional_metrics.yml) file.
> Note: The library is only for monitoring public, common, system services which all customers can benefit from. Support for monitoring [customer proprietary metrics](https://gitlab.com/gitlab-org/gitlab-ee/issues/2273) will be added in a subsequent release.
# Monitoring NGINX
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12621) in GitLab 9.4
GitLab has support for automatically detecting and monitoring NGINX. This is provided by leveraging the [NGINX VTS exporter](https://github.com/hnlq715/nginx-vts-exporter), which translates [VTS statistics](https://github.com/vozlt/nginx-module-vts) into a Prometheus readable form.
## Metrics supported
| Name | Query |
| ---- | ----- |
| Throughput (req/sec) | sum(rate(nginx_requests_total{server_zone!="*", server_zone!="_", %{environment_filter}}[2m])) |
| Latency (ms) | avg(nginx_upstream_response_msecs_avg{%{environment_filter}}) * 1000 |
| HTTP Error Rate (%) | sum(nginx_responses_total{status_code="5xx", %{environment_filter}}) / sum(nginx_responses_total{server_zone!="*", server_zone!="_", %{environment_filter}}) |
## Configuring Prometheus to monitor for NGINX metrics
To get started with NGINX monitoring, you should first enable the [VTS statistics](https://github.com/vozlt/nginx-module-vts) module for your NGINX server. This will capture and display statistics in an HTML readable form. Next, you should install and configure the [NGINX VTS exporter](https://github.com/hnlq715/nginx-vts-exporter) which parses these statistics and translates them into a Prometheus monitoring endpoint.
If you are using NGINX as your Kubernetes ingress, there is [upcoming direct support](https://github.com/kubernetes/ingress/pull/423) for enabling Prometheus monitoring in the 0.9.0 release.
## Specifying the Environment label
In order to isolate and display only the relevant metrics for a given environment, GitLab needs a method to detect which labels are associated. To do this, GitLab will [look for an `environment` label](metrics.md#identifying-environments).
region: us-east-1
metrics:
- aws_namespace: AWS/ELB
aws_metric_name: RequestCount
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_dimension_select:
LoadBalancerName: [gitlab-ha-lb]
aws_statistics: [Sum]
- aws_namespace: AWS/ELB
aws_metric_name: Latency
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_dimension_select:
LoadBalancerName: [gitlab-ha-lb]
aws_statistics: [Average]
- aws_namespace: AWS/ELB
aws_metric_name: HTTPCode_Backend_2XX
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_dimension_select:
LoadBalancerName: [gitlab-ha-lb]
aws_statistics: [Sum]
- aws_namespace: AWS/ELB
aws_metric_name: HTTPCode_Backend_5XX
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_dimension_select:
LoadBalancerName: [gitlab-ha-lb]
aws_statistics: [Sum]
......@@ -24,6 +24,44 @@ data:
target_label: environment
regex: (.+)-.+-.+
replacement: $1
- job_name: kubernetes-pods
tls_config:
ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
insecure_skip_verify: true
bearer_token_file: "/var/run/secrets/kubernetes.io/serviceaccount/token"
kubernetes_sd_configs:
- role: pod
api_server: https://kubernetes.default.svc:443
tls_config:
ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
bearer_token_file: "/var/run/secrets/kubernetes.io/serviceaccount/token"
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
action: keep
regex: 'true'
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: "(.+)"
- source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
action: replace
regex: "([^:]+)(?::[0-9]+)?;([0-9]+)"
replacement: "$1:$2"
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_pod_name
action: replace
target_label: kubernetes_pod_name
---
apiVersion: v1
kind: Service
......
......@@ -81,7 +81,7 @@ class Spinach::Features::Groups < Spinach::FeatureSteps
step 'I should see new group "Owned" avatar' do
expect(owned_group.avatar).to be_instance_of AvatarUploader
expect(owned_group.avatar.url).to eq "/uploads/system/group/avatar/#{Group.find_by(name: "Owned").id}/banana_sample.gif"
expect(owned_group.avatar.url).to eq "/uploads/-/system/group/avatar/#{Group.find_by(name: "Owned").id}/banana_sample.gif"
end
step 'I should see the "Remove avatar" button' do
......
......@@ -36,7 +36,7 @@ class Spinach::Features::Profile < Spinach::FeatureSteps
step 'I should see new avatar' do
expect(@user.avatar).to be_instance_of AvatarUploader
expect(@user.avatar.url).to eq "/uploads/system/user/avatar/#{@user.id}/banana_sample.gif"
expect(@user.avatar.url).to eq "/uploads/-/system/user/avatar/#{@user.id}/banana_sample.gif"
end
step 'I should see the "Remove avatar" button' do
......
......@@ -38,7 +38,7 @@ class Spinach::Features::Project < Spinach::FeatureSteps
step 'I should see new project avatar' do
expect(@project.avatar).to be_instance_of AvatarUploader
url = @project.avatar.url
expect(url).to eq "/uploads/system/project/avatar/#{@project.id}/banana_sample.gif"
expect(url).to eq "/uploads/-/system/project/avatar/#{@project.id}/banana_sample.gif"
end
step 'I should see the "Remove avatar" button' do
......
......@@ -8,7 +8,12 @@ require_dependency 'declarative_policy/step'
require_dependency 'declarative_policy/base'
require 'thread'
module DeclarativePolicy
CLASS_CACHE_MUTEX = Mutex.new
CLASS_CACHE_IVAR = :@__DeclarativePolicy_CLASS_CACHE
class << self
def policy_for(user, subject, opts = {})
cache = opts[:cache] || {}
......@@ -23,7 +28,36 @@ module DeclarativePolicy
subject = find_delegate(subject)
subject.class.ancestors.each do |klass|
class_for_class(subject.class)
end
private
# This method is heavily cached because there are a lot of anonymous
# modules in play in a typical rails app, and #name performs quite
# slowly for anonymous classes and modules.
#
# See https://bugs.ruby-lang.org/issues/11119
#
# if the above bug is resolved, this caching could likely be removed.
def class_for_class(subject_class)
unless subject_class.instance_variable_defined?(CLASS_CACHE_IVAR)
CLASS_CACHE_MUTEX.synchronize do
# re-check in case of a race
break if subject_class.instance_variable_defined?(CLASS_CACHE_IVAR)
policy_class = compute_class_for_class(subject_class)
subject_class.instance_variable_set(CLASS_CACHE_IVAR, policy_class)
end
end
policy_class = subject_class.instance_variable_get(CLASS_CACHE_IVAR)
raise "no policy for #{subject.class.name}" if policy_class.nil?
policy_class
end
def compute_class_for_class(subject_class)
subject_class.ancestors.each do |klass|
next unless klass.name
begin
......@@ -37,12 +71,8 @@ module DeclarativePolicy
nil
end
end
raise "no policy for #{subject.class.name}"
end
private
def find_delegate(subject)
seen = Set.new
......
......@@ -21,11 +21,14 @@ module DeclarativePolicy
private
def id_for(obj)
if obj.respond_to?(:id) && obj.id
obj.id.to_s
else
"##{obj.object_id}"
id =
begin
obj.id
rescue NoMethodError
nil
end
id || "##{obj.object_id}"
end
end
end
......
......@@ -82,6 +82,7 @@ module DeclarativePolicy
# depending on the scope, we may cache only by the user or only by
# the subject, resulting in sharing across different policy objects.
def cache_key
@cache_key ||=
case @condition.scope
when :normal then "/dp/condition/#{@condition.key}/#{user_key},#{subject_key}"
when :user then "/dp/condition/#{@condition.key}/#{user_key}"
......
module Gitlab
module BackgroundMigration
class MigrateSystemUploadsToNewFolder
include Gitlab::Database::MigrationHelpers
attr_reader :old_folder, :new_folder
class Upload < ActiveRecord::Base
self.table_name = 'uploads'
include EachBatch
end
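# Rewrites Upload#path prefixes from the old folder (e.g. 'uploads/system/') to
# the new one (e.g. 'uploads/-/system/') in batches, using a SQL-level replace
# so rows never need to be instantiated.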
def perform(old_folder, new_folder)
replace_sql = replace_sql(uploads[:path], old_folder, new_folder)
affected_uploads = Upload.where(uploads[:path].matches("#{old_folder}%"))
affected_uploads.each_batch do |batch|
batch.update_all("path = #{replace_sql}")
end
end
def uploads
Arel::Table.new('uploads')
end
end
end
end
module Gitlab
module Cache
# This module provides a simple way to cache values in RequestStore,
# and the cache key would be based on the class name, method name,
# optionally customized instance level values, optionally customized
# method level values, and optional method arguments.
#
# A simple example:
#
# class UserAccess
# extend Gitlab::Cache::RequestCache
#
# request_cache_key do
# [user&.id, project&.id]
# end
#
# request_cache def can_push_to_branch?(ref)
# # ...
# end
# end
#
# This way, the result of `can_push_to_branch?` would be cached in
# `RequestStore.store` based on the cache key. If RequestStore is not
# currently active, then it would be stored in a hash saved in an
# instance variable, so the cache logic would be the same.
# Here's another example using customized method level values:
#
# class Commit
# extend Gitlab::Cache::RequestCache
#
# def author
# User.find_by_any_email(author_email.downcase)
# end
# request_cache(:author) { author_email.downcase }
# end
#
# So that we could have different strategies for different methods
#
module RequestCache
def self.extended(klass)
return if klass < self
extension = Module.new
klass.const_set(:RequestCacheExtension, extension)
klass.prepend(extension)
end
def request_cache_key(&block)
if block_given?
@request_cache_key = block
else
@request_cache_key
end
end
def request_cache(method_name, &method_key_block)
const_get(:RequestCacheExtension).module_eval do
cache_key_method_name = "#{method_name}_cache_key"
define_method(method_name) do |*args|
store =
if RequestStore.active?
RequestStore.store
else
ivar_name = # ! and ? cannot be used as ivar name
"@cache_#{method_name.to_s.tr('!?', "\u2605\u2606")}"
instance_variable_get(ivar_name) ||
instance_variable_set(ivar_name, {})
end
key = __send__(cache_key_method_name, args)
store.fetch(key) { store[key] = super(*args) }
end
define_method(cache_key_method_name) do |args|
klass = self.class
instance_key = instance_exec(&klass.request_cache_key) if
klass.request_cache_key
method_key = instance_exec(&method_key_block) if method_key_block
[klass.name, method_name, *instance_key, *method_key, *args]
.join(':')
end
private cache_key_method_name
end
end
end
end
end
......@@ -140,6 +140,8 @@ module Gitlab
return add_foreign_key(source, target,
column: column,
on_delete: on_delete)
else
on_delete = 'SET NULL' if on_delete == :nullify
end
disable_statement_timeout
......@@ -155,7 +157,7 @@ module Gitlab
ADD CONSTRAINT #{key_name}
FOREIGN KEY (#{column})
REFERENCES #{target} (id)
#{on_delete ? "ON DELETE #{on_delete}" : ''}
#{on_delete ? "ON DELETE #{on_delete.upcase}" : ''}
NOT VALID;
EOF
......
......@@ -98,7 +98,15 @@ module Gitlab
# Commit.between(repo, '29eda46b', 'master')
#
def between(repo, base, head)
repo.commits_between(base, head).map do |commit|
commits = Gitlab::GitalyClient.migrate(:commits_between) do |is_enabled|
if is_enabled
repo.gitaly_commit_client.between(base, head)
else
repo.commits_between(base, head)
end
end
commits.map do |commit|
decorate(commit)
end
rescue Rugged::ReferenceError
......@@ -210,6 +218,8 @@ module Gitlab
init_from_hash(raw_commit)
elsif raw_commit.is_a?(Rugged::Commit)
init_from_rugged(raw_commit)
elsif raw_commit.is_a?(Gitaly::GitCommit)
init_from_gitaly(raw_commit)
else
raise "Invalid raw commit type: #{raw_commit.class}"
end
......@@ -371,6 +381,22 @@ module Gitlab
@parent_ids = commit.parents.map(&:oid)
end
def init_from_gitaly(commit)
@raw_commit = commit
@id = commit.id
# TODO: Once gitaly "takes over" Rugged consider separating the
# subject from the message to make it clearer when there's one
# available but not the other.
@message = (commit.body.presence || commit.subject).dup
@authored_date = Time.at(commit.author.date.seconds)
@author_name = commit.author.name.dup
@author_email = commit.author.email.dup
@committed_date = Time.at(commit.committer.date.seconds)
@committer_name = commit.committer.name.dup
@committer_email = commit.committer.email.dup
@parent_ids = commit.parent_ids
end
def serialize_keys
SERIALIZE_KEYS
end
......
......@@ -807,6 +807,14 @@ module Gitlab
Gitlab::GitalyClient::Util.repository(@storage, @relative_path)
end
def gitaly_ref_client
@gitaly_ref_client ||= Gitlab::GitalyClient::RefService.new(self)
end
def gitaly_commit_client
@gitaly_commit_client ||= Gitlab::GitalyClient::CommitService.new(self)
end
private
# Gitaly note: JV: Trying to get rid of the 'filter' option so we can implement this with 'git'.
......@@ -1105,14 +1113,6 @@ module Gitlab
end
end
def gitaly_ref_client
@gitaly_ref_client ||= Gitlab::GitalyClient::RefService.new(self)
end
def gitaly_commit_client
@gitaly_commit_client ||= Gitlab::GitalyClient::CommitService.new(self)
end
def gitaly_migrate(method, &block)
Gitlab::GitalyClient.migrate(method, &block)
rescue GRPC::NotFound => e
......
......@@ -65,6 +65,17 @@ module Gitlab
GitalyClient.call(@repository.storage, :commit_service, :count_commits, request).count
end
def between(from, to)
request = Gitaly::CommitsBetweenRequest.new(
repository: @gitaly_repo,
from: from,
to: to
)
response = GitalyClient.call(@repository.storage, :commit_service, :commits_between, request)
consume_commits_response(response)
end
private
def commit_diff_request_params(commit, options = {})
......@@ -77,6 +88,10 @@ module Gitlab
paths: options.fetch(:paths, [])
}
end
def consume_commits_response(response)
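# The RPC streams the commits in chunks; flatten them into a single array.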
response.flat_map { |r| r.commits }
end
end
end
end
......@@ -96,11 +96,11 @@ module Gitlab
id: response.commit_id,
message: message,
authored_date: Time.at(response.commit_author.date.seconds),
author_name: response.commit_author.name,
author_email: response.commit_author.email,
author_name: response.commit_author.name.dup,
author_email: response.commit_author.email.dup,
committed_date: Time.at(response.commit_committer.date.seconds),
committer_name: response.commit_committer.name,
committer_email: response.commit_committer.email
committer_name: response.commit_committer.name.dup,
committer_email: response.commit_committer.email.dup
}
Gitlab::Git::Commit.decorate(hash)
......
......@@ -12,8 +12,10 @@ module Gitlab
'zh_HK' => '繁體中文(香港)',
'zh_TW' => '繁體中文(臺灣)',
'bg' => 'български',
'ru' => 'Русский',
'eo' => 'Esperanto',
'it' => 'Italiano'
'it' => 'Italiano',
'ja' => '日本語'
}.freeze
def available_locales
......
......@@ -6,9 +6,11 @@ module Gitlab
include Gitlab::CurrentSettings
def metrics_folder_present?
ENV.has_key?('prometheus_multiproc_dir') &&
::Dir.exist?(ENV['prometheus_multiproc_dir']) &&
::File.writable?(ENV['prometheus_multiproc_dir'])
multiprocess_files_dir = ::Prometheus::Client.configuration.multiprocess_files_dir
multiprocess_files_dir &&
::Dir.exist?(multiprocess_files_dir) &&
::File.writable?(multiprocess_files_dir)
end
def prometheus_metrics_enabled?
......
......@@ -49,7 +49,6 @@ module Gitlab
sent_notifications
services
snippets
system
teams
u
unicorn_test
......
# This solves a bug with an X-Sendfile header that wouldn't be set properly, see
# https://github.com/peek/peek-performance_bar/pull/27
module Gitlab
module PerformanceBar
module PeekPerformanceBarWithRackBody
def call(env)
@env = env
reset_stats
@total_requests += 1
first_request if @total_requests == 1
env['process.request_start'] = @start.to_f
env['process.total_requests'] = total_requests
status, headers, body = @app.call(env)
body = Rack::BodyProxy.new(body) { record_request }
[status, headers, body]
end
end
end
end
......@@ -37,7 +37,7 @@ module Gitlab
def track_query(raw_query, bindings, start, finish)
query = Gitlab::Sherlock::Query.new(raw_query, start, finish)
query_info = { duration: '%.3f' % query.duration, sql: query.formatted_query }
query_info = { duration: query.duration.round(3), sql: query.formatted_query }
PEEK_DB_CLIENT.query_details << query_info
end
......
module Gitlab
class UserAccess
extend Gitlab::Cache::RequestCache
request_cache_key do
[user&.id, project&.id]
end
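# The request_cache-wrapped checks below are memoized per request, keyed by the
# user, the project, and the ref argument.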
attr_reader :user, :project
def initialize(user, project: nil)
......@@ -28,7 +34,7 @@ module Gitlab
true
end
def can_create_tag?(ref)
request_cache def can_create_tag?(ref)
return false unless can_access_git?
if ProtectedTag.protected?(project, ref)
......@@ -38,7 +44,7 @@ module Gitlab
end
end
def can_delete_branch?(ref)
request_cache def can_delete_branch?(ref)
return false unless can_access_git?
if ProtectedBranch.protected?(project, ref)
......@@ -48,7 +54,7 @@ module Gitlab
end
end
def can_push_to_branch?(ref)
request_cache def can_push_to_branch?(ref)
return false unless can_access_git?
if ProtectedBranch.protected?(project, ref)
......@@ -60,7 +66,7 @@ module Gitlab
end
end
def can_merge_to_branch?(ref)
request_cache def can_merge_to_branch?(ref)
return false unless can_access_git?
if ProtectedBranch.protected?(project, ref)
......
......@@ -6,20 +6,41 @@ msgid ""
msgstr ""
"Project-Id-Version: gitlab 1.0.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2017-06-15 21:59-0500\n"
"POT-Creation-Date: 2017-06-28 13:32+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-07-05 02:56-0400\n"
"Last-Translator: Huang Tao <htve@outlook.com>\n"
"Language-Team: Portuguese (Brazil)\n"
"PO-Revision-Date: 2017-07-12 09:05-0400\n"
"Last-Translator: Leandro Nunes dos Santos <leandronunes@gmail.com>\n"
"Language-Team: Portuguese (Brazil) (https://translate.zanata.org/project/view/GitLab)\n"
"Language: pt-BR\n"
"X-Generator: Zanata 3.9.6\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid "%s additional commit has been omitted to prevent performance issues."
msgid_plural ""
"%s additional commits have been omitted to prevent performance issues."
msgstr[0] ""
"%s commit adicional foi omitido para prevenir problemas de performance."
msgstr[1] ""
"%s commits adicionais foram omitidos para prevenir problemas de performance."
msgid "%d commit"
msgid_plural "%d commits"
msgstr[0] "%d commit"
msgstr[1] "%d commits"
msgid "%{commit_author_link} committed %{commit_timeago}"
msgstr "%{commit_author_link} fez commit %{commit_timeago}"
msgid "1 pipeline"
msgid_plural "%d pipelines"
msgstr[0] "1 pipeline"
msgstr[1] "%d pipelines"
msgid "A collection of graphs regarding Continuous Integration"
msgstr "Uma coleção de gráficos sobre Integração Contínua"
msgid "About auto deploy"
msgstr "Sobre a implantação automática"
......@@ -67,9 +88,24 @@ msgstr ""
"implantação automática, selecione um modelo de Yaml do GitLab CI e registre "
"suas mudanças. %{link_to_autodeploy_doc}"
msgid "BranchSwitcherPlaceholder|Search branches"
msgstr "BranchSwitcherPlaceholder|Procurar por branches"
msgid "BranchSwitcherTitle|Switch branch"
msgstr "BranchSwitcherTitle|Mudar de branch"
msgid "Branches"
msgstr "Branches"
msgid "Browse Directory"
msgstr "Navegar no Diretório"
msgid "Browse File"
msgstr "Pesquisar Arquivo"
msgid "Browse Files"
msgstr "Pesquisar Arquivos"
msgid "Browse files"
msgstr "Navegar pelos arquivos"
......@@ -165,6 +201,9 @@ msgid_plural "Commits"
msgstr[0] "Commit"
msgstr[1] "Commits"
msgid "Commit duration in minutes for last 30 commits"
msgstr "Duração do commit em minutos para os últimos 30 commits"
msgid "Commit message"
msgstr "Mensagem de commit"
......@@ -177,6 +216,9 @@ msgstr "Adicionar %{file_name}"
msgid "Commits"
msgstr "Commits"
msgid "Commits feed"
msgstr "Feed de commits"
msgid "Commits|History"
msgstr "Histórico"
......@@ -201,6 +243,13 @@ msgstr "Copiar SHA do commit para a área de transferência"
msgid "Create New Directory"
msgstr "Criar Novo Diretório"
msgid ""
"Create a personal access token on your account to pull or push via "
"%{protocol}."
msgstr ""
"Crie um token de acesso pessoal na sua conta para dar pull ou push via "
"%{protocol}."
msgid "Create directory"
msgstr "Criar diretório"
......@@ -219,6 +268,9 @@ msgstr "Fork"
msgid "CreateTag|Tag"
msgstr "Tag"
msgid "CreateTokenToCloneLink|create a personal access token"
msgstr "CreateTokenToCloneLink|criar um token de acesso pessoal"
msgid "Cron Timezone"
msgstr "Fuso horário do cron"
......@@ -340,6 +392,9 @@ msgstr "Erro ao excluir o agendamento do pipeline"
msgid "Files"
msgstr "Arquivos"
msgid "Filter by commit message"
msgstr "Filtrar por mensagem de commit"
msgid "Find by path"
msgstr "Localizar por caminho"
......@@ -388,6 +443,15 @@ msgstr "Padrão de intervalo"
msgid "Introducing Cycle Analytics"
msgstr "Apresentando a Análise de Ciclo"
msgid "Jobs for last month"
msgstr "Jobs no último mês"
msgid "Jobs for last week"
msgstr "Jobs na última semana"
msgid "Jobs for last year"
msgstr "Jobs no último ano"
msgid "LFSStatus|Disabled"
msgstr "Desabilitado"
......@@ -553,6 +617,21 @@ msgstr "Agendamento da Pipeline"
msgid "Pipeline Schedules"
msgstr "Agendamentos da Pipeline"
msgid "PipelineCharts|Failed:"
msgstr "PipelineCharts|Falhou:"
msgid "PipelineCharts|Overall statistics"
msgstr "PipelineCharts|Estatísticas gerais"
msgid "PipelineCharts|Success ratio:"
msgstr "PipelineCharts|Taxa de sucesso:"
msgid "PipelineCharts|Successful:"
msgstr "PipelineCharts|Sucesso:"
msgid "PipelineCharts|Total:"
msgstr "PipelineCharts|Total:"
msgid "PipelineSchedules|Activated"
msgstr "Ativado"
......@@ -583,6 +662,18 @@ msgstr "Destino"
msgid "PipelineSheduleIntervalPattern|Custom"
msgstr "Personalizado"
msgid "Pipelines"
msgstr "Pipelines"
msgid "Pipelines charts"
msgstr "Gráficos de pipelines"
msgid "Pipeline|all"
msgstr "Pipeline|todos"
msgid "Pipeline|success"
msgstr "Pipeline|sucesso"
msgid "Pipeline|with stage"
msgstr "com etapa"
......@@ -713,10 +804,10 @@ msgstr "Selecionar fuso horário"
msgid "Select target branch"
msgstr "Selecionar branch de destino"
msgid "Set a password on your account to pull or push via %{protocol}"
msgid "Set a password on your account to pull or push via %{protocol}."
msgstr ""
"Defina uma senha para sua conta para aceitar ou entregar código via "
"%{protocol}"
"%{protocol}."
msgid "Set up CI"
msgstr "Configurar CI"
......@@ -1032,9 +1123,15 @@ msgstr "Enviar Novo Arquivo"
msgid "Upload file"
msgstr "Enviar arquivo"
msgid "UploadLink|click to upload"
msgstr "UploadLink|clique para fazer upload"
msgid "Use your global notification setting"
msgstr "Utilizar configuração de notificação global"
msgid "View open merge request"
msgstr "Ver merge request aberto"
msgid "VisibilityLevel|Internal"
msgstr "Interno"
......
......@@ -10,7 +10,7 @@ fi
# Only install knapsack after bundle install! Otherwise oddly some native
# gems could not be found under some circumstance. No idea why, hours wasted.
retry gem install knapsack fog-aws mime-types
retry gem install knapsack
cp config/gitlab.yml.example config/gitlab.yml
......
......@@ -12,7 +12,7 @@ describe MetricsController do
  before do
    stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
    stub_env('prometheus_multiproc_dir', metrics_multiproc_dir)
    allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return(metrics_multiproc_dir)
    allow(Gitlab::Metrics).to receive(:prometheus_metrics_enabled?).and_return(true)
    allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip, whitelisted_ip_range])
  end
......
......@@ -7,14 +7,16 @@ describe Projects::IssuesController do
describe "GET #index" do
context 'external issue tracker' do
let!(:service) do
create(:custom_issue_tracker_service, project: project, title: 'Custom Issue Tracker', project_url: 'http://test.com')
end
it 'redirects to the external issue tracker' do
external = double(project_path: 'https://example.com/project')
allow(project).to receive(:external_issue_tracker).and_return(external)
controller.instance_variable_set(:@project, project)
get :index, namespace_id: project.namespace, project_id: project
expect(response).to redirect_to('https://example.com/project')
expect(response).to redirect_to(service.issue_tracker_path)
end
end
......@@ -139,19 +141,21 @@ describe Projects::IssuesController do
    end

    context 'external issue tracker' do
      let!(:service) do
        create(:custom_issue_tracker_service, project: project, title: 'Custom Issue Tracker', new_issue_url: 'http://test.com')
      end

      before do
        sign_in(user)
        project.team << [user, :developer]
      end

      it 'redirects to the external issue tracker' do
        external = double(new_issue_path: 'https://example.com/issues/new')
        allow(project).to receive(:external_issue_tracker).and_return(external)
        controller.instance_variable_set(:@project, project)

        get :new, namespace_id: project.namespace, project_id: project

        expect(response).to redirect_to('https://example.com/issues/new')
        expect(response).to redirect_to('http://test.com')
      end
    end
  end
......
......@@ -4,14 +4,19 @@ FactoryGirl.define do
  factory :commit do
    git_commit RepoHelpers.sample_commit
    project factory: :empty_project
    author { build(:author) }

    initialize_with do
      new(git_commit, project)
    end

    after(:build) do |commit|
      allow(commit).to receive(:author).and_return build(:author)
    end

    trait :without_author do
      author nil
      after(:build) do |commit|
        allow(commit).to receive(:author).and_return nil
      end
    end
  end
end
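A spec-side sketch of how the reworked factory above might be used (hypothetical example, assuming FactoryGirl syntax methods and the :author factory are available):

commit = build(:commit)                      # author stubbed via after(:build)
anonymous = build(:commit, :without_author)  # trait stubs the author to nil
commit.author     # => a built :author
anonymous.author  # => nil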
FactoryGirl.define do
  factory :upload do
    model { build(:project) }
    path { "uploads/system/project/avatar/avatar.jpg" }
    path { "uploads/-/system/project/avatar/avatar.jpg" }
    size 100.kilobytes
    uploader "AvatarUploader"
  end
......
......@@ -63,11 +63,11 @@ feature 'Admin Appearance', feature: true do
  end

  def logo_selector
    '//img[@src^="/uploads/system/appearance/logo"]'
    '//img[@src^="/uploads/-/system/appearance/logo"]'
  end

  def header_logo_selector
    '//img[@src^="/uploads/system/appearance/header_logo"]'
    '//img[@src^="/uploads/-/system/appearance/header_logo"]'
  end

  def logo_fixture
......
......@@ -54,7 +54,8 @@ feature 'Member autocomplete', :js do
  let(:note) { create(:note_on_commit, project: project, commit_id: project.commit.id) }

  before do
    allow_any_instance_of(Commit).to receive(:author).and_return(author)
    allow(User).to receive(:find_by_any_email)
      .with(noteable.author_email.downcase).and_return(author)

    visit project_commit_path(project, noteable)
  end
......
......@@ -18,7 +18,7 @@ feature 'User uploads avatar to group', feature: true do
    visit group_path(group)

    expect(page).to have_selector(%Q(img[src$="/uploads/system/group/avatar/#{group.id}/dk.png"]))
    expect(page).to have_selector(%Q(img[src$="/uploads/-/system/group/avatar/#{group.id}/dk.png"]))

    # Cheating here to verify something that isn't user-facing, but is important
    expect(group.reload.avatar.file).to exist
......
......@@ -16,7 +16,7 @@ feature 'User uploads avatar to profile', feature: true do
    visit user_path(user)

    expect(page).to have_selector(%Q(img[src$="/uploads/system/user/avatar/#{user.id}/dk.png"]))
    expect(page).to have_selector(%Q(img[src$="/uploads/-/system/user/avatar/#{user.id}/dk.png"]))

    # Cheating here to verify something that isn't user-facing, but is important
    expect(user.reload.avatar.file).to exist
......