Commit e949386c authored by GitLab Bot

Merge remote-tracking branch 'upstream/master' into ce-to-ee-2018-06-01

# Conflicts:
#	app/assets/javascripts/api.js
#	doc/administration/auth/ldap.md
#	lib/gitlab/auth.rb

[ci skip]
parents b0872f44 709e8b26
...@@ -25,7 +25,10 @@ const Api = {
commitPipelinesPath: '/:project_id/commit/:sha/pipelines',
branchSinglePath: '/api/:version/projects/:id/repository/branches/:branch',
createBranchPath: '/api/:version/projects/:id/repository/branches',
<<<<<<< HEAD
geoNodesPath: '/api/:version/geo_nodes',
=======
>>>>>>> upstream/master
group(groupId, callback) {
const url = Api.buildUrl(Api.groupPath).replace(':id', groupId);
...@@ -238,6 +241,7 @@ const Api = {
});
},
<<<<<<< HEAD
approverUsers(search, options, callback = $.noop) {
const url = Api.buildUrl('/autocomplete/users.json');
return axios
...@@ -274,6 +278,8 @@ const Api = {
});
},
=======
>>>>>>> upstream/master
buildUrl(url) {
let urlRoot = '';
if (gon.relative_url_root != null) {
......
...@@ -31,6 +31,7 @@ export default class Clusters {
installHelmPath,
installIngressPath,
installRunnerPath,
installJupyterPath,
installPrometheusPath,
managePrometheusPath,
clusterStatus,
...@@ -51,6 +52,7 @@ export default class Clusters {
installIngressEndpoint: installIngressPath,
installRunnerEndpoint: installRunnerPath,
installPrometheusEndpoint: installPrometheusPath,
installJupyterEndpoint: installJupyterPath,
});
this.installApplication = this.installApplication.bind(this);
...@@ -209,11 +211,12 @@ export default class Clusters {
}
}
installApplication(appId) {
installApplication(data) {
const appId = data.id;
this.store.updateAppProperty(appId, 'requestStatus', REQUEST_LOADING);
this.store.updateAppProperty(appId, 'requestReason', null);
this.service.installApplication(appId)
this.service.installApplication(appId, data.params)
.then(() => {
this.store.updateAppProperty(appId, 'requestStatus', REQUEST_SUCCESS);
})
......
...@@ -52,6 +52,11 @@
type: String,
required: false,
},
installApplicationRequestParams: {
type: Object,
required: false,
default: () => ({}),
},
},
computed: {
rowJsClass() {
...@@ -109,7 +114,10 @@
},
methods: {
installClicked() {
eventHub.$emit('installApplication', this.id);
eventHub.$emit('installApplication', {
id: this.id,
params: this.installApplicationRequestParams,
});
},
},
};
......
...@@ -121,6 +121,12 @@ export default {
false,
);
},
jupyterInstalled() {
return this.applications.jupyter.status === APPLICATION_INSTALLED;
},
jupyterHostname() {
return this.applications.jupyter.hostname;
},
},
};
</script>
...@@ -278,11 +284,67 @@ export default {
applications to production.`) }}
</div>
</application-row>
<application-row
id="jupyter"
:title="applications.jupyter.title"
title-link="https://jupyterhub.readthedocs.io/en/stable/"
:status="applications.jupyter.status"
:status-reason="applications.jupyter.statusReason"
:request-status="applications.jupyter.requestStatus"
:request-reason="applications.jupyter.requestReason"
:install-application-request-params="{ hostname: applications.jupyter.hostname }"
>
<div slot="description">
<p>
{{ s__(`ClusterIntegration|JupyterHub, a multi-user Hub, spawns,
manages, and proxies multiple instances of the single-user
Jupyter notebook server. JupyterHub can be used to serve
notebooks to a class of students, a corporate data science group,
or a scientific research group.`) }}
</p>
<template v-if="ingressExternalIp">
<div class="form-group">
<label for="jupyter-hostname">
{{ s__('ClusterIntegration|Jupyter Hostname') }}
</label>
<div class="input-group">
<input
type="text"
class="form-control js-hostname"
v-model="applications.jupyter.hostname"
:readonly="jupyterInstalled"
/>
<span
class="input-group-btn"
>
<clipboard-button
:text="jupyterHostname"
:title="s__('ClusterIntegration|Copy Jupyter Hostname to clipboard')"
class="js-clipboard-btn"
/>
</span>
</div>
</div>
<p v-if="ingressInstalled">
{{ s__(`ClusterIntegration|Replace this with your own hostname if you want.
If you do so, point hostname to Ingress IP Address from above.`) }}
<a
:href="ingressDnsHelpPath"
target="_blank"
rel="noopener noreferrer"
>
{{ __('More information') }}
</a>
</p>
</template>
</div>
</application-row>
<!--
NOTE: Don't forget to update `clusters.scss`
min-height for this block and uncomment `application_spec` tests
-->
<!-- Add GitLab Runner row, all other plumbing is complete -->
</div>
</div>
</section>
......
...@@ -11,3 +11,4 @@ export const REQUEST_LOADING = 'request-loading';
export const REQUEST_SUCCESS = 'request-success';
export const REQUEST_FAILURE = 'request-failure';
export const INGRESS = 'ingress';
export const JUPYTER = 'jupyter';
...@@ -8,6 +8,7 @@ export default class ClusterService {
ingress: this.options.installIngressEndpoint,
runner: this.options.installRunnerEndpoint,
prometheus: this.options.installPrometheusEndpoint,
jupyter: this.options.installJupyterEndpoint,
};
}
...@@ -15,8 +16,8 @@ export default class ClusterService {
return axios.get(this.options.endpoint);
}
installApplication(appId) {
installApplication(appId, params) {
return axios.post(this.appInstallEndpointMap[appId]);
return axios.post(this.appInstallEndpointMap[appId], params);
}
static updateCluster(endpoint, data) {
......
import { s__ } from '../../locale';
import { INGRESS } from '../constants';
import { INGRESS, JUPYTER } from '../constants';
export default class ClusterStore {
constructor() {
...@@ -38,6 +38,14 @@ export default class ClusterStore {
requestStatus: null,
requestReason: null,
},
jupyter: {
title: s__('ClusterIntegration|JupyterHub'),
status: null,
statusReason: null,
requestStatus: null,
requestReason: null,
hostname: null,
},
},
};
}
...@@ -83,6 +91,12 @@ export default class ClusterStore {
if (appId === INGRESS) {
this.state.applications.ingress.externalIp = serverAppEntry.external_ip;
} else if (appId === JUPYTER) {
this.state.applications.jupyter.hostname =
serverAppEntry.hostname ||
(this.state.applications.ingress.externalIp
? `jupyter.${this.state.applications.ingress.externalIp}.xip.io`
: '');
}
});
}
......
...@@ -2,7 +2,9 @@ import $ from 'jquery';
export default {
bind(el) {
$(el).tooltip();
$(el).tooltip({
trigger: 'hover',
});
},
componentUpdated(el) {
......
...@@ -19,14 +19,23 @@
width: auto;
display: inline-block;
overflow-x: auto;
border-left: 0;
border: 0;
border-right: 0;
border-color: $md-area-border;
border-bottom: 0;
@supports(width: fit-content) {
display: block;
width: fit-content;
}
tr {
th {
border-bottom: solid 2px $md-area-border;
}
td {
border-color: $md-area-border;
}
}
}
/*
......
...@@ -6,7 +6,7 @@
.cluster-applications-table {
// Wait for the Vue to kick-in and render the applications block
min-height: 400px;
min-height: 628px;
}
.clusters-dropdown-menu {
......
...@@ -5,7 +5,17 @@ class Projects::Clusters::ApplicationsController < Projects::ApplicationControll
before_action :authorize_create_cluster!, only: [:create]
def create
application = @application_class.find_or_create_by!(cluster: @cluster)
application = @application_class.find_or_initialize_by(cluster: @cluster)
if application.has_attribute?(:hostname)
application.hostname = params[:hostname]
end
if application.respond_to?(:oauth_application)
application.oauth_application = create_oauth_application(application)
end
application.save!
Clusters::Applications::ScheduleInstallationService.new(project, current_user).execute(application)
...@@ -23,4 +33,15 @@ class Projects::Clusters::ApplicationsController < Projects::ApplicationControll
def application_class
@application_class ||= Clusters::Cluster::APPLICATIONS[params[:application]] || render_404
end
def create_oauth_application(application)
oauth_application_params = {
name: params[:application],
redirect_uri: application.callback_url,
scopes: 'api read_user openid',
owner: current_user
}
Applications::CreateService.new(current_user, oauth_application_params).execute
end
end
module Clusters
module Applications
class Jupyter < ActiveRecord::Base
VERSION = '0.0.1'.freeze
self.table_name = 'clusters_applications_jupyter'
include ::Clusters::Concerns::ApplicationCore
include ::Clusters::Concerns::ApplicationStatus
include ::Clusters::Concerns::ApplicationData
belongs_to :oauth_application, class_name: 'Doorkeeper::Application'
default_value_for :version, VERSION
def set_initial_status
return unless not_installable?
if cluster&.application_ingress_installed? && cluster.application_ingress.external_ip
self.status = 'installable'
end
end
def chart
"#{name}/jupyterhub"
end
def repository
'https://jupyterhub.github.io/helm-chart/'
end
def values
content_values.to_yaml
end
def install_command
Gitlab::Kubernetes::Helm::InstallCommand.new(
name,
chart: chart,
values: values,
repository: repository
)
end
def callback_url
"http://#{hostname}/hub/oauth_callback"
end
private
def specification
{
"ingress" => {
"hosts" => [hostname]
},
"hub" => {
"extraEnv" => {
"GITLAB_HOST" => gitlab_url
},
"cookieSecret" => cookie_secret
},
"proxy" => {
"secretToken" => secret_token
},
"auth" => {
"gitlab" => {
"clientId" => oauth_application.uid,
"clientSecret" => oauth_application.secret,
"callbackUrl" => callback_url
}
}
}
end
def gitlab_url
Gitlab.config.gitlab.url
end
def content_values
YAML.load_file(chart_values_file).deep_merge!(specification)
end
def secret_token
@secret_token ||= SecureRandom.hex(32)
end
def cookie_secret
@cookie_secret ||= SecureRandom.hex(32)
end
end
end
end
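For reference, here is a rough standalone sketch (not part of this MR) of the values hash the model above would hand to Helm for an example hostname. The hostname, GitLab URL and OAuth credentials are placeholders; in the real model the OAuth values come from the Doorkeeper application created by the controller, and the hash is deep-merged into the chart's default values file before being converted to YAML.

require 'yaml'
require 'securerandom'

hostname     = 'jupyter.1.2.3.4.xip.io' # example; normally derived from the Ingress external IP
oauth_uid    = 'example-uid'            # stands in for oauth_application.uid
oauth_secret = 'example-secret'         # stands in for oauth_application.secret

specification = {
  'ingress' => { 'hosts' => [hostname] },
  'hub' => {
    'extraEnv' => { 'GITLAB_HOST' => 'https://gitlab.example.com' }, # placeholder GitLab URL
    'cookieSecret' => SecureRandom.hex(32)
  },
  'proxy' => { 'secretToken' => SecureRandom.hex(32) },
  'auth' => {
    'gitlab' => {
      'clientId' => oauth_uid,
      'clientSecret' => oauth_secret,
      'callbackUrl' => "http://#{hostname}/hub/oauth_callback"
    }
  }
}

# The model deep-merges a hash like this into the chart's default values
# and serializes the result to YAML for the Helm install command.
puts specification.to_yaml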
...@@ -10,7 +10,8 @@ module Clusters
Applications::Helm.application_name => Applications::Helm,
Applications::Ingress.application_name => Applications::Ingress,
Applications::Prometheus.application_name => Applications::Prometheus,
Applications::Runner.application_name => Applications::Runner
Applications::Runner.application_name => Applications::Runner,
Applications::Jupyter.application_name => Applications::Jupyter
}.freeze
DEFAULT_ENVIRONMENT = '*'.freeze
...@@ -28,6 +29,7 @@ module Clusters
has_one :application_ingress, class_name: 'Clusters::Applications::Ingress'
has_one :application_prometheus, class_name: 'Clusters::Applications::Prometheus'
has_one :application_runner, class_name: 'Clusters::Applications::Runner'
has_one :application_jupyter, class_name: 'Clusters::Applications::Jupyter'
accepts_nested_attributes_for :provider_gcp, update_only: true
accepts_nested_attributes_for :platform_kubernetes, update_only: true
...@@ -41,6 +43,7 @@ module Clusters
delegate :active?, to: :platform_kubernetes, prefix: true, allow_nil: true
delegate :installed?, to: :application_helm, prefix: true, allow_nil: true
delegate :installed?, to: :application_ingress, prefix: true, allow_nil: true
enum platform_type: {
kubernetes: 1
...@@ -76,7 +79,8 @@ module Clusters
application_helm || build_application_helm,
application_ingress || build_application_ingress,
application_prometheus || build_application_prometheus,
application_runner || build_application_runner
application_runner || build_application_runner,
application_jupyter || build_application_jupyter
]
end
......
...@@ -3,4 +3,5 @@ class ClusterApplicationEntity < Grape::Entity
expose :status_name, as: :status
expose :status_reason
expose :external_ip, if: -> (e, _) { e.respond_to?(:external_ip) }
expose :hostname, if: -> (e, _) { e.respond_to?(:hostname) }
end
...@@ -12,8 +12,8 @@ module Clusters
ClusterWaitForAppInstallationWorker::INTERVAL, app.name, app.id)
rescue Kubeclient::HttpError => ke
app.make_errored!("Kubernetes error: #{ke.message}")
rescue StandardError
rescue StandardError => e
app.make_errored!("Can't start installation process")
app.make_errored!("Can't start installation process. #{e.message}")
end
end
end
......
...@@ -5,14 +5,16 @@ module Projects
def execute(noteable)
@noteable = noteable
project_members = sorted(project.team.members)
participants = noteable_owner + participants_in_noteable + all_members + groups + project_members
participants.uniq
end
def project_members
@project_members ||= sorted(project.team.members)
end
def all_members
count = project.team.members.flatten.count
[{ username: "all", name: "All Project and Group Members", count: project_members.count }]
[{ username: "all", name: "All Project and Group Members", count: count }]
end
end
end
...@@ -11,16 +11,16 @@
In the next steps, you'll be able to map users and select the projects
you want to import.
.form-group.row
= label_tag :uri, 'FogBugz URL', class: 'col-form-label col-sm-8'
= label_tag :uri, 'FogBugz URL', class: 'col-form-label col-md-2'
.col-sm-4
.col-md-4
= text_field_tag :uri, nil, placeholder: 'https://mycompany.fogbugz.com', class: 'form-control'
.form-group.row
= label_tag :email, 'FogBugz Email', class: 'col-form-label col-sm-8'
= label_tag :email, 'FogBugz Email', class: 'col-form-label col-md-2'
.col-sm-4
.col-md-4
= text_field_tag :email, nil, class: 'form-control'
.form-group.row
= label_tag :password, 'FogBugz Password', class: 'col-form-label col-sm-8'
= label_tag :password, 'FogBugz Password', class: 'col-form-label col-md-2'
.col-sm-4
.col-md-4
= password_field_tag :password, nil, class: 'form-control'
.form-actions
= submit_tag 'Continue to the next step', class: 'btn btn-create'
%p
Merge Request #{link_to @merge_request.to_reference, project_merge_request_url(@merge_request.target_project, @merge_request)} can no longer be merged due to the following #{pluralize(@reasons, 'reason')}:
Merge Request #{link_to @merge_request.to_reference, project_merge_request_url(@merge_request.target_project, @merge_request)} can no longer be merged due to the following #{'reason'.pluralize(@reasons.count)}:
%ul
- @reasons.each do |reason|
......
Merge Request #{@merge_request.to_reference} can no longer be merged due to the following #{pluralize(@reasons, 'reason')}:
Merge Request #{@merge_request.to_reference} can no longer be merged due to the following #{'reason'.pluralize(@reasons.count)}:
- @reasons.each do |reason|
* #{reason}
......
...@@ -4,7 +4,7 @@
= form_for @user, url: profile_preferences_path, remote: true, method: :put, html: { class: 'row prepend-top-default js-preferences-form' } do |f|
.col-lg-4.application-theme
%h4.prepend-top-0
GitLab navigation theme
s_('Preferences|Navigation theme')
%p Customize the appearance of the application header and navigation sidebar.
.col-lg-8.application-theme
- Gitlab::Themes.each do |theme|
......
...@@ -11,6 +11,7 @@
install_ingress_path: install_applications_namespace_project_cluster_path(@cluster.project.namespace, @cluster.project, @cluster, :ingress),
install_prometheus_path: install_applications_namespace_project_cluster_path(@cluster.project.namespace, @cluster.project, @cluster, :prometheus),
install_runner_path: install_applications_namespace_project_cluster_path(@cluster.project.namespace, @cluster.project, @cluster, :runner),
install_jupyter_path: install_applications_namespace_project_cluster_path(@cluster.project.namespace, @cluster.project, @cluster, :jupyter),
toggle_status: @cluster.enabled? ? 'true': 'false',
cluster_status: @cluster.status_name,
cluster_status_reason: @cluster.status_reason,
......
---
title: Add missing migration for minimal Project build_timeout
merge_request: 18775
author:
type: fixed
---
title: Adds JupyterHub to cluster applications
merge_request: 19019
author:
type: added
---
title: Fix CarrierWave reading local files into memory when migrating to ObjectStorage
merge_request: 19102
author:
type: performance
...@@ -447,8 +447,10 @@ repositories_storages = Settings.repositories.storages.values
repository_downloads_path = Settings.gitlab['repository_downloads_path'].to_s.gsub(%r{/$}, '')
repository_downloads_full_path = File.expand_path(repository_downloads_path, Settings.gitlab['user_home'])
if repository_downloads_path.blank? || repositories_storages.any? { |rs| [repository_downloads_path, repository_downloads_full_path].include?(rs.legacy_disk_path.gsub(%r{/$}, '')) }
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
if repository_downloads_path.blank? || repositories_storages.any? { |rs| [repository_downloads_path, repository_downloads_full_path].include?(rs.legacy_disk_path.gsub(%r{/$}, '')) }
Settings.gitlab['repository_downloads_path'] = File.join(Settings.shared['path'], 'cache/archive')
end
end
#
......
...@@ -38,12 +38,14 @@ def validate_storages_config
end
def validate_storages_paths
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
Gitlab.config.repositories.storages.each do |name, repository_storage|
parent_name, _parent_path = find_parent_path(name, repository_storage.legacy_disk_path)
if parent_name
storage_validation_error("#{name} is a nested path of #{parent_name}. Nested paths are not supported for repository storages")
end
end
end
end
validate_storages_config
......
# This fixes the problem https://gitlab.com/gitlab-org/gitlab-ce/issues/46182 where CarrierWave eagerly loads uploaded files into memory
# There is a PR https://github.com/carrierwaveuploader/carrierwave/pull/2314 which has the identical change.
module CarrierWave
module Storage
class Fog < Abstract
class File
module MonkeyPatch
##
# Read content of file from service
#
# === Returns
#
# [String] contents of file
def read
file_body = file.body
return if file_body.nil?
return file_body unless file_body.is_a?(::File)
# Fog::Storage::XXX::File#body could return the source file which was uploaded to the remote server.
read_source_file(file_body) if ::File.exist?(file_body.path)
# If the source file doesn't exist, the remote content is read
@file = nil # rubocop:disable Gitlab/ModuleWithInstanceVariables
file.body
end
##
# Write file to service
#
# === Returns
#
# [Boolean] true on success or raises error
def store(new_file)
if new_file.is_a?(self.class) # rubocop:disable Cop/LineBreakAroundConditionalBlock
new_file.copy_to(path)
else
fog_file = new_file.to_file
@content_type ||= new_file.content_type # rubocop:disable Gitlab/ModuleWithInstanceVariables
@file = directory.files.create({ # rubocop:disable Gitlab/ModuleWithInstanceVariables
:body => fog_file ? fog_file : new_file.read, # rubocop:disable Style/HashSyntax
:content_type => @content_type, # rubocop:disable Style/HashSyntax,Gitlab/ModuleWithInstanceVariables
:key => path, # rubocop:disable Style/HashSyntax
:public => @uploader.fog_public # rubocop:disable Style/HashSyntax,Gitlab/ModuleWithInstanceVariables
}.merge(@uploader.fog_attributes)) # rubocop:disable Gitlab/ModuleWithInstanceVariables
fog_file.close if fog_file && !fog_file.closed?
end
true
end
private
def read_source_file(file_body)
return unless ::File.exist?(file_body.path)
begin
file_body = ::File.open(file_body.path) if file_body.closed? # Reopen if it's already closed
file_body.read
ensure
file_body.close
end
end
end
prepend MonkeyPatch
end
end
end
end
# See http://doc.gitlab.com/ce/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class CreateClustersApplicationsJupyter < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
def change
create_table :clusters_applications_jupyter do |t|
t.references :cluster, null: false, unique: true, foreign_key: { on_delete: :cascade }
t.references :oauth_application, foreign_key: { on_delete: :nullify }
t.integer :status, null: false
t.string :version, null: false
t.string :hostname
t.timestamps_with_timezone null: false
t.text :status_reason
end
end
end
class SetMinimalProjectBuildTimeout < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
MINIMUM_TIMEOUT = 600
# Allow this migration to resume if it fails partway through
disable_ddl_transaction!
def up
update_column_in_batches(:projects, :build_timeout, MINIMUM_TIMEOUT) do |table, query|
query.where(table[:build_timeout].lt(MINIMUM_TIMEOUT))
end
end
def down
# no-op
end
end
...@@ -745,6 +745,17 @@ ActiveRecord::Schema.define(version: 20180529093006) do
t.string "external_ip"
end
create_table "clusters_applications_jupyter", force: :cascade do |t|
t.integer "cluster_id", null: false
t.integer "oauth_application_id"
t.integer "status", null: false
t.string "version", null: false
t.string "hostname"
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
t.text "status_reason"
end
create_table "clusters_applications_prometheus", force: :cascade do |t| create_table "clusters_applications_prometheus", force: :cascade do |t|
t.integer "cluster_id", null: false t.integer "cluster_id", null: false
t.integer "status", null: false t.integer "status", null: false
...@@ -2807,6 +2818,8 @@ ActiveRecord::Schema.define(version: 20180529093006) do ...@@ -2807,6 +2818,8 @@ ActiveRecord::Schema.define(version: 20180529093006) do
add_foreign_key "clusters", "users", on_delete: :nullify add_foreign_key "clusters", "users", on_delete: :nullify
add_foreign_key "clusters_applications_helm", "clusters", on_delete: :cascade add_foreign_key "clusters_applications_helm", "clusters", on_delete: :cascade
add_foreign_key "clusters_applications_ingress", "clusters", name: "fk_753a7b41c1", on_delete: :cascade add_foreign_key "clusters_applications_ingress", "clusters", name: "fk_753a7b41c1", on_delete: :cascade
add_foreign_key "clusters_applications_jupyter", "clusters", on_delete: :cascade
add_foreign_key "clusters_applications_jupyter", "oauth_applications", on_delete: :nullify
add_foreign_key "clusters_applications_prometheus", "clusters", name: "fk_557e773639", on_delete: :cascade add_foreign_key "clusters_applications_prometheus", "clusters", name: "fk_557e773639", on_delete: :cascade
add_foreign_key "clusters_applications_runners", "ci_runners", column: "runner_id", name: "fk_02de2ded36", on_delete: :nullify add_foreign_key "clusters_applications_runners", "ci_runners", column: "runner_id", name: "fk_02de2ded36", on_delete: :nullify
add_foreign_key "clusters_applications_runners", "clusters", on_delete: :cascade add_foreign_key "clusters_applications_runners", "clusters", on_delete: :cascade
......
...@@ -7,14 +7,22 @@
GitLab integrates with LDAP to support user authentication.
This integration works with most LDAP-compliant directory
servers, including Microsoft Active Directory, Apple Open Directory, Open LDAP,
<<<<<<< HEAD
and 389 Server. GitLab Enterprise Edition includes enhanced integration,
=======
and 389 Server. GitLab Enterprise Editions include enhanced integration,
>>>>>>> upstream/master
including group membership syncing as well as multiple LDAP servers support.
## GitLab EE
The information on this page is relevant for both GitLab CE and EE. For more
details about EE-specific LDAP features, see the
<<<<<<< HEAD
[LDAP Enterprise Edition documentation](ldap-ee.md).
=======
[LDAP Enterprise Edition documentation](https://docs.gitlab.com/ee/administration/auth/ldap-ee.html).
>>>>>>> upstream/master
## Security
...@@ -39,7 +47,11 @@ immediately block all access.
NOTE: **Note**:
GitLab Enterprise Edition Starter supports a
<<<<<<< HEAD
[configurable sync time](ldap-ee.md#adjusting-ldap-user-and-group-sync-schedules),
=======
[configurable sync time](https://docs.gitlab.com/ee/administration/auth/ldap-ee.html#adjusting-ldap-user-and-group-sync-schedules),
>>>>>>> upstream/master
with a default of one hour.
## Git password authentication
......
...@@ -156,6 +156,7 @@ added directly to your configured cluster. Those applications are needed for
| [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) | 10.2+ | Ingress can provide load balancing, SSL termination, and name-based virtual hosting. It acts as a web proxy for your applications and is useful if you want to use [Auto DevOps] or deploy your own web apps. |
| [Prometheus](https://prometheus.io/docs/introduction/overview/) | 10.4+ | Prometheus is an open-source monitoring and alerting system useful to supervise your deployed applications |
| [GitLab Runner](https://docs.gitlab.com/runner/) | 10.6+ | GitLab Runner is the open source project that is used to run your jobs and send the results back to GitLab. It is used in conjunction with [GitLab CI/CD](https://about.gitlab.com/features/gitlab-ci-cd/), the open-source continuous integration service included with GitLab that coordinates the jobs. When installing the GitLab Runner via the applications, it will run in **privileged mode** by default. Make sure you read the [security implications](#security-implications) before doing so. |
| [JupyterHub](http://jupyter.org/) | 11.0+ | The Jupyter Notebook is an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and narrative text. |
## Getting the external IP address
......
...@@ -14,8 +14,11 @@ module Gitlab
DEFAULT_SCOPES = [:api].freeze
class << self
<<<<<<< HEAD
prepend EE::Gitlab::Auth
=======
>>>>>>> upstream/master
def omniauth_customized_providers
@omniauth_customized_providers ||= %w[bitbucket jwt]
end
......
...@@ -7,8 +7,10 @@ module Gitlab
end
def value
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
@value ||= count_commits
end
end
private
......
...@@ -1189,6 +1189,7 @@ module Gitlab
end
def compare_source_branch(target_branch_name, source_repository, source_branch_name, straight:)
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
with_repo_branch_commit(source_repository, source_branch_name) do |commit|
break unless commit
...@@ -1200,6 +1201,7 @@ module Gitlab
)
end
end
end
def write_ref(ref_path, ref, old_ref: nil, shell: true)
ref_path = "#{Gitlab::Git::BRANCH_REF_PREFIX}#{ref_path}" unless ref_path.start_with?("refs/") || ref_path == "HEAD"
...@@ -1459,7 +1461,7 @@ module Gitlab
gitaly_repository_client.cleanup if is_enabled && exists?
end
rescue Gitlab::Git::CommandError => e # Don't fail if we can't cleanup
Rails.logger.error("Unable to clean repository on storage #{storage} with path #{path}: #{e.message}")
Rails.logger.error("Unable to clean repository on storage #{storage} with relative path #{relative_path}: #{e.message}")
Gitlab::Metrics.counter(
:failed_repository_cleanup_total,
'Number of failed repository cleanup events'
......
...@@ -35,7 +35,7 @@ module Gitlab
def initialize(storage, logger = Rails.logger)
@storage = storage
config = Gitlab.config.repositories.storages[@storage]
@storage_path = config.legacy_disk_path
@storage_path = Gitlab::GitalyClient::StorageSettings.allow_disk_access { config.legacy_disk_path }
@logger = logger
@hostname = Gitlab::Environment.hostname
......
...@@ -22,7 +22,7 @@ module Gitlab
def self.build(storage, hostname = Gitlab::Environment.hostname)
config = Gitlab.config.repositories.storages[storage]
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
if !config.present?
NullCircuitBreaker.new(storage, hostname, error: Misconfiguration.new("Storage '#{storage}' is not configured"))
elsif !config.legacy_disk_path.present?
...@@ -31,6 +31,7 @@ module Gitlab
new(storage, hostname)
end
end
end
def initialize(storage, hostname)
@storage = storage
......
...@@ -33,6 +33,11 @@ module Gitlab
MAXIMUM_GITALY_CALLS = 35
CLIENT_NAME = (Sidekiq.server? ? 'gitlab-sidekiq' : 'gitlab-web').freeze
# We have a mechanism to let GitLab automatically opt in to all Gitaly
# features. We want to be able to exclude some features from automatic
# opt-in. That is what EXPLICIT_OPT_IN_REQUIRED is for.
EXPLICIT_OPT_IN_REQUIRED = [Gitlab::GitalyClient::StorageSettings::DISK_ACCESS_DENIED_FLAG].freeze
MUTEX = Mutex.new
class << self
...@@ -234,7 +239,7 @@ module Gitlab
when MigrationStatus::OPT_OUT
true
when MigrationStatus::OPT_IN
opt_into_all_features?
opt_into_all_features? && !EXPLICIT_OPT_IN_REQUIRED.include?(feature_name)
else
false
end
......
...@@ -4,6 +4,8 @@ module Gitlab
# where production code (app, config, db, lib) touches Git repositories
# directly.
class StorageSettings
extend Gitlab::TemporarilyAllow
DirectPathAccessError = Class.new(StandardError)
InvalidConfigurationError = Class.new(StandardError)
...@@ -17,7 +19,21 @@ module Gitlab
# This class will give easily recognizable NoMethodErrors
Deprecated = Class.new
attr_reader :legacy_disk_path
MUTEX = Mutex.new
DISK_ACCESS_DENIED_FLAG = :deny_disk_access
ALLOW_KEY = :allow_disk_access
# If your code needs this method then your code needs to be fixed.
def self.allow_disk_access
temporarily_allow(ALLOW_KEY) { yield }
end
def self.disk_access_denied?
!temporarily_allowed?(ALLOW_KEY) && GitalyClient.feature_enabled?(DISK_ACCESS_DENIED_FLAG)
rescue
false # Err on the side of caution, don't break gitlab for people
end
def initialize(storage)
raise InvalidConfigurationError, "expected a Hash, got a #{storage.class.name}" unless storage.is_a?(Hash)
...@@ -34,6 +50,14 @@ module Gitlab
@hash.fetch(:gitaly_address)
end
def legacy_disk_path
if self.class.disk_access_denied?
raise DirectPathAccessError, "git disk access denied via the gitaly_#{DISK_ACCESS_DENIED_FLAG} feature"
end
@legacy_disk_path
end
private
def method_missing(m, *args, &block)
......
...@@ -77,8 +77,10 @@ module Gitlab
end
def storage_path(storage_name)
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
storages_paths[storage_name]&.legacy_disk_path
end
end
# All below test methods use shell commands to perform actions on storage volumes.
# In case a storage volume have connectivity problems causing pure Ruby IO operation to wait indefinitely,
......
module Gitlab
module TemporarilyAllow
TEMPORARILY_ALLOW_MUTEX = Mutex.new
def temporarily_allow(key)
temporarily_allow_add(key, 1)
yield
ensure
temporarily_allow_add(key, -1)
end
def temporarily_allowed?(key)
if RequestStore.active?
temporarily_allow_request_store[key] > 0
else
TEMPORARILY_ALLOW_MUTEX.synchronize do
temporarily_allow_ivar[key] > 0
end
end
end
private
def temporarily_allow_ivar
@temporarily_allow ||= Hash.new(0)
end
def temporarily_allow_request_store
RequestStore[:temporarily_allow] ||= Hash.new(0)
end
def temporarily_allow_add(key, value)
if RequestStore.active?
temporarily_allow_request_store[key] += value
else
TEMPORARILY_ALLOW_MUTEX.synchronize do
temporarily_allow_ivar[key] += value
end
end
end
end
end
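The mixin above is what the new StorageSettings guard is built on. As a minimal, hedged sketch (assuming the Gitlab::TemporarilyAllow module above and the request_store gem it references are loaded; DiskGuard itself is purely illustrative), this is the intended usage pattern: a predicate stays true until a caller explicitly opts in for the duration of a block, exactly as StorageSettings.allow_disk_access and disk_access_denied? do.

require 'request_store' # the mixin falls back to a mutex-guarded ivar outside a request

class DiskGuard
  extend Gitlab::TemporarilyAllow

  ALLOW_KEY = :allow_disk_access # mirrors StorageSettings::ALLOW_KEY

  # Wrap code that genuinely needs the legacy on-disk path.
  def self.allow_disk_access
    temporarily_allow(ALLOW_KEY) { yield }
  end

  # True unless the caller is currently inside an allow_disk_access block.
  def self.disk_access_denied?
    !temporarily_allowed?(ALLOW_KEY)
  end
end

DiskGuard.disk_access_denied?                                  # => true
DiskGuard.allow_disk_access { DiskGuard.disk_access_denied? }  # => false inside the block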
...@@ -35,5 +35,8 @@ FactoryBot.define do
factory :clusters_applications_ingress, class: Clusters::Applications::Ingress
factory :clusters_applications_prometheus, class: Clusters::Applications::Prometheus
factory :clusters_applications_runner, class: Clusters::Applications::Runner
factory :clusters_applications_jupyter, class: Clusters::Applications::Jupyter do
oauth_application factory: :oauth_application
end
end
end
...@@ -31,7 +31,8 @@
}
},
"status_reason": { "type": ["string", "null"] },
"external_ip": { "type": ["string", "null"] }
"external_ip": { "type": ["string", "null"] },
"hostname": { "type": ["string", "null"] }
},
"required" : [ "name", "status" ]
}
......
...@@ -207,11 +207,11 @@ describe('Clusters', () => {
spyOn(cluster.service, 'installApplication').and.returnValue(Promise.resolve());
expect(cluster.store.state.applications.helm.requestStatus).toEqual(null);
cluster.installApplication('helm');
cluster.installApplication({ id: 'helm' });
expect(cluster.store.state.applications.helm.requestStatus).toEqual(REQUEST_LOADING);
expect(cluster.store.state.applications.helm.requestReason).toEqual(null);
expect(cluster.service.installApplication).toHaveBeenCalledWith('helm');
expect(cluster.service.installApplication).toHaveBeenCalledWith('helm', undefined);
getSetTimeoutPromise()
.then(() => {
...@@ -226,11 +226,11 @@ describe('Clusters', () => {
spyOn(cluster.service, 'installApplication').and.returnValue(Promise.resolve());
expect(cluster.store.state.applications.ingress.requestStatus).toEqual(null);
cluster.installApplication('ingress');
cluster.installApplication({ id: 'ingress' });
expect(cluster.store.state.applications.ingress.requestStatus).toEqual(REQUEST_LOADING);
expect(cluster.store.state.applications.ingress.requestReason).toEqual(null);
expect(cluster.service.installApplication).toHaveBeenCalledWith('ingress');
expect(cluster.service.installApplication).toHaveBeenCalledWith('ingress', undefined);
getSetTimeoutPromise()
.then(() => {
...@@ -245,11 +245,11 @@ describe('Clusters', () => {
spyOn(cluster.service, 'installApplication').and.returnValue(Promise.resolve());
expect(cluster.store.state.applications.runner.requestStatus).toEqual(null);
cluster.installApplication('runner');
cluster.installApplication({ id: 'runner' });
expect(cluster.store.state.applications.runner.requestStatus).toEqual(REQUEST_LOADING);
expect(cluster.store.state.applications.runner.requestReason).toEqual(null);
expect(cluster.service.installApplication).toHaveBeenCalledWith('runner');
expect(cluster.service.installApplication).toHaveBeenCalledWith('runner', undefined);
getSetTimeoutPromise()
.then(() => {
...@@ -260,11 +260,29 @@ describe('Clusters', () => {
.catch(done.fail);
});
it('tries to install jupyter', (done) => {
spyOn(cluster.service, 'installApplication').and.returnValue(Promise.resolve());
expect(cluster.store.state.applications.jupyter.requestStatus).toEqual(null);
cluster.installApplication({ id: 'jupyter', params: { hostname: cluster.store.state.applications.jupyter.hostname } });
expect(cluster.store.state.applications.jupyter.requestStatus).toEqual(REQUEST_LOADING);
expect(cluster.store.state.applications.jupyter.requestReason).toEqual(null);
expect(cluster.service.installApplication).toHaveBeenCalledWith('jupyter', { hostname: cluster.store.state.applications.jupyter.hostname });
getSetTimeoutPromise()
.then(() => {
expect(cluster.store.state.applications.jupyter.requestStatus).toEqual(REQUEST_SUCCESS);
expect(cluster.store.state.applications.jupyter.requestReason).toEqual(null);
})
.then(done)
.catch(done.fail);
});
it('sets error request status when the request fails', (done) => {
spyOn(cluster.service, 'installApplication').and.returnValue(Promise.reject(new Error('STUBBED ERROR')));
expect(cluster.store.state.applications.helm.requestStatus).toEqual(null);
cluster.installApplication('helm');
cluster.installApplication({ id: 'helm' });
expect(cluster.store.state.applications.helm.requestStatus).toEqual(REQUEST_LOADING);
expect(cluster.store.state.applications.helm.requestReason).toEqual(null);
......
...@@ -174,7 +174,27 @@ describe('Application Row', () => {
installButton.click();
expect(eventHub.$emit).toHaveBeenCalledWith('installApplication', DEFAULT_APPLICATION_STATE.id);
expect(eventHub.$emit).toHaveBeenCalledWith('installApplication', {
id: DEFAULT_APPLICATION_STATE.id,
params: {},
});
});
it('clicking install button when installApplicationRequestParams are provided emits event', () => {
spyOn(eventHub, '$emit');
vm = mountComponent(ApplicationRow, {
...DEFAULT_APPLICATION_STATE,
status: APPLICATION_INSTALLABLE,
installApplicationRequestParams: { hostname: 'jupyter' },
});
const installButton = vm.$el.querySelector('.js-cluster-application-install-button');
installButton.click();
expect(eventHub.$emit).toHaveBeenCalledWith('installApplication', {
id: DEFAULT_APPLICATION_STATE.id,
params: { hostname: 'jupyter' },
});
});
it('clicking disabled install button emits nothing', () => {
......
...@@ -22,6 +22,7 @@ describe('Applications', () => {
ingress: { title: 'Ingress' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub' },
},
});
});
...@@ -41,6 +42,10 @@ describe('Applications', () => {
it('renders a row for GitLab Runner', () => {
expect(vm.$el.querySelector('.js-cluster-application-row-runner')).toBeDefined();
});
it('renders a row for Jupyter', () => {
expect(vm.$el.querySelector('.js-cluster-application-row-jupyter')).not.toBe(null);
});
});
describe('Ingress application', () => {
...@@ -57,12 +62,11 @@ describe('Applications', () => {
helm: { title: 'Helm Tiller' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', hostname: '' },
},
});
expect(
expect(vm.$el.querySelector('.js-ip-address').value).toEqual('0.0.0.0');
vm.$el.querySelector('.js-ip-address').value,
).toEqual('0.0.0.0');
expect(
vm.$el.querySelector('.js-clipboard-btn').getAttribute('data-clipboard-text'),
...@@ -81,12 +85,11 @@ describe('Applications', () => {
helm: { title: 'Helm Tiller' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', hostname: '' },
},
});
expect(
expect(vm.$el.querySelector('.js-ip-address').value).toEqual('?');
vm.$el.querySelector('.js-ip-address').value,
).toEqual('?');
expect(vm.$el.querySelector('.js-no-ip-message')).not.toBe(null);
});
...@@ -101,6 +104,7 @@ describe('Applications', () => {
ingress: { title: 'Ingress' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', hostname: '' },
},
});
...@@ -108,5 +112,83 @@ describe('Applications', () => {
expect(vm.$el.querySelector('.js-ip-address')).toBe(null);
});
});
describe('Jupyter application', () => {
describe('with ingress installed with ip & jupyter installable', () => {
it('renders hostname active input', () => {
vm = mountComponent(Applications, {
applications: {
helm: { title: 'Helm Tiller', status: 'installed' },
ingress: { title: 'Ingress', status: 'installed', externalIp: '1.1.1.1' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', hostname: '', status: 'installable' },
},
});
expect(vm.$el.querySelector('.js-hostname').getAttribute('readonly')).toEqual(null);
});
});
describe('with ingress installed without external ip', () => {
it('does not render hostname input', () => {
vm = mountComponent(Applications, {
applications: {
helm: { title: 'Helm Tiller', status: 'installed' },
ingress: { title: 'Ingress', status: 'installed' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', hostname: '', status: 'installable' },
},
});
expect(vm.$el.querySelector('.js-hostname')).toBe(null);
});
});
describe('with ingress & jupyter installed', () => {
it('renders readonly input', () => {
vm = mountComponent(Applications, {
applications: {
helm: { title: 'Helm Tiller', status: 'installed' },
ingress: { title: 'Ingress', status: 'installed', externalIp: '1.1.1.1' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', status: 'installed', hostname: '' },
},
});
expect(vm.$el.querySelector('.js-hostname').getAttribute('readonly')).toEqual('readonly');
});
});
describe('without ingress installed', () => {
beforeEach(() => {
vm = mountComponent(Applications, {
applications: {
helm: { title: 'Helm Tiller' },
ingress: { title: 'Ingress' },
runner: { title: 'GitLab Runner' },
prometheus: { title: 'Prometheus' },
jupyter: { title: 'JupyterHub', status: 'not_installable' },
},
});
});
it('does not render input', () => {
expect(vm.$el.querySelector('.js-hostname')).toBe(null);
});
it('renders disabled install button', () => {
expect(
vm.$el
.querySelector(
'.js-cluster-application-row-jupyter .js-cluster-application-install-button',
)
.getAttribute('disabled'),
).toEqual('disabled');
});
});
});
});
});
import {
APPLICATION_INSTALLED,
APPLICATION_INSTALLABLE,
APPLICATION_INSTALLING,
APPLICATION_ERROR,
...@@ -28,6 +29,39 @@ const CLUSTERS_MOCK_DATA = {
name: 'prometheus',
status: APPLICATION_ERROR,
status_reason: 'Cannot connect',
}, {
name: 'jupyter',
status: APPLICATION_INSTALLING,
status_reason: 'Cannot connect',
}],
},
},
'/gitlab-org/gitlab-shell/clusters/2/status.json': {
data: {
status: 'errored',
status_reason: 'Failed to request to CloudPlatform.',
applications: [{
name: 'helm',
status: APPLICATION_INSTALLED,
status_reason: null,
}, {
name: 'ingress',
status: APPLICATION_INSTALLED,
status_reason: 'Cannot connect',
external_ip: '1.1.1.1',
}, {
name: 'runner',
status: APPLICATION_INSTALLING,
status_reason: null,
},
{
name: 'prometheus',
status: APPLICATION_ERROR,
status_reason: 'Cannot connect',
}, {
name: 'jupyter',
status: APPLICATION_INSTALLABLE,
status_reason: 'Cannot connect',
}],
},
},
...@@ -37,6 +71,7 @@ const CLUSTERS_MOCK_DATA = {
'/gitlab-org/gitlab-shell/clusters/1/applications/ingress': { },
'/gitlab-org/gitlab-shell/clusters/1/applications/runner': { },
'/gitlab-org/gitlab-shell/clusters/1/applications/prometheus': { },
'/gitlab-org/gitlab-shell/clusters/1/applications/jupyter': { },
},
};
......
...@@ -91,8 +91,26 @@ describe('Clusters Store', () => {
requestStatus: null,
requestReason: null,
},
jupyter: {
title: 'JupyterHub',
status: mockResponseData.applications[4].status,
statusReason: mockResponseData.applications[4].status_reason,
requestStatus: null,
requestReason: null,
hostname: '',
}, },
},
});
}); });
it('sets default hostname for jupyter when ingress has a ip address', () => {
const mockResponseData = CLUSTERS_MOCK_DATA.GET['/gitlab-org/gitlab-shell/clusters/2/status.json'].data;
store.updateStateFromServer(mockResponseData);
expect(
store.state.applications.jupyter.hostname,
).toEqual(`jupyter.${store.state.applications.ingress.externalIp}.xip.io`);
}); });
}); });
}); });
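The `xip.io` expectation above relies on the store suggesting a JupyterHub hostname once Ingress reports an external IP. A minimal sketch of that derivation, assuming a standalone helper (the function name is illustrative, not part of the actual store code):

// Hypothetical helper, for illustration only: mirrors the
// `jupyter.<ingress external IP>.xip.io` default asserted in the spec above.
function defaultJupyterHostname(currentHostname, ingressExternalIp) {
  // keep any hostname the user already typed
  if (currentHostname) return currentHostname;
  // without an Ingress IP there is nothing sensible to suggest
  if (!ingressExternalIp) return '';
  // xip.io wildcard DNS resolves jupyter.<ip>.xip.io back to <ip>
  return `jupyter.${ingressExternalIp}.xip.io`;
}

// defaultJupyterHostname('', '1.1.1.1') => 'jupyter.1.1.1.1.xip.io'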
...@@ -299,7 +299,11 @@ describe Gitlab::BackgroundMigration::DeserializeMergeRequestDiffsAndCommits, :m
let(:commits) { merge_request_diff.commits.map(&:to_hash) }
let(:first_commit) { project.repository.commit(merge_request_diff.head_commit_sha) }
let(:expected_commits) { commits }
let(:diffs) { first_commit.rugged_diff_from_parent.patches }
let(:diffs) do
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
first_commit.rugged_diff_from_parent.patches
end
end
let(:expected_diffs) { [] }
include_examples 'updated MR diff'
...@@ -309,7 +313,11 @@ describe Gitlab::BackgroundMigration::DeserializeMergeRequestDiffsAndCommits, :m
let(:commits) { merge_request_diff.commits.map(&:to_hash) }
let(:first_commit) { project.repository.commit(merge_request_diff.head_commit_sha) }
let(:expected_commits) { commits }
let(:diffs) { first_commit.rugged_diff_from_parent.deltas }
let(:diffs) do
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
first_commit.rugged_diff_from_parent.deltas
end
end
let(:expected_diffs) { [] }
include_examples 'updated MR diff'
...
...@@ -6,7 +6,9 @@ describe Gitlab::Checks::LfsIntegrity do
let(:project) { create(:project, :repository) }
let(:repository) { project.repository }
let(:newrev) do
operations = BareRepoOperations.new(repository.path)
operations = Gitlab::GitalyClient::StorageSettings.allow_disk_access do
BareRepoOperations.new(repository.path)
end
# Create a commit not pointed at by any ref to emulate being in the
# pre-receive hook so that `--not --all` returns some objects
...
...@@ -3,7 +3,7 @@ require 'spec_helper'
describe Gitlab::Conflict::File do
let(:project) { create(:project, :repository) }
let(:repository) { project.repository }
let(:rugged) { repository.rugged }
let(:rugged) { Gitlab::GitalyClient::StorageSettings.allow_disk_access { repository.rugged } }
let(:their_commit) { rugged.branches['conflict-start'].target }
let(:our_commit) { rugged.branches['conflict-resolvable'].target }
let(:merge_request) { create(:merge_request, source_branch: 'conflict-resolvable', target_branch: 'conflict-start', source_project: project) }
...
This diff is collapsed.
...@@ -433,7 +433,7 @@ describe Notify do
aggregate_failures do
is_expected.to have_referable_subject(merge_request, reply: true)
is_expected.to have_body_text(project_merge_request_path(project, merge_request))
is_expected.to have_body_text('reasons:')
is_expected.to have_body_text('following reasons:')
reasons.each do |reason|
is_expected.to have_body_text(reason)
end
...
require 'rails_helper'
describe Clusters::Applications::Jupyter do
include_examples 'cluster application core specs', :clusters_applications_jupyter
it { is_expected.to belong_to(:oauth_application) }
describe '#set_initial_status' do
before do
jupyter.set_initial_status
end
context 'when ingress is not installed' do
let(:cluster) { create(:cluster, :provided_by_gcp) }
let(:jupyter) { create(:clusters_applications_jupyter, cluster: cluster) }
it { expect(jupyter).to be_not_installable }
end
context 'when ingress is installed and external_ip is assigned' do
let(:ingress) { create(:clusters_applications_ingress, :installed, external_ip: '127.0.0.1') }
let(:jupyter) { create(:clusters_applications_jupyter, cluster: ingress.cluster) }
it { expect(jupyter).to be_installable }
end
end
describe '#install_command' do
let!(:ingress) { create(:clusters_applications_ingress, :installed, external_ip: '127.0.0.1') }
let!(:jupyter) { create(:clusters_applications_jupyter, cluster: ingress.cluster) }
subject { jupyter.install_command }
it { is_expected.to be_an_instance_of(Gitlab::Kubernetes::Helm::InstallCommand) }
it 'should be initialized with 4 arguments' do
expect(subject.name).to eq('jupyter')
expect(subject.chart).to eq('jupyter/jupyterhub')
expect(subject.repository).to eq('https://jupyterhub.github.io/helm-chart/')
expect(subject.values).to eq(jupyter.values)
end
end
describe '#values' do
let(:jupyter) { create(:clusters_applications_jupyter) }
subject { jupyter.values }
it 'should include valid values' do
is_expected.to include('ingress')
is_expected.to include('hub')
is_expected.to include('rbac')
is_expected.to include('proxy')
is_expected.to include('auth')
is_expected.to include("clientId: #{jupyter.oauth_application.uid}")
is_expected.to include("callbackUrl: #{jupyter.callback_url}")
end
end
end
...@@ -234,9 +234,10 @@ describe Clusters::Cluster do
let!(:ingress) { create(:clusters_applications_ingress, cluster: cluster) }
let!(:prometheus) { create(:clusters_applications_prometheus, cluster: cluster) }
let!(:runner) { create(:clusters_applications_runner, cluster: cluster) }
let!(:jupyter) { create(:clusters_applications_jupyter, cluster: cluster) }
it 'returns a list of created applications' do
is_expected.to contain_exactly(helm, ingress, prometheus, runner)
is_expected.to contain_exactly(helm, ingress, prometheus, runner, jupyter)
end
end
end
...
...@@ -7,7 +7,10 @@ RSpec.configure do |config|
next if example.metadata[:skip_gitaly_mock]
# Use 'and_wrap_original' to make sure the arguments are valid
allow(Gitlab::GitalyClient).to receive(:feature_enabled?).and_wrap_original { |m, *args| m.call(*args) || true }
allow(Gitlab::GitalyClient).to receive(:feature_enabled?).and_wrap_original do |m, *args|
m.call(*args)
!Gitlab::GitalyClient::EXPLICIT_OPT_IN_REQUIRED.include?(args.first)
end
end
end
end
rbac:
  enabled: false
hub:
  extraEnv:
    JUPYTER_ENABLE_LAB: 1
  extraConfig: |
    c.KubeSpawner.cmd = ['jupyter-labhub']
auth:
  type: gitlab
singleuser:
  defaultUrl: "/lab"
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: "nginx"
...@@ -8933,9 +8933,9 @@ worker-farm@^1.5.2:
errno "^0.1.4"
xtend "^4.0.1"
worker-loader@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/worker-loader/-/worker-loader-1.1.1.tgz#920d74ddac6816fc635392653ed8b4af1929fd92"
worker-loader@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/worker-loader/-/worker-loader-2.0.0.tgz#45fda3ef76aca815771a89107399ee4119b430ac"
dependencies:
loader-utils "^1.0.0"
schema-utils "^0.4.0"
...