Commit 39dd1c66 authored by Eric Eastwood

Merge branch '38464-k8s-apps' into add-ingress-to-cluster-applications

parents 6d2a564d 14fcd394
......@@ -413,8 +413,9 @@ export default class Notes {
return;
}
this.note_ids.push(noteEntity.id);
form = $form || $(`.js-discussion-note-form[data-discussion-id="${noteEntity.discussion_id}"]`);
row = form.closest('tr');
row = (form.length || !noteEntity.discussion_line_code) ? form.closest('tr') : $(`#${noteEntity.discussion_line_code}`);
if (noteEntity.on_image) {
row = form;
......
......@@ -109,6 +109,8 @@ module NotesActions
diff_discussion_html: diff_discussion_html(discussion),
discussion_html: discussion_html(discussion)
)
attrs[:discussion_line_code] = discussion.line_code if discussion.diff_discussion?
end
end
else
......
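Taken together, the two hunks above let the frontend place a new diff discussion in "realtime" even when no note form is open: the server now includes the discussion's line code for diff discussions, and notes.js falls back to the diff row whose DOM id matches it. A rough sketch of the note payload involved, with illustrative values (the line-code format is an assumption):

# Hypothetical note payload for a new diff discussion (values illustrative).
# When no `.js-discussion-note-form` exists for the discussion, notes.js uses
# discussion_line_code to locate the diff row by its element id.
{
  id: 1,
  discussion_id: 'abc123',
  diff_discussion_html: '<tr class="notes_holder">...</tr>',
  discussion_html: '<li class="note">...</li>',
  discussion_line_code: '2f6fcd96b88b36ce98c38da085c795a27d92a3dd_10_10'
}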
......@@ -5,14 +5,12 @@ class Projects::Clusters::ApplicationsController < Projects::ApplicationControll
before_action :authorize_create_cluster!, only: [:create]
def create
scheduled = Clusters::Applications::ScheduleInstallationService.new(project, current_user,
application_class: @application_class,
cluster: @cluster).execute
if scheduled
head :no_content
else
head :bad_request
end
Clusters::Applications::ScheduleInstallationService.new(project, current_user,
application_class: @application_class,
cluster: @cluster).execute
head :no_content
rescue StandardError
head :bad_request
end
private
......
......@@ -18,7 +18,7 @@ module Clusters
end
def set_initial_status
self.status = 0 unless cluster.platform_kubernetes_active?
self.status = 0 unless cluster&.platform_kubernetes_active?
end
def name
......
......@@ -25,7 +25,7 @@ module Clusters
end
event :make_scheduled do
transition any - [:scheduled] => :scheduled
transition %i(installable errored) => :scheduled
end
before_transition any => [:scheduled] do |app_status, _|
......
......@@ -55,7 +55,7 @@ module Clusters
before_transition any => [:creating] do |provider, transition|
operation_id = transition.args.first
raise 'operation_id is required' unless operation_id
raise ArgumentError.new('operation_id is required') unless operation_id.present?
provider.operation_id = operation_id
end
......
module RepositoryMirroring
IMPORT_HEAD_REFS = '+refs/heads/*:refs/heads/*'.freeze
IMPORT_TAG_REFS = '+refs/tags/*:refs/tags/*'.freeze
def set_remote_as_mirror(name)
# This is used to define repository as equivalent as "git clone --mirror"
raw_repository.rugged.config["remote.#{name}.fetch"] = 'refs/*:refs/*'
raw_repository.rugged.config["remote.#{name}.mirror"] = true
raw_repository.rugged.config["remote.#{name}.prune"] = true
end
def set_import_remote_as_mirror(remote_name)
# Add first fetch with Rugged so it does not create its own.
raw_repository.rugged.config["remote.#{remote_name}.fetch"] = IMPORT_HEAD_REFS
add_remote_fetch_config(remote_name, IMPORT_TAG_REFS)
raw_repository.rugged.config["remote.#{remote_name}.mirror"] = true
raw_repository.rugged.config["remote.#{remote_name}.prune"] = true
end
def add_remote_fetch_config(remote_name, refspec)
run_git(%W[config --add remote.#{remote_name}.fetch #{refspec}])
end
def fetch_mirror(remote, url)
add_remote(remote, url)
set_remote_as_mirror(remote)
fetch_remote(remote, forced: true)
remove_remote(remote)
end
end
......@@ -30,7 +30,6 @@ class Environment < ActiveRecord::Base
message: Gitlab::Regex.environment_slug_regex_message }
validates :external_url,
uniqueness: { scope: :project_id },
length: { maximum: 255 },
allow_nil: true,
addressable_url: true
......
......@@ -1687,6 +1687,10 @@ class Project < ActiveRecord::Base
Gitlab::GlRepository.gl_repository(self, is_wiki)
end
def reference_counter(wiki: false)
Gitlab::ReferenceCounter.new(gl_repository(is_wiki: wiki))
end
private
def storage
......@@ -1705,11 +1709,11 @@ class Project < ActiveRecord::Base
end
def repo_reference_count
Gitlab::ReferenceCounter.new(gl_repository(is_wiki: false)).value
reference_counter.value
end
def wiki_reference_count
Gitlab::ReferenceCounter.new(gl_repository(is_wiki: true)).value
reference_counter(wiki: true).value
end
def check_repository_absence!
......
......@@ -135,7 +135,7 @@ class ProjectWiki
end
def repository
@repository ||= Repository.new(full_path, @project, disk_path: disk_path)
@repository ||= Repository.new(full_path, @project, disk_path: disk_path, is_wiki: true)
end
def default_branch
......
......@@ -15,9 +15,8 @@ class Repository
].freeze
include Gitlab::ShellAdapter
include RepositoryMirroring
attr_accessor :full_path, :disk_path, :project
attr_accessor :full_path, :disk_path, :project, :is_wiki
delegate :ref_name_for_sha, to: :raw_repository
......@@ -72,11 +71,12 @@ class Repository
end
end
def initialize(full_path, project, disk_path: nil)
def initialize(full_path, project, disk_path: nil, is_wiki: false)
@full_path = full_path
@disk_path = disk_path || full_path
@project = project
@commit_cache = {}
@is_wiki = is_wiki
end
def ==(other)
......@@ -965,21 +965,8 @@ class Repository
run_git(args).first.lines.map(&:strip)
end
def add_remote(name, url)
raw_repository.remote_add(name, url)
rescue Rugged::ConfigError
raw_repository.remote_update(name, url: url)
end
def remove_remote(name)
raw_repository.remote_delete(name)
true
rescue Rugged::ConfigError
false
end
def fetch_remote(remote, forced: false, no_tags: false)
gitlab_shell.fetch_remote(raw_repository, remote, forced: forced, no_tags: no_tags)
def fetch_remote(remote, forced: false, ssh_auth: nil, no_tags: false)
gitlab_shell.fetch_remote(raw_repository, remote, ssh_auth: ssh_auth, forced: forced, no_tags: no_tags)
end
def fetch_source_branch(source_repository, source_branch, local_ref)
......@@ -1141,7 +1128,7 @@ class Repository
end
def initialize_raw_repository
Gitlab::Git::Repository.new(project.repository_storage, disk_path + '.git', Gitlab::GlRepository.gl_repository(project, false))
Gitlab::Git::Repository.new(project.repository_storage, disk_path + '.git', Gitlab::GlRepository.gl_repository(project, is_wiki))
end
def find_commits_by_message_by_shelling_out(query, ref, path, limit, offset)
......
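The is_wiki flag introduced above threads from ProjectWiki#repository down to the raw repository, so a wiki repository now resolves its own gl_repository identifier instead of the project one. A minimal sketch of the effect (ids and paths are illustrative; the Repository spec further down exercises the same behaviour):

# Illustrative only: the wiki flag decides which gl_repository identifier
# the underlying Gitlab::Git::Repository is built with.
project_repo = Repository.new(project.full_path, project, disk_path: project.disk_path)
wiki_repo    = Repository.new(project.wiki.full_path, project,
                              disk_path: project.wiki.disk_path, is_wiki: true)

project_repo.raw_repository.gl_repository # => "project-42" (assuming project id 42)
wiki_repo.raw_repository.gl_repository    # => "wiki-42"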
class ClusterAppEntity < Grape::Entity
class ClusterApplicationEntity < Grape::Entity
expose :name
expose :status_name, as: :status
expose :status_reason
......
......@@ -3,5 +3,5 @@ class ClusterEntity < Grape::Entity
expose :status_name, as: :status
expose :status_reason
expose :applications, using: ClusterAppEntity
expose :applications, using: ClusterApplicationEntity
end
......@@ -6,7 +6,7 @@ module Clusters
case installation_phase
when Gitlab::Kubernetes::Pod::SUCCEEDED
on_succeeded
finalize_installation
when Gitlab::Kubernetes::Pod::FAILED
on_failed
else
......@@ -18,22 +18,15 @@ module Clusters
private
def on_succeeded
if app.make_installed
finalize_installation
else
app.make_errored!("Failed to update app record; #{app.errors}")
end
end
def on_failed
app.make_errored!(log || 'Installation silently failed')
app.make_errored!(installation_errors || 'Installation silently failed')
finalize_installation
end
def check_timeout
if Time.now.utc - app.updated_at.to_time.utc > ClusterWaitForAppInstallationWorker::TIMEOUT
app.make_errored!('App installation timeouted')
app.make_errored!('Installation timeouted')
finalize_installation
else
ClusterWaitForAppInstallationWorker.perform_in(
ClusterWaitForAppInstallationWorker::INTERVAL, app.name, app.id)
......
......@@ -4,13 +4,7 @@ module Clusters
def execute
helm_api.delete_installation_pod!(app)
app.make_errored!('Installation aborted') if aborted?
end
private
def aborted?
app.installing? || app.scheduled?
app.make_installed! if app.installing?
end
end
end
......
......@@ -5,14 +5,11 @@ module Clusters
return unless app.scheduled?
begin
app.make_installing!
helm_api.install(app)
if app.make_installing
ClusterWaitForAppInstallationWorker.perform_in(
ClusterWaitForAppInstallationWorker::INTERVAL, app.name, app.id)
else
app.make_errored!("Failed to update app record; #{app.errors}")
end
ClusterWaitForAppInstallationWorker.perform_in(
ClusterWaitForAppInstallationWorker::INTERVAL, app.name, app.id)
rescue KubeException => ke
app.make_errored!("Kubernetes error: #{ke.message}")
rescue StandardError
......
......@@ -2,15 +2,10 @@ module Clusters
module Applications
class ScheduleInstallationService < ::BaseService
def execute
application = application_class.find_or_create_by!(cluster: cluster)
application.make_scheduled!
ClusterInstallAppWorker.perform_async(application.name, application.id)
true
rescue ActiveRecord::RecordInvalid
false
rescue StateMachines::InvalidTransition
false
application_class.find_or_create_by!(cluster: cluster).try do |application|
application.make_scheduled!
ClusterInstallAppWorker.perform_async(application.name, application.id)
end
end
private
......
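With this change the service no longer signals failure through a boolean return value: find_or_create_by! raises ActiveRecord::RecordInvalid when the record cannot be persisted, and make_scheduled! raises StateMachines::InvalidTransition when the application is not :installable or :errored. Both bubble up to the controller's rescue StandardError shown earlier in this diff, which answers with a 400. A condensed sketch of those failure paths (not a literal excerpt):

# Illustrative sketch of the failure paths rescued by the controller:
app = Clusters::Applications::Helm.find_or_create_by!(cluster: cluster)
# => ActiveRecord::RecordInvalid if validations fail

app.make_scheduled!
# => StateMachines::InvalidTransition unless the current status is
#    :installable or :errored (see the helm state-machine hunk above)

# Either exception reaches the controller's `rescue StandardError`,
# which renders `head :bad_request`.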
......@@ -44,7 +44,7 @@ module Projects
else
clone_repository
end
rescue Gitlab::Shell::Error => e
rescue Gitlab::Shell::Error, Gitlab::Git::RepositoryMirroring::RemoteError => e
# Expire cache to prevent scenarios such as:
# 1. First import failed, but the repo was imported successfully, so +exists?+ returns true
# 2. Retried import, repo is broken or not imported but +exists?+ still returns true
......
---
title: Add new diff discussions on MR diffs tab in "realtime"
merge_request: 14981
author:
type: fixed
......@@ -472,8 +472,6 @@ ActiveRecord::Schema.define(version: 20171106101200) do
t.string "encrypted_password_iv"
t.text "encrypted_token"
t.string "encrypted_token_iv"
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
end
add_index "cluster_platforms_kubernetes", ["cluster_id"], name: "index_cluster_platforms_kubernetes_on_cluster_id", unique: true, using: :btree
......@@ -494,14 +492,11 @@ ActiveRecord::Schema.define(version: 20171106101200) do
t.text "status_reason"
t.string "gcp_project_id", null: false
t.string "zone", null: false
t.integer "num_nodes", null: false
t.string "machine_type"
t.string "operation_id"
t.string "endpoint"
t.text "encrypted_access_token"
t.string "encrypted_access_token_iv"
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
end
add_index "cluster_providers_gcp", ["cluster_id"], name: "index_cluster_providers_gcp_on_cluster_id", unique: true, using: :btree
......@@ -510,12 +505,11 @@ ActiveRecord::Schema.define(version: 20171106101200) do
t.integer "user_id"
t.boolean "enabled", default: true
t.string "name", null: false
t.integer "provider_type"
t.integer "platform_type"
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
end
add_index "clusters", ["enabled"], name: "index_clusters_on_enabled", using: :btree
add_index "clusters", ["user_id"], name: "index_clusters_on_user_id", using: :btree
create_table "clusters_applications_helm", force: :cascade do |t|
t.integer "cluster_id", null: false
t.datetime_with_timezone "created_at", null: false
......
......@@ -60,7 +60,9 @@ module Github
project.repository.set_import_remote_as_mirror('github')
project.repository.add_remote_fetch_config('github', '+refs/pull/*/head:refs/merge-requests/*/head')
fetch_remote(forced: true)
rescue Gitlab::Git::Repository::NoRepository, Gitlab::Shell::Error => e
rescue Gitlab::Git::Repository::NoRepository,
Gitlab::Git::RepositoryMirroring::RemoteError,
Gitlab::Shell::Error => e
error(:project, repo_url, e.message)
raise Github::RepositoryFetchError
end
......
......@@ -6,6 +6,7 @@ require "rubygems/package"
module Gitlab
module Git
class Repository
include Gitlab::Git::RepositoryMirroring
include Gitlab::Git::Popen
ALLOWED_OBJECT_DIRECTORIES_VARIABLES = %w[
......@@ -898,16 +899,25 @@ module Gitlab
end
end
# Delete the specified remote from this repository.
def remote_delete(remote_name)
rugged.remotes.delete(remote_name)
nil
def add_remote(remote_name, url)
rugged.remotes.create(remote_name, url)
rescue Rugged::ConfigError
remote_update(remote_name, url: url)
end
# Add a new remote to this repository.
def remote_add(remote_name, url)
rugged.remotes.create(remote_name, url)
nil
def remove_remote(remote_name)
# When a remote is deleted all its remote refs are deleted too, but in
# the case of mirrors we map its refs (that would usually go under
# [remote_name]/) to the top level namespace. We clean the mapping so
# those don't get deleted.
if rugged.config["remote.#{remote_name}.mirror"]
rugged.config.delete("remote.#{remote_name}.fetch")
end
rugged.remotes.delete(remote_name)
true
rescue Rugged::ConfigError
false
end
# Update the specified remote using the values in the +options+ hash
......
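For context on the mirror cleanup above: set_remote_as_mirror (added later in this diff) gives the remote a catch-all refspec, so Rugged would otherwise treat the repository's own top-level refs as belonging to the remote and drop them when it is deleted. Roughly the state remove_remote has to unwind first, sketched with an assumed remote named 'mirror':

# Illustrative: config written for a mirror remote named 'mirror'.
rugged.config["remote.mirror.fetch"]  # => "refs/*:refs/*"
rugged.config["remote.mirror.mirror"] # => "true"

rugged.config.delete("remote.mirror.fetch") # keep top-level refs intact
rugged.remotes.delete("mirror")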
module Gitlab
module Git
module RepositoryMirroring
IMPORT_HEAD_REFS = '+refs/heads/*:refs/heads/*'.freeze
IMPORT_TAG_REFS = '+refs/tags/*:refs/tags/*'.freeze
MIRROR_REMOTE = 'mirror'.freeze
RemoteError = Class.new(StandardError)
def set_remote_as_mirror(remote_name)
# This is used to define repository as equivalent as "git clone --mirror"
rugged.config["remote.#{remote_name}.fetch"] = 'refs/*:refs/*'
rugged.config["remote.#{remote_name}.mirror"] = true
rugged.config["remote.#{remote_name}.prune"] = true
end
def set_import_remote_as_mirror(remote_name)
# Add first fetch with Rugged so it does not create its own.
rugged.config["remote.#{remote_name}.fetch"] = IMPORT_HEAD_REFS
add_remote_fetch_config(remote_name, IMPORT_TAG_REFS)
rugged.config["remote.#{remote_name}.mirror"] = true
rugged.config["remote.#{remote_name}.prune"] = true
end
def add_remote_fetch_config(remote_name, refspec)
run_git(%W[config --add remote.#{remote_name}.fetch #{refspec}])
end
def fetch_mirror(url)
add_remote(MIRROR_REMOTE, url)
set_remote_as_mirror(MIRROR_REMOTE)
fetch(MIRROR_REMOTE)
remove_remote(MIRROR_REMOTE)
end
def remote_tags(remote)
# Each line has this format: "dc872e9fa6963f8f03da6c8f6f264d0845d6b092\trefs/tags/v1.10.0\n"
# We want to convert it to: [{ 'v1.10.0' => 'dc872e9fa6963f8f03da6c8f6f264d0845d6b092' }, ...]
list_remote_tags(remote).map do |line|
target, path = line.strip.split("\t")
# When the remote repo does not have tags.
if target.nil? || path.nil?
Rails.logger.info "Empty or invalid list of tags for remote: #{remote}. Output: #{output}"
return []
end
name = path.split('/', 3).last
# We're only interested in tag references
# See: http://stackoverflow.com/questions/15472107/when-listing-git-ls-remote-why-theres-after-the-tag-name
next if name =~ /\^\{\}\Z/
target_commit = Gitlab::Git::Commit.find(self, target)
Gitlab::Git::Tag.new(self, name, target, target_commit)
end.compact
end
def remote_branches(remote_name)
branches = []
rugged.references.each("refs/remotes/#{remote_name}/*").map do |ref|
name = ref.name.sub(/\Arefs\/remotes\/#{remote_name}\//, '')
begin
target_commit = Gitlab::Git::Commit.find(self, ref.target)
branches << Gitlab::Git::Branch.new(self, name, ref.target, target_commit)
rescue Rugged::ReferenceError
# Omit invalid branch
end
end
branches
end
private
def list_remote_tags(remote)
tag_list, exit_code, error = nil
cmd = %W(#{Gitlab.config.git.bin_path} --git-dir=#{full_path} ls-remote --tags #{remote})
Open3.popen3(*cmd) do |stdin, stdout, stderr, wait_thr|
tag_list = stdout.read
error = stderr.read
exit_code = wait_thr.value.exitstatus
end
raise RemoteError, error unless exit_code.zero?
tag_list.split('\n')
end
end
end
end
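A minimal usage sketch of the new module, matching the spec added further down: fetch_mirror wires up a temporary 'mirror' remote with a refs/*:refs/* refspec, fetches through it, then removes it again (storage name, path and URL are illustrative):

# Illustrative usage, assuming an existing bare repository on the 'default' storage:
repo = Gitlab::Git::Repository.new('default', 'my_project.git', '')
repo.fetch_mirror('https://example.com/source/repo.git')
# Afterwards refs/heads/*, refs/tags/* and refs/keep-around/* from the source
# exist locally, and the temporary 'mirror' remote has been removed.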
......@@ -10,6 +10,8 @@ module Gitlab
end
PageBlob = Struct.new(:name)
attr_reader :repository
def self.default_ref
'master'
end
......
......@@ -54,7 +54,8 @@ project_tree:
- :auto_devops
- :triggers
- :pipeline_schedules
- :cluster
- clusters:
- :application_helm
- :services
- :hooks
- protected_branches:
......
......@@ -8,8 +8,8 @@ module Gitlab
triggers: 'Ci::Trigger',
pipeline_schedules: 'Ci::PipelineSchedule',
builds: 'Ci::Build',
cluster: 'Clusters::Cluster',
clusters: 'Clusters::Cluster',
application_helm: 'Clusters::Applications::Helm',
hooks: 'ProjectHook',
merge_access_levels: 'ProtectedBranch::MergeAccessLevel',
push_access_levels: 'ProtectedBranch::PushAccessLevel',
......
......@@ -49,6 +49,19 @@ describe Projects::Clusters::ApplicationsController do
expect(response).to have_http_status(:not_found)
end
end
context 'when application is already installing' do
before do
other = current_application.new(cluster: cluster)
other.make_installing!
end
it 'returns 400' do
go
expect(response).to have_http_status(:bad_request)
end
end
end
describe 'security' do
......
......@@ -200,51 +200,6 @@ describe Projects::ClustersController do
expect(response).to redirect_to(project_cluster_path(project, project.cluster))
end
end
# TODO: Activate in 10.3
# context 'when adds a cluster manually' do
# let(:params) do
# {
# cluster: {
# name: 'new-cluster',
# platform_type: :kubernetes,
# provider_type: :user,
# platform_kubernetes_attributes: {
# namespace: 'custom-namespace',
# api_url: 'https://111.111.111.111',
# token: 'token'
# }
# }
# }
# end
# it 'creates a new cluster' do
# expect(ClusterProvisionWorker).to receive(:perform_async)
# expect { go }.to change { Clusters::Cluster.count }
# expect(response).to redirect_to(project_cluster_path(project, project.cluster))
# end
# end
# TODO: We should fix this in 10.2
# Maybe
# - validates :provider_gcp, presence: true, if: :gcp?
# - validates :provider_type, presence: true
# are required in Clusters::Cluster
# context 'when not all required parameters are set' do
# let(:params) do
# {
# cluster: {
# name: 'new-cluster'
# }
# }
# end
# it 'shows an error message' do
# expect { go }.not_to change { Clusters::Cluster.count }
# expect(assigns(:cluster).errors).not_to be_empty
# expect(response).to render_template(:new)
# end
# end
end
context 'when access token is expired' do
......@@ -397,42 +352,6 @@ describe Projects::ClustersController do
end
end
end
# TODO: Activate in 10.3
# context 'when update namespace' do
# let(:namespace) { 'namespace-123' }
# let(:params) do
# {
# cluster: {
# platform_kubernetes_attributes: {
# namespace: namespace
# }
# }
# }
# end
# it "updates and redirects back to show page" do
# go
# cluster.reload
# expect(response).to redirect_to(project_cluster_path(project, project.cluster))
# expect(flash[:notice]).to eq('Cluster was successfully updated.')
# expect(cluster.platform.namespace).to eq(namespace)
# end
# context 'when namespace is invalid' do
# let(:namespace) { 'my Namespace 321321321 #' }
# it "rejects changes" do
# go
# expect(response).to have_gitlab_http_status(:ok)
# expect(response).to render_template(:show)
# expect(cluster.platform.namespace).not_to eq(namespace)
# end
# end
# end
end
describe 'security' do
......
......@@ -59,6 +59,7 @@ describe Projects::NotesController do
expect(note_json[:id]).to eq(note.id)
expect(note_json[:discussion_html]).not_to be_nil
expect(note_json[:diff_discussion_html]).to be_nil
expect(note_json[:discussion_line_code]).to be_nil
end
end
......@@ -74,6 +75,7 @@ describe Projects::NotesController do
expect(note_json[:id]).to eq(note.id)
expect(note_json[:discussion_html]).not_to be_nil
expect(note_json[:diff_discussion_html]).not_to be_nil
expect(note_json[:discussion_line_code]).not_to be_nil
end
end
......@@ -92,6 +94,7 @@ describe Projects::NotesController do
expect(note_json[:id]).to eq(note.id)
expect(note_json[:discussion_html]).not_to be_nil
expect(note_json[:diff_discussion_html]).to be_nil
expect(note_json[:discussion_line_code]).to be_nil
end
end
......@@ -104,6 +107,7 @@ describe Projects::NotesController do
expect(note_json[:id]).to eq(note.id)
expect(note_json[:discussion_html]).to be_nil
expect(note_json[:diff_discussion_html]).to be_nil
expect(note_json[:discussion_line_code]).to be_nil
end
context 'when user cannot read commit' do
......@@ -133,6 +137,7 @@ describe Projects::NotesController do
expect(note_json[:html]).not_to be_nil
expect(note_json[:discussion_html]).to be_nil
expect(note_json[:diff_discussion_html]).to be_nil
expect(note_json[:discussion_line_code]).to be_nil
end
end
......
......@@ -31,5 +31,10 @@ FactoryGirl.define do
status(-1)
status_reason 'something went wrong'
end
trait :timeouted do
installing
updated_at ClusterWaitForAppInstallationWorker::TIMEOUT.ago
end
end
end
......@@ -14,7 +14,7 @@ FactoryGirl.define do
platform_type :kubernetes
platform_kubernetes do
create(:platform_kubernetes, :configured)
create(:cluster_platform_kubernetes, :configured)
end
end
......@@ -23,8 +23,8 @@ FactoryGirl.define do
platform_type :kubernetes
before(:create) do |cluster, evaluator|
cluster.platform_kubernetes = build(:platform_kubernetes, :configured)
cluster.provider_gcp = build(:provider_gcp, :created)
cluster.platform_kubernetes = build(:cluster_platform_kubernetes, :configured)
cluster.provider_gcp = build(:cluster_provider_gcp, :created)
end
end
......@@ -32,7 +32,7 @@ FactoryGirl.define do
provider_type :gcp
provider_gcp do
create(:provider_gcp, :creating)
create(:cluster_provider_gcp, :creating)
end
end
end
......
FactoryGirl.define do
factory :platform_kubernetes, class: Clusters::Platforms::Kubernetes do
factory :cluster_platform_kubernetes, class: Clusters::Platforms::Kubernetes do
cluster
namespace nil
api_url 'https://kubernetes.example.com'
......
FactoryGirl.define do
factory :provider_gcp, class: Clusters::Providers::Gcp do
factory :cluster_provider_gcp, class: Clusters::Providers::Gcp do
cluster
gcp_project_id 'test-gcp-project'
......
{
"type": "object",
"required" : [
"status"
"status",
"applications"
],
"properties" : {
"status": { "type": "string" },
"status_reason": { "type": ["string", "null"] },
"applications": { "$ref": "#/definitions/applications" }
"applications": {
"type": "array",
"items": { "$ref": "#/definitions/application_status" }
}
},
"additionalProperties": false,
"definitions": {
"applications": {
"type": "object",
"additionalProperties": false,
"properties" : {
"helm": { "$ref": "#/definitions/app_status" },
"runner": { "$ref": "#/definitions/app_status" },
"ingress": { "$ref": "#/definitions/app_status" },
"prometheus": { "$ref": "#/definitions/app_status" }
}
},
"app_status": {
"application_status": {
"type": "object",
"additionalProperties": false,
"properties" : {
"name": { "type": "string" },
"status": {
"type": {
"enum": [
"installable",
"scheduled",
"installing",
"installed",
"error"
"errored"
]
}
},
"status_reason": { "type": ["string", "null"] }
},
"required" : [ "status" ]
"required" : [ "name", "status" ]
}
}
}
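For reference, a hypothetical payload the updated schema would accept, written here as a Ruby hash with illustrative values: applications is now an array whose items each require name and status.

# Hypothetical cluster status payload matching the updated schema:
{
  "status"        => "created",
  "status_reason" => nil,
  "applications"  => [
    { "name" => "helm", "status" => "installable", "status_reason" => nil }
  ]
}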
......@@ -343,6 +343,7 @@ import '~/notes';
diff_discussion_html: false,
};
$form = jasmine.createSpyObj('$form', ['closest', 'find']);
$form.length = 1;
row = jasmine.createSpyObj('row', ['prevAll', 'first', 'find']);
notes = jasmine.createSpyObj('notes', [
......@@ -371,13 +372,29 @@ import '~/notes';
$form.closest.and.returnValues(row, $form);
$form.find.and.returnValues(discussionContainer);
body.attr.and.returnValue('');
Notes.prototype.renderDiscussionNote.call(notes, note, $form);
});
it('should call Notes.animateAppendNote', () => {
Notes.prototype.renderDiscussionNote.call(notes, note, $form);
expect(Notes.animateAppendNote).toHaveBeenCalledWith(note.discussion_html, $('.main-notes-list'));
});
it('should append to row selected with line_code', () => {
$form.length = 0;
note.discussion_line_code = 'line_code';
note.diff_discussion_html = '<tr></tr>';
const line = document.createElement('div');
line.id = note.discussion_line_code;
document.body.appendChild(line);
$form.closest.and.returnValues($form);
Notes.prototype.renderDiscussionNote.call(notes, note, $form);
expect(line.nextSibling.outerHTML).toEqual(note.diff_discussion_html);
});
});
describe('Discussion sub note', () => {
......
......@@ -77,8 +77,20 @@ describe Gitlab::Ci::CronParser do
it_behaves_like "returns time in the future"
it 'converts time in server time zone' do
expect(subject.hour).to eq(hour_in_utc)
context 'when PST (Pacific Standard Time)' do
it 'converts time in server time zone' do
Timecop.freeze(Time.utc(2017, 1, 1)) do
expect(subject.hour).to eq(hour_in_utc)
end
end
end
context 'when PDT (Pacific Daylight Time)' do
it 'converts time in server time zone' do
Timecop.freeze(Time.utc(2017, 6, 1)) do
expect(subject.hour).to eq(hour_in_utc)
end
end
end
end
end
......@@ -100,8 +112,20 @@ describe Gitlab::Ci::CronParser do
it_behaves_like "returns time in the future"
it 'converts time in server time zone' do
expect(subject.hour).to eq(hour_in_utc)
context 'when CET (Central European Time)' do
it 'converts time in server time zone' do
Timecop.freeze(Time.utc(2017, 1, 1)) do
expect(subject.hour).to eq(hour_in_utc)
end
end
end
context 'when CEST (Central European Summer Time)' do
it 'converts time in server time zone' do
Timecop.freeze(Time.utc(2017, 6, 1)) do
expect(subject.hour).to eq(hour_in_utc)
end
end
end
end
......@@ -111,8 +135,20 @@ describe Gitlab::Ci::CronParser do
it_behaves_like "returns time in the future"
it 'converts time in server time zone' do
expect(subject.hour).to eq(hour_in_utc)
context 'when EST (Eastern Standard Time)' do
it 'converts time in server time zone' do
Timecop.freeze(Time.utc(2017, 1, 1)) do
expect(subject.hour).to eq(hour_in_utc)
end
end
end
context 'when EDT (Eastern Daylight Time)' do
it 'converts time in server time zone' do
Timecop.freeze(Time.utc(2017, 6, 1)) do
expect(subject.hour).to eq(hour_in_utc)
end
end
end
end
end
......
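The duplicated contexts above pin the clock to a winter and a summer date so that both the standard and daylight-saving offsets of each server time zone are exercised. A small illustration of why the two freeze points differ, using America/Los_Angeles as an assumed zone:

# Illustrative: the same zone has different UTC offsets at the two freeze points.
Time.use_zone('America/Los_Angeles') do
  Timecop.freeze(Time.utc(2017, 1, 1)) do
    Time.zone.now.utc_offset # => -28800 (PST, UTC-8)
  end
  Timecop.freeze(Time.utc(2017, 6, 1)) do
    Time.zone.now.utc_offset # => -25200 (PDT, UTC-7)
  end
end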
......@@ -559,10 +559,10 @@ describe Gitlab::Git::Repository, seed_helper: true do
end
end
describe "#remote_delete" do
describe "#remove_remote" do
before(:all) do
@repo = Gitlab::Git::Repository.new('default', TEST_MUTABLE_REPO_PATH, '')
@repo.remote_delete("expendable")
@repo.remove_remote("expendable")
end
it "should remove the remote" do
......@@ -575,14 +575,16 @@ describe Gitlab::Git::Repository, seed_helper: true do
end
end
describe "#remote_add" do
describe "#remote_update" do
before(:all) do
@repo = Gitlab::Git::Repository.new('default', TEST_MUTABLE_REPO_PATH, '')
@repo.remote_add("new_remote", SeedHelper::GITLAB_GIT_TEST_REPO_URL)
@repo.remote_update("expendable", url: TEST_NORMAL_REPO_PATH)
end
it "should add the remote" do
expect(@repo.rugged.remotes.each_name.to_a).to include("new_remote")
expect(@repo.rugged.remotes["expendable"].url).to(
eq(TEST_NORMAL_REPO_PATH)
)
end
after(:all) do
......@@ -591,21 +593,58 @@ describe Gitlab::Git::Repository, seed_helper: true do
end
end
describe "#remote_update" do
before(:all) do
@repo = Gitlab::Git::Repository.new('default', TEST_MUTABLE_REPO_PATH, '')
@repo.remote_update("expendable", url: TEST_NORMAL_REPO_PATH)
describe '#fetch_mirror' do
let(:new_repository) do
Gitlab::Git::Repository.new('default', 'my_project.git', '')
end
it "should add the remote" do
expect(@repo.rugged.remotes["expendable"].url).to(
eq(TEST_NORMAL_REPO_PATH)
)
subject { new_repository.fetch_mirror(repository.path) }
before do
Gitlab::Shell.new.add_repository('default', 'my_project')
end
after(:all) do
FileUtils.rm_rf(TEST_MUTABLE_REPO_PATH)
ensure_seeds
after do
Gitlab::Shell.new.remove_repository(TestEnv.repos_path, 'my_project')
end
it 'fetches a url as a mirror remote' do
subject
expect(refs(new_repository.path)).to eq(refs(repository.path))
end
context 'with keep-around refs' do
let(:sha) { SeedRepo::Commit::ID }
let(:keep_around_ref) { "refs/keep-around/#{sha}" }
let(:tmp_ref) { "refs/tmp/#{SecureRandom.hex}" }
before do
repository.rugged.references.create(keep_around_ref, sha, force: true)
repository.rugged.references.create(tmp_ref, sha, force: true)
end
it 'includes the temporary and keep-around refs' do
subject
expect(refs(new_repository.path)).to include(keep_around_ref)
expect(refs(new_repository.path)).to include(tmp_ref)
end
end
end
describe '#remote_tags' do
let(:target_commit_id) { SeedRepo::Commit::ID }
subject { repository.remote_tags('upstream') }
it 'gets the remote tags' do
expect(repository).to receive(:list_remote_tags).with('upstream')
.and_return(["#{target_commit_id}\trefs/tags/v0.0.1\n"])
expect(subject.first).to be_an_instance_of(Gitlab::Git::Tag)
expect(subject.first.name).to eq('v0.0.1')
expect(subject.first.dereferenced_target.id).to eq(target_commit_id)
end
end
......@@ -1685,6 +1724,21 @@ describe Gitlab::Git::Repository, seed_helper: true do
end
end
describe '#fetch' do
let(:git_path) { Gitlab.config.git.bin_path }
let(:remote_name) { 'my_remote' }
subject { repository.fetch(remote_name) }
it 'fetches the remote and returns true if the command was successful' do
expect(repository).to receive(:popen)
.with(%W(#{git_path} fetch #{remote_name}), repository.path)
.and_return(['', 0])
expect(subject).to be(true)
end
end
def create_remote_branch(repository, remote_name, branch_name, source_branch_name)
source_branch = repository.branches.find { |branch| branch.name == source_branch_name }
rugged = repository.rugged
......@@ -1760,4 +1814,10 @@ describe Gitlab::Git::Repository, seed_helper: true do
sha = Rugged::Commit.create(repo, options)
repo.lookup(sha)
end
def refs(dir)
IO.popen(%W[git -C #{dir} for-each-ref], &:read).split("\n").map do |line|
line.split("\t").last
end
end
end
......@@ -147,7 +147,8 @@ deploy_keys:
- user
- deploy_keys_projects
- projects
cluster:
clusters:
- application_helm
- cluster_projects
- projects
- user
......@@ -160,6 +161,8 @@ provider_gcp:
- cluster
platform_kubernetes:
- cluster
application_helm:
- cluster
services:
- project
- service_hook
......@@ -191,6 +194,7 @@ project:
- tags
- chat_services
- cluster
- clusters
- cluster_project
- creator
- group
......@@ -299,4 +303,4 @@ push_event_payload:
- event
issue_assignees:
- issue
- assignee
\ No newline at end of file
- assignee
......@@ -9,7 +9,7 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
describe 'before_validation' do
context 'when namespace includes upper case' do
let(:kubernetes) { create(:platform_kubernetes, :configured, namespace: namespace) }
let(:kubernetes) { create(:cluster_platform_kubernetes, :configured, namespace: namespace) }
let(:namespace) { 'ABC' }
it 'converts to lower case' do
......@@ -22,7 +22,7 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
subject { kubernetes.valid? }
context 'when validates namespace' do
let(:kubernetes) { build(:platform_kubernetes, :configured, namespace: namespace) }
let(:kubernetes) { build(:cluster_platform_kubernetes, :configured, namespace: namespace) }
context 'when namespace is blank' do
let(:namespace) { '' }
......@@ -50,7 +50,7 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
end
context 'when validates api_url' do
let(:kubernetes) { build(:platform_kubernetes, :configured) }
let(:kubernetes) { build(:cluster_platform_kubernetes, :configured) }
before do
kubernetes.api_url = api_url
......@@ -76,7 +76,7 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
end
context 'when validates token' do
let(:kubernetes) { build(:platform_kubernetes, :configured) }
let(:kubernetes) { build(:cluster_platform_kubernetes, :configured) }
before do
kubernetes.token = token
......@@ -95,8 +95,8 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
let(:enabled) { true }
let(:project) { create(:project) }
let(:cluster) { build(:cluster, provider_type: :gcp, platform_type: :kubernetes, platform_kubernetes: platform, provider_gcp: provider, enabled: enabled, projects: [project]) }
let(:platform) { build(:platform_kubernetes, :configured) }
let(:provider) { build(:provider_gcp) }
let(:platform) { build(:cluster_platform_kubernetes, :configured) }
let(:provider) { build(:cluster_provider_gcp) }
let(:kubernetes_service) { project.kubernetes_service }
it 'updates KubernetesService' do
......@@ -126,8 +126,8 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
context 'when kubernetes_service has been configured without cluster integration' do
let!(:project) { create(:project) }
let(:cluster) { build(:cluster, provider_type: :gcp, platform_type: :kubernetes, platform_kubernetes: platform, provider_gcp: provider, projects: [project]) }
let(:platform) { build(:platform_kubernetes, :configured, api_url: 'https://111.111.111.111') }
let(:provider) { build(:provider_gcp) }
let(:platform) { build(:cluster_platform_kubernetes, :configured, api_url: 'https://111.111.111.111') }
let(:provider) { build(:cluster_provider_gcp) }
before do
create(:kubernetes_service, project: project)
......@@ -144,7 +144,7 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
let!(:cluster) { create(:cluster, :project, platform_kubernetes: kubernetes) }
let(:project) { cluster.project }
let(:kubernetes) { create(:platform_kubernetes, :configured, namespace: namespace) }
let(:kubernetes) { create(:cluster_platform_kubernetes, :configured, namespace: namespace) }
context 'when namespace is present' do
let(:namespace) { 'namespace-123' }
......@@ -170,7 +170,7 @@ describe Clusters::Platforms::Kubernetes, :use_clean_rails_memory_store_caching
describe '#default_namespace' do
subject { kubernetes.default_namespace }
let(:kubernetes) { create(:platform_kubernetes, :configured) }
let(:kubernetes) { create(:cluster_platform_kubernetes, :configured) }
context 'when cluster belongs to a project' do
let!(:cluster) { create(:cluster, :project, platform_kubernetes: kubernetes) }
......
......@@ -5,7 +5,7 @@ describe Clusters::Providers::Gcp do
it { is_expected.to validate_presence_of(:zone) }
describe 'default_value_for' do
let(:gcp) { build(:provider_gcp) }
let(:gcp) { build(:cluster_provider_gcp) }
it "has default value" do
expect(gcp.zone).to eq('us-central1-a')
......@@ -18,7 +18,7 @@ describe Clusters::Providers::Gcp do
subject { gcp.valid? }
context 'when validates gcp_project_id' do
let(:gcp) { build(:provider_gcp, gcp_project_id: gcp_project_id) }
let(:gcp) { build(:cluster_provider_gcp, gcp_project_id: gcp_project_id) }
context 'when gcp_project_id is shorter than 1' do
let(:gcp_project_id) { '' }
......@@ -46,7 +46,7 @@ describe Clusters::Providers::Gcp do
end
context 'when validates num_nodes' do
let(:gcp) { build(:provider_gcp, num_nodes: num_nodes) }
let(:gcp) { build(:cluster_provider_gcp, num_nodes: num_nodes) }
context 'when num_nodes is string' do
let(:num_nodes) { 'A3' }
......@@ -76,7 +76,7 @@ describe Clusters::Providers::Gcp do
describe '#state_machine' do
context 'when any => [:created]' do
let(:gcp) { build(:provider_gcp, :creating) }
let(:gcp) { build(:cluster_provider_gcp, :creating) }
before do
gcp.make_created
......@@ -90,7 +90,7 @@ describe Clusters::Providers::Gcp do
end
context 'when any => [:creating]' do
let(:gcp) { build(:provider_gcp) }
let(:gcp) { build(:cluster_provider_gcp) }
context 'when operation_id is present' do
let(:operation_id) { 'operation-xxx' }
......@@ -116,7 +116,7 @@ describe Clusters::Providers::Gcp do
end
context 'when any => [:errored]' do
let(:gcp) { build(:provider_gcp, :creating) }
let(:gcp) { build(:cluster_provider_gcp, :creating) }
let(:status_reason) { 'err msg' }
it 'nullify access_token and operation_id' do
......@@ -129,7 +129,7 @@ describe Clusters::Providers::Gcp do
end
context 'when status_reason is nil' do
let(:gcp) { build(:provider_gcp, :errored) }
let(:gcp) { build(:cluster_provider_gcp, :errored) }
it 'does not set status_reason' do
gcp.make_errored(nil)
......@@ -144,13 +144,13 @@ describe Clusters::Providers::Gcp do
subject { gcp.on_creation? }
context 'when status is creating' do
let(:gcp) { create(:provider_gcp, :creating) }
let(:gcp) { create(:cluster_provider_gcp, :creating) }
it { is_expected.to be_truthy }
end
context 'when status is created' do
let(:gcp) { create(:provider_gcp, :created) }
let(:gcp) { create(:cluster_provider_gcp, :created) }
it { is_expected.to be_falsey }
end
......@@ -160,7 +160,7 @@ describe Clusters::Providers::Gcp do
subject { gcp.api_client }
context 'when status is creating' do
let(:gcp) { build(:provider_gcp, :creating) }
let(:gcp) { build(:cluster_provider_gcp, :creating) }
it 'returns Cloud Platform API client' do
expect(subject).to be_an_instance_of(GoogleApi::CloudPlatform::Client)
......@@ -169,13 +169,13 @@ describe Clusters::Providers::Gcp do
end
context 'when status is created' do
let(:gcp) { build(:provider_gcp, :created) }
let(:gcp) { build(:cluster_provider_gcp, :created) }
it { is_expected.to be_nil }
end
context 'when status is errored' do
let(:gcp) { build(:provider_gcp, :errored) }
let(:gcp) { build(:cluster_provider_gcp, :errored) }
it { is_expected.to be_nil }
end
......
......@@ -18,7 +18,6 @@ describe Environment do
it { is_expected.to validate_length_of(:slug).is_at_most(24) }
it { is_expected.to validate_length_of(:external_url).is_at_most(255) }
it { is_expected.to validate_uniqueness_of(:external_url).scoped_to(:project_id) }
describe '.order_by_last_deployed_at' do
let(:project) { create(:project, :repository) }
......
......@@ -2298,4 +2298,24 @@ describe Repository do
project.commit_by(oid: '1' * 40)
end
end
describe '#raw_repository' do
subject { repository.raw_repository }
it 'returns a Gitlab::Git::Repository representation of the repository' do
expect(subject).to be_a(Gitlab::Git::Repository)
expect(subject.relative_path).to eq(project.disk_path + '.git')
expect(subject.gl_repository).to eq("project-#{project.id}")
end
context 'with a wiki repository' do
let(:repository) { project.wiki.repository }
it 'creates a Gitlab::Git::Repository with the proper attributes' do
expect(subject).to be_a(Gitlab::Git::Repository)
expect(subject.relative_path).to eq(project.disk_path + '.wiki.git')
expect(subject.gl_repository).to eq("wiki-#{project.id}")
end
end
end
end
require 'spec_helper'
describe ClusterApplicationEntity do
describe '#as_json' do
let(:application) { build(:applications_helm) }
subject { described_class.new(application).as_json }
it 'has name' do
expect(subject[:name]).to eq(application.name)
end
it 'has status' do
expect(subject[:status]).to eq(:installable)
end
it 'has no status_reason' do
expect(subject[:status_reason]).to be_nil
end
context 'when application is errored' do
let(:application) { build(:applications_helm, :errored) }
it 'has corresponded data' do
expect(subject[:status]).to eq(:errored)
expect(subject[:status_reason]).not_to be_nil
expect(subject[:status_reason]).to eq(application.status_reason)
end
end
end
end
......@@ -8,7 +8,7 @@ describe ClusterEntity do
let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) }
context 'when status is creating' do
let(:provider) { create(:provider_gcp, :creating) }
let(:provider) { create(:cluster_provider_gcp, :creating) }
it 'has corresponded data' do
expect(subject[:status]).to eq(:creating)
......@@ -17,7 +17,7 @@ describe ClusterEntity do
end
context 'when status is errored' do
let(:provider) { create(:provider_gcp, :errored) }
let(:provider) { create(:cluster_provider_gcp, :errored) }
it 'has corresponded data' do
expect(subject[:status]).to eq(:errored)
......@@ -35,8 +35,17 @@ describe ClusterEntity do
end
end
it 'contains applications' do
expect(subject[:applications]).to eq({})
context 'when no application has been installed' do
let(:cluster) { create(:cluster) }
subject { described_class.new(cluster).as_json[:applications]}
it 'contains helm as installable' do
expect(subject).not_to be_empty
helm = subject[0]
expect(helm[:name]).to eq('helm')
expect(helm[:status]).to eq(:installable)
end
end
end
end
......@@ -6,10 +6,10 @@ describe ClusterSerializer do
context 'when provider type is gcp' do
let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) }
let(:provider) { create(:provider_gcp, :errored) }
let(:provider) { create(:cluster_provider_gcp, :errored) }
it 'serializes only status' do
expect(subject.keys).to contain_exactly(:status, :status_reason)
expect(subject.keys).to contain_exactly(:status, :status_reason, :applications)
end
end
......
require 'spec_helper'
describe Clusters::Applications::CheckInstallationProgressService do
RESCHEDULE_PHASES = Gitlab::Kubernetes::Pod::PHASES - [Gitlab::Kubernetes::Pod::SUCCEEDED, Gitlab::Kubernetes::Pod::FAILED].freeze
let(:application) { create(:applications_helm, :installing) }
let(:service) { described_class.new(application) }
let(:phase) { Gitlab::Kubernetes::Pod::UNKNOWN }
let(:errors) { nil }
shared_examples 'a terminated installation' do
it 'finalize the installation' do
expect(service).to receive(:finalize_installation).once
service.execute
end
end
shared_examples 'a not yet terminated installation' do |a_phase|
let(:phase) { a_phase }
context "when phase is #{a_phase}" do
context 'when not timeouted' do
it 'reschedule a new check' do
expect(ClusterWaitForAppInstallationWorker).to receive(:perform_in).once
expect(service).not_to receive(:finalize_installation)
service.execute
expect(application).to be_installing
expect(application.status_reason).to be_nil
end
end
context 'when timeouted' do
let(:application) { create(:applications_helm, :timeouted) }
it_behaves_like 'a terminated installation'
it 'make the application errored' do
expect(ClusterWaitForAppInstallationWorker).not_to receive(:perform_in)
service.execute
expect(application).to be_errored
expect(application.status_reason).to match(/\btimeouted\b/)
end
end
end
end
before do
expect(service).to receive(:installation_phase).once.and_return(phase)
allow(service).to receive(:installation_errors).and_return(errors)
allow(service).to receive(:finalize_installation).and_return(nil)
end
describe '#execute' do
context 'when installation POD succeeded' do
let(:phase) { Gitlab::Kubernetes::Pod::SUCCEEDED }
it_behaves_like 'a terminated installation'
end
context 'when installation POD failed' do
let(:phase) { Gitlab::Kubernetes::Pod::FAILED }
let(:errors) { 'test installation failed' }
it_behaves_like 'a terminated installation'
it 'make the application errored' do
service.execute
expect(application).to be_errored
expect(application.status_reason).to eq(errors)
end
end
RESCHEDULE_PHASES.each { |phase| it_behaves_like 'a not yet terminated installation', phase }
end
end
require 'spec_helper'
describe Clusters::Applications::FinalizeInstallationService do
describe '#execute' do
let(:application) { create(:applications_helm, :installing) }
let(:service) { described_class.new(application) }
before do
expect_any_instance_of(Gitlab::Kubernetes::Helm).to receive(:delete_installation_pod!).with(application)
end
context 'when installation POD succeeded' do
it 'make the application installed' do
service.execute
expect(application).to be_installed
expect(application.status_reason).to be_nil
end
end
context 'when installation POD failed' do
let(:application) { create(:applications_helm, :errored) }
it 'make the application errored' do
service.execute
expect(application).to be_errored
expect(application.status_reason).not_to be_nil
end
end
end
end
require 'spec_helper'
describe Clusters::Applications::InstallService do
describe '#execute' do
let(:application) { create(:applications_helm, :scheduled) }
let(:service) { described_class.new(application) }
context 'when there are no errors' do
before do
expect_any_instance_of(Gitlab::Kubernetes::Helm).to receive(:install).with(application)
allow(ClusterWaitForAppInstallationWorker).to receive(:perform_in).and_return(nil)
end
it 'make the application installing' do
service.execute
expect(application).to be_installing
end
it 'schedule async installation status check' do
expect(ClusterWaitForAppInstallationWorker).to receive(:perform_in).once
service.execute
end
end
context 'when k8s cluster communication fails' do
before do
error = KubeException.new(500, 'system failure', nil)
expect_any_instance_of(Gitlab::Kubernetes::Helm).to receive(:install).with(application).and_raise(error)
end
it 'make the application errored' do
service.execute
expect(application).to be_errored
expect(application.status_reason).to match(/kubernetes error:/i)
end
end
context 'when application cannot be persisted' do
let(:application) { build(:applications_helm, :scheduled) }
it 'make the application errored' do
expect(application).to receive(:make_installing!).once.and_raise(ActiveRecord::RecordInvalid)
expect_any_instance_of(Gitlab::Kubernetes::Helm).not_to receive(:install)
service.execute
expect(application).to be_errored
end
end
end
end
require 'spec_helper'
describe Clusters::Applications::ScheduleInstallationService do
def count_scheduled
application_class&.with_status(:scheduled)&.count || 0
end
shared_examples 'a failing service' do
it 'raise an exception' do
expect(ClusterInstallAppWorker).not_to receive(:perform_async)
count_before = count_scheduled
expect { service.execute }.to raise_error(StandardError)
expect(count_scheduled).to eq(count_before)
end
end
describe '#execute' do
let(:application_class) { Clusters::Applications::Helm }
let(:cluster) { create(:cluster, :project, :provided_by_gcp) }
let(:project) { cluster.project }
let(:service) { described_class.new(project, nil, cluster: cluster, application_class: application_class) }
it 'creates a new application' do
expect { service.execute }.to change { application_class.count }.by(1)
end
it 'make the application scheduled' do
expect(ClusterInstallAppWorker).to receive(:perform_async).with(application_class.application_name, kind_of(Numeric)).once
expect { service.execute }.to change { application_class.with_status(:scheduled).count }.by(1)
end
context 'when installation is already in progress' do
let(:application) { create(:applications_helm, :installing) }
let(:cluster) { application.cluster }
it_behaves_like 'a failing service'
end
context 'when application_class is nil' do
let(:application_class) { nil }
it_behaves_like 'a failing service'
end
context 'when application cannot be persisted' do
before do
expect_any_instance_of(application_class).to receive(:make_scheduled!).once.and_raise(ActiveRecord::RecordInvalid)
end
it_behaves_like 'a failing service'
end
end
end
......@@ -61,57 +61,4 @@ describe Clusters::CreateService do
end
end
end
# TODO: This will be active in 10.3
# context 'when provider is user' do
# context 'when correct params' do
# let(:params) do
# {
# name: 'test-cluster',
# platform_type: :kubernetes,
# provider_type: :user,
# platform_kubernetes_attributes: {
# namespace: 'custom-namespace',
# api_url: 'https://111.111.111.111',
# token: 'token'
# }
# }
# end
# it 'creates a cluster object and performs a worker' do
# expect(ClusterProvisionWorker).to receive(:perform_async)
# expect { result }
# .to change { Clusters::Cluster.count }.by(1)
# .and change { Clusters::Platforms::Kubernetes.count }.by(1)
# expect(result.name).to eq('test-cluster')
# expect(result.user).to eq(user)
# expect(result.project).to eq(project)
# expect(result.provider).to be_nil
# expect(result.platform.namespace).to eq('custom-namespace')
# end
# end
# context 'when invalid params' do
# let(:params) do
# {
# name: 'test-cluster',
# platform_type: :kubernetes,
# provider_type: :user,
# platform_kubernetes_attributes: {
# namespace: 'custom-namespace',
# api_url: '!!!!!',
# token: 'token'
# }
# }
# end
# it 'returns an error' do
# # expect(ClusterProvisionWorker).not_to receive(:perform_async)
# expect { result }.to change { Clusters::Cluster.count }.by(0)
# expect(result.errors[:"platform_kubernetes.api_url"]).to be_present
# end
# end
# end
end
......@@ -4,7 +4,7 @@ describe Clusters::Gcp::FetchOperationService do
include GoogleApi::CloudPlatformHelpers
describe '#execute' do
let(:provider) { create(:provider_gcp, :creating) }
let(:provider) { create(:cluster_provider_gcp, :creating) }
let(:gcp_project_id) { provider.gcp_project_id }
let(:zone) { provider.zone }
let(:operation_id) { provider.operation_id }
......
......@@ -4,7 +4,7 @@ describe Clusters::Gcp::ProvisionService do
include GoogleApi::CloudPlatformHelpers
describe '#execute' do
let(:provider) { create(:provider_gcp, :scheduled) }
let(:provider) { create(:cluster_provider_gcp, :scheduled) }
let(:gcp_project_id) { provider.gcp_project_id }
let(:zone) { provider.zone }
......
......@@ -4,7 +4,7 @@ describe Clusters::Gcp::VerifyProvisionStatusService do
include GoogleApi::CloudPlatformHelpers
describe '#execute' do
let(:provider) { create(:provider_gcp, :creating) }
let(:provider) { create(:cluster_provider_gcp, :creating) }
let(:gcp_project_id) { provider.gcp_project_id }
let(:zone) { provider.zone }
let(:operation_id) { provider.operation_id }
......
......@@ -71,15 +71,9 @@ module GoogleApi
"name": options[:name] || 'string',
"description": options[:description] || 'string',
"initialNodeCount": options[:initialNodeCount] || 'number',
# "nodeConfig": {,
# object(NodeConfig),
# },,
"masterAuth": {
"username": options[:username] || 'string',
"password": options[:password] || 'string',
# "clientCertificateConfig": {
# object(ClientCertificateConfig)
# },
"clusterCaCertificate": options[:clusterCaCertificate] || load_sample_cert,
"clientCertificate": options[:clientCertificate] || 'string',
"clientKey": options[:clientKey] || 'string'
......@@ -88,36 +82,9 @@ module GoogleApi
"monitoringService": options[:monitoringService] || 'string',
"network": options[:network] || 'string',
"clusterIpv4Cidr": options[:clusterIpv4Cidr] || 'string',
# "addonsConfig": {,
# object(AddonsConfig),
# },,
"subnetwork": options[:subnetwork] || 'string',
# "nodePools": [,
# {,
# object(NodePool),
# },
# ],,
# "locations": [,
# string,
# ],,
"enableKubernetesAlpha": options[:enableKubernetesAlpha] || 'boolean',
# "resourceLabels": {,
# string: string,,
# ...,
# },,
"labelFingerprint": options[:labelFingerprint] || 'string',
# "legacyAbac": {,
# object(LegacyAbac),
# },
# "networkPolicy": {,
# object(NetworkPolicy),
# },
# "ipAllocationPolicy": {,
# object(IPAllocationPolicy),
# },
# "masterAuthorizedNetworksConfig": {,
# object(MasterAuthorizedNetworksConfig),
# },
"selfLink": options[:selfLink] || 'string',
"zone": options[:zone] || 'string',
"endpoint": options[:endpoint] || 'string',
......@@ -129,9 +96,6 @@ module GoogleApi
"statusMessage": options[:statusMessage] || 'string',
"nodeIpv4CidrSize": options[:nodeIpv4CidrSize] || 'number',
"servicesIpv4Cidr": options[:servicesIpv4Cidr] || 'string',
# "instanceGroupUrls": [,
# string,
# ],,
"currentNodeCount": options[:currentNodeCount] || 'number',
"expireTime": options[:expireTime] || 'string'
}
......
......@@ -4,7 +4,7 @@ describe ClusterProvisionWorker do
describe '#perform' do
context 'when provider type is gcp' do
let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) }
let(:provider) { create(:provider_gcp, :scheduled) }
let(:provider) { create(:cluster_provider_gcp, :scheduled) }
it 'provision a cluster' do
expect_any_instance_of(Clusters::Gcp::ProvisionService).to receive(:execute)
......
......@@ -4,7 +4,7 @@ describe WaitForClusterCreationWorker do
describe '#perform' do
context 'when provider type is gcp' do
let(:cluster) { create(:cluster, provider_type: :gcp, provider_gcp: provider) }
let(:provider) { create(:provider_gcp, :creating) }
let(:provider) { create(:cluster_provider_gcp, :creating) }
it 'provision a cluster' do
expect_any_instance_of(Clusters::Gcp::VerifyProvisionStatusService).to receive(:execute)
......