Commit e19bfdf5 authored by GitLab Bot's avatar GitLab Bot

Automatic merge of gitlab-org/gitlab master

parents 8ff7063b 8d1d6aa3
<script>
import { GlLink, GlModal } from '@gitlab/ui';
import { GlLink, GlModal, GlSprintf } from '@gitlab/ui';
import { isEmpty } from 'lodash';
import { __, s__, sprintf } from '~/locale';
import CiIcon from '~/vue_shared/components/ci_icon.vue';
......@@ -13,6 +13,7 @@ export default {
components: {
GlModal,
GlLink,
GlSprintf,
CiIcon,
},
props: {
......@@ -33,13 +34,7 @@ export default {
);
},
modalText() {
return sprintf(
s__(`Pipeline|You’re about to stop pipeline %{pipelineId}.`),
{
pipelineId: `<strong>#${this.pipeline.id}</strong>`,
},
false,
);
return s__(`Pipeline|You’re about to stop pipeline #%{pipelineId}.`);
},
hasRef() {
return !isEmpty(this.pipeline.ref);
......@@ -71,7 +66,13 @@ export default {
:action-cancel="cancelProps"
@primary="emitSubmit($event)"
>
<p v-html="modalText /* eslint-disable-line vue/no-v-html */"></p>
<p>
<gl-sprintf :message="modalText">
<template #pipelineId>
<strong>{{ pipeline.id }}</strong>
</template>
</gl-sprintf>
</p>
<p v-if="pipeline">
<ci-icon
......
......@@ -140,7 +140,7 @@
@include gl-border-none;
.avatar.s32 {
@extend .rect-avatar.s32;
border-radius: $border-radius-default;
box-shadow: $avatar-box-shadow;
}
}
......
......@@ -1284,6 +1284,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items {
......@@ -1308,6 +1309,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items > li .badge.badge-pill {
......@@ -1605,17 +1607,7 @@ svg.s16 {
.rect-avatar.s16 {
border-radius: 2px;
}
.rect-avatar.s32,
.nav-sidebar-inner-scroll
> div.context-header
a
.avatar-container.rect-avatar
.avatar.s32,
.sidebar-top-level-items
.context-header
a
.avatar-container.rect-avatar
.avatar.s32 {
.rect-avatar.s32 {
border-radius: 4px;
}
body.gl-dark .navbar-gitlab {
......
......@@ -1264,6 +1264,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items {
......@@ -1288,6 +1289,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items > li .badge.badge-pill {
......@@ -1585,17 +1587,7 @@ svg.s16 {
.rect-avatar.s16 {
border-radius: 2px;
}
.rect-avatar.s32,
.nav-sidebar-inner-scroll
> div.context-header
a
.avatar-container.rect-avatar
.avatar.s32,
.sidebar-top-level-items
.context-header
a
.avatar-container.rect-avatar
.avatar.s32 {
.rect-avatar.s32 {
border-radius: 4px;
}
......
......@@ -3,6 +3,7 @@
module Types
module Ci
class RunnerType < BaseObject
edge_type_class(RunnerWebUrlEdge)
graphql_name 'CiRunner'
authorize :read_runner
present_using ::Ci::RunnerPresenter
......@@ -48,12 +49,18 @@ module Types
description: 'Number of projects that the runner is associated with.'
field :job_count, GraphQL::Types::Int, null: true,
description: "Number of jobs processed by the runner (limited to #{JOB_COUNT_LIMIT}, plus one to indicate that more items exist)."
field :admin_url, GraphQL::Types::String, null: true,
description: 'Admin URL of the runner. Only available for adminstrators.'
def job_count
# We limit to 1 above the JOB_COUNT_LIMIT to indicate that more items exist after JOB_COUNT_LIMIT
runner.builds.limit(JOB_COUNT_LIMIT + 1).count
end
def admin_url
Gitlab::Routing.url_helpers.admin_runner_url(runner) if can_admin_runners?
end
# rubocop: disable CodeReuse/ActiveRecord
def project_count
BatchLoader::GraphQL.for(runner.id).batch(key: :runner_project_count) do |ids, loader, args|
......@@ -70,6 +77,12 @@ module Types
end
end
# rubocop: enable CodeReuse/ActiveRecord
private
def can_admin_runners?
context[:current_user]&.can_admin_all_resources?
end
end
end
end
......
# frozen_string_literal: true
module Types
module Ci
# Custom Relay edge type for CiRunner that exposes a context-dependent
# `webUrl` field: the URL points at the runner page of whichever Project
# or Group the runner connection was queried under.
# rubocop: disable Graphql/AuthorizeTypes
class RunnerWebUrlEdge < GraphQL::Types::Relay::BaseEdge
include FindClosest
field :web_url, GraphQL::Types::String, null: true,
description: 'Web URL of the runner. The value depends on where you put this field in the query. You can use it for projects or groups.',
extras: [:parent]
# `node` is the GraphQL edge wrapper; `node.node` unwraps it to the
# underlying runner domain object.
def initialize(node, connection)
super
@runner = node.node
end
# Walks up the GraphQL parent chain (via FindClosest) to the nearest
# Project or Group ancestor and builds the runner URL scoped to it.
# Returns nil when neither ancestor is present (field is null: true).
def web_url(parent:)
owner = closest_parent([::Types::ProjectType, ::Types::GroupType], parent)
case owner
when ::Group
Gitlab::Routing.url_helpers.group_runner_url(owner, @runner)
when ::Project
Gitlab::Routing.url_helpers.project_runner_url(owner, @runner)
end
end
end
end
end
# frozen_string_literal: true
module FindClosest
# Find the closest node of a given type above this node, and return the domain object
def closest_parent(type, parent)
parent = parent.try(:parent) while parent && parent.object.class != type
return unless parent
# Find the closest node which has any of the given types above this node, and return the domain object
def closest_parent(types, parent)
while parent
parent.object.object
if types.any? {|type| parent.object.instance_of? type}
return parent.object.object
else
parent = parent.try(:parent)
end
end
end
end
......@@ -14,7 +14,7 @@ module Types
end
def merge_request_interaction(parent:)
merge_request = closest_parent(::Types::MergeRequestType, parent)
merge_request = closest_parent([::Types::MergeRequestType], parent)
return unless merge_request
Users::MergeRequestInteraction.new(user: object, merge_request: merge_request)
......
......@@ -364,6 +364,10 @@ class ApplicationSetting < ApplicationRecord
validates :container_registry_expiration_policies_worker_capacity,
numericality: { only_integer: true, greater_than_or_equal_to: 0 }
validates :dependency_proxy_ttl_group_policy_worker_capacity,
allow_nil: false,
numericality: { only_integer: true, greater_than_or_equal_to: 0 }
validates :invisible_captcha_enabled,
inclusion: { in: [true, false], message: _('must be a boolean value') }
......
# frozen_string_literal: true
# Shared behaviour for records that expire after a time-to-live period.
#
# Adds a `status` enum (default/expired/processing/error), a presence
# validation on `status`, and the scopes used by the TTL cleanup workers:
#   - updated_before(number_of_days): rows last touched at least that many days ago
#   - active: rows still in the `default` status
#   - lock_next_by(sort): claims one row with FOR UPDATE SKIP LOCKED so
#     concurrent cleanup workers never pick the same record
module TtlExpirable
extend ActiveSupport::Concern
included do
validates :status, presence: true
enum status: { default: 0, expired: 1, processing: 2, error: 3 }
scope :updated_before, ->(number_of_days) { where("updated_at <= ?", Time.zone.now - number_of_days.days) }
scope :active, -> { where(status: :default) }
scope :lock_next_by, ->(sort) do
order(sort)
.limit(1)
.lock('FOR UPDATE SKIP LOCKED')
end
end
end
......@@ -2,15 +2,14 @@
class DependencyProxy::Blob < ApplicationRecord
include FileStoreMounter
include TtlExpirable
include EachBatch
belongs_to :group
validates :group, presence: true
validates :file, presence: true
validates :file_name, presence: true
validates :status, presence: true
enum status: { default: 0, expired: 1 }
mount_file_store_uploader DependencyProxy::FileUploader
......
......@@ -8,4 +8,6 @@ class DependencyProxy::ImageTtlGroupPolicy < ApplicationRecord
validates :group, presence: true
validates :enabled, inclusion: { in: [true, false] }
validates :ttl, numericality: { greater_than: 0 }, allow_nil: true
scope :enabled, -> { where(enabled: true) }
end
......@@ -2,6 +2,8 @@
class DependencyProxy::Manifest < ApplicationRecord
include FileStoreMounter
include TtlExpirable
include EachBatch
belongs_to :group
......@@ -9,9 +11,6 @@ class DependencyProxy::Manifest < ApplicationRecord
validates :file, presence: true
validates :file_name, presence: true
validates :digest, presence: true
validates :status, presence: true
enum status: { default: 0, expired: 1 }
mount_file_store_uploader DependencyProxy::FileUploader
......
......@@ -16,8 +16,6 @@ module Packages
scope :with_namespace, ->(namespace) { where(namespace: namespace) }
scope :with_sha, ->(sha) { where(file_sha256: sha) }
scope :expired, -> { where("delete_at <= ?", Time.current) }
scope :without_namespace, -> { where(namespace_id: nil) }
end
end
end
......@@ -12,7 +12,7 @@ module DependencyProxy
def execute
from_cache = true
file_name = @blob_sha.sub('sha256:', '') + '.gz'
blob = @group.dependency_proxy_blobs.find_or_build(file_name)
blob = @group.dependency_proxy_blobs.active.find_or_build(file_name)
unless blob.persisted?
from_cache = false
......@@ -30,6 +30,8 @@ module DependencyProxy
blob.save!
end
# Technical debt: change to read_at https://gitlab.com/gitlab-org/gitlab/-/issues/341536
blob.touch if from_cache
success(blob: blob, from_cache: from_cache)
end
......
......@@ -13,11 +13,16 @@ module DependencyProxy
def execute
@manifest = @group.dependency_proxy_manifests
.active
.find_or_initialize_by_file_name_or_digest(file_name: @file_name, digest: @tag)
head_result = DependencyProxy::HeadManifestService.new(@image, @tag, @token).execute
return success(manifest: @manifest, from_cache: true) if cached_manifest_matches?(head_result)
if cached_manifest_matches?(head_result)
@manifest.touch
return success(manifest: @manifest, from_cache: true)
end
pull_new_manifest
respond(from_cache: false)
......@@ -46,6 +51,9 @@ module DependencyProxy
def respond(from_cache: true)
if @manifest.persisted?
# Technical debt: change to read_at https://gitlab.com/gitlab-org/gitlab/-/issues/341536
@manifest.touch if from_cache
success(manifest: @manifest, from_cache: from_cache)
else
error('Failed to download the manifest from the external registry', 503)
......
......@@ -17,10 +17,6 @@ module Packages
})
end
unless Feature.enabled?(:remove_composer_v1_cache_code, project)
::Packages::Composer::CacheUpdateWorker.perform_async(created_package.project_id, created_package.name, nil)
end
created_package
end
......
......@@ -51,10 +51,18 @@
%p.gl-mb-3.text-muted= _('Registration Features include:')
.form-text
- email_from_gitlab_path = help_page_path('tools/email.md')
- repo_size_limit_path = help_page_path('user/admin_area/settings/account_and_limit_settings.md', anchor: 'repository-size-limit')
- restrict_ip_path = help_page_path('user/group/index.md', anchor: 'restrict-group-access-by-ip-address')
- link_end = '</a>'.html_safe
- email_from_gitlab_link = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: email_from_gitlab_path }
- repo_size_limit_link = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: repo_size_limit_path }
- restrict_ip_link = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: restrict_ip_path }
%ul
%li
= _('Email from GitLab - email users right from the Admin Area. %{link_start}Learn more%{link_end}.').html_safe % { link_start: email_from_gitlab_link, link_end: link_end }
%li
= _('Limit project size at a global, group and project level. %{link_start}Learn more%{link_end}.').html_safe % { link_start: repo_size_limit_link, link_end: link_end }
%li
= _('Restrict group access by IP address. %{link_start}Learn more%{link_end}.').html_safe % { link_start: restrict_ip_link, link_end: link_end }
= f.submit _('Save changes'), class: "gl-button btn btn-confirm"
......@@ -264,6 +264,15 @@
:weight: 1
:idempotent: true
:tags: []
- :name: cronjob:dependency_proxy_image_ttl_group_policy
:worker_name: DependencyProxy::ImageTtlGroupPolicyWorker
:feature_category: :dependency_proxy
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent:
:tags: []
- :name: cronjob:environments_auto_delete_cron
:worker_name: Environments::AutoDeleteCronWorker
:feature_category: :continuous_delivery
......@@ -651,6 +660,24 @@
:weight: 1
:idempotent: true
:tags: []
- :name: dependency_proxy_blob:dependency_proxy_cleanup_blob
:worker_name: DependencyProxy::CleanupBlobWorker
:feature_category: :dependency_proxy
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: dependency_proxy_manifest:dependency_proxy_cleanup_manifest
:worker_name: DependencyProxy::CleanupManifestWorker
:feature_category: :dependency_proxy
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: deployment:deployments_drop_older_deployments
:worker_name: Deployments::DropOlderDeploymentsWorker
:feature_category: :continuous_delivery
......
# frozen_string_literal: true
module DependencyProxy
# Shared implementation for LimitedCapacity cleanup workers that destroy
# expired dependency-proxy artifacts (blobs and manifests), one per job.
#
# Including workers must implement (privately):
#   - model:                      the ActiveRecord class being cleaned up
#   - log_metadata(artifact):     attach done-metadata for the destroyed artifact
#   - log_cleanup_item(artifact): structured log entry before destruction
module CleanupWorker
extend ActiveSupport::Concern
include Gitlab::Utils::StrongMemoize
# Destroys the next claimed expired artifact. On any error the artifact
# is flipped to the `error` status so it is surfaced instead of being
# retried forever.
def perform_work
return unless artifact
log_metadata(artifact)
artifact.destroy!
rescue StandardError
artifact&.error!
end
# Concurrency limit, driven by an application setting so admins can tune
# cleanup throughput at runtime.
def max_running_jobs
::Gitlab::CurrentSettings.dependency_proxy_ttl_group_policy_worker_capacity
end
# Counts up to max_running_jobs + 1 so LimitedCapacity can tell whether
# more work remains beyond the current capacity without a full COUNT.
def remaining_work_count
expired_artifacts.limit(max_running_jobs + 1).count
end
private
def model
raise NotImplementedError
end
# NOTE: the stubs accept the artifact argument so a subclass that forgets
# to override them raises NotImplementedError rather than a misleading
# ArgumentError (perform_work calls them with one argument).
def log_metadata(_artifact)
raise NotImplementedError
end
def log_cleanup_item(_artifact)
raise NotImplementedError
end
# Atomically claims the next expired artifact: the row is locked with
# SKIP LOCKED (see TtlExpirable.lock_next_by) and flipped to `processing`
# inside one transaction, so concurrent workers never claim the same row.
# Memoized: perform_work references it three times.
def artifact
strong_memoize(:artifact) do
model.transaction do
to_delete = next_item
if to_delete
to_delete.processing!
log_cleanup_item(to_delete)
end
to_delete
end
end
end
def expired_artifacts
model.expired
end
def next_item
expired_artifacts.lock_next_by(:updated_at).first
end
end
end
# frozen_string_literal: true
module DependencyProxy
# LimitedCapacity worker that deletes one expired dependency-proxy blob
# per job. The shared claim/destroy logic lives in
# DependencyProxy::CleanupWorker; this class only supplies the model and
# logging hooks.
class CleanupBlobWorker
include ApplicationWorker
include LimitedCapacity::Worker
include Gitlab::Utils::StrongMemoize
include DependencyProxy::CleanupWorker
data_consistency :always
sidekiq_options retry: 3
queue_namespace :dependency_proxy_blob
feature_category :dependency_proxy
urgency :low
worker_resource_boundary :unknown
idempotent!
private
# Target model for the shared cleanup concern.
def model
DependencyProxy::Blob
end
# Attaches the destroyed blob's identifiers to the job's done-log.
def log_metadata(blob)
log_extra_metadata_on_done(:dependency_proxy_blob_id, blob.id)
log_extra_metadata_on_done(:group_id, blob.group_id)
end
# Structured log entry emitted just before the blob is destroyed.
def log_cleanup_item(blob)
logger.info(
structured_payload(
group_id: blob.group_id,
dependency_proxy_blob_id: blob.id
)
)
end
end
end
# frozen_string_literal: true
module DependencyProxy
# LimitedCapacity worker that deletes one expired dependency-proxy
# manifest per job. The shared claim/destroy logic lives in
# DependencyProxy::CleanupWorker; this class only supplies the model and
# logging hooks.
class CleanupManifestWorker
include ApplicationWorker
include LimitedCapacity::Worker
include Gitlab::Utils::StrongMemoize
include DependencyProxy::CleanupWorker
data_consistency :always
sidekiq_options retry: 3
queue_namespace :dependency_proxy_manifest
feature_category :dependency_proxy
urgency :low
worker_resource_boundary :unknown
idempotent!
private
# Target model for the shared cleanup concern.
def model
DependencyProxy::Manifest
end
# Attaches the destroyed manifest's identifiers to the job's done-log.
def log_metadata(manifest)
log_extra_metadata_on_done(:dependency_proxy_manifest_id, manifest.id)
log_extra_metadata_on_done(:group_id, manifest.group_id)
end
# Structured log entry emitted just before the manifest is destroyed.
def log_cleanup_item(manifest)
logger.info(
structured_payload(
group_id: manifest.group_id,
dependency_proxy_manifest_id: manifest.id
)
)
end
end
end
# frozen_string_literal: true
module DependencyProxy
# Cron worker that enforces dependency-proxy TTL group policies: it marks
# stale blobs/manifests as `expired` in batches and enqueues the
# capacity-limited cleanup workers that actually delete them.
class ImageTtlGroupPolicyWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
data_consistency :always
feature_category :dependency_proxy
# Rows flipped to `expired` per UPDATE statement.
UPDATE_BATCH_SIZE = 100
def perform
DependencyProxy::ImageTtlGroupPolicy.enabled.each do |policy|
# Technical Debt: change to read_before https://gitlab.com/gitlab-org/gitlab/-/issues/341536
qualified_blobs = policy.group.dependency_proxy_blobs.active.updated_before(policy.ttl)
qualified_manifests = policy.group.dependency_proxy_manifests.active.updated_before(policy.ttl)
# Only enqueue the cleanup workers when at least one row was expired.
enqueue_blob_cleanup_job if expire_artifacts(qualified_blobs)
enqueue_manifest_cleanup_job if expire_artifacts(qualified_manifests)
end
log_counts
end
private
# Batch-updates the given scope to status :expired and returns true when
# any row changed. (The previous `model` parameter was unused — the scope
# already targets the right table — so it has been removed.)
def expire_artifacts(artifacts)
rows_updated = false
artifacts.each_batch(of: UPDATE_BATCH_SIZE) do |batch|
rows = batch.update_all(status: :expired)
rows_updated ||= rows > 0
end
rows_updated
end
def enqueue_blob_cleanup_job
DependencyProxy::CleanupBlobWorker.perform_with_capacity
end
def enqueue_manifest_cleanup_job
DependencyProxy::CleanupManifestWorker.perform_with_capacity
end
# Emits gauge-style counts of expired/processing/error artifacts for
# monitoring; the reads go to a replica when one is available.
def log_counts
use_replica_if_available do
expired_blob_count = DependencyProxy::Blob.expired.count
expired_manifest_count = DependencyProxy::Manifest.expired.count
processing_blob_count = DependencyProxy::Blob.processing.count
processing_manifest_count = DependencyProxy::Manifest.processing.count
error_blob_count = DependencyProxy::Blob.error.count
error_manifest_count = DependencyProxy::Manifest.error.count
log_extra_metadata_on_done(:expired_dependency_proxy_blob_count, expired_blob_count)
log_extra_metadata_on_done(:expired_dependency_proxy_manifest_count, expired_manifest_count)
log_extra_metadata_on_done(:processing_dependency_proxy_blob_count, processing_blob_count)
log_extra_metadata_on_done(:processing_dependency_proxy_manifest_count, processing_manifest_count)
log_extra_metadata_on_done(:error_dependency_proxy_blob_count, error_blob_count)
log_extra_metadata_on_done(:error_dependency_proxy_manifest_count, error_manifest_count)
end
end
def use_replica_if_available(&block)
::Gitlab::Database::LoadBalancing::Session.current.use_replicas_for_read_queries(&block)
end
end
end
......@@ -14,19 +14,7 @@ module Packages
idempotent!
def perform
::Packages::Composer::CacheFile.without_namespace.find_in_batches do |cache_files|
cache_files.each(&:destroy)
rescue ActiveRecord::RecordNotFound
# ignore. likely due to object already being deleted.
end
::Packages::Composer::CacheFile.expired.find_in_batches do |cache_files|
cache_files.each(&:destroy)
rescue ActiveRecord::RecordNotFound
# ignore. likely due to object already being deleted.
end
rescue StandardError => e
Gitlab::ErrorTracking.log_exception(e)
# no-op: to be removed after 14.5 https://gitlab.com/gitlab-org/gitlab/-/issues/333694
end
end
end
......
......@@ -7,20 +7,14 @@ module Packages
data_consistency :always
sidekiq_options retry: 3
sidekiq_options retry: false
feature_category :package_registry
idempotent!
def perform(project_id, package_name, last_page_sha)
project = Project.find_by_id(project_id)
return unless project
Gitlab::Composer::Cache.new(project: project, name: package_name, last_page_sha: last_page_sha).execute
rescue StandardError => e
Gitlab::ErrorTracking.log_exception(e, project_id: project_id)
def perform(*args)
# no-op: to be removed after 14.5 https://gitlab.com/gitlab-org/gitlab/-/issues/333694
end
end
end
......
---
name: remove_composer_v1_cache_code
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67843
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/338264
milestone: '14.2'
name: linear_application_setting_ancestor_scopes
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/70579
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/341346
milestone: '14.4'
type: development
group: group::package
group: group::access
default_enabled: false
......@@ -466,9 +466,6 @@ Settings.cron_jobs['personal_access_tokens_expired_notification_worker']['job_cl
Settings.cron_jobs['repository_archive_cache_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['repository_archive_cache_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['repository_archive_cache_worker']['job_class'] = 'RepositoryArchiveCacheWorker'
Settings.cron_jobs['packages_composer_cache_cleanup_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['packages_composer_cache_cleanup_worker']['cron'] ||= '30 * * * *'
Settings.cron_jobs['packages_composer_cache_cleanup_worker']['job_class'] = 'Packages::Composer::CacheCleanupWorker'
Settings.cron_jobs['import_export_project_cleanup_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['import_export_project_cleanup_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['import_export_project_cleanup_worker']['job_class'] = 'ImportExportProjectCleanupWorker'
......@@ -535,6 +532,9 @@ Settings.cron_jobs['namespaces_prune_aggregation_schedules_worker']['job_class']
Settings.cron_jobs['container_expiration_policy_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['container_expiration_policy_worker']['cron'] ||= '50 * * * *'
Settings.cron_jobs['container_expiration_policy_worker']['job_class'] = 'ContainerExpirationPolicyWorker'
Settings.cron_jobs['image_ttl_group_policy_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['image_ttl_group_policy_worker']['cron'] ||= '40 0 * * *'
Settings.cron_jobs['image_ttl_group_policy_worker']['job_class'] = 'DependencyProxy::ImageTtlGroupPolicyWorker'
Settings.cron_jobs['x509_issuer_crl_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['x509_issuer_crl_check_worker']['cron'] ||= '30 1 * * *'
Settings.cron_jobs['x509_issuer_crl_check_worker']['job_class'] = 'X509IssuerCrlCheckWorker'
......
......@@ -91,6 +91,10 @@
- 1
- - dependency_proxy
- 1
- - dependency_proxy_blob
- 1
- - dependency_proxy_manifest
- 1
- - deployment
- 3
- - design_management_copy_design_collection
......
# frozen_string_literal: true
# Adds the setting that caps how many DependencyProxy cleanup jobs may run
# concurrently (see DependencyProxy::CleanupWorker#max_running_jobs).
# Reversible: `change` with add_column is auto-invertible.
class AddDependencyProxyTtlGroupPolicyWorkerCapacityToApplicationSettings < Gitlab::Database::Migration[1.0]
def change
add_column :application_settings,
:dependency_proxy_ttl_group_policy_worker_capacity,
:smallint,
default: 2,
null: false
end
end
# frozen_string_literal: true
# Backs the model-level `greater_than_or_equal_to: 0` validation with a
# database CHECK constraint so the capacity setting can never go negative.
class AddAppSettingsDepProxyTtlWorkerCapacityCheckConstraint < Gitlab::Database::Migration[1.0]
CONSTRAINT_NAME = 'app_settings_dep_proxy_ttl_policies_worker_capacity_positive'
# add_check_constraint validates concurrently and must run outside a
# transaction.
disable_ddl_transaction!
def up
add_check_constraint :application_settings, 'dependency_proxy_ttl_group_policy_worker_capacity >= 0', CONSTRAINT_NAME
end
def down
remove_check_constraint :application_settings, CONSTRAINT_NAME
end
end
# frozen_string_literal: true
# Widens the manifests uniqueness constraint from (group_id, file_name) to
# (group_id, file_name, status) so a new `default` manifest can be created
# while an expired row with the same file name still awaits cleanup.
class UpdateDependencyProxyManifestsUniquenessConstraint < Gitlab::Database::Migration[1.0]
# Concurrent index creation/removal must run outside a transaction.
disable_ddl_transaction!
NEW_INDEX_NAME = 'index_dep_prox_manifests_on_group_id_file_name_and_status'
OLD_INDEX_NAME = 'index_dependency_proxy_manifests_on_group_id_and_file_name'
def up
add_concurrent_index :dependency_proxy_manifests, [:group_id, :file_name, :status], unique: true, name: NEW_INDEX_NAME
remove_concurrent_index_by_name :dependency_proxy_manifests, OLD_INDEX_NAME
end
def down
add_concurrent_index :dependency_proxy_manifests, [:group_id, :file_name], unique: true, name: OLD_INDEX_NAME
remove_concurrent_index_by_name :dependency_proxy_manifests, NEW_INDEX_NAME
end
end
# frozen_string_literal: true
# Indexes `status` on both dependency-proxy tables to speed up the
# status-scoped queries used by the cleanup workers (expired/processing/
# error counts and the `expired` scope).
class AddStatusIndexToDependencyProxyTables < Gitlab::Database::Migration[1.0]
MANIFEST_INDEX_NAME = 'index_dependency_proxy_manifests_on_status'
BLOB_INDEX_NAME = 'index_dependency_proxy_blobs_on_status'
# Concurrent index creation/removal must run outside a transaction.
disable_ddl_transaction!
def up
add_concurrent_index :dependency_proxy_manifests, :status, name: MANIFEST_INDEX_NAME
add_concurrent_index :dependency_proxy_blobs, :status, name: BLOB_INDEX_NAME
end
def down
remove_concurrent_index_by_name :dependency_proxy_manifests, MANIFEST_INDEX_NAME
remove_concurrent_index_by_name :dependency_proxy_blobs, BLOB_INDEX_NAME
end
end
# frozen_string_literal: true
# Composite (group_id, status, id) indexes supporting per-group queries
# filtered by status on the dependency-proxy tables (e.g. the `active`
# scope used when looking up a group's blobs/manifests).
class AddGroupIdStatusIdIndexToDependencyProxyTables < Gitlab::Database::Migration[1.0]
MANIFEST_INDEX_NAME = 'index_dependency_proxy_manifests_on_group_id_status_and_id'
BLOB_INDEX_NAME = 'index_dependency_proxy_blobs_on_group_id_status_and_id'
# Concurrent index creation/removal must run outside a transaction.
disable_ddl_transaction!
def up
add_concurrent_index :dependency_proxy_manifests, [:group_id, :status, :id], name: MANIFEST_INDEX_NAME
add_concurrent_index :dependency_proxy_blobs, [:group_id, :status, :id], name: BLOB_INDEX_NAME
end
def down
remove_concurrent_index_by_name :dependency_proxy_manifests, MANIFEST_INDEX_NAME
remove_concurrent_index_by_name :dependency_proxy_blobs, BLOB_INDEX_NAME
end
end
e6342d440d398980470f4dd018c5df56d0b5d4df11caa7ba5dd2e92578dbf678
\ No newline at end of file
d0b2ee97781a5d3c671b855fb6be844431a73584be47ba35d83c7e8cfec69bcb
\ No newline at end of file
377af41414793d7e52ffbb1fd60f2f19c58cd63bb0e85192983b5bfe98515ae8
\ No newline at end of file
2ab67d4cc17d0fdf01b5861a46d6ec51d1e76e7e88209b0964a884edd22cc63d
\ No newline at end of file
f257ff9896e2d90ced39c2c010df1d4b74badae046651a190585c9c47342d119
\ No newline at end of file
......@@ -10344,7 +10344,9 @@ CREATE TABLE application_settings (
throttle_authenticated_deprecated_api_requests_per_period integer DEFAULT 3600 NOT NULL,
throttle_authenticated_deprecated_api_period_in_seconds integer DEFAULT 3600 NOT NULL,
throttle_authenticated_deprecated_api_enabled boolean DEFAULT false NOT NULL,
dependency_proxy_ttl_group_policy_worker_capacity smallint DEFAULT 2 NOT NULL,
CONSTRAINT app_settings_container_reg_cleanup_tags_max_list_size_positive CHECK ((container_registry_cleanup_tags_service_max_list_size >= 0)),
CONSTRAINT app_settings_dep_proxy_ttl_policies_worker_capacity_positive CHECK ((dependency_proxy_ttl_group_policy_worker_capacity >= 0)),
CONSTRAINT app_settings_ext_pipeline_validation_service_url_text_limit CHECK ((char_length(external_pipeline_validation_service_url) <= 255)),
CONSTRAINT app_settings_registry_exp_policies_worker_capacity_positive CHECK ((container_registry_expiration_policies_worker_capacity >= 0)),
CONSTRAINT app_settings_yaml_max_depth_positive CHECK ((max_yaml_depth > 0)),
......@@ -24835,11 +24837,19 @@ CREATE INDEX index_dep_ci_build_trace_sections_on_project_id ON dep_ci_build_tra
CREATE INDEX index_dep_ci_build_trace_sections_on_section_name_id ON dep_ci_build_trace_sections USING btree (section_name_id);
CREATE UNIQUE INDEX index_dep_prox_manifests_on_group_id_file_name_and_status ON dependency_proxy_manifests USING btree (group_id, file_name, status);
CREATE INDEX index_dependency_proxy_blobs_on_group_id_and_file_name ON dependency_proxy_blobs USING btree (group_id, file_name);
CREATE INDEX index_dependency_proxy_blobs_on_group_id_status_and_id ON dependency_proxy_blobs USING btree (group_id, status, id);
CREATE INDEX index_dependency_proxy_blobs_on_status ON dependency_proxy_blobs USING btree (status);
CREATE INDEX index_dependency_proxy_group_settings_on_group_id ON dependency_proxy_group_settings USING btree (group_id);
CREATE UNIQUE INDEX index_dependency_proxy_manifests_on_group_id_and_file_name ON dependency_proxy_manifests USING btree (group_id, file_name);
CREATE INDEX index_dependency_proxy_manifests_on_group_id_status_and_id ON dependency_proxy_manifests USING btree (group_id, status, id);
CREATE INDEX index_dependency_proxy_manifests_on_status ON dependency_proxy_manifests USING btree (status);
CREATE INDEX index_deploy_key_id_on_protected_branch_push_access_levels ON protected_branch_push_access_levels USING btree (deploy_key_id);
......@@ -5180,6 +5180,7 @@ The edge type for [`CiRunner`](#cirunner).
| ---- | ---- | ----------- |
| <a id="cirunneredgecursor"></a>`cursor` | [`String!`](#string) | A cursor for use in pagination. |
| <a id="cirunneredgenode"></a>`node` | [`CiRunner`](#cirunner) | The item at the end of the edge. |
| <a id="cirunneredgeweburl"></a>`webUrl` | [`String`](#string) | Web URL of the runner. The value depends on where you put this field in the query. You can use it for projects or groups. |
#### `CiStageConnection`
......@@ -8399,6 +8400,7 @@ Represents the total number of issues and their weights for a particular day.
| ---- | ---- | ----------- |
| <a id="cirunneraccesslevel"></a>`accessLevel` | [`CiRunnerAccessLevel!`](#cirunneraccesslevel) | Access level of the runner. |
| <a id="cirunneractive"></a>`active` | [`Boolean!`](#boolean) | Indicates the runner is allowed to receive jobs. |
| <a id="cirunneradminurl"></a>`adminUrl` | [`String`](#string) | Admin URL of the runner. Only available for administrators. |
| <a id="cirunnercontactedat"></a>`contactedAt` | [`Time`](#time) | Last contact from the runner. |
| <a id="cirunnerdescription"></a>`description` | [`String`](#string) | Description of the runner. |
| <a id="cirunnerid"></a>`id` | [`CiRunnerID!`](#cirunnerid) | ID of the runner. |
......
......@@ -1284,6 +1284,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items {
......@@ -1308,6 +1309,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items > li .badge.badge-pill {
......@@ -1605,17 +1607,7 @@ svg.s16 {
.rect-avatar.s16 {
border-radius: 2px;
}
.rect-avatar.s32,
.nav-sidebar-inner-scroll
> div.context-header
a
.avatar-container.rect-avatar
.avatar.s32,
.sidebar-top-level-items
.context-header
a
.avatar-container.rect-avatar
.avatar.s32 {
.rect-avatar.s32 {
border-radius: 4px;
}
body.gl-dark .navbar-gitlab {
......
......@@ -1264,6 +1264,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items {
......@@ -1288,6 +1289,7 @@ input {
a
.avatar-container.rect-avatar
.avatar.s32 {
border-radius: 4px;
box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.08);
}
.sidebar-top-level-items > li .badge.badge-pill {
......@@ -1585,17 +1587,7 @@ svg.s16 {
.rect-avatar.s16 {
border-radius: 2px;
}
.rect-avatar.s32,
.nav-sidebar-inner-scroll
> div.context-header
a
.avatar-container.rect-avatar
.avatar.s32,
.sidebar-top-level-items
.context-header
a
.avatar-container.rect-avatar
.avatar.s32 {
.rect-avatar.s32 {
border-radius: 4px;
}
......
# frozen_string_literal: true
module Admin
# View helper deciding whether group IP-restriction settings are shown.
module IpRestrictionHelper
# True when the group's licence grants :group_ip_restriction, or when the
# feature is unlocked through usage-ping registration features.
def ip_restriction_feature_available?(group)
feature = :group_ip_restriction
licensed = group.licensed_feature_available?(feature)
licensed || License.features_with_usage_ping.include?(feature)
end
end
end
# frozen_string_literal: true
module Admin
# View helper deciding whether repository size-limit settings are shown.
module RepoSizeLimitHelper
# True when the licence grants :repository_size_limit, or when the
# feature is unlocked through usage-ping registration features.
def repo_size_limit_feature_available?
feature = :repository_size_limit
licensed = License.feature_available?(feature)
licensed || License.features_with_usage_ping.include?(feature)
end
end
end
......@@ -384,11 +384,14 @@ module EE
private
def elasticsearch_limited_project_exists?(project)
indexed_namespaces = ::Gitlab::ObjectHierarchy
.new(::Namespace.where(id: project.namespace_id))
.base_and_ancestors
.joins(:elasticsearch_indexed_namespace)
project_namespaces = ::Namespace.where(id: project.namespace_id)
indexed_namespaces = if ::Feature.enabled?(:linear_application_setting_ancestor_scopes, default_enabled: :yaml)
project_namespaces.self_and_ancestors
else
::Gitlab::ObjectHierarchy.new(project_namespaces).base_and_ancestors
end
indexed_namespaces = indexed_namespaces.joins(:elasticsearch_indexed_namespace)
indexed_namespaces = ::Project.where('EXISTS (?)', indexed_namespaces)
indexed_projects = ::Project.where('EXISTS (?)', ElasticsearchIndexedProject.where(project_id: project.id))
......
......@@ -10,6 +10,7 @@ module EE
extend ::Gitlab::Utils::Override
extend ::Gitlab::Cache::RequestCache
include ::Gitlab::Utils::StrongMemoize
include ::Admin::RepoSizeLimitHelper
GIT_LFS_DOWNLOAD_OPERATION = 'download'
PUBLIC_COST_FACTOR_RELEASE_DAY = Date.new(2021, 7, 17).freeze
......@@ -601,7 +602,7 @@ module EE
current_size_proc: -> { statistics.total_repository_size },
limit: actual_size_limit,
namespace: namespace,
enabled: License.feature_available?(:repository_size_limit)
enabled: repo_size_limit_feature_available?
)
end
end
......
......@@ -12,6 +12,8 @@ class GroupWiki < Wiki
end
def track_wiki_repository(shard)
return unless ::Gitlab::Database.read_write?
storage_record = container.group_wiki_repository || container.build_group_wiki_repository
storage_record.update!(shard_name: shard, disk_path: storage.disk_path)
end
......
......@@ -15,6 +15,11 @@ class License < ApplicationRecord
EES_FEATURES_WITH_USAGE_PING = %i[
send_emails_from_admin_area
repository_size_limit
].freeze
EEP_FEATURES_WITH_USAGE_PING = %i[
group_ip_restriction
].freeze
EES_FEATURES = %i[
......@@ -45,7 +50,6 @@ class License < ApplicationRecord
protected_refs_for_users
push_rules
repository_mirrors
repository_size_limit
resource_access_token
seat_link
scoped_issue_board
......@@ -54,7 +58,7 @@ class License < ApplicationRecord
wip_limits
].freeze + EES_FEATURES_WITH_USAGE_PING
EEP_FEATURES = EES_FEATURES + %i[
EEP_FEATURES = EES_FEATURES + EEP_FEATURES_WITH_USAGE_PING + %i[
adjourned_deletion_for_projects_and_groups
admin_audit_log
auditor_user
......@@ -92,7 +96,6 @@ class License < ApplicationRecord
group_allowed_email_domains
group_coverage_reports
group_forking_protection
group_ip_restriction
group_merge_request_analytics
group_merge_request_approval_settings
group_milestone_project_releases
......@@ -205,7 +208,7 @@ class License < ApplicationRecord
end
end.freeze
FEATURES_WITH_USAGE_PING = EES_FEATURES_WITH_USAGE_PING
FEATURES_WITH_USAGE_PING = EES_FEATURES_WITH_USAGE_PING + EEP_FEATURES_WITH_USAGE_PING
# Add on codes that may occur in legacy licenses that don't have a plan yet.
FEATURES_FOR_ADD_ONS = {
......
- return unless License.feature_available?(:repository_size_limit)
- return unless repo_size_limit_feature_available?
- form = local_assigns.fetch(:form)
......
- return if !group.licensed_feature_available?(:group_ip_restriction) || group.parent_id.present?
- return if !ip_restriction_feature_available?(group) || group.parent_id.present?
- hidden_input_id = 'group_ip_restriction_ranges'
- label_id = "#{hidden_input_id}_label"
......
- return unless current_user.admin? && License.feature_available?(:repository_size_limit)
- return unless current_user.admin? && repo_size_limit_feature_available?
- form = local_assigns.fetch(:form)
- type = local_assigns.fetch(:type)
......
- return unless current_user.admin? && License.feature_available?(:repository_size_limit)
- return unless current_user.admin? && repo_size_limit_feature_available?
- form = local_assigns.fetch(:form)
- is_project = local_assigns.fetch(:type) == :project
......
......@@ -12,7 +12,7 @@ module Gitlab
end
def allows_current_ip?
return true unless group&.feature_available?(:group_ip_restriction)
return true unless group&.feature_available?(:group_ip_restriction) || ::License.features_with_usage_ping.include?(:group_ip_restriction)
current_ip_address = Gitlab::IpAddressState.current
......
# frozen_string_literal: true
require 'spec_helper'
# Specs for Admin::IpRestrictionHelper#ip_restriction_feature_available?.
# The helper is true when the group's license grants :group_ip_restriction,
# or when the feature is included in License.features_with_usage_ping.
RSpec.describe Admin::IpRestrictionHelper do
let(:group) { create(:group) }
describe '#ip_restriction_feature_available' do
subject { helper.ip_restriction_feature_available?(group) }
# Directly licensed: available regardless of usage-ping settings.
context 'when group_ip_restriction feature is available' do
before do
stub_licensed_features(group_ip_restriction: true)
end
it { is_expected.to be_truthy }
end
context 'when group_ip_restriction feature is disabled' do
before do
stub_licensed_features(group_ip_restriction: false)
end
it { is_expected.to be_falsey }
end
# Not directly licensed; availability can still come through the
# usage-ping-gated feature set when both settings are enabled.
# NOTE(review): unlike the Admin::RepoSizeLimitHelper spec, these contexts
# do not stub the licensed feature to false explicitly — presumably the
# default test license lacks :group_ip_restriction; confirm.
context 'when usage ping is enabled' do
before do
stub_application_setting(usage_ping_enabled: true)
end
context 'when usage_ping_features is enabled' do
before do
stub_application_setting(usage_ping_features_enabled: true)
end
it { is_expected.to be_truthy }
end
context 'when usage_ping_features is disabled' do
before do
stub_application_setting(usage_ping_features_enabled: false)
end
it { is_expected.to be_falsey }
end
end
context 'when usage ping is disabled' do
before do
stub_application_setting(usage_ping_enabled: false)
end
it { is_expected.to be_falsey }
end
end
end
# frozen_string_literal: true
require 'spec_helper'
# Specs for Admin::RepoSizeLimitHelper#repo_size_limit_feature_available?.
# The helper is true when the license grants :repository_size_limit, or when
# the feature is included in License.features_with_usage_ping.
RSpec.describe Admin::RepoSizeLimitHelper do
describe '#repo_size_limit_feature_available?' do
subject { helper.repo_size_limit_feature_available? }
# Directly licensed: available regardless of usage-ping settings.
context 'when repository_size_limit feature is available' do
before do
stub_licensed_features(repository_size_limit: true)
end
it { is_expected.to be_truthy }
end
context 'when repo_size_limit_feature_available is not available' do
before do
stub_licensed_features(repository_size_limit: false)
end
it { is_expected.to be_falsey }
end
# Not directly licensed; availability can still come through the
# usage-ping-gated feature set when both settings are enabled.
context 'when usage ping is enabled' do
before do
stub_licensed_features(repository_size_limit: false)
stub_application_setting(usage_ping_enabled: true)
end
context 'when usage_ping_features is enabled' do
before do
stub_application_setting(usage_ping_features_enabled: true)
end
it { is_expected.to be_truthy }
end
context 'when usage_ping_features is disabled' do
before do
stub_application_setting(usage_ping_features_enabled: false)
end
it { is_expected.to be_falsey }
end
end
context 'when usage ping is disabled' do
before do
stub_application_setting(usage_ping_enabled: false)
stub_licensed_features(repository_size_limit: false)
end
it { is_expected.to be_falsey }
end
end
end
......@@ -7,6 +7,32 @@ RSpec.describe Gitlab::IpRestriction::Enforcer do
let(:group) { create(:group) }
let(:current_ip) { '192.168.0.2' }
shared_examples 'ip_restriction' do
context 'without restriction' do
it { is_expected.to be_truthy }
end
context 'with restriction' do
before do
ranges.each do |range|
create(:ip_restriction, group: group, range: range)
end
end
context 'address is within one of the ranges' do
let(:ranges) { ['192.168.0.0/24', '255.255.255.224/27'] }
it { is_expected.to be_truthy }
end
context 'address is outside all of the ranges' do
let(:ranges) { ['10.0.0.0/8', '255.255.255.224/27'] }
it { is_expected.to be_falsey }
end
end
end
subject { described_class.new(group).allows_current_ip? }
before do
......@@ -14,33 +40,43 @@ RSpec.describe Gitlab::IpRestriction::Enforcer do
stub_licensed_features(group_ip_restriction: true)
end
context 'without restriction' do
it_behaves_like 'ip_restriction'
context 'group_ip_restriction feature is disabled' do
before do
stub_licensed_features(group_ip_restriction: false)
end
it { is_expected.to be_truthy }
end
context 'with restriction' do
context 'when usage ping is enabled' do
before do
ranges.each do |range|
create(:ip_restriction, group: group, range: range)
end
stub_licensed_features(group_ip_restriction: false)
stub_application_setting(usage_ping_enabled: true)
end
context 'address is within one of the ranges' do
let(:ranges) { ['192.168.0.0/24', '255.255.255.224/27'] }
context 'when usage_ping_features_enabled is enabled' do
before do
stub_application_setting(usage_ping_features_enabled: true)
end
it { is_expected.to be_truthy }
it_behaves_like 'ip_restriction'
end
context 'address is outside all of the ranges' do
let(:ranges) { ['10.0.0.0/8', '255.255.255.224/27'] }
context 'when usage_ping_features_enabled is disabled' do
before do
stub_application_setting(usage_ping_features_enabled: false)
end
it { is_expected.to be_falsey }
it { is_expected.to be_truthy }
end
end
context 'feature is disabled' do
context 'when usage ping is disabled' do
before do
stub_licensed_features(group_ip_restriction: false)
stub_application_setting(usage_ping_enabled: false)
end
it { is_expected.to be_truthy }
......
......@@ -427,31 +427,43 @@ RSpec.describe ApplicationSetting do
end
describe '#elasticsearch_indexes_project?' do
context 'when project is in a subgroup' do
let(:root_group) { create(:group) }
let(:subgroup) { create(:group, parent: root_group) }
let(:project) { create(:project, group: subgroup) }
before do
create(:elasticsearch_indexed_namespace, namespace: root_group)
shared_examples 'whether the project is indexed' do
context 'when project is in a subgroup' do
let(:root_group) { create(:group) }
let(:subgroup) { create(:group, parent: root_group) }
let(:project) { create(:project, group: subgroup) }
before do
create(:elasticsearch_indexed_namespace, namespace: root_group)
end
it 'allows project to be indexed' do
expect(setting.elasticsearch_indexes_project?(project)).to be(true)
end
end
it 'allows project to be indexed' do
expect(setting.elasticsearch_indexes_project?(project)).to be(true)
context 'when project is in a namespace' do
let(:namespace) { create(:namespace) }
let(:project) { create(:project, namespace: namespace) }
before do
create(:elasticsearch_indexed_namespace, namespace: namespace)
end
it 'allows project to be indexed' do
expect(setting.elasticsearch_indexes_project?(project)).to be(true)
end
end
end
context 'when project is in a namespace' do
let(:namespace) { create(:namespace) }
let(:project) { create(:project, namespace: namespace) }
it_behaves_like 'whether the project is indexed'
context 'when feature flag :linear_application_setting_ancestor_scopes is disabled' do
before do
create(:elasticsearch_indexed_namespace, namespace: namespace)
stub_feature_flags(linear_application_setting_ancestor_scopes: false)
end
it 'allows project to be indexed' do
expect(setting.elasticsearch_indexes_project?(project)).to be(true)
end
it_behaves_like 'whether the project is indexed'
end
end
end
......
......@@ -47,6 +47,16 @@ RSpec.describe GroupWiki do
shard_name: shard
)
end
context 'on a read-only instance' do
before do
allow(Gitlab::Database).to receive(:read_only?).and_return(true)
end
it 'does not attempt to create a new entry' do
expect { subject.track_wiki_repository(shard) }.not_to change(wiki_container, :group_wiki_repository)
end
end
end
context 'when a tracking entry exists' do
......@@ -64,6 +74,23 @@ RSpec.describe GroupWiki do
shard_name: shard
)
end
context 'on a read-only instance' do
before do
allow(Gitlab::Database).to receive(:read_only?).and_return(true)
end
it 'does not update the storage location' do
allow(subject.storage).to receive(:disk_path).and_return('fancy/new/path')
subject.track_wiki_repository(shard)
expect(wiki_container.group_wiki_repository).not_to have_attributes(
disk_path: 'fancy/new/path',
shard_name: shard
)
end
end
end
end
......
......@@ -2363,7 +2363,7 @@ RSpec.describe Project do
stub_licensed_features(repository_size_limit: true)
end
it 'is enabled' do
it 'size limit is enabled' do
expect(checker.enabled?).to be_truthy
end
end
......@@ -2373,7 +2373,45 @@ RSpec.describe Project do
stub_licensed_features(repository_size_limit: false)
end
it 'is disabled' do
it 'size limit is disabled' do
expect(checker.enabled?).to be_falsey
end
end
context 'when usage ping is enabled' do
before do
stub_licensed_features(repository_size_limit: false)
stub_application_setting(usage_ping_enabled: true)
end
context 'when usage_ping_features is activated' do
before do
stub_application_setting(usage_ping_features_enabled: true)
end
it 'size limit is enabled' do
expect(checker.enabled?).to be_truthy
end
end
context 'when usage_ping_features is disabled' do
before do
stub_application_setting(usage_ping_features_enabled: false)
end
it 'size limit is disabled' do
expect(checker.enabled?).to be_falsy
end
end
end
context 'when usage ping is disabled' do
before do
stub_licensed_features(repository_size_limit: false)
stub_application_setting(usage_ping_enabled: false)
end
it 'size limit is disabled' do
expect(checker.enabled?).to be_falsey
end
end
......
......@@ -20352,6 +20352,9 @@ msgstr ""
msgid "Limit namespaces and projects that can be indexed"
msgstr ""
msgid "Limit project size at a global, group and project level. %{link_start}Learn more%{link_end}."
msgstr ""
msgid "Limit sign in from multiple IP addresses"
msgstr ""
......@@ -25258,7 +25261,7 @@ msgstr ""
msgid "Pipeline|We are currently unable to fetch pipeline data"
msgstr ""
msgid "Pipeline|You’re about to stop pipeline %{pipelineId}."
msgid "Pipeline|You’re about to stop pipeline #%{pipelineId}."
msgstr ""
msgid "Pipeline|for"
......@@ -28952,6 +28955,9 @@ msgstr ""
msgid "Restoring the project will prevent the project from being removed on this date and restore people's ability to make changes to it."
msgstr ""
msgid "Restrict group access by IP address. %{link_start}Learn more%{link_end}."
msgstr ""
msgid "Restrict membership by email domain"
msgstr ""
......
......@@ -6,6 +6,11 @@ FactoryBot.define do
size { 1234 }
file { fixture_file_upload('spec/fixtures/dependency_proxy/a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.gz') }
file_name { 'a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.gz' }
status { :default }
trait :expired do
status { :expired }
end
end
factory :dependency_proxy_manifest, class: 'DependencyProxy::Manifest' do
......@@ -13,7 +18,12 @@ FactoryBot.define do
size { 1234 }
file { fixture_file_upload('spec/fixtures/dependency_proxy/manifest') }
digest { 'sha256:d0710affa17fad5f466a70159cc458227bd25d4afb39514ef662ead3e6c99515' }
file_name { 'alpine:latest.json' }
sequence(:file_name) { |n| "alpine:latest#{n}.json" }
content_type { 'application/vnd.docker.distribution.manifest.v2+json' }
status { :default }
trait :expired do
status { :expired }
end
end
end
......@@ -6,5 +6,9 @@ FactoryBot.define do
enabled { true }
ttl { 90 }
trait :disabled do
enabled { false }
end
end
end
import { shallowMount } from '@vue/test-utils';
import { GlSprintf } from '@gitlab/ui';
import PipelineStopModal from '~/pipelines/components/pipelines_list/pipeline_stop_modal.vue';
import { mockPipelineHeader } from '../../mock_data';

describe('PipelineStopModal', () => {
  let wrapper;

  // Mount a fresh component per test. GlSprintf is un-stubbed so the
  // interpolated warning message renders its real text.
  beforeEach(() => {
    wrapper = shallowMount(PipelineStopModal, {
      propsData: { pipeline: mockPipelineHeader },
      stubs: { GlSprintf },
    });
  });

  it('should render "stop pipeline" warning', () => {
    // The pipeline id is interpolated into the gl-sprintf message.
    expect(wrapper.text()).toMatch(`You’re about to stop pipeline #${mockPipelineHeader.id}.`);
  });
});
......@@ -11,7 +11,7 @@ RSpec.describe GitlabSchema.types['CiRunner'] do
expected_fields = %w[
id description contacted_at maximum_timeout access_level active status
version short_sha revision locked run_untagged ip_address runner_type tag_list
project_count job_count user_permissions
project_count job_count admin_url user_permissions
]
expect(described_class).to include_graphql_fields(*expected_fields)
......
......@@ -77,6 +77,9 @@ RSpec.describe ApplicationSetting do
it { is_expected.to validate_numericality_of(:container_registry_cleanup_tags_service_max_list_size).only_integer.is_greater_than_or_equal_to(0) }
it { is_expected.to validate_numericality_of(:container_registry_expiration_policies_worker_capacity).only_integer.is_greater_than_or_equal_to(0) }
it { is_expected.to validate_numericality_of(:dependency_proxy_ttl_group_policy_worker_capacity).only_integer.is_greater_than_or_equal_to(0) }
it { is_expected.not_to allow_value(nil).for(:dependency_proxy_ttl_group_policy_worker_capacity) }
it { is_expected.to validate_numericality_of(:snippet_size_limit).only_integer.is_greater_than(0) }
it { is_expected.to validate_numericality_of(:wiki_page_max_content_bytes).only_integer.is_greater_than_or_equal_to(1024) }
it { is_expected.to validate_presence_of(:max_artifacts_size) }
......
......@@ -2,17 +2,16 @@
require 'spec_helper'
RSpec.describe DependencyProxy::Blob, type: :model do
it_behaves_like 'ttl_expirable'
describe 'relationships' do
it { is_expected.to belong_to(:group) }
end
it_behaves_like 'having unique enum values'
describe 'validations' do
it { is_expected.to validate_presence_of(:group) }
it { is_expected.to validate_presence_of(:file) }
it { is_expected.to validate_presence_of(:file_name) }
it { is_expected.to validate_presence_of(:status) }
end
describe '.total_size' do
......
......@@ -20,4 +20,13 @@ RSpec.describe DependencyProxy::ImageTtlGroupPolicy, type: :model do
it { is_expected.to validate_numericality_of(:ttl).allow_nil.is_greater_than(0) }
end
end
describe '.enabled' do
it 'returns policies that are enabled' do
enabled_policy = create(:image_ttl_group_policy)
create(:image_ttl_group_policy, :disabled)
expect(described_class.enabled).to contain_exactly(enabled_policy)
end
end
end
......@@ -2,18 +2,17 @@
require 'spec_helper'
RSpec.describe DependencyProxy::Manifest, type: :model do
it_behaves_like 'ttl_expirable'
describe 'relationships' do
it { is_expected.to belong_to(:group) }
end
it_behaves_like 'having unique enum values'
describe 'validations' do
it { is_expected.to validate_presence_of(:group) }
it { is_expected.to validate_presence_of(:file) }
it { is_expected.to validate_presence_of(:file_name) }
it { is_expected.to validate_presence_of(:digest) }
it { is_expected.to validate_presence_of(:status) }
end
describe 'file is being stored' do
......
......@@ -6,6 +6,7 @@ RSpec.describe 'Query.runner(id)' do
include GraphqlHelpers
let_it_be(:user) { create(:user, :admin) }
let_it_be(:group) { create(:group) }
let_it_be(:active_instance_runner) do
create(:ci_runner, :instance, description: 'Runner 1', contacted_at: 2.hours.ago,
......@@ -18,12 +19,20 @@ RSpec.describe 'Query.runner(id)' do
version: 'adfe157', revision: 'b', ip_address: '10.10.10.10', access_level: 1, run_untagged: true)
end
let_it_be(:active_group_runner) do
create(:ci_runner, :group, groups: [group], description: 'Group runner 1', contacted_at: 2.hours.ago,
active: true, version: 'adfe156', revision: 'a', locked: true, ip_address: '127.0.0.1', maximum_timeout: 600,
access_level: 0, tag_list: %w[tag1 tag2], run_untagged: true)
end
def get_runner(id)
case id
when :active_instance_runner
active_instance_runner
when :inactive_instance_runner
inactive_instance_runner
when :active_group_runner
active_group_runner
end
end
......@@ -62,6 +71,7 @@ RSpec.describe 'Query.runner(id)' do
'runnerType' => runner.instance_type? ? 'INSTANCE_TYPE' : 'PROJECT_TYPE',
'jobCount' => 0,
'projectCount' => nil,
'adminUrl' => "http://localhost/admin/runners/#{runner.id}",
'userPermissions' => {
'readRunner' => true,
'updateRunner' => true,
......@@ -72,6 +82,32 @@ RSpec.describe 'Query.runner(id)' do
end
end
shared_examples 'retrieval with no admin url' do |runner_id|
let(:query) do
wrap_fields(query_graphql_path(query_path, all_graphql_fields_for('CiRunner')))
end
let(:query_path) do
[
[:runner, { id: get_runner(runner_id).to_global_id.to_s }]
]
end
it 'retrieves expected fields' do
post_graphql(query, current_user: user)
runner_data = graphql_data_at(:runner)
expect(runner_data).not_to be_nil
runner = get_runner(runner_id)
expect(runner_data).to match a_hash_including(
'id' => "gid://gitlab/Ci::Runner/#{runner.id}",
'adminUrl' => nil
)
expect(runner_data['tagList']).to match_array runner.tag_list
end
end
shared_examples 'retrieval by unauthorized user' do |runner_id|
let(:query) do
wrap_fields(query_graphql_path(query_path, all_graphql_fields_for('CiRunner')))
......@@ -152,6 +188,39 @@ RSpec.describe 'Query.runner(id)' do
it_behaves_like 'runner details fetch', :inactive_instance_runner
end
describe 'for runner inside group request' do
let(:query) do
%(
query {
group(fullPath: "#{group.full_path}") {
runners {
edges {
webUrl
node {
id
}
}
}
}
}
)
end
it 'retrieves webUrl field with expected value' do
post_graphql(query, current_user: user)
runner_data = graphql_data_at(:group, :runners, :edges)
expect(runner_data).to match_array [
a_hash_including(
'webUrl' => "http://localhost/groups/#{group.full_path}/-/runners/#{active_group_runner.id}",
'node' => {
'id' => "gid://gitlab/Ci::Runner/#{active_group_runner.id}"
}
)
]
end
end
describe 'for multiple runners' do
let_it_be(:project1) { create(:project, :test_repo) }
let_it_be(:project2) { create(:project, :test_repo) }
......@@ -210,6 +279,16 @@ RSpec.describe 'Query.runner(id)' do
it_behaves_like 'retrieval by unauthorized user', :active_instance_runner
end
describe 'by non-admin user' do
let(:user) { create(:user) }
before do
group.add_user(user, Gitlab::Access::OWNER)
end
it_behaves_like 'retrieval with no admin url', :active_group_runner
end
describe 'by unauthenticated user' do
let(:user) { nil }
......
......@@ -4,7 +4,8 @@ require 'spec_helper'
RSpec.describe DependencyProxy::FindOrCreateBlobService do
include DependencyProxyHelpers
let(:blob) { create(:dependency_proxy_blob) }
let_it_be_with_reload(:blob) { create(:dependency_proxy_blob) }
let(:group) { blob.group }
let(:image) { 'alpine' }
let(:tag) { '3.9' }
......@@ -17,11 +18,7 @@ RSpec.describe DependencyProxy::FindOrCreateBlobService do
stub_registry_auth(image, token)
end
context 'no cache' do
before do
stub_blob_download(image, blob_sha)
end
shared_examples 'downloads the remote blob' do
it 'downloads blob from remote registry if there is no cached one' do
expect(subject[:status]).to eq(:success)
expect(subject[:blob]).to be_a(DependencyProxy::Blob)
......@@ -30,15 +27,34 @@ RSpec.describe DependencyProxy::FindOrCreateBlobService do
end
end
context 'no cache' do
before do
stub_blob_download(image, blob_sha)
end
it_behaves_like 'downloads the remote blob'
end
context 'cached blob' do
let(:blob_sha) { blob.file_name.sub('.gz', '') }
it 'uses cached blob instead of downloading one' do
expect { subject }.to change { blob.reload.updated_at }
expect(subject[:status]).to eq(:success)
expect(subject[:blob]).to be_a(DependencyProxy::Blob)
expect(subject[:blob]).to eq(blob)
expect(subject[:from_cache]).to eq true
end
context 'when the cached blob is expired' do
before do
blob.update_column(:status, DependencyProxy::Blob.statuses[:expired])
stub_blob_download(image, blob_sha)
end
it_behaves_like 'downloads the remote blob'
end
end
context 'no such blob exists remotely' do
......
......@@ -21,19 +21,19 @@ RSpec.describe DependencyProxy::FindOrCreateManifestService do
describe '#execute' do
subject { described_class.new(group, image, tag, token).execute }
shared_examples 'downloading the manifest' do
it 'downloads manifest from remote registry if there is no cached one', :aggregate_failures do
expect { subject }.to change { group.dependency_proxy_manifests.count }.by(1)
expect(subject[:status]).to eq(:success)
expect(subject[:manifest]).to be_a(DependencyProxy::Manifest)
expect(subject[:manifest]).to be_persisted
expect(subject[:from_cache]).to eq false
end
end
context 'when no manifest exists' do
let_it_be(:image) { 'new-image' }
shared_examples 'downloading the manifest' do
it 'downloads manifest from remote registry if there is no cached one', :aggregate_failures do
expect { subject }.to change { group.dependency_proxy_manifests.count }.by(1)
expect(subject[:status]).to eq(:success)
expect(subject[:manifest]).to be_a(DependencyProxy::Manifest)
expect(subject[:manifest]).to be_persisted
expect(subject[:from_cache]).to eq false
end
end
context 'successful head request' do
before do
stub_manifest_head(image, tag, headers: headers)
......@@ -60,6 +60,8 @@ RSpec.describe DependencyProxy::FindOrCreateManifestService do
shared_examples 'using the cached manifest' do
it 'uses cached manifest instead of downloading one', :aggregate_failures do
expect { subject }.to change { dependency_proxy_manifest.reload.updated_at }
expect(subject[:status]).to eq(:success)
expect(subject[:manifest]).to be_a(DependencyProxy::Manifest)
expect(subject[:manifest]).to eq(dependency_proxy_manifest)
......@@ -87,6 +89,16 @@ RSpec.describe DependencyProxy::FindOrCreateManifestService do
end
end
context 'when the cached manifest is expired' do
before do
dependency_proxy_manifest.update_column(:status, DependencyProxy::Manifest.statuses[:expired])
stub_manifest_head(image, tag, headers: headers)
stub_manifest_download(image, tag, headers: headers)
end
it_behaves_like 'downloading the manifest'
end
context 'failed connection' do
before do
expect(DependencyProxy::HeadManifestService).to receive(:new).and_raise(Net::OpenTimeout)
......
......@@ -24,25 +24,6 @@ RSpec.describe Packages::Composer::CreatePackageService do
let(:created_package) { Packages::Package.composer.last }
shared_examples 'using the cache update worker' do
context 'with remove_composer_v1_cache_code enabled' do
it 'does not enqueue a cache update job' do
expect(::Packages::Composer::CacheUpdateWorker).not_to receive(:perform_async)
subject
end
end
context 'with remove_composer_v1_cache_code disabled' do
it 'enqueues a cache update job' do
stub_feature_flags(remove_composer_v1_cache_code: true)
expect(::Packages::Composer::CacheUpdateWorker).not_to receive(:perform_async)
subject
end
end
end
context 'without an existing package' do
context 'with a branch' do
let(:branch) { project.repository.find_branch('master') }
......@@ -64,7 +45,6 @@ RSpec.describe Packages::Composer::CreatePackageService do
it_behaves_like 'assigns build to package'
it_behaves_like 'assigns status to package'
it_behaves_like 'using the cache update worker'
end
context 'with a tag' do
......@@ -89,7 +69,6 @@ RSpec.describe Packages::Composer::CreatePackageService do
it_behaves_like 'assigns build to package'
it_behaves_like 'assigns status to package'
it_behaves_like 'using the cache update worker'
end
end
......@@ -106,8 +85,6 @@ RSpec.describe Packages::Composer::CreatePackageService do
.to change { Packages::Package.composer.count }.by(0)
.and change { Packages::Composer::Metadatum.count }.by(0)
end
it_behaves_like 'using the cache update worker'
end
context 'belonging to another project' do
......@@ -129,8 +106,6 @@ RSpec.describe Packages::Composer::CreatePackageService do
.to change { Packages::Package.composer.count }.by(1)
.and change { Packages::Composer::Metadatum.count }.by(1)
end
it_behaves_like 'using the cache update worker'
end
end
end
......
......@@ -85,6 +85,7 @@
- "./spec/requests/api/ci/runner/runners_post_spec.rb"
- "./spec/requests/api/ci/runners_spec.rb"
- "./spec/requests/api/commit_statuses_spec.rb"
- "./spec/requests/api/graphql/ci/runner_spec.rb"
- "./spec/requests/api/graphql/group_query_spec.rb"
- "./spec/requests/api/graphql/merge_request/merge_request_spec.rb"
- "./spec/requests/api/graphql/mutations/merge_requests/create_spec.rb"
......
# frozen_string_literal: true
require 'spec_helper'
# Shared examples for models that are TTL-expirable. Including specs rely on
# a FactoryBot factory named after the model (see class_symbol below) that
# supports an :expired trait and a status enum including at least :default,
# :expired and :error.
RSpec.shared_examples 'ttl_expirable' do
# Factory name derived from the described model, e.g. :dependency_proxy_blob.
let_it_be(:class_symbol) { described_class.model_name.param_key.to_sym }
it_behaves_like 'having unique enum values'
describe 'validations' do
it { is_expected.to validate_presence_of(:status) }
end
describe '.updated_before' do
# rubocop:disable Rails/SaveBang
let_it_be_with_reload(:item1) { create(class_symbol) }
let_it_be(:item2) { create(class_symbol) }
# rubocop:enable Rails/SaveBang
# update_column skips validations/callbacks so updated_at stays backdated.
before do
item1.update_column(:updated_at, 1.month.ago)
end
# NOTE(review): the description says "created at" but the setup backdates
# updated_at and the scope is named updated_before — wording only; the
# assertion itself is consistent with an updated_at filter.
it 'returns items with created at older than the supplied number of days' do
expect(described_class.updated_before(10)).to contain_exactly(item1)
end
end
# Only :default items are active; :expired and :error are excluded.
describe '.active' do
# rubocop:disable Rails/SaveBang
let_it_be(:item1) { create(class_symbol) }
let_it_be(:item2) { create(class_symbol, :expired) }
let_it_be(:item3) { create(class_symbol, status: :error) }
# rubocop:enable Rails/SaveBang
it 'returns only active items' do
expect(described_class.active).to contain_exactly(item1)
end
end
# lock_next_by(column) yields the single row with the oldest value in the
# given column.
describe '.lock_next_by' do
let_it_be(:item1) { create(class_symbol, created_at: 1.month.ago, updated_at: 1.day.ago) }
let_it_be(:item2) { create(class_symbol, created_at: 1.year.ago, updated_at: 1.year.ago) }
let_it_be(:item3) { create(class_symbol, created_at: 2.years.ago, updated_at: 1.month.ago) }
it 'returns the first item sorted by the argument' do
expect(described_class.lock_next_by(:updated_at)).to contain_exactly(item2)
expect(described_class.lock_next_by(:created_at)).to contain_exactly(item3)
end
end
end
# frozen_string_literal: true
# Shared examples for dependency-proxy cleanup workers (limited-capacity
# workers deleting one expired artifact per perform_work call).
# Including specs must define +factory_type+: a FactoryBot factory symbol
# supporting an :expired trait and a +group+ association.
RSpec.shared_examples 'dependency_proxy_cleanup_worker' do
let_it_be(:group) { create(:group) }
let(:worker) { described_class.new }
describe '#perform_work' do
subject(:perform_work) { worker.perform_work }
context 'with no work to do' do
it { is_expected.to be_nil }
end
context 'with work to do' do
# artifact3 has the oldest updated_at among the *expired* artifacts, so
# it should be deleted first; artifact4 is old but not expired.
let_it_be(:artifact1) { create(factory_type, :expired, group: group) }
let_it_be(:artifact2) { create(factory_type, :expired, group: group, updated_at: 6.months.ago, created_at: 2.years.ago) }
let_it_be_with_reload(:artifact3) { create(factory_type, :expired, group: group, updated_at: 1.year.ago, created_at: 1.year.ago) }
let_it_be(:artifact4) { create(factory_type, group: group, updated_at: 2.years.ago, created_at: 2.years.ago) }
it 'deletes the oldest expired artifact based on updated_at', :aggregate_failures do
expect(worker).to receive(:log_extra_metadata_on_done).with("#{factory_type}_id".to_sym, artifact3.id)
expect(worker).to receive(:log_extra_metadata_on_done).with(:group_id, group.id)
expect { perform_work }.to change { artifact1.class.count }.by(-1)
end
end
end
# Concurrency capacity is driven by an application setting.
describe '#max_running_jobs' do
let(:capacity) { 5 }
subject { worker.max_running_jobs }
before do
stub_application_setting(dependency_proxy_ttl_group_policy_worker_capacity: capacity)
end
it { is_expected.to eq(capacity) }
end
describe '#remaining_work_count' do
let_it_be(:expired_artifacts) do
(1..3).map do |_|
create(factory_type, :expired, group: group)
end
end
subject { worker.remaining_work_count }
it { is_expected.to eq(3) }
end
end
# frozen_string_literal: true
require 'spec_helper'
# Runs the shared dependency-proxy cleanup behaviour against blob artifacts.
RSpec.describe DependencyProxy::CleanupBlobWorker do
# factory_type is consumed by the shared examples below.
let_it_be(:factory_type) { :dependency_proxy_blob }
it_behaves_like 'dependency_proxy_cleanup_worker'
end
# frozen_string_literal: true
require 'spec_helper'
# Runs the shared dependency-proxy cleanup behaviour against manifest artifacts.
RSpec.describe DependencyProxy::CleanupManifestWorker do
# factory_type is consumed by the shared examples below.
let_it_be(:factory_type) { :dependency_proxy_manifest }
it_behaves_like 'dependency_proxy_cleanup_worker'
end
# frozen_string_literal: true
require 'spec_helper'
# The worker flips dependency-proxy blobs/manifests older than the group TTL
# policy to :expired, logs per-status counts, and kicks off the
# limited-capacity cleanup workers.
RSpec.describe DependencyProxy::ImageTtlGroupPolicyWorker do
let(:worker) { described_class.new }
describe '#perform' do
# Factory presumably builds an enabled policy (enabled: true) — confirm
# against the :image_ttl_group_policy factory defaults.
let_it_be(:policy) { create(:image_ttl_group_policy) }
let_it_be(:group) { policy.group }
subject { worker.perform }
context 'when there are images to expire' do
# Year-old artifacts should expire; freshly created ones stay :default.
let_it_be_with_reload(:old_blob) { create(:dependency_proxy_blob, group: group, updated_at: 1.year.ago) }
let_it_be_with_reload(:old_manifest) { create(:dependency_proxy_manifest, group: group, updated_at: 1.year.ago) }
let_it_be_with_reload(:new_blob) { create(:dependency_proxy_blob, group: group) }
let_it_be_with_reload(:new_manifest) { create(:dependency_proxy_manifest, group: group) }
it 'calls the limited capacity workers', :aggregate_failures do
expect(DependencyProxy::CleanupBlobWorker).to receive(:perform_with_capacity)
expect(DependencyProxy::CleanupManifestWorker).to receive(:perform_with_capacity)
subject
end
it 'updates the old images to expired' do
expect { subject }
.to change { old_blob.reload.status }.from('default').to('expired')
.and change { old_manifest.reload.status }.from('default').to('expired')
.and not_change { new_blob.reload.status }
.and not_change { new_manifest.reload.status }
end
end
context 'when there are no images to expire' do
it 'does not do anything', :aggregate_failures do
expect(DependencyProxy::CleanupBlobWorker).not_to receive(:perform_with_capacity)
expect(DependencyProxy::CleanupManifestWorker).not_to receive(:perform_with_capacity)
subject
end
end
# The worker logs expired/processing/error counts for observability.
context 'counts logging' do
let_it_be(:expired_blob) { create(:dependency_proxy_blob, :expired, group: group) }
let_it_be(:expired_blob2) { create(:dependency_proxy_blob, :expired, group: group) }
let_it_be(:expired_manifest) { create(:dependency_proxy_manifest, :expired, group: group) }
let_it_be(:processing_blob) { create(:dependency_proxy_blob, status: :processing, group: group) }
let_it_be(:processing_manifest) { create(:dependency_proxy_manifest, status: :processing, group: group) }
let_it_be(:error_blob) { create(:dependency_proxy_blob, status: :error, group: group) }
let_it_be(:error_manifest) { create(:dependency_proxy_manifest, status: :error, group: group) }
it 'logs all the counts', :aggregate_failures do
expect(worker).to receive(:log_extra_metadata_on_done).with(:expired_dependency_proxy_blob_count, 2)
expect(worker).to receive(:log_extra_metadata_on_done).with(:expired_dependency_proxy_manifest_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:processing_dependency_proxy_blob_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:processing_dependency_proxy_manifest_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:error_dependency_proxy_blob_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:error_dependency_proxy_manifest_count, 1)
subject
end
context 'with load balancing enabled', :db_load_balancing do
# Counting is read-only, so it should be routed to a replica.
it 'reads the counts from the replica' do
expect(Gitlab::Database::LoadBalancing::Session.current).to receive(:use_replicas_for_read_queries).and_call_original
subject
end
end
end
end
end
......@@ -198,6 +198,8 @@ RSpec.describe 'Every Sidekiq worker' do
'DeleteMergedBranchesWorker' => 3,
'DeleteStoredFilesWorker' => 3,
'DeleteUserWorker' => 3,
'DependencyProxy::CleanupBlobWorker' => 3,
'DependencyProxy::CleanupManifestWorker' => 3,
'Deployments::AutoRollbackWorker' => 3,
'Deployments::DropOlderDeploymentsWorker' => 3,
'Deployments::FinishedWorker' => 3,
......@@ -359,7 +361,7 @@ RSpec.describe 'Every Sidekiq worker' do
'ObjectPool::ScheduleJoinWorker' => 3,
'ObjectStorage::BackgroundMoveWorker' => 5,
'ObjectStorage::MigrateUploadsWorker' => 3,
'Packages::Composer::CacheUpdateWorker' => 3,
'Packages::Composer::CacheUpdateWorker' => false,
'Packages::Go::SyncPackagesWorker' => 3,
'Packages::Maven::Metadata::SyncWorker' => 3,
'Packages::Nuget::ExtractionWorker' => 3,
......
......@@ -18,12 +18,8 @@ RSpec.describe Packages::Composer::CacheCleanupWorker, type: :worker do
cache_file4.update_columns(namespace_id: nil)
end
it 'deletes expired packages' do
expect { subject }.to change { Packages::Composer::CacheFile.count }.by(-2)
expect { cache_file1.reload }.not_to raise_error ActiveRecord::RecordNotFound
expect { cache_file2.reload }.not_to raise_error ActiveRecord::RecordNotFound
expect { cache_file3.reload }.to raise_error ActiveRecord::RecordNotFound
expect { cache_file4.reload }.to raise_error ActiveRecord::RecordNotFound
it 'does nothing' do
expect { subject }.not_to change { Packages::Composer::CacheFile.count }
end
end
end
......@@ -21,8 +21,8 @@ RSpec.describe Packages::Composer::CacheUpdateWorker, type: :worker do
include_examples 'an idempotent worker' do
context 'creating a package' do
it 'updates the cache' do
expect { subject }.to change { Packages::Composer::CacheFile.count }.by(1)
it 'does nothing' do
expect { subject }.to change { Packages::Composer::CacheFile.count }.by(0)
end
end
......@@ -36,12 +36,12 @@ RSpec.describe Packages::Composer::CacheUpdateWorker, type: :worker do
package.destroy!
end
it 'marks the file for deletion' do
it 'does nothing' do
expect { subject }.not_to change { Packages::Composer::CacheFile.count }
cache_file = Packages::Composer::CacheFile.last
expect(cache_file.reload.delete_at).not_to be_nil
expect(cache_file.reload.delete_at).to be_nil
end
end
end
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment