Commit b4b9b385 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 2e31c85a
@@ -5,15 +5,17 @@ import {
  GlSprintf,
  GlAlert,
  GlDropdown,
+  GlDropdownDivider,
  GlDropdownItem,
  GlFormGroup,
  GlSearchBoxByClick,
  GlInfiniteScroll,
} from '@gitlab/ui';
+import { s__ } from '~/locale';
import DateTimePicker from '~/vue_shared/components/date_time_picker/date_time_picker.vue';
import LogControlButtons from './log_control_buttons.vue';
-import { timeRanges, defaultTimeRange } from '~/monitoring/constants';
+import { timeRanges, defaultTimeRange } from '~/vue_shared/constants';
import { timeRangeFromUrl } from '~/monitoring/utils';
import { formatDate } from '../utils';
@@ -22,6 +24,7 @@ export default {
    GlSprintf,
    GlAlert,
    GlDropdown,
+    GlDropdownDivider,
    GlDropdownItem,
    GlFormGroup,
    GlSearchBoxByClick,
@@ -90,6 +93,16 @@ export default {
    shouldShowElasticStackCallout() {
      return !this.isElasticStackCalloutDismissed && this.disableAdvancedControls;
    },
+    podDropdownText() {
+      if (this.pods.current) {
+        return this.pods.current;
+      } else if (this.advancedFeaturesEnabled) {
+        // "All pods" is a valid option when advanced querying is available
+        return s__('Environments|All pods');
+      }
+      return s__('Environments|No pod selected');
+    },
  },
  mounted() {
    this.setInitData({
@@ -178,11 +191,17 @@
    >
      <gl-dropdown
        id="pods-dropdown"
-        :text="pods.current || s__('Environments|No pods to display')"
+        :text="podDropdownText"
        :disabled="environments.isLoading"
        class="d-flex gl-h-32 js-pods-dropdown"
        toggle-class="dropdown-menu-toggle"
      >
+        <template v-if="advancedFeaturesEnabled">
+          <gl-dropdown-item key="all-pods" @click="showPodLogs(null)">
+            {{ s__('Environments|All pods') }}
+          </gl-dropdown-item>
+          <gl-dropdown-divider />
+        </template>
        <gl-dropdown-item
          v-for="podName in pods.options"
          :key="podName"
......
@@ -82,7 +82,6 @@ export const setTimeRange = ({ dispatch, commit }, timeRange) => {
export const showEnvironment = ({ dispatch, commit }, environmentName) => {
  commit(types.SET_PROJECT_ENVIRONMENT, environmentName);
-  commit(types.SET_CURRENT_POD_NAME, null);
  dispatch('fetchLogs');
};
@@ -107,16 +106,16 @@ export const fetchEnvironments = ({ commit, dispatch }, environmentsPath) => {
};

export const fetchLogs = ({ commit, state }) => {
-  commit(types.REQUEST_PODS_DATA);
  commit(types.REQUEST_LOGS_DATA);
  return requestLogsUntilData(state)
    .then(({ data }) => {
      const { pod_name, pods, logs, cursor } = data;
+      commit(types.RECEIVE_LOGS_DATA_SUCCESS, { logs, cursor });
      commit(types.SET_CURRENT_POD_NAME, pod_name);
      commit(types.RECEIVE_PODS_DATA_SUCCESS, pods);
-      commit(types.RECEIVE_LOGS_DATA_SUCCESS, { logs, cursor });
    })
    .catch(() => {
      commit(types.RECEIVE_PODS_DATA_ERROR);
......
import { formatDate } from '../utils';

-const mapTrace = ({ timestamp = null, message = '' }) =>
-  [timestamp ? formatDate(timestamp) : '', message].join(' | ');
+const mapTrace = ({ timestamp = null, pod = '', message = '' }) =>
+  [timestamp ? formatDate(timestamp) : '', pod, message].join(' | ');

export const trace = state => state.logs.lines.map(mapTrace).join('\n');
......
@@ -14,6 +14,5 @@ export const REQUEST_LOGS_DATA_PREPEND = 'REQUEST_LOGS_DATA_PREPEND';
export const RECEIVE_LOGS_DATA_PREPEND_SUCCESS = 'RECEIVE_LOGS_DATA_PREPEND_SUCCESS';
export const RECEIVE_LOGS_DATA_PREPEND_ERROR = 'RECEIVE_LOGS_DATA_PREPEND_ERROR';
-export const REQUEST_PODS_DATA = 'REQUEST_PODS_DATA';
export const RECEIVE_PODS_DATA_SUCCESS = 'RECEIVE_PODS_DATA_SUCCESS';
export const RECEIVE_PODS_DATA_ERROR = 'RECEIVE_PODS_DATA_ERROR';
import * as types from './mutation_types';
import { convertToFixedRange } from '~/lib/utils/datetime_range';

-const mapLine = ({ timestamp, message }) => ({
+const mapLine = ({ timestamp, pod, message }) => ({
  timestamp,
+  pod,
  message,
});
@@ -21,6 +22,10 @@ export default {
  // Environments Data
  [types.SET_PROJECT_ENVIRONMENT](state, environmentName) {
    state.environments.current = environmentName;
+    // Clear current pod options
+    state.pods.current = null;
+    state.pods.options = [];
  },
  [types.REQUEST_ENVIRONMENTS_DATA](state) {
    state.environments.options = [];
@@ -81,9 +86,6 @@ export default {
  [types.SET_CURRENT_POD_NAME](state, podName) {
    state.pods.current = podName;
  },
-  [types.REQUEST_PODS_DATA](state) {
-    state.pods.options = [];
-  },
  [types.RECEIVE_PODS_DATA_SUCCESS](state, podOptions) {
    state.pods.options = podOptions;
  },
......
-import { timeRanges, defaultTimeRange } from '~/monitoring/constants';
+import { timeRanges, defaultTimeRange } from '~/vue_shared/constants';
import { convertToFixedRange } from '~/lib/utils/datetime_range';

export default () => ({
......
@@ -31,7 +31,8 @@ import DashboardsDropdown from './dashboards_dropdown.vue';
import TrackEventDirective from '~/vue_shared/directives/track_event';
import { getAddMetricTrackingOptions, timeRangeToUrl, timeRangeFromUrl } from '../utils';
-import { defaultTimeRange, timeRanges, metricStates } from '../constants';
+import { metricStates } from '../constants';
+import { defaultTimeRange, timeRanges } from '~/vue_shared/constants';

export default {
  components: {
......
@@ -3,7 +3,8 @@ import { mapActions, mapState, mapGetters } from 'vuex';
import PanelType from 'ee_else_ce/monitoring/components/panel_type.vue';
import { convertToFixedRange } from '~/lib/utils/datetime_range';
import { timeRangeFromUrl, removeTimeRangeParams } from '../utils';
-import { sidebarAnimationDuration, defaultTimeRange } from '../constants';
+import { sidebarAnimationDuration } from '../constants';
+import { defaultTimeRange } from '~/vue_shared/constants';

let sidebarMutationObserver;
......
-import { __ } from '~/locale';

export const PROMETHEUS_TIMEOUT = 120000; // TWO_MINUTES

/**
@@ -89,37 +87,3 @@ export const dateFormats = {
  timeOfDay: 'h:MM TT',
  default: 'dd mmm yyyy, h:MMTT',
};
export const timeRanges = [
{
label: __('30 minutes'),
duration: { seconds: 60 * 30 },
},
{
label: __('3 hours'),
duration: { seconds: 60 * 60 * 3 },
},
{
label: __('8 hours'),
duration: { seconds: 60 * 60 * 8 },
default: true,
},
{
label: __('1 day'),
duration: { seconds: 60 * 60 * 24 * 1 },
},
{
label: __('3 days'),
duration: { seconds: 60 * 60 * 24 * 3 },
},
{
label: __('1 week'),
duration: { seconds: 60 * 60 * 24 * 7 * 1 },
},
{
label: __('1 month'),
duration: { seconds: 60 * 60 * 24 * 30 },
},
];
export const defaultTimeRange = timeRanges.find(tr => tr.default);
@@ -43,6 +43,11 @@ export default {
      required: false,
      default: () => defaultTimeRanges,
    },
+    customEnabled: {
+      type: Boolean,
+      required: false,
+      default: true,
+    },
  },
  data() {
    return {
@@ -166,6 +171,7 @@ export default {
  >
    <div class="d-flex justify-content-between gl-p-2">
      <gl-form-group
+        v-if="customEnabled"
        :label="__('Custom range')"
        label-for="custom-from-time"
        label-class="gl-pb-1"
......
import { __ } from '~/locale';
const INTERVALS = {
minute: 'minute',
hour: 'hour',
day: 'day',
};
export const timeRanges = [
{
label: __('30 minutes'),
duration: { seconds: 60 * 30 },
name: 'thirtyMinutes',
interval: INTERVALS.minute,
},
{
label: __('3 hours'),
duration: { seconds: 60 * 60 * 3 },
name: 'threeHours',
interval: INTERVALS.hour,
},
{
label: __('8 hours'),
duration: { seconds: 60 * 60 * 8 },
name: 'eightHours',
default: true,
interval: INTERVALS.hour,
},
{
label: __('1 day'),
duration: { seconds: 60 * 60 * 24 * 1 },
name: 'oneDay',
interval: INTERVALS.hour,
},
{
label: __('3 days'),
duration: { seconds: 60 * 60 * 24 * 3 },
name: 'threeDays',
interval: INTERVALS.hour,
},
{
label: __('1 week'),
duration: { seconds: 60 * 60 * 24 * 7 * 1 },
name: 'oneWeek',
interval: INTERVALS.day,
},
{
label: __('1 month'),
duration: { seconds: 60 * 60 * 24 * 30 },
name: 'oneMonth',
interval: INTERVALS.day,
},
];
export const defaultTimeRange = timeRanges.find(tr => tr.default);
export const getTimeWindow = timeWindowName => timeRanges.find(tr => tr.name === timeWindowName);
@@ -55,22 +55,10 @@ module PodLogs
        return error(_('Cluster does not exist')) if cluster.nil?
        return error(_('Namespace is empty')) if namespace.blank?

-        success(result)
-      end
-
-      def check_param_lengths(_result)
-        pod_name = params['pod_name'].presence
-        container_name = params['container_name'].presence
-
-        if pod_name&.length.to_i > K8S_NAME_MAX_LENGTH
-          return error(_('pod_name cannot be larger than %{max_length}'\
-            ' chars' % { max_length: K8S_NAME_MAX_LENGTH }))
-        elsif container_name&.length.to_i > K8S_NAME_MAX_LENGTH
-          return error(_('container_name cannot be larger than'\
-            ' %{max_length} chars' % { max_length: K8S_NAME_MAX_LENGTH }))
-        end
-
-        success(pod_name: pod_name, container_name: container_name)
+        result[:pod_name] = params['pod_name'].presence
+        result[:container_name] = params['container_name'].presence
+
+        success(result)
      end

      def get_raw_pods(result)
@@ -85,40 +73,6 @@ module PodLogs
        success(result)
      end
def check_pod_name(result)
# If pod_name is not received as parameter, get the pod logs of the first
# pod of this namespace.
result[:pod_name] ||= result[:pods].first
unless result[:pod_name]
return error(_('No pods available'))
end
unless result[:pods].include?(result[:pod_name])
return error(_('Pod does not exist'))
end
success(result)
end
def check_container_name(result)
pod_details = result[:raw_pods].first { |p| p.metadata.name == result[:pod_name] }
containers = pod_details.spec.containers.map(&:name)
# select first container if not specified
result[:container_name] ||= containers.first
unless result[:container_name]
return error(_('No containers available'))
end
unless containers.include?(result[:container_name])
return error(_('Container does not exist'))
end
success(result)
end
      def pod_logs(result)
        raise NotImplementedError
      end
......
@@ -3,11 +3,8 @@
module PodLogs
  class ElasticsearchService < PodLogs::BaseService
    steps :check_arguments,
-          :check_param_lengths,
          :get_raw_pods,
          :get_pod_names,
-          :check_pod_name,
-          :check_container_name,
          :check_times,
          :check_search,
          :check_cursor,
@@ -53,7 +50,7 @@ module PodLogs
        response = ::Gitlab::Elasticsearch::Logs.new(client).pod_logs(
          namespace,
-          result[:pod_name],
+          pod_name: result[:pod_name],
          container_name: result[:container_name],
          search: result[:search],
          start_time: result[:start],
......
@@ -8,7 +8,6 @@ module PodLogs
    EncodingHelperError = Class.new(StandardError)

    steps :check_arguments,
-          :check_param_lengths,
          :get_raw_pods,
          :get_pod_names,
          :check_pod_name,
@@ -22,6 +21,50 @@
    private
def check_pod_name(result)
# If pod_name is not received as parameter, get the pod logs of the first
# pod of this namespace.
result[:pod_name] ||= result[:pods].first
unless result[:pod_name]
return error(_('No pods available'))
end
unless result[:pod_name].length.to_i <= K8S_NAME_MAX_LENGTH
return error(_('pod_name cannot be larger than %{max_length}'\
' chars' % { max_length: K8S_NAME_MAX_LENGTH }))
end
unless result[:pods].include?(result[:pod_name])
return error(_('Pod does not exist'))
end
success(result)
end
def check_container_name(result)
pod_details = result[:raw_pods].first { |p| p.metadata.name == result[:pod_name] }
containers = pod_details.spec.containers.map(&:name)
# select first container if not specified
result[:container_name] ||= containers.first
unless result[:container_name]
return error(_('No containers available'))
end
unless result[:container_name].length.to_i <= K8S_NAME_MAX_LENGTH
return error(_('container_name cannot be larger than'\
' %{max_length} chars' % { max_length: K8S_NAME_MAX_LENGTH }))
end
unless containers.include?(result[:container_name])
return error(_('Container does not exist'))
end
success(result)
end
      def pod_logs(result)
        result[:logs] = cluster.kubeclient.get_pod_log(
          result[:pod_name],
@@ -62,7 +105,8 @@ module PodLogs
          values = line.split(' ', 2)
          {
            timestamp: values[0],
-            message: values[1]
+            message: values[1],
+            pod: result[:pod_name]
          }
        end
......
---
title: Add all pods view to logs explorer
merge_request: 26883
author:
type: added
---
title: Adds crossplane as CI/CD Managed App
merge_request: 27374
author:
type: added
@@ -6000,6 +6000,76 @@ type Project {
  """
  requestAccessEnabled: Boolean
"""
Find a single requirement. Available only when feature flag `requirements_management` is enabled.
"""
requirement(
"""
IID of the requirement, e.g., "1"
"""
iid: ID
"""
List of IIDs of requirements, e.g., [1, 2]
"""
iids: [ID!]
"""
List requirements by sort order
"""
sort: Sort
"""
Filter requirements by state
"""
state: RequirementState
): Requirement
"""
Find requirements. Available only when feature flag `requirements_management` is enabled.
"""
requirements(
"""
Returns the elements in the list that come after the specified cursor.
"""
after: String
"""
Returns the elements in the list that come before the specified cursor.
"""
before: String
"""
Returns the first _n_ elements from the list.
"""
first: Int
"""
IID of the requirement, e.g., "1"
"""
iid: ID
"""
List of IIDs of requirements, e.g., [1, 2]
"""
iids: [ID!]
"""
Returns the last _n_ elements from the list.
"""
last: Int
"""
List requirements by sort order
"""
sort: Sort
"""
Filter requirements by state
"""
state: RequirementState
): RequirementConnection
""" """
Detailed version of a Sentry error on the project Detailed version of a Sentry error on the project
""" """
...@@ -6664,6 +6734,41 @@ type Requirement { ...@@ -6664,6 +6734,41 @@ type Requirement {
userPermissions: RequirementPermissions! userPermissions: RequirementPermissions!
} }
"""
The connection type for Requirement.
"""
type RequirementConnection {
"""
A list of edges.
"""
edges: [RequirementEdge]
"""
A list of nodes.
"""
nodes: [Requirement]
"""
Information to aid in pagination.
"""
pageInfo: PageInfo!
}
"""
An edge in a connection.
"""
type RequirementEdge {
"""
A cursor for use in pagination.
"""
cursor: String!
"""
The item at the end of the edge.
"""
node: Requirement
}
""" """
Check permissions for the current user on a requirement Check permissions for the current user on a requirement
""" """
...@@ -7463,6 +7568,31 @@ type SnippetPermissions { ...@@ -7463,6 +7568,31 @@ type SnippetPermissions {
updateSnippet: Boolean! updateSnippet: Boolean!
} }
"""
Common sort values
"""
enum Sort {
"""
Created at ascending order
"""
created_asc
"""
Created at descending order
"""
created_desc
"""
Updated at ascending order
"""
updated_asc
"""
Updated at descending order
"""
updated_desc
}
type Submodule implements Entry {
  """
  Flat path of the entry
......
@@ -18032,6 +18032,168 @@
        "isDeprecated": false,
        "deprecationReason": null
      },
{
"name": "requirement",
"description": "Find a single requirement. Available only when feature flag `requirements_management` is enabled.",
"args": [
{
"name": "iid",
"description": "IID of the requirement, e.g., \"1\"",
"type": {
"kind": "SCALAR",
"name": "ID",
"ofType": null
},
"defaultValue": null
},
{
"name": "iids",
"description": "List of IIDs of requirements, e.g., [1, 2]",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "SCALAR",
"name": "ID",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "sort",
"description": "List requirements by sort order",
"type": {
"kind": "ENUM",
"name": "Sort",
"ofType": null
},
"defaultValue": null
},
{
"name": "state",
"description": "Filter requirements by state",
"type": {
"kind": "ENUM",
"name": "RequirementState",
"ofType": null
},
"defaultValue": null
}
],
"type": {
"kind": "OBJECT",
"name": "Requirement",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "requirements",
"description": "Find requirements. Available only when feature flag `requirements_management` is enabled.",
"args": [
{
"name": "iid",
"description": "IID of the requirement, e.g., \"1\"",
"type": {
"kind": "SCALAR",
"name": "ID",
"ofType": null
},
"defaultValue": null
},
{
"name": "iids",
"description": "List of IIDs of requirements, e.g., [1, 2]",
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "SCALAR",
"name": "ID",
"ofType": null
}
}
},
"defaultValue": null
},
{
"name": "sort",
"description": "List requirements by sort order",
"type": {
"kind": "ENUM",
"name": "Sort",
"ofType": null
},
"defaultValue": null
},
{
"name": "state",
"description": "Filter requirements by state",
"type": {
"kind": "ENUM",
"name": "RequirementState",
"ofType": null
},
"defaultValue": null
},
{
"name": "after",
"description": "Returns the elements in the list that come after the specified cursor.",
"type": {
"kind": "SCALAR",
"name": "String",
"ofType": null
},
"defaultValue": null
},
{
"name": "before",
"description": "Returns the elements in the list that come before the specified cursor.",
"type": {
"kind": "SCALAR",
"name": "String",
"ofType": null
},
"defaultValue": null
},
{
"name": "first",
"description": "Returns the first _n_ elements from the list.",
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"defaultValue": null
},
{
"name": "last",
"description": "Returns the last _n_ elements from the list.",
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"defaultValue": null
}
],
"type": {
"kind": "OBJECT",
"name": "RequirementConnection",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
      {
        "name": "sentryDetailedError",
        "description": "Detailed version of a Sentry error on the project",
@@ -20106,6 +20268,118 @@
      "enumValues": null,
      "possibleTypes": null
    },
{
"kind": "OBJECT",
"name": "RequirementConnection",
"description": "The connection type for Requirement.",
"fields": [
{
"name": "edges",
"description": "A list of edges.",
"args": [
],
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "OBJECT",
"name": "RequirementEdge",
"ofType": null
}
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "nodes",
"description": "A list of nodes.",
"args": [
],
"type": {
"kind": "LIST",
"name": null,
"ofType": {
"kind": "OBJECT",
"name": "Requirement",
"ofType": null
}
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "pageInfo",
"description": "Information to aid in pagination.",
"args": [
],
"type": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "OBJECT",
"name": "PageInfo",
"ofType": null
}
},
"isDeprecated": false,
"deprecationReason": null
}
],
"inputFields": null,
"interfaces": [
],
"enumValues": null,
"possibleTypes": null
},
{
"kind": "OBJECT",
"name": "RequirementEdge",
"description": "An edge in a connection.",
"fields": [
{
"name": "cursor",
"description": "A cursor for use in pagination.",
"args": [
],
"type": {
"kind": "NON_NULL",
"name": null,
"ofType": {
"kind": "SCALAR",
"name": "String",
"ofType": null
}
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "node",
"description": "The item at the end of the edge.",
"args": [
],
"type": {
"kind": "OBJECT",
"name": "Requirement",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
}
],
"inputFields": null,
"interfaces": [
],
"enumValues": null,
"possibleTypes": null
},
    {
      "kind": "OBJECT",
      "name": "RequirementPermissions",
@@ -22643,6 +22917,41 @@
      "enumValues": null,
      "possibleTypes": null
    },
{
"kind": "ENUM",
"name": "Sort",
"description": "Common sort values",
"fields": null,
"inputFields": null,
"interfaces": null,
"enumValues": [
{
"name": "updated_desc",
"description": "Updated at descending order",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "updated_asc",
"description": "Updated at ascending order",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "created_desc",
"description": "Created at descending order",
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "created_asc",
"description": "Created at ascending order",
"isDeprecated": false,
"deprecationReason": null
}
],
"possibleTypes": null
},
    {
      "kind": "SCALAR",
      "name": "String",
......
@@ -898,6 +898,7 @@ Information about pagination in a connection.
| `removeSourceBranchAfterMerge` | Boolean | Indicates if `Delete source branch` option should be enabled by default for all new merge requests of the project |
| `repository` | Repository | Git repository of the project |
| `requestAccessEnabled` | Boolean | Indicates if users can request member access to the project |
+| `requirement` | Requirement | Find a single requirement. Available only when feature flag `requirements_management` is enabled. |
| `sentryDetailedError` | SentryDetailedError | Detailed version of a Sentry error on the project |
| `sentryErrors` | SentryErrorCollection | Paginated collection of Sentry errors on the project |
| `serviceDeskAddress` | String | E-mail address of the service desk. |
......
@@ -81,7 +81,7 @@ already reserved for category labels).
The descriptions on the [labels page](https://gitlab.com/groups/gitlab-org/-/labels)
explain what falls under each type label.

-The GitLab handbook documents [when something is a bug and when it is a feature request.](https://about.gitlab.com/handbook/product/product-management/process/feature-or-bug.html)
+The GitLab handbook documents [when something is a bug and when it is a feature request](https://about.gitlab.com/handbook/product/product-management/process/feature-or-bug.html).

### Facet labels
......
@@ -32,12 +32,12 @@ The `BulkInsertSafe` concern has two functions:
- It performs checks against your model class to ensure that it does not use ActiveRecord
  APIs that are not safe to use with respect to bulk insertions (more on that below).
-- It adds a new class method `bulk_insert!`, which you can use to insert many records at once.
+- It adds new class methods `bulk_insert!` and `bulk_upsert!`, which you can use to insert many records at once.

-## Insert records via `bulk_insert!`
+## Insert records with `bulk_insert!` and `bulk_upsert!`

-If the target class passes the checks performed by `BulkInsertSafe`, you can proceed to use
-the `bulk_insert!` class method as follows:
+If the target class passes the checks performed by `BulkInsertSafe`, you can insert an array of
+ActiveRecord model objects as follows:

```ruby
records = [MyModel.new, ...]
@@ -45,6 +45,28 @@ records = [MyModel.new, ...]

MyModel.bulk_insert!(records)
```
Note that calls to `bulk_insert!` will always attempt to insert _new records_. If instead
you would like to replace existing records with new values, while still inserting those
that do not already exist, then you can use `bulk_upsert!`:
```ruby
records = [MyModel.new, existing_model, ...]
MyModel.bulk_upsert!(records, unique_by: [:name])
```
In this example, `unique_by` specifies the columns by which records are considered to be
unique and as such will be updated if they existed prior to insertion. For example, if
`existing_model` has a `name` attribute, and if a record with the same `name` value already
exists, its fields will be updated with those of `existing_model`.
The `unique_by` parameter can also be passed as a `Symbol`, in which case it specifies
a database index by which a column is considered unique:
```ruby
MyModel.bulk_insert!(records, unique_by: :index_on_name)
```
### Record validation

The `bulk_insert!` method guarantees that `records` will be inserted transactionally, and
@@ -74,6 +96,23 @@ Since this will also affect the number of `INSERT`s that occur, make sure you measure the
performance impact this might have on your code. There is a trade-off between the number of
`INSERT` statements the database has to process and the size and cost of each `INSERT`.
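A minimal sketch of managing that trade-off by batching the input yourself before calling `bulk_insert!` (the model name and the batch size of 500 below are placeholders, not part of the documented API):

```ruby
# Illustrative only: slice the input so each INSERT stays reasonably sized.
# Each bulk_insert! call below is transactional on its own; wrap the loop in an
# outer transaction if all batches must succeed or fail together.
records.each_slice(500) do |batch|
  MyModel.bulk_insert!(batch)
end
```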
### Handling duplicate records
NOTE: **Note:**
This parameter applies only to `bulk_insert!`. If you intend to update existing
records, use `bulk_upsert!` instead.
It may happen that some records you are trying to insert already exist, which would result in
primary key conflicts. There are two ways to address this problem: failing fast by raising an
error or skipping duplicate records. The default behavior of `bulk_insert!` is to fail fast
and raise an `ActiveRecord::RecordNotUnique` error.
If this is undesirable, you can instead skip duplicate records with the `skip_duplicates` flag:
```ruby
MyModel.bulk_insert!(records, skip_duplicates: true)
```
### Requirements for safe bulk insertions

Large parts of ActiveRecord's persistence API are built around the notion of callbacks. Many
@@ -145,11 +184,12 @@ simply be treated as if you had invoked `save` from outside the block.

There are a few restrictions to how these APIs can be used:

-- Bulk inserts only work for new records; `UPDATE`s or "upserts" are not supported yet.
- `ON CONFLICT` behavior cannot currently be configured; an error will be raised on primary key conflicts.
- `BulkInsertableAssociations` furthermore has the following restrictions:
  - only compatible with `has_many` relations.
  - does not support `has_many through: ...` relations.
+- Writing [`jsonb`](https://www.postgresql.org/docs/current/datatype-json.html) content is
+  [not currently supported](https://gitlab.com/gitlab-org/gitlab/-/issues/210560).

Moreover, input data should either be limited to around 1000 records at most,
or already batched prior to calling bulk insert. The `INSERT` statement will run in a single
......
@@ -144,10 +144,20 @@ It's also important to ensure that any background migrations have been fully completed
before upgrading to a new major version. To see the current size of the `background_migration` queue,
[Check for background migrations before upgrading](../update/README.md#checking-for-background-migrations-before-upgrading).

-From version 12 onwards, an additional step is required. More significant migrations may occur during major release upgrades. To ensure these are successful, increment to the first minor version (`x.0.x`) during the major version jump. Then proceed with upgrading to a newer release.
+### Version 12 onwards: Extra step for major upgrades
+
+From version 12 onwards, an additional step is required. More significant migrations
+may occur during major release upgrades.
+
+To ensure these are successful:
+
+1. Increment to the first minor version (`x.0.x`) during the major version jump.
+1. Proceed with upgrading to a newer release.

For example: `11.11.x` -> `12.0.x` -> `12.8.x`

+### Example upgrade paths
+
Please see the table below for some examples:

| Latest stable version | Your version | Recommended upgrade path | Note |
@@ -155,8 +165,10 @@ Please see the table below for some examples:
| 9.4.5 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.4.5` | `8.17.7` is the last version in version `8` |
| 10.1.4 | 8.13.4 | `8.13.4 -> 8.17.7 -> 9.5.10 -> 10.1.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9` |
| 11.3.4 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.5.10` -> `10.8.7` -> `11.3.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9`, `10.8.7` is the last version in version `10` |
-| 12.5.8 | 11.3.4 | `11.3.4` -> `11.11.8` -> `12.0.12` -> `12.5.8` | `11.11.8` is the last version in version `11`. `12.0.x` [is a required step.](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23211#note_272842444) |
+| 12.5.8 | 11.3.4 | `11.3.4` -> `11.11.8` -> `12.0.12` -> `12.5.8` | `11.11.8` is the last version in version `11`. `12.0.x` [is a required step](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23211#note_272842444). |
-| 12.8.5 | 9.2.6 | `9.2.6` -> `9.5.10` -> `10.8.7` -> `11.11.8` -> `12.0.12` -> `12.8.5` | Four intermediate versions required: the final 9.5, 10.8, 11.11 releases, plus 12.0 |
+| 12.8.5 | 9.2.6 | `9.2.6` -> `9.5.10` -> `10.8.7` -> `11.11.8` -> `12.0.12` -> `12.8.5` | Four intermediate versions are required: the final 9.5, 10.8, 11.11 releases, plus 12.0. |

+## More information
+
More information about the release procedures can be found in our
[release documentation](https://gitlab.com/gitlab-org/release/docs). You may also want to read our
......
@@ -548,6 +548,7 @@ Supported applications:
- [Sentry](#install-sentry-using-gitlab-ci)
- [GitLab Runner](#install-gitlab-runner-using-gitlab-ci)
- [Cilium](#install-cilium-using-gitlab-ci)
+- [Vault](#install-vault-using-gitlab-ci)
- [JupyterHub](#install-jupyterhub-using-gitlab-ci)
- [Elastic Stack](#install-elastic-stack-using-gitlab-ci)
- [Crossplane](#install-crossplane-using-gitlab-ci)
@@ -813,6 +814,95 @@ agent:
  enabled: false
```
### Install Vault using GitLab CI
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/9982) in GitLab 12.9.
[Hashicorp Vault](https://vaultproject.io/) is a secrets management solution which
can be used to safely manage and store passwords, credentials, certificates and more. A Vault
installation could be leveraged to provide a single secure data store for credentials
used in your applications, GitLab CI jobs, and more. It could also serve as a way of
providing SSL/TLS certificates to systems and deployments in your infrastructure. Leveraging
Vault as a single source for all these credentials allows greater security by having
a single source of access, control, and auditability around all your sensitive
credentials and certificates.
To install Vault, enable it in the `.gitlab/managed-apps/config.yaml` file:
```yaml
vault:
installed: true
```
By default you will get a basic Vault setup with no high availability and no scalable
storage backend. This is enough for simple testing and small-scale deployments, though it has limits
to how much it can scale, and as it is a single-instance deployment, you will experience downtime
when upgrading the Vault application.

To use Vault optimally in a production environment, it's ideal to have a good understanding
of the internals of Vault and how to configure it. This can be done by reading the
[Vault documentation](https://www.vaultproject.io/docs/internals/) as well as
the Vault Helm chart [values.yaml file](https://github.com/hashicorp/vault-helm/blob/v0.3.3/values.yaml).
At a minimum you will likely set up:
- A [seal](https://www.vaultproject.io/docs/configuration/seal/) for extra encryption
of the master key.
- A [storage backend](https://www.vaultproject.io/docs/configuration/storage/) that is
suitable for environment and storage security requirements.
- [HA Mode](https://www.vaultproject.io/docs/concepts/ha/).
- [The Vault UI](https://www.vaultproject.io/docs/configuration/ui/).
The following is an example values file (`.gitlab/managed-apps/vault/values.yaml`)
that configures Google Key Management Service for auto-unseal, uses a Google Cloud Storage backend,
enables the Vault UI, and enables HA with 3 pod replicas. The `storage` and `seal` stanzas
below are examples and should be replaced with settings specific to your environment.
```yaml
# Enable the Vault WebUI
ui:
enabled: true
server:
# Disable the built-in data storage volume as it's not safe for High Availability mode
dataStorage:
enabled: false
# Enable High Availability Mode
ha:
enabled: true
# Configure Vault to listen on port 8200 for normal traffic and port 8201 for inter-cluster traffic
config: |
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
# Configure Vault to store its data in a GCS Bucket backend
storage "gcs" {
path = "gcs://my-vault-storage/vault-bucket"
ha_enabled = "true"
}
# Configure Vault to automatically unseal storage using a GKMS key
seal "gcpckms" {
project = "vault-helm-dev-246514"
region = "global"
key_ring = "vault-helm-unseal-kr"
crypto_key = "vault-helm-unseal-key"
}
```
Once you have successfully installed Vault, you will need to [initialize the Vault](https://learn.hashicorp.com/vault/getting-started/deploy#initializing-the-vault)
and obtain the initial root token. You will need access to the Kubernetes cluster that Vault has been deployed into in order to do this.

To initialize the Vault, get a shell to one of the Vault pods running inside Kubernetes (typically this is done by using the `kubectl` command line tool).
Once you have a shell into the pod, run the `vault operator init` command:
```shell
kubectl -n gitlab-managed-apps exec -it vault-0 sh
/ $ vault operator init
```
This should give you your unseal keys and initial root token. Make sure to note these down
and keep these safe as you will need them to unseal the Vault throughout its lifecycle.
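As a hedged illustration (not part of the original steps), unsealing could then be done from the same pod shell by supplying unseal keys one at a time; the `<unseal-key-N>` arguments are placeholders for the values printed by `vault operator init`, and by default three of the five generated keys are needed:

```shell
# Repeat with distinct unseal keys until the unseal threshold is reached (3 of 5 by default)
/ $ vault operator unseal <unseal-key-1>
/ $ vault operator unseal <unseal-key-2>
/ $ vault operator unseal <unseal-key-3>
```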
### Install JupyterHub using GitLab CI

> [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/40) in GitLab 12.8.
......
apply:
  stage: deploy
-  image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.11.0"
+  image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.12.0"
  environment:
    name: production
  variables:
@@ -16,6 +16,7 @@ apply:
    PROMETHEUS_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/prometheus/values.yaml
    ELASTIC_STACK_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/elastic-stack/values.yaml
    VAULT_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/vault/values.yaml
+    CROSSPLANE_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/crossplane/values.yaml
  script:
    - gitlab-managed-apps /usr/local/share/gitlab-managed-apps/helmfile.yaml
  only:
......
@@ -12,7 +12,7 @@ module Gitlab
        @client = client
      end

-      def pod_logs(namespace, pod_name, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil)
+      def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil)
        query = { bool: { must: [] } }.tap do |q|
          filter_pod_name(q, pod_name)
          filter_namespace(q, namespace)
@@ -38,7 +38,7 @@ module Gitlab
            { "offset": { order: :desc } }
          ],
          # only return these fields in the response
-          _source: ["@timestamp", "message"],
+          _source: ["@timestamp", "message", "kubernetes.pod.name"],
          # fixed limit for now, we should support paginated queries
          size: ::Gitlab::Elasticsearch::Logs::LOGS_LIMIT
        }
@@ -51,6 +51,9 @@ module Gitlab
      end

      def filter_pod_name(query, pod_name)
+        # We can filter by "all pods" with a null pod_name
+        return if pod_name.nil?
+
        query[:bool][:must] << {
          match_phrase: {
            "kubernetes.pod.name" => {
@@ -113,7 +116,8 @@ module Gitlab
        results = results.map do |hit|
          {
            timestamp: hit["_source"]["@timestamp"],
-            message: hit["_source"]["message"]
+            message: hit["_source"]["message"],
+            pod: hit["_source"]["kubernetes"]["pod"]["name"]
          }
        end
......
@@ -711,9 +711,6 @@ msgstr ""
msgid "20-29 contributions"
msgstr ""

-msgid "24 hours"
-msgstr ""
-
msgid "2FA"
msgstr ""
@@ -726,9 +723,6 @@ msgstr ""
msgid "3 hours"
msgstr ""

-msgid "30 days"
-msgstr ""
-
msgid "30 minutes"
msgstr ""
@@ -750,9 +744,6 @@ msgstr ""
msgid "404|Please contact your GitLab administrator if you think this is a mistake."
msgstr ""

-msgid "7 days"
-msgstr ""
-
msgid "8 hours"
msgstr ""
@@ -7660,6 +7651,9 @@ msgstr ""
msgid "EnvironmentsDashboard|This dashboard displays a maximum of 7 projects and 3 environments per project. %{readMoreLink}"
msgstr ""

+msgid "Environments|All pods"
+msgstr ""
+
msgid "Environments|An error occurred while canceling the auto stop, please try again"
msgstr ""
@@ -7741,7 +7735,7 @@ msgstr ""
msgid "Environments|No deployments yet"
msgstr ""

-msgid "Environments|No pods to display"
+msgid "Environments|No pod selected"
msgstr ""

msgid "Environments|Note that this action will stop the environment, but it will %{emphasisStart}not%{emphasisEnd} have an effect on any existing deployment due to no “stop environment action” being defined in the %{ciConfigLinkStart}.gitlab-ci.yml%{ciConfigLinkEnd} file."
......
@@ -19,7 +19,12 @@
      "_score": null,
      "_source": {
        "message": "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13",
-        "@timestamp": "2019-12-13T14:35:34.034Z"
+        "@timestamp": "2019-12-13T14:35:34.034Z",
+        "kubernetes": {
+          "pod": {
+            "name": "production-6866bc8974-m4sk4"
+          }
+        }
      },
      "sort": [
        9999998,
@@ -33,7 +38,12 @@
      "_score": null,
      "_source": {
        "message": "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13",
-        "@timestamp": "2019-12-13T14:35:35.034Z"
+        "@timestamp": "2019-12-13T14:35:35.034Z",
+        "kubernetes": {
+          "pod": {
+            "name": "production-6866bc8974-m4sk4"
+          }
+        }
      },
      "sort": [
        9999949,
@@ -47,7 +57,12 @@
      "_score": null,
      "_source": {
        "message": "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13",
-        "@timestamp": "2019-12-13T14:35:36.034Z"
+        "@timestamp": "2019-12-13T14:35:36.034Z",
+        "kubernetes": {
+          "pod": {
+            "name": "production-6866bc8974-m4sk4"
+          }
+        }
      },
      "sort": [
        9999944,
@@ -61,7 +76,12 @@
      "_score": null,
      "_source": {
        "message": "- -\u003e /",
-        "@timestamp": "2019-12-13T14:35:37.034Z"
+        "@timestamp": "2019-12-13T14:35:37.034Z",
+        "kubernetes": {
+          "pod": {
+            "name": "production-6866bc8974-m4sk4"
+          }
+        }
      },
      "sort": [
        9999934,
......
@@ -33,7 +33,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -40,7 +40,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -37,7 +37,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -42,7 +42,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -42,7 +42,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -42,7 +42,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -43,7 +43,8 @@
      ],
      "_source": [
        "@timestamp",
-        "message"
+        "message",
+        "kubernetes.pod.name"
      ],
      "size": 500
    }
@@ -300,9 +300,10 @@ describe('EnvironmentLogs', () => {
      const items = findPodsDropdown().findAll(GlDropdownItem);
      expect(findPodsDropdown().props('text')).toBe(mockPodName);
-      expect(items.length).toBe(mockPods.length);
+      expect(items.length).toBe(mockPods.length + 1);
+      expect(items.at(0).text()).toBe('All pods');
      mockPods.forEach((pod, i) => {
-        const item = items.at(i);
+        const item = items.at(i + 1);
        expect(item.text()).toBe(pod);
      });
    });
@@ -345,7 +346,7 @@ describe('EnvironmentLogs', () => {
      expect(dispatch).not.toHaveBeenCalledWith(`${module}/showPodLogs`, expect.anything());
-      items.at(index).vm.$emit('click');
+      items.at(index + 1).vm.$emit('click');
      expect(dispatch).toHaveBeenCalledWith(`${module}/showPodLogs`, mockPods[index]);
    });
......
@@ -32,15 +32,93 @@ export const mockPods = [
];

export const mockLogsResult = [
-  { timestamp: '2019-12-13T13:43:18.2760123Z', message: 'Log 1' },
-  { timestamp: '2019-12-13T13:43:18.2760123Z', message: 'Log 2' },
-  { timestamp: '2019-12-13T13:43:26.8420123Z', message: 'Log 3' },
+  {
+    timestamp: '2019-12-13T13:43:18.2760123Z',
+    message: '10.36.0.1 - - [16/Oct/2019:06:29:48 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:18.2760123Z',
message: '- -> /',
pod: 'bar',
},
{
timestamp: '2019-12-13T13:43:26.8420123Z',
message: '10.36.0.1 - - [16/Oct/2019:06:29:57 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:26.8420123Z',
message: '- -> /',
pod: 'bar',
},
{
timestamp: '2019-12-13T13:43:28.3710123Z',
message: '10.36.0.1 - - [16/Oct/2019:06:29:58 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:28.3710123Z',
message: '- -> /',
pod: 'bar',
},
{
timestamp: '2019-12-13T13:43:36.8860123Z',
message: '10.36.0.1 - - [16/Oct/2019:06:30:07 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:36.8860123Z',
message: '- -> /',
pod: 'bar',
},
{
timestamp: '2019-12-13T13:43:38.4000123Z',
message: '10.36.0.1 - - [16/Oct/2019:06:30:08 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:38.4000123Z',
message: '- -> /',
pod: 'bar',
},
{
timestamp: '2019-12-13T13:43:46.8420123Z',
message: '10.36.0.1 - - [16/Oct/2019:06:30:17 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:46.8430123Z',
message: '- -> /',
pod: 'bar',
},
{
timestamp: '2019-12-13T13:43:48.3240123Z',
message: '10.36.0.1 - - [16/Oct/2019:06:30:18 UTC] "GET / HTTP/1.1" 200 13',
pod: 'foo',
},
{
timestamp: '2019-12-13T13:43:48.3250123Z',
message: '- -> /',
pod: 'bar',
},
];

export const mockTrace = [
-  'Dec 13 13:43:18.276Z | Log 1',
-  'Dec 13 13:43:18.276Z | Log 2',
-  'Dec 13 13:43:26.842Z | Log 3',
+  'Dec 13 13:43:18.276Z | foo | 10.36.0.1 - - [16/Oct/2019:06:29:48 UTC] "GET / HTTP/1.1" 200 13',
+  'Dec 13 13:43:18.276Z | bar | - -> /',
+  'Dec 13 13:43:26.842Z | foo | 10.36.0.1 - - [16/Oct/2019:06:29:57 UTC] "GET / HTTP/1.1" 200 13',
'Dec 13 13:43:26.842Z | bar | - -> /',
'Dec 13 13:43:28.371Z | foo | 10.36.0.1 - - [16/Oct/2019:06:29:58 UTC] "GET / HTTP/1.1" 200 13',
'Dec 13 13:43:28.371Z | bar | - -> /',
'Dec 13 13:43:36.886Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:07 UTC] "GET / HTTP/1.1" 200 13',
'Dec 13 13:43:36.886Z | bar | - -> /',
'Dec 13 13:43:38.400Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:08 UTC] "GET / HTTP/1.1" 200 13',
'Dec 13 13:43:38.400Z | bar | - -> /',
'Dec 13 13:43:46.842Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:17 UTC] "GET / HTTP/1.1" 200 13',
'Dec 13 13:43:46.843Z | bar | - -> /',
'Dec 13 13:43:48.324Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:18 UTC] "GET / HTTP/1.1" 200 13',
'Dec 13 13:43:48.325Z | bar | - -> /',
];

export const mockResponse = {
......
@@ -13,7 +13,7 @@ import {
  fetchMoreLogsPrepend,
} from '~/logs/stores/actions';

-import { defaultTimeRange } from '~/monitoring/constants';
+import { defaultTimeRange } from '~/vue_shared/constants';

import axios from '~/lib/utils/axios_utils';
import flash from '~/flash';
@@ -172,14 +172,13 @@ describe('Logs Store actions', () => {
  describe('fetchLogs', () => {
    beforeEach(() => {
      expectedMutations = [
-        { type: types.REQUEST_PODS_DATA },
        { type: types.REQUEST_LOGS_DATA },
-        { type: types.SET_CURRENT_POD_NAME, payload: mockPodName },
-        { type: types.RECEIVE_PODS_DATA_SUCCESS, payload: mockPods },
        {
          type: types.RECEIVE_LOGS_DATA_SUCCESS,
          payload: { logs: mockLogsResult, cursor: mockNextCursor },
        },
+        { type: types.SET_CURRENT_POD_NAME, payload: mockPodName },
+        { type: types.RECEIVE_PODS_DATA_SUCCESS, payload: mockPods },
      ];

      expectedActions = [];
@@ -364,7 +363,6 @@ describe('Logs Store actions', () => {
        null,
        state,
        [
-          { type: types.REQUEST_PODS_DATA },
          { type: types.REQUEST_LOGS_DATA },
          { type: types.RECEIVE_PODS_DATA_ERROR },
          { type: types.RECEIVE_LOGS_DATA_ERROR },
......
@@ -223,17 +223,6 @@ describe('Logs Store Mutations', () => {
    });
  });
describe('REQUEST_PODS_DATA', () => {
it('receives pods data', () => {
mutations[types.REQUEST_PODS_DATA](state);
expect(state.pods).toEqual(
expect.objectContaining({
options: [],
}),
);
});
});
  describe('RECEIVE_PODS_DATA_SUCCESS', () => {
    it('receives pods data success', () => {
      mutations[types.RECEIVE_PODS_DATA_SUCCESS](state, mockPods);
......
@@ -78,6 +78,7 @@ exports[`Dashboard template matches the default snapshot 1`] = `
  label-size="sm"
>
  <date-time-picker-stub
+    customenabled="true"
    options="[object Object],[object Object],[object Object],[object Object],[object Object],[object Object],[object Object]"
    value="[object Object]"
  />
......
...@@ -7,7 +7,7 @@ import { mockProjectDir } from '../mock_data'; ...@@ -7,7 +7,7 @@ import { mockProjectDir } from '../mock_data';
import Dashboard from '~/monitoring/components/dashboard.vue'; import Dashboard from '~/monitoring/components/dashboard.vue';
import { createStore } from '~/monitoring/stores'; import { createStore } from '~/monitoring/stores';
import { defaultTimeRange } from '~/monitoring/constants'; import { defaultTimeRange } from '~/vue_shared/constants';
import { propsData } from '../init_utils'; import { propsData } from '../init_utils';
jest.mock('~/flash'); jest.mock('~/flash');
......
...@@ -5,10 +5,10 @@ require 'spec_helper' ...@@ -5,10 +5,10 @@ require 'spec_helper'
describe Gitlab::Elasticsearch::Logs do describe Gitlab::Elasticsearch::Logs do
let(:client) { Elasticsearch::Transport::Client } let(:client) { Elasticsearch::Transport::Client }
let(:es_message_1) { { timestamp: "2019-12-13T14:35:34.034Z", message: "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13" } } let(:es_message_1) { { timestamp: "2019-12-13T14:35:34.034Z", pod: "production-6866bc8974-m4sk4", message: "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13" } }
let(:es_message_2) { { timestamp: "2019-12-13T14:35:35.034Z", message: "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13" } } let(:es_message_2) { { timestamp: "2019-12-13T14:35:35.034Z", pod: "production-6866bc8974-m4sk4", message: "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13" } }
let(:es_message_3) { { timestamp: "2019-12-13T14:35:36.034Z", message: "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13" } } let(:es_message_3) { { timestamp: "2019-12-13T14:35:36.034Z", pod: "production-6866bc8974-m4sk4", message: "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13" } }
let(:es_message_4) { { timestamp: "2019-12-13T14:35:37.034Z", message: "- -\u003e /" } } let(:es_message_4) { { timestamp: "2019-12-13T14:35:37.034Z", pod: "production-6866bc8974-m4sk4", message: "- -\u003e /" } }
let(:es_response) { JSON.parse(fixture_file('lib/elasticsearch/logs_response.json')) } let(:es_response) { JSON.parse(fixture_file('lib/elasticsearch/logs_response.json')) }
...@@ -40,49 +40,49 @@ describe Gitlab::Elasticsearch::Logs do ...@@ -40,49 +40,49 @@ describe Gitlab::Elasticsearch::Logs do
it 'returns the logs as an array' do it 'returns the logs as an array' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name) result = subject.pod_logs(namespace, pod_name: pod_name)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
it 'can further filter the logs by container name' do it 'can further filter the logs by container name' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_container)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_container)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name, container_name: container_name) result = subject.pod_logs(namespace, pod_name: pod_name, container_name: container_name)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
it 'can further filter the logs by search' do it 'can further filter the logs by search' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_search)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_search)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name, search: search) result = subject.pod_logs(namespace, pod_name: pod_name, search: search)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
it 'can further filter the logs by start_time and end_time' do it 'can further filter the logs by start_time and end_time' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_times)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_times)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name, start_time: start_time, end_time: end_time) result = subject.pod_logs(namespace, pod_name: pod_name, start_time: start_time, end_time: end_time)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
it 'can further filter the logs by only start_time' do it 'can further filter the logs by only start_time' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_start_time)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_start_time)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name, start_time: start_time) result = subject.pod_logs(namespace, pod_name: pod_name, start_time: start_time)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
it 'can further filter the logs by only end_time' do it 'can further filter the logs by only end_time' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_end_time)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_end_time)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name, end_time: end_time) result = subject.pod_logs(namespace, pod_name: pod_name, end_time: end_time)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
it 'can search after a cursor' do it 'can search after a cursor' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_cursor)).and_return(es_response) expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_cursor)).and_return(es_response)
result = subject.pod_logs(namespace, pod_name, cursor: cursor) result = subject.pod_logs(namespace, pod_name: pod_name, cursor: cursor)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor) expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end end
end end
......
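For context on the spec changes above: `Gitlab::Elasticsearch::Logs#pod_logs` is now exercised with `pod_name` and the other filters passed as keyword arguments, and every returned log entry is expected to carry a `pod` field. The following is a minimal sketch consistent with those expectations only; the real `lib/gitlab/elasticsearch/logs.rb` is not part of the hunks shown here, and the class name, query shape, and Elasticsearch field names below are assumptions.

```ruby
# Hypothetical sketch, not the code under test. It illustrates the two
# behaviours the updated spec relies on: keyword-argument filters and a
# `pod` field on each returned entry. Field names are assumptions.
module Gitlab
  module Elasticsearch
    class LogsSketch
      def initialize(client)
        @client = client
      end

      def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil,
                   start_time: nil, end_time: nil, cursor: nil)
        filters = [{ match_phrase: { 'kubernetes.namespace' => namespace } }]
        filters << { match_phrase: { 'kubernetes.pod.name' => pod_name } } if pod_name
        filters << { match_phrase: { 'kubernetes.container.name' => container_name } } if container_name
        filters << { simple_query_string: { query: search, fields: ['message'] } } if search
        if start_time || end_time
          filters << { range: { '@timestamp' => { gte: start_time, lt: end_time }.compact } }
        end

        body = {
          query: { bool: { filter: filters } },
          sort: [{ '@timestamp' => { order: :desc } }],
          size: 500
        }
        body[:search_after] = cursor if cursor

        hits = @client.search(body: body).dig('hits', 'hits') || []

        logs = hits.map do |hit|
          {
            timestamp: hit.dig('_source', '@timestamp'),
            pod: hit.dig('_source', 'kubernetes', 'pod', 'name'),
            message: hit.dig('_source', 'message')
          }
        end

        { logs: logs, cursor: hits.last&.dig('sort') }
      end
    end
  end
end
```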
...@@ -78,9 +78,7 @@ describe ::PodLogs::BaseService do ...@@ -78,9 +78,7 @@ describe ::PodLogs::BaseService do
expect(result[:message]).to eq('Namespace is empty') expect(result[:message]).to eq('Namespace is empty')
end end
end end
end
describe '#check_param_lengths' do
context 'when pod_name and container_name are provided' do context 'when pod_name and container_name are provided' do
let(:params) do let(:params) do
{ {
...@@ -90,43 +88,13 @@ describe ::PodLogs::BaseService do ...@@ -90,43 +88,13 @@ describe ::PodLogs::BaseService do
end end
it 'returns success' do it 'returns success' do
result = subject.send(:check_param_lengths, {}) result = subject.send(:check_arguments, {})
expect(result[:status]).to eq(:success) expect(result[:status]).to eq(:success)
expect(result[:pod_name]).to eq(pod_name) expect(result[:pod_name]).to eq(pod_name)
expect(result[:container_name]).to eq(container_name) expect(result[:container_name]).to eq(container_name)
end end
end end
context 'when pod_name is too long' do
let(:params) do
{
'pod_name' => "a very long string." * 15
}
end
it 'returns an error' do
result = subject.send(:check_param_lengths, {})
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('pod_name cannot be larger than 253 chars')
end
end
context 'when container_name is too long' do
let(:params) do
{
'container_name' => "a very long string." * 15
}
end
it 'returns an error' do
result = subject.send(:check_param_lengths, {})
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('container_name cannot be larger than 253 chars')
end
end
end end
describe '#get_raw_pods' do describe '#get_raw_pods' do
...@@ -150,80 +118,4 @@ describe ::PodLogs::BaseService do ...@@ -150,80 +118,4 @@ describe ::PodLogs::BaseService do
expect(result[:pods]).to eq([pod_name]) expect(result[:pods]).to eq([pod_name])
end end
end end
describe '#check_pod_name' do
it 'returns success if pod_name was specified' do
result = subject.send(:check_pod_name, pod_name: pod_name, pods: [pod_name])
expect(result[:status]).to eq(:success)
expect(result[:pod_name]).to eq(pod_name)
end
it 'returns success if pod_name was not specified but there are pods' do
result = subject.send(:check_pod_name, pod_name: nil, pods: [pod_name])
expect(result[:status]).to eq(:success)
expect(result[:pod_name]).to eq(pod_name)
end
it 'returns error if pod_name was not specified and there are no pods' do
result = subject.send(:check_pod_name, pod_name: nil, pods: [])
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('No pods available')
end
it 'returns error if pod_name was specified but does not exist' do
result = subject.send(:check_pod_name, pod_name: 'another_pod', pods: [pod_name])
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('Pod does not exist')
end
end
describe '#check_container_name' do
it 'returns success if container_name was specified' do
result = subject.send(:check_container_name,
container_name: container_name,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:success)
expect(result[:container_name]).to eq(container_name)
end
it 'returns success if container_name was not specified and there are containers' do
result = subject.send(:check_container_name,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:success)
expect(result[:container_name]).to eq(container_name)
end
it 'returns error if container_name was not specified and there are no containers on the pod' do
raw_pods.first.spec.containers = []
result = subject.send(:check_container_name,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('No containers available')
end
it 'returns error if container_name was specified but does not exist' do
result = subject.send(:check_container_name,
container_name: 'foo',
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('Container does not exist')
end
end
end end
...@@ -170,7 +170,7 @@ describe ::PodLogs::ElasticsearchService do ...@@ -170,7 +170,7 @@ describe ::PodLogs::ElasticsearchService do
.and_return(Elasticsearch::Transport::Client.new) .and_return(Elasticsearch::Transport::Client.new)
allow_any_instance_of(::Gitlab::Elasticsearch::Logs) allow_any_instance_of(::Gitlab::Elasticsearch::Logs)
.to receive(:pod_logs) .to receive(:pod_logs)
.with(namespace, pod_name, container_name: container_name, search: search, start_time: start_time, end_time: end_time, cursor: cursor) .with(namespace, pod_name: pod_name, container_name: container_name, search: search, start_time: start_time, end_time: end_time, cursor: cursor)
.and_return({ logs: expected_logs, cursor: expected_cursor }) .and_return({ logs: expected_logs, cursor: expected_cursor })
result = subject.send(:pod_logs, result_arg) result = subject.send(:pod_logs, result_arg)
......
...@@ -9,13 +9,18 @@ describe ::PodLogs::KubernetesService do ...@@ -9,13 +9,18 @@ describe ::PodLogs::KubernetesService do
let(:namespace) { 'autodevops-deploy-9-production' } let(:namespace) { 'autodevops-deploy-9-production' }
let(:pod_name) { 'pod-1' } let(:pod_name) { 'pod-1' }
let(:container_name) { 'container-1' } let(:container_name) { 'container-0' }
let(:params) { {} } let(:params) { {} }
let(:raw_logs) do let(:raw_logs) do
"2019-12-13T14:04:22.123456Z Log 1\n2019-12-13T14:04:23.123456Z Log 2\n" \ "2019-12-13T14:04:22.123456Z Log 1\n2019-12-13T14:04:23.123456Z Log 2\n" \
"2019-12-13T14:04:24.123456Z Log 3" "2019-12-13T14:04:24.123456Z Log 3"
end end
let(:raw_pods) do
JSON.parse([
kube_pod(name: pod_name)
].to_json, object_class: OpenStruct)
end
subject { described_class.new(cluster, namespace, params: params) } subject { described_class.new(cluster, namespace, params: params) }
...@@ -140,9 +145,9 @@ describe ::PodLogs::KubernetesService do ...@@ -140,9 +145,9 @@ describe ::PodLogs::KubernetesService do
let(:expected_logs) do let(:expected_logs) do
[ [
{ message: "Log 1", timestamp: "2019-12-13T14:04:22.123456Z" }, { message: "Log 1", pod: 'pod-1', timestamp: "2019-12-13T14:04:22.123456Z" },
{ message: "Log 2", timestamp: "2019-12-13T14:04:23.123456Z" }, { message: "Log 2", pod: 'pod-1', timestamp: "2019-12-13T14:04:23.123456Z" },
{ message: "Log 3", timestamp: "2019-12-13T14:04:24.123456Z" } { message: "Log 3", pod: 'pod-1', timestamp: "2019-12-13T14:04:24.123456Z" }
] ]
end end
...@@ -163,4 +168,98 @@ describe ::PodLogs::KubernetesService do ...@@ -163,4 +168,98 @@ describe ::PodLogs::KubernetesService do
end end
end end
end end
describe '#check_pod_name' do
it 'returns success if pod_name was specified' do
result = subject.send(:check_pod_name, pod_name: pod_name, pods: [pod_name])
expect(result[:status]).to eq(:success)
expect(result[:pod_name]).to eq(pod_name)
end
it 'returns success if pod_name was not specified but there are pods' do
result = subject.send(:check_pod_name, pod_name: nil, pods: [pod_name])
expect(result[:status]).to eq(:success)
expect(result[:pod_name]).to eq(pod_name)
end
it 'returns error if pod_name was not specified and there are no pods' do
result = subject.send(:check_pod_name, pod_name: nil, pods: [])
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('No pods available')
end
it 'returns error if pod_name was specified but does not exist' do
result = subject.send(:check_pod_name, pod_name: 'another_pod', pods: [pod_name])
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('Pod does not exist')
end
it 'returns error if pod_name is too long' do
result = subject.send(:check_pod_name, pod_name: "a very long string." * 15, pods: [pod_name])
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('pod_name cannot be larger than 253 chars')
end
end
describe '#check_container_name' do
it 'returns success if container_name was specified' do
result = subject.send(:check_container_name,
container_name: container_name,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:success)
expect(result[:container_name]).to eq(container_name)
end
it 'returns success if container_name was not specified and there are containers' do
result = subject.send(:check_container_name,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:success)
expect(result[:container_name]).to eq(container_name)
end
it 'returns error if container_name was not specified and there are no containers on the pod' do
raw_pods.first.spec.containers = []
result = subject.send(:check_container_name,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('No containers available')
end
it 'returns error if container_name was specified but does not exist' do
result = subject.send(:check_container_name,
container_name: 'foo',
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('Container does not exist')
end
it 'returns error if container_name is too long' do
result = subject.send(:check_container_name,
container_name: "a very long string." * 15,
pod_name: pod_name,
raw_pods: raw_pods
)
expect(result[:status]).to eq(:error)
expect(result[:message]).to eq('container_name cannot be larger than 253 chars')
end
end
end end
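The examples above move the `check_pod_name`/`check_container_name` coverage, including the 253-character limit previously tested through `check_param_lengths` in `PodLogs::BaseService`, into the Kubernetes service spec. A minimal sketch consistent with those expectations follows; the actual `PodLogs::KubernetesService` code is not shown in this diff, so the module name and method bodies below are assumptions derived only from the spec behaviour.

```ruby
# Illustrative sketch only. It models the validation order the new examples
# pin down: default to the first pod/container, reject names longer than 253
# characters before checking existence, and report missing pods/containers.
module PodLogs
  module KubernetesChecksSketch
    K8S_NAME_MAX_LENGTH = 253

    def check_pod_name(result)
      result[:pod_name] ||= result[:pods]&.first

      return { status: :error, message: 'No pods available' } unless result[:pod_name]

      if result[:pod_name].length > K8S_NAME_MAX_LENGTH
        return { status: :error, message: "pod_name cannot be larger than #{K8S_NAME_MAX_LENGTH} chars" }
      end

      return { status: :error, message: 'Pod does not exist' } unless result[:pods].include?(result[:pod_name])

      result.merge(status: :success)
    end

    def check_container_name(result)
      pod = result[:raw_pods].find { |p| p.metadata.name == result[:pod_name] }
      containers = pod ? pod.spec.containers.map(&:name) : []

      result[:container_name] ||= containers.first

      return { status: :error, message: 'No containers available' } unless result[:container_name]

      if result[:container_name].length > K8S_NAME_MAX_LENGTH
        return { status: :error, message: "container_name cannot be larger than #{K8S_NAME_MAX_LENGTH} chars" }
      end

      return { status: :error, message: 'Container does not exist' } unless containers.include?(result[:container_name])

      result.merge(status: :success)
    end
  end
end
```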