gitlab.com/gitlab-org/gitlab-foss.git
-rw-r--r--  app/assets/javascripts/logs/components/environment_logs.vue | 23
-rw-r--r--  app/assets/javascripts/logs/stores/actions.js | 5
-rw-r--r--  app/assets/javascripts/logs/stores/getters.js | 4
-rw-r--r--  app/assets/javascripts/logs/stores/mutation_types.js | 1
-rw-r--r--  app/assets/javascripts/logs/stores/mutations.js | 10
-rw-r--r--  app/assets/javascripts/logs/stores/state.js | 2
-rw-r--r--  app/assets/javascripts/monitoring/components/dashboard.vue | 3
-rw-r--r--  app/assets/javascripts/monitoring/components/embed.vue | 3
-rw-r--r--  app/assets/javascripts/monitoring/constants.js | 36
-rw-r--r--  app/assets/javascripts/vue_shared/components/date_time_picker/date_time_picker.vue | 6
-rw-r--r--  app/assets/javascripts/vue_shared/constants.js | 56
-rw-r--r--  app/services/pod_logs/base_service.rb | 52
-rw-r--r--  app/services/pod_logs/elasticsearch_service.rb | 5
-rw-r--r--  app/services/pod_logs/kubernetes_service.rb | 48
-rw-r--r--  changelogs/unreleased/ak-all-pods.yml | 5
-rw-r--r--  changelogs/unreleased/udpate-cluster-application-image-to-0-12.yml | 5
-rw-r--r--  doc/api/graphql/reference/gitlab_schema.graphql | 130
-rw-r--r--  doc/api/graphql/reference/gitlab_schema.json | 309
-rw-r--r--  doc/api/graphql/reference/index.md | 1
-rw-r--r--  doc/development/contributing/issue_workflow.md | 2
-rw-r--r--  doc/development/insert_into_tables_in_batches.md | 50
-rw-r--r--  doc/policy/maintenance.md | 18
-rw-r--r--  doc/user/clusters/applications.md | 90
-rw-r--r--  lib/gitlab/ci/templates/Managed-Cluster-Applications.gitlab-ci.yml | 3
-rw-r--r--  lib/gitlab/elasticsearch/logs.rb | 10
-rw-r--r--  locale/gitlab.pot | 14
-rw-r--r--  spec/fixtures/lib/elasticsearch/logs_response.json | 28
-rw-r--r--  spec/fixtures/lib/elasticsearch/query.json | 3
-rw-r--r--  spec/fixtures/lib/elasticsearch/query_with_container.json | 3
-rw-r--r--  spec/fixtures/lib/elasticsearch/query_with_cursor.json | 3
-rw-r--r--  spec/fixtures/lib/elasticsearch/query_with_end_time.json | 3
-rw-r--r--  spec/fixtures/lib/elasticsearch/query_with_search.json | 3
-rw-r--r--  spec/fixtures/lib/elasticsearch/query_with_start_time.json | 3
-rw-r--r--  spec/fixtures/lib/elasticsearch/query_with_times.json | 3
-rw-r--r--  spec/frontend/logs/components/environment_logs_spec.js | 7
-rw-r--r--  spec/frontend/logs/mock_data.js | 90
-rw-r--r--  spec/frontend/logs/stores/actions_spec.js | 8
-rw-r--r--  spec/frontend/logs/stores/mutations_spec.js | 11
-rw-r--r--  spec/frontend/monitoring/components/__snapshots__/dashboard_template_spec.js.snap | 1
-rw-r--r--  spec/frontend/monitoring/components/dashboard_url_time_spec.js | 2
-rw-r--r--  spec/frontend/logs/stores/mutations_spec.js | 11
44 files changed, 1009 insertions(+), 291 deletions(-)
diff --git a/app/assets/javascripts/logs/components/environment_logs.vue b/app/assets/javascripts/logs/components/environment_logs.vue
index b0acd69bae0..03019e4c25e 100644
--- a/app/assets/javascripts/logs/components/environment_logs.vue
+++ b/app/assets/javascripts/logs/components/environment_logs.vue
@@ -5,15 +5,17 @@ import {
GlSprintf,
GlAlert,
GlDropdown,
+ GlDropdownDivider,
GlDropdownItem,
GlFormGroup,
GlSearchBoxByClick,
GlInfiniteScroll,
} from '@gitlab/ui';
+import { s__ } from '~/locale';
import DateTimePicker from '~/vue_shared/components/date_time_picker/date_time_picker.vue';
import LogControlButtons from './log_control_buttons.vue';
-import { timeRanges, defaultTimeRange } from '~/monitoring/constants';
+import { timeRanges, defaultTimeRange } from '~/vue_shared/constants';
import { timeRangeFromUrl } from '~/monitoring/utils';
import { formatDate } from '../utils';
@@ -22,6 +24,7 @@ export default {
GlSprintf,
GlAlert,
GlDropdown,
+ GlDropdownDivider,
GlDropdownItem,
GlFormGroup,
GlSearchBoxByClick,
@@ -90,6 +93,16 @@ export default {
shouldShowElasticStackCallout() {
return !this.isElasticStackCalloutDismissed && this.disableAdvancedControls;
},
+
+ podDropdownText() {
+ if (this.pods.current) {
+ return this.pods.current;
+ } else if (this.advancedFeaturesEnabled) {
+ // "All pods" is a valid option when advanced querying is available
+ return s__('Environments|All pods');
+ }
+ return s__('Environments|No pod selected');
+ },
},
mounted() {
this.setInitData({
@@ -178,11 +191,17 @@ export default {
>
<gl-dropdown
id="pods-dropdown"
- :text="pods.current || s__('Environments|No pods to display')"
+ :text="podDropdownText"
:disabled="environments.isLoading"
class="d-flex gl-h-32 js-pods-dropdown"
toggle-class="dropdown-menu-toggle"
>
+ <template v-if="advancedFeaturesEnabled">
+ <gl-dropdown-item key="all-pods" @click="showPodLogs(null)">
+ {{ s__('Environments|All pods') }}
+ </gl-dropdown-item>
+ <gl-dropdown-divider />
+ </template>
<gl-dropdown-item
v-for="podName in pods.options"
:key="podName"
diff --git a/app/assets/javascripts/logs/stores/actions.js b/app/assets/javascripts/logs/stores/actions.js
index 4544ebdfec1..834462573cc 100644
--- a/app/assets/javascripts/logs/stores/actions.js
+++ b/app/assets/javascripts/logs/stores/actions.js
@@ -82,7 +82,6 @@ export const setTimeRange = ({ dispatch, commit }, timeRange) => {
export const showEnvironment = ({ dispatch, commit }, environmentName) => {
commit(types.SET_PROJECT_ENVIRONMENT, environmentName);
- commit(types.SET_CURRENT_POD_NAME, null);
dispatch('fetchLogs');
};
@@ -107,16 +106,16 @@ export const fetchEnvironments = ({ commit, dispatch }, environmentsPath) => {
};
export const fetchLogs = ({ commit, state }) => {
- commit(types.REQUEST_PODS_DATA);
commit(types.REQUEST_LOGS_DATA);
return requestLogsUntilData(state)
.then(({ data }) => {
const { pod_name, pods, logs, cursor } = data;
+ commit(types.RECEIVE_LOGS_DATA_SUCCESS, { logs, cursor });
+
commit(types.SET_CURRENT_POD_NAME, pod_name);
commit(types.RECEIVE_PODS_DATA_SUCCESS, pods);
- commit(types.RECEIVE_LOGS_DATA_SUCCESS, { logs, cursor });
})
.catch(() => {
commit(types.RECEIVE_PODS_DATA_ERROR);
diff --git a/app/assets/javascripts/logs/stores/getters.js b/app/assets/javascripts/logs/stores/getters.js
index 58f2dbf4835..8770306fdd6 100644
--- a/app/assets/javascripts/logs/stores/getters.js
+++ b/app/assets/javascripts/logs/stores/getters.js
@@ -1,7 +1,7 @@
import { formatDate } from '../utils';
-const mapTrace = ({ timestamp = null, message = '' }) =>
- [timestamp ? formatDate(timestamp) : '', message].join(' | ');
+const mapTrace = ({ timestamp = null, pod = '', message = '' }) =>
+ [timestamp ? formatDate(timestamp) : '', pod, message].join(' | ');
export const trace = state => state.logs.lines.map(mapTrace).join('\n');
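
A hedged sketch of what the updated getter now produces per line; `formatDate` is stubbed here (the real helper lives in `~/logs/utils` and renders timestamps as in the spec fixtures further down in this diff):

```javascript
// Sketch only: the updated mapping, reproduced with a stand-in for formatDate.
const formatDate = timestamp => new Date(timestamp).toISOString(); // stand-in for ~/logs/utils formatDate

const mapTrace = ({ timestamp = null, pod = '', message = '' }) =>
  [timestamp ? formatDate(timestamp) : '', pod, message].join(' | ');

mapTrace({ timestamp: '2019-12-13T13:43:18.276Z', pod: 'foo', message: '- -> /' });
// With the real formatDate this renders as:
// 'Dec 13 13:43:18.276Z | foo | - -> /'
// (compare mockTrace in spec/frontend/logs/mock_data.js below)
```
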
diff --git a/app/assets/javascripts/logs/stores/mutation_types.js b/app/assets/javascripts/logs/stores/mutation_types.js
index 5ff49135e41..7e7771a9df8 100644
--- a/app/assets/javascripts/logs/stores/mutation_types.js
+++ b/app/assets/javascripts/logs/stores/mutation_types.js
@@ -14,6 +14,5 @@ export const REQUEST_LOGS_DATA_PREPEND = 'REQUEST_LOGS_DATA_PREPEND';
export const RECEIVE_LOGS_DATA_PREPEND_SUCCESS = 'RECEIVE_LOGS_DATA_PREPEND_SUCCESS';
export const RECEIVE_LOGS_DATA_PREPEND_ERROR = 'RECEIVE_LOGS_DATA_PREPEND_ERROR';
-export const REQUEST_PODS_DATA = 'REQUEST_PODS_DATA';
export const RECEIVE_PODS_DATA_SUCCESS = 'RECEIVE_PODS_DATA_SUCCESS';
export const RECEIVE_PODS_DATA_ERROR = 'RECEIVE_PODS_DATA_ERROR';
diff --git a/app/assets/javascripts/logs/stores/mutations.js b/app/assets/javascripts/logs/stores/mutations.js
index d94d71cd25a..d77c2894a05 100644
--- a/app/assets/javascripts/logs/stores/mutations.js
+++ b/app/assets/javascripts/logs/stores/mutations.js
@@ -1,8 +1,9 @@
import * as types from './mutation_types';
import { convertToFixedRange } from '~/lib/utils/datetime_range';
-const mapLine = ({ timestamp, message }) => ({
+const mapLine = ({ timestamp, pod, message }) => ({
timestamp,
+ pod,
message,
});
@@ -21,6 +22,10 @@ export default {
// Environments Data
[types.SET_PROJECT_ENVIRONMENT](state, environmentName) {
state.environments.current = environmentName;
+
+ // Clear current pod options
+ state.pods.current = null;
+ state.pods.options = [];
},
[types.REQUEST_ENVIRONMENTS_DATA](state) {
state.environments.options = [];
@@ -81,9 +86,6 @@ export default {
[types.SET_CURRENT_POD_NAME](state, podName) {
state.pods.current = podName;
},
- [types.REQUEST_PODS_DATA](state) {
- state.pods.options = [];
- },
[types.RECEIVE_PODS_DATA_SUCCESS](state, podOptions) {
state.pods.options = podOptions;
},
diff --git a/app/assets/javascripts/logs/stores/state.js b/app/assets/javascripts/logs/stores/state.js
index e058f15b6b4..2c8f47294cc 100644
--- a/app/assets/javascripts/logs/stores/state.js
+++ b/app/assets/javascripts/logs/stores/state.js
@@ -1,4 +1,4 @@
-import { timeRanges, defaultTimeRange } from '~/monitoring/constants';
+import { timeRanges, defaultTimeRange } from '~/vue_shared/constants';
import { convertToFixedRange } from '~/lib/utils/datetime_range';
export default () => ({
diff --git a/app/assets/javascripts/monitoring/components/dashboard.vue b/app/assets/javascripts/monitoring/components/dashboard.vue
index a0bd45bef5e..dbfb3e97c20 100644
--- a/app/assets/javascripts/monitoring/components/dashboard.vue
+++ b/app/assets/javascripts/monitoring/components/dashboard.vue
@@ -31,7 +31,8 @@ import DashboardsDropdown from './dashboards_dropdown.vue';
import TrackEventDirective from '~/vue_shared/directives/track_event';
import { getAddMetricTrackingOptions, timeRangeToUrl, timeRangeFromUrl } from '../utils';
-import { defaultTimeRange, timeRanges, metricStates } from '../constants';
+import { metricStates } from '../constants';
+import { defaultTimeRange, timeRanges } from '~/vue_shared/constants';
export default {
components: {
diff --git a/app/assets/javascripts/monitoring/components/embed.vue b/app/assets/javascripts/monitoring/components/embed.vue
index 826b73908a4..6182b570e76 100644
--- a/app/assets/javascripts/monitoring/components/embed.vue
+++ b/app/assets/javascripts/monitoring/components/embed.vue
@@ -3,7 +3,8 @@ import { mapActions, mapState, mapGetters } from 'vuex';
import PanelType from 'ee_else_ce/monitoring/components/panel_type.vue';
import { convertToFixedRange } from '~/lib/utils/datetime_range';
import { timeRangeFromUrl, removeTimeRangeParams } from '../utils';
-import { sidebarAnimationDuration, defaultTimeRange } from '../constants';
+import { sidebarAnimationDuration } from '../constants';
+import { defaultTimeRange } from '~/vue_shared/constants';
let sidebarMutationObserver;
diff --git a/app/assets/javascripts/monitoring/constants.js b/app/assets/javascripts/monitoring/constants.js
index ddf6c9878df..cc7f5af2259 100644
--- a/app/assets/javascripts/monitoring/constants.js
+++ b/app/assets/javascripts/monitoring/constants.js
@@ -1,5 +1,3 @@
-import { __ } from '~/locale';
-
export const PROMETHEUS_TIMEOUT = 120000; // TWO_MINUTES
/**
@@ -89,37 +87,3 @@ export const dateFormats = {
timeOfDay: 'h:MM TT',
default: 'dd mmm yyyy, h:MMTT',
};
-
-export const timeRanges = [
- {
- label: __('30 minutes'),
- duration: { seconds: 60 * 30 },
- },
- {
- label: __('3 hours'),
- duration: { seconds: 60 * 60 * 3 },
- },
- {
- label: __('8 hours'),
- duration: { seconds: 60 * 60 * 8 },
- default: true,
- },
- {
- label: __('1 day'),
- duration: { seconds: 60 * 60 * 24 * 1 },
- },
- {
- label: __('3 days'),
- duration: { seconds: 60 * 60 * 24 * 3 },
- },
- {
- label: __('1 week'),
- duration: { seconds: 60 * 60 * 24 * 7 * 1 },
- },
- {
- label: __('1 month'),
- duration: { seconds: 60 * 60 * 24 * 30 },
- },
-];
-
-export const defaultTimeRange = timeRanges.find(tr => tr.default);
diff --git a/app/assets/javascripts/vue_shared/components/date_time_picker/date_time_picker.vue b/app/assets/javascripts/vue_shared/components/date_time_picker/date_time_picker.vue
index 9ac687f5e2c..7b09337eb15 100644
--- a/app/assets/javascripts/vue_shared/components/date_time_picker/date_time_picker.vue
+++ b/app/assets/javascripts/vue_shared/components/date_time_picker/date_time_picker.vue
@@ -43,6 +43,11 @@ export default {
required: false,
default: () => defaultTimeRanges,
},
+ customEnabled: {
+ type: Boolean,
+ required: false,
+ default: true,
+ },
},
data() {
return {
@@ -166,6 +171,7 @@ export default {
>
<div class="d-flex justify-content-between gl-p-2">
<gl-form-group
+ v-if="customEnabled"
:label="__('Custom range')"
label-for="custom-from-time"
label-class="gl-pb-1"
diff --git a/app/assets/javascripts/vue_shared/constants.js b/app/assets/javascripts/vue_shared/constants.js
new file mode 100644
index 00000000000..63ce4212717
--- /dev/null
+++ b/app/assets/javascripts/vue_shared/constants.js
@@ -0,0 +1,56 @@
+import { __ } from '~/locale';
+
+const INTERVALS = {
+ minute: 'minute',
+ hour: 'hour',
+ day: 'day',
+};
+
+export const timeRanges = [
+ {
+ label: __('30 minutes'),
+ duration: { seconds: 60 * 30 },
+ name: 'thirtyMinutes',
+ interval: INTERVALS.minute,
+ },
+ {
+ label: __('3 hours'),
+ duration: { seconds: 60 * 60 * 3 },
+ name: 'threeHours',
+ interval: INTERVALS.hour,
+ },
+ {
+ label: __('8 hours'),
+ duration: { seconds: 60 * 60 * 8 },
+ name: 'eightHours',
+ default: true,
+ interval: INTERVALS.hour,
+ },
+ {
+ label: __('1 day'),
+ duration: { seconds: 60 * 60 * 24 * 1 },
+ name: 'oneDay',
+ interval: INTERVALS.hour,
+ },
+ {
+ label: __('3 days'),
+ duration: { seconds: 60 * 60 * 24 * 3 },
+ name: 'threeDays',
+ interval: INTERVALS.hour,
+ },
+ {
+ label: __('1 week'),
+ duration: { seconds: 60 * 60 * 24 * 7 * 1 },
+ name: 'oneWeek',
+ interval: INTERVALS.day,
+ },
+ {
+ label: __('1 month'),
+ duration: { seconds: 60 * 60 * 24 * 30 },
+ name: 'oneMonth',
+ interval: INTERVALS.day,
+ },
+];
+
+export const defaultTimeRange = timeRanges.find(tr => tr.default);
+export const getTimeWindow = timeWindowName => timeRanges.find(tr => tr.name === timeWindowName);
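
A small usage sketch for the constants this new file exports; the import path matches the one used elsewhere in this diff, and the values shown follow from the definitions above:

```javascript
// Sketch only: consuming the relocated time-range constants.
import { timeRanges, defaultTimeRange, getTimeWindow } from '~/vue_shared/constants';

// The default range is the entry flagged `default: true` (8 hours).
defaultTimeRange.name; // => 'eightHours'

// Look up a named window, e.g. when restoring a selection by name.
getTimeWindow('oneWeek').duration.seconds; // => 604800 (60 * 60 * 24 * 7)

timeRanges.map(tr => tr.label); // => ['30 minutes', '3 hours', ..., '1 month']
```
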
diff --git a/app/services/pod_logs/base_service.rb b/app/services/pod_logs/base_service.rb
index d94d4e92eb6..8cc8fb913a2 100644
--- a/app/services/pod_logs/base_service.rb
+++ b/app/services/pod_logs/base_service.rb
@@ -55,22 +55,10 @@ module PodLogs
return error(_('Cluster does not exist')) if cluster.nil?
return error(_('Namespace is empty')) if namespace.blank?
- success(result)
- end
-
- def check_param_lengths(_result)
- pod_name = params['pod_name'].presence
- container_name = params['container_name'].presence
+ result[:pod_name] = params['pod_name'].presence
+ result[:container_name] = params['container_name'].presence
- if pod_name&.length.to_i > K8S_NAME_MAX_LENGTH
- return error(_('pod_name cannot be larger than %{max_length}'\
- ' chars' % { max_length: K8S_NAME_MAX_LENGTH }))
- elsif container_name&.length.to_i > K8S_NAME_MAX_LENGTH
- return error(_('container_name cannot be larger than'\
- ' %{max_length} chars' % { max_length: K8S_NAME_MAX_LENGTH }))
- end
-
- success(pod_name: pod_name, container_name: container_name)
+ success(result)
end
def get_raw_pods(result)
@@ -85,40 +73,6 @@ module PodLogs
success(result)
end
- def check_pod_name(result)
- # If pod_name is not received as parameter, get the pod logs of the first
- # pod of this namespace.
- result[:pod_name] ||= result[:pods].first
-
- unless result[:pod_name]
- return error(_('No pods available'))
- end
-
- unless result[:pods].include?(result[:pod_name])
- return error(_('Pod does not exist'))
- end
-
- success(result)
- end
-
- def check_container_name(result)
- pod_details = result[:raw_pods].first { |p| p.metadata.name == result[:pod_name] }
- containers = pod_details.spec.containers.map(&:name)
-
- # select first container if not specified
- result[:container_name] ||= containers.first
-
- unless result[:container_name]
- return error(_('No containers available'))
- end
-
- unless containers.include?(result[:container_name])
- return error(_('Container does not exist'))
- end
-
- success(result)
- end
-
def pod_logs(result)
raise NotImplementedError
end
diff --git a/app/services/pod_logs/elasticsearch_service.rb b/app/services/pod_logs/elasticsearch_service.rb
index 3bb6e2bd846..11862de4ade 100644
--- a/app/services/pod_logs/elasticsearch_service.rb
+++ b/app/services/pod_logs/elasticsearch_service.rb
@@ -3,11 +3,8 @@
module PodLogs
class ElasticsearchService < PodLogs::BaseService
steps :check_arguments,
- :check_param_lengths,
:get_raw_pods,
:get_pod_names,
- :check_pod_name,
- :check_container_name,
:check_times,
:check_search,
:check_cursor,
@@ -53,7 +50,7 @@ module PodLogs
response = ::Gitlab::Elasticsearch::Logs.new(client).pod_logs(
namespace,
- result[:pod_name],
+ pod_name: result[:pod_name],
container_name: result[:container_name],
search: result[:search],
start_time: result[:start],
diff --git a/app/services/pod_logs/kubernetes_service.rb b/app/services/pod_logs/kubernetes_service.rb
index 6c8ed74f8e1..92ebb84b877 100644
--- a/app/services/pod_logs/kubernetes_service.rb
+++ b/app/services/pod_logs/kubernetes_service.rb
@@ -8,7 +8,6 @@ module PodLogs
EncodingHelperError = Class.new(StandardError)
steps :check_arguments,
- :check_param_lengths,
:get_raw_pods,
:get_pod_names,
:check_pod_name,
@@ -22,6 +21,50 @@ module PodLogs
private
+ def check_pod_name(result)
+ # If pod_name is not received as parameter, get the pod logs of the first
+ # pod of this namespace.
+ result[:pod_name] ||= result[:pods].first
+
+ unless result[:pod_name]
+ return error(_('No pods available'))
+ end
+
+ unless result[:pod_name].length.to_i <= K8S_NAME_MAX_LENGTH
+ return error(_('pod_name cannot be larger than %{max_length}'\
+ ' chars' % { max_length: K8S_NAME_MAX_LENGTH }))
+ end
+
+ unless result[:pods].include?(result[:pod_name])
+ return error(_('Pod does not exist'))
+ end
+
+ success(result)
+ end
+
+ def check_container_name(result)
+ pod_details = result[:raw_pods].first { |p| p.metadata.name == result[:pod_name] }
+ containers = pod_details.spec.containers.map(&:name)
+
+ # select first container if not specified
+ result[:container_name] ||= containers.first
+
+ unless result[:container_name]
+ return error(_('No containers available'))
+ end
+
+ unless result[:container_name].length.to_i <= K8S_NAME_MAX_LENGTH
+ return error(_('container_name cannot be larger than'\
+ ' %{max_length} chars' % { max_length: K8S_NAME_MAX_LENGTH }))
+ end
+
+ unless containers.include?(result[:container_name])
+ return error(_('Container does not exist'))
+ end
+
+ success(result)
+ end
+
def pod_logs(result)
result[:logs] = cluster.kubeclient.get_pod_log(
result[:pod_name],
@@ -62,7 +105,8 @@ module PodLogs
values = line.split(' ', 2)
{
timestamp: values[0],
- message: values[1]
+ message: values[1],
+ pod: result[:pod_name]
}
end
diff --git a/changelogs/unreleased/ak-all-pods.yml b/changelogs/unreleased/ak-all-pods.yml
new file mode 100644
index 00000000000..0d2269585d0
--- /dev/null
+++ b/changelogs/unreleased/ak-all-pods.yml
@@ -0,0 +1,5 @@
+---
+title: Add all pods view to logs explorer
+merge_request: 26883
+author:
+type: added
diff --git a/changelogs/unreleased/udpate-cluster-application-image-to-0-12.yml b/changelogs/unreleased/udpate-cluster-application-image-to-0-12.yml
new file mode 100644
index 00000000000..786e088088c
--- /dev/null
+++ b/changelogs/unreleased/udpate-cluster-application-image-to-0-12.yml
@@ -0,0 +1,5 @@
+---
+title: Adds crossplane as CI/CD Managed App
+merge_request: 27374
+author:
+type: added
diff --git a/doc/api/graphql/reference/gitlab_schema.graphql b/doc/api/graphql/reference/gitlab_schema.graphql
index 74ec8efbe99..e3f988016fe 100644
--- a/doc/api/graphql/reference/gitlab_schema.graphql
+++ b/doc/api/graphql/reference/gitlab_schema.graphql
@@ -6001,6 +6001,76 @@ type Project {
requestAccessEnabled: Boolean
"""
+ Find a single requirement. Available only when feature flag `requirements_management` is enabled.
+ """
+ requirement(
+ """
+ IID of the requirement, e.g., "1"
+ """
+ iid: ID
+
+ """
+ List of IIDs of requirements, e.g., [1, 2]
+ """
+ iids: [ID!]
+
+ """
+ List requirements by sort order
+ """
+ sort: Sort
+
+ """
+ Filter requirements by state
+ """
+ state: RequirementState
+ ): Requirement
+
+ """
+ Find requirements. Available only when feature flag `requirements_management` is enabled.
+ """
+ requirements(
+ """
+ Returns the elements in the list that come after the specified cursor.
+ """
+ after: String
+
+ """
+ Returns the elements in the list that come before the specified cursor.
+ """
+ before: String
+
+ """
+ Returns the first _n_ elements from the list.
+ """
+ first: Int
+
+ """
+ IID of the requirement, e.g., "1"
+ """
+ iid: ID
+
+ """
+ List of IIDs of requirements, e.g., [1, 2]
+ """
+ iids: [ID!]
+
+ """
+ Returns the last _n_ elements from the list.
+ """
+ last: Int
+
+ """
+ List requirements by sort order
+ """
+ sort: Sort
+
+ """
+ Filter requirements by state
+ """
+ state: RequirementState
+ ): RequirementConnection
+
+ """
Detailed version of a Sentry error on the project
"""
sentryDetailedError(
@@ -6665,6 +6735,41 @@ type Requirement {
}
"""
+The connection type for Requirement.
+"""
+type RequirementConnection {
+ """
+ A list of edges.
+ """
+ edges: [RequirementEdge]
+
+ """
+ A list of nodes.
+ """
+ nodes: [Requirement]
+
+ """
+ Information to aid in pagination.
+ """
+ pageInfo: PageInfo!
+}
+
+"""
+An edge in a connection.
+"""
+type RequirementEdge {
+ """
+ A cursor for use in pagination.
+ """
+ cursor: String!
+
+ """
+ The item at the end of the edge.
+ """
+ node: Requirement
+}
+
+"""
Check permissions for the current user on a requirement
"""
type RequirementPermissions {
@@ -7463,6 +7568,31 @@ type SnippetPermissions {
updateSnippet: Boolean!
}
+"""
+Common sort values
+"""
+enum Sort {
+ """
+ Created at ascending order
+ """
+ created_asc
+
+ """
+ Created at descending order
+ """
+ created_desc
+
+ """
+ Updated at ascending order
+ """
+ updated_asc
+
+ """
+ Updated at descending order
+ """
+ updated_desc
+}
+
type Submodule implements Entry {
"""
Flat path of the entry
diff --git a/doc/api/graphql/reference/gitlab_schema.json b/doc/api/graphql/reference/gitlab_schema.json
index 8eb9b53af04..3d941d9cc69 100644
--- a/doc/api/graphql/reference/gitlab_schema.json
+++ b/doc/api/graphql/reference/gitlab_schema.json
@@ -18033,6 +18033,168 @@
"deprecationReason": null
},
{
+ "name": "requirement",
+ "description": "Find a single requirement. Available only when feature flag `requirements_management` is enabled.",
+ "args": [
+ {
+ "name": "iid",
+ "description": "IID of the requirement, e.g., \"1\"",
+ "type": {
+ "kind": "SCALAR",
+ "name": "ID",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "iids",
+ "description": "List of IIDs of requirements, e.g., [1, 2]",
+ "type": {
+ "kind": "LIST",
+ "name": null,
+ "ofType": {
+ "kind": "NON_NULL",
+ "name": null,
+ "ofType": {
+ "kind": "SCALAR",
+ "name": "ID",
+ "ofType": null
+ }
+ }
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "sort",
+ "description": "List requirements by sort order",
+ "type": {
+ "kind": "ENUM",
+ "name": "Sort",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "state",
+ "description": "Filter requirements by state",
+ "type": {
+ "kind": "ENUM",
+ "name": "RequirementState",
+ "ofType": null
+ },
+ "defaultValue": null
+ }
+ ],
+ "type": {
+ "kind": "OBJECT",
+ "name": "Requirement",
+ "ofType": null
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "requirements",
+ "description": "Find requirements. Available only when feature flag `requirements_management` is enabled.",
+ "args": [
+ {
+ "name": "iid",
+ "description": "IID of the requirement, e.g., \"1\"",
+ "type": {
+ "kind": "SCALAR",
+ "name": "ID",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "iids",
+ "description": "List of IIDs of requirements, e.g., [1, 2]",
+ "type": {
+ "kind": "LIST",
+ "name": null,
+ "ofType": {
+ "kind": "NON_NULL",
+ "name": null,
+ "ofType": {
+ "kind": "SCALAR",
+ "name": "ID",
+ "ofType": null
+ }
+ }
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "sort",
+ "description": "List requirements by sort order",
+ "type": {
+ "kind": "ENUM",
+ "name": "Sort",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "state",
+ "description": "Filter requirements by state",
+ "type": {
+ "kind": "ENUM",
+ "name": "RequirementState",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "after",
+ "description": "Returns the elements in the list that come after the specified cursor.",
+ "type": {
+ "kind": "SCALAR",
+ "name": "String",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "before",
+ "description": "Returns the elements in the list that come before the specified cursor.",
+ "type": {
+ "kind": "SCALAR",
+ "name": "String",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "first",
+ "description": "Returns the first _n_ elements from the list.",
+ "type": {
+ "kind": "SCALAR",
+ "name": "Int",
+ "ofType": null
+ },
+ "defaultValue": null
+ },
+ {
+ "name": "last",
+ "description": "Returns the last _n_ elements from the list.",
+ "type": {
+ "kind": "SCALAR",
+ "name": "Int",
+ "ofType": null
+ },
+ "defaultValue": null
+ }
+ ],
+ "type": {
+ "kind": "OBJECT",
+ "name": "RequirementConnection",
+ "ofType": null
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
"name": "sentryDetailedError",
"description": "Detailed version of a Sentry error on the project",
"args": [
@@ -20108,6 +20270,118 @@
},
{
"kind": "OBJECT",
+ "name": "RequirementConnection",
+ "description": "The connection type for Requirement.",
+ "fields": [
+ {
+ "name": "edges",
+ "description": "A list of edges.",
+ "args": [
+
+ ],
+ "type": {
+ "kind": "LIST",
+ "name": null,
+ "ofType": {
+ "kind": "OBJECT",
+ "name": "RequirementEdge",
+ "ofType": null
+ }
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "nodes",
+ "description": "A list of nodes.",
+ "args": [
+
+ ],
+ "type": {
+ "kind": "LIST",
+ "name": null,
+ "ofType": {
+ "kind": "OBJECT",
+ "name": "Requirement",
+ "ofType": null
+ }
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "pageInfo",
+ "description": "Information to aid in pagination.",
+ "args": [
+
+ ],
+ "type": {
+ "kind": "NON_NULL",
+ "name": null,
+ "ofType": {
+ "kind": "OBJECT",
+ "name": "PageInfo",
+ "ofType": null
+ }
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ }
+ ],
+ "inputFields": null,
+ "interfaces": [
+
+ ],
+ "enumValues": null,
+ "possibleTypes": null
+ },
+ {
+ "kind": "OBJECT",
+ "name": "RequirementEdge",
+ "description": "An edge in a connection.",
+ "fields": [
+ {
+ "name": "cursor",
+ "description": "A cursor for use in pagination.",
+ "args": [
+
+ ],
+ "type": {
+ "kind": "NON_NULL",
+ "name": null,
+ "ofType": {
+ "kind": "SCALAR",
+ "name": "String",
+ "ofType": null
+ }
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "node",
+ "description": "The item at the end of the edge.",
+ "args": [
+
+ ],
+ "type": {
+ "kind": "OBJECT",
+ "name": "Requirement",
+ "ofType": null
+ },
+ "isDeprecated": false,
+ "deprecationReason": null
+ }
+ ],
+ "inputFields": null,
+ "interfaces": [
+
+ ],
+ "enumValues": null,
+ "possibleTypes": null
+ },
+ {
+ "kind": "OBJECT",
"name": "RequirementPermissions",
"description": "Check permissions for the current user on a requirement",
"fields": [
@@ -22644,6 +22918,41 @@
"possibleTypes": null
},
{
+ "kind": "ENUM",
+ "name": "Sort",
+ "description": "Common sort values",
+ "fields": null,
+ "inputFields": null,
+ "interfaces": null,
+ "enumValues": [
+ {
+ "name": "updated_desc",
+ "description": "Updated at descending order",
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "updated_asc",
+ "description": "Updated at ascending order",
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "created_desc",
+ "description": "Created at descending order",
+ "isDeprecated": false,
+ "deprecationReason": null
+ },
+ {
+ "name": "created_asc",
+ "description": "Created at ascending order",
+ "isDeprecated": false,
+ "deprecationReason": null
+ }
+ ],
+ "possibleTypes": null
+ },
+ {
"kind": "SCALAR",
"name": "String",
"description": "Represents textual data as UTF-8 character sequences. This type is most often used by GraphQL to represent free-form human-readable text.",
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index d6a427d045c..38067b275d5 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -898,6 +898,7 @@ Information about pagination in a connection.
| `removeSourceBranchAfterMerge` | Boolean | Indicates if `Delete source branch` option should be enabled by default for all new merge requests of the project |
| `repository` | Repository | Git repository of the project |
| `requestAccessEnabled` | Boolean | Indicates if users can request member access to the project |
+| `requirement` | Requirement | Find a single requirement. Available only when feature flag `requirements_management` is enabled. |
| `sentryDetailedError` | SentryDetailedError | Detailed version of a Sentry error on the project |
| `sentryErrors` | SentryErrorCollection | Paginated collection of Sentry errors on the project |
| `serviceDeskAddress` | String | E-mail address of the service desk. |
diff --git a/doc/development/contributing/issue_workflow.md b/doc/development/contributing/issue_workflow.md
index 46d1d4c2414..94cf1c223dd 100644
--- a/doc/development/contributing/issue_workflow.md
+++ b/doc/development/contributing/issue_workflow.md
@@ -81,7 +81,7 @@ already reserved for category labels).
The descriptions on the [labels page](https://gitlab.com/groups/gitlab-org/-/labels)
explain what falls under each type label.
-The GitLab handbook documents [when something is a bug and when it is a feature request.](https://about.gitlab.com/handbook/product/product-management/process/feature-or-bug.html)
+The GitLab handbook documents [when something is a bug and when it is a feature request](https://about.gitlab.com/handbook/product/product-management/process/feature-or-bug.html).
### Facet labels
diff --git a/doc/development/insert_into_tables_in_batches.md b/doc/development/insert_into_tables_in_batches.md
index 763185013c9..de62d2cca52 100644
--- a/doc/development/insert_into_tables_in_batches.md
+++ b/doc/development/insert_into_tables_in_batches.md
@@ -32,12 +32,12 @@ The `BulkInsertSafe` concern has two functions:
- It performs checks against your model class to ensure that it does not use ActiveRecord
APIs that are not safe to use with respect to bulk insertions (more on that below).
-- It adds a new class method `bulk_insert!`, which you can use to insert many records at once.
+- It adds new class methods `bulk_insert!` and `bulk_upsert!`, which you can use to insert many records at once.
-## Insert records via `bulk_insert!`
+## Insert records with `bulk_insert!` and `bulk_upsert!`
-If the target class passes the checks performed by `BulkInsertSafe`, you can proceed to use
-the `bulk_insert!` class method as follows:
+If the target class passes the checks performed by `BulkInsertSafe`, you can insert an array of
+ActiveRecord model objects as follows:
```ruby
records = [MyModel.new, ...]
@@ -45,6 +45,28 @@ records = [MyModel.new, ...]
MyModel.bulk_insert!(records)
```
+Note that calls to `bulk_insert!` will always attempt to insert _new records_. If instead
+you would like to replace existing records with new values, while still inserting those
+that do not already exist, then you can use `bulk_upsert!`:
+
+```ruby
+records = [MyModel.new, existing_model, ...]
+
+MyModel.bulk_upsert!(records, unique_by: [:name])
+```
+
+In this example, `unique_by` specifies the columns by which records are considered to be
+unique and as such will be updated if they existed prior to insertion. For example, if
+`existing_model` has a `name` attribute, and if a record with the same `name` value already
+exists, its fields will be updated with those of `existing_model`.
+
+The `unique_by` parameter can also be passed as a `Symbol`, in which case it specifies
+a database index by which a column is considered unique:
+
+```ruby
+MyModel.bulk_insert!(records, unique_by: :index_on_name)
+```
+
### Record validation
The `bulk_insert!` method guarantees that `records` will be inserted transactionally, and
@@ -74,6 +96,23 @@ Since this will also affect the number of `INSERT`s that occur, make sure you me
performance impact this might have on your code. There is a trade-off between the number of
`INSERT` statements the database has to process and the size and cost of each `INSERT`.
+### Handling duplicate records
+
+NOTE: **Note:**
+This parameter applies only to `bulk_insert!`. If you intend to update existing
+records, use `bulk_upsert!` instead.
+
+It may happen that some records you are trying to insert already exist, which would result in
+primary key conflicts. There are two ways to address this problem: failing fast by raising an
+error or skipping duplicate records. The default behavior of `bulk_insert!` is to fail fast
+and raise an `ActiveRecord::RecordNotUnique` error.
+
+If this is undesirable, you can instead skip duplicate records with the `skip_duplicates` flag:
+
+```ruby
+MyModel.bulk_insert!(records, skip_duplicates: true)
+```
+
### Requirements for safe bulk insertions
Large parts of ActiveRecord's persistence API are built around the notion of callbacks. Many
@@ -145,11 +184,12 @@ simply be treated as if you had invoked `save` from outside the block.
There are a few restrictions to how these APIs can be used:
-- Bulk inserts only work for new records; `UPDATE`s or "upserts" are not supported yet.
- `ON CONFLICT` behavior cannot currently be configured; an error will be raised on primary key conflicts.
- `BulkInsertableAssociations` furthermore has the following restrictions:
- only compatible with `has_many` relations.
- does not support `has_many through: ...` relations.
+- Writing [`jsonb`](https://www.postgresql.org/docs/current/datatype-json.html) content is
+[not currently supported](https://gitlab.com/gitlab-org/gitlab/-/issues/210560).
Moreover, input data should either be limited to around 1000 records at most,
or already batched prior to calling bulk insert. The `INSERT` statement will run in a single
diff --git a/doc/policy/maintenance.md b/doc/policy/maintenance.md
index 028e372985d..441360aa812 100644
--- a/doc/policy/maintenance.md
+++ b/doc/policy/maintenance.md
@@ -144,10 +144,20 @@ It's also important to ensure that any background migrations have been fully com
before upgrading to a new major version. To see the current size of the `background_migration` queue,
[Check for background migrations before upgrading](../update/README.md#checking-for-background-migrations-before-upgrading).
-From version 12 onwards, an additional step is required. More significant migrations may occur during major release upgrades. To ensure these are successful, increment to the first minor version (`x.0.x`) during the major version jump. Then proceed with upgrading to a newer release.
+### Version 12 onwards: Extra step for major upgrades
+
+From version 12 onwards, an additional step is required. More significant migrations
+may occur during major release upgrades.
+
+To ensure these are successful:
+
+1. Increment to the first minor version (`x.0.x`) during the major version jump.
+1. Proceed with upgrading to a newer release.
For example: `11.11.x` -> `12.0.x` -> `12.8.x`
+### Example upgrade paths
+
Please see the table below for some examples:
| Latest stable version | Your version | Recommended upgrade path | Note |
@@ -155,8 +165,10 @@ Please see the table below for some examples:
| 9.4.5 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.4.5` | `8.17.7` is the last version in version `8` |
| 10.1.4 | 8.13.4 | `8.13.4 -> 8.17.7 -> 9.5.10 -> 10.1.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9` |
| 11.3.4 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.5.10` -> `10.8.7` -> `11.3.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9`, `10.8.7` is the last version in version `10` |
-| 12.5.8 | 11.3.4 | `11.3.4` -> `11.11.8` -> `12.0.12` -> `12.5.8` | `11.11.8` is the last version in version `11`. `12.0.x` [is a required step.](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23211#note_272842444) |
-| 12.8.5 | 9.2.6 | `9.2.6` -> `9.5.10` -> `10.8.7` -> `11.11.8` -> `12.0.12` -> `12.8.5` | Four intermediate versions required: the final 9.5, 10.8, 11.11 releases, plus 12.0 |
+| 12.5.8 | 11.3.4 | `11.3.4` -> `11.11.8` -> `12.0.12` -> `12.5.8` | `11.11.8` is the last version in version `11`. `12.0.x` [is a required step](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23211#note_272842444). |
+| 12.8.5 | 9.2.6 | `9.2.6` -> `9.5.10` -> `10.8.7` -> `11.11.8` -> `12.0.12` -> `12.8.5` | Four intermediate versions are required: the final 9.5, 10.8, 11.11 releases, plus 12.0. |
+
+## More information
More information about the release procedures can be found in our
[release documentation](https://gitlab.com/gitlab-org/release/docs). You may also want to read our
diff --git a/doc/user/clusters/applications.md b/doc/user/clusters/applications.md
index 0fa5e62dc68..4768fcc2970 100644
--- a/doc/user/clusters/applications.md
+++ b/doc/user/clusters/applications.md
@@ -548,6 +548,7 @@ Supported applications:
- [Sentry](#install-sentry-using-gitlab-ci)
- [GitLab Runner](#install-gitlab-runner-using-gitlab-ci)
- [Cilium](#install-cilium-using-gitlab-ci)
+- [Vault](#install-vault-using-gitlab-ci)
- [JupyterHub](#install-jupyterhub-using-gitlab-ci)
- [Elastic Stack](#install-elastic-stack-using-gitlab-ci)
- [Crossplane](#install-crossplane-using-gitlab-ci)
@@ -813,6 +814,95 @@ agent:
enabled: false
```
+### Install Vault using GitLab CI
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/9982) in GitLab 12.9.
+
+[Hashicorp Vault](https://vaultproject.io/) is a secrets management solution which
+can be used to safely manage and store passwords, credentials, certificates and more. A Vault
+installation could be leveraged to provide a single secure data store for credentials
+used in your applications, GitLab CI jobs, and more. It could also serve as a way of
+providing SSL/TLS certificates to systems and deployments in your infrastructure. Leveraging
+Vault as a single source for all these credentials allows greater security by having
+a single source of access, control, and auditability around all your sensitive
+credentials and certificates.
+
+To install Vault, enable it in the `.gitlab/managed-apps/config.yaml` file:
+
+```yaml
+vault:
+ installed: true
+```
+
+By default you will get a basic Vault setup with no high availability and no scalable
+storage backend. This is enough for simple testing and small-scale deployments, though it has limits
+to how far it can scale, and as it is a single-instance deployment, you will experience downtime
+when upgrading the Vault application.
+
+To optimally use Vault in a production environment, it's ideal to have a good understanding
+of the internals of Vault and how to configure it. This can be done by reading
+[the Vault documentation](https://www.vaultproject.io/docs/internals/) as well as
+the Vault Helm chart [values.yaml file](https://github.com/hashicorp/vault-helm/blob/v0.3.3/values.yaml).
+
+At a minimum you will likely set up:
+
+- A [seal](https://www.vaultproject.io/docs/configuration/seal/) for extra encryption
+ of the master key.
+- A [storage backend](https://www.vaultproject.io/docs/configuration/storage/) that is
+  suitable for your environment and storage security requirements.
+- [HA Mode](https://www.vaultproject.io/docs/concepts/ha/).
+- [The Vault UI](https://www.vaultproject.io/docs/configuration/ui/).
+
+The following is an example values file (`.gitlab/managed-apps/vault/values.yaml`)
+that configures Google Key Management Service for auto-unseal, using a Google Cloud Storage backend, enabling
+the Vault UI, and enabling HA with 3 pod replicas. The `storage` and `seal` stanzas
+below are examples and should be replaced with settings specific to your environment.
+
+```yaml
+# Enable the Vault WebUI
+ui:
+ enabled: true
+server:
+  # Disable the built-in data storage volume as it's not safe for High Availability mode
+ dataStorage:
+ enabled: false
+ # Enable High Availability Mode
+ ha:
+ enabled: true
+ # Configure Vault to listen on port 8200 for normal traffic and port 8201 for inter-cluster traffic
+ config: |
+ listener "tcp" {
+ tls_disable = 1
+ address = "[::]:8200"
+ cluster_address = "[::]:8201"
+ }
+ # Configure Vault to store its data in a GCS Bucket backend
+ storage "gcs" {
+ path = "gcs://my-vault-storage/vault-bucket"
+ ha_enabled = "true"
+ }
+ # Configure Vault to automatically unseal storage using a GKMS key
+ seal "gcpckms" {
+ project = "vault-helm-dev-246514"
+ region = "global"
+ key_ring = "vault-helm-unseal-kr"
+ crypto_key = "vault-helm-unseal-key"
+ }
+```
+
+Once you have successfully installed Vault, you will need to [initialize the Vault](https://learn.hashicorp.com/vault/getting-started/deploy#initializing-the-vault)
+and obtain the initial root token. To do this, you will need access to the Kubernetes cluster that Vault has been deployed into.
+To initialize the Vault, get a shell to one of the Vault pods running inside Kubernetes (typically by using the `kubectl` command line tool).
+Once you have a shell into the pod, run the `vault operator init` command:
+
+```shell
+kubectl -n gitlab-managed-apps exec -it vault-0 sh
+/ $ vault operator init
+```
+
+This should give you your unseal keys and initial root token. Make sure to note these down
+and keep them safe, as you will need them to unseal the Vault throughout its lifecycle.
+
### Install JupyterHub using GitLab CI
> [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/40) in GitLab 12.8.
diff --git a/lib/gitlab/ci/templates/Managed-Cluster-Applications.gitlab-ci.yml b/lib/gitlab/ci/templates/Managed-Cluster-Applications.gitlab-ci.yml
index 48458142f1c..4ef6a4d3bef 100644
--- a/lib/gitlab/ci/templates/Managed-Cluster-Applications.gitlab-ci.yml
+++ b/lib/gitlab/ci/templates/Managed-Cluster-Applications.gitlab-ci.yml
@@ -1,6 +1,6 @@
apply:
stage: deploy
- image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.11.0"
+ image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.12.0"
environment:
name: production
variables:
@@ -16,6 +16,7 @@ apply:
PROMETHEUS_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/prometheus/values.yaml
ELASTIC_STACK_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/elastic-stack/values.yaml
VAULT_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/vault/values.yaml
+ CROSSPLANE_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/crossplane/values.yaml
script:
- gitlab-managed-apps /usr/local/share/gitlab-managed-apps/helmfile.yaml
only:
diff --git a/lib/gitlab/elasticsearch/logs.rb b/lib/gitlab/elasticsearch/logs.rb
index f976f6ce305..3b6d1d0286a 100644
--- a/lib/gitlab/elasticsearch/logs.rb
+++ b/lib/gitlab/elasticsearch/logs.rb
@@ -12,7 +12,7 @@ module Gitlab
@client = client
end
- def pod_logs(namespace, pod_name, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil)
+ def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil, start_time: nil, end_time: nil, cursor: nil)
query = { bool: { must: [] } }.tap do |q|
filter_pod_name(q, pod_name)
filter_namespace(q, namespace)
@@ -38,7 +38,7 @@ module Gitlab
{ "offset": { order: :desc } }
],
# only return these fields in the response
- _source: ["@timestamp", "message"],
+ _source: ["@timestamp", "message", "kubernetes.pod.name"],
# fixed limit for now, we should support paginated queries
size: ::Gitlab::Elasticsearch::Logs::LOGS_LIMIT
}
@@ -51,6 +51,9 @@ module Gitlab
end
def filter_pod_name(query, pod_name)
+ # We can filter by "all pods" with a null pod_name
+ return if pod_name.nil?
+
query[:bool][:must] << {
match_phrase: {
"kubernetes.pod.name" => {
@@ -113,7 +116,8 @@ module Gitlab
results = results.map do |hit|
{
timestamp: hit["_source"]["@timestamp"],
- message: hit["_source"]["message"]
+ message: hit["_source"]["message"],
+ pod: hit["_source"]["kubernetes"]["pod"]["name"]
}
end
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index d0c2f4b2906..11f93ac0bce 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -711,9 +711,6 @@ msgstr ""
msgid "20-29 contributions"
msgstr ""
-msgid "24 hours"
-msgstr ""
-
msgid "2FA"
msgstr ""
@@ -726,9 +723,6 @@ msgstr ""
msgid "3 hours"
msgstr ""
-msgid "30 days"
-msgstr ""
-
msgid "30 minutes"
msgstr ""
@@ -750,9 +744,6 @@ msgstr ""
msgid "404|Please contact your GitLab administrator if you think this is a mistake."
msgstr ""
-msgid "7 days"
-msgstr ""
-
msgid "8 hours"
msgstr ""
@@ -7660,6 +7651,9 @@ msgstr ""
msgid "EnvironmentsDashboard|This dashboard displays a maximum of 7 projects and 3 environments per project. %{readMoreLink}"
msgstr ""
+msgid "Environments|All pods"
+msgstr ""
+
msgid "Environments|An error occurred while canceling the auto stop, please try again"
msgstr ""
@@ -7741,7 +7735,7 @@ msgstr ""
msgid "Environments|No deployments yet"
msgstr ""
-msgid "Environments|No pods to display"
+msgid "Environments|No pod selected"
msgstr ""
msgid "Environments|Note that this action will stop the environment, but it will %{emphasisStart}not%{emphasisEnd} have an effect on any existing deployment due to no “stop environment action” being defined in the %{ciConfigLinkStart}.gitlab-ci.yml%{ciConfigLinkEnd} file."
diff --git a/spec/fixtures/lib/elasticsearch/logs_response.json b/spec/fixtures/lib/elasticsearch/logs_response.json
index 7a733882089..2dbc6f569a5 100644
--- a/spec/fixtures/lib/elasticsearch/logs_response.json
+++ b/spec/fixtures/lib/elasticsearch/logs_response.json
@@ -19,7 +19,12 @@
"_score": null,
"_source": {
"message": "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13",
- "@timestamp": "2019-12-13T14:35:34.034Z"
+ "@timestamp": "2019-12-13T14:35:34.034Z",
+ "kubernetes": {
+ "pod": {
+ "name": "production-6866bc8974-m4sk4"
+ }
+ }
},
"sort": [
9999998,
@@ -33,7 +38,12 @@
"_score": null,
"_source": {
"message": "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13",
- "@timestamp": "2019-12-13T14:35:35.034Z"
+ "@timestamp": "2019-12-13T14:35:35.034Z",
+ "kubernetes": {
+ "pod": {
+ "name": "production-6866bc8974-m4sk4"
+ }
+ }
},
"sort": [
9999949,
@@ -47,7 +57,12 @@
"_score": null,
"_source": {
"message": "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13",
- "@timestamp": "2019-12-13T14:35:36.034Z"
+ "@timestamp": "2019-12-13T14:35:36.034Z",
+ "kubernetes": {
+ "pod": {
+ "name": "production-6866bc8974-m4sk4"
+ }
+ }
},
"sort": [
9999944,
@@ -61,7 +76,12 @@
"_score": null,
"_source": {
"message": "- -\u003e /",
- "@timestamp": "2019-12-13T14:35:37.034Z"
+ "@timestamp": "2019-12-13T14:35:37.034Z",
+ "kubernetes": {
+ "pod": {
+ "name": "production-6866bc8974-m4sk4"
+ }
+ }
},
"sort": [
9999934,
diff --git a/spec/fixtures/lib/elasticsearch/query.json b/spec/fixtures/lib/elasticsearch/query.json
index 565c871b1c7..75164a7439f 100644
--- a/spec/fixtures/lib/elasticsearch/query.json
+++ b/spec/fixtures/lib/elasticsearch/query.json
@@ -33,7 +33,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/fixtures/lib/elasticsearch/query_with_container.json b/spec/fixtures/lib/elasticsearch/query_with_container.json
index 21eac5d7dbe..11bc653441c 100644
--- a/spec/fixtures/lib/elasticsearch/query_with_container.json
+++ b/spec/fixtures/lib/elasticsearch/query_with_container.json
@@ -40,7 +40,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/fixtures/lib/elasticsearch/query_with_cursor.json b/spec/fixtures/lib/elasticsearch/query_with_cursor.json
index 1264fdb0322..c5b81e97d3c 100644
--- a/spec/fixtures/lib/elasticsearch/query_with_cursor.json
+++ b/spec/fixtures/lib/elasticsearch/query_with_cursor.json
@@ -37,7 +37,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/fixtures/lib/elasticsearch/query_with_end_time.json b/spec/fixtures/lib/elasticsearch/query_with_end_time.json
index 2859e6427d4..226e0f115e7 100644
--- a/spec/fixtures/lib/elasticsearch/query_with_end_time.json
+++ b/spec/fixtures/lib/elasticsearch/query_with_end_time.json
@@ -42,7 +42,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/fixtures/lib/elasticsearch/query_with_search.json b/spec/fixtures/lib/elasticsearch/query_with_search.json
index 3c9bed047fa..ca63c12f3b8 100644
--- a/spec/fixtures/lib/elasticsearch/query_with_search.json
+++ b/spec/fixtures/lib/elasticsearch/query_with_search.json
@@ -42,7 +42,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/fixtures/lib/elasticsearch/query_with_start_time.json b/spec/fixtures/lib/elasticsearch/query_with_start_time.json
index 0c5cfca42f7..cb3e37de8a7 100644
--- a/spec/fixtures/lib/elasticsearch/query_with_start_time.json
+++ b/spec/fixtures/lib/elasticsearch/query_with_start_time.json
@@ -42,7 +42,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/fixtures/lib/elasticsearch/query_with_times.json b/spec/fixtures/lib/elasticsearch/query_with_times.json
index 7108d42217e..91d28b28842 100644
--- a/spec/fixtures/lib/elasticsearch/query_with_times.json
+++ b/spec/fixtures/lib/elasticsearch/query_with_times.json
@@ -43,7 +43,8 @@
],
"_source": [
"@timestamp",
- "message"
+ "message",
+ "kubernetes.pod.name"
],
"size": 500
}
diff --git a/spec/frontend/logs/components/environment_logs_spec.js b/spec/frontend/logs/components/environment_logs_spec.js
index c638b4c05f9..162aeb1cc56 100644
--- a/spec/frontend/logs/components/environment_logs_spec.js
+++ b/spec/frontend/logs/components/environment_logs_spec.js
@@ -300,9 +300,10 @@ describe('EnvironmentLogs', () => {
const items = findPodsDropdown().findAll(GlDropdownItem);
expect(findPodsDropdown().props('text')).toBe(mockPodName);
- expect(items.length).toBe(mockPods.length);
+ expect(items.length).toBe(mockPods.length + 1);
+ expect(items.at(0).text()).toBe('All pods');
mockPods.forEach((pod, i) => {
- const item = items.at(i);
+ const item = items.at(i + 1);
expect(item.text()).toBe(pod);
});
});
@@ -345,7 +346,7 @@ describe('EnvironmentLogs', () => {
expect(dispatch).not.toHaveBeenCalledWith(`${module}/showPodLogs`, expect.anything());
- items.at(index).vm.$emit('click');
+ items.at(index + 1).vm.$emit('click');
expect(dispatch).toHaveBeenCalledWith(`${module}/showPodLogs`, mockPods[index]);
});
diff --git a/spec/frontend/logs/mock_data.js b/spec/frontend/logs/mock_data.js
index 1a84d6edd12..537582cff5a 100644
--- a/spec/frontend/logs/mock_data.js
+++ b/spec/frontend/logs/mock_data.js
@@ -32,15 +32,93 @@ export const mockPods = [
];
export const mockLogsResult = [
- { timestamp: '2019-12-13T13:43:18.2760123Z', message: 'Log 1' },
- { timestamp: '2019-12-13T13:43:18.2760123Z', message: 'Log 2' },
- { timestamp: '2019-12-13T13:43:26.8420123Z', message: 'Log 3' },
+ {
+ timestamp: '2019-12-13T13:43:18.2760123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:29:48 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:18.2760123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
+ {
+ timestamp: '2019-12-13T13:43:26.8420123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:29:57 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:26.8420123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
+ {
+ timestamp: '2019-12-13T13:43:28.3710123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:29:58 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:28.3710123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
+ {
+ timestamp: '2019-12-13T13:43:36.8860123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:30:07 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:36.8860123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
+ {
+ timestamp: '2019-12-13T13:43:38.4000123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:30:08 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:38.4000123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
+ {
+ timestamp: '2019-12-13T13:43:46.8420123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:30:17 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:46.8430123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
+ {
+ timestamp: '2019-12-13T13:43:48.3240123Z',
+ message: '10.36.0.1 - - [16/Oct/2019:06:30:18 UTC] "GET / HTTP/1.1" 200 13',
+ pod: 'foo',
+ },
+ {
+ timestamp: '2019-12-13T13:43:48.3250123Z',
+ message: '- -> /',
+ pod: 'bar',
+ },
];
export const mockTrace = [
- 'Dec 13 13:43:18.276Z | Log 1',
- 'Dec 13 13:43:18.276Z | Log 2',
- 'Dec 13 13:43:26.842Z | Log 3',
+ 'Dec 13 13:43:18.276Z | foo | 10.36.0.1 - - [16/Oct/2019:06:29:48 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:18.276Z | bar | - -> /',
+ 'Dec 13 13:43:26.842Z | foo | 10.36.0.1 - - [16/Oct/2019:06:29:57 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:26.842Z | bar | - -> /',
+ 'Dec 13 13:43:28.371Z | foo | 10.36.0.1 - - [16/Oct/2019:06:29:58 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:28.371Z | bar | - -> /',
+ 'Dec 13 13:43:36.886Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:07 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:36.886Z | bar | - -> /',
+ 'Dec 13 13:43:38.400Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:08 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:38.400Z | bar | - -> /',
+ 'Dec 13 13:43:46.842Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:17 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:46.843Z | bar | - -> /',
+ 'Dec 13 13:43:48.324Z | foo | 10.36.0.1 - - [16/Oct/2019:06:30:18 UTC] "GET / HTTP/1.1" 200 13',
+ 'Dec 13 13:43:48.325Z | bar | - -> /',
];
export const mockResponse = {
diff --git a/spec/frontend/logs/stores/actions_spec.js b/spec/frontend/logs/stores/actions_spec.js
index 1512797e1bc..1754931bcaf 100644
--- a/spec/frontend/logs/stores/actions_spec.js
+++ b/spec/frontend/logs/stores/actions_spec.js
@@ -13,7 +13,7 @@ import {
fetchMoreLogsPrepend,
} from '~/logs/stores/actions';
-import { defaultTimeRange } from '~/monitoring/constants';
+import { defaultTimeRange } from '~/vue_shared/constants';
import axios from '~/lib/utils/axios_utils';
import flash from '~/flash';
@@ -172,14 +172,13 @@ describe('Logs Store actions', () => {
describe('fetchLogs', () => {
beforeEach(() => {
expectedMutations = [
- { type: types.REQUEST_PODS_DATA },
{ type: types.REQUEST_LOGS_DATA },
- { type: types.SET_CURRENT_POD_NAME, payload: mockPodName },
- { type: types.RECEIVE_PODS_DATA_SUCCESS, payload: mockPods },
{
type: types.RECEIVE_LOGS_DATA_SUCCESS,
payload: { logs: mockLogsResult, cursor: mockNextCursor },
},
+ { type: types.SET_CURRENT_POD_NAME, payload: mockPodName },
+ { type: types.RECEIVE_PODS_DATA_SUCCESS, payload: mockPods },
];
expectedActions = [];
@@ -364,7 +363,6 @@ describe('Logs Store actions', () => {
null,
state,
[
- { type: types.REQUEST_PODS_DATA },
{ type: types.REQUEST_LOGS_DATA },
{ type: types.RECEIVE_PODS_DATA_ERROR },
{ type: types.RECEIVE_LOGS_DATA_ERROR },
diff --git a/spec/frontend/logs/stores/mutations_spec.js b/spec/frontend/logs/stores/mutations_spec.js
index eae838a31d4..37db355af09 100644
--- a/spec/frontend/logs/stores/mutations_spec.js
+++ b/spec/frontend/logs/stores/mutations_spec.js
@@ -223,17 +223,6 @@ describe('Logs Store Mutations', () => {
});
});
- describe('REQUEST_PODS_DATA', () => {
- it('receives pods data', () => {
- mutations[types.REQUEST_PODS_DATA](state);
-
- expect(state.pods).toEqual(
- expect.objectContaining({
- options: [],
- }),
- );
- });
- });
describe('RECEIVE_PODS_DATA_SUCCESS', () => {
it('receives pods data success', () => {
mutations[types.RECEIVE_PODS_DATA_SUCCESS](state, mockPods);
diff --git a/spec/frontend/monitoring/components/__snapshots__/dashboard_template_spec.js.snap b/spec/frontend/monitoring/components/__snapshots__/dashboard_template_spec.js.snap
index 77f7f2e0609..e37043e5d4d 100644
--- a/spec/frontend/monitoring/components/__snapshots__/dashboard_template_spec.js.snap
+++ b/spec/frontend/monitoring/components/__snapshots__/dashboard_template_spec.js.snap
@@ -78,6 +78,7 @@ exports[`Dashboard template matches the default snapshot 1`] = `
label-size="sm"
>
<date-time-picker-stub
+ customenabled="true"
options="[object Object],[object Object],[object Object],[object Object],[object Object],[object Object],[object Object]"
value="[object Object]"
/>
diff --git a/spec/frontend/monitoring/components/dashboard_url_time_spec.js b/spec/frontend/monitoring/components/dashboard_url_time_spec.js
index 161c64dd74b..bf5a11a536e 100644
--- a/spec/frontend/monitoring/components/dashboard_url_time_spec.js
+++ b/spec/frontend/monitoring/components/dashboard_url_time_spec.js
@@ -7,7 +7,7 @@ import { mockProjectDir } from '../mock_data';
import Dashboard from '~/monitoring/components/dashboard.vue';
import { createStore } from '~/monitoring/stores';
-import { defaultTimeRange } from '~/monitoring/constants';
+import { defaultTimeRange } from '~/vue_shared/constants';
import { propsData } from '../init_utils';
jest.mock('~/flash');
diff --git a/spec/lib/gitlab/elasticsearch/logs_spec.rb b/spec/lib/gitlab/elasticsearch/logs_spec.rb
index f82c4acb82b..6b9d1dbef99 100644
--- a/spec/lib/gitlab/elasticsearch/logs_spec.rb
+++ b/spec/lib/gitlab/elasticsearch/logs_spec.rb
@@ -5,10 +5,10 @@ require 'spec_helper'
describe Gitlab::Elasticsearch::Logs do
let(:client) { Elasticsearch::Transport::Client }
- let(:es_message_1) { { timestamp: "2019-12-13T14:35:34.034Z", message: "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13" } }
- let(:es_message_2) { { timestamp: "2019-12-13T14:35:35.034Z", message: "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13" } }
- let(:es_message_3) { { timestamp: "2019-12-13T14:35:36.034Z", message: "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13" } }
- let(:es_message_4) { { timestamp: "2019-12-13T14:35:37.034Z", message: "- -\u003e /" } }
+ let(:es_message_1) { { timestamp: "2019-12-13T14:35:34.034Z", pod: "production-6866bc8974-m4sk4", message: "10.8.2.1 - - [25/Oct/2019:08:03:22 UTC] \"GET / HTTP/1.1\" 200 13" } }
+ let(:es_message_2) { { timestamp: "2019-12-13T14:35:35.034Z", pod: "production-6866bc8974-m4sk4", message: "10.8.2.1 - - [27/Oct/2019:23:49:54 UTC] \"GET / HTTP/1.1\" 200 13" } }
+ let(:es_message_3) { { timestamp: "2019-12-13T14:35:36.034Z", pod: "production-6866bc8974-m4sk4", message: "10.8.2.1 - - [04/Nov/2019:23:09:24 UTC] \"GET / HTTP/1.1\" 200 13" } }
+ let(:es_message_4) { { timestamp: "2019-12-13T14:35:37.034Z", pod: "production-6866bc8974-m4sk4", message: "- -\u003e /" } }
let(:es_response) { JSON.parse(fixture_file('lib/elasticsearch/logs_response.json')) }
@@ -40,49 +40,49 @@ describe Gitlab::Elasticsearch::Logs do
it 'returns the logs as an array' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name)
+ result = subject.pod_logs(namespace, pod_name: pod_name)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
it 'can further filter the logs by container name' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_container)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name, container_name: container_name)
+ result = subject.pod_logs(namespace, pod_name: pod_name, container_name: container_name)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
it 'can further filter the logs by search' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_search)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name, search: search)
+ result = subject.pod_logs(namespace, pod_name: pod_name, search: search)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
it 'can further filter the logs by start_time and end_time' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_times)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name, start_time: start_time, end_time: end_time)
+ result = subject.pod_logs(namespace, pod_name: pod_name, start_time: start_time, end_time: end_time)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
it 'can further filter the logs by only start_time' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_start_time)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name, start_time: start_time)
+ result = subject.pod_logs(namespace, pod_name: pod_name, start_time: start_time)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
it 'can further filter the logs by only end_time' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_end_time)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name, end_time: end_time)
+ result = subject.pod_logs(namespace, pod_name: pod_name, end_time: end_time)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
it 'can search after a cursor' do
expect(client).to receive(:search).with(body: a_hash_equal_to_json(body_with_cursor)).and_return(es_response)
- result = subject.pod_logs(namespace, pod_name, cursor: cursor)
+ result = subject.pod_logs(namespace, pod_name: pod_name, cursor: cursor)
expect(result).to eq(logs: [es_message_4, es_message_3, es_message_2, es_message_1], cursor: cursor)
end
end
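Illustrative sketch (not part of the upstream diff): the spec changes above call Gitlab::Elasticsearch::Logs#pod_logs with pod_name: as a keyword argument instead of a positional one, and expect each returned entry to carry a pod field. The following Ruby sketch shows one way such a signature could look; the field names, client wiring, and query shape are assumptions inferred from the spec, not the actual GitLab implementation.

# Sketch only: Elasticsearch field names and client wiring are assumptions.
module Gitlab
  module Elasticsearch
    class Logs
      def initialize(client)
        @client = client
      end

      # pod_name is now a keyword argument, so logs can be fetched for a whole
      # namespace (pod_name: nil) and each entry reports which pod emitted it.
      def pod_logs(namespace, pod_name: nil, container_name: nil, search: nil,
                   start_time: nil, end_time: nil, cursor: nil)
        filters = [{ match_phrase: { 'kubernetes.namespace' => namespace } }]
        filters << { match_phrase: { 'kubernetes.pod.name' => pod_name } } if pod_name
        filters << { match_phrase: { 'kubernetes.container.name' => container_name } } if container_name
        # search, start_time/end_time and the cursor would be folded into the body similarly.

        response = @client.search(body: { query: { bool: { filter: filters } } })
        hits = response['hits']['hits']

        logs = hits.map do |hit|
          src = hit['_source']
          { timestamp: src['@timestamp'],
            pod: src.dig('kubernetes', 'pod', 'name'),
            message: src['message'] }
        end

        { logs: logs, cursor: hits.last && hits.last['sort'] }
      end
    end
  end
end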
diff --git a/spec/services/pod_logs/base_service_spec.rb b/spec/services/pod_logs/base_service_spec.rb
index a18fda544df..d93ea51eae1 100644
--- a/spec/services/pod_logs/base_service_spec.rb
+++ b/spec/services/pod_logs/base_service_spec.rb
@@ -78,9 +78,7 @@ describe ::PodLogs::BaseService do
expect(result[:message]).to eq('Namespace is empty')
end
end
- end
- describe '#check_param_lengths' do
context 'when pod_name and container_name are provided' do
let(:params) do
{
@@ -90,43 +88,13 @@ describe ::PodLogs::BaseService do
end
it 'returns success' do
- result = subject.send(:check_param_lengths, {})
+ result = subject.send(:check_arguments, {})
expect(result[:status]).to eq(:success)
expect(result[:pod_name]).to eq(pod_name)
expect(result[:container_name]).to eq(container_name)
end
end
-
- context 'when pod_name is too long' do
- let(:params) do
- {
- 'pod_name' => "a very long string." * 15
- }
- end
-
- it 'returns an error' do
- result = subject.send(:check_param_lengths, {})
-
- expect(result[:status]).to eq(:error)
- expect(result[:message]).to eq('pod_name cannot be larger than 253 chars')
- end
- end
-
- context 'when container_name is too long' do
- let(:params) do
- {
- 'container_name' => "a very long string." * 15
- }
- end
-
- it 'returns an error' do
- result = subject.send(:check_param_lengths, {})
-
- expect(result[:status]).to eq(:error)
- expect(result[:message]).to eq('container_name cannot be larger than 253 chars')
- end
- end
end
describe '#get_raw_pods' do
@@ -150,80 +118,4 @@ describe ::PodLogs::BaseService do
expect(result[:pods]).to eq([pod_name])
end
end
-
- describe '#check_pod_name' do
- it 'returns success if pod_name was specified' do
- result = subject.send(:check_pod_name, pod_name: pod_name, pods: [pod_name])
-
- expect(result[:status]).to eq(:success)
- expect(result[:pod_name]).to eq(pod_name)
- end
-
- it 'returns success if pod_name was not specified but there are pods' do
- result = subject.send(:check_pod_name, pod_name: nil, pods: [pod_name])
-
- expect(result[:status]).to eq(:success)
- expect(result[:pod_name]).to eq(pod_name)
- end
-
- it 'returns error if pod_name was not specified and there are no pods' do
- result = subject.send(:check_pod_name, pod_name: nil, pods: [])
-
- expect(result[:status]).to eq(:error)
- expect(result[:message]).to eq('No pods available')
- end
-
- it 'returns error if pod_name was specified but does not exist' do
- result = subject.send(:check_pod_name, pod_name: 'another_pod', pods: [pod_name])
-
- expect(result[:status]).to eq(:error)
- expect(result[:message]).to eq('Pod does not exist')
- end
- end
-
- describe '#check_container_name' do
- it 'returns success if container_name was specified' do
- result = subject.send(:check_container_name,
- container_name: container_name,
- pod_name: pod_name,
- raw_pods: raw_pods
- )
-
- expect(result[:status]).to eq(:success)
- expect(result[:container_name]).to eq(container_name)
- end
-
- it 'returns success if container_name was not specified and there are containers' do
- result = subject.send(:check_container_name,
- pod_name: pod_name,
- raw_pods: raw_pods
- )
-
- expect(result[:status]).to eq(:success)
- expect(result[:container_name]).to eq(container_name)
- end
-
- it 'returns error if container_name was not specified and there are no containers on the pod' do
- raw_pods.first.spec.containers = []
-
- result = subject.send(:check_container_name,
- pod_name: pod_name,
- raw_pods: raw_pods
- )
-
- expect(result[:status]).to eq(:error)
- expect(result[:message]).to eq('No containers available')
- end
-
- it 'returns error if container_name was specified but does not exist' do
- result = subject.send(:check_container_name,
- container_name: 'foo',
- pod_name: pod_name,
- raw_pods: raw_pods
- )
-
- expect(result[:status]).to eq(:error)
- expect(result[:message]).to eq('Container does not exist')
- end
- end
end
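Illustrative sketch (not part of the upstream diff): the base-service spec above now exercises a #check_arguments step where #check_param_lengths used to be, and the pod/container validations have moved into the Kubernetes-specific spec further below. A rough Ruby sketch consistent with those expectations follows; the method body, accessors, and error wording beyond what the spec asserts are assumptions.

# Sketch only: shows the reduced base-service step the spec exercises.
module PodLogs
  class BaseService
    attr_reader :namespace, :params

    def initialize(namespace, params: {})
      @namespace = namespace
      @params = params
    end

    private

    # Replacement for the old #check_param_lengths step: fail fast on a blank
    # namespace and copy the requested pod/container names into the result;
    # length and existence checks now live in the Kubernetes service.
    def check_arguments(result)
      return { status: :error, message: 'Namespace is empty' } if namespace.to_s.empty?

      result[:pod_name] = params['pod_name']
      result[:container_name] = params['container_name']
      result.merge(status: :success)
    end
  end
end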
diff --git a/spec/services/pod_logs/elasticsearch_service_spec.rb b/spec/services/pod_logs/elasticsearch_service_spec.rb
index 984a303e9e3..1387d2cfb8e 100644
--- a/spec/services/pod_logs/elasticsearch_service_spec.rb
+++ b/spec/services/pod_logs/elasticsearch_service_spec.rb
@@ -170,7 +170,7 @@ describe ::PodLogs::ElasticsearchService do
.and_return(Elasticsearch::Transport::Client.new)
allow_any_instance_of(::Gitlab::Elasticsearch::Logs)
.to receive(:pod_logs)
- .with(namespace, pod_name, container_name: container_name, search: search, start_time: start_time, end_time: end_time, cursor: cursor)
+ .with(namespace, pod_name: pod_name, container_name: container_name, search: search, start_time: start_time, end_time: end_time, cursor: cursor)
.and_return({ logs: expected_logs, cursor: expected_cursor })
result = subject.send(:pod_logs, result_arg)
diff --git a/spec/services/pod_logs/kubernetes_service_spec.rb b/spec/services/pod_logs/kubernetes_service_spec.rb
index 9fab88a14f6..8ce79d4c318 100644
--- a/spec/services/pod_logs/kubernetes_service_spec.rb
+++ b/spec/services/pod_logs/kubernetes_service_spec.rb
@@ -9,13 +9,18 @@ describe ::PodLogs::KubernetesService do
let(:namespace) { 'autodevops-deploy-9-production' }
let(:pod_name) { 'pod-1' }
- let(:container_name) { 'container-1' }
+ let(:container_name) { 'container-0' }
let(:params) { {} }
let(:raw_logs) do
"2019-12-13T14:04:22.123456Z Log 1\n2019-12-13T14:04:23.123456Z Log 2\n" \
"2019-12-13T14:04:24.123456Z Log 3"
end
+ let(:raw_pods) do
+ JSON.parse([
+ kube_pod(name: pod_name)
+ ].to_json, object_class: OpenStruct)
+ end
subject { described_class.new(cluster, namespace, params: params) }
@@ -140,9 +145,9 @@ describe ::PodLogs::KubernetesService do
let(:expected_logs) do
[
- { message: "Log 1", timestamp: "2019-12-13T14:04:22.123456Z" },
- { message: "Log 2", timestamp: "2019-12-13T14:04:23.123456Z" },
- { message: "Log 3", timestamp: "2019-12-13T14:04:24.123456Z" }
+ { message: "Log 1", pod: 'pod-1', timestamp: "2019-12-13T14:04:22.123456Z" },
+ { message: "Log 2", pod: 'pod-1', timestamp: "2019-12-13T14:04:23.123456Z" },
+ { message: "Log 3", pod: 'pod-1', timestamp: "2019-12-13T14:04:24.123456Z" }
]
end
@@ -163,4 +168,98 @@ describe ::PodLogs::KubernetesService do
end
end
end
+
+ describe '#check_pod_name' do
+ it 'returns success if pod_name was specified' do
+ result = subject.send(:check_pod_name, pod_name: pod_name, pods: [pod_name])
+
+ expect(result[:status]).to eq(:success)
+ expect(result[:pod_name]).to eq(pod_name)
+ end
+
+ it 'returns success if pod_name was not specified but there are pods' do
+ result = subject.send(:check_pod_name, pod_name: nil, pods: [pod_name])
+
+ expect(result[:status]).to eq(:success)
+ expect(result[:pod_name]).to eq(pod_name)
+ end
+
+ it 'returns error if pod_name was not specified and there are no pods' do
+ result = subject.send(:check_pod_name, pod_name: nil, pods: [])
+
+ expect(result[:status]).to eq(:error)
+ expect(result[:message]).to eq('No pods available')
+ end
+
+ it 'returns error if pod_name was specified but does not exist' do
+ result = subject.send(:check_pod_name, pod_name: 'another_pod', pods: [pod_name])
+
+ expect(result[:status]).to eq(:error)
+ expect(result[:message]).to eq('Pod does not exist')
+ end
+
+ it 'returns error if pod_name is too long' do
+ result = subject.send(:check_pod_name, pod_name: "a very long string." * 15, pods: [pod_name])
+
+ expect(result[:status]).to eq(:error)
+ expect(result[:message]).to eq('pod_name cannot be larger than 253 chars')
+ end
+ end
+
+ describe '#check_container_name' do
+ it 'returns success if container_name was specified' do
+ result = subject.send(:check_container_name,
+ container_name: container_name,
+ pod_name: pod_name,
+ raw_pods: raw_pods
+ )
+
+ expect(result[:status]).to eq(:success)
+ expect(result[:container_name]).to eq(container_name)
+ end
+
+ it 'returns success if container_name was not specified and there are containers' do
+ result = subject.send(:check_container_name,
+ pod_name: pod_name,
+ raw_pods: raw_pods
+ )
+
+ expect(result[:status]).to eq(:success)
+ expect(result[:container_name]).to eq(container_name)
+ end
+
+ it 'returns error if container_name was not specified and there are no containers on the pod' do
+ raw_pods.first.spec.containers = []
+
+ result = subject.send(:check_container_name,
+ pod_name: pod_name,
+ raw_pods: raw_pods
+ )
+
+ expect(result[:status]).to eq(:error)
+ expect(result[:message]).to eq('No containers available')
+ end
+
+ it 'returns error if container_name was specified but does not exist' do
+ result = subject.send(:check_container_name,
+ container_name: 'foo',
+ pod_name: pod_name,
+ raw_pods: raw_pods
+ )
+
+ expect(result[:status]).to eq(:error)
+ expect(result[:message]).to eq('Container does not exist')
+ end
+
+ it 'returns error if container_name is too long' do
+ result = subject.send(:check_container_name,
+ container_name: "a very long string." * 15,
+ pod_name: pod_name,
+ raw_pods: raw_pods
+ )
+
+ expect(result[:status]).to eq(:error)
+ expect(result[:message]).to eq('container_name cannot be larger than 253 chars')
+ end
+ end
end
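Illustrative sketch (not part of the upstream diff): the new Kubernetes-service examples above pin down the behaviour of #check_pod_name and #check_container_name, including the 253-character limit that previously lived in the base service. The Ruby sketch below mirrors that tested contract; the constant name, guard ordering, and raw_pods traversal are assumptions, not the actual implementation.

# Sketch only: mirrors the expectations asserted in the spec above.
module PodLogs
  class KubernetesService
    MAX_K8S_NAME_LENGTH = 253

    private

    # Default to the first available pod, then validate length and existence.
    def check_pod_name(result)
      result[:pod_name] ||= result[:pods].first

      return { status: :error, message: 'No pods available' } unless result[:pod_name]
      return { status: :error, message: 'pod_name cannot be larger than 253 chars' } if result[:pod_name].length > MAX_K8S_NAME_LENGTH
      return { status: :error, message: 'Pod does not exist' } unless result[:pods].include?(result[:pod_name])

      result.merge(status: :success)
    end

    # Default to the first container on the selected pod, with the same checks.
    def check_container_name(result)
      pod = result[:raw_pods].find { |p| p.metadata.name == result[:pod_name] }
      return { status: :error, message: 'Pod does not exist' } unless pod

      containers = pod.spec.containers.map(&:name)
      result[:container_name] ||= containers.first

      return { status: :error, message: 'No containers available' } unless result[:container_name]
      return { status: :error, message: 'container_name cannot be larger than 253 chars' } if result[:container_name].length > MAX_K8S_NAME_LENGTH
      return { status: :error, message: 'Container does not exist' } unless containers.include?(result[:container_name])

      result.merge(status: :success)
    end
  end
end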