gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com>  2020-02-13 21:09:00 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2020-02-13 21:09:00 +0300
commit    e4dffdfe364af6c72dcb6b4671cb39a24e8e100c (patch)
tree      6428a1c3472b14396645dcb280b219dbc0420c66
parent    0ab47b994caa80c5587f33dc818626b66cfdafe2 (diff)

Add latest changes from gitlab-org/gitlab@master
-rw-r--r--  app/models/ci/pipeline.rb                                                              |   6
-rw-r--r--  app/models/ci/pipeline_enums.rb                                                        |  11
-rw-r--r--  app/models/lfs_objects_project.rb                                                      |   2
-rw-r--r--  app/workers/build_finished_worker.rb                                                   |   2
-rw-r--r--  app/workers/expire_pipeline_cache_worker.rb                                            |   2
-rw-r--r--  changelogs/unreleased/37335-conan-name-validation-fixes.yml                            |   5
-rw-r--r--  config/gitlab.yml.example                                                              |   5
-rw-r--r--  config/initializers/1_settings.rb                                                      |   3
-rw-r--r--  db/migrate/20200207182131_replace_conan_metadata_index.rb                              |  25
-rw-r--r--  db/schema.rb                                                                           |   2
-rw-r--r--  doc/development/testing_guide/frontend_testing.md                                      |  19
-rw-r--r--  doc/user/project/clusters/img/kubernetes_pod_logs_v12_5.png                            | bin 183707 -> 0 bytes
-rw-r--r--  doc/user/project/clusters/img/kubernetes_pod_logs_v12_8.png                            | bin 0 -> 152582 bytes
-rw-r--r--  doc/user/project/clusters/kubernetes_pod_logs.md                                       |  13
-rw-r--r--  doc/user/project/integrations/img/download_as_csv.png                                  | bin 33801 -> 0 bytes
-rw-r--r--  doc/user/project/integrations/img/panel_context_menu_v12_8.png                         | bin 0 -> 25884 bytes
-rw-r--r--  doc/user/project/integrations/prometheus.md                                            |  23
-rw-r--r--  lib/gitlab/looping_batcher.rb                                                          |  99
-rw-r--r--  qa/qa/page/component/ci_badge_link.rb                                                  |  23
-rw-r--r--  qa/qa/page/project/job/show.rb                                                         |   2
-rw-r--r--  qa/qa/specs/features/browser_ui/4_verify/pipeline/create_and_process_pipeline_spec.rb |  20
-rw-r--r--  spec/lib/gitlab/looping_batcher_spec.rb                                                |  71
-rw-r--r--  spec/models/ci/pipeline_spec.rb                                                        |   4
-rw-r--r--  spec/workers/expire_pipeline_cache_worker_spec.rb                                      |  15
24 files changed, 324 insertions, 28 deletions
diff --git a/app/models/ci/pipeline.rb b/app/models/ci/pipeline.rb
index a8685da3cd9..bc704457be1 100644
--- a/app/models/ci/pipeline.rb
+++ b/app/models/ci/pipeline.rb
@@ -185,7 +185,7 @@ module Ci
pipeline.run_after_commit do
PipelineHooksWorker.perform_async(pipeline.id)
- ExpirePipelineCacheWorker.perform_async(pipeline.id)
+ ExpirePipelineCacheWorker.perform_async(pipeline.id) if pipeline.cacheable?
end
end
@@ -902,6 +902,10 @@ module Ci
statuses.latest.success.where(name: names).pluck(:id)
end
+ def cacheable?
+ Ci::PipelineEnums.ci_config_sources.key?(config_source.to_sym)
+ end
+
private
def pipeline_data
diff --git a/app/models/ci/pipeline_enums.rb b/app/models/ci/pipeline_enums.rb
index fde169d2f03..7e203cb67c4 100644
--- a/app/models/ci/pipeline_enums.rb
+++ b/app/models/ci/pipeline_enums.rb
@@ -46,13 +46,18 @@ module Ci
}
end
- def self.ci_config_sources_values
- config_sources.values_at(
+ def self.ci_config_sources
+ config_sources.slice(
:unknown_source,
:repository_source,
:auto_devops_source,
:remote_source,
- :external_project_source)
+ :external_project_source
+ )
+ end
+
+ def self.ci_config_sources_values
+ ci_config_sources.values
end
end
end
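Taken together, the two hunks above make cacheability a key lookup on the sliced enum hash. A minimal sketch of the intended behaviour (illustrative only, not part of the patch; return values are elided where the diff does not show them):

```ruby
Ci::PipelineEnums.ci_config_sources
# => { unknown_source: ..., repository_source: ..., auto_devops_source: ...,
#      remote_source: ..., external_project_source: ... }
Ci::PipelineEnums.ci_config_sources_values # the same values the old helper returned

pipeline = Ci::Pipeline.new(config_source: 'repository_source')
pipeline.cacheable? # => true; sources outside the slice return false, so the
                    # cache-expiry workers elsewhere in this commit skip those pipelines
```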
diff --git a/app/models/lfs_objects_project.rb b/app/models/lfs_objects_project.rb
index e45c56b6394..68ef84223c5 100644
--- a/app/models/lfs_objects_project.rb
+++ b/app/models/lfs_objects_project.rb
@@ -16,6 +16,8 @@ class LfsObjectsProject < ApplicationRecord
design: 2 ## EE-specific
}
+ scope :project_id_in, ->(ids) { where(project_id: ids) }
+
private
def update_project_statistics
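The new `project_id_in` scope is a thin wrapper around `where(project_id: ids)`. A minimal usage sketch (the IDs and the relation are illustrative):

```ruby
# Both return an ActiveRecord::Relation limited to the given projects;
# the scope accepts an array of IDs or another relation.
LfsObjectsProject.project_id_in([1, 2, 3])
LfsObjectsProject.project_id_in(Project.where(namespace_id: 42).select(:id))
```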
diff --git a/app/workers/build_finished_worker.rb b/app/workers/build_finished_worker.rb
index e61f37ddce1..77ce0923307 100644
--- a/app/workers/build_finished_worker.rb
+++ b/app/workers/build_finished_worker.rb
@@ -32,7 +32,7 @@ class BuildFinishedWorker
# We execute these async as these are independent operations.
BuildHooksWorker.perform_async(build.id)
ArchiveTraceWorker.perform_async(build.id)
- ExpirePipelineCacheWorker.perform_async(build.pipeline_id)
+ ExpirePipelineCacheWorker.perform_async(build.pipeline_id) if build.pipeline.cacheable?
ChatNotificationWorker.perform_async(build.id) if build.pipeline.chat?
end
end
diff --git a/app/workers/expire_pipeline_cache_worker.rb b/app/workers/expire_pipeline_cache_worker.rb
index ab57c59ffda..1d204e0a19e 100644
--- a/app/workers/expire_pipeline_cache_worker.rb
+++ b/app/workers/expire_pipeline_cache_worker.rb
@@ -11,7 +11,7 @@ class ExpirePipelineCacheWorker
# rubocop: disable CodeReuse/ActiveRecord
def perform(pipeline_id)
pipeline = Ci::Pipeline.find_by(id: pipeline_id)
- return unless pipeline
+ return unless pipeline&.cacheable?
Ci::ExpirePipelineCacheService.new.execute(pipeline)
end
diff --git a/changelogs/unreleased/37335-conan-name-validation-fixes.yml b/changelogs/unreleased/37335-conan-name-validation-fixes.yml
new file mode 100644
index 00000000000..42d2d66108d
--- /dev/null
+++ b/changelogs/unreleased/37335-conan-name-validation-fixes.yml
@@ -0,0 +1,5 @@
+---
+title: Conan packages are validated based on full recipe instead of name/version alone
+merge_request: 24692
+author:
+type: changed
diff --git a/config/gitlab.yml.example b/config/gitlab.yml.example
index 550973e19f7..9515ebaea62 100644
--- a/config/gitlab.yml.example
+++ b/config/gitlab.yml.example
@@ -432,6 +432,11 @@ production: &base
geo_repository_sync_worker:
cron: "*/1 * * * *"
+ # GitLab Geo registry backfill worker
+ # NOTE: This will only take effect if Geo is enabled (secondary nodes only)
+ geo_secondary_registry_consistency_worker:
+ cron: "* * * * *"
+
# GitLab Geo file download dispatch worker
# NOTE: This will only take effect if Geo is enabled (secondary nodes only)
geo_file_download_dispatch_worker:
diff --git a/config/initializers/1_settings.rb b/config/initializers/1_settings.rb
index a6fbb8608b3..76391983e6e 100644
--- a/config/initializers/1_settings.rb
+++ b/config/initializers/1_settings.rb
@@ -498,6 +498,9 @@ Gitlab.ee do
Settings.cron_jobs['geo_repository_sync_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_repository_sync_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_repository_sync_worker']['job_class'] ||= 'Geo::RepositorySyncWorker'
+ Settings.cron_jobs['geo_secondary_registry_consistency_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['geo_secondary_registry_consistency_worker']['cron'] ||= '* * * * *'
+ Settings.cron_jobs['geo_secondary_registry_consistency_worker']['job_class'] ||= 'Geo::Secondary::RegistryConsistencyWorker'
Settings.cron_jobs['geo_repository_verification_primary_batch_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_repository_verification_primary_batch_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_repository_verification_primary_batch_worker']['job_class'] ||= 'Geo::RepositoryVerification::Primary::BatchWorker'
diff --git a/db/migrate/20200207182131_replace_conan_metadata_index.rb b/db/migrate/20200207182131_replace_conan_metadata_index.rb
new file mode 100644
index 00000000000..4f55a2974d8
--- /dev/null
+++ b/db/migrate/20200207182131_replace_conan_metadata_index.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+class ReplaceConanMetadataIndex < ActiveRecord::Migration[5.2]
+ include Gitlab::Database::MigrationHelpers
+
+ DOWNTIME = false
+ OLD_INDEX = 'index_packages_conan_metadata_on_package_id'
+ NEW_INDEX = 'index_packages_conan_metadata_on_package_id_username_channel'
+
+ disable_ddl_transaction!
+
+ def up
+ add_concurrent_index :packages_conan_metadata,
+ [:package_id, :package_username, :package_channel],
+ unique: true, name: NEW_INDEX
+
+ remove_concurrent_index_by_name :packages_conan_metadata, OLD_INDEX
+ end
+
+ def down
+ add_concurrent_index :packages_conan_metadata, :package_id, name: OLD_INDEX
+
+ remove_concurrent_index_by_name :packages_conan_metadata, NEW_INDEX
+ end
+end
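`add_concurrent_index` and `remove_concurrent_index_by_name` come from GitLab's `MigrationHelpers` and, on PostgreSQL, wrap `CREATE INDEX CONCURRENTLY` / `DROP INDEX CONCURRENTLY`, which cannot run inside a transaction; that is why the migration declares `disable_ddl_transaction!`. Roughly, `up` amounts to the following (illustrative sketch, not part of the patch; the real helpers add existence checks and retries):

```ruby
execute(<<~SQL)
  CREATE UNIQUE INDEX CONCURRENTLY index_packages_conan_metadata_on_package_id_username_channel
    ON packages_conan_metadata (package_id, package_username, package_channel)
SQL
execute('DROP INDEX CONCURRENTLY IF EXISTS index_packages_conan_metadata_on_package_id')
```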
diff --git a/db/schema.rb b/db/schema.rb
index 2ab9d456531..2fd7341c4d9 100644
--- a/db/schema.rb
+++ b/db/schema.rb
@@ -2958,7 +2958,7 @@ ActiveRecord::Schema.define(version: 2020_02_12_052620) do
t.datetime_with_timezone "updated_at", null: false
t.string "package_username", limit: 255, null: false
t.string "package_channel", limit: 255, null: false
- t.index ["package_id"], name: "index_packages_conan_metadata_on_package_id", unique: true
+ t.index ["package_id", "package_username", "package_channel"], name: "index_packages_conan_metadata_on_package_id_username_channel", unique: true
end
create_table "packages_dependencies", force: :cascade do |t|
diff --git a/doc/development/testing_guide/frontend_testing.md b/doc/development/testing_guide/frontend_testing.md
index 9dc06a4b8b8..26357d4fdfd 100644
--- a/doc/development/testing_guide/frontend_testing.md
+++ b/doc/development/testing_guide/frontend_testing.md
@@ -202,6 +202,25 @@ For example, it's better to use the generated markup to trigger a button click a
Following you'll find some general common practices you will find as part of our testsuite. Should you stumble over something not following this guide, ideally fix it right away. 🎉
+### How to query DOM elements
+
+When it comes to querying DOM elements in your tests, it is best to uniquely target the element, without adding additional attributes specifically for testing purposes. Sometimes this cannot be done feasibly. In these cases, adding test attributes to simplify the selectors might be the best option.
+
+Preferentially, in component testing with `@vue/test-utils`, you should query for child components using the component itself. Otherwise, try to use an existing attribute like `name` or a Vue `ref` (if using `@vue/test-utils`):
+
+```javascript
+it('exists', () => {
+ wrapper.find(FooComponent);
+ wrapper.find('input[name=foo]');
+ wrapper.find({ ref: 'foo'});
+ wrapper.find('.js-foo');
+});
+```
+
+It is not recommended that you add `.js-*` classes just for testing purposes. Only do this if there are no other feasible options available.
+
+Do not use a `.qa-*` class or `data-qa-selector` attribute for any tests other than QA end-to-end testing.
+
### Naming unit tests
When writing describe test blocks to test specific functions/methods,
diff --git a/doc/user/project/clusters/img/kubernetes_pod_logs_v12_5.png b/doc/user/project/clusters/img/kubernetes_pod_logs_v12_5.png
deleted file mode 100644
index e54637e7218..00000000000
--- a/doc/user/project/clusters/img/kubernetes_pod_logs_v12_5.png
+++ /dev/null
Binary files differ
diff --git a/doc/user/project/clusters/img/kubernetes_pod_logs_v12_8.png b/doc/user/project/clusters/img/kubernetes_pod_logs_v12_8.png
new file mode 100644
index 00000000000..7be0cd01768
--- /dev/null
+++ b/doc/user/project/clusters/img/kubernetes_pod_logs_v12_8.png
Binary files differ
diff --git a/doc/user/project/clusters/kubernetes_pod_logs.md b/doc/user/project/clusters/kubernetes_pod_logs.md
index a6914a8715b..a36b712ae76 100644
--- a/doc/user/project/clusters/kubernetes_pod_logs.md
+++ b/doc/user/project/clusters/kubernetes_pod_logs.md
@@ -13,7 +13,7 @@ Everything you need to build, test, deploy, and run your app at scale.
[Kubernetes](https://kubernetes.io) pod logs can be viewed directly within GitLab.
-![Pod logs](img/kubernetes_pod_logs_v12_5.png)
+![Pod logs](img/kubernetes_pod_logs_v12_8.png)
## Requirements
@@ -50,14 +50,23 @@ The logs view will contain the last 500 lines for a pod, and has control to filt
- Pods.
- [From GitLab 12.4](https://gitlab.com/gitlab-org/gitlab/issues/5769), environments.
- [From GitLab 12.7](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/21656), [full text search](#full-text-search).
+- [From GitLab 12.8](https://gitlab.com/gitlab-org/gitlab/issues/197879), dates.
Support for pods with multiple containers is coming [in a future release](https://gitlab.com/gitlab-org/gitlab/issues/13404).
Support for historical data is coming [in a future release](https://gitlab.com/gitlab-org/gitlab/issues/196191).
+### Filter by date
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/197879) in GitLab 12.8.
+
+When you enable [Elastic Stack](../../clusters/applications.md#elastic-stack) on your cluster, you can filter by date.
+
+Click on "Show last" to see the available options.
+
### Full text search
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/21656) in GitLab 12.8.
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/21656) in GitLab 12.7.
When you enable [Elastic Stack](../../clusters/applications.md#elastic-stack) on your cluster,
you can search the content of your logs via a search bar.
diff --git a/doc/user/project/integrations/img/download_as_csv.png b/doc/user/project/integrations/img/download_as_csv.png
deleted file mode 100644
index 0ed5ab8db89..00000000000
--- a/doc/user/project/integrations/img/download_as_csv.png
+++ /dev/null
Binary files differ
diff --git a/doc/user/project/integrations/img/panel_context_menu_v12_8.png b/doc/user/project/integrations/img/panel_context_menu_v12_8.png
new file mode 100644
index 00000000000..86d2b26b713
--- /dev/null
+++ b/doc/user/project/integrations/img/panel_context_menu_v12_8.png
Binary files differ
diff --git a/doc/user/project/integrations/prometheus.md b/doc/user/project/integrations/prometheus.md
index 66c128314bb..7e3ac38b627 100644
--- a/doc/user/project/integrations/prometheus.md
+++ b/doc/user/project/integrations/prometheus.md
@@ -457,12 +457,31 @@ Note the following properties:
When viewing a custom dashboard of a project, you can view the original
`.yml` file by clicking on **Edit dashboard** button.
+### Chart Context Menu
+
+From each of the panels in the dashboard, you can access the context menu by clicking the **{ellipsis_v}** **More actions** dropdown box above the upper right corner of the panel to take actions related to the chart's data.
+
+![Context Menu](img/panel_context_menu_v12_8.png)
+
+The options are:
+
+- [View logs](#view-pod-logs-ultimate)
+- [Download CSV](#downloading-data-as-csv)
+- [Generate link to chart](#embedding-gitlab-managed-kubernetes-metrics)
+- [Alerts](#setting-up-alerts-for-prometheus-metrics-ultimate)
+
+### View Pod Logs **(ULTIMATE)**
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/122013) in GitLab 12.8.
+
+If you have [Kubernetes Pod Logs](../clusters/kubernetes_pod_logs.md) enabled, you can navigate from the charts in the dashboard to view Pod Logs by clicking on the context menu in the upper-right corner.
+
+If you use the **Timeline zoom** function at the bottom of the chart, logs will narrow down to the time range you selected.
+
### Downloading data as CSV
Data from Prometheus charts on the metrics dashboard can be downloaded as CSV.
-![Downloading as CSV](img/download_as_csv.png)
-
### Setting up alerts for Prometheus metrics **(ULTIMATE)**
#### Managed Prometheus instances
diff --git a/lib/gitlab/looping_batcher.rb b/lib/gitlab/looping_batcher.rb
new file mode 100644
index 00000000000..adf0aeda506
--- /dev/null
+++ b/lib/gitlab/looping_batcher.rb
@@ -0,0 +1,99 @@
+# frozen_string_literal: true
+
+module Gitlab
+ # Returns an ID range within a table so it can be iterated over. Repeats from
+ # the beginning after it reaches the end.
+ #
+ # Used by Geo in particular to iterate over a replicable and its registry
+ # table.
+ #
+ # Tracks a cursor for each table, by "key". If the table is smaller than
+ # batch_size, then a range for the whole table is returned on every call.
+ class LoopingBatcher
+ # @param [Class] model_class the class of the table to iterate on
+ # @param [String] key to identify the cursor. Note, cursor is already unique
+ # per table.
+ # @param [Integer] batch_size to limit the number of records in a batch
+ def initialize(model_class, key:, batch_size: 1000)
+ @model_class = model_class
+ @key = key
+ @batch_size = batch_size
+ end
+
+ # @return [Range] a range of IDs. `nil` if 0 records at or after the cursor.
+ def next_range!
+ return unless @model_class.any?
+
+ batch_first_id = cursor_id
+
+ batch_last_id = get_batch_last_id(batch_first_id)
+ return unless batch_last_id
+
+ batch_first_id..batch_last_id
+ end
+
+ private
+
+ # @private
+ #
+ # Get the last ID of the batch. Increment the cursor or reset it if at end.
+ #
+ # @param [Integer] batch_first_id the first ID of the batch
+ # @return [Integer] batch_last_id the last ID of the batch (not the table)
+ def get_batch_last_id(batch_first_id)
+ batch_last_id, more_rows = run_query(@model_class.table_name, @model_class.primary_key, batch_first_id, @batch_size)
+
+ if more_rows
+ increment_batch(batch_last_id)
+ else
+ reset if batch_first_id > 1
+ end
+
+ batch_last_id
+ end
+
+ def run_query(table, primary_key, batch_first_id, batch_size)
+ sql = <<~SQL
+ SELECT MAX(batch.id) AS batch_last_id,
+ EXISTS (
+ SELECT #{primary_key}
+ FROM #{table}
+ WHERE #{primary_key} > MAX(batch.id)
+ ) AS more_rows
+ FROM (
+ SELECT #{primary_key}
+ FROM #{table}
+ WHERE #{primary_key} >= #{batch_first_id}
+ ORDER BY #{primary_key}
+ LIMIT #{batch_size}) AS batch;
+ SQL
+
+ result = ActiveRecord::Base.connection.exec_query(sql).first
+
+ [result["batch_last_id"], result["more_rows"]]
+ end
+
+ def reset
+ set_cursor_id(1)
+ end
+
+ def increment_batch(batch_last_id)
+ set_cursor_id(batch_last_id + 1)
+ end
+
+ # @private
+ #
+ # @return [Integer] the cursor ID, or 1 if it is not set
+ def cursor_id
+ Rails.cache.fetch("#{cache_key}:cursor_id") || 1
+ end
+
+ def set_cursor_id(id)
+ Rails.cache.write("#{cache_key}:cursor_id", id)
+ end
+
+ def cache_key
+ @cache_key ||= "#{self.class.name.parameterize}:#{@model_class.name.parameterize}:#{@key}:cursor_id"
+ end
+ end
+end
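A minimal usage sketch of the new class, mirroring the spec added below; the key string and the per-record work are illustrative:

```ruby
batcher = Gitlab::LoopingBatcher.new(LfsObject, key: 'example_worker', batch_size: 1000)

# Each call returns the next ID range and advances a cursor in the Rails cache;
# after the last batch the cursor wraps back to 1, so the table is walked again.
if (range = batcher.next_range!)
  LfsObject.where(id: range).find_each do |lfs_object|
    # process lfs_object here
  end
end
```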
diff --git a/qa/qa/page/component/ci_badge_link.rb b/qa/qa/page/component/ci_badge_link.rb
index d3e44fd867d..ef9bfa560ce 100644
--- a/qa/qa/page/component/ci_badge_link.rb
+++ b/qa/qa/page/component/ci_badge_link.rb
@@ -5,7 +5,22 @@ module QA
module Component
module CiBadgeLink
COMPLETED_STATUSES = %w[passed failed canceled blocked skipped manual].freeze # excludes created, pending, running
- PASSED_STATUS = 'passed'.freeze
+ INCOMPLETE_STATUSES = %w[pending created running].freeze
+
+ # e.g. def passed?(timeout: nil); status_badge == 'passed'; end
+ COMPLETED_STATUSES.map do |status|
+ define_method "#{status}?" do |timeout: nil|
+ timeout ? completed?(timeout: timeout) : completed?
+ status_badge == status
+ end
+ end
+
+ # e.g. def pending?; status_badge == 'pending'; end
+ INCOMPLETE_STATUSES.map do |status|
+ define_method "#{status}?" do
+ status_badge == status
+ end
+ end
def self.included(base)
base.view 'app/assets/javascripts/vue_shared/components/ci_badge_link.vue' do
@@ -17,12 +32,6 @@ module QA
find_element(:status_badge).text
end
- def successful?(timeout: 60)
- raise "Timed out waiting for the status to be a valid completed state" unless completed?(timeout: timeout)
-
- status_badge == PASSED_STATUS
- end
-
private
def completed?(timeout: 60)
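For readability, the metaprogrammed predicates above expand to roughly the following, one example per group (sketch only):

```ruby
# From COMPLETED_STATUSES -- waits for a completed state when a timeout is given:
def passed?(timeout: nil)
  timeout ? completed?(timeout: timeout) : completed?
  status_badge == 'passed'
end

# From INCOMPLETE_STATUSES -- a plain badge-text check:
def pending?
  status_badge == 'pending'
end
```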
diff --git a/qa/qa/page/project/job/show.rb b/qa/qa/page/project/job/show.rb
index d673efd1970..26db2f20c1b 100644
--- a/qa/qa/page/project/job/show.rb
+++ b/qa/qa/page/project/job/show.rb
@@ -21,7 +21,7 @@ module QA::Page
raise "Timed out waiting for the build trace to load" unless loaded?
raise "Timed out waiting for the status to be a valid completed state" unless completed?(timeout: timeout)
- status_badge == PASSED_STATUS
+ passed?
end
# Reminder: You may wish to wait for a particular job status before checking output
diff --git a/qa/qa/specs/features/browser_ui/4_verify/pipeline/create_and_process_pipeline_spec.rb b/qa/qa/specs/features/browser_ui/4_verify/pipeline/create_and_process_pipeline_spec.rb
index c036f188ea2..98c42f5803d 100644
--- a/qa/qa/specs/features/browser_ui/4_verify/pipeline/create_and_process_pipeline_spec.rb
+++ b/qa/qa/specs/features/browser_ui/4_verify/pipeline/create_and_process_pipeline_spec.rb
@@ -68,12 +68,20 @@ module QA
Page::Project::Menu.perform(&:click_ci_cd_pipelines)
Page::Project::Pipeline::Index.perform(&:click_on_latest_pipeline)
- Page::Project::Pipeline::Show.perform do |pipeline|
- expect(pipeline).to be_running(wait: max_wait)
- expect(pipeline).to have_build('test-success', status: :success, wait: max_wait)
- expect(pipeline).to have_build('test-failure', status: :failed, wait: max_wait)
- expect(pipeline).to have_build('test-tags', status: :pending, wait: max_wait)
- expect(pipeline).to have_build('test-artifacts', status: :success, wait: max_wait)
+ {
+ 'test-success': :passed,
+ 'test-failure': :failed,
+ 'test-tags': :pending,
+ 'test-artifacts': :passed
+ }.each do |job, status|
+ Page::Project::Pipeline::Show.perform do |pipeline|
+ pipeline.click_job(job)
+ end
+
+ Page::Project::Job::Show.perform do |show|
+ expect(show).to public_send("be_#{status}")
+ show.click_element(:pipeline_path, Page::Project::Pipeline::Show)
+ end
end
end
end
diff --git a/spec/lib/gitlab/looping_batcher_spec.rb b/spec/lib/gitlab/looping_batcher_spec.rb
new file mode 100644
index 00000000000..b03e969c1e7
--- /dev/null
+++ b/spec/lib/gitlab/looping_batcher_spec.rb
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Gitlab::LoopingBatcher, :use_clean_rails_memory_store_caching do
+ describe '#next_range!' do
+ let(:model_class) { LfsObject }
+ let(:key) { 'looping_batcher_spec' }
+ let(:batch_size) { 2 }
+
+ subject { described_class.new(model_class, key: key, batch_size: batch_size).next_range! }
+
+ context 'when there are no records' do
+ it { is_expected.to be_nil }
+ end
+
+ context 'when there are records' do
+ let!(:records) { create_list(model_class.underscore, 3) }
+
+ context 'when it has never been called before' do
+ it { is_expected.to be_a Range }
+
+ it 'starts from the beginning' do
+ expect(subject.first).to eq(1)
+ end
+
+ it 'ends at a full batch' do
+ expect(subject.last).to eq(records.second.id)
+ end
+
+ context 'when the batch size is greater than the number of records' do
+ let(:batch_size) { 5 }
+
+ it 'ends at the last ID' do
+ expect(subject.last).to eq(records.last.id)
+ end
+ end
+ end
+
+ context 'when it was called before' do
+ context 'when the previous batch included the end of the table' do
+ before do
+ described_class.new(model_class, key: key, batch_size: model_class.count).next_range!
+ end
+
+ it 'starts from the beginning' do
+ expect(subject).to eq(1..records.second.id)
+ end
+ end
+
+ context 'when the previous batch did not include the end of the table' do
+ before do
+ described_class.new(model_class, key: key, batch_size: model_class.count - 1).next_range!
+ end
+
+ it 'starts after the previous batch' do
+ expect(subject).to eq(records.last.id..records.last.id)
+ end
+ end
+
+ context 'if cache is cleared' do
+ it 'starts from the beginning' do
+ Rails.cache.clear
+
+ expect(subject).to eq(1..records.second.id)
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/spec/models/ci/pipeline_spec.rb b/spec/models/ci/pipeline_spec.rb
index 1515da7eeca..86c3628216e 100644
--- a/spec/models/ci/pipeline_spec.rb
+++ b/spec/models/ci/pipeline_spec.rb
@@ -1117,6 +1117,10 @@ describe Ci::Pipeline, :mailer do
end
describe 'pipeline caching' do
+ before do
+ pipeline.config_source = 'repository_source'
+ end
+
it 'performs ExpirePipelinesCacheWorker' do
expect(ExpirePipelineCacheWorker).to receive(:perform_async).with(pipeline.id)
diff --git a/spec/workers/expire_pipeline_cache_worker_spec.rb b/spec/workers/expire_pipeline_cache_worker_spec.rb
index e162a227a66..8d898ffc13e 100644
--- a/spec/workers/expire_pipeline_cache_worker_spec.rb
+++ b/spec/workers/expire_pipeline_cache_worker_spec.rb
@@ -3,9 +3,9 @@
require 'spec_helper'
describe ExpirePipelineCacheWorker do
- let(:user) { create(:user) }
- let(:project) { create(:project) }
- let(:pipeline) { create(:ci_pipeline, project: project) }
+ let_it_be(:user) { create(:user) }
+ let_it_be(:project) { create(:project) }
+ let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
subject { described_class.new }
@@ -22,5 +22,14 @@ describe ExpirePipelineCacheWorker do
subject.perform(617748)
end
+
+ it "doesn't do anything if the pipeline cannot be cached" do
+ allow_any_instance_of(Ci::Pipeline).to receive(:cacheable?).and_return(false)
+
+ expect_any_instance_of(Ci::ExpirePipelineCacheService).not_to receive(:execute)
+ expect_any_instance_of(Gitlab::EtagCaching::Store).not_to receive(:touch)
+
+ subject.perform(pipeline.id)
+ end
end
end