Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/spec
diff options
context:
space:
mode:
authorGitLab Bot <gitlab-bot@gitlab.com>2020-03-24 12:09:25 +0300
committerGitLab Bot <gitlab-bot@gitlab.com>2020-03-24 12:09:25 +0300
commit6f7881ee9dcec34141a8f34fc814b56b366d2b48 (patch)
tree25f72a06874b32b1049b79a9d7f4f1b7bca43b9b /spec
parent8c8bf44fa64f98114f7439f751c92d59a44b3218 (diff)
Add latest changes from gitlab-org/gitlab@master
Diffstat (limited to 'spec')
-rw-r--r--spec/factories/user_canonical_emails.rb8
-rw-r--r--spec/frontend/notes/components/notes_app_spec.js (renamed from spec/frontend/notes/components/note_app_spec.js)0
-rw-r--r--spec/frontend/u2f/authenticate_spec.js (renamed from spec/javascripts/u2f/authenticate_spec.js)59
-rw-r--r--spec/frontend/u2f/mock_u2f_device.js (renamed from spec/javascripts/u2f/mock_u2f_device.js)0
-rw-r--r--spec/frontend/u2f/register_spec.js (renamed from spec/javascripts/u2f/register_spec.js)48
-rw-r--r--spec/models/clusters/applications/ingress_spec.rb58
-rw-r--r--spec/models/clusters/applications/prometheus_spec.rb45
-rw-r--r--spec/models/project_spec.rb18
-rw-r--r--spec/models/user_canonical_email_spec.rb20
-rw-r--r--spec/services/clusters/applications/check_upgrade_progress_service_spec.rb94
-rw-r--r--spec/services/clusters/applications/prometheus_config_service_spec.rb158
-rw-r--r--spec/services/clusters/applications/prometheus_update_service_spec.rb92
-rw-r--r--spec/services/clusters/applications/schedule_update_service_spec.rb37
-rw-r--r--spec/services/users/build_service_spec.rb8
-rw-r--r--spec/services/users/create_service_spec.rb3
-rw-r--r--spec/services/users/update_canonical_email_service_spec.rb116
-rw-r--r--spec/services/users/update_service_spec.rb26
-rw-r--r--spec/workers/cluster_update_app_worker_spec.rb98
-rw-r--r--spec/workers/cluster_wait_for_app_update_worker_spec.rb27
19 files changed, 854 insertions, 61 deletions
diff --git a/spec/factories/user_canonical_emails.rb b/spec/factories/user_canonical_emails.rb
new file mode 100644
index 00000000000..0161d25c525
--- /dev/null
+++ b/spec/factories/user_canonical_emails.rb
@@ -0,0 +1,8 @@
+# frozen_string_literal: true
+
+FactoryBot.define do
+ factory :user_canonical_email do
+ user
+ canonical_email { user.email }
+ end
+end
diff --git a/spec/frontend/notes/components/note_app_spec.js b/spec/frontend/notes/components/notes_app_spec.js
index 2d0cca18647..2d0cca18647 100644
--- a/spec/frontend/notes/components/note_app_spec.js
+++ b/spec/frontend/notes/components/notes_app_spec.js
diff --git a/spec/javascripts/u2f/authenticate_spec.js b/spec/frontend/u2f/authenticate_spec.js
index 8f9cb270729..1d39c4857ae 100644
--- a/spec/javascripts/u2f/authenticate_spec.js
+++ b/spec/frontend/u2f/authenticate_spec.js
@@ -3,15 +3,19 @@ import U2FAuthenticate from '~/u2f/authenticate';
import 'vendor/u2f';
import MockU2FDevice from './mock_u2f_device';
-describe('U2FAuthenticate', function() {
+describe('U2FAuthenticate', () => {
+ let u2fDevice;
+ let container;
+ let component;
+
preloadFixtures('u2f/authenticate.html');
beforeEach(() => {
loadFixtures('u2f/authenticate.html');
- this.u2fDevice = new MockU2FDevice();
- this.container = $('#js-authenticate-u2f');
- this.component = new U2FAuthenticate(
- this.container,
+ u2fDevice = new MockU2FDevice();
+ container = $('#js-authenticate-u2f');
+ component = new U2FAuthenticate(
+ container,
'#js-login-u2f-form',
{
sign_requests: [],
@@ -22,21 +26,23 @@ describe('U2FAuthenticate', function() {
});
describe('with u2f unavailable', () => {
+ let oldu2f;
+
beforeEach(() => {
- spyOn(this.component, 'switchToFallbackUI');
- this.oldu2f = window.u2f;
+ jest.spyOn(component, 'switchToFallbackUI').mockImplementation(() => {});
+ oldu2f = window.u2f;
window.u2f = null;
});
afterEach(() => {
- window.u2f = this.oldu2f;
+ window.u2f = oldu2f;
});
it('falls back to normal 2fa', done => {
- this.component
+ component
.start()
.then(() => {
- expect(this.component.switchToFallbackUI).toHaveBeenCalled();
+ expect(component.switchToFallbackUI).toHaveBeenCalled();
done();
})
.catch(done.fail);
@@ -46,54 +52,55 @@ describe('U2FAuthenticate', function() {
describe('with u2f available', () => {
beforeEach(done => {
// bypass automatic form submission within renderAuthenticated
- spyOn(this.component, 'renderAuthenticated').and.returnValue(true);
- this.u2fDevice = new MockU2FDevice();
+ jest.spyOn(component, 'renderAuthenticated').mockReturnValue(true);
+ u2fDevice = new MockU2FDevice();
- this.component
+ component
.start()
.then(done)
.catch(done.fail);
});
it('allows authenticating via a U2F device', () => {
- const inProgressMessage = this.container.find('p');
+ const inProgressMessage = container.find('p');
expect(inProgressMessage.text()).toContain('Trying to communicate with your device');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
deviceData: 'this is data from the device',
});
- expect(this.component.renderAuthenticated).toHaveBeenCalledWith(
+ expect(component.renderAuthenticated).toHaveBeenCalledWith(
'{"deviceData":"this is data from the device"}',
);
});
describe('errors', () => {
it('displays an error message', () => {
- const setupButton = this.container.find('#js-login-u2f-device');
+ const setupButton = container.find('#js-login-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
errorCode: 'error!',
});
- const errorMessage = this.container.find('p');
+ const errorMessage = container.find('p');
expect(errorMessage.text()).toContain('There was a problem communicating with your device');
});
- return it('allows retrying authentication after an error', () => {
- let setupButton = this.container.find('#js-login-u2f-device');
+
+ it('allows retrying authentication after an error', () => {
+ let setupButton = container.find('#js-login-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
errorCode: 'error!',
});
- const retryButton = this.container.find('#js-u2f-try-again');
+ const retryButton = container.find('#js-u2f-try-again');
retryButton.trigger('click');
- setupButton = this.container.find('#js-login-u2f-device');
+ setupButton = container.find('#js-login-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
deviceData: 'this is data from the device',
});
- expect(this.component.renderAuthenticated).toHaveBeenCalledWith(
+ expect(component.renderAuthenticated).toHaveBeenCalledWith(
'{"deviceData":"this is data from the device"}',
);
});
diff --git a/spec/javascripts/u2f/mock_u2f_device.js b/spec/frontend/u2f/mock_u2f_device.js
index ec8425a4e3e..ec8425a4e3e 100644
--- a/spec/javascripts/u2f/mock_u2f_device.js
+++ b/spec/frontend/u2f/mock_u2f_device.js
diff --git a/spec/javascripts/u2f/register_spec.js b/spec/frontend/u2f/register_spec.js
index a75ceca9f4c..a4395a2123a 100644
--- a/spec/javascripts/u2f/register_spec.js
+++ b/spec/frontend/u2f/register_spec.js
@@ -3,33 +3,37 @@ import U2FRegister from '~/u2f/register';
import 'vendor/u2f';
import MockU2FDevice from './mock_u2f_device';
-describe('U2FRegister', function() {
+describe('U2FRegister', () => {
+ let u2fDevice;
+ let container;
+ let component;
+
preloadFixtures('u2f/register.html');
beforeEach(done => {
loadFixtures('u2f/register.html');
- this.u2fDevice = new MockU2FDevice();
- this.container = $('#js-register-u2f');
- this.component = new U2FRegister(this.container, $('#js-register-u2f-templates'), {}, 'token');
- this.component
+ u2fDevice = new MockU2FDevice();
+ container = $('#js-register-u2f');
+ component = new U2FRegister(container, $('#js-register-u2f-templates'), {}, 'token');
+ component
.start()
.then(done)
.catch(done.fail);
});
it('allows registering a U2F device', () => {
- const setupButton = this.container.find('#js-setup-u2f-device');
+ const setupButton = container.find('#js-setup-u2f-device');
expect(setupButton.text()).toBe('Set up new U2F device');
setupButton.trigger('click');
- const inProgressMessage = this.container.children('p');
+ const inProgressMessage = container.children('p');
expect(inProgressMessage.text()).toContain('Trying to communicate with your device');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
deviceData: 'this is data from the device',
});
- const registeredMessage = this.container.find('p');
- const deviceResponse = this.container.find('#js-device-response');
+ const registeredMessage = container.find('p');
+ const deviceResponse = container.find('#js-device-response');
expect(registeredMessage.text()).toContain('Your device was successfully set up!');
expect(deviceResponse.val()).toBe('{"deviceData":"this is data from the device"}');
@@ -37,41 +41,41 @@ describe('U2FRegister', function() {
describe('errors', () => {
it("doesn't allow the same device to be registered twice (for the same user", () => {
- const setupButton = this.container.find('#js-setup-u2f-device');
+ const setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
errorCode: 4,
});
- const errorMessage = this.container.find('p');
+ const errorMessage = container.find('p');
expect(errorMessage.text()).toContain('already been registered with us');
});
it('displays an error message for other errors', () => {
- const setupButton = this.container.find('#js-setup-u2f-device');
+ const setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
errorCode: 'error!',
});
- const errorMessage = this.container.find('p');
+ const errorMessage = container.find('p');
expect(errorMessage.text()).toContain('There was a problem communicating with your device');
});
it('allows retrying registration after an error', () => {
- let setupButton = this.container.find('#js-setup-u2f-device');
+ let setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
errorCode: 'error!',
});
- const retryButton = this.container.find('#U2FTryAgain');
+ const retryButton = container.find('#U2FTryAgain');
retryButton.trigger('click');
- setupButton = this.container.find('#js-setup-u2f-device');
+ setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
deviceData: 'this is data from the device',
});
- const registeredMessage = this.container.find('p');
+ const registeredMessage = container.find('p');
expect(registeredMessage.text()).toContain('Your device was successfully set up!');
});
diff --git a/spec/models/clusters/applications/ingress_spec.rb b/spec/models/clusters/applications/ingress_spec.rb
index ba5f48ce6b3..64d667f40f6 100644
--- a/spec/models/clusters/applications/ingress_spec.rb
+++ b/spec/models/clusters/applications/ingress_spec.rb
@@ -21,26 +21,60 @@ describe Clusters::Applications::Ingress do
describe '#can_uninstall?' do
subject { ingress.can_uninstall? }
- it 'returns true if external ip is set and no application exists' do
- ingress.external_ip = 'IP'
+ context 'with jupyter installed' do
+ before do
+ create(:clusters_applications_jupyter, :installed, cluster: ingress.cluster)
+ end
- is_expected.to be_truthy
- end
+ it 'returns false if external_ip_or_hostname? is true' do
+ ingress.external_ip = 'IP'
- it 'returns false if application_jupyter_nil_or_installable? is false' do
- create(:clusters_applications_jupyter, :installed, cluster: ingress.cluster)
+ is_expected.to be_falsey
+ end
- is_expected.to be_falsey
+ it 'returns false if external_ip_or_hostname? is false' do
+ is_expected.to be_falsey
+ end
end
- it 'returns false if application_elastic_stack_nil_or_installable? is false' do
- create(:clusters_applications_elastic_stack, :installed, cluster: ingress.cluster)
+ context 'with jupyter installable' do
+ before do
+ create(:clusters_applications_jupyter, :installable, cluster: ingress.cluster)
+ end
+
+ it 'returns true if external_ip_or_hostname? is true' do
+ ingress.external_ip = 'IP'
+
+ is_expected.to be_truthy
+ end
- is_expected.to be_falsey
+ it 'returns false if external_ip_or_hostname? is false' do
+ is_expected.to be_falsey
+ end
end
- it 'returns false if external_ip_or_hostname? is false' do
- is_expected.to be_falsey
+ context 'with jupyter nil' do
+ it 'returns false if external_ip_or_hostname? is false' do
+ is_expected.to be_falsey
+ end
+
+ context 'if external_ip_or_hostname? is true' do
+ context 'with IP' do
+ before do
+ ingress.external_ip = 'IP'
+ end
+
+ it { is_expected.to be_truthy }
+ end
+
+ context 'with hostname' do
+ before do
+ ingress.external_hostname = 'example.com'
+ end
+
+ it { is_expected.to be_truthy }
+ end
+ end
end
end
diff --git a/spec/models/clusters/applications/prometheus_spec.rb b/spec/models/clusters/applications/prometheus_spec.rb
index ecb87910d2d..ce341e67c14 100644
--- a/spec/models/clusters/applications/prometheus_spec.rb
+++ b/spec/models/clusters/applications/prometheus_spec.rb
@@ -39,6 +39,19 @@ describe Clusters::Applications::Prometheus do
end
end
+ describe 'transition to updating' do
+ let(:project) { create(:project) }
+ let(:cluster) { create(:cluster, projects: [project]) }
+
+ subject { create(:clusters_applications_prometheus, :installed, cluster: cluster) }
+
+ it 'sets last_update_started_at to now' do
+ Timecop.freeze do
+ expect { subject.make_updating }.to change { subject.reload.last_update_started_at }.to be_within(1.second).of(Time.now)
+ end
+ end
+ end
+
describe '#can_uninstall?' do
let(:prometheus) { create(:clusters_applications_prometheus) }
@@ -331,6 +344,38 @@ describe Clusters::Applications::Prometheus do
end
end
+ describe '#updated_since?' do
+ let(:cluster) { create(:cluster) }
+ let(:prometheus_app) { build(:clusters_applications_prometheus, cluster: cluster) }
+ let(:timestamp) { Time.now - 5.minutes }
+
+ around do |example|
+ Timecop.freeze { example.run }
+ end
+
+ before do
+ prometheus_app.last_update_started_at = Time.now
+ end
+
+ context 'when app does not have status failed' do
+ it 'returns true when last update started after the timestamp' do
+ expect(prometheus_app.updated_since?(timestamp)).to be true
+ end
+
+ it 'returns false when last update started before the timestamp' do
+ expect(prometheus_app.updated_since?(Time.now + 5.minutes)).to be false
+ end
+ end
+
+ context 'when app has status failed' do
+ it 'returns false when last update started after the timestamp' do
+ prometheus_app.status = 6
+
+ expect(prometheus_app.updated_since?(timestamp)).to be false
+ end
+ end
+ end
+
describe 'alert manager token' do
subject { create(:clusters_applications_prometheus) }
diff --git a/spec/models/project_spec.rb b/spec/models/project_spec.rb
index ceb6382eb6c..f0423937710 100644
--- a/spec/models/project_spec.rb
+++ b/spec/models/project_spec.rb
@@ -5938,6 +5938,24 @@ describe Project do
end
end
+ describe '#environments_for_scope' do
+ let_it_be(:project, reload: true) { create(:project) }
+
+ before do
+ create_list(:environment, 2, project: project)
+ end
+
+ it 'retrieves all project environments when using the * wildcard' do
+ expect(project.environments_for_scope("*")).to eq(project.environments)
+ end
+
+ it 'retrieves a specific project environment when using the name of that environment' do
+ environment = project.environments.first
+
+ expect(project.environments_for_scope(environment.name)).to eq([environment])
+ end
+ end
+
def finish_job(export_job)
export_job.start
export_job.finish
diff --git a/spec/models/user_canonical_email_spec.rb b/spec/models/user_canonical_email_spec.rb
new file mode 100644
index 00000000000..54a4e968033
--- /dev/null
+++ b/spec/models/user_canonical_email_spec.rb
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe UserCanonicalEmail do
+ it { is_expected.to belong_to(:user) }
+
+ describe 'validations' do
+ describe 'canonical_email' do
+ it { is_expected.to validate_presence_of(:canonical_email) }
+
+ it 'validates email address', :aggregate_failures do
+ expect(build(:user_canonical_email, canonical_email: 'nonsense')).not_to be_valid
+ expect(build(:user_canonical_email, canonical_email: '@nonsense')).not_to be_valid
+ expect(build(:user_canonical_email, canonical_email: '@nonsense@')).not_to be_valid
+ expect(build(:user_canonical_email, canonical_email: 'nonsense@')).not_to be_valid
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb b/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb
new file mode 100644
index 00000000000..c08b618fe6a
--- /dev/null
+++ b/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb
@@ -0,0 +1,94 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::CheckUpgradeProgressService do
+ RESCHEDULE_PHASES = ::Gitlab::Kubernetes::Pod::PHASES -
+ [::Gitlab::Kubernetes::Pod::SUCCEEDED, ::Gitlab::Kubernetes::Pod::FAILED, ::Gitlab].freeze
+
+ let(:application) { create(:clusters_applications_prometheus, :updating) }
+ let(:service) { described_class.new(application) }
+ let(:phase) { ::Gitlab::Kubernetes::Pod::UNKNOWN }
+ let(:errors) { nil }
+
+ shared_examples 'a terminated upgrade' do
+ it 'removes the POD' do
+ expect(service).to receive(:remove_pod).once
+
+ service.execute
+ end
+ end
+
+ shared_examples 'a not yet terminated upgrade' do |a_phase|
+ let(:phase) { a_phase }
+
+ context "when phase is #{a_phase}" do
+ context 'when not timed out' do
+ it 'reschedule a new check' do
+ expect(::ClusterWaitForAppUpdateWorker).to receive(:perform_in).once
+ expect(service).not_to receive(:remove_pod)
+
+ service.execute
+
+ expect(application).to be_updating
+ expect(application.status_reason).to be_nil
+ end
+ end
+
+ context 'when timed out' do
+ let(:application) { create(:clusters_applications_prometheus, :timed_out, :updating) }
+
+ it_behaves_like 'a terminated upgrade'
+
+ it 'make the application update errored' do
+ expect(::ClusterWaitForAppUpdateWorker).not_to receive(:perform_in)
+
+ service.execute
+
+ expect(application).to be_update_errored
+ expect(application.status_reason).to eq("Update timed out")
+ end
+ end
+ end
+ end
+
+ before do
+ allow(service).to receive(:phase).once.and_return(phase)
+
+ allow(service).to receive(:errors).and_return(errors)
+ allow(service).to receive(:remove_pod).and_return(nil)
+ end
+
+ describe '#execute' do
+ context 'when upgrade pod succeeded' do
+ let(:phase) { ::Gitlab::Kubernetes::Pod::SUCCEEDED }
+
+ it_behaves_like 'a terminated upgrade'
+
+ it 'make the application upgraded' do
+ expect(::ClusterWaitForAppUpdateWorker).not_to receive(:perform_in)
+
+ service.execute
+
+ expect(application).to be_updated
+ expect(application.status_reason).to be_nil
+ end
+ end
+
+ context 'when upgrade pod failed' do
+ let(:phase) { ::Gitlab::Kubernetes::Pod::FAILED }
+ let(:errors) { 'test installation failed' }
+
+ it_behaves_like 'a terminated upgrade'
+
+ it 'make the application update errored' do
+ service.execute
+
+ expect(application).to be_update_errored
+ expect(application.status_reason).to eq(errors)
+ end
+ end
+
+ RESCHEDULE_PHASES.each { |phase| it_behaves_like 'a not yet terminated upgrade', phase }
+ end
+end
diff --git a/spec/services/clusters/applications/prometheus_config_service_spec.rb b/spec/services/clusters/applications/prometheus_config_service_spec.rb
new file mode 100644
index 00000000000..993a697b543
--- /dev/null
+++ b/spec/services/clusters/applications/prometheus_config_service_spec.rb
@@ -0,0 +1,158 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::PrometheusConfigService do
+ include Gitlab::Routing.url_helpers
+
+ let_it_be(:project) { create(:project) }
+ let_it_be(:production) { create(:environment, project: project) }
+ let_it_be(:cluster) { create(:cluster, :provided_by_user, projects: [project]) }
+
+ let(:application) do
+ create(:clusters_applications_prometheus, :installed, cluster: cluster)
+ end
+
+ subject { described_class.new(project, cluster, application).execute(input) }
+
+ describe '#execute' do
+ let(:input) do
+ YAML.load_file(Rails.root.join('vendor/prometheus/values.yaml'))
+ end
+
+ context 'with alerts' do
+ let!(:alert) do
+ create(:prometheus_alert, project: project, environment: production)
+ end
+
+ it 'enables alertmanager' do
+ expect(subject.dig('alertmanager', 'enabled')).to eq(true)
+ end
+
+ describe 'alertmanagerFiles' do
+ let(:alertmanager) do
+ subject.dig('alertmanagerFiles', 'alertmanager.yml')
+ end
+
+ it 'contains receivers and route' do
+ expect(alertmanager.keys).to contain_exactly('receivers', 'route')
+ end
+
+ describe 'receivers' do
+ let(:receiver) { alertmanager.dig('receivers', 0) }
+ let(:webhook_config) { receiver.dig('webhook_configs', 0) }
+
+ let(:notify_url) do
+ notify_project_prometheus_alerts_url(project, format: :json)
+ end
+
+ it 'sets receiver' do
+ expect(receiver['name']).to eq('gitlab')
+ end
+
+ it 'sets webhook_config' do
+ expect(webhook_config).to eq(
+ 'url' => notify_url,
+ 'send_resolved' => true,
+ 'http_config' => {
+ 'bearer_token' => application.alert_manager_token
+ }
+ )
+ end
+ end
+
+ describe 'route' do
+ let(:route) { alertmanager.fetch('route') }
+
+ it 'sets route' do
+ expect(route).to eq(
+ 'receiver' => 'gitlab',
+ 'group_wait' => '30s',
+ 'group_interval' => '5m',
+ 'repeat_interval' => '4h'
+ )
+ end
+ end
+ end
+
+ describe 'serverFiles' do
+ let(:groups) { subject.dig('serverFiles', 'alerts', 'groups') }
+
+ it 'sets the alerts' do
+ rules = groups.dig(0, 'rules')
+ expect(rules.size).to eq(1)
+
+ expect(rules.first['alert']).to eq(alert.title)
+ end
+
+ context 'with parameterized queries' do
+ let!(:alert) do
+ create(:prometheus_alert,
+ project: project,
+ environment: production,
+ prometheus_metric: metric)
+ end
+
+ let(:metric) do
+ create(:prometheus_metric, query: query, project: project)
+ end
+
+ let(:query) { '%{ci_environment_slug}' }
+
+ it 'substitutes query variables' do
+ expect(Gitlab::Prometheus::QueryVariables)
+ .to receive(:call)
+ .with(production)
+ .and_call_original
+
+ expr = groups.dig(0, 'rules', 0, 'expr')
+ expect(expr).to include(production.name)
+ end
+ end
+
+ context 'with multiple environments' do
+ let(:staging) { create(:environment, project: project) }
+
+ before do
+ create(:prometheus_alert, project: project, environment: production)
+ create(:prometheus_alert, project: project, environment: staging)
+ end
+
+ it 'sets alerts for multiple environment' do
+ env_names = groups.map { |group| group['name'] }
+ expect(env_names).to contain_exactly(
+ "#{production.name}.rules",
+ "#{staging.name}.rules"
+ )
+ end
+
+ it 'substitutes query variables once per environment' do
+ expect(Gitlab::Prometheus::QueryVariables)
+ .to receive(:call)
+ .with(production)
+
+ expect(Gitlab::Prometheus::QueryVariables)
+ .to receive(:call)
+ .with(staging)
+
+ subject
+ end
+ end
+ end
+ end
+
+ context 'without alerts' do
+ it 'disables alertmanager' do
+ expect(subject.dig('alertmanager', 'enabled')).to eq(false)
+ end
+
+ it 'removes alertmanagerFiles' do
+ expect(subject).not_to include('alertmanagerFiles')
+ end
+
+ it 'removes alerts' do
+ expect(subject.dig('serverFiles', 'alerts')).to eq({})
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/applications/prometheus_update_service_spec.rb b/spec/services/clusters/applications/prometheus_update_service_spec.rb
new file mode 100644
index 00000000000..078b01d2777
--- /dev/null
+++ b/spec/services/clusters/applications/prometheus_update_service_spec.rb
@@ -0,0 +1,92 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::PrometheusUpdateService do
+ describe '#execute' do
+ let(:project) { create(:project) }
+ let(:environment) { create(:environment, project: project) }
+ let(:cluster) { create(:cluster, :provided_by_user, :with_installed_helm, projects: [project]) }
+ let(:application) { create(:clusters_applications_prometheus, :installed, cluster: cluster) }
+ let(:empty_alerts_values_update_yaml) { "---\nalertmanager:\n enabled: false\nserverFiles:\n alerts: {}\n" }
+ let!(:patch_command) { application.patch_command(empty_alerts_values_update_yaml) }
+ let(:helm_client) { instance_double(::Gitlab::Kubernetes::Helm::API) }
+
+ subject(:service) { described_class.new(application, project) }
+
+ before do
+ allow(service).to receive(:patch_command).with(empty_alerts_values_update_yaml).and_return(patch_command)
+ allow(service).to receive(:helm_api).and_return(helm_client)
+ end
+
+ context 'when there are no errors' do
+ before do
+ expect(helm_client).to receive(:update).with(patch_command)
+
+ allow(::ClusterWaitForAppUpdateWorker)
+ .to receive(:perform_in)
+ .and_return(nil)
+ end
+
+ it 'make the application updating' do
+ expect(application.cluster).not_to be_nil
+
+ service.execute
+
+ expect(application).to be_updating
+ end
+
+ it 'updates current config' do
+ prometheus_config_service = spy(:prometheus_config_service)
+
+ expect(Clusters::Applications::PrometheusConfigService)
+ .to receive(:new)
+ .with(project, cluster, application)
+ .and_return(prometheus_config_service)
+
+ expect(prometheus_config_service)
+ .to receive(:execute)
+ .and_return(YAML.safe_load(empty_alerts_values_update_yaml))
+
+ service.execute
+ end
+
+ it 'schedules async update status check' do
+ expect(::ClusterWaitForAppUpdateWorker).to receive(:perform_in).once
+
+ service.execute
+ end
+ end
+
+ context 'when k8s cluster communication fails' do
+ before do
+ error = ::Kubeclient::HttpError.new(500, 'system failure', nil)
+ allow(helm_client).to receive(:update).and_raise(error)
+ end
+
+ it 'make the application update errored' do
+ service.execute
+
+ expect(application).to be_update_errored
+ expect(application.status_reason).to match(/kubernetes error:/i)
+ end
+ end
+
+ context 'when application cannot be persisted' do
+ let(:application) { build(:clusters_applications_prometheus, :installed) }
+
+ before do
+ allow(application).to receive(:make_updating!).once
+ .and_raise(ActiveRecord::RecordInvalid.new(application))
+ end
+
+ it 'make the application update errored' do
+ expect(helm_client).not_to receive(:update)
+
+ service.execute
+
+ expect(application).to be_update_errored
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/applications/schedule_update_service_spec.rb b/spec/services/clusters/applications/schedule_update_service_spec.rb
new file mode 100644
index 00000000000..0764f5b6a97
--- /dev/null
+++ b/spec/services/clusters/applications/schedule_update_service_spec.rb
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::ScheduleUpdateService do
+ describe '#execute' do
+ let(:project) { create(:project) }
+
+ around do |example|
+ Timecop.freeze { example.run }
+ end
+
+ context 'when application is able to be updated' do
+ context 'when the application was recently scheduled' do
+ it 'schedules worker with a backoff delay' do
+ application = create(:clusters_applications_prometheus, :installed, last_update_started_at: Time.now + 5.minutes)
+ service = described_class.new(application, project)
+
+ expect(::ClusterUpdateAppWorker).to receive(:perform_in).with(described_class::BACKOFF_DELAY, application.name, application.id, project.id, Time.now).once
+
+ service.execute
+ end
+ end
+
+ context 'when the application has not been recently updated' do
+ it 'schedules worker' do
+ application = create(:clusters_applications_prometheus, :installed)
+ service = described_class.new(application, project)
+
+ expect(::ClusterUpdateAppWorker).to receive(:perform_async).with(application.name, application.id, project.id, Time.now).once
+
+ service.execute
+ end
+ end
+ end
+ end
+end
diff --git a/spec/services/users/build_service_spec.rb b/spec/services/users/build_service_spec.rb
index aed5d2598ef..146819c7f44 100644
--- a/spec/services/users/build_service_spec.rb
+++ b/spec/services/users/build_service_spec.rb
@@ -16,6 +16,14 @@ describe Users::BuildService do
expect(service.execute).to be_valid
end
+ context 'calls the UpdateCanonicalEmailService' do
+ specify do
+ expect(Users::UpdateCanonicalEmailService).to receive(:new).and_call_original
+
+ service.execute
+ end
+ end
+
context 'allowed params' do
let(:params) do
{
diff --git a/spec/services/users/create_service_spec.rb b/spec/services/users/create_service_spec.rb
index a139dc01314..c783a1403df 100644
--- a/spec/services/users/create_service_spec.rb
+++ b/spec/services/users/create_service_spec.rb
@@ -8,10 +8,11 @@ describe Users::CreateService do
context 'with an admin user' do
let(:service) { described_class.new(admin_user, params) }
+ let(:email) { 'jd@example.com' }
context 'when required parameters are provided' do
let(:params) do
- { name: 'John Doe', username: 'jduser', email: 'jd@example.com', password: 'mydummypass' }
+ { name: 'John Doe', username: 'jduser', email: email, password: 'mydummypass' }
end
it 'returns a persisted user' do
diff --git a/spec/services/users/update_canonical_email_service_spec.rb b/spec/services/users/update_canonical_email_service_spec.rb
new file mode 100644
index 00000000000..68ba1b75b6c
--- /dev/null
+++ b/spec/services/users/update_canonical_email_service_spec.rb
@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Users::UpdateCanonicalEmailService do
+ let(:other_email) { "differentaddress@includeddomain.com" }
+
+ before do
+ stub_const("Users::UpdateCanonicalEmailService::INCLUDED_DOMAINS_PATTERN", [/includeddomain/])
+ end
+
+ describe '#initialize' do
+ context 'unsuccessful' do
+ it 'raises an error if there is no user' do
+ expect { described_class.new(user: nil) }.to raise_error(ArgumentError, /Please provide a user/)
+ end
+
+ it 'raises an error if the object is not a User' do
+ expect { described_class.new(user: 123) }.to raise_error(ArgumentError, /Please provide a user/)
+ end
+ end
+
+ context 'when a user is provided' do
+ it 'does not error' do
+ user = build(:user)
+
+ expect { described_class.new(user: user) }.not_to raise_error
+ end
+ end
+ end
+
+ describe "#canonicalize_email" do
+ let(:user) { build(:user) }
+ let(:subject) { described_class.new(user: user) }
+
+ context 'when the email domain is included' do
+ context 'strips out any . or anything after + in the agent for included domains' do
+ using RSpec::Parameterized::TableSyntax
+
+ let(:expected_result) { 'user@includeddomain.com' }
+
+ where(:raw_email, :expected_result) do
+ 'user@includeddomain.com' | 'user@includeddomain.com'
+ 'u.s.e.r@includeddomain.com' | 'user@includeddomain.com'
+ 'user+123@includeddomain.com' | 'user@includeddomain.com'
+ 'us.er+123@includeddomain.com' | 'user@includeddomain.com'
+ end
+
+ with_them do
+ before do
+ user.email = raw_email
+ end
+
+ specify do
+ subject.execute
+
+ expect(user.user_canonical_email).not_to be_nil
+ expect(user.user_canonical_email.canonical_email).to eq expected_result
+ end
+ end
+ end
+
+ context 'when the user has an existing canonical email' do
+ it 'updates the user canonical email record' do
+ user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
+ user.email = "us.er+123@includeddomain.com"
+
+ subject.execute
+
+ expect(user.user_canonical_email.canonical_email).to eq "user@includeddomain.com"
+ end
+ end
+ end
+
+ context 'when the email domain is not included' do
+ it 'returns nil' do
+ user.email = "u.s.er+343@excludeddomain.com"
+
+ subject.execute
+
+ expect(user.user_canonical_email).to be_nil
+ end
+
+ it 'destroys any existing UserCanonicalEmail record' do
+ user.email = "u.s.er+343@excludeddomain.com"
+ user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
+ expect(user.user_canonical_email).to receive(:delete)
+
+ subject.execute
+ end
+ end
+
+ context 'when the user email is not processable' do
+ [nil, 'nonsense'].each do |invalid_address|
+ before do
+ user.email = invalid_address
+ end
+
+ specify do
+ subject.execute
+
+ expect(user.user_canonical_email).to be_nil
+ end
+
+ it 'preserves any existing record' do
+ user.email = nil
+ user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
+
+ subject.execute
+
+ expect(user.user_canonical_email.canonical_email).to eq other_email
+ end
+ end
+ end
+ end
+end
diff --git a/spec/services/users/update_service_spec.rb b/spec/services/users/update_service_spec.rb
index 24738a79045..bd54ca97431 100644
--- a/spec/services/users/update_service_spec.rb
+++ b/spec/services/users/update_service_spec.rb
@@ -71,6 +71,32 @@ describe Users::UpdateService do
expect(user.job_title).to eq('Backend Engineer')
end
context 'updating canonical email' do
  context 'if email was changed' do
    subject { update_user(user, email: 'user+extrastuff@example.com') }

    it 'calls canonicalize_email' do
      # An email change should trigger one run of the canonicalizing service.
      expect_next_instance_of(Users::UpdateCanonicalEmailService) do |service|
        expect(service).to receive(:execute)
      end

      subject
    end
  end

  context 'if email was NOT changed' do
    subject { update_user(user, job_title: 'supreme leader of the universe') }

    it 'skips update canonicalize email service call' do
      expect { subject }.not_to change { user.user_canonical_email }
    end
  end
end
+
# Runs Users::UpdateService for +user+ with the given options, acting as
# the user themselves (the service reads the actor from the :user key).
def update_user(user, opts)
  service_params = opts.merge(user: user)
  described_class.new(user, service_params).execute
end
diff --git a/spec/workers/cluster_update_app_worker_spec.rb b/spec/workers/cluster_update_app_worker_spec.rb
new file mode 100644
index 00000000000..e540ede4bc0
--- /dev/null
+++ b/spec/workers/cluster_update_app_worker_spec.rb
@@ -0,0 +1,98 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
describe ClusterUpdateAppWorker do
  include ExclusiveLeaseHelpers

  let_it_be(:project) { create(:project) }

  let(:prometheus_update_service) { spy }

  subject { described_class.new }

  around do |example|
    # Pin the clock so comparisons against the scheduled time are deterministic.
    Timecop.freeze(Time.now) { example.run }
  end

  before do
    allow(::Clusters::Applications::PrometheusUpdateService).to receive(:new).and_return(prometheus_update_service)
  end

  describe '#perform' do
    context 'when the application last_update_started_at is higher than the time the job was scheduled in' do
      it 'does nothing' do
        app = create(:clusters_applications_prometheus, :updated, last_update_started_at: Time.now)

        expect(prometheus_update_service).not_to receive(:execute)

        expect(subject.perform(app.name, app.id, project.id, Time.now - 5.minutes)).to be_nil
      end
    end

    context 'when another worker is already running' do
      it 'returns nil' do
        app = create(:clusters_applications_prometheus, :updating)

        expect(subject.perform(app.name, app.id, project.id, Time.now)).to be_nil
      end
    end

    it 'executes PrometheusUpdateService' do
      app = create(:clusters_applications_prometheus, :installed)

      expect(prometheus_update_service).to receive(:execute)

      subject.perform(app.name, app.id, project.id, Time.now)
    end

    context 'with exclusive lease' do
      let(:application) { create(:clusters_applications_prometheus, :installed) }
      let(:lease_key) { "#{described_class.name.underscore}-#{application.id}" }

      before do
        # The lease for `application` is held, so its updates must be rejected.
        allow(Gitlab::ExclusiveLease).to receive(:new)
        stub_exclusive_lease_taken(lease_key)
      end

      it 'does not allow same app to be updated concurrently by same project' do
        expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)

        subject.perform(application.name, application.id, project.id, Time.now)
      end

      it 'does not allow same app to be updated concurrently by different project' do
        other_project = create(:project)

        expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)

        subject.perform(application.name, application.id, other_project.id, Time.now)
      end

      it 'allows different app to be updated concurrently by same project' do
        other_app = create(:clusters_applications_prometheus, :installed)

        stub_exclusive_lease("#{described_class.name.underscore}-#{other_app.id}")

        expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
          .with(other_app, project)

        subject.perform(other_app.name, other_app.id, project.id, Time.now)
      end

      it 'allows different app to be updated by different project' do
        other_app = create(:clusters_applications_prometheus, :installed)
        other_project = create(:project)

        stub_exclusive_lease("#{described_class.name.underscore}-#{other_app.id}")

        expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
          .with(other_app, other_project)

        subject.perform(other_app.name, other_app.id, other_project.id, Time.now)
      end
    end
  end
end
diff --git a/spec/workers/cluster_wait_for_app_update_worker_spec.rb b/spec/workers/cluster_wait_for_app_update_worker_spec.rb
new file mode 100644
index 00000000000..f1206bd85cb
--- /dev/null
+++ b/spec/workers/cluster_wait_for_app_update_worker_spec.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
describe ClusterWaitForAppUpdateWorker do
  let(:check_upgrade_progress_service) { spy }

  before do
    allow(::Clusters::Applications::CheckUpgradeProgressService).to receive(:new).and_return(check_upgrade_progress_service)
  end

  it 'runs CheckUpgradeProgressService when application is found' do
    app = create(:clusters_applications_prometheus)

    expect(check_upgrade_progress_service).to receive(:execute)

    subject.perform(app.name, app.id)
  end

  it 'does not run CheckUpgradeProgressService when application is not found' do
    # A missing record must surface as RecordNotFound before the service runs.
    expect(check_upgrade_progress_service).not_to receive(:execute)

    expect { subject.perform("prometheus", -1) }.to raise_error(ActiveRecord::RecordNotFound)
  end
end