gitlab.com/gitlab-org/gitlab-foss.git

author    GitLab Bot <gitlab-bot@gitlab.com>  2022-03-02 03:13:45 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2022-03-02 03:13:45 +0300
commit    90c386a7b0f2701abeb1e86517fc1d5dea231c09 (patch)
tree      bd31e90b87c7986276daee9020a065313db7c3ba /app/services/spam
parent    70fe7ce74ba4a8430c88ec6e3f4c60475a69fe21 (diff)
Add latest changes from gitlab-org/gitlab@master
Diffstat (limited to 'app/services/spam')
 -rw-r--r--  app/services/spam/spam_action_service.rb   | 23
 -rw-r--r--  app/services/spam/spam_constants.rb         | 18
 -rw-r--r--  app/services/spam/spam_verdict_service.rb   | 17
 3 files changed, 35 insertions, 23 deletions
diff --git a/app/services/spam/spam_action_service.rb b/app/services/spam/spam_action_service.rb
index 2a28b66f09b..4fa9c0e4993 100644
--- a/app/services/spam/spam_action_service.rb
+++ b/app/services/spam/spam_action_service.rb
@@ -65,22 +65,19 @@ module Spam
       # ask the SpamVerdictService what to do with the target.
       spam_verdict_service.execute.tap do |result|
         case result
-        when CONDITIONAL_ALLOW
-          # at the moment, this means "ask for reCAPTCHA"
-          create_spam_log
-
-          break if target.allow_possible_spam?
-
-          target.needs_recaptcha!
-        when DISALLOW
-          # TODO: remove `unless target.allow_possible_spam?` once this flag has been passed to `SpamVerdictService`
-          # https://gitlab.com/gitlab-org/gitlab/-/issues/214739
-          target.spam! unless target.allow_possible_spam?
-          create_spam_log
         when BLOCK_USER
           # TODO: improve BLOCK_USER handling, non-existent until now
           # https://gitlab.com/gitlab-org/gitlab/-/issues/329666
-          target.spam! unless target.allow_possible_spam?
+          target.spam!
+          create_spam_log
+        when DISALLOW
+          target.spam!
+          create_spam_log
+        when CONDITIONAL_ALLOW
+          # This means "require a CAPTCHA to be solved"
+          target.needs_recaptcha!
+          create_spam_log
+        when OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM
           create_spam_log
         when ALLOW
           target.clear_spam_flags!
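
The net effect of the reordered branches: BLOCK_USER, DISALLOW and CONDITIONAL_ALLOW now act on the target unconditionally, because the `allow_possible_spam` override is applied earlier, inside SpamVerdictService, and reaches this case statement as its own OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM verdict. A minimal standalone sketch of the new verdict-to-action mapping (not GitLab's actual class; `FakeTarget` and `handle` are invented for illustration, and the `create_spam_log` call made in each of the four non-ALLOW branches above is omitted):

FakeTarget = Struct.new(:state) do
  def spam!;             self.state = :marked_as_spam;  end
  def needs_recaptcha!;  self.state = :needs_recaptcha; end
  def clear_spam_flags!; self.state = :clean;           end
end

def handle(verdict, target)
  case verdict
  when 'block', 'disallow'
    target.spam!                  # flagged outright; no allow_possible_spam? guard any more
  when 'conditional_allow'
    target.needs_recaptcha!       # require a CAPTCHA to be solved
  when 'override_via_allow_possible_spam'
    nil                           # only a spam log is written; the target is left untouched
  when 'allow'
    target.clear_spam_flags!
  end
end

target = FakeTarget.new(:clean)
handle('conditional_allow', target)
target.state  # => :needs_recaptcha

The verdict strings used here are the values defined in spam_constants.rb below.
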
diff --git a/app/services/spam/spam_constants.rb b/app/services/spam/spam_constants.rb
index b654fbbbcc8..d300525710c 100644
--- a/app/services/spam/spam_constants.rb
+++ b/app/services/spam/spam_constants.rb
@@ -2,11 +2,12 @@
 
 module Spam
   module SpamConstants
-    CONDITIONAL_ALLOW = "conditional_allow"
-    DISALLOW = "disallow"
-    ALLOW = "allow"
-    BLOCK_USER = "block"
-    NOOP = "noop"
+    BLOCK_USER = 'block'
+    DISALLOW = 'disallow'
+    CONDITIONAL_ALLOW = 'conditional_allow'
+    OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM = 'override_via_allow_possible_spam'
+    ALLOW = 'allow'
+    NOOP = 'noop'
 
     SUPPORTED_VERDICTS = {
       BLOCK_USER => {
@@ -18,11 +19,14 @@ module Spam
       CONDITIONAL_ALLOW => {
         priority: 3
       },
-      ALLOW => {
+      OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM => {
         priority: 4
       },
-      NOOP => {
+      ALLOW => {
         priority: 5
+      },
+      NOOP => {
+        priority: 6
       }
     }.freeze
   end
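
In this table a lower priority value means a more restrictive verdict, and SpamVerdictService reduces multiple check results by taking the minimum (see the `min_by` call in the next file). A small illustration, not part of the change, with the priorities copied from the new constants:

priorities = {
  'block' => 1, 'disallow' => 2, 'conditional_allow' => 3,
  'override_via_allow_possible_spam' => 4, 'allow' => 5, 'noop' => 6
}

['allow', 'conditional_allow'].min_by { |v| priorities[v] }   # => "conditional_allow"
['noop', 'disallow', 'allow'].min_by { |v| priorities[v] }    # => "disallow"
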
diff --git a/app/services/spam/spam_verdict_service.rb b/app/services/spam/spam_verdict_service.rb
index c8bdcf4310b..e73b2666c02 100644
--- a/app/services/spam/spam_verdict_service.rb
+++ b/app/services/spam/spam_verdict_service.rb
@@ -39,21 +39,24 @@ module Spam
       return ALLOW unless valid_results.any?
 
       # Favour the most restrictive result.
-      final_verdict = valid_results.min_by { |v| SUPPORTED_VERDICTS[v][:priority] }
+      verdict = valid_results.min_by { |v| SUPPORTED_VERDICTS[v][:priority] }
+
+      # The target can override the verdict via the `allow_possible_spam` feature flag
+      verdict = OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM if override_via_allow_possible_spam?(verdict: verdict)
 
       logger.info(class: self.class.name,
                   akismet_verdict: akismet_verdict,
                   spam_check_verdict: original_spamcheck_result,
                   extra_attributes: spamcheck_attribs,
                   spam_check_rtt: external_spam_check_round_trip_time.real,
-                  final_verdict: final_verdict,
+                  final_verdict: verdict,
                   username: user.username,
                   user_id: user.id,
                   target_type: target.class.to_s,
                   project_id: target.project_id
                  )
 
-      final_verdict
+      verdict
     end
 
     private
@@ -87,6 +90,14 @@ module Spam
       end
     end
 
+    def override_via_allow_possible_spam?(verdict:)
+      # If the verdict is already going to allow (because current verdict's priority value is greater
+      # than the override verdict's priority value), then we don't need to override it.
+      return false if SUPPORTED_VERDICTS[verdict][:priority] > SUPPORTED_VERDICTS[OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM][:priority]
+
+      target.allow_possible_spam?
+    end
+
     def spamcheck_client
       @spamcheck_client ||= Gitlab::Spamcheck::Client.new
     end
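
The new `override_via_allow_possible_spam?` helper only rewrites verdicts that are stricter than the override verdict itself (priorities 1-3); ALLOW and NOOP already permit the action and pass through unchanged. A sketch of that decision, reusing the `priorities` hash from the snippet after spam_constants.rb and a plain boolean in place of `target.allow_possible_spam?` (`final_verdict` here is an invented helper, not part of the diff):

def final_verdict(verdict, priorities, allow_possible_spam:)
  # Mirrors the helper above: skip the override when the verdict already allows the action.
  return verdict unless allow_possible_spam
  return verdict if priorities[verdict] > priorities['override_via_allow_possible_spam']

  'override_via_allow_possible_spam'
end

final_verdict('disallow', priorities, allow_possible_spam: true)   # => "override_via_allow_possible_spam"
final_verdict('allow', priorities, allow_possible_spam: true)      # => "allow"
final_verdict('disallow', priorities, allow_possible_spam: false)  # => "disallow"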