# app/services/spam/spam_verdict_service.rb (gitlab.com/gitlab-org/gitlab-foss.git)
# frozen_string_literal: true

module Spam
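  # Combines spam verdicts from the external Spamcheck service and from
  # Akismet into a single verdict, favouring the most restrictive one.
  #
  # A minimal usage sketch (`current_user`, `issue` and `request` are
  # illustrative placeholders, not defined in this file; the exact
  # `options` keys shown are an assumption):
  #
  #   verdict = Spam::SpamVerdictService.new(
  #     user: current_user,
  #     target: issue,
  #     options: { ip_address: request.remote_ip, user_agent: request.user_agent }
  #   ).execute
  #   # => e.g. ALLOW, CONDITIONAL_ALLOW or DISALLOW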
  class SpamVerdictService
    include AkismetMethods
    include SpamConstants

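    # @param user [User] the user performing the potentially spammy action
    # @param target [Object] the spammable record under check (e.g. an issue)
    # @param options [Hash] request metadata (e.g. IP address, user agent)
    # @param context [Hash] extra caller context forwarded to Spamcheck
    # @param extra_features [Hash] additional signals forwarded to Spamcheck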
    def initialize(user:, target:, options:, context: {}, extra_features: {})
      @target = target
      @user = user
      @options = options
      @context = context
      @extra_features = extra_features
    end

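    # Queries Spamcheck (timing the round trip) and Akismet, keeps only the
    # verdicts we recognise, and returns the most restrictive of them.
    # Returns ALLOW when neither backend produced a usable verdict.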
    def execute
      spamcheck_verdict = nil

      external_spam_check_round_trip_time = Benchmark.realtime do
        spamcheck_verdict = get_spamcheck_verdict
      end

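      # Record the round-trip time of the external check, labelled by verdict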
      histogram.observe({ result: spamcheck_verdict.upcase }, external_spam_check_round_trip_time) if spamcheck_verdict

      akismet_verdict = get_akismet_verdict

      # Filter out anything we don't recognise, including nils.
      valid_verdicts = [spamcheck_verdict, akismet_verdict].compact.select { |r| SUPPORTED_VERDICTS.key?(r) }

      # Treat nils (e.g. the external service was unavailable) as ALLOW
      return ALLOW unless valid_verdicts.any?

      # Favour the most restrictive verdict
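      # (restrictive verdicts carry lower priority values, so for example a
      # pair of [ALLOW, DISALLOW] verdicts resolves to DISALLOW)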
      final_verdict = valid_verdicts.min_by { |v| SUPPORTED_VERDICTS[v][:priority] }

      # The target or the user can override the verdict via the `allow_possible_spam` setting
      final_verdict = OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM if override_via_allow_possible_spam?(verdict: final_verdict)

      logger.info(
        class: self.class.name,
        akismet_verdict: akismet_verdict,
        spam_check_verdict: spamcheck_verdict,
        spam_check_rtt: external_spam_check_round_trip_time, # Benchmark.realtime already returns a Float in seconds
        final_verdict: final_verdict,
        username: user.username,
        user_id: user.id,
        target_type: target.class.to_s,
        project_id: target.project_id
      )

      final_verdict
    end

    private

    attr_reader :user, :target, :options, :context, :extra_features

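    # Akismet gives a binary spam/ham answer: for spam we return
    # CONDITIONAL_ALLOW when reCAPTCHA is available to verify the user,
    # otherwise DISALLOW.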
    def get_akismet_verdict
      if akismet.spam?
        Gitlab::Recaptcha.enabled? ? CONDITIONAL_ALLOW : DISALLOW
      else
        ALLOW
      end
    end

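    # Returns the verdict from the external Spamcheck endpoint, or nil when
    # the endpoint is disabled or the request fails; nil is treated upstream
    # as "no verdict".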
    def get_spamcheck_verdict
      return unless Gitlab::CurrentSettings.spam_check_endpoint_enabled

      begin
        result = spamcheck_client.spam?(spammable: target, user: user, context: context, extra_features: extra_features)

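        # Persist the returned spam score when Spamcheck actually evaluated
        # the target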
        if result.evaluated?
          Abuse::TrustScore.create!(user: user, score: result.score, source: :spamcheck)
        end

        result.verdict

      rescue StandardError => e
        Gitlab::ErrorTracking.log_exception(e, error: ERROR_TYPE)
        nil
      end
    end

    def override_via_allow_possible_spam?(verdict:)
      # If the current verdict is already more permissive than the override
      # verdict (i.e. its priority value is greater), there is nothing to override.
      return false if SUPPORTED_VERDICTS[verdict][:priority] > SUPPORTED_VERDICTS[OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM][:priority]

      allow_possible_spam?
    end

    def allow_possible_spam?
      target.allow_possible_spam?(user) || user.allow_possible_spam?
    end

    def spamcheck_client
      @spamcheck_client ||= Gitlab::Spamcheck::Client.new
    end

    def logger
      @logger ||= Gitlab::AppJsonLogger.build
    end

    def histogram
      @histogram ||= Gitlab::Metrics.histogram(:gitlab_spamcheck_request_duration_seconds, 'Request duration to the anti-spam service')
    end
  end
end

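# `prepend_mod` lets GitLab EE prepend an EE-specific module that can extend
# or override this class; it is a no-op in the FOSS codebase.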
Spam::SpamVerdictService.prepend_mod