contrast-agent 7.3.2 → 7.4.1
- checksums.yaml +4 -4
- data/lib/contrast/agent/middleware/middleware.rb +1 -1
- data/lib/contrast/agent/protect/input_analyzer/input_analyzer.rb +9 -11
- data/lib/contrast/agent/protect/input_analyzer/worth_watching_analyzer.rb +55 -20
- data/lib/contrast/agent/protect/policy/rule_applicator.rb +1 -4
- data/lib/contrast/agent/protect/rule/base.rb +61 -26
- data/lib/contrast/agent/protect/rule/bot_blocker/bot_blocker.rb +12 -4
- data/lib/contrast/agent/protect/rule/cmdi/cmd_injection.rb +19 -15
- data/lib/contrast/agent/protect/rule/cmdi/cmdi_backdoors.rb +2 -4
- data/lib/contrast/agent/protect/rule/cmdi/cmdi_base_rule.rb +2 -1
- data/lib/contrast/agent/protect/rule/deserialization/deserialization.rb +4 -4
- data/lib/contrast/agent/protect/rule/input_classification/base.rb +7 -2
- data/lib/contrast/agent/protect/rule/input_classification/encoding.rb +1 -1
- data/lib/contrast/agent/protect/rule/no_sqli/no_sqli.rb +5 -2
- data/lib/contrast/agent/protect/rule/path_traversal/path_traversal.rb +20 -8
- data/lib/contrast/agent/protect/rule/path_traversal/path_traversal_semantic_security_bypass.rb +2 -2
- data/lib/contrast/agent/protect/rule/sqli/sqli.rb +8 -1
- data/lib/contrast/agent/protect/rule/sqli/sqli_base_rule.rb +2 -3
- data/lib/contrast/agent/protect/rule/sqli/sqli_semantic/sqli_dangerous_functions.rb +3 -4
- data/lib/contrast/agent/protect/rule/unsafe_file_upload/unsafe_file_upload.rb +3 -0
- data/lib/contrast/agent/protect/rule/utils/builders.rb +3 -4
- data/lib/contrast/agent/protect/rule/utils/filters.rb +32 -16
- data/lib/contrast/agent/protect/rule/xss/xss.rb +80 -0
- data/lib/contrast/agent/protect/rule/xxe/xxe.rb +9 -2
- data/lib/contrast/agent/protect/state.rb +110 -0
- data/lib/contrast/agent/reporting/details/xss_match.rb +17 -0
- data/lib/contrast/agent/reporting/input_analysis/input_analysis.rb +32 -0
- data/lib/contrast/agent/reporting/reporting_events/application_defend_attack_sample_activity.rb +2 -0
- data/lib/contrast/agent/reporting/reporting_events/architecture_component.rb +2 -0
- data/lib/contrast/agent/reporting/reporting_events/finding.rb +1 -4
- data/lib/contrast/agent/reporting/reporting_events/finding_event.rb +4 -0
- data/lib/contrast/agent/reporting/reporting_events/finding_request.rb +2 -0
- data/lib/contrast/agent/reporting/reporting_events/observed_library_usage.rb +2 -0
- data/lib/contrast/agent/reporting/reporting_events/preflight_message.rb +9 -8
- data/lib/contrast/agent/reporting/reporting_events/reportable_hash.rb +30 -6
- data/lib/contrast/agent/reporting/reporting_utilities/reporter_client.rb +1 -1
- data/lib/contrast/agent/reporting/reporting_utilities/resend.rb +1 -1
- data/lib/contrast/agent/reporting/reporting_utilities/response_handler_utils.rb +1 -5
- data/lib/contrast/agent/reporting/settings/protect.rb +3 -3
- data/lib/contrast/agent/reporting/settings/sampling.rb +5 -4
- data/lib/contrast/agent/request/request_context_extend.rb +0 -2
- data/lib/contrast/agent/version.rb +1 -1
- data/lib/contrast/components/agent.rb +3 -5
- data/lib/contrast/components/api.rb +3 -3
- data/lib/contrast/components/assess.rb +4 -0
- data/lib/contrast/components/assess_rules.rb +1 -2
- data/lib/contrast/components/base.rb +1 -2
- data/lib/contrast/components/config/sources.rb +23 -0
- data/lib/contrast/components/logger.rb +19 -0
- data/lib/contrast/components/protect.rb +55 -14
- data/lib/contrast/components/sampling.rb +5 -12
- data/lib/contrast/components/security_logger.rb +17 -0
- data/lib/contrast/components/settings.rb +110 -76
- data/lib/contrast/config/certification_configuration.rb +1 -1
- data/lib/contrast/config/configuration_files.rb +0 -2
- data/lib/contrast/config/diagnostics/config.rb +3 -3
- data/lib/contrast/config/diagnostics/effective_config.rb +1 -1
- data/lib/contrast/config/diagnostics/environment_variables.rb +21 -11
- data/lib/contrast/config/diagnostics/monitor.rb +1 -1
- data/lib/contrast/config/diagnostics/singleton_tools.rb +170 -0
- data/lib/contrast/config/diagnostics/source_config_value.rb +14 -9
- data/lib/contrast/config/diagnostics/tools.rb +23 -84
- data/lib/contrast/config/request_audit_configuration.rb +1 -1
- data/lib/contrast/config/server_configuration.rb +3 -15
- data/lib/contrast/configuration.rb +5 -2
- data/lib/contrast/framework/manager.rb +4 -3
- data/lib/contrast/framework/manager_extend.rb +3 -1
- data/lib/contrast/framework/rack/support.rb +11 -2
- data/lib/contrast/framework/rails/support.rb +2 -2
- data/lib/contrast/logger/cef_log.rb +30 -4
- data/lib/contrast/utils/io_util.rb +3 -0
- data/lib/contrast/utils/log_utils.rb +22 -11
- data/lib/contrast/utils/request_utils.rb +1 -1
- data/lib/contrast/utils/timer.rb +1 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2b62e21b5f8d2c3d786aaaac33005487eab07fd07438e9fe20532742f926bcb0
+  data.tar.gz: 37ff12d328d1a58bddb75e4f0e9c799b44df881daaa78bea62f565f95ae715ff
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 46f9f576d3a28befa4681e73a250af98602c100d50ace13a1e3fcaa5a22ca2a0d0695b4b19492677d57183ff56203d2f57a5470b7c6fa0fc9c1b15bbbd49dc16
+  data.tar.gz: 427a3dafa3d8108a4aa2130ff02c8c4d52539f7d1c7b265ba78d60dc10f86f142a2b4f1132ae0be02c0c9a41c043ab1459c0a0e3a836dbced2f0db4b550aa970
data/lib/contrast/agent/middleware/middleware.rb
CHANGED
@@ -191,7 +191,7 @@ module Contrast
       # Now we can build the ia_results only for postfilter rules.
       context.protect_postfilter_ia
       # Process Worth Watching Inputs for v2 rules
-      Contrast::Agent.worth_watching_analyzer&.add_to_queue(context
+      Contrast::Agent.worth_watching_analyzer&.add_to_queue(context)

       if Contrast::Agent.framework_manager.streaming?(env)
         context.reset_activity
data/lib/contrast/agent/protect/input_analyzer/input_analyzer.rb
CHANGED
@@ -33,10 +33,10 @@ module Contrast
       DISPOSITION_FILENAME = 'filename'
       PREFILTER_RULES = %w[bot-blocker unsafe-file-upload reflected-xss].cs__freeze
       INFILTER_RULES = %w[
-        sql-injection cmd-injection
+        sql-injection cmd-injection bot-blocker unsafe-file-upload path-traversal
         nosql-injection
       ].cs__freeze
-      POSTFILTER_RULES = %w[sql-injection cmd-injection
+      POSTFILTER_RULES = %w[sql-injection cmd-injection path-traversal nosql-injection].cs__freeze
       AGENTLIB_TIMEOUT = 5.cs__freeze
       TIMEOUT_ERROR_MESSAGE = '[AgentLib] Timed out when processing InputAnalysisResult'
       STANDARD_ERROR_MESSAGE = '[InputAnalyzer] Exception raise while doing input analysis:'
@@ -100,10 +100,15 @@ module Contrast
       # @param input_analysis [Contrast::Agent::Reporting::InputAnalysis] from analyze method.
       # @param interval [Integer] The timeout determined for the AgentLib analysis to be performed.
       def input_classification_for rule_id, input_analysis, interval: AGENTLIB_TIMEOUT
-        return
+        return if input_analysis.analysed_rules.include?(rule_id)
+        return if input_analysis.no_inputs?
         return unless (protect_rule = Contrast::PROTECT.rule(rule_id)) && protect_rule.enabled?

         input_analysis.inputs.each do |input_type, value|
+          # TODO: RUBY-2110 Update the HEADER handling if possible.
+          # Analyze only Header values:
+          # This may break bot blocker rule:
+          # value = value.values if input_type == HEADER
           next if value.nil? || value.empty?

           Timeout.timeout(interval) do
@@ -128,14 +133,12 @@ module Contrast
       # for each protect rule.
       # @param prefilter [Boolean] flag to set input analysis for prefilter rules only
       # @param postfilter [Boolean] flag to set input analysis for postfilter rules.
-      # @param infilter [Boolean]
       # @param interval [Integer] The timeout determined for the AgentLib analysis to be performed
       # @return input_analysis [Contrast::Agent::Reporting::InputAnalysis, nil]
       # @raise [Timeout::Error] If timeout is met.
       def input_classification(input_analysis,
                                prefilter: false,
                                postfilter: false,
-                               infilter: false,
                                interval: AGENTLIB_TIMEOUT)
         return unless input_analysis

@@ -147,12 +150,7 @@ module Contrast
                   INFILTER_RULES
                 end

-        rules.each do |rule_id|
-          # Check to see if rules is already triggered only for infilter:
-          next if input_analysis.triggered_rules.include?(rule_id) && infilter
-
-          input_classification_for(rule_id, input_analysis, interval: interval)
-        end
+        rules.each { |rule_id| input_classification_for(rule_id, input_analysis, interval: interval) }
         input_analysis
       end

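Illustrative sketch (not part of the gem diff): the hunks above collapse the per-rule loop into a single rules.each call, skip rules that were already analysed, and keep a timeout budget around each evaluation. A minimal standalone version of that pattern, assuming hypothetical rule names and a placeholder heuristic in place of the real analysis call:

    require 'set'
    require 'timeout'

    # Minimal sketch of a per-rule classification loop with a timeout budget per input.
    # Rule names, the suspicious? heuristic, and the result shape are hypothetical stand-ins.
    class TinyInputAnalyzer
      RULES = %w[sql-injection cmd-injection path-traversal nosql-injection].freeze
      TIMEOUT_SECONDS = 5

      def initialize
        @analysed_rules = Set.new
        @results = []
      end

      def classify(inputs)
        RULES.each { |rule_id| classify_for(rule_id, inputs) }
        @results
      end

      private

      def classify_for(rule_id, inputs)
        return if @analysed_rules.include?(rule_id) # already analysed in an earlier pass
        return if inputs.nil? || inputs.empty?      # nothing to score without inputs

        inputs.each do |input_type, value|
          next if value.nil? || value.empty?

          # Bound each evaluation so a slow analysis cannot stall the request thread.
          Timeout.timeout(TIMEOUT_SECONDS) do
            @results << { rule: rule_id, input_type: input_type, value: value } if suspicious?(value)
          end
        end
        @analysed_rules << rule_id
      rescue Timeout::Error
        warn "[TinyInputAnalyzer] timed out while analysing #{rule_id}"
      end

      # Placeholder heuristic standing in for the real input analysis.
      def suspicious?(value)
        value.include?("'") || value.include?(';')
      end
    end

    puts TinyInputAnalyzer.new.classify({ 'parameter' => '1; cat /etc/passwd' }).inspect

Skipping already-analysed rules up front is what lets one classifier be reused across prefilter, infilter and postfilter passes without re-scoring the same inputs.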
data/lib/contrast/agent/protect/input_analyzer/worth_watching_analyzer.rb
CHANGED
@@ -42,16 +42,15 @@ module Contrast
          next if queue.empty?

          report = false
-
-
-          results = build_results(stored_ia)
-          activity = Contrast::Agent::Reporting::ApplicationActivity.new(ia_request: stored_ia.request)
+          stored_context, stored_ia, results, activity = extract_from_context
+
          results.each do |result|
-            next unless (attack_result = eval_input(result))
+            next unless (attack_result = eval_input(stored_context, result, stored_ia))

            activity.attach_defend(attack_result)
            report = true
          end
+
          report_activity(activity) if report
          # Handle reporting of IA Cache statistics:
          enqueue_cache_event(stored_ia.request)
@@ -62,9 +61,22 @@ module Contrast
        end
      end

-      #
-
-
+      # build attack_results for all infilter active protect rules.
+      # Stored Context will update the logger context and build attack results for protect rules.
+      # Note: call only in thread loop as it extracts from the queue.
+      #
+      # @return [Array<stored_context, stored_ia, results, activity>]
+      def extract_from_context
+        stored_context = queue.pop
+        stored_ia = stored_context.agent_input_analysis
+        results = build_results(stored_ia)
+        activity = Contrast::Agent::Reporting::ApplicationActivity.new(ia_request: stored_ia.request)
+        [stored_context, stored_ia, results, activity]
+      end
+
+      # @param context [Contrast::Agent::RequestContext]
+      def add_to_queue context
+        return unless context

        if queue.size >= QUEUE_SIZE
          logger.debug('[WorthWatchingAnalyzer] queue at max size, skip input_result')
@@ -74,7 +86,7 @@ module Contrast
        # we need to save the ia which contains the request and saved extracted user inputs to
        # be evaluated on the thread rather than building results here. This way we allow the
        # request to continue and will build the attack results later.
-        queue <<
+        queue << context.dup
      end

      private
@@ -91,6 +103,9 @@ module Contrast
        Contrast::Agent::Protect::InputAnalyzer.lru_cache.clear_statistics
      end

+      # Enqueue for Telemetry reporting all base64 related events.
+      #
+      # @param request [Contrast::Agent::Request] stored request.
      def enqueue_encoding_event request
        return unless Contrast::Agent::Telemetry::Base.enabled?
        return unless Contrast::PROTECT.normalize_base64?
@@ -102,16 +117,16 @@ module Contrast

      # This method will build the attack results from the saved ia.
      #
-      # @param
+      # @param stored_ia [Contrast::Agent::Reporting::InputAnalysis]
      # @return attack_results [array<Contrast::Agent::Reporting::InputAnalysisResult>] all the results
      #   from the input analysis.
-      def build_results
+      def build_results stored_ia
        # Construct the input analysis for the all the infilter rules that were not triggered.
        # There is a set timeout for each rule to be analyzed in. The infilter flag will make
        # sure that if a rule is already triggered during the infilter phase it will not be analyzed
        # now, making sure we don't report same rule twice.
-        Contrast::Agent::Protect::InputAnalyzer.input_classification(
-        results =
+        Contrast::Agent::Protect::InputAnalyzer.input_classification(stored_ia)
+        results = stored_ia.results.reject do |val|
          val.score_level == Contrast::Agent::Reporting::InputAnalysisResult::SCORE_LEVEL::IGNORE
        end
        return results if results
@@ -119,39 +134,59 @@ module Contrast
        []
      end

+      # Evaluates the stored ia results and builds attack results if any.
+      #
+      # @param stored_context [Contrast::Agent::RequestContext]
      # @param ia_result Contrast::Agent::Reporting::InputAnalysisResult the WorthWatching InputAnalysisResult
+      # @param stored_ia [Contrast::Agent::Reporting::InputAnalysis] the stored InputAnalysis
      # @return [Contrast::Agent::Reporting::AttackResult, nil] InputAnalysisResult updated Result or nil
-      def eval_input ia_result
-        return
+      def eval_input stored_context, ia_result, stored_ia
+        return skip_log if ia_result.value.to_s.bytesize >= INPUT_BYTESIZE_THRESHOLD

-
-          #{ INPUT_BYTESIZE_THRESHOLD / 1024 }KB")
-        nil
+        build_attack_result(stored_context, ia_result, stored_ia)
      end

+      # Creates new Attack Event per rule that will be triggered or probed.
+      #
+      # @param stored_context [Contrast::Agent::RequestContext]
      # @param ia_result Contrast::Agent::Reporting::InputAnalysisResult the updated InputAnalysisResult
      #   with a score of :DEFINITEATTACK
+      # @param stored_ia [Contrast::Agent::Reporting::InputAnalysis] the stored InputAnalysis
      # @return [Contrast::Agent::Reporting::AttackResult] the attack result from
      #   this input
-      def build_attack_result ia_result
-
+      def build_attack_result stored_context, ia_result, stored_ia
+        return if stored_ia.triggered_rules.include?(ia_result.rule_id)
+
+        Contrast::PROTECT.rule(ia_result.rule_id).build_attack_without_match(stored_context, ia_result, nil)
      end

+      # @return [Queue]
      def queue
        @_queue ||= Queue.new
      end

+      # Reports all gather activities to batch.
+      #
+      # @param activity [Contrast::Agent::Reporting::ApplicationActivity]
      def report_activity activity
        logger.debug('[WorthWatchingAnalyzer] preparing to send activity batch')
        add_activity_to_batch(activity)
        report_batch
      end

+      # Deletes Queue and closes it.
      def delete_queue!
        @_queue&.clear
        @_queue&.close
        @_queue = nil
      end
+
+      # Logs a message that the input was skipped because it was too large.
+      def skip_log
+        logger.debug("[WorthWatchingAnalyzer] Skipping analysis: Input size is larger than
+          #{ INPUT_BYTESIZE_THRESHOLD / 1024 }KB")
+        nil
+      end
    end
  end
end
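Illustrative sketch (not from the gem): the analyzer above now enqueues a copy of the whole request context and a background loop pops it, rebuilds the results, and reports them later. A self-contained version of that bounded producer/consumer shape, with hypothetical names standing in for the agent's classes:

    # Sketch of the bounded-queue / background-worker shape used by the analyzer:
    # the request thread enqueues a copy of its context, a worker thread pops it,
    # builds results and reports them later. All names here are stand-ins.
    class DeferredAnalyzer
      QUEUE_SIZE = 50

      def initialize
        @queue = Queue.new
      end

      # Producer side: never block the request; drop work when the queue is full.
      def add_to_queue(context)
        return unless context

        if @queue.size >= QUEUE_SIZE
          warn '[DeferredAnalyzer] queue at max size, skipping input result'
          return
        end
        @queue << context.dup
      end

      # Consumer side: runs on its own thread until a stop marker arrives.
      def start
        @worker = Thread.new do
          loop do
            context = @queue.pop # blocks until work is available
            break if context == :stop

            evaluate(context)
          end
        end
        self
      end

      def stop
        @queue << :stop
        @worker&.join
      end

      private

      def evaluate(context)
        # Stand-in for building attack results and attaching them to an activity report.
        puts "deferred analysis for #{context[:path]} with #{context[:params].size} param(s)"
      end
    end

    analyzer = DeferredAnalyzer.new.start
    analyzer.add_to_queue({ path: '/login', params: { 'q' => "' OR 1=1 --" } })
    analyzer.stop

Capping the queue keeps the deferred analysis from growing without bound when traffic outpaces the worker.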
data/lib/contrast/agent/protect/policy/rule_applicator.rb
CHANGED
@@ -55,10 +55,7 @@ module Contrast
        return unless (ia = context.agent_input_analysis)

        Contrast::Agent::Protect::InputAnalyzer.input_classification_for(rule_id, ia)
-
-        # as already it's input applicable types has been analysed.
-        ia.triggered_rules << rule_name
-        ia
+        context.agent_input_analysis.record_analysed_rule(rule_id)
      end

      protected
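Illustrative sketch (not from the gem): the change above, together with the record_rule_triggered calls further down, replaces ad-hoc triggered_rules bookkeeping with explicit record_* calls. A tiny Set-backed version of that ledger, with hypothetical names:

    require 'set'

    # Sketch of the per-request bookkeeping the new record_* calls imply: which rules
    # have already been analysed and which actually triggered, so later phases can skip both.
    class AnalysisLedger
      def initialize
        @analysed_rules = Set.new
        @triggered_rules = Set.new
      end

      attr_reader :analysed_rules, :triggered_rules

      def record_analysed_rule(rule_id)
        @analysed_rules << rule_id
      end

      def record_rule_triggered(rule_id)
        @triggered_rules << rule_id
      end

      def analysed?(rule_id)
        @analysed_rules.include?(rule_id)
      end

      def triggered?(rule_id)
        @triggered_rules.include?(rule_id)
      end
    end

    ledger = AnalysisLedger.new
    ledger.record_analysed_rule('cmd-injection')
    ledger.record_rule_triggered('cmd-injection')
    puts ledger.triggered?('cmd-injection') # => true
    puts ledger.analysed?('sql-injection')  # => false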
data/lib/contrast/agent/protect/rule/base.rb
CHANGED
@@ -45,7 +45,6 @@ module Contrast
        #
        # @return mode [Symbol]
        def initialize
-          ::Contrast::PROTECT.defend_rules[rule_name] = self
          @mode = mode_from_settings
        end

@@ -63,6 +62,11 @@ module Contrast
          RULE_NAME
        end

+        # Update state form Settings or Configuration.
+        def update
+          @mode = mode_from_settings
+        end
+
        # Should return the short name.
        #
        # @return [String]
@@ -137,20 +141,30 @@ module Contrast
        end

        # With this we log to CEF
-        #
-        # @param result [Contrast::Agent::Reporting::AttackResult]
+        # @param result [Contrast::Agent::Reporting::InputAnalysisResult]
        # @param attack [Symbol] the type of message we want to send
        # @param value [String] the input value we want to log
-
+        # @param input_type [String] the input type we want to log
+        # @param context [Contrast::Agent::RequestContext]
+        def cef_logging result, attack = :ineffective_attack, value: nil, input_type: nil, context: nil
          sample = result.samples[0]
          outcome = result.response.to_s
-          input_type = sample
-          input_value = sample
-          cef_logger.send(attack, result.rule_id, outcome, input_type, input_value)
+          input_type = sample&.user_input&.input_type&.to_s || input_type
+          input_value = sample&.user_input&.value || value
+          cef_logger.send(attack, result.rule_id, outcome, input_type, input_value, context)
        end

        protected

+        # Records the rule being triggered at sink.
+        #
+        # @param context [Contrast::Agent::RequestContext]
+        def record_triggered context
+          return unless context
+
+          context.agent_input_analysis.record_rule_triggered(rule_name)
+        end
+
        # Assign the mode from active settings.
        #
        # @return mode [Symbol]
@@ -187,22 +201,20 @@ module Contrast
        # @param potential_attack_string [String, nil]
        # @param ia_results [Array<Contrast::Agent::Reporting::InputAnalysis>]
        # @param **kwargs
-        # @return [Contrast::Agent::Reporting, nil]
+        # @return [Contrast::Agent::Reporting::AttackResult, nil]
        def find_attacker_with_results context, potential_attack_string, ia_results, **kwargs
          logger.trace('Checking vectors for attacks', rule: rule_name, input: potential_attack_string)
+          return unless ia_results&.any?
+          return build_attack_without_match(context, ia_results[0], nil, **kwargs) unless potential_attack_string

-          result = nil
          ia_results.each do |ia_result|
-
-
-
-
-
-          else
-            result = build_attack_without_match(context, ia_result, result, **kwargs)
-          end
+            idx = potential_attack_string.index(ia_result.value)
+            next unless idx
+
+            result = build_attack_with_match(context, ia_result, result || nil, potential_attack_string, **kwargs)
+            return result if result
          end
-
+          nil
        end

        # By default, rules do not have to find attackers as they do not have
@@ -227,15 +239,17 @@ module Contrast
        #
        # @param context [Contrast::Agent::RequestContext] the context for
        #   the current request
-        # @param ia_result [Contrast::Agent::Reporting::
+        # @param ia_result [Contrast::Agent::Reporting::Settings::InputAnalysisResult]
        # @param result [Contrast::Agent::Reporting::AttackResult]
        # @param attack_string [String] Potential attack vector
        # @return [Contrast::Agent::Reporting::AttackResult]
        def update_successful_attack_response context, ia_result, result, attack_string = nil
+          cef_outcome = :successful_attack
          case mode
          when :MONITOR
            # We are checking the result as the ia_result would not contain the sub-rules.
            result.response = if SUSPICIOUS_REPORTING_RULES.include?(result&.rule_id)
+                                cef_outcome = :suspicious_attack
                                Contrast::Agent::Reporting::ResponseType::SUSPICIOUS
                              else
                                Contrast::Agent::Reporting::ResponseType::MONITORED
@@ -246,7 +260,11 @@ module Contrast

          ia_result.attack_count = ia_result.attack_count + 1 if ia_result
          log_rule_matched(context, ia_result, result.response, attack_string)
-
+          cef_logging(result,
+                      cef_outcome,
+                      value: ia_result&.value || attack_string,
+                      input_type: ia_result&.input_type,
+                      context: context)
          result
        end

@@ -261,17 +279,32 @@ module Contrast
        #   multiple inputs being found to violate the protection criteria
        # @return [Contrast::Agent::Reporting::AttackResult]
        def update_perimeter_attack_response context, ia_result, result
-
+          cef_outcome = :successful_attack
+          case mode
+          when :BLOCK_AT_PERIMETER
            result.response = if blocked_rule?(ia_result)
                                Contrast::Agent::Reporting::ResponseType::BLOCKED
                              else
                                Contrast::Agent::Reporting::ResponseType::BLOCKED_AT_PERIMETER
                              end
            log_rule_matched(context, ia_result, result.response)
-
+          when :BLOCK && rule_name == Contrast::Agent::Protect::Rule::Xss::NAME
+            # Handle cases like reflected-xss:
+            result.response = Contrast::Agent::Reporting::ResponseType::BLOCKED
+            log_rule_matched(context, ia_result, result.response)
+          else
+            # Handles all other cases including Reflected-xss in MONITOR mode.
+            return unless ia_result.nil? || ia_result.attack_count.zero?
+
            result.response = assign_reporter_response_type(ia_result)
+            cef_outcome = suspicious_rule?(ia_result) ? :suspicious_attack : :ineffective_attack
            log_rule_probed(context, ia_result)
          end
+          cef_logging(result,
+                      cef_outcome,
+                      value: ia_result&.value,
+                      input_type: ia_result&.input_type,
+                      context: context)

          result
        end
@@ -330,7 +363,7 @@ module Contrast
        #   the rule id.
        #
        # @param context [Contrast::Agent::RequestContext]
-        # @return [Array<Contrast::Agent::Reporting::
+        # @return [Array<Contrast::Agent::Reporting::InputAnalysisResult>]
        def gather_ia_results context
          return [] unless context&.agent_input_analysis&.results

@@ -345,7 +378,8 @@ module Contrast
        def blocked_violation? result
          return false unless result

-          result.response == Contrast::Agent::Reporting::ResponseType::BLOCKED
+          blocked? && (result.response == Contrast::Agent::Reporting::ResponseType::BLOCKED ||
+            result.response == Contrast::Agent::Reporting::ResponseType::BLOCKED_AT_PERIMETER)
        end

        private
@@ -355,7 +389,8 @@ module Contrast
        def blocked_rule? ia_result
          [
            Contrast::Agent::Protect::Rule::Sqli::NAME,
-            Contrast::Agent::Protect::Rule::NoSqli::NAME
+            Contrast::Agent::Protect::Rule::NoSqli::NAME,
+            Contrast::Agent::Protect::Rule::Xss::NAME
          ].include?(ia_result&.rule_id)
        end

@@ -388,7 +423,7 @@ module Contrast

        # @param context [Contrast::Agent::RequestContext]
        # @param potential_attack_string [String, nil]
-        # @return [Contrast::Agent::Reporting, nil]
+        # @return [Contrast::Agent::Reporting::AttackResult, nil]
        def find_postfilter_attacker context, potential_attack_string, **kwargs
          ia_results = gather_ia_results(context)
          ia_results.select! do |ia_result|
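Illustrative sketch (not from the gem): the base-rule hunks above gate the raised security exception on blocked_violation? (a blocking mode plus a BLOCKED or BLOCKED_AT_PERIMETER response) rather than on blocked? alone. A hedged, standalone version of that decision, with placeholder constants and class names:

    # Sketch of the "raise only on a real block" gate: the exception is raised only when
    # the rule is in a blocking mode AND the reported response is a blocking response.
    # Constants and class names mirror the diff loosely; everything else is a stand-in.
    module ResponseType
      BLOCKED              = :blocked
      BLOCKED_AT_PERIMETER = :blocked_at_perimeter
      MONITORED            = :monitored
    end

    class SecurityViolation < StandardError; end

    class MiniRule
      BLOCKING_MODES = %i[BLOCK BLOCK_AT_PERIMETER].freeze

      def initialize(mode)
        @mode = mode # :BLOCK, :BLOCK_AT_PERIMETER or :MONITOR
      end

      def blocked?
        BLOCKING_MODES.include?(@mode)
      end

      def blocked_violation?(response)
        blocked? &&
          [ResponseType::BLOCKED, ResponseType::BLOCKED_AT_PERIMETER].include?(response)
      end

      # Monitored findings are reported but never interrupt the request.
      def enforce!(response)
        raise SecurityViolation, 'rule triggered, request blocked' if blocked_violation?(response)

        :reported_only
      end
    end

    puts MiniRule.new(:MONITOR).enforce!(ResponseType::MONITORED) # => reported_only
    begin
      MiniRule.new(:BLOCK).enforce!(ResponseType::BLOCKED)
    rescue SecurityViolation => e
      puts "blocked: #{e.message}"
    end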
data/lib/contrast/agent/protect/rule/bot_blocker/bot_blocker.rb
CHANGED
@@ -19,6 +19,7 @@ module Contrast

          NAME = 'bot-blocker'
          APPLICABLE_USER_INPUTS = [HEADER].cs__freeze
+          BLOCK_MESSAGE = 'Bot Blocker rule triggered. Unsafe Bot blocked.'

          def rule_name
            NAME
@@ -28,6 +29,13 @@ module Contrast
            APPLICABLE_USER_INPUTS
          end

+          # Return the specific blocking message for this rule.
+          #
+          # @return [String] the reason for the raised security exception.
+          def block_message
+            BLOCK_MESSAGE
+          end
+
          # Bot blocker input classification
          #
          # @return [module<Contrast::Agent::Protect::Rule::BotBlockerInputClassification>]
@@ -49,13 +57,13 @@ module Contrast
                        ia_result.score_level == Contrast::Agent::Reporting::ScoreLevel::DEFINITEATTACK

            result = build_attack_without_match(context, ia_result, nil)
-
-            cef_logging(result, :successful_attack) if result
-            return unless blocked?
+            return unless result

+            append_to_activity(context, result)
+            record_triggered(context)
            # Raise BotBlocker error
            exception_message = "#{ rule_name } rule triggered. Unsafe Bot blocked."
-            raise(Contrast::SecurityException.new(self, exception_message))
+            raise(Contrast::SecurityException.new(self, exception_message)) if blocked_violation?(result)
          end

          # @param context [Contrast::Agent::RequestContext]
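Illustrative sketch (not from the gem): the bot-blocker hunk above records the trigger, appends the result to the activity, and raises only when the block is genuinely enforced. A minimal standalone version of that prefilter shape, with example patterns:

    # Sketch of a prefilter-style bot check: classify the User-Agent header, record the
    # trigger, and raise only when the rule runs in block mode. Patterns are examples only.
    class BotBlockedError < StandardError; end

    class TinyBotBlocker
      BLOCK_MESSAGE = 'Bot Blocker rule triggered. Unsafe Bot blocked.'
      BAD_AGENT_PATTERNS = [/sqlmap/i, /nikto/i, /masscan/i].freeze

      def initialize(block_mode: true)
        @block_mode = block_mode
        @triggered_rules = []
      end

      attr_reader :triggered_rules

      def prefilter(headers)
        agent = headers['User-Agent'].to_s
        return unless BAD_AGENT_PATTERNS.any? { |pattern| agent.match?(pattern) }

        @triggered_rules << 'bot-blocker' # record the trigger so it is reported once
        raise BotBlockedError, BLOCK_MESSAGE if @block_mode
      end
    end

    blocker = TinyBotBlocker.new(block_mode: true)
    begin
      blocker.prefilter('User-Agent' => 'sqlmap/1.7')
    rescue BotBlockedError => e
      puts "blocked: #{e.message}"
    end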
data/lib/contrast/agent/protect/rule/cmdi/cmd_injection.rb
CHANGED
@@ -21,25 +21,32 @@ module Contrast
          include Contrast::Components::Logger::InstanceMethods
          include Contrast::Agent::Reporting::InputType
          NAME = 'cmd-injection'
-          APPLICABLE_USER_INPUTS = [
-            BODY, COOKIE_VALUE, HEADER, PARAMETER_NAME,
-            PARAMETER_VALUE, JSON_VALUE, MULTIPART_VALUE,
-            MULTIPART_FIELD_NAME, XML_VALUE, DWR_VALUE
-          ].cs__freeze

          def rule_name
            NAME
          end

+          # Sub-rules forwarders:
+
+          # @return [Contrast::Agent::Protect::Rule::CmdiBackdoors]
+          def command_backdoors
+            @_command_backdoors ||= Contrast::Agent::Protect::Rule::CmdiBackdoors.new
+          end
+
+          # @return [Contrast::Agent::Protect::Rule::CmdiChainedCommand]
+          def semantic_chained_commands
+            @_semantic_chained_commands ||= Contrast::Agent::Protect::Rule::CmdiChainedCommand.new
+          end
+
+          def semantic_dangerous_paths
+            @_semantic_dangerous_paths ||= Contrast::Agent::Protect::Rule::CmdiDangerousPath.new
+          end
+
          # Array of sub_rules:
          #
          # @return [Array]
          def sub_rules
-            @_sub_rules ||= [
-              Contrast::Agent::Protect::Rule::CmdiBackdoors.new,
-              Contrast::Agent::Protect::Rule::CmdiChainedCommand.new,
-              Contrast::Agent::Protect::Rule::CmdiDangerousPath.new
-            ].cs__freeze
+            @_sub_rules ||= [command_backdoors, semantic_chained_commands, semantic_dangerous_paths].cs__freeze
          end

          def applicable_user_inputs
@@ -72,12 +79,9 @@ module Contrast
            return unless result

            append_to_activity(context, result)
-
-
-            return unless blocked?
-
+            record_triggered(context)
            # Raise cmdi error
-            raise_error(classname, method)
+            raise_error(classname, method) if blocked_violation?(result)
          end
        end
      end
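Illustrative sketch (not from the gem): the command-injection rule above now exposes its semantic sub-rules through memoized accessors and builds sub_rules from them. A minimal standalone version of that memoization pattern, with simplified placeholder sub-rule classes:

    # Sketch of memoized sub-rule accessors feeding a frozen sub_rules list.
    # The sub-rule classes and their applies? checks are simplified placeholders.
    class CommandBackdoors
      def applies?(cmd)
        cmd.match?(/\bnc\b|\bncat\b/)
      end
    end

    class ChainedCommands
      def applies?(cmd)
        cmd.include?('&&') || cmd.include?(';') || cmd.include?('|')
      end
    end

    class DangerousPaths
      def applies?(cmd)
        cmd.include?('/etc/') || cmd.include?('/bin/')
      end
    end

    class MiniCommandInjectionRule
      # Each accessor builds its sub-rule once and reuses the same instance afterwards.
      def command_backdoors
        @command_backdoors ||= CommandBackdoors.new
      end

      def semantic_chained_commands
        @semantic_chained_commands ||= ChainedCommands.new
      end

      def semantic_dangerous_paths
        @semantic_dangerous_paths ||= DangerousPaths.new
      end

      def sub_rules
        @sub_rules ||= [command_backdoors, semantic_chained_commands, semantic_dangerous_paths].freeze
      end

      def violated?(command)
        sub_rules.any? { |rule| rule.applies?(command) }
      end
    end

    puts MiniCommandInjectionRule.new.violated?('cat /etc/passwd && curl example.org') # => true

Exposing each sub-rule through its own accessor lets callers reach a single sub-rule without instantiating the whole set.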
data/lib/contrast/agent/protect/rule/cmdi/cmdi_backdoors.rb
CHANGED
@@ -43,10 +43,8 @@ module Contrast
                                                 **{ classname: classname, method: method }))

            append_to_activity(context, result)
-
-
-
-            raise_error(classname, method)
+            record_triggered(context)
+            raise_error(classname, method) if blocked_violation?(result)
          end

          private
data/lib/contrast/agent/protect/rule/cmdi/cmdi_base_rule.rb
CHANGED
@@ -20,7 +20,7 @@ module Contrast
          APPLICABLE_USER_INPUTS = [
            BODY, COOKIE_VALUE, HEADER, PARAMETER_NAME,
            PARAMETER_VALUE, JSON_VALUE, MULTIPART_VALUE,
-            MULTIPART_FIELD_NAME, XML_VALUE, DWR_VALUE
+            MULTIPART_FIELD_NAME, XML_VALUE, DWR_VALUE, UNKNOWN
          ].cs__freeze

          # CMDI input classification
@@ -46,6 +46,7 @@ module Contrast
            return unless (result = build_violation(context, command))

            append_to_activity(context, result)
+            record_triggered(context)
            raise_error(classname, method) if blocked_violation?(result)
          end

data/lib/contrast/agent/protect/rule/deserialization/deserialization.rb
CHANGED
@@ -50,6 +50,7 @@ module Contrast
          end

          # Return the specific blocking message for this rule.
+          #
          # @return [String] the reason for the raised security exception.
          def block_message
            BLOCK_MESSAGE
@@ -84,10 +85,9 @@ module Contrast
            kwargs = { GADGET_TYPE: gadget }
            result = build_attack_with_match(context, ia_result, nil, serialized_input, **kwargs)
            append_to_activity(context, result)
+            record_triggered(context)

-
-
-            raise(Contrast::SecurityException.new(self, block_message)) if blocked?
+            raise(Contrast::SecurityException.new(self, block_message)) if blocked_violation?(result)
          end

          # Determine if the issued command was called while we're
@@ -106,13 +106,13 @@ module Contrast
            ia_result = build_evaluation(gadget_command)
            result = build_attack_with_match(context, ia_result, nil, gadget_command, **kwargs)
            append_to_activity(context, result)
-            cef_logging(result, :successful_attack, value: gadget_command)
            raise(Contrast::SecurityException.new(self, BLOCK_MESSAGE)) if blocked?
          end

          protected

          # Build the RaspRuleSample for the detected Deserialization attack.
+          #
          # @param context [Contrast::Agent::RequestContext] the request
          #   context in which this attack is occurring.
          # @param input_analysis_result [Contrast::Agent::Reporting::InputAnalysis]
data/lib/contrast/agent/protect/rule/input_classification/base.rb
CHANGED
@@ -24,7 +24,7 @@ module Contrast
            COOKIE_VALUE, PARAMETER_VALUE, HEADER, JSON_VALUE, MULTIPART_VALUE, XML_VALUE, DWR_VALUE
          ].cs__freeze

-          BASE64_INPUT_TYPES = [BODY, COOKIE_VALUE,
+          BASE64_INPUT_TYPES = [BODY, COOKIE_VALUE, PARAMETER_VALUE, MULTIPART_VALUE, XML_VALUE].cs__freeze

          class << self
            include Contrast::Components::Logger::InstanceMethods
@@ -172,7 +172,9 @@ module Contrast
            end

            # Decodes the value for the given input type.
-            #
+            #
+            # This applies to Values sources only:
+            # BODY, COOKIE_VALUE, HEADER, PARAMETER_VALUE, MULTIPART_VALUE, XML_VALUE
            #
            # @param value [String]
            # @param input_type [Symbol]
@@ -181,6 +183,9 @@ module Contrast
              return value unless Contrast::PROTECT.normalize_base64?
              return value unless BASE64_INPUT_TYPES.include?(input_type)

+              # TODO: RUBY-2110 Update the HEADER handling if possible.
+              # We need only the Header values.
+
              cs__decode64(value, input_type)
            end
          end
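Illustrative sketch (not from the gem): the BASE64_INPUT_TYPES change above narrows which input types get Base64 normalization, and the encoding.rb hunk that follows widens the list of input names that must never be decoded because they merely look Base64-ish. A hedged, standard-library-only version of that normalize-with-exceptions idea:

    require 'base64'

    # Optional Base64 normalization guarded by an exception list of input names that are
    # known false positives. The names and the shape check are illustrative, not the gem's.
    KNOWN_DECODING_EXCEPTIONS = %w[cmd version if_modified_since].freeze
    BASE64_SHAPE = %r{\A[A-Za-z0-9+/]+={0,2}\z}.freeze

    def normalize_base64(name, value)
      return value if KNOWN_DECODING_EXCEPTIONS.include?(name.to_s.downcase)
      return value unless value.is_a?(String) && (value.length % 4).zero? && value.match?(BASE64_SHAPE)

      decoded = Base64.strict_decode64(value)
      # Keep the decode only if it still looks like readable text; otherwise keep the original.
      decoded.force_encoding(Encoding::UTF_8).valid_encoding? ? decoded : value
    rescue ArgumentError
      value
    end

    puts normalize_base64('payload', Base64.strict_encode64('ls -la /tmp')) # decoded back to the command
    puts normalize_base64('cmd', 'stringdw')                                # exempt name, left untouched

The second call mirrors the 'stringdw' false-positive mentioned in the hunk below: the value passes a naive Base64 shape check, so the exemption list is what keeps it from being mangled.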
data/lib/contrast/agent/protect/rule/input_classification/encoding.rb
CHANGED
@@ -16,7 +16,7 @@ module Contrast

          # Still a list is needed for this one, as it is not possible to determine if the value is encoded or not.
          # As long as the list is short the method has a good percentage of success.
-          KNOWN_DECODING_EXCEPTIONS = %w[cmd].cs__freeze
+          KNOWN_DECODING_EXCEPTIONS = %w[cmd version if_modified_since].cs__freeze

          # This methods is not performant, but is more safe for false positive.
          # Base64 check is no trivial task. For example if one passes a value like 'stringdw' it will return true,