bbot 2.4.2__py3-none-any.whl → 2.4.2.6590rc0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of bbot might be problematic.
- bbot/__init__.py +1 -1
- bbot/core/event/base.py +64 -4
- bbot/core/helpers/diff.py +10 -7
- bbot/core/helpers/helper.py +5 -1
- bbot/core/helpers/misc.py +48 -11
- bbot/core/helpers/regex.py +4 -0
- bbot/core/helpers/regexes.py +45 -8
- bbot/core/helpers/url.py +21 -5
- bbot/core/helpers/web/client.py +25 -5
- bbot/core/helpers/web/engine.py +9 -1
- bbot/core/helpers/web/envelopes.py +352 -0
- bbot/core/helpers/web/web.py +10 -2
- bbot/core/helpers/yara_helper.py +50 -0
- bbot/core/modules.py +23 -7
- bbot/defaults.yml +26 -1
- bbot/modules/base.py +4 -2
- bbot/modules/{deadly/dastardly.py → dastardly.py} +1 -1
- bbot/modules/{deadly/ffuf.py → ffuf.py} +1 -1
- bbot/modules/ffuf_shortnames.py +1 -1
- bbot/modules/httpx.py +14 -0
- bbot/modules/hunt.py +24 -6
- bbot/modules/internal/aggregate.py +1 -0
- bbot/modules/internal/excavate.py +356 -197
- bbot/modules/lightfuzz/lightfuzz.py +203 -0
- bbot/modules/lightfuzz/submodules/__init__.py +0 -0
- bbot/modules/lightfuzz/submodules/base.py +312 -0
- bbot/modules/lightfuzz/submodules/cmdi.py +106 -0
- bbot/modules/lightfuzz/submodules/crypto.py +474 -0
- bbot/modules/lightfuzz/submodules/nosqli.py +183 -0
- bbot/modules/lightfuzz/submodules/path.py +154 -0
- bbot/modules/lightfuzz/submodules/serial.py +179 -0
- bbot/modules/lightfuzz/submodules/sqli.py +187 -0
- bbot/modules/lightfuzz/submodules/ssti.py +39 -0
- bbot/modules/lightfuzz/submodules/xss.py +191 -0
- bbot/modules/{deadly/nuclei.py → nuclei.py} +1 -1
- bbot/modules/paramminer_headers.py +2 -0
- bbot/modules/reflected_parameters.py +80 -0
- bbot/modules/{deadly/vhost.py → vhost.py} +2 -2
- bbot/presets/web/lightfuzz-heavy.yml +16 -0
- bbot/presets/web/lightfuzz-light.yml +20 -0
- bbot/presets/web/lightfuzz-medium.yml +14 -0
- bbot/presets/web/lightfuzz-superheavy.yml +13 -0
- bbot/presets/web/lightfuzz-xss.yml +21 -0
- bbot/presets/web/paramminer.yml +8 -5
- bbot/scanner/preset/args.py +26 -0
- bbot/scanner/scanner.py +6 -0
- bbot/test/test_step_1/test__module__tests.py +1 -1
- bbot/test/test_step_1/test_helpers.py +7 -0
- bbot/test/test_step_1/test_presets.py +2 -2
- bbot/test/test_step_1/test_web.py +20 -0
- bbot/test/test_step_1/test_web_envelopes.py +343 -0
- bbot/test/test_step_2/module_tests/test_module_excavate.py +404 -29
- bbot/test/test_step_2/module_tests/test_module_httpx.py +29 -0
- bbot/test/test_step_2/module_tests/test_module_hunt.py +18 -1
- bbot/test/test_step_2/module_tests/test_module_lightfuzz.py +1947 -0
- bbot/test/test_step_2/module_tests/test_module_paramminer_getparams.py +4 -1
- bbot/test/test_step_2/module_tests/test_module_paramminer_headers.py +46 -2
- bbot/test/test_step_2/module_tests/test_module_reflected_parameters.py +226 -0
- bbot/wordlists/paramminer_parameters.txt +0 -8
- {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/METADATA +2 -1
- {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/RECORD +64 -42
- {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/LICENSE +0 -0
- {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/WHEEL +0 -0
- {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/entry_points.txt +0 -0

bbot/modules/lightfuzz/submodules/crypto.py
@@ -0,0 +1,474 @@
import base64
import hashlib
from .base import BaseLightfuzz
from bbot.errors import HttpCompareError
from urllib.parse import unquote, quote


# Global cache for compiled YARA rules
_compiled_rules_cache = None


class crypto(BaseLightfuzz):
    """
    Detects the use of cryptography in web parameters, and probes for some cryptographic vulnerabilities

    * Cryptographic Error Detection:
        - Detects known cryptographic error messages in server responses.

    * Cryptographic Parameter Value Detection:
        - Detects use of cryptography in web parameter values.
        - Validates by attempting to manipulate the value regardless of its encoding.

    * Length Extension Attack Detection:
        - Identifies parameters which may be expecting hash digests for values, and any linked parameters which invalidate them.

    * Padding Oracle Vulnerabilities:
        - Identifies the presence of cryptographic oracles that could be exploited to arbitrary decrypt or encrypt data for the parameter value.


    """

    friendly_name = "Cryptography Probe"

    # Although we have an envelope system to detect hex and base64 encoded parameter values, those are only assigned when they decode to a valid string.
    # Since crypto values (and serialized objects) will not decode properly, we need a more concise check here to determine how to process them.

    @staticmethod
    def is_hex(s):
        try:
            bytes.fromhex(s)
            return True
        except ValueError:
            return False

    @staticmethod
    def is_base64(s):
        try:
            if base64.b64encode(base64.b64decode(s)).decode() == s:
                return True
        except Exception:
            return False
        return False

    # A list of YARA rules for detecting cryptographic error messages
    crypto_error_strings = [
        "invalid mac",
        "padding is invalid",
        "bad data",
        "length of the data to decrypt is invalid",
        "specify a valid key size",
        "invalid algorithm specified",
        "object already exists",
        "key does not exist",
        "the parameter is incorrect",
        "cryptography exception",
        "access denied",
        "unknown error",
        "invalid provider type",
        "no valid cert found",
        "cannot find the original signer",
        "signature description could not be created",
        "crypto operation failed",
        "OpenSSL Error",
    ]

    @property
    def compiled_rules(self):
        """
        We need to cache the compiled YARA rule globally since lightfuzz submodules are recreated for every handle_event
        """
        global _compiled_rules_cache
        if _compiled_rules_cache is None:
            _compiled_rules_cache = self.lightfuzz.helpers.yara.compile_strings(self.crypto_error_strings, nocase=True)
        return _compiled_rules_cache

    @staticmethod
    def format_agnostic_decode(input_string, urldecode=False):
        """
        Decodes a string from either hex or base64 (without knowing which first), and optionally URL-decoding it first.

        Parameters:
        - input_string (str): The string to decode.
        - urldecode (bool): If True, URL-decodes the input first.

        Returns:
        - tuple: (decoded data, encoding type: 'hex', 'base64', or 'unknown').
        """
        encoding = "unknown"
        if urldecode:
            input_string = unquote(input_string)
        if BaseLightfuzz.is_hex(input_string):
            data = bytes.fromhex(input_string)
            encoding = "hex"
        elif BaseLightfuzz.is_base64(input_string):
            data = base64.b64decode(input_string)
            encoding = "base64"
        else:
            data = str
        return data, encoding

    @staticmethod
    def format_agnostic_encode(data, encoding, urlencode=False):
        """
        Encodes data into hex or base64, with optional URL-encoding.

        Parameters:
        - data (bytes): The data to encode.
        - encoding (str): The encoding type ('hex' or 'base64').
        - urlencode (bool): If True, URL-encodes the result.

        Returns:
        - str: The encoded data as a string.

        Raises:
        - ValueError: If an unsupported encoding type is specified.
        """
        if encoding == "hex":
            encoded_data = data.hex()
        elif encoding == "base64":
            encoded_data = base64.b64encode(data).decode("utf-8")  # base64 encoding returns bytes, decode to string
        else:
            raise ValueError("Unsupported encoding type specified")
        if urlencode:
            return quote(encoded_data)
        return encoded_data

    @staticmethod
    def modify_string(input_string, action="truncate", position=None, extension_length=1):
        """
        Modifies a cryptographic string by either truncating it, mutating a byte at a specified position, or extending it with null bytes.

        Parameters:
        - input_string (str): The string to modify.
        - action (str): The action to perform ('truncate', 'mutate', 'extend').
        - position (int): The position to mutate (only used if action is 'mutate').
        - extension_length (int): The number of null bytes to add if action is 'extend'.

        Returns:
        - str: The modified string.
        """
        if not isinstance(input_string, str):
            input_string = str(input_string)

        data, encoding = crypto.format_agnostic_decode(input_string)
        if encoding != "base64" and encoding != "hex":
            raise ValueError("Input must be either hex or base64 encoded")

        if action == "truncate":
            modified_data = data[:-1]  # Remove the last byte
        elif action == "mutate":
            if not position:
                position = len(data) // 2
            if position < 0 or position >= len(data):
                raise ValueError("Position out of range")
            byte_list = list(data)
            byte_list[position] = (byte_list[position] + 1) % 256
            modified_data = bytes(byte_list)
        elif action == "extend":
            modified_data = data + (b"\x00" * extension_length)
        elif action == "flip":
            if not position:
                position = len(data) // 2
            if position < 0 or position >= len(data):
                raise ValueError("Position out of range")
            byte_list = list(data)
            byte_list[position] ^= 0xFF  # Flip all bits in the byte at the specified position
            modified_data = bytes(byte_list)
        else:
            raise ValueError("Unsupported action")
        return crypto.format_agnostic_encode(modified_data, encoding)

    # Check if the entropy of the data is greater than the threshold, indicating it is likely encrypted
    def is_likely_encrypted(self, data, threshold=4.5):
        entropy = self.lightfuzz.helpers.calculate_entropy(data)
        return entropy >= threshold

    # Perform basic cryptanalysis on the input string, attempting to determine if it is likely encrypted and if it is a block cipher
    def cryptanalysis(self, input_string):
        likely_crypto = False
        possible_block_cipher = False
        data, encoding = self.format_agnostic_decode(input_string)
        likely_crypto = self.is_likely_encrypted(data)
        data_length = len(data)
        if data_length % 8 == 0:
            possible_block_cipher = True
        return likely_crypto, possible_block_cipher

    # Determine possible block sizes for a given ciphertext length
    @staticmethod
    def possible_block_sizes(ciphertext_length):
        potential_block_sizes = [8, 16]
        possible_sizes = []
        for block_size in potential_block_sizes:
            num_blocks = ciphertext_length // block_size
            if ciphertext_length % block_size == 0 and num_blocks >= 2:
                possible_sizes.append(block_size)
        return possible_sizes

    async def padding_oracle_execute(self, original_data, encoding, block_size, cookies, possible_first_byte=True):
        """
        Execute the padding oracle attack for a given block size.
        The goal here is not actual exploitation (arbitrary encryption or decryption), but rather to definitively confirm whether padding oracle vulnerability exists and is exploitable.

        Parameters:
        - original_data (bytes): The original ciphertext data.
        - encoding (str): The encoding type ('hex' or 'base64').
        - block_size (int): The block size to use for the padding oracle attack.
        - cookies (dict): Cookies to include, if any
        - possible_first_byte (bool): If True, use the first byte as the baseline byte.

        Returns:
        - bool: True if the padding oracle attack is successful.
        """
        ivblock = b"\x00" * block_size  # initialize the IV block with null bytes
        paddingblock = b"\x00" * block_size  # initialize the padding block with null bytes
        datablock = original_data[-block_size:]  # extract the last block of the original data

        # This handling the 1/255 chance that the first byte is correct padding which would cause a false negative.
        if possible_first_byte:
            baseline_byte = b"\xff"  # set the baseline byte to 0xff
            starting_pos = 0  # set the starting position to 0
        else:
            baseline_byte = b"\x00"  # set the baseline byte to 0x00
            starting_pos = 1  # set the starting position to 1
        # first obtain
        baseline = self.compare_baseline(
            self.event.data["type"],
            self.format_agnostic_encode(ivblock + paddingblock[:-1] + baseline_byte + datablock, encoding),
            cookies,
        )
        differ_count = 0
        # for each possible byte value, send a probe and check if the response is different
        for i in range(starting_pos, starting_pos + 254):
            byte = bytes([i])
            oracle_probe = await self.compare_probe(
                baseline,
                self.event.data["type"],
                self.format_agnostic_encode(ivblock + paddingblock[:-1] + byte + datablock, encoding),
                cookies,
            )
            # oracle_probe[0] will be false if the response is different - oracle_probe[1] stores what aspect of the response is different (headers, body, code)
            if oracle_probe[0] is False and "body" in oracle_probe[1]:
                differ_count += 1

                if i == 2:
                    if possible_first_byte is True:
                        # Thats two results which appear "different". Since this is the first run, it's entirely possible \x00 was the correct padding.
                        # We will break from this loop and redo it with the last byte as the baseline instead of the first
                        return None
                    else:
                        # Now that we have tried the run twice, we know it can't be because the first byte was the correct padding, and we know it is not vulnerable
                        return False
        # A padding oracle vulnerability will produce exactly one different response, and no more, so this is likely a real padding oracle
        if differ_count == 1:
            return True
        return False

    async def padding_oracle(self, probe_value, cookies):
        data, encoding = self.format_agnostic_decode(probe_value)
        possible_block_sizes = self.possible_block_sizes(
            len(data)
        )  # determine possible block sizes for the ciphertext

        for block_size in possible_block_sizes:
            padding_oracle_result = await self.padding_oracle_execute(data, encoding, block_size, cookies)
            # if we get a negative result first, theres a 1/255 change it's a false negative. To rule that out, we must retry again with possible_first_byte set to false
            if padding_oracle_result is None:
                self.debug("still could be in a possible_first_byte situation - retrying with different first byte")
                padding_oracle_result = await self.padding_oracle_execute(
                    data, encoding, block_size, cookies, possible_first_byte=False
                )

            if padding_oracle_result is True:
                context = f"Lightfuzz Cryptographic Probe Submodule detected a probable padding oracle vulnerability after manipulating parameter: [{self.event.data['name']}]"
                self.results.append(
                    {
                        "type": "VULNERABILITY",
                        "severity": "HIGH",
                        "description": f"Padding Oracle Vulnerability. Block size: [{str(block_size)}] {self.metadata()}",
                        "context": context,
                    }
                )

    async def error_string_search(self, text_dict, baseline_text):
        """
        Search for cryptographic error strings using YARA rules in the provided text dictionary and baseline text.
        """
        matching_techniques = set()
        matching_strings = set()

        # Check each manipulation technique
        for label, text in text_dict.items():
            matches = await self.lightfuzz.helpers.yara.match(self.compiled_rules, text)
            if matches:
                matching_techniques.add(label)
                for matched_string in matches:
                    matching_strings.add(matched_string)

        # Check for false positives by scanning baseline text
        context = f"Lightfuzz Cryptographic Probe Submodule detected a cryptographic error after manipulating parameter: [{self.event.data['name']}]"
        if matching_strings:
            baseline_matches = await self.lightfuzz.helpers.yara.match(self.compiled_rules, baseline_text)
            baseline_strings = set()
            for matched_string in baseline_matches:
                baseline_strings.add(matched_string)

            # Only report strings that weren't in the baseline
            unique_matches = matching_strings - baseline_strings
            if unique_matches:
                self.results.append(
                    {
                        "type": "FINDING",
                        "description": f"Possible Cryptographic Error. {self.metadata()} Strings: [{','.join(unique_matches)}] Detection Technique(s): [{','.join(matching_techniques)}]",
                        "context": context,
                    }
                )

            else:
                self.debug(
                    f"Aborting cryptographic error reporting - baseline_text already contained detected string(s) ({','.join(baseline_strings)})"
                )

    # Identify the hash function based on the length of the hash
    @staticmethod
    def identify_hash_function(hash_bytes):
        hash_length = len(hash_bytes)
        hash_functions = {
            16: hashlib.md5,
            20: hashlib.sha1,
            32: hashlib.sha256,
            48: hashlib.sha384,
            64: hashlib.sha512,
        }

        if hash_length in hash_functions:
            return hash_functions[hash_length]

    async def fuzz(self):
        cookies = self.event.data.get("assigned_cookies", {})
        probe_value = self.incoming_probe_value(populate_empty=False)

        if not probe_value:
            self.debug(
                f"The Cryptography Probe Submodule requires original value, aborting [{self.event.data['type']}] [{self.event.data['name']}]"
            )
            return

        # obtain the baseline probe to compare against
        baseline_probe = await self.baseline_probe(cookies)
        if not baseline_probe:
            self.verbose(f"Couldn't get baseline_probe for url {self.event.data['url']}, aborting")
            return

        # perform the manipulation techniques
        try:
            truncate_probe_value = self.modify_string(probe_value, action="truncate")
            mutate_probe_value = self.modify_string(probe_value, action="mutate")
        except ValueError as e:
            self.debug(f"Encountered error modifying value for parameter [{self.event.data['name']}]: {e} , aborting")
            return

        # Basic crypanalysis
        likely_crypto, possible_block_cipher = self.cryptanalysis(probe_value)

        # if the value is not likely to be cryptographic, we can skip the rest of the tests
        if not likely_crypto:
            self.debug("Parameter value does not appear to be cryptographic, aborting tests")
            return

        # Cryptographic Response Divergence Test

        http_compare = self.compare_baseline(self.event.data["type"], probe_value, cookies)
        try:
            arbitrary_probe = await self.compare_probe(http_compare, self.event.data["type"], "AAAAAAA", cookies)  #
            truncate_probe = await self.compare_probe(
                http_compare, self.event.data["type"], truncate_probe_value, cookies
            )  # manipulate the value by truncating a byte
            mutate_probe = await self.compare_probe(
                http_compare, self.event.data["type"], mutate_probe_value, cookies
            )  # manipulate the value by mutating a byte in place
        except HttpCompareError as e:
            self.verbose(f"Encountered HttpCompareError Sending Compare Probe: {e}")
            return

        confirmed_techniques = []
        # mutate_probe[0] will be false if the response is different - mutate_probe[1] stores what aspect of the response is different (headers, body, code)
        # ensure the difference is in the body and not the headers or code
        # if the body is different and not empty, we have confirmed that single-byte mutation affected the response body
        if mutate_probe[0] is False and "body" in mutate_probe[1]:
            if (http_compare.compare_body(mutate_probe[3].text, arbitrary_probe[3].text) is False) or mutate_probe[
                3
            ].text == "":
                confirmed_techniques.append("Single-byte Mutation")

        # if the body is different and not empty, we have confirmed that byte truncation affected the response body
        if truncate_probe[0] is False and "body" in truncate_probe[1]:
            if (http_compare.compare_body(truncate_probe[3].text, arbitrary_probe[3].text) is False) or truncate_probe[
                3
            ].text == "":
                confirmed_techniques.append("Data Truncation")

        if confirmed_techniques:
            context = f"Lightfuzz Cryptographic Probe Submodule detected a parameter ({self.event.data['name']}) to appears to drive a cryptographic operation"
            self.results.append(
                {
                    "type": "FINDING",
                    "description": f"Probable Cryptographic Parameter. {self.metadata()} Detection Technique(s): [{', '.join(confirmed_techniques)}]",
                    "context": context,
                }
            )

        # Cryptographic Error String Test
        # Check if cryptographic error strings are present in the response after performing the manipulation techniques
        await self.error_string_search(
            {"truncate value": truncate_probe[3].text, "mutate value": mutate_probe[3].text}, baseline_probe.text
        )
        # if we have any confirmed techniques, or the word "padding" is in the response, we need to check for a padding oracle
        if confirmed_techniques or (
            "padding" in truncate_probe[3].text.lower() or "padding" in mutate_probe[3].text.lower()
        ):
            # Padding Oracle Test
            if possible_block_cipher:
                self.debug(
                    "Attempting padding oracle exploit since it looks like a block cipher and we have confirmed crypto"
                )
                await self.padding_oracle(probe_value, cookies)

        # Hash identification / Potential Length extension attack
        data, encoding = crypto.format_agnostic_decode(probe_value)
        # see if its possible that a given value is a hash, and if so, which one
        hash_function = self.identify_hash_function(data)
        if hash_function:
            hash_instance = hash_function()
            # if there are any hash functions which match the length, we check the additional parameters to see if they cause identical changes
            # this would indicate they are being used to generate the hash
            if (
                hash_function
                and "additional_params" in self.event.data.keys()
                and self.event.data["additional_params"]
            ):
                # for each additional parameter, we send a probe and check if it causes the same change in the response as the original probe
                for additional_param_name, additional_param_value in self.event.data["additional_params"].items():
                    try:
                        additional_param_probe = await self.compare_probe(
                            http_compare,
                            self.event.data["type"],
                            probe_value,
                            cookies,
                            additional_params_override={additional_param_name: additional_param_value + "A"},
                        )
                    except HttpCompareError as e:
                        self.verbose(f"Encountered HttpCompareError Sending Compare Probe: {e}")
                        continue
                    # the additional parameter affects the potential hash parameter (suggesting its calculated in the hash)
                    # This is a potential length extension attack
                    if additional_param_probe[0] is False and (additional_param_probe[1] == mutate_probe[1]):
                        context = f"Lightfuzz Cryptographic Probe Submodule detected a parameter ({self.event.data['name']}) that is a likely a hash, which is connected to another parameter {additional_param_name})"
                        self.results.append(
                            {
                                "type": "FINDING",
                                "description": f"Possible {self.event.data['type']} parameter with {hash_instance.name.upper()} Hash as value. {self.metadata()}, linked to additional parameter [{additional_param_name}]",
                                "context": context,
                            }
                        )
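
For reviewers skimming this hunk: the value-manipulation helpers above (format_agnostic_decode, format_agnostic_encode, modify_string) drive every probe the submodule sends, and the padding-oracle check then counts how many of the 254 single-byte probes produce a divergent response body, treating exactly one divergence as a likely oracle. The short sketch below is not part of the package; it only mirrors the truncate/mutate/flip transformations on a hex-encoded token so the intent of modify_string is easier to follow (demo_modify and the sample token are ours, and only the hex case is shown; the real helper also handles base64 and optional URL-encoding).

def demo_modify(token: str, action: str = "truncate") -> str:
    # hypothetical stand-in for crypto.modify_string, hex input only
    data = bytearray(bytes.fromhex(token))
    if action == "truncate":
        data = data[:-1]  # drop the final byte
    elif action == "mutate":
        pos = len(data) // 2
        data[pos] = (data[pos] + 1) % 256  # bump one byte in the middle
    elif action == "flip":
        pos = len(data) // 2
        data[pos] ^= 0xFF  # flip every bit of one byte
    else:
        raise ValueError("Unsupported action")
    return bytes(data).hex()

token = "00112233445566778899aabbccddeeff"  # example 16-byte (block-sized) value
for action in ("truncate", "mutate", "flip"):
    print(action, demo_modify(token, action))

Each transformed value is re-submitted and diffed against a baseline response, which is how the submodule decides whether the parameter actually drives a cryptographic operation.
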
bbot/modules/lightfuzz/submodules/nosqli.py
@@ -0,0 +1,183 @@
from .base import BaseLightfuzz
from bbot.errors import HttpCompareError
import urllib.parse


class nosqli(BaseLightfuzz):
    """
    Detects NoSQL injection vulnerabilities.

    Techniques:

    * Quote Injection Analysis:
        - Injects single quotes and escaped single quotes into parameters
        - Compares response differences between the two to detect NoSQL parsing
        - Uses baseline comparison to validate findings and reduce false positives

    * Operator Injection:
        - Tests MongoDB-style operator injection using [$eq] and [$ne]
        - Modifies parameter names to include operators
        - Detects behavioral changes in application responses

    Validation of findings is achieved using confirmation probes to rule out unstable endpoints
    """

    friendly_name = "NoSQL Injection"

    async def fuzz(self):
        cookies = self.event.data.get("assigned_cookies", {})
        probe_value = self.incoming_probe_value(populate_empty=True)
        quote_probe_baseline = None
        try:
            quote_probe_baseline = self.compare_baseline(
                self.event.data["type"], probe_value, cookies, additional_params_populate_empty=True
            )
        except HttpCompareError as e:
            self.verbose(f"Encountered HttpCompareError Sending Compare Baseline: {e}")

        if quote_probe_baseline:
            try:
                # send the with a single quote, and then another with an escaped single quote
                (
                    single_quote_comparison,
                    single_quote_diff_reasons,
                    single_quote_reflection,
                    single_quote_response,
                ) = await self.compare_probe(
                    quote_probe_baseline,
                    self.event.data["type"],
                    f"{probe_value}'",
                    cookies,
                    additional_params_populate_empty=True,
                )
                (
                    escaped_single_quote_comparison,
                    escaped_single_quote_diff_reasons,
                    escaped_single_quote_reflection,
                    escaped_single_quote_response,
                ) = await self.compare_probe(
                    quote_probe_baseline,
                    self.event.data["type"],
                    rf"{probe_value}\'",
                    cookies,
                    additional_params_populate_empty=True,
                )
                if not single_quote_comparison and single_quote_response and escaped_single_quote_response:
                    # if the single quote probe changed the code or body, and the escaped single quote probe did not cause the same change, injection is possible
                    if ("code" in single_quote_diff_reasons or "body" in single_quote_diff_reasons) and (
                        single_quote_diff_reasons != escaped_single_quote_diff_reasons
                    ):
                        self.verbose(
                            "Initial heuristic indicates possible NoSQL Injection, sending confirmation probes"
                        )
                        confirm_baseline = self.compare_baseline(
                            self.event.data["type"],
                            urllib.parse.quote(f"{probe_value}' && 0 && 'x", safe=""),
                            cookies,
                            additional_params_populate_empty=True,
                            skip_urlencoding=True,
                        )
                        (
                            confirmation_probe_false_comparison,
                            confirmation_probe_false_diff_reasons,
                            confirmation_probe_false_reflection,
                            confirmation_probe_false_response,
                        ) = await self.compare_probe(
                            confirm_baseline,
                            self.event.data["type"],
                            urllib.parse.quote(f"{probe_value}' && 1 && 'x", safe=""),
                            cookies,
                            additional_params_populate_empty=True,
                            skip_urlencoding=True,
                        )
                        if confirmation_probe_false_response:
                            if not confirmation_probe_false_comparison and confirmation_probe_false_diff_reasons != [
                                "header"
                            ]:
                                (
                                    final_confirm_comparison,
                                    final_confirm_diff_reasons,
                                    final_confirm_reflection,
                                    final_confirm_response,
                                ) = await self.compare_probe(
                                    confirm_baseline,
                                    self.event.data["type"],
                                    urllib.parse.quote(f"{probe_value}' && 0 && 'x", safe=""),
                                    cookies,
                                    additional_params_populate_empty=True,
                                    skip_urlencoding=True,
                                )

                                if final_confirm_response and final_confirm_comparison:
                                    self.results.append(
                                        {
                                            "type": "FINDING",
                                            "description": f"Possible NoSQL Injection. {self.metadata()} Detection Method: [Quote/Escaped Quote + Conditional Affect] Differences: [{'.'.join(confirmation_probe_false_diff_reasons)}]",
                                        }
                                    )
                                else:
                                    self.verbose(
                                        "Aborted reporting Possible NoSQL Injection, due to unstable/inconsistent responses"
                                    )

            except HttpCompareError as e:
                self.verbose(f"Encountered HttpCompareError Sending Compare Probe: {e}")

        # Comparison operator injection
        if self.event.data["type"] in ["POSTPARAM", "GETPARAM"]:
            nosqli_negation_baseline = None

            try:
                nosqli_negation_baseline = self.compare_baseline(
                    self.event.data["type"],
                    f"{probe_value}'",
                    cookies,
                    additional_params_populate_empty=True,
                    parameter_name_suffix="[$eq]",
                    parameter_name_suffix_additional_params="[$eq]",
                )
            except HttpCompareError as e:
                self.verbose(f"Encountered HttpCompareError Sending Compare Baseline: {e}")

            if nosqli_negation_baseline:
                try:
                    (
                        nosqli_negate_comparison,
                        nosqli_negate_diff_reasons,
                        nosqli_negate_reflection,
                        nosqli_negate_response,
                    ) = await self.compare_probe(
                        nosqli_negation_baseline,
                        self.event.data["type"],
                        f"{probe_value}'",
                        cookies,
                        additional_params_populate_empty=True,
                        parameter_name_suffix="[$ne]",
                        parameter_name_suffix_additional_params="[$ne]",
                    )
                    if nosqli_negate_response:
                        if not nosqli_negate_comparison and nosqli_negate_diff_reasons != ["header"]:
                            # If we are about to report a finding, rule out a false positive from unstable URL by sending another probe with the baseline values, and ensure those dont also come back as different
                            (
                                nosqli_negate_comfirm_comparison,
                                nosqli_negate_confirm_diff_reasons,
                                nosqli_negate_confirm_reflection,
                                nosqli_negate_confirm_response,
                            ) = await self.compare_probe(
                                nosqli_negation_baseline,
                                self.event.data["type"],
                                f"{probe_value}'",
                                cookies,
                                additional_params_populate_empty=True,
                                parameter_name_suffix="[$eq]",
                                parameter_name_suffix_additional_params="[$eq]",
                            )
                            if nosqli_negate_comfirm_comparison:
                                self.results.append(
                                    {
                                        "type": "FINDING",
                                        "description": f"Possible NoSQL Injection. {self.metadata()} Detection Method: [Parameter Name Operator Injection - Negation ([$ne])] Differences: [{'.'.join(nosqli_negate_diff_reasons)}]",
                                    }
                                )
                except HttpCompareError as e:
                    self.verbose(f"Encountered HttpCompareError Sending Compare Probe: {e}")
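
As an aid to reviewing the NoSQL injection hunk above, the detection logic reduces to sending a small family of payload variants and diffing the responses: a bare single quote versus an escaped single quote, then pre-URL-encoded ' && 0 && 'x and ' && 1 && 'x conditionals for confirmation, plus [$eq]/[$ne] suffixes appended to the parameter name for operator injection. The snippet below only illustrates what those payloads look like for a sample probe value; it does not use bbot's compare helpers, and the probe_value shown is ours, not from the diff.

import urllib.parse

probe_value = "bob"  # example value; the module uses the parameter's discovered value

payloads = {
    "single quote": f"{probe_value}'",
    "escaped single quote": rf"{probe_value}\'",
    # conditional probes are pre-encoded because the module passes skip_urlencoding=True
    "false condition": urllib.parse.quote(f"{probe_value}' && 0 && 'x", safe=""),
    "true condition": urllib.parse.quote(f"{probe_value}' && 1 && 'x", safe=""),
}
operator_suffixes = ["[$eq]", "[$ne]"]  # appended to the parameter name, not the value

for label, payload in payloads.items():
    print(f"{label}: {payload}")
print("parameter name suffixes:", ", ".join(operator_suffixes))

A finding is only recorded when the true/false conditionals diverge (or the [$ne] probe diverges) while a repeat of the baseline-equivalent probe stays stable, which is the unstable-endpoint guard described in the class docstring.
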