exaai-agent 2.0.5__py3-none-any.whl → 2.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.6.dist-info}/METADATA +1 -1
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.6.dist-info}/RECORD +18 -10
- exaaiagnt/interface/tui.py +16 -2
- exaaiagnt/llm/__init__.py +13 -0
- exaaiagnt/llm/llm.py +14 -3
- exaaiagnt/llm/llm_traffic_controller.py +351 -0
- exaaiagnt/prompts/auto_loader.py +104 -0
- exaaiagnt/prompts/cloud/aws_cloud_security.jinja +235 -0
- exaaiagnt/prompts/frameworks/modern_js_frameworks.jinja +194 -0
- exaaiagnt/prompts/vulnerabilities/react2shell.jinja +187 -0
- exaaiagnt/tools/__init__.py +58 -0
- exaaiagnt/tools/response_analyzer.py +294 -0
- exaaiagnt/tools/smart_fuzzer.py +286 -0
- exaaiagnt/tools/tool_prompts.py +210 -0
- exaaiagnt/tools/vuln_validator.py +412 -0
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.6.dist-info}/WHEEL +0 -0
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.6.dist-info}/entry_points.txt +0 -0
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.6.dist-info}/licenses/LICENSE +0 -0
{exaai_agent-2.0.5.dist-info → exaai_agent-2.0.6.dist-info}/RECORD
RENAMED
@@ -28,23 +28,26 @@ exaaiagnt/interface/tool_components/terminal_renderer.py,sha256=-ORL2vBH5XImUZrI
 exaaiagnt/interface/tool_components/thinking_renderer.py,sha256=-MQLkKCgOJksrustULFf2jhAjJrP5bbfS2BQ6zgNKCc,927
 exaaiagnt/interface/tool_components/user_message_renderer.py,sha256=6gHJ1hG-pwcTsxLM7JuYZuaDu8cZ2MeOuUDF3LGy-4I,1432
 exaaiagnt/interface/tool_components/web_search_renderer.py,sha256=JnJa22ACIcRksfxxdenesUo8Th9cHSxo-fej9YcuYHs,911
-exaaiagnt/interface/tui.py,sha256=
+exaaiagnt/interface/tui.py,sha256=JqwCjKvqnm4C0Hz9A8oGPbzNAvficd5WqnxQ4WCedvI,49777
 exaaiagnt/interface/utils.py,sha256=xp6eDOC8C0c3cjt791S_jBDs1B-xp_ydIb74QnMLEt8,20219
-exaaiagnt/llm/__init__.py,sha256=
+exaaiagnt/llm/__init__.py,sha256=hUVixjSSIUtwIP2I5D_9e6Kdxhhunnajgxx_2DEYNww,1095
 exaaiagnt/llm/config.py,sha256=HQ0skwQxtHwiDLDWBCU1Fp4UoQ8tbrTNQw9s7JGVaiY,3303
 exaaiagnt/llm/fallback.py,sha256=oPS0PGRxEHnyyBgS4yP9zdwSf4JFJh4dYZ3g8OFwWEE,11413
-exaaiagnt/llm/llm.py,sha256=
+exaaiagnt/llm/llm.py,sha256=EEPte4O37YVhCrJleHS8uU5eaYw_WDXKIEwlK2USJgE,18606
+exaaiagnt/llm/llm_traffic_controller.py,sha256=HZ0OZcbfr5XLVSpuW8EQLn3gQht3rjY1n6SGRFpBZa0,12187
 exaaiagnt/llm/memory_compressor.py,sha256=_At7e5QlDv2vrUDUJMEwm4CjNJ2uGYQsOBhiHiQvVr8,7054
 exaaiagnt/llm/output_processor.py,sha256=JC3TtzYj9DJhJRuKzz_VV3WIwAyYhUdZeY1N9c4SzVw,13568
 exaaiagnt/llm/request_queue.py,sha256=niwJVzWyvNcOl9uVYABO55c6ZReMxY_GIUhKXh946dI,9195
 exaaiagnt/llm/utils.py,sha256=0Z0r6qo9IfUSOJt5FJsq3X-veXrA8A09frc2VIy-aS4,2540
 exaaiagnt/prompts/README.md,sha256=Svgjx8pO1W0aVzC9z0Fxs88-NBA1whEqU8zrGBDqO0M,3813
 exaaiagnt/prompts/__init__.py,sha256=nUs1powNe_6sWmM5_KkAIxZj97t5IiMMgNrL39WwUjg,5042
-exaaiagnt/prompts/auto_loader.py,sha256=
+exaaiagnt/prompts/auto_loader.py,sha256=SNCewvdqfsIXydOcBNVcEE_x_w_j0Pl7ebOeOSNJ94E,12008
 exaaiagnt/prompts/cloud/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+exaaiagnt/prompts/cloud/aws_cloud_security.jinja,sha256=ObimikftsoqGzxhrvI6pQLVRQ4MmkJpjwexORS7P7uo,6768
 exaaiagnt/prompts/coordination/root_agent.jinja,sha256=05VUZKnBX_7-FjEYilEq_OJG4u01_56bf4mzGNbFtJc,1996
 exaaiagnt/prompts/custom/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 exaaiagnt/prompts/frameworks/fastapi.jinja,sha256=zXa08RDG9tVQu6MZetKCcOUPuDDKJJF1anTei7_2TZg,10200
+exaaiagnt/prompts/frameworks/modern_js_frameworks.jinja,sha256=UmqpJeXF87wsBpU-Nfg7KK9ftUtz4ug9LXw_2g3RXLk,5752
 exaaiagnt/prompts/frameworks/nextjs.jinja,sha256=AhfKOUl2bQzRWqqCw5lrjrXaKIkZ3CEEjVJwPJF8nP8,8175
 exaaiagnt/prompts/protocols/graphql.jinja,sha256=Tm538OmlFOJvuOwd4MXYQ4KYR7k4bJ4r-z4yoFcF6-8,10454
 exaaiagnt/prompts/reconnaissance/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -74,6 +77,7 @@ exaaiagnt/prompts/vulnerabilities/privilege_escalation.jinja,sha256=XY0atQ4nEkGs
 exaaiagnt/prompts/vulnerabilities/prototype_pollution.jinja,sha256=g-I6ouqaZpxqoTa4MyGHaUKxwlM-gFrPo_F1T8r_ASI,6486
 exaaiagnt/prompts/vulnerabilities/race_conditions.jinja,sha256=ZHxdZc7v7yvhmUWJK3P3kIzOz3oaYRb2OJEzG1momcg,8372
 exaaiagnt/prompts/vulnerabilities/rce.jinja,sha256=tgmocWH3Y9FJ3fBeR9DTR7ViBcuH5salGis8vywJ2h8,8425
+exaaiagnt/prompts/vulnerabilities/react2shell.jinja,sha256=ZO4lxSpyXm6pVgBw4kUNqWVttSBa04LhqYy6NTj3RQo,6862
 exaaiagnt/prompts/vulnerabilities/reconnaissance_osint.jinja,sha256=Bify6rb6cQGeP-q-PnD9GfOU2dunI0inATpf_IERaik,5552
 exaaiagnt/prompts/vulnerabilities/sql_injection.jinja,sha256=tdFjkbu5p_di0Zui95o2Eh1v-Ulvwg1qcENDNLswv1Q,8426
 exaaiagnt/prompts/vulnerabilities/ssrf.jinja,sha256=XMRAclYIyq0JhVSXopTsbZAm-wRxz-zZfDHdaFgVeiw,7899
@@ -90,7 +94,7 @@ exaaiagnt/runtime/tool_manager.py,sha256=CMUYlOC3Ez-9eqzz12tJO9tijdIBznFqCUwcT0h
 exaaiagnt/runtime/tool_server.py,sha256=e23TJYL5w32gMqLS9UJ0xw3XZ4lM38ETBvHVHKk3APU,6835
 exaaiagnt/telemetry/__init__.py,sha256=8QLHMvrVNLlGKezWTf3mTSmTOLIvDS5xVciry1KVS1Y,130
 exaaiagnt/telemetry/tracer.py,sha256=DZuQ-xGdPWB2FKEn_rRPxP1RO2_acXQRkHzuhApAI88,12582
-exaaiagnt/tools/__init__.py,sha256=
+exaaiagnt/tools/__init__.py,sha256=tFPqL3VKFumF1LgYMhvaecwTQ9b7vHxk0hRArPp3z2U,3402
 exaaiagnt/tools/agents_graph/__init__.py,sha256=FLJ2kGxXICY2pRKrC0sgIc3w3KhZo7VID7hbwYcgBfM,278
 exaaiagnt/tools/agents_graph/agents_graph_actions.py,sha256=ujBj3R3MpOmIVsN4T3nekVhreu60xA2EEg9rYXCdz2c,21103
 exaaiagnt/tools/agents_graph/agents_graph_actions_schema.xml,sha256=fczqDpNw2m58p2O_G0amBZso--EjEGZuK829lB-cXdE,12617
@@ -123,6 +127,8 @@ exaaiagnt/tools/registry.py,sha256=iBRwtiWLQr3fo1vSAOehWbIlr6cqnNPLB1dluXtsf8s,6
 exaaiagnt/tools/reporting/__init__.py,sha256=_cYxb3OP0vZtCwO_ExLBjhAn1ECaG-SH1Z4wfGDyT1Y,110
 exaaiagnt/tools/reporting/reporting_actions.py,sha256=aVEwfG5GgJ68bFJOicO_YD2yp5wCimxlnZzpXX3TJcQ,2200
 exaaiagnt/tools/reporting/reporting_actions_schema.xml,sha256=y_g0iuyBuCh79fvA0ri8fOPlXY7uUd-P-mdzXLUyIJg,1629
+exaaiagnt/tools/response_analyzer.py,sha256=ZBOGwGkH2VM96UH4ZrYhPYI8S_YZp0SG8EDThO7bAX0,10566
+exaaiagnt/tools/smart_fuzzer.py,sha256=FQ3RQ8IYFRTEAsKzEtfephCpHbdmDRMm068AsEQ2B-k,12286
 exaaiagnt/tools/terminal/__init__.py,sha256=xvflcrbLQ31o_K3cWFsIhTm7gxY5JF0nVnhOIadwFV0,80
 exaaiagnt/tools/terminal/terminal_actions.py,sha256=5z3OTF0YwZL0n_CVsx1yLmbSx7oUQg-xD0Wv8IXsIlw,892
 exaaiagnt/tools/terminal/terminal_actions_schema.xml,sha256=L7dzjvKNZpJA0qDGp1gCBuwXiY4mtjOq7T2tNDmUPA4,7257
@@ -131,12 +137,14 @@ exaaiagnt/tools/terminal/terminal_session.py,sha256=ExC2s0xyS6m157ENkWqAHzPp1Pv9
 exaaiagnt/tools/thinking/__init__.py,sha256=-v4fG4fyFkqsTSWspDtCT6IRlyRM8zeUwEM-kscaxDE,58
 exaaiagnt/tools/thinking/thinking_actions.py,sha256=Ynw1gBN4Z8iXGll0v9kObhkjQzc8-dEcHTWKeyFutAw,568
 exaaiagnt/tools/thinking/thinking_actions_schema.xml,sha256=otD4dOhQx4uyudLnjA_HIP6EmUS5NvKG4l3CVFrg8go,2756
+exaaiagnt/tools/tool_prompts.py,sha256=eQL7B8H8mo6d6mvtN_X9rmSfjwgz9Cuzfg7C7WH6TnU,5751
+exaaiagnt/tools/vuln_validator.py,sha256=jgfPOVogjfMSVaorlzXTUA56N1RgYrpF1QshKeHQBn8,13944
 exaaiagnt/tools/waf_bypass.py,sha256=71oPWnDHjn2EHi6I1SluZCKfqfXkA5j61oIkL5kNoSw,12047
 exaaiagnt/tools/web_search/__init__.py,sha256=m5PCHXqeNVraLRLNIbh54Z2N4Y_75d-ftqwyq3dbCd0,70
 exaaiagnt/tools/web_search/web_search_actions.py,sha256=jmlN2uIq8lRbhRnyaMQkC-44jhpfkLQZ_byYNlNNlOY,3111
 exaaiagnt/tools/web_search/web_search_actions_schema.xml,sha256=Ihc3Gv4LaPI_MzBbwZOt3y4pwg9xmtl8KfPNvFihEP4,4805
-exaai_agent-2.0.
-exaai_agent-2.0.
-exaai_agent-2.0.
-exaai_agent-2.0.
-exaai_agent-2.0.
+exaai_agent-2.0.6.dist-info/METADATA,sha256=RaTv_P1HP1FnB2bH9xWJCwILIVymtMDe_5SUxCCAGU8,12285
+exaai_agent-2.0.6.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+exaai_agent-2.0.6.dist-info/entry_points.txt,sha256=oEpWNyB806orvfN_GnSjsmoizJsfQoaU1RTYdGFJrWw,59
+exaai_agent-2.0.6.dist-info/licenses/LICENSE,sha256=RV6IGl0sWdfbbtJmjPr1w_qwGyGt2jv02PXsAzN_kNs,11460
+exaai_agent-2.0.6.dist-info/RECORD,,
exaaiagnt/interface/tui.py
CHANGED
@@ -80,7 +80,7 @@ class SplashScreen(Static): # type: ignore[misc]
     NEON_ORANGE = "#ff8800"
     SOFT_WHITE = "#e0e0e0"

-    #
+    # Enhanced ASCII Logo - ExaAi v2.0.4
     BANNER = r"""
    ███████╗██╗ ██╗ █████╗ █████╗ ██╗
    ██╔════╝╚██╗██╔╝██╔══██╗ ██╔══██╗██║
@@ -97,7 +97,6 @@ class SplashScreen(Static): # type: ignore[misc]
    ▒███ ░░ █ ░▒██ ▀█▄ ▒██ ▀█▄ ▒██▒
    ▒▓█ ▄ ░ █ █ ▒ ░██▄▄▄▄██ ░██▄▄▄▄██ ░██░
    ░▒████▒▒██▒ ▒██▒ ▓█ ▓██▒ ▓█ ▓██▒░██░
-   ░░ ▒░ ░▒▒ ░ ░▓ ░ ▒▒ ▓▒█░ ▒▒ ▓▒█░░▓
    """

     def __init__(self, *args: Any, **kwargs: Any) -> None:
@@ -145,6 +144,7 @@ class SplashScreen(Static): # type: ignore[misc]
             Align.center(self._build_version_text()),
             Align.center(self._build_tagline_text()),
             Align.center(self._build_features_text()),
+            Align.center(self._build_new_features_text()),
             Align.center(Text(" ")),
             Align.center(start_line.copy()),
         )
@@ -176,8 +176,22 @@ class SplashScreen(Static): # type: ignore[misc]
     def _build_version_text(self) -> Text:
         text = Text("v", style=Style(color=self.SOFT_WHITE, dim=True))
         text.append(f"{self._version}", style=Style(color=self.NEON_GREEN, bold=True))
+        text.append(" ", style=Style(color=self.SOFT_WHITE))
+        text.append("• Adaptive LLM • Smart Fuzzing • WAF Bypass", style=Style(color=self.SOFT_WHITE, dim=True))
         return text

+    def _build_new_features_text(self) -> Text:
+        """Build new features highlight for v2.0.4."""
+        text = Text("🔥 ", style=Style(color=self.NEON_ORANGE))
+        text.append("NEW: ", style=Style(color=self.NEON_ORANGE, bold=True))
+        text.append("React2Shell", style=Style(color=self.NEON_PINK))
+        text.append(" • ", style=Style(color=self.SOFT_WHITE, dim=True))
+        text.append("Cloud Security", style=Style(color=self.NEON_CYAN))
+        text.append(" • ", style=Style(color=self.SOFT_WHITE, dim=True))
+        text.append("Auto-Discovery", style=Style(color=self.NEON_GREEN))
+        return text
+
+
     def _build_tagline_text(self) -> Text:
         text = Text("🔒 ", style=Style(color=self.NEON_ORANGE))
         text.append("Advanced AI-Powered Cybersecurity Agent", style=Style(color=self.SOFT_WHITE, italic=True))
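The splash-screen changes above build each banner line by appending styled segments to a Rich `Text` object. A minimal standalone sketch of that pattern, using the colors from the diff (the `Console` setup and the collapsed feature string are illustrative, not part of the package):

```python
from rich.console import Console
from rich.style import Style
from rich.text import Text

NEON_ORANGE = "#ff8800"
SOFT_WHITE = "#e0e0e0"

# Same construction as _build_new_features_text: start with a styled Text,
# then append further segments, each carrying its own Style.
line = Text("🔥 ", style=Style(color=NEON_ORANGE))
line.append("NEW: ", style=Style(color=NEON_ORANGE, bold=True))
line.append("React2Shell • Cloud Security • Auto-Discovery",
            style=Style(color=SOFT_WHITE, dim=True))

Console().print(line)
```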
exaaiagnt/llm/__init__.py
CHANGED
@@ -14,6 +14,13 @@ from .output_processor import (
     OutputConfig,
     process_tool_output,
 )
+from .llm_traffic_controller import (
+    get_traffic_controller,
+    AdaptiveLLMController,
+    RequestPriority,
+    reset_traffic_controller,
+    with_traffic_control,
+)


 __all__ = [
@@ -29,6 +36,12 @@ __all__ = [
     "OutputProcessor",
     "OutputConfig",
     "process_tool_output",
+    # Traffic Controller
+    "get_traffic_controller",
+    "AdaptiveLLMController",
+    "RequestPriority",
+    "reset_traffic_controller",
+    "with_traffic_control",
 ]

 litellm._logging._disable_debugging()
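With these re-exports, callers can pull the traffic-control API from `exaaiagnt.llm` directly instead of reaching into the submodule. A brief sketch of that import surface (the tuning calls come from the controller module shown further down; the values are illustrative):

```python
from exaaiagnt.llm import (
    RequestPriority,
    get_traffic_controller,
    reset_traffic_controller,
)

controller = get_traffic_controller()  # process-wide singleton
controller.set_base_delay(2.0)         # minimum spacing between LLM calls, in seconds
print(controller.get_stats())          # queue size, retries, rate-limit hits, ...

reset_traffic_controller()             # drop the singleton, e.g. between test runs
```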
exaaiagnt/llm/llm.py
CHANGED
@@ -17,7 +17,7 @@ from litellm.utils import supports_prompt_caching

 from exaaiagnt.llm.config import LLMConfig
 from exaaiagnt.llm.memory_compressor import MemoryCompressor
-from exaaiagnt.llm.
+from exaaiagnt.llm.llm_traffic_controller import get_traffic_controller, RequestPriority
 from exaaiagnt.llm.utils import _truncate_to_first_function, parse_tool_invocations
 from exaaiagnt.prompts import load_prompt_modules
 from exaaiagnt.tools import get_tools_prompt
@@ -423,8 +423,19 @@ class LLM:
         else:
             completion_args["reasoning_effort"] = "high"

-
-
+        # Use Adaptive Traffic Controller for intelligent rate limiting
+        controller = get_traffic_controller()
+        agent_id = self.agent_id or "unknown_agent"
+
+        async def do_request():
+            from litellm import completion
+            return completion(**completion_args, stream=False)
+
+        response = await controller.queue_request(
+            do_request,
+            agent_id=agent_id,
+            priority=RequestPriority.NORMAL
+        )

         self._total_stats.requests += 1
         self._last_request_stats = RequestStats(requests=1)
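The change above wraps the `litellm.completion` call in a small coroutine and hands it to the controller, which queues it behind other agents' requests, spaces calls out, and retries on rate-limit errors. A reduced sketch of the same call pattern outside the `LLM` class (the `completion_args` dict and the `run_completion` helper are stand-ins for illustration):

```python
from litellm import completion

from exaaiagnt.llm.llm_traffic_controller import RequestPriority, get_traffic_controller


async def run_completion(completion_args: dict, agent_id: str = "unknown_agent"):
    controller = get_traffic_controller()

    async def do_request():
        # The controller awaits this coroutine with a timeout and handles retries.
        return completion(**completion_args, stream=False)

    return await controller.queue_request(
        do_request,
        agent_id=agent_id,
        priority=RequestPriority.NORMAL,
    )
```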
exaaiagnt/llm/llm_traffic_controller.py
ADDED
@@ -0,0 +1,351 @@
+"""
+Adaptive LLM Traffic Controller - Intelligent rate limiting and queue management.
+
+Features:
+- Single LLM request at a time (serialized)
+- Non-blocking queue for waiting agents
+- Automatic rate limit detection and delay
+- Tool-first execution mode
+- Automatic recovery with smart retry
+"""
+
+import asyncio
+import logging
+import time
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Callable, Optional
+from collections import deque
+
+
+logger = logging.getLogger(__name__)
+
+
+class RequestPriority(Enum):
+    """Priority levels for LLM requests."""
+    CRITICAL = 3  # Must execute ASAP (auth, security critical)
+    NORMAL = 2  # Standard agent requests
+    LOW = 1  # Background tasks, summaries
+
+
+@dataclass
+class QueuedRequest:
+    """A queued LLM request with metadata."""
+    request_id: str
+    agent_id: str
+    priority: RequestPriority
+    request_func: Callable
+    args: tuple
+    kwargs: dict
+    created_at: float = field(default_factory=time.time)
+    future: asyncio.Future = field(default_factory=lambda: asyncio.get_event_loop().create_future())
+
+
+class AdaptiveLLMController:
+    """
+    Adaptive Multi-Agent LLM Traffic Controller.
+
+    Implements:
+    1. Single concurrent LLM request (serialized calls)
+    2. Non-blocking queue for agents
+    3. Intelligent throttling with adaptive delays
+    4. Tool-first execution mode
+    5. Automatic recovery from rate limits
+    """
+
+    _instance: Optional["AdaptiveLLMController"] = None
+
+    def __new__(cls) -> "AdaptiveLLMController":
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+            cls._instance._initialized = False
+        return cls._instance
+
+    def __init__(self):
+        if self._initialized:
+            return
+
+        # Core state
+        self._queue: deque[QueuedRequest] = deque()
+        self._is_processing = False
+        self._lock = asyncio.Lock()
+
+        # Rate limiting state
+        self._last_request_time = 0.0
+        self._consecutive_rate_limits = 0
+        self._base_delay = 2.0  # Base delay between requests
+        self._current_delay = 2.0
+        self._max_delay = 30.0
+
+        # Statistics
+        self._total_requests = 0
+        self._successful_requests = 0
+        self._rate_limit_hits = 0
+        self._retries = 0
+
+        # Tool execution mode
+        self._tool_executing = False
+        self._tool_execution_lock = asyncio.Lock()
+
+        # Configuration
+        self._max_retries = 3
+        self._rate_limit_wait = 6.0  # Wait time after rate limit
+        self._enable_verbose_logging = False
+
+        self._initialized = True
+        logger.info("AdaptiveLLMController initialized - Traffic Control Enabled")
+
+    async def queue_request(
+        self,
+        request_func: Callable,
+        *args,
+        agent_id: str = "unknown",
+        priority: RequestPriority = RequestPriority.NORMAL,
+        **kwargs
+    ) -> Any:
+        """
+        Queue an LLM request and wait for result.
+
+        Non-blocking for the caller - they just await the result.
+        Internally, requests are processed one at a time.
+        """
+        request_id = f"{agent_id}_{int(time.time() * 1000)}"
+
+        # Create future for this request
+        loop = asyncio.get_event_loop()
+        future = loop.create_future()
+
+        queued = QueuedRequest(
+            request_id=request_id,
+            agent_id=agent_id,
+            priority=priority,
+            request_func=request_func,
+            args=args,
+            kwargs=kwargs,
+            future=future
+        )
+
+        # Add to queue
+        async with self._lock:
+            # Insert by priority
+            if priority == RequestPriority.CRITICAL:
+                self._queue.appendleft(queued)
+            else:
+                self._queue.append(queued)
+
+            queue_size = len(self._queue)
+            if queue_size > 1 and not self._enable_verbose_logging:
+                logger.debug(f"Request queued: {request_id}, queue size: {queue_size}")
+
+        # Start processing if not already running
+        asyncio.create_task(self._process_queue())
+
+        # Wait for result
+        return await future
+
+    async def _process_queue(self):
+        """Process queued requests one at a time."""
+        async with self._lock:
+            if self._is_processing:
+                return
+            self._is_processing = True
+
+        try:
+            while True:
+                # Get next request
+                async with self._lock:
+                    if not self._queue:
+                        break
+                    request = self._queue.popleft()
+
+                # Wait if tool is executing
+                if self._tool_executing:
+                    async with self._tool_execution_lock:
+                        pass  # Wait for tool to finish
+
+                # Execute request with rate limiting
+                await self._execute_request(request)
+
+        finally:
+            async with self._lock:
+                self._is_processing = False
+
+    async def _execute_request(self, request: QueuedRequest):
+        """Execute a single request with rate limiting and retry."""
+        self._total_requests += 1
+
+        # Adaptive delay
+        await self._apply_rate_limit_delay()
+
+        # Try request with retry
+        last_error = None
+        for attempt in range(self._max_retries + 1):
+            try:
+                # Execute the request
+                result = await self._call_with_timeout(
+                    request.request_func,
+                    *request.args,
+                    **request.kwargs
+                )
+
+                # Success!
+                self._successful_requests += 1
+                self._consecutive_rate_limits = 0
+                self._current_delay = self._base_delay  # Reset delay
+
+                request.future.set_result(result)
+                return
+
+            except Exception as e:
+                last_error = e
+                error_str = str(e).lower()
+
+                # Check for rate limit
+                if "rate" in error_str and "limit" in error_str:
+                    self._rate_limit_hits += 1
+                    self._consecutive_rate_limits += 1
+                    self._retries += 1
+
+                    # Increase delay exponentially
+                    self._current_delay = min(
+                        self._current_delay * 1.5,
+                        self._max_delay
+                    )
+
+                    if attempt < self._max_retries:
+                        wait_time = self._rate_limit_wait * (attempt + 1)
+                        logger.warning(
+                            f"Rate limit hit (attempt {attempt + 1}/{self._max_retries + 1}), "
+                            f"waiting {wait_time}s before retry"
+                        )
+                        await asyncio.sleep(wait_time)
+                        continue
+
+                # Other errors - log and continue to next attempt
+                if attempt < self._max_retries:
+                    logger.warning(f"Request failed (attempt {attempt + 1}): {e}")
+                    self._retries += 1
+                    await asyncio.sleep(2.0 * (attempt + 1))
+                    continue
+
+        # All retries failed
+        request.future.set_exception(last_error or Exception("Request failed"))
+
+    async def _call_with_timeout(self, func: Callable, *args, **kwargs) -> Any:
+        """Call function with timeout."""
+        timeout = kwargs.pop('timeout', 300)
+
+        if asyncio.iscoroutinefunction(func):
+            return await asyncio.wait_for(func(*args, **kwargs), timeout=timeout)
+        else:
+            # Run sync function in thread
+            loop = asyncio.get_event_loop()
+            return await asyncio.wait_for(
+                loop.run_in_executor(None, lambda: func(*args, **kwargs)),
+                timeout=timeout
+            )
+
+    async def _apply_rate_limit_delay(self):
+        """Apply intelligent rate limiting delay."""
+        now = time.time()
+        time_since_last = now - self._last_request_time
+
+        # Calculate required delay
+        required_delay = self._current_delay - time_since_last
+
+        if required_delay > 0:
+            # Add jitter to prevent thundering herd
+            jitter = required_delay * 0.1 * (0.5 - asyncio.get_event_loop().time() % 1)
+            total_delay = max(0, required_delay + jitter)
+
+            if total_delay > 0.5 and not self._enable_verbose_logging:
+                logger.debug(f"Rate limiting: waiting {total_delay:.2f}s")
+
+            await asyncio.sleep(total_delay)
+
+        self._last_request_time = time.time()
+
+    # Tool execution mode
+    async def enter_tool_mode(self):
+        """Enter tool-first execution mode - pause LLM calls."""
+        await self._tool_execution_lock.acquire()
+        self._tool_executing = True
+
+    async def exit_tool_mode(self):
+        """Exit tool mode - resume LLM calls."""
+        self._tool_executing = False
+        if self._tool_execution_lock.locked():
+            self._tool_execution_lock.release()
+
+    def is_tool_executing(self) -> bool:
+        """Check if tool is currently executing."""
+        return self._tool_executing
+
+    # Statistics
+    def get_stats(self) -> dict[str, Any]:
+        """Get traffic controller statistics."""
+        return {
+            "total_requests": self._total_requests,
+            "successful_requests": self._successful_requests,
+            "rate_limit_hits": self._rate_limit_hits,
+            "retries": self._retries,
+            "queue_size": len(self._queue),
+            "current_delay": self._current_delay,
+            "consecutive_rate_limits": self._consecutive_rate_limits,
+            "is_processing": self._is_processing,
+            "tool_executing": self._tool_executing,
+        }
+
+    def reset_stats(self):
+        """Reset statistics."""
+        self._total_requests = 0
+        self._successful_requests = 0
+        self._rate_limit_hits = 0
+        self._retries = 0
+
+    def set_base_delay(self, delay: float):
+        """Set base delay between requests."""
+        self._base_delay = delay
+        self._current_delay = delay
+
+    def set_verbose(self, enabled: bool):
+        """Enable/disable verbose logging."""
+        self._enable_verbose_logging = enabled
+
+
+# Global instance
+_controller: Optional[AdaptiveLLMController] = None
+
+
+def get_traffic_controller() -> AdaptiveLLMController:
+    """Get or create the global traffic controller."""
+    global _controller
+    if _controller is None:
+        _controller = AdaptiveLLMController()
+    return _controller
+
+
+def reset_traffic_controller():
+    """Reset the global traffic controller."""
+    global _controller
+    _controller = None
+
+
+# Convenience decorator for LLM requests
+def with_traffic_control(priority: RequestPriority = RequestPriority.NORMAL):
+    """Decorator to route LLM requests through traffic controller."""
+    def decorator(func: Callable):
+        async def wrapper(*args, agent_id: str = "unknown", **kwargs):
+            controller = get_traffic_controller()
+            return await controller.queue_request(
+                func, *args,
+                agent_id=agent_id,
+                priority=priority,
+                **kwargs
+            )
+        return wrapper
+    return decorator
+
+
+# Export confirmation
+logger.info("Adaptive LLM Traffic Control Module Loaded")
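Besides `queue_request`, the module ships a `with_traffic_control` decorator that routes any coroutine through the same serialized queue. A small usage sketch, assuming an async call site that accepts the `agent_id` keyword the wrapper forwards (the `ask_llm` function and its prompts are hypothetical):

```python
import asyncio

from exaaiagnt.llm.llm_traffic_controller import RequestPriority, with_traffic_control


@with_traffic_control(priority=RequestPriority.LOW)
async def ask_llm(prompt: str) -> str:
    # Placeholder body; a real call site would invoke the model here.
    return f"echo: {prompt}"


async def main() -> None:
    # Both calls are queued; the controller executes them one at a time,
    # applying its adaptive delay and rate-limit retries between requests.
    answers = await asyncio.gather(
        ask_llm("summarize findings", agent_id="recon_agent"),
        ask_llm("draft report section", agent_id="report_agent"),
    )
    print(answers)


asyncio.run(main())
```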
exaaiagnt/prompts/auto_loader.py
CHANGED
@@ -172,6 +172,44 @@ MODULE_PATTERNS = {
         "keywords": ["recon", "reconnaissance", "enumerate", "discover", "fingerprint", "osint"],
         "domain_only": True,
     },
+
+    # React2Shell - RSC Deserialization RCE (CVE-2025-55182)
+    "react2shell": {
+        "url_patterns": [
+            r"/_next/",
+            r"/_next/static",
+            r"/_actions",
+            r"/api/",
+        ],
+        "keywords": ["react", "next.js", "nextjs", "vercel", "rsc", "server components", "app router"],
+        "header_patterns": ["x-nextjs", "x-vercel", "x-powered-by.*next"],
+    },
+
+    # Modern JS Frameworks Security
+    "modern_js_frameworks": {
+        "url_patterns": [
+            r"/_next/",
+            r"/_nuxt/",
+            r"/_svelte",
+            r"/__remix",
+            r"/_astro/",
+        ],
+        "keywords": ["next.js", "nuxt", "sveltekit", "remix", "astro", "react", "vue", "svelte"],
+    },
+
+    # AWS/Cloud Security
+    "aws_cloud_security": {
+        "url_patterns": [
+            r"\.amazonaws\.com",
+            r"\.s3\.",
+            r"\.azure\.",
+            r"\.blob\.core",
+            r"\.cloudfront\.",
+            r"\.appspot\.com",
+            r"\.storage\.googleapis",
+        ],
+        "keywords": ["aws", "s3", "ec2", "lambda", "azure", "gcp", "cloud", "bucket", "metadata"],
+    },
 }


@@ -179,6 +217,10 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
     """
     Automatically detect which prompt modules should be loaded based on target URL and instruction.

+    This function:
+    1. Checks defined patterns in MODULE_PATTERNS
+    2. Auto-discovers ALL .jinja files and matches by filename keywords
+
     Args:
         target: The target URL or domain
         instruction: The user's instruction/task description
@@ -197,6 +239,7 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
     parsed = urlparse(target if "://" in target else f"https://{target}")
     url_path = parsed.path.lower()

+    # 1. Check defined patterns (MODULE_PATTERNS)
     for module_name, patterns in MODULE_PATTERNS.items():
         should_load = False

@@ -220,6 +263,9 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
         if should_load:
             detected_modules.add(module_name)

+    # 2. AUTO-DISCOVER: Scan all .jinja files and match by filename
+    detected_modules.update(_auto_discover_modules(combined_text))
+
     # Always include base modules for comprehensive scans
     if any(kw in instruction_lower for kw in ["full", "comprehensive", "thorough", "complete"]):
         detected_modules.update(["sql_injection", "xss", "authentication_jwt"])
@@ -227,6 +273,64 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
     return list(detected_modules)


+def _auto_discover_modules(search_text: str) -> Set[str]:
+    """
+    Auto-discover modules by scanning all .jinja files and matching by filename.
+
+    Any new .jinja file will be automatically discovered and used if its name
+    or keywords from name appear in the target/instruction.
+
+    Example: 'react2shell.jinja' will be loaded if 'react' or 'shell' appears in target.
+    """
+    from pathlib import Path
+
+    discovered = set()
+    prompts_dir = Path(__file__).parent
+
+    # Get all available modules
+    for category_dir in prompts_dir.iterdir():
+        if not category_dir.is_dir() or category_dir.name.startswith("__"):
+            continue
+
+        for jinja_file in category_dir.glob("*.jinja"):
+            module_name = jinja_file.stem  # e.g., "react2shell"
+
+            # Skip if already in MODULE_PATTERNS (already handled)
+            if module_name in MODULE_PATTERNS:
+                continue
+
+            # Generate keywords from filename
+            # "react2shell" -> ["react", "shell", "react2shell"]
+            # "sql_injection" -> ["sql", "injection"]
+            # "modern_js_frameworks" -> ["modern", "js", "frameworks"]
+            keywords = _extract_keywords_from_name(module_name)
+
+            # Check if any keyword matches
+            for keyword in keywords:
+                if len(keyword) >= 3 and keyword in search_text:  # Min 3 chars to avoid false positives
+                    discovered.add(module_name)
+                    break
+
+    return discovered
+
+
+def _extract_keywords_from_name(name: str) -> List[str]:
+    """Extract searchable keywords from a module name."""
+    keywords = [name]  # Full name
+
+    # Split by underscore
+    parts = name.split("_")
+    keywords.extend(parts)
+
+    # Split by numbers (react2shell -> react, shell)
+    import re
+    alpha_parts = re.split(r'\d+', name)
+    keywords.extend([p for p in alpha_parts if p])
+
+    # Lowercase all
+    return [k.lower() for k in keywords if len(k) >= 3]
+
+
 def get_recommended_modules(target: str, instruction: str = "") -> dict:
     """
     Get recommended modules with confidence scores.