exaai-agent 2.0.5__py3-none-any.whl → 2.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.8.dist-info}/METADATA +55 -52
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.8.dist-info}/RECORD +19 -11
- exaai_agent-2.0.8.dist-info/entry_points.txt +5 -0
- exaaiagnt/interface/main.py +92 -50
- exaaiagnt/interface/tui.py +17 -3
- exaaiagnt/llm/__init__.py +13 -0
- exaaiagnt/llm/llm.py +14 -3
- exaaiagnt/llm/llm_traffic_controller.py +351 -0
- exaaiagnt/prompts/auto_loader.py +104 -0
- exaaiagnt/prompts/cloud/aws_cloud_security.jinja +235 -0
- exaaiagnt/prompts/frameworks/modern_js_frameworks.jinja +194 -0
- exaaiagnt/prompts/vulnerabilities/react2shell.jinja +187 -0
- exaaiagnt/tools/__init__.py +58 -0
- exaaiagnt/tools/response_analyzer.py +294 -0
- exaaiagnt/tools/smart_fuzzer.py +286 -0
- exaaiagnt/tools/tool_prompts.py +210 -0
- exaaiagnt/tools/vuln_validator.py +412 -0
- exaai_agent-2.0.5.dist-info/entry_points.txt +0 -3
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.8.dist-info}/WHEEL +0 -0
- {exaai_agent-2.0.5.dist-info → exaai_agent-2.0.8.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Adaptive LLM Traffic Controller - Intelligent rate limiting and queue management.
|
|
3
|
+
|
|
4
|
+
Features:
|
|
5
|
+
- Single LLM request at a time (serialized)
|
|
6
|
+
- Non-blocking queue for waiting agents
|
|
7
|
+
- Automatic rate limit detection and delay
|
|
8
|
+
- Tool-first execution mode
|
|
9
|
+
- Automatic recovery with smart retry
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import asyncio
import functools
import logging
import time
from collections import deque
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Callable, Optional
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class RequestPriority(Enum):
    """Priority levels for LLM requests.

    Larger numeric values indicate more urgent work.
    """

    CRITICAL = 3  # Must execute ASAP (auth, security critical)
    NORMAL = 2    # Standard agent requests
    LOW = 1       # Background tasks, summaries
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class QueuedRequest:
    """A queued LLM request with metadata.

    The controller resolves ``future`` (with the call's result or the final
    exception) once the request has been executed; the enqueueing caller
    simply awaits it.
    """
    request_id: str        # e.g. "<agent_id>_<ms-timestamp>"
    agent_id: str          # Identifier of the requesting agent
    priority: RequestPriority
    request_func: Callable # Sync or async callable performing the LLM call
    args: tuple            # Positional args forwarded to request_func
    kwargs: dict           # Keyword args forwarded to request_func
    created_at: float = field(default_factory=time.time)
    # get_running_loop() (not the deprecated get_event_loop()) is correct
    # here: instances are only constructed inside coroutines, where a
    # running loop is guaranteed. get_event_loop() warns on 3.10+ and fails
    # on 3.12+ when no loop is set on the thread.
    future: asyncio.Future = field(default_factory=lambda: asyncio.get_running_loop().create_future())
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class AdaptiveLLMController:
    """
    Adaptive Multi-Agent LLM Traffic Controller.

    Implements:
    1. Single concurrent LLM request (serialized calls)
    2. Non-blocking queue for agents
    3. Intelligent throttling with adaptive delays
    4. Tool-first execution mode
    5. Automatic recovery from rate limits

    The class is a process-wide singleton: every construction returns the
    same instance, and initialization only runs once.
    """

    _instance: Optional["AdaptiveLLMController"] = None

    def __new__(cls) -> "AdaptiveLLMController":
        # Classic singleton: allocate on first use, then always hand out the
        # same object. NOTE(review): not guarded against concurrent creation
        # from multiple OS threads - assumed single-threaded setup; confirm.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        # __init__ runs on every construction of the singleton; bail out if
        # state was already set up.
        if self._initialized:
            return

        # Core state
        self._queue: deque[QueuedRequest] = deque()
        self._is_processing = False
        self._lock = asyncio.Lock()

        # Rate limiting state
        self._last_request_time = 0.0
        self._consecutive_rate_limits = 0
        self._base_delay = 2.0     # Base delay between requests (seconds)
        self._current_delay = 2.0  # Adaptive delay; grows on rate limits
        self._max_delay = 30.0     # Cap for the adaptive delay

        # Statistics
        self._total_requests = 0
        self._successful_requests = 0
        self._rate_limit_hits = 0
        self._retries = 0

        # Tool execution mode
        self._tool_executing = False
        self._tool_execution_lock = asyncio.Lock()

        # Configuration
        self._max_retries = 3
        self._rate_limit_wait = 6.0  # Wait time after rate limit (seconds)
        self._enable_verbose_logging = False

        self._initialized = True
        logger.info("AdaptiveLLMController initialized - Traffic Control Enabled")

    async def queue_request(
        self,
        request_func: Callable,
        *args,
        agent_id: str = "unknown",
        priority: RequestPriority = RequestPriority.NORMAL,
        **kwargs
    ) -> Any:
        """
        Queue an LLM request and wait for result.

        Non-blocking for the caller - they just await the returned value.
        Internally, requests are processed one at a time; CRITICAL-priority
        requests jump to the front of the queue.

        Args:
            request_func: Sync or async callable performing the LLM call.
            *args: Positional arguments forwarded to request_func.
            agent_id: Identifier of the requesting agent (used in logs/ids).
            priority: Queue priority for this request.
            **kwargs: Keyword arguments forwarded to request_func.

        Returns:
            Whatever request_func returns.

        Raises:
            The last error raised by request_func once retries are exhausted.
        """
        request_id = f"{agent_id}_{int(time.time() * 1000)}"

        # We are inside a coroutine, so get_running_loop() is guaranteed to
        # succeed and avoids the deprecated get_event_loop() behaviour.
        loop = asyncio.get_running_loop()
        future = loop.create_future()

        queued = QueuedRequest(
            request_id=request_id,
            agent_id=agent_id,
            priority=priority,
            request_func=request_func,
            args=args,
            kwargs=kwargs,
            future=future
        )

        # Add to queue
        async with self._lock:
            # Insert by priority
            if priority == RequestPriority.CRITICAL:
                self._queue.appendleft(queued)
            else:
                self._queue.append(queued)

            queue_size = len(self._queue)
            # NOTE(review): logs only when verbose logging is DISABLED; the
            # condition looks inverted but is preserved as-is (debug level).
            if queue_size > 1 and not self._enable_verbose_logging:
                logger.debug(f"Request queued: {request_id}, queue size: {queue_size}")

        # Kick the consumer; _process_queue exits immediately if another
        # task is already draining the queue.
        asyncio.create_task(self._process_queue())

        # Wait for result
        return await future

    async def _process_queue(self):
        """Drain the queue, executing requests strictly one at a time."""
        async with self._lock:
            if self._is_processing:
                return  # Another task is already the consumer.
            self._is_processing = True

        try:
            while True:
                # Pop the next request under the lock.
                async with self._lock:
                    if not self._queue:
                        break
                    request = self._queue.popleft()

                # If a tool is running, block until exit_tool_mode() releases
                # the lock, then release it again immediately.
                if self._tool_executing:
                    async with self._tool_execution_lock:
                        pass  # Wait for tool to finish

                # Execute request with rate limiting
                await self._execute_request(request)

        finally:
            async with self._lock:
                self._is_processing = False

    async def _execute_request(self, request: QueuedRequest):
        """Execute a single request with rate limiting and retry.

        Resolves request.future with the result, or with the last error if
        all retries fail. Rate-limit errors (detected by substring match on
        the exception text) grow the adaptive delay and trigger a longer
        wait before retrying.
        """
        self._total_requests += 1

        # Adaptive delay between consecutive LLM calls.
        await self._apply_rate_limit_delay()

        last_error = None
        for attempt in range(self._max_retries + 1):
            try:
                # Execute the request
                result = await self._call_with_timeout(
                    request.request_func,
                    *request.args,
                    **request.kwargs
                )

                # Success: reset adaptive state back to the base delay.
                self._successful_requests += 1
                self._consecutive_rate_limits = 0
                self._current_delay = self._base_delay

                # Guard against a caller that cancelled its await: calling
                # set_result() on a done/cancelled future raises
                # InvalidStateError, which would kill the queue consumer.
                if not request.future.done():
                    request.future.set_result(result)
                return

            except Exception as e:
                last_error = e
                error_str = str(e).lower()

                # Heuristic rate-limit detection from the error message.
                if "rate" in error_str and "limit" in error_str:
                    self._rate_limit_hits += 1
                    self._consecutive_rate_limits += 1
                    self._retries += 1

                    # Grow the inter-request delay exponentially, capped.
                    self._current_delay = min(
                        self._current_delay * 1.5,
                        self._max_delay
                    )

                    if attempt < self._max_retries:
                        wait_time = self._rate_limit_wait * (attempt + 1)
                        logger.warning(
                            f"Rate limit hit (attempt {attempt + 1}/{self._max_retries + 1}), "
                            f"waiting {wait_time}s before retry"
                        )
                        await asyncio.sleep(wait_time)
                        continue

                # Other errors - back off linearly and retry.
                if attempt < self._max_retries:
                    logger.warning(f"Request failed (attempt {attempt + 1}): {e}")
                    self._retries += 1
                    await asyncio.sleep(2.0 * (attempt + 1))
                    continue

        # All retries failed - propagate the last error to the caller
        # (unless it already gave up and cancelled the future).
        if not request.future.done():
            request.future.set_exception(last_error or Exception("Request failed"))

    async def _call_with_timeout(self, func: Callable, *args, **kwargs) -> Any:
        """Call func with a timeout in seconds (default 300).

        A 'timeout' keyword argument, if present, is consumed here and NOT
        forwarded to func. Sync callables are run in the default executor
        so they do not block the event loop.
        """
        timeout = kwargs.pop('timeout', 300)

        if asyncio.iscoroutinefunction(func):
            return await asyncio.wait_for(func(*args, **kwargs), timeout=timeout)
        else:
            # Run sync function in a worker thread.
            loop = asyncio.get_running_loop()
            return await asyncio.wait_for(
                loop.run_in_executor(None, lambda: func(*args, **kwargs)),
                timeout=timeout
            )

    async def _apply_rate_limit_delay(self):
        """Sleep just long enough to honor the current inter-request delay."""
        now = time.time()
        time_since_last = now - self._last_request_time

        # How much of the required spacing is still outstanding?
        required_delay = self._current_delay - time_since_last

        if required_delay > 0:
            # Small clock-derived jitter (roughly +/-5% of the delay) to
            # avoid thundering herd; monotonic clock avoids the deprecated
            # get_event_loop() call the original formula relied on.
            jitter = required_delay * 0.1 * (0.5 - time.monotonic() % 1)
            total_delay = max(0, required_delay + jitter)

            # NOTE(review): logs only when verbose logging is DISABLED; the
            # condition looks inverted but is preserved as-is (debug level).
            if total_delay > 0.5 and not self._enable_verbose_logging:
                logger.debug(f"Rate limiting: waiting {total_delay:.2f}s")

            await asyncio.sleep(total_delay)

        self._last_request_time = time.time()

    # Tool execution mode
    async def enter_tool_mode(self):
        """Enter tool-first execution mode - pause LLM calls."""
        await self._tool_execution_lock.acquire()
        self._tool_executing = True

    async def exit_tool_mode(self):
        """Exit tool mode - resume LLM calls."""
        self._tool_executing = False
        if self._tool_execution_lock.locked():
            self._tool_execution_lock.release()

    def is_tool_executing(self) -> bool:
        """Check if a tool is currently executing."""
        return self._tool_executing

    # Statistics
    def get_stats(self) -> dict[str, Any]:
        """Get a snapshot of traffic controller statistics."""
        return {
            "total_requests": self._total_requests,
            "successful_requests": self._successful_requests,
            "rate_limit_hits": self._rate_limit_hits,
            "retries": self._retries,
            "queue_size": len(self._queue),
            "current_delay": self._current_delay,
            "consecutive_rate_limits": self._consecutive_rate_limits,
            "is_processing": self._is_processing,
            "tool_executing": self._tool_executing,
        }

    def reset_stats(self):
        """Reset all counters (does not touch queue or delay state)."""
        self._total_requests = 0
        self._successful_requests = 0
        self._rate_limit_hits = 0
        self._retries = 0

    def set_base_delay(self, delay: float):
        """Set the base (and current) delay between requests, in seconds."""
        self._base_delay = delay
        self._current_delay = delay

    def set_verbose(self, enabled: bool):
        """Enable/disable verbose logging."""
        self._enable_verbose_logging = enabled
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
# Global instance
|
|
317
|
+
_controller: Optional[AdaptiveLLMController] = None
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def get_traffic_controller() -> AdaptiveLLMController:
    """Return the process-wide traffic controller, creating it on first use."""
    global _controller
    if _controller is None:
        # AdaptiveLLMController is itself a singleton, so this at most
        # constructs one underlying instance.
        _controller = AdaptiveLLMController()
    return _controller
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
def reset_traffic_controller():
    """Reset the global traffic controller.

    Clears both the module-level handle and the class-level singleton so the
    next get_traffic_controller() call builds a genuinely fresh controller.
    """
    global _controller
    _controller = None
    # Without this, AdaptiveLLMController() keeps returning the old,
    # already-initialized singleton and no state would actually be reset.
    AdaptiveLLMController._instance = None
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
# Convenience decorator for LLM requests
|
|
335
|
+
def with_traffic_control(priority: RequestPriority = RequestPriority.NORMAL):
    """Decorator to route LLM requests through the traffic controller.

    The wrapped callable becomes async and is executed via
    AdaptiveLLMController.queue_request, inheriting its serialization,
    rate limiting and retry behaviour.

    Args:
        priority: Queue priority used for every call to the wrapped function.
    """
    def decorator(func: Callable):
        # functools.wraps preserves __name__/__doc__/etc. of the wrapped
        # function; without it every decorated callable reports as 'wrapper'.
        @functools.wraps(func)
        async def wrapper(*args, agent_id: str = "unknown", **kwargs):
            controller = get_traffic_controller()
            return await controller.queue_request(
                func, *args,
                agent_id=agent_id,
                priority=priority,
                **kwargs
            )
        return wrapper
    return decorator
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
# Export confirmation
|
|
351
|
+
logger.info("Adaptive LLM Traffic Control Module Loaded")
|
exaaiagnt/prompts/auto_loader.py
CHANGED
|
@@ -172,6 +172,44 @@ MODULE_PATTERNS = {
|
|
|
172
172
|
"keywords": ["recon", "reconnaissance", "enumerate", "discover", "fingerprint", "osint"],
|
|
173
173
|
"domain_only": True,
|
|
174
174
|
},
|
|
175
|
+
|
|
176
|
+
# React2Shell - RSC Deserialization RCE (CVE-2025-55182)
|
|
177
|
+
"react2shell": {
|
|
178
|
+
"url_patterns": [
|
|
179
|
+
r"/_next/",
|
|
180
|
+
r"/_next/static",
|
|
181
|
+
r"/_actions",
|
|
182
|
+
r"/api/",
|
|
183
|
+
],
|
|
184
|
+
"keywords": ["react", "next.js", "nextjs", "vercel", "rsc", "server components", "app router"],
|
|
185
|
+
"header_patterns": ["x-nextjs", "x-vercel", "x-powered-by.*next"],
|
|
186
|
+
},
|
|
187
|
+
|
|
188
|
+
# Modern JS Frameworks Security
|
|
189
|
+
"modern_js_frameworks": {
|
|
190
|
+
"url_patterns": [
|
|
191
|
+
r"/_next/",
|
|
192
|
+
r"/_nuxt/",
|
|
193
|
+
r"/_svelte",
|
|
194
|
+
r"/__remix",
|
|
195
|
+
r"/_astro/",
|
|
196
|
+
],
|
|
197
|
+
"keywords": ["next.js", "nuxt", "sveltekit", "remix", "astro", "react", "vue", "svelte"],
|
|
198
|
+
},
|
|
199
|
+
|
|
200
|
+
# AWS/Cloud Security
|
|
201
|
+
"aws_cloud_security": {
|
|
202
|
+
"url_patterns": [
|
|
203
|
+
r"\.amazonaws\.com",
|
|
204
|
+
r"\.s3\.",
|
|
205
|
+
r"\.azure\.",
|
|
206
|
+
r"\.blob\.core",
|
|
207
|
+
r"\.cloudfront\.",
|
|
208
|
+
r"\.appspot\.com",
|
|
209
|
+
r"\.storage\.googleapis",
|
|
210
|
+
],
|
|
211
|
+
"keywords": ["aws", "s3", "ec2", "lambda", "azure", "gcp", "cloud", "bucket", "metadata"],
|
|
212
|
+
},
|
|
175
213
|
}
|
|
176
214
|
|
|
177
215
|
|
|
@@ -179,6 +217,10 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
|
|
|
179
217
|
"""
|
|
180
218
|
Automatically detect which prompt modules should be loaded based on target URL and instruction.
|
|
181
219
|
|
|
220
|
+
This function:
|
|
221
|
+
1. Checks defined patterns in MODULE_PATTERNS
|
|
222
|
+
2. Auto-discovers ALL .jinja files and matches by filename keywords
|
|
223
|
+
|
|
182
224
|
Args:
|
|
183
225
|
target: The target URL or domain
|
|
184
226
|
instruction: The user's instruction/task description
|
|
@@ -197,6 +239,7 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
|
|
|
197
239
|
parsed = urlparse(target if "://" in target else f"https://{target}")
|
|
198
240
|
url_path = parsed.path.lower()
|
|
199
241
|
|
|
242
|
+
# 1. Check defined patterns (MODULE_PATTERNS)
|
|
200
243
|
for module_name, patterns in MODULE_PATTERNS.items():
|
|
201
244
|
should_load = False
|
|
202
245
|
|
|
@@ -220,6 +263,9 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
|
|
|
220
263
|
if should_load:
|
|
221
264
|
detected_modules.add(module_name)
|
|
222
265
|
|
|
266
|
+
# 2. AUTO-DISCOVER: Scan all .jinja files and match by filename
|
|
267
|
+
detected_modules.update(_auto_discover_modules(combined_text))
|
|
268
|
+
|
|
223
269
|
# Always include base modules for comprehensive scans
|
|
224
270
|
if any(kw in instruction_lower for kw in ["full", "comprehensive", "thorough", "complete"]):
|
|
225
271
|
detected_modules.update(["sql_injection", "xss", "authentication_jwt"])
|
|
@@ -227,6 +273,64 @@ def detect_modules_from_target(target: str, instruction: str = "") -> List[str]:
|
|
|
227
273
|
return list(detected_modules)
|
|
228
274
|
|
|
229
275
|
|
|
276
|
+
def _auto_discover_modules(search_text: str) -> Set[str]:
    """
    Auto-discover prompt modules by scanning every category directory for
    .jinja templates and matching filename-derived keywords against the text.

    Any new .jinja file is picked up automatically: a template not already
    covered by MODULE_PATTERNS is selected when any keyword extracted from
    its stem (minimum three characters, to avoid false positives) occurs in
    *search_text*.  Example: 'react2shell.jinja' is loaded when 'react' or
    'shell' appears in the target/instruction text.
    """
    from pathlib import Path

    matches = set()
    base_dir = Path(__file__).parent

    for category in base_dir.iterdir():
        # Skip plain files and dunder directories such as __pycache__.
        if not category.is_dir() or category.name.startswith("__"):
            continue

        for template in category.glob("*.jinja"):
            stem = template.stem  # e.g., "react2shell"

            # Modules with explicit patterns are handled by the caller.
            if stem in MODULE_PATTERNS:
                continue

            # Keywords derived from the filename, e.g.
            # "react2shell" -> ["react", "shell", "react2shell"].
            if any(
                len(kw) >= 3 and kw in search_text
                for kw in _extract_keywords_from_name(stem)
            ):
                matches.add(stem)

    return matches
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
def _extract_keywords_from_name(name: str) -> List[str]:
|
|
318
|
+
"""Extract searchable keywords from a module name."""
|
|
319
|
+
keywords = [name] # Full name
|
|
320
|
+
|
|
321
|
+
# Split by underscore
|
|
322
|
+
parts = name.split("_")
|
|
323
|
+
keywords.extend(parts)
|
|
324
|
+
|
|
325
|
+
# Split by numbers (react2shell -> react, shell)
|
|
326
|
+
import re
|
|
327
|
+
alpha_parts = re.split(r'\d+', name)
|
|
328
|
+
keywords.extend([p for p in alpha_parts if p])
|
|
329
|
+
|
|
330
|
+
# Lowercase all
|
|
331
|
+
return [k.lower() for k in keywords if len(k) >= 3]
|
|
332
|
+
|
|
333
|
+
|
|
230
334
|
def get_recommended_modules(target: str, instruction: str = "") -> dict:
|
|
231
335
|
"""
|
|
232
336
|
Get recommended modules with confidence scores.
|