devdox-ai-locust 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of devdox-ai-locust might be problematic.
- devdox_ai_locust/__init__.py +9 -0
- devdox_ai_locust/cli.py +452 -0
- devdox_ai_locust/config.py +24 -0
- devdox_ai_locust/hybrid_loctus_generator.py +904 -0
- devdox_ai_locust/locust_generator.py +732 -0
- devdox_ai_locust/py.typed +0 -0
- devdox_ai_locust/schemas/__init__.py +0 -0
- devdox_ai_locust/schemas/processing_result.py +24 -0
- devdox_ai_locust/templates/base_workflow.py.j2 +180 -0
- devdox_ai_locust/templates/config.py.j2 +173 -0
- devdox_ai_locust/templates/custom_flows.py.j2 +95 -0
- devdox_ai_locust/templates/endpoint_template.py.j2 +34 -0
- devdox_ai_locust/templates/env.example.j2 +3 -0
- devdox_ai_locust/templates/fallback_locust.py.j2 +25 -0
- devdox_ai_locust/templates/locust.py.j2 +70 -0
- devdox_ai_locust/templates/readme.md.j2 +46 -0
- devdox_ai_locust/templates/requirement.txt.j2 +31 -0
- devdox_ai_locust/templates/test_data.py.j2 +276 -0
- devdox_ai_locust/templates/utils.py.j2 +335 -0
- devdox_ai_locust/utils/__init__.py +0 -0
- devdox_ai_locust/utils/file_creation.py +120 -0
- devdox_ai_locust/utils/open_ai_parser.py +431 -0
- devdox_ai_locust/utils/swagger_utils.py +94 -0
- devdox_ai_locust-0.1.1.dist-info/METADATA +424 -0
- devdox_ai_locust-0.1.1.dist-info/RECORD +29 -0
- devdox_ai_locust-0.1.1.dist-info/WHEEL +5 -0
- devdox_ai_locust-0.1.1.dist-info/entry_points.txt +3 -0
- devdox_ai_locust-0.1.1.dist-info/licenses/LICENSE +201 -0
- devdox_ai_locust-0.1.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,904 @@
"""
Hybrid Locust Test Generator

Combines reliable template-based generation with LLM enhancement for creativity
and domain-specific optimizations.
"""

import re
import asyncio
import logging
from typing import Dict, List, Any, Optional, Tuple
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from dataclasses import dataclass
import uuid
import shutil


from devdox_ai_locust.utils.open_ai_parser import Endpoint
from devdox_ai_locust.utils.file_creation import FileCreationConfig, SafeFileCreator
from devdox_ai_locust.locust_generator import LocustTestGenerator, TestDataConfig
from together import Together

logger = logging.getLogger(__name__)


test_data_file_path = "test_data.py"


@dataclass
class AIEnhancementConfig:
    """Configuration for AI enhancement"""

    model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
    max_tokens: int = 8000
    temperature: float = 0.3
    timeout: int = 60
    enhance_workflows: bool = True
    enhance_test_data: bool = True
    enhance_validation: bool = True
    create_domain_flows: bool = True
    update_main_locust: bool = True
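
# A minimal tuning sketch (hypothetical values, not part of the published
# wheel): lowering temperature and disabling the riskier rewrites yields a
# more deterministic enhancement pass.
CONSERVATIVE_AI_CONFIG = AIEnhancementConfig(
    temperature=0.1,
    update_main_locust=False,
    create_domain_flows=False,
)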

@dataclass
class EnhancementResult:
    """Result of AI enhancement"""

    success: bool
    enhanced_files: Dict[str, str]
    enhanced_directory_files: List[Dict[str, Any]]
    enhancements_applied: List[str]
    errors: List[str]
    processing_time: float


class EnhancementProcessor:
    """Handles individual enhancement operations"""

    def __init__(
        self,
        ai_config: Optional[AIEnhancementConfig],
        locust_generator: "HybridLocustGenerator",
    ) -> None:
        self.ai_config = ai_config
        self.locust_generator = locust_generator

    async def process_main_locust_enhancement(
        self,
        base_files: Dict[str, str],
        endpoints: List[Endpoint],
        api_info: Dict[str, Any],
    ) -> Tuple[Dict[str, str], List[str]]:
        """Process main locustfile enhancement"""
        enhanced_files = {}
        enhancements = []

        if self.ai_config and self.ai_config.update_main_locust:
            enhanced_content = await self.locust_generator._enhance_locustfile(
                base_files.get("locustfile.py", ""), endpoints, api_info
            )
            if enhanced_content:
                enhanced_files["locustfile.py"] = enhanced_content
                enhancements.append("main_locust_update")
        return enhanced_files, enhancements

    async def process_domain_flows_enhancement(
        self,
        endpoints: List[Endpoint],
        api_info: Dict[str, Any],
        custom_requirement: Optional[str] = "",
    ) -> Tuple[Dict[str, str], List[str]]:
        """Process domain flows enhancement"""
        enhanced_files = {}
        enhancements = []

        if self.ai_config and self.ai_config.create_domain_flows:
            domain_flows = await self.locust_generator._generate_domain_flows(
                endpoints, api_info, custom_requirement=custom_requirement
            )
            if domain_flows:
                enhanced_files["custom_flows.py"] = domain_flows
                enhancements.append("domain_flows")

        return enhanced_files, enhancements

    async def process_workflow_enhancements(
        self,
        base_files: Dict[str, str],
        directory_files: List[Dict[str, Any]],
        grouped_endpoints: Dict[str, List[Endpoint]],
    ) -> Tuple[List[Dict[str, Any]], List[str]]:
        """Process workflow enhancements"""
        enhanced_directory_files: List[Dict[str, Any]] = []
        enhancements: List[str] = []

        # Skip only when a config explicitly disables workflow enhancement
        if self.ai_config and not self.ai_config.enhance_workflows:
            return enhanced_directory_files, enhancements

        base_workflow_files = self.locust_generator.get_files_by_key(
            directory_files, "base_workflow.py"
        )
        base_workflow_content = ""
        if base_workflow_files:
            first_workflow = base_workflow_files[0]
            # Get the content from the dictionary - adjust key name as needed
            base_workflow_content = first_workflow.get("base_workflow.py", "")

        for workflow_item in directory_files:
            enhanced_workflow_item = await self._enhance_single_workflow(
                workflow_item, base_files, base_workflow_content, grouped_endpoints
            )
            if enhanced_workflow_item:
                enhanced_directory_files.append(enhanced_workflow_item["files"])
                enhancements.extend(enhanced_workflow_item["enhancements"])

        return enhanced_directory_files, enhancements

    async def _enhance_single_workflow(
        self,
        workflow_item: Dict[str, Any],
        base_files: Dict[str, str],
        base_workflow_content: str,
        grouped_endpoints: Dict[str, List[Endpoint]],
    ) -> Dict[str, Any] | None:
        """Enhance a single workflow file; returns on the first key that enhances successfully"""
        for key, value in workflow_item.items():
            workflow_key = key.replace("_workflow.py", "")
            endpoints_for_workflow = grouped_endpoints.get(workflow_key, [])
            auth_endpoints = grouped_endpoints.get("Authentication", [])
            workflow_endpoints_dict = {workflow_key: endpoints_for_workflow}
            enhanced_workflow = await self.locust_generator._enhance_workflows(
                base_content=value,
                test_data_content=base_files.get(test_data_file_path, ""),
                base_workflow=base_workflow_content,
                grouped_endpoints=workflow_endpoints_dict,
                auth_endpoints=auth_endpoints,
            )
            if enhanced_workflow:
                return {
                    "files": {key: enhanced_workflow},
                    "enhancements": [f"enhanced_workflows_{key}"],
                }

        return None

    async def process_test_data_enhancement(
        self, base_files: Dict[str, str], endpoints: List[Endpoint]
    ) -> Tuple[Dict[str, str], List[str]]:
        """Process test data enhancement"""
        enhanced_files = {}
        enhancements = []
        if self.ai_config and self.ai_config.enhance_test_data:
            enhanced_test_data = await self.locust_generator.enhance_test_data_file(
                base_files.get(test_data_file_path, ""), endpoints
            )
            if enhanced_test_data:
                enhanced_files[test_data_file_path] = enhanced_test_data
                enhancements.append("smart_test_data")
        return enhanced_files, enhancements

    async def process_validation_enhancement(
        self, base_files: Dict[str, str], endpoints: List[Endpoint]
    ) -> Tuple[Dict[str, str], List[str]]:
        """Process validation enhancement"""
        enhanced_files = {}
        enhancements = []
        if self.ai_config and self.ai_config.enhance_validation:
            enhanced_validation = await self.locust_generator._enhance_validation(
                base_files.get("utils.py", ""), endpoints
            )
            if enhanced_validation:
                enhanced_files["utils.py"] = enhanced_validation
                enhancements.append("advanced_validation")
        return enhanced_files, enhancements
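
# A minimal sketch (hypothetical helper, not part of the wheel) of how the
# (files, tags) pairs returned by the process_* hooks above are meant to be
# folded together by a caller:
def _merge_enhancement(
    merged_files: Dict[str, str],
    applied: List[str],
    result: Tuple[Dict[str, str], List[str]],
) -> None:
    files, tags = result
    merged_files.update(files)
    applied.extend(tags)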


class HybridLocustGenerator:
    """
    Hybrid generator that combines template-based reliability with AI creativity
    """

    def __init__(
        self,
        ai_client: Together,
        ai_config: Optional[AIEnhancementConfig] = None,
        test_config: Optional[TestDataConfig] = None,
        prompt_dir: str = "prompt",
    ):
        self.ai_client = ai_client
        self.ai_config = ai_config or AIEnhancementConfig()
        self.template_generator = LocustTestGenerator(test_config)
        self.prompt_dir = self._find_project_root() / prompt_dir
        self._setup_jinja_env()

    def _find_project_root(self) -> Path:
        """Return the directory containing this module; prompt templates are resolved relative to it"""
        current_path = Path(__file__).parent

        return current_path

    def _setup_jinja_env(self) -> None:
        """Set up the Jinja2 environment used to render prompt templates"""
        self.jinja_env = Environment(
            loader=FileSystemLoader(str(self.prompt_dir)),
            trim_blocks=True,
            lstrip_blocks=True,
            keep_trailing_newline=True,
            autoescape=False,
        )

    async def generate_from_endpoints(
        self,
        endpoints: List[Endpoint],
        api_info: Dict[str, Any],
        custom_requirement: Optional[str] = None,
        target_host: Optional[str] = None,
        include_auth: bool = True,
    ) -> Tuple[Dict[str, str], List[Dict[str, Any]]]:
        """
        Generate Locust tests using a hybrid approach:

        1. Generate a reliable base structure with templates
        2. Enhance with AI for domain-specific improvements
        3. Validate and merge results
        """
        start_time = asyncio.get_event_loop().time()

        try:
            # Step 1: Generate reliable base structure
            logger.info("🔧 Generating base test structure with templates...")
            base_files, directory_files, grouped_endpoints = (
                self.template_generator.generate_from_endpoints(
                    endpoints,
                    api_info,
                    include_auth=include_auth,
                    target_host=target_host,
                )
            )

            base_files = self.template_generator.fix_indent(base_files)
            # Step 2: Enhance with AI if available
            if self.ai_client and self._should_enhance(endpoints, api_info):
                logger.info("🤖 Enhancing tests with AI...")
                enhancement_result = await self._enhance_with_ai(
                    base_files,
                    endpoints,
                    api_info,
                    directory_files,
                    grouped_endpoints,
                    custom_requirement,
                )
                if enhancement_result.success:
                    logger.info(
                        f"✅ AI enhancements applied: {', '.join(enhancement_result.enhancements_applied)}"
                    )
                    return (
                        enhancement_result.enhanced_files,
                        enhancement_result.enhanced_directory_files,
                    )
                else:
                    logger.warning(
                        f"⚠️ AI enhancement failed, using template base: {', '.join(enhancement_result.errors)}"
                    )
            else:
                logger.info("📋 Using template-based generation only")

            processing_time = asyncio.get_event_loop().time() - start_time
            logger.info(f"⏱️ Generation completed in {processing_time:.2f}s")

            return base_files, directory_files

        except Exception as e:
            logger.error(f"Hybrid generation failed: {e}")

            return {}, []
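
    # Contract sketch (hypothetical call): on AI success the enhanced files are
    # returned; on AI failure the template base is returned unchanged; if the
    # template step itself raises, the method degrades to ({}, []).
    #
    #     files, extra = await generator.generate_from_endpoints(endpoints, api_info)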

    def _should_enhance(
        self, endpoints: List[Endpoint], api_info: Dict[str, Any]
    ) -> bool:
        """Determine if AI enhancement is worthwhile"""
        # Enhance if we have enough endpoints or complex schemas
        complex_endpoints = [
            ep
            for ep in endpoints
            if ep.request_body or len(ep.parameters) > 3 or len(ep.responses) > 2
        ]

        return (
            len(endpoints) >= 3  # enough endpoints for meaningful enhancement
            or len(complex_endpoints) >= 1  # has complex endpoints
            or self._detect_domain_patterns(endpoints, api_info)  # has recognizable domain patterns
        )

    def _detect_domain_patterns(
        self, endpoints: List[Endpoint], api_info: Dict[str, Any]
    ) -> bool:
        """Detect if API belongs to known domains that benefit from custom flows"""
        domain_keywords = {
            "ecommerce": ["product", "cart", "order", "payment", "checkout"],
            "user_management": ["user", "auth", "login", "register", "profile"],
            "content_management": ["post", "article", "comment", "media", "upload"],
            "financial": ["transaction", "account", "balance", "transfer"],
            "social": ["friend", "follow", "message", "notification", "feed"],
        }

        api_text = f"{api_info.get('title', '')} {api_info.get('description', '')}"
        endpoint_paths = " ".join([ep.path for ep in endpoints])
        combined_text = f"{api_text} {endpoint_paths}".lower()

        for keywords in domain_keywords.values():
            if any(keyword in combined_text for keyword in keywords):
                return True

        return False
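
    # Worked example (hypothetical spec): an API titled "Shop API" whose paths
    # include /cart and /checkout matches the "ecommerce" keyword set, so
    # _detect_domain_patterns returns True and enhancement proceeds even with
    # fewer than three endpoints.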

    async def _enhance_locustfile(
        self, base_content: str, endpoints: List[Any], api_info: Dict[str, Any]
    ) -> Optional[str]:
        """Enhance the main locustfile via the locust.j2 prompt template"""
        try:
            template = self.jinja_env.get_template("locust.j2")

            # Prepare context for template
            context = {
                "base_content": base_content,
                "endpoints_for_prompt": self._format_endpoints_for_prompt(
                    endpoints[:5]
                ),
                "api_info": api_info,
            }
            # Render enhanced content
            prompt = template.render(**context)
            enhanced_content = await self._call_ai_service(prompt)
            return enhanced_content
        except Exception as e:
            logger.error(f"Enhancement failed: {e}")
            return base_content

    async def _enhance_with_ai(
        self,
        base_files: Dict[str, str],
        endpoints: List[Endpoint],
        api_info: Dict[str, Any],
        directory_files: List[Dict[str, Any]],
        grouped_endpoints: Dict[str, List[Endpoint]],
        custom_requirement: Optional[str] = None,
    ) -> EnhancementResult:
        """Enhance base files with AI - refactored for reduced cognitive complexity"""
        start_time = asyncio.get_event_loop().time()

        try:
            enhancement_result = await self._process_all_enhancements(
                base_files,
                endpoints,
                api_info,
                directory_files,
                grouped_endpoints,
                custom_requirement,
            )

            processing_time = asyncio.get_event_loop().time() - start_time
            enhancement_result.processing_time = processing_time

            return enhancement_result

        except Exception as e:
            logger.error(f"AI enhancement failed: {e}")
            processing_time = asyncio.get_event_loop().time() - start_time

            return EnhancementResult(
                success=False,
                enhanced_files=base_files,
                enhancements_applied=[],
                enhanced_directory_files=[],
                errors=[str(e)],
                processing_time=processing_time,
            )

    async def _process_all_enhancements(
        self,
        base_files: Dict[str, str],
        endpoints: List[Endpoint],
        api_info: Dict[str, Any],
        directory_files: List[Dict[str, Any]],
        grouped_endpoints: Dict[str, List[Endpoint]],
        custom_requirement: Optional[str] = None,
    ) -> EnhancementResult:
        """Process all enhancements using the enhancement processor"""
        processor = EnhancementProcessor(self.ai_config, self)

        enhanced_files = base_files.copy()
        enhanced_directory_files = []
        enhancements_applied: List[str] = []
        errors = []
        # Process each enhancement type
        enhancement_tasks = [
            processor.process_main_locust_enhancement(base_files, endpoints, api_info),
            processor.process_domain_flows_enhancement(
                endpoints, api_info, custom_requirement
            ),
            processor.process_test_data_enhancement(base_files, endpoints),
            processor.process_validation_enhancement(base_files, endpoints),
        ]

        # Execute file-based enhancements concurrently
        file_enhancement_results = await asyncio.gather(
            *enhancement_tasks, return_exceptions=True
        )

        # Process results from file-based enhancements
        for result in file_enhancement_results:
            if isinstance(result, BaseException):
                errors.append(str(result))
                continue

            files, enhancements = result
            enhanced_files.update(files)
            enhancements_applied.extend(enhancements)

        # Process workflow enhancements separately (more complex logic)
        try:
            (
                workflow_files,
                workflow_enhancements,
            ) = await processor.process_workflow_enhancements(
                base_files, directory_files, grouped_endpoints
            )
            enhanced_directory_files.extend(workflow_files)
            enhancements_applied.extend(workflow_enhancements)
        except Exception as e:
            errors.append(f"Workflow enhancement error: {str(e)}")

        return EnhancementResult(
            success=len(errors) == 0,
            enhanced_files=enhanced_files,
            enhanced_directory_files=enhanced_directory_files,
            enhancements_applied=enhancements_applied,
            errors=errors,
            processing_time=0,  # Will be set by caller
        )
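
    # With return_exceptions=True, each gather slot holds either a
    # (files, tags) tuple or the exception instance raised by that task, which
    # is why results are type-checked against BaseException above.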

    async def _generate_domain_flows(
        self,
        endpoints: List[Endpoint],
        api_info: Dict[str, Any],
        custom_requirement: Optional[str] = "",
    ) -> Optional[str]:
        """Generate domain-specific user flows"""

        # Analyze endpoints to determine domain
        domain_analysis = self._analyze_api_domain(endpoints, api_info)
        try:
            template = self.jinja_env.get_template("domain.j2")
            # Render enhanced content
            prompt = template.render(
                domain_analysis=domain_analysis,
                custom_requirement=custom_requirement,
                endpoints=self._format_endpoints_for_prompt(endpoints),
            )

            enhanced_content = await self._call_ai_service(prompt)
            if enhanced_content:
                return enhanced_content
        except Exception as e:
            logger.warning(f"Domain flows generation failed: {e}")

        return ""

    def get_files_by_key(
        self, directory_files: List[Dict[str, Any]], target_key: str
    ) -> List[Dict[str, Any]]:
        """Return directory items that contain the specified key"""
        return [item for item in directory_files if target_key in item]
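
    # e.g. get_files_by_key([{"users_workflow.py": "..."}, {"base_workflow.py": "..."}],
    #                       "base_workflow.py") -> [{"base_workflow.py": "..."}]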

    async def _enhance_workflows(
        self,
        base_content: str,
        test_data_content: str,
        base_workflow: str,
        grouped_endpoints: Dict[str, List[Endpoint]],
        auth_endpoints: List[Endpoint],
    ) -> Optional[str]:
        """Enhance a workflow file via the workflow.j2 prompt template"""
        try:
            template = self.jinja_env.get_template("workflow.j2")

            # Render enhanced content; the misspelled "grouped_enpoints" key is
            # kept as spelled in the original release, since the workflow.j2
            # template may reference this exact variable name
            prompt = template.render(
                grouped_enpoints=grouped_endpoints,
                test_data_content=test_data_content,
                base_workflow=base_workflow,
                auth_endpoints=auth_endpoints,
                base_content=base_content,
            )
            enhanced_content = await self._call_ai_service(prompt)
            return enhanced_content
        except Exception as e:
            logger.warning(f"Workflow enhancement failed: {e}")

        return ""

    async def enhance_test_data_file(
        self, base_content: str, endpoints: List[Endpoint]
    ) -> Optional[str]:
        """Enhance test data generation with domain knowledge"""

        # Extract schema information
        schemas_info = self._extract_schema_patterns(endpoints)

        try:
            template = self.jinja_env.get_template("test_data.j2")

            # Prepare context for template
            context = {
                "base_content": base_content,
                "schemas_info": schemas_info,
                "endpoints": endpoints,
            }

            # Render enhanced content
            prompt = template.render(**context)

            enhanced_content = await self._call_ai_service(prompt)
            if enhanced_content and self._validate_python_code(enhanced_content):
                return enhanced_content
        except Exception as e:
            logger.warning(f"Test data enhancement failed: {e}")

        return ""

    async def _enhance_validation(
        self, base_content: str, endpoints: List[Endpoint]
    ) -> Optional[str]:
        """Enhance response validation with endpoint-specific checks"""

        validation_patterns = self._extract_validation_patterns(endpoints)
        try:
            template = self.jinja_env.get_template("validation.j2")

            # Render enhanced content
            prompt = template.render(
                base_content=base_content, validation_patterns=validation_patterns
            )
            enhanced_content = await self._call_ai_service(prompt)
            if enhanced_content:
                return enhanced_content
        except Exception as e:
            logger.warning(f"Validation enhancement failed: {e}")

        return ""

    async def _call_ai_service(self, prompt: str) -> Optional[str]:
        """Call AI service with retry logic and validation"""

        messages = [
            {
                "role": "system",
                "content": "You are an expert Python developer specializing in Locust load testing. Generate clean, production-ready code with proper error handling. "
                "Always return your code wrapped in <code></code> tags with no explanations outside the tags and DO NOT TRUNCATE THE CODE. "
                "Format: <code>your_python_code_here</code>",
            },
            {"role": "user", "content": prompt},
        ]

        for attempt in range(3):  # Retry logic
            try:
                response = await asyncio.wait_for(
                    asyncio.to_thread(
                        self.ai_client.chat.completions.create,
                        model=self.ai_config.model,
                        messages=messages,
                        max_tokens=self.ai_config.max_tokens,
                        temperature=self.ai_config.temperature,
                        top_p=0.9,
                        top_k=40,
                        repetition_penalty=1.1,
                    ),
                    timeout=self.ai_config.timeout,
                )

                if response.choices and response.choices[0].message:
                    content = response.choices[0].message.content.strip()

                    # Clean up the response
                    content = self._clean_ai_response(
                        self.extract_code_from_response(content)
                    )

                    if content:
                        return content

            except asyncio.TimeoutError:
                logger.warning(f"AI service timeout on attempt {attempt + 1}")

            except Exception as e:
                logger.warning(f"AI service error on attempt {attempt + 1}: {e}")

            if attempt < 2:  # Wait before retry
                await asyncio.sleep(2**attempt)

        return ""
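
    # Retries back off exponentially: 2**0 = 1s after the first failed attempt,
    # 2**1 = 2s after the second; the third attempt is the last, so no sleep
    # follows it.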

    def extract_code_from_response(self, response_text: str) -> str:
        """Extract content between <code> tags, falling back to the raw text"""
        code_match = re.search(r"<code>(.*?)</code>", response_text, re.DOTALL)
        if code_match:
            content = code_match.group(1).strip()
            # Ensure we actually captured something between the tags
            if content:
                return content

        return response_text.strip()
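
    # e.g. extract_code_from_response("<code>print('hi')</code>") -> "print('hi')"
    #      extract_code_from_response("no tags") -> "no tags"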

    def _clean_ai_response(self, content: str) -> str:
        """Clean and validate AI response"""
        # Remove markdown code blocks if present
        if content.startswith("```python") and content.endswith("```"):
            content = content[9:-3].strip()
        elif content.startswith("```") and content.endswith("```"):
            content = content[3:-3].strip()

        # Remove any explanatory text before/after code
        lines = content.split("\n")
        start_idx = 0
        end_idx = len(lines)

        # Find actual Python code start
        for i, line in enumerate(lines):
            if line.strip().startswith(
                ("import ", "from ", "class ", "def ", '"""', "'''")
            ):
                start_idx = i
                break

        # Find actual Python code end (remove trailing explanations)
        for i in range(len(lines) - 1, -1, -1):
            line = lines[i].strip()
            if (
                line
                and not line.startswith("#")
                and not line.lower().startswith(("note:", "this", "the "))
            ):
                end_idx = i + 1
                break

        return "\n".join(lines[start_idx:end_idx])

    def _analyze_api_domain(
        self, endpoints: List[Endpoint], api_info: Dict[str, Any]
    ) -> str:
        """Analyze API to determine domain and patterns"""
        analysis = []

        # API info analysis
        analysis.append(f"API Title: {api_info.get('title', 'Unknown')}")
        analysis.append(f"Description: {api_info.get('description', 'No description')}")

        # Endpoint analysis
        methods = [ep.method for ep in endpoints]
        paths = [ep.path for ep in endpoints]

        analysis.append(f"Total Endpoints: {len(endpoints)}")
        analysis.append(f"HTTP Methods: {', '.join(set(methods))}")
        analysis.append(f"Common Path Patterns: {self._extract_path_patterns(paths)}")

        # Resource analysis
        resources = self._extract_resources_from_paths(paths)
        analysis.append(f"Main Resources: {', '.join(resources[:5])}")

        return "\n".join(analysis)

    def _format_endpoints_for_prompt(self, endpoints: List[Endpoint]) -> str:
        """Format endpoints for AI prompt"""
        formatted = []
        for ep in endpoints:
            params = f"({len(ep.parameters)} params)" if ep.parameters else ""
            body = "(with body)" if ep.request_body else ""
            formatted.append(
                f"- {ep.method} {ep.path} {params} {body} - {ep.summary or 'No summary'}"
            )

        return "\n".join(formatted)
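
    # A formatted line looks like (hypothetical endpoint):
    #   - POST /users (2 params) (with body) - Create a user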

    def _extract_schema_patterns(self, endpoints: List[Endpoint]) -> str:
        """Extract common schema patterns from endpoints"""
        patterns = []

        for ep in endpoints:
            if ep.request_body and ep.request_body.schema:
                schema = ep.request_body.schema
                if schema.get("properties"):
                    fields = list(schema["properties"].keys())
                    patterns.append(f"{ep.path} ({ep.method}): {', '.join(fields[:5])}")

        return "\n".join(patterns[:10])  # Limit for token efficiency

    def _extract_validation_patterns(self, endpoints: List[Endpoint]) -> str:
        """Extract validation patterns needed for endpoints"""
        patterns = []

        for ep in endpoints:
            for response in ep.responses:
                if response.status_code.startswith("2"):  # Success responses
                    pattern = f"{ep.method} {ep.path} -> {response.status_code}"
                    if response.schema:
                        pattern += " (schema validation needed)"
                    patterns.append(pattern)

        return "\n".join(patterns[:10])

    def _analyze_performance_patterns(self, endpoints: List[Endpoint]) -> str:
        """Analyze endpoints for performance testing patterns"""
        analysis = []

        # Categorize endpoints by performance characteristics
        read_heavy = [ep for ep in endpoints if ep.method == "GET"]
        write_heavy = [ep for ep in endpoints if ep.method in ["POST", "PUT", "PATCH"]]
        bulk_candidates = [
            ep
            for ep in endpoints
            if "bulk" in ep.path.lower() or "batch" in ep.path.lower()
        ]

        analysis.append(
            f"Read-heavy endpoints: {len(read_heavy)} (good for load testing)"
        )
        analysis.append(
            f"Write-heavy endpoints: {len(write_heavy)} (good for stress testing)"
        )
        analysis.append(
            f"Bulk operation endpoints: {len(bulk_candidates)} (good for volume testing)"
        )

        # Identify endpoints that might be resource intensive
        complex_endpoints = [
            ep
            for ep in endpoints
            if ep.request_body
            and ep.request_body.schema
            and len(ep.request_body.schema.get("properties", {})) > 5
        ]
        analysis.append(
            f"Complex endpoints: {len(complex_endpoints)} (monitor for performance)"
        )

        return "\n".join(analysis)

    def _extract_path_patterns(self, paths: List[str]) -> str:
        """Extract common patterns from API paths"""
        patterns = set()
        for path in paths:
            # Extract patterns like /api/v1/{resource}
            parts = path.split("/")
            if len(parts) > 2:
                pattern = "/".join(parts[:3])
                patterns.add(pattern)

        return ", ".join(list(patterns)[:5])

    def _extract_resources_from_paths(self, paths: List[str]) -> List[str]:
        """Extract resource names from API paths"""
        resources = set()
        for path in paths:
            parts = [p for p in path.split("/") if p and not p.startswith("{")]
            for part in parts:
                if len(part) > 2 and part.isalpha():  # Likely a resource name
                    resources.add(part)

        return sorted(resources)
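
    # e.g. _extract_resources_from_paths(["/api/v1/users/{id}", "/api/v1/orders"])
    #      -> ["api", "orders", "users"]  ("v1" fails the isalpha() check)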

    async def _create_test_files_safely(
        self,
        test_files: Dict[str, str],
        output_path: Path,
        max_file_size: int = 1024 * 1024,
    ) -> List[dict]:
        """Create test files safely with reduced complexity"""

        if not test_files:
            return []

        # Setup
        config = FileCreationConfig()
        config.MAX_FILE_SIZE = max_file_size
        creator = SafeFileCreator(config)
        temp_dir = output_path / f"temp_{uuid.uuid4().hex[:8]}"

        try:
            return await self._process_file_creation(
                creator, test_files, output_path, temp_dir
            )
        finally:
            await self._cleanup_temp_directory(temp_dir)

    async def _process_file_creation(
        self,
        creator: SafeFileCreator,
        test_files: Dict[str, str],
        output_path: Path,
        temp_dir: Path,
    ) -> List[dict]:
        """Process the file creation workflow"""

        # Ensure directories exist
        output_path.mkdir(parents=True, exist_ok=True)
        temp_dir.mkdir(parents=True, exist_ok=True)

        # Prepare files in temp directory
        prepared_files = await self._prepare_files_in_temp(
            creator, test_files, temp_dir
        )

        if not prepared_files:
            return []

        # Move files atomically to final location
        return await creator.move_files_atomically(prepared_files, output_path)
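
    # The two-phase write (stage everything under a temp_<uuid> directory, then
    # move atomically) means a failure during preparation leaves output_path
    # untouched.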

    async def _prepare_files_in_temp(
        self, creator: SafeFileCreator, test_files: Dict[str, str], temp_dir: Path
    ) -> List[dict]:
        """Prepare all files in temporary directory"""

        prepared_files = []

        for filename, content in test_files.items():
            file_result = await self._prepare_single_file(
                creator, filename, content, temp_dir
            )
            if file_result:
                prepared_files.append(file_result)

        return prepared_files

    async def _prepare_single_file(
        self, creator: SafeFileCreator, filename: str, content: str, temp_dir: Path
    ) -> Optional[Dict[str, Any]]:
        """Prepare a single file, returning None on failure"""

        try:
            # Validate file
            is_valid, clean_filename, processed_content = creator.validate_file(
                filename, content
            )
            if not is_valid:
                return None

            # Create temp file
            file_info = await creator.create_temp_file(
                clean_filename, processed_content, temp_dir
            )
            logger.info(f"Prepared: {clean_filename} ({len(processed_content)} chars)")
            return file_info

        except Exception as e:
            logger.error(f"Failed to prepare file {filename}: {e}")
            return None

    async def _cleanup_temp_directory(self, temp_dir: Path) -> None:
        """Clean up temporary directory"""
        if temp_dir.exists():
            try:
                await asyncio.to_thread(shutil.rmtree, temp_dir, ignore_errors=True)
            except Exception as e:
                logger.warning(f"Failed to cleanup temp directory: {e}")

    def _validate_python_code(self, content: str) -> bool:
        """Validate Python code syntax"""
        try:
            compile(content, "<string>", "exec")
            return True
        except SyntaxError:
            return False