devdox-ai-locust 0.1.2__tar.gz → 0.1.3.post1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of devdox-ai-locust might be problematic. Click here for more details.
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/PKG-INFO +18 -5
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/README.md +17 -4
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/pyproject.toml +1 -1
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/cli.py +2 -2
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/config.py +1 -1
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/hybrid_loctus_generator.py +115 -35
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/PKG-INFO +18 -5
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/tests/test_cli.py +2 -2
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/tests/test_config.py +4 -4
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/tests/test_hybrid_loctus_generator.py +206 -12
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/LICENSE +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/setup.cfg +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/__init__.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/locust_generator.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/domain.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/locust.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/test_data.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/validation.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/workflow.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/py.typed +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/schemas/__init__.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/schemas/processing_result.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/base_workflow.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/config.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/custom_flows.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/endpoint_template.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/env.example.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/fallback_locust.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/locust.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/readme.md.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/requirement.txt.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/test_data.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/utils.py.j2 +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/__init__.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/file_creation.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/open_ai_parser.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/swagger_utils.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/SOURCES.txt +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/dependency_links.txt +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/entry_points.txt +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/requires.txt +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/top_level.txt +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/tests/test_data.py +0 -0
- {devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/tests/test_locust_generator.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: devdox_ai_locust
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.3.post1
|
|
4
4
|
Summary: AI-powered Locust load test generator from API documentation
|
|
5
5
|
Author-email: Hayat Bourji <hayat.bourgi@montyholding.com>
|
|
6
6
|
Maintainer-email: Hayat Bourji <hayat.bourgi@montyholding.com>
|
|
@@ -72,6 +72,21 @@ Dynamic: license-file
|
|
|
72
72
|
|
|
73
73
|
DevDox AI Locust automatically generates comprehensive Locust load testing scripts from your API documentation (OpenAPI/Swagger specs). Using advanced AI capabilities, it creates realistic test scenarios, handles complex authentication flows, and generates production-ready performance tests.
|
|
74
74
|
|
|
75
|
+
|
|
76
|
+
## 🆕 What's New in 0.1.3.post1
|
|
77
|
+
|
|
78
|
+
### Performance & Reliability Improvements
|
|
79
|
+
|
|
80
|
+
- **🚀 Asynchronous API Calls**: Migrated from `Together` to `AsyncTogether` for non-blocking API interactions, significantly improving generation speed and responsiveness
|
|
81
|
+
- **⚡ Enhanced Timeout Handling**: Implemented robust timeout logic with configurable retry mechanisms for better error resilience
|
|
82
|
+
- **🔧 Improved Code Extraction**: Enhanced `<code>` block parsing with detailed validation, multiple fallback scenarios, and better error messages
|
|
83
|
+
- **🛡️ Better Error Management**: Comprehensive error handling throughout the AI generation pipeline with graceful degradation
|
|
84
|
+
|
|
85
|
+
### Bug Fixes & Stability
|
|
86
|
+
|
|
87
|
+
- Fixed edge cases in code block extraction where malformed responses could cause generation failures
|
|
88
|
+
- Improved retry logic to handle transient API errors without interrupting the generation process
|
|
89
|
+
|
|
75
90
|
## ✨ Features
|
|
76
91
|
|
|
77
92
|
- 🤖 **AI-Enhanced Generation**: Uses Together AI to create intelligent, realistic load test scenarios
|
|
@@ -82,6 +97,7 @@ DevDox AI Locust automatically generates comprehensive Locust load testing scrip
|
|
|
82
97
|
- 🛠️ **Template-Based**: Highly customizable Jinja2 templates for different testing needs
|
|
83
98
|
- 🔄 **Hybrid Approach**: Combines rule-based generation with AI enhancement
|
|
84
99
|
- 📈 **Comprehensive Coverage**: Handles various HTTP methods, content types, and response scenarios
|
|
100
|
+
- ⚡ **Asynchronous Processing**: Fast, non-blocking test generation with async/await
|
|
85
101
|
|
|
86
102
|
## 🚀 Quick Start
|
|
87
103
|
|
|
@@ -121,12 +137,9 @@ echo "API_KEY=your_together_ai_api_key_here" > .env
|
|
|
121
137
|
# Generate from OpenAPI URL
|
|
122
138
|
devdox_ai_locust generate --openapi-url https://api.example.com/openapi.json --output ./tests
|
|
123
139
|
|
|
124
|
-
# Generate from local file
|
|
125
|
-
dal generate --openapi-file ./api-spec.yaml --output ./load-tests
|
|
126
|
-
|
|
127
140
|
# Generate with custom configuration
|
|
128
141
|
devdox_ai_locust generate \
|
|
129
|
-
https://petstore.swagger.io/
|
|
142
|
+
https://petstore.swagger.io/v3/swagger.json \
|
|
130
143
|
--output ./petstore-tests \
|
|
131
144
|
--together-api-key your_api_key \
|
|
132
145
|
|
|
@@ -8,6 +8,21 @@
|
|
|
8
8
|
|
|
9
9
|
DevDox AI Locust automatically generates comprehensive Locust load testing scripts from your API documentation (OpenAPI/Swagger specs). Using advanced AI capabilities, it creates realistic test scenarios, handles complex authentication flows, and generates production-ready performance tests.
|
|
10
10
|
|
|
11
|
+
|
|
12
|
+
## 🆕 What's New in 0.1.3.post1
|
|
13
|
+
|
|
14
|
+
### Performance & Reliability Improvements
|
|
15
|
+
|
|
16
|
+
- **🚀 Asynchronous API Calls**: Migrated from `Together` to `AsyncTogether` for non-blocking API interactions, significantly improving generation speed and responsiveness
|
|
17
|
+
- **⚡ Enhanced Timeout Handling**: Implemented robust timeout logic with configurable retry mechanisms for better error resilience
|
|
18
|
+
- **🔧 Improved Code Extraction**: Enhanced `<code>` block parsing with detailed validation, multiple fallback scenarios, and better error messages
|
|
19
|
+
- **🛡️ Better Error Management**: Comprehensive error handling throughout the AI generation pipeline with graceful degradation
|
|
20
|
+
|
|
21
|
+
### Bug Fixes & Stability
|
|
22
|
+
|
|
23
|
+
- Fixed edge cases in code block extraction where malformed responses could cause generation failures
|
|
24
|
+
- Improved retry logic to handle transient API errors without interrupting the generation process
|
|
25
|
+
|
|
11
26
|
## ✨ Features
|
|
12
27
|
|
|
13
28
|
- 🤖 **AI-Enhanced Generation**: Uses Together AI to create intelligent, realistic load test scenarios
|
|
@@ -18,6 +33,7 @@ DevDox AI Locust automatically generates comprehensive Locust load testing scrip
|
|
|
18
33
|
- 🛠️ **Template-Based**: Highly customizable Jinja2 templates for different testing needs
|
|
19
34
|
- 🔄 **Hybrid Approach**: Combines rule-based generation with AI enhancement
|
|
20
35
|
- 📈 **Comprehensive Coverage**: Handles various HTTP methods, content types, and response scenarios
|
|
36
|
+
- ⚡ **Asynchronous Processing**: Fast, non-blocking test generation with async/await
|
|
21
37
|
|
|
22
38
|
## 🚀 Quick Start
|
|
23
39
|
|
|
@@ -57,12 +73,9 @@ echo "API_KEY=your_together_ai_api_key_here" > .env
|
|
|
57
73
|
# Generate from OpenAPI URL
|
|
58
74
|
devdox_ai_locust generate --openapi-url https://api.example.com/openapi.json --output ./tests
|
|
59
75
|
|
|
60
|
-
# Generate from local file
|
|
61
|
-
dal generate --openapi-file ./api-spec.yaml --output ./load-tests
|
|
62
|
-
|
|
63
76
|
# Generate with custom configuration
|
|
64
77
|
devdox_ai_locust generate \
|
|
65
|
-
https://petstore.swagger.io/
|
|
78
|
+
https://petstore.swagger.io/v3/swagger.json \
|
|
66
79
|
--output ./petstore-tests \
|
|
67
80
|
--together-api-key your_api_key \
|
|
68
81
|
|
|
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
|
|
|
5
5
|
|
|
6
6
|
[project]
|
|
7
7
|
name = "devdox_ai_locust"
|
|
8
|
-
version = "0.1.
|
|
8
|
+
version = "0.1.3.post1"
|
|
9
9
|
description = "AI-powered Locust load test generator from API documentation"
|
|
10
10
|
readme = "README.md"
|
|
11
11
|
license = {text = "Apache-2.0" }
|
|
@@ -6,7 +6,7 @@ from datetime import datetime, timezone
|
|
|
6
6
|
from typing import Optional, Tuple, Union, List, Dict, Any
|
|
7
7
|
from rich.console import Console
|
|
8
8
|
from rich.table import Table
|
|
9
|
-
from together import
|
|
9
|
+
from together import AsyncTogether
|
|
10
10
|
|
|
11
11
|
from .hybrid_loctus_generator import HybridLocustGenerator
|
|
12
12
|
from .config import Settings
|
|
@@ -199,7 +199,7 @@ async def _generate_and_create_tests(
|
|
|
199
199
|
auth: bool = False,
|
|
200
200
|
) -> List[Dict[Any, Any]]:
|
|
201
201
|
"""Generate tests using AI and create test files"""
|
|
202
|
-
together_client =
|
|
202
|
+
together_client = AsyncTogether(api_key=api_key)
|
|
203
203
|
|
|
204
204
|
with console.status("[bold green]Generating Locust tests with AI..."):
|
|
205
205
|
generator = HybridLocustGenerator(ai_client=together_client)
|
|
@@ -19,7 +19,7 @@ import shutil
|
|
|
19
19
|
from devdox_ai_locust.utils.open_ai_parser import Endpoint
|
|
20
20
|
from devdox_ai_locust.utils.file_creation import FileCreationConfig, SafeFileCreator
|
|
21
21
|
from devdox_ai_locust.locust_generator import LocustTestGenerator, TestDataConfig
|
|
22
|
-
from together import
|
|
22
|
+
from together import AsyncTogether
|
|
23
23
|
|
|
24
24
|
logger = logging.getLogger(__name__)
|
|
25
25
|
|
|
@@ -27,6 +27,15 @@ logger = logging.getLogger(__name__)
|
|
|
27
27
|
test_data_file_path = "test_data.py"
|
|
28
28
|
|
|
29
29
|
|
|
30
|
+
@dataclass
|
|
31
|
+
class ErrorClassification:
|
|
32
|
+
"""Classification of an error for retry logic"""
|
|
33
|
+
|
|
34
|
+
is_retryable: bool
|
|
35
|
+
backoff_seconds: float
|
|
36
|
+
error_type: str
|
|
37
|
+
|
|
38
|
+
|
|
30
39
|
@dataclass
|
|
31
40
|
class AIEnhancementConfig:
|
|
32
41
|
"""Configuration for AI enhancement"""
|
|
@@ -202,7 +211,7 @@ class HybridLocustGenerator:
|
|
|
202
211
|
|
|
203
212
|
def __init__(
|
|
204
213
|
self,
|
|
205
|
-
ai_client:
|
|
214
|
+
ai_client: AsyncTogether,
|
|
206
215
|
ai_config: Optional[AIEnhancementConfig] = None,
|
|
207
216
|
test_config: Optional[TestDataConfig] = None,
|
|
208
217
|
prompt_dir: str = "prompt",
|
|
@@ -211,7 +220,12 @@ class HybridLocustGenerator:
|
|
|
211
220
|
self.ai_config = ai_config or AIEnhancementConfig()
|
|
212
221
|
self.template_generator = LocustTestGenerator(test_config)
|
|
213
222
|
self.prompt_dir = self._find_project_root() / prompt_dir
|
|
223
|
+
self._api_semaphore = asyncio.Semaphore(5)
|
|
214
224
|
self._setup_jinja_env()
|
|
225
|
+
self.MAX_RETRIES = 3
|
|
226
|
+
self.RATE_LIMIT_BACKOFF = 10
|
|
227
|
+
self.NON_RETRYABLE_CODES = ["401", "403", "unauthorized", "forbidden"]
|
|
228
|
+
self.RATE_LIMIT_INDICATORS = ["429", "rate limit"]
|
|
215
229
|
|
|
216
230
|
def _find_project_root(self) -> Path:
|
|
217
231
|
"""Find the project root by looking for setup.py, pyproject.toml, or .git"""
|
|
@@ -229,6 +243,45 @@ class HybridLocustGenerator:
|
|
|
229
243
|
autoescape=False,
|
|
230
244
|
)
|
|
231
245
|
|
|
246
|
+
def _classify_error(self, error: Exception, attempt: int) -> ErrorClassification:
|
|
247
|
+
"""
|
|
248
|
+
Classify an error to determine retry behavior.
|
|
249
|
+
|
|
250
|
+
Args:
|
|
251
|
+
error: The exception that occurred
|
|
252
|
+
attempt: Current attempt number (0-indexed)
|
|
253
|
+
|
|
254
|
+
Returns:
|
|
255
|
+
ErrorClassification with retry decision and backoff time
|
|
256
|
+
"""
|
|
257
|
+
error_str = str(error).lower()
|
|
258
|
+
|
|
259
|
+
# Non-retryable errors (auth/permission)
|
|
260
|
+
if any(code in error_str for code in self.NON_RETRYABLE_CODES):
|
|
261
|
+
logger.error(f"Authentication error, not retrying: {error}")
|
|
262
|
+
return ErrorClassification(
|
|
263
|
+
is_retryable=False, backoff_seconds=0, error_type="auth"
|
|
264
|
+
)
|
|
265
|
+
|
|
266
|
+
# Rate limit errors (retryable with longer backoff)
|
|
267
|
+
if any(indicator in error_str for indicator in self.RATE_LIMIT_INDICATORS):
|
|
268
|
+
logger.warning(f"Rate limit hit on attempt {attempt + 1}")
|
|
269
|
+
return ErrorClassification(
|
|
270
|
+
is_retryable=True,
|
|
271
|
+
backoff_seconds=self.RATE_LIMIT_BACKOFF,
|
|
272
|
+
error_type="rate_limit",
|
|
273
|
+
)
|
|
274
|
+
|
|
275
|
+
# Other retryable errors (exponential backoff)
|
|
276
|
+
logger.warning(
|
|
277
|
+
f"Retryable error on attempt {attempt + 1}: {type(error).__name__}"
|
|
278
|
+
)
|
|
279
|
+
return ErrorClassification(
|
|
280
|
+
is_retryable=True,
|
|
281
|
+
backoff_seconds=2**attempt, # Exponential: 1s, 2s, 4s
|
|
282
|
+
error_type="retryable",
|
|
283
|
+
)
|
|
284
|
+
|
|
232
285
|
async def generate_from_endpoints(
|
|
233
286
|
self,
|
|
234
287
|
endpoints: List[Endpoint],
|
|
@@ -573,10 +626,8 @@ class HybridLocustGenerator:
|
|
|
573
626
|
|
|
574
627
|
return ""
|
|
575
628
|
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
messages = [
|
|
629
|
+
def _build_messages(self, prompt: str) -> list[dict]:
|
|
630
|
+
return [
|
|
580
631
|
{
|
|
581
632
|
"role": "system",
|
|
582
633
|
"content": "You are an expert Python developer specializing in Locust load testing. Generate clean, production-ready code with proper error handling. "
|
|
@@ -586,30 +637,42 @@ class HybridLocustGenerator:
|
|
|
586
637
|
{"role": "user", "content": prompt},
|
|
587
638
|
]
|
|
588
639
|
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
640
|
+
async def _make_api_call(self, messages: list[dict]) -> Optional[str]:
|
|
641
|
+
"""Make API call - ONE job"""
|
|
642
|
+
async with self._api_semaphore:
|
|
643
|
+
api_call = self.ai_client.chat.completions.create(
|
|
644
|
+
model=self.ai_config.model,
|
|
645
|
+
messages=messages,
|
|
646
|
+
max_tokens=self.ai_config.max_tokens,
|
|
647
|
+
temperature=self.ai_config.temperature,
|
|
648
|
+
top_p=0.9,
|
|
649
|
+
top_k=40,
|
|
650
|
+
repetition_penalty=1.1,
|
|
651
|
+
)
|
|
652
|
+
|
|
653
|
+
# Wait for the API call with timeout
|
|
654
|
+
response = await asyncio.wait_for(
|
|
655
|
+
api_call,
|
|
656
|
+
timeout=self.ai_config.timeout,
|
|
657
|
+
)
|
|
658
|
+
if response.choices and response.choices[0].message:
|
|
659
|
+
content = response.choices[0].message.content.strip()
|
|
660
|
+
# Clean up the response
|
|
661
|
+
content = self._clean_ai_response(
|
|
662
|
+
self.extract_code_from_response(content)
|
|
603
663
|
)
|
|
664
|
+
return content
|
|
604
665
|
|
|
605
|
-
|
|
606
|
-
content = response.choices[0].message.content.strip()
|
|
666
|
+
return None
|
|
607
667
|
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
)
|
|
668
|
+
async def _call_ai_service(self, prompt: str) -> Optional[str]:
|
|
669
|
+
"""Call AI service with retry logic and validation"""
|
|
670
|
+
messages = self._build_messages(prompt)
|
|
612
671
|
|
|
672
|
+
for attempt in range(self.MAX_RETRIES): # Retry logic
|
|
673
|
+
try:
|
|
674
|
+
async with self._api_semaphore:
|
|
675
|
+
content = await self._make_api_call(messages)
|
|
613
676
|
if content:
|
|
614
677
|
return content
|
|
615
678
|
|
|
@@ -617,24 +680,41 @@ class HybridLocustGenerator:
|
|
|
617
680
|
logger.warning(f"AI service timeout on attempt {attempt + 1}")
|
|
618
681
|
|
|
619
682
|
except Exception as e:
|
|
620
|
-
|
|
683
|
+
classification = self._classify_error(e, attempt) # Helper 3
|
|
684
|
+
|
|
685
|
+
if not classification.is_retryable:
|
|
686
|
+
return ""
|
|
687
|
+
|
|
688
|
+
if attempt < self.MAX_RETRIES - 1:
|
|
689
|
+
await asyncio.sleep(classification.backoff_seconds)
|
|
690
|
+
|
|
691
|
+
continue
|
|
621
692
|
|
|
622
|
-
if attempt <
|
|
693
|
+
if attempt < self.MAX_RETRIES - 1:
|
|
623
694
|
await asyncio.sleep(2**attempt)
|
|
624
695
|
|
|
625
696
|
return ""
|
|
626
697
|
|
|
627
698
|
def extract_code_from_response(self, response_text: str) -> str:
|
|
628
699
|
# Extract content between <code> tags
|
|
700
|
+
pattern = r"<code>(.*?)</code>"
|
|
701
|
+
matches = re.findall(pattern, response_text, re.DOTALL)
|
|
629
702
|
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
703
|
+
if not matches:
|
|
704
|
+
logger.warning("No <code> tags found, using full response")
|
|
705
|
+
return response_text.strip()
|
|
706
|
+
|
|
707
|
+
content = max(matches, key=len).strip()
|
|
708
|
+
|
|
709
|
+
# Content too short - use full response
|
|
710
|
+
if not content or len(content) <= 10:
|
|
711
|
+
logger.warning(
|
|
712
|
+
f"Code in tags too short ({len(content)} chars), using full response"
|
|
713
|
+
)
|
|
714
|
+
return response_text.strip()
|
|
636
715
|
|
|
637
|
-
|
|
716
|
+
logger.debug(f"Extracted {len(content)} chars from <code> tags")
|
|
717
|
+
return str(content)
|
|
638
718
|
|
|
639
719
|
def _clean_ai_response(self, content: str) -> str:
|
|
640
720
|
"""Clean and validate AI response"""
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/PKG-INFO
RENAMED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: devdox_ai_locust
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.3.post1
|
|
4
4
|
Summary: AI-powered Locust load test generator from API documentation
|
|
5
5
|
Author-email: Hayat Bourji <hayat.bourgi@montyholding.com>
|
|
6
6
|
Maintainer-email: Hayat Bourji <hayat.bourgi@montyholding.com>
|
|
@@ -72,6 +72,21 @@ Dynamic: license-file
|
|
|
72
72
|
|
|
73
73
|
DevDox AI Locust automatically generates comprehensive Locust load testing scripts from your API documentation (OpenAPI/Swagger specs). Using advanced AI capabilities, it creates realistic test scenarios, handles complex authentication flows, and generates production-ready performance tests.
|
|
74
74
|
|
|
75
|
+
|
|
76
|
+
## 🆕 What's New in 0.1.3.post1
|
|
77
|
+
|
|
78
|
+
### Performance & Reliability Improvements
|
|
79
|
+
|
|
80
|
+
- **🚀 Asynchronous API Calls**: Migrated from `Together` to `AsyncTogether` for non-blocking API interactions, significantly improving generation speed and responsiveness
|
|
81
|
+
- **⚡ Enhanced Timeout Handling**: Implemented robust timeout logic with configurable retry mechanisms for better error resilience
|
|
82
|
+
- **🔧 Improved Code Extraction**: Enhanced `<code>` block parsing with detailed validation, multiple fallback scenarios, and better error messages
|
|
83
|
+
- **🛡️ Better Error Management**: Comprehensive error handling throughout the AI generation pipeline with graceful degradation
|
|
84
|
+
|
|
85
|
+
### Bug Fixes & Stability
|
|
86
|
+
|
|
87
|
+
- Fixed edge cases in code block extraction where malformed responses could cause generation failures
|
|
88
|
+
- Improved retry logic to handle transient API errors without interrupting the generation process
|
|
89
|
+
|
|
75
90
|
## ✨ Features
|
|
76
91
|
|
|
77
92
|
- 🤖 **AI-Enhanced Generation**: Uses Together AI to create intelligent, realistic load test scenarios
|
|
@@ -82,6 +97,7 @@ DevDox AI Locust automatically generates comprehensive Locust load testing scrip
|
|
|
82
97
|
- 🛠️ **Template-Based**: Highly customizable Jinja2 templates for different testing needs
|
|
83
98
|
- 🔄 **Hybrid Approach**: Combines rule-based generation with AI enhancement
|
|
84
99
|
- 📈 **Comprehensive Coverage**: Handles various HTTP methods, content types, and response scenarios
|
|
100
|
+
- ⚡ **Asynchronous Processing**: Fast, non-blocking test generation with async/await
|
|
85
101
|
|
|
86
102
|
## 🚀 Quick Start
|
|
87
103
|
|
|
@@ -121,12 +137,9 @@ echo "API_KEY=your_together_ai_api_key_here" > .env
|
|
|
121
137
|
# Generate from OpenAPI URL
|
|
122
138
|
devdox_ai_locust generate --openapi-url https://api.example.com/openapi.json --output ./tests
|
|
123
139
|
|
|
124
|
-
# Generate from local file
|
|
125
|
-
dal generate --openapi-file ./api-spec.yaml --output ./load-tests
|
|
126
|
-
|
|
127
140
|
# Generate with custom configuration
|
|
128
141
|
devdox_ai_locust generate \
|
|
129
|
-
https://petstore.swagger.io/
|
|
142
|
+
https://petstore.swagger.io/v3/swagger.json \
|
|
130
143
|
--output ./petstore-tests \
|
|
131
144
|
--together-api-key your_api_key \
|
|
132
145
|
|
|
@@ -438,7 +438,7 @@ class TestGenerateAndCreateTests:
|
|
|
438
438
|
"""Test test generation and creation functionality."""
|
|
439
439
|
|
|
440
440
|
@pytest.mark.asyncio
|
|
441
|
-
@patch("devdox_ai_locust.cli.
|
|
441
|
+
@patch("devdox_ai_locust.cli.AsyncTogether")
|
|
442
442
|
@patch("devdox_ai_locust.cli.HybridLocustGenerator")
|
|
443
443
|
async def test_generate_and_create_tests_success(
|
|
444
444
|
self,
|
|
@@ -450,7 +450,7 @@ class TestGenerateAndCreateTests:
|
|
|
450
450
|
):
|
|
451
451
|
"""Test successful test generation and creation."""
|
|
452
452
|
# Mock Together client
|
|
453
|
-
mock_client =
|
|
453
|
+
mock_client = AsyncMock()
|
|
454
454
|
mock_together_class.return_value = mock_client
|
|
455
455
|
|
|
456
456
|
# Mock generator
|
|
@@ -15,7 +15,7 @@ class TestSettings:
|
|
|
15
15
|
"""Test default settings values."""
|
|
16
16
|
test_settings = Settings(_env_file=".env.example")
|
|
17
17
|
|
|
18
|
-
assert test_settings.VERSION == "0.1.
|
|
18
|
+
assert test_settings.VERSION == "0.1.3.post1"
|
|
19
19
|
assert test_settings.API_KEY == ""
|
|
20
20
|
|
|
21
21
|
def test_settings_with_env_vars(self):
|
|
@@ -108,7 +108,7 @@ class TestSettings:
|
|
|
108
108
|
def test_global_settings_instance(self):
|
|
109
109
|
"""Test the global settings instance."""
|
|
110
110
|
assert isinstance(settings, Settings)
|
|
111
|
-
assert settings.VERSION == "0.1.
|
|
111
|
+
assert settings.VERSION == "0.1.3.post1"
|
|
112
112
|
|
|
113
113
|
|
|
114
114
|
class TestSettingsMethods:
|
|
@@ -120,7 +120,7 @@ class TestSettingsMethods:
|
|
|
120
120
|
original_version = test_settings.VERSION
|
|
121
121
|
|
|
122
122
|
# VERSION should be a class-level constant
|
|
123
|
-
assert original_version == "0.1.
|
|
123
|
+
assert original_version == "0.1.3.post1"
|
|
124
124
|
|
|
125
125
|
# Even if we try to change it, it should remain the same
|
|
126
126
|
# (depending on Pydantic implementation)
|
|
@@ -131,7 +131,7 @@ class TestSettingsMethods:
|
|
|
131
131
|
|
|
132
132
|
# Create new instance to verify class-level value
|
|
133
133
|
new_settings = Settings()
|
|
134
|
-
assert new_settings.VERSION == "0.1.
|
|
134
|
+
assert new_settings.VERSION == "0.1.3.post1"
|
|
135
135
|
|
|
136
136
|
def test_settings_field_types(self):
|
|
137
137
|
"""Test that settings fields have correct types."""
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/tests/test_hybrid_loctus_generator.py
RENAMED
|
@@ -12,6 +12,7 @@ from devdox_ai_locust.hybrid_loctus_generator import (
|
|
|
12
12
|
AIEnhancementConfig,
|
|
13
13
|
EnhancementResult,
|
|
14
14
|
EnhancementProcessor,
|
|
15
|
+
ErrorClassification,
|
|
15
16
|
)
|
|
16
17
|
from devdox_ai_locust.locust_generator import TestDataConfig
|
|
17
18
|
|
|
@@ -375,21 +376,31 @@ class TestHybridLocustGeneratorAsync:
|
|
|
375
376
|
assert "import locust" in result
|
|
376
377
|
|
|
377
378
|
@pytest.mark.asyncio
|
|
378
|
-
async def
|
|
379
|
-
"""Test AI service call
|
|
380
|
-
|
|
381
|
-
|
|
379
|
+
async def test_ai_call_with_timeout(mock_together_client):
|
|
380
|
+
"""Test AI service call that times out"""
|
|
381
|
+
|
|
382
|
+
async def mock_timeout(*args, **kwargs):
|
|
383
|
+
"""Simulate a timeout by sleeping longer than expected"""
|
|
384
|
+
await asyncio.sleep(10) # Long enough to trigger timeout
|
|
385
|
+
raise asyncio.TimeoutError("Simulated timeout")
|
|
386
|
+
|
|
387
|
+
mock_together_client.chat = Mock()
|
|
388
|
+
mock_together_client.chat.completions = Mock()
|
|
389
|
+
mock_together_client.chat.completions.create = AsyncMock(
|
|
390
|
+
side_effect=mock_timeout
|
|
391
|
+
)
|
|
382
392
|
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
393
|
+
generator = HybridLocustGenerator(
|
|
394
|
+
ai_client=mock_together_client,
|
|
395
|
+
ai_config=AIEnhancementConfig(timeout=1), # Short timeout
|
|
396
|
+
)
|
|
387
397
|
|
|
388
|
-
|
|
389
|
-
|
|
398
|
+
# Call should timeout and return empty string after retries
|
|
399
|
+
result = await generator._call_ai_service("test prompt")
|
|
390
400
|
|
|
391
|
-
|
|
392
|
-
|
|
401
|
+
assert result == ""
|
|
402
|
+
# Should have tried 3 times
|
|
403
|
+
assert mock_together_client.chat.completions.create.call_count == 3
|
|
393
404
|
|
|
394
405
|
@pytest.mark.asyncio
|
|
395
406
|
async def test_call_ai_service_with_retry(self, mock_together_client):
|
|
@@ -894,3 +905,186 @@ class TestHybridLocustGeneratorEdgeCases:
|
|
|
894
905
|
for result in results:
|
|
895
906
|
if not isinstance(result, Exception):
|
|
896
907
|
assert isinstance(result, str)
|
|
908
|
+
|
|
909
|
+
|
|
910
|
+
class TestErrorClassification:
|
|
911
|
+
"""Test ErrorClassification dataclass"""
|
|
912
|
+
|
|
913
|
+
def test_error_classification_creation(self):
|
|
914
|
+
"""Test creating ErrorClassification"""
|
|
915
|
+
classification = ErrorClassification(
|
|
916
|
+
is_retryable=True, backoff_seconds=2.0, error_type="rate_limit"
|
|
917
|
+
)
|
|
918
|
+
|
|
919
|
+
assert classification.is_retryable is True
|
|
920
|
+
assert classification.backoff_seconds == 2.0
|
|
921
|
+
assert classification.error_type == "rate_limit"
|
|
922
|
+
|
|
923
|
+
def test_non_retryable_classification(self):
|
|
924
|
+
"""Test non-retryable error classification"""
|
|
925
|
+
classification = ErrorClassification(
|
|
926
|
+
is_retryable=False, backoff_seconds=0, error_type="auth"
|
|
927
|
+
)
|
|
928
|
+
|
|
929
|
+
assert classification.is_retryable is False
|
|
930
|
+
assert classification.backoff_seconds == 0
|
|
931
|
+
|
|
932
|
+
|
|
933
|
+
class TestBuildMessages:
|
|
934
|
+
"""Test _build_messages method"""
|
|
935
|
+
|
|
936
|
+
def test_build_messages_structure(self, mock_together_client):
|
|
937
|
+
"""Test message structure"""
|
|
938
|
+
generator = HybridLocustGenerator(ai_client=mock_together_client)
|
|
939
|
+
|
|
940
|
+
messages = generator._build_messages("test prompt")
|
|
941
|
+
|
|
942
|
+
assert len(messages) == 2
|
|
943
|
+
assert messages[0]["role"] == "system"
|
|
944
|
+
assert messages[1]["role"] == "user"
|
|
945
|
+
assert messages[1]["content"] == "test prompt"
|
|
946
|
+
|
|
947
|
+
def test_build_messages_system_prompt(self, mock_together_client):
|
|
948
|
+
"""Test system prompt content"""
|
|
949
|
+
generator = HybridLocustGenerator(ai_client=mock_together_client)
|
|
950
|
+
|
|
951
|
+
messages = generator._build_messages("test")
|
|
952
|
+
|
|
953
|
+
system_content = messages[0]["content"]
|
|
954
|
+
assert "Locust load testing" in system_content
|
|
955
|
+
assert "<code>" in system_content
|
|
956
|
+
assert "DO NOT TRUNCATE" in system_content
|
|
957
|
+
|
|
958
|
+
|
|
959
|
+
class TestMakeApiCall:
|
|
960
|
+
"""Test _make_api_call method"""
|
|
961
|
+
|
|
962
|
+
@pytest.mark.asyncio
|
|
963
|
+
async def test_make_api_call_success(self, mock_together_client):
|
|
964
|
+
"""Test successful API call"""
|
|
965
|
+
generator = HybridLocustGenerator(ai_client=mock_together_client)
|
|
966
|
+
|
|
967
|
+
messages = [{"role": "user", "content": "test"}]
|
|
968
|
+
result = await generator._make_api_call(messages)
|
|
969
|
+
|
|
970
|
+
assert result is not None
|
|
971
|
+
assert "import locust" in result
|
|
972
|
+
|
|
973
|
+
@pytest.mark.asyncio
|
|
974
|
+
async def test_make_api_call_empty_response(self, mock_together_client):
|
|
975
|
+
"""Test API call with empty response"""
|
|
976
|
+
# Mock empty response
|
|
977
|
+
mock_response = Mock()
|
|
978
|
+
mock_response.choices = []
|
|
979
|
+
|
|
980
|
+
async def mock_create(*args, **kwargs):
|
|
981
|
+
await asyncio.sleep(0.01)
|
|
982
|
+
return mock_response
|
|
983
|
+
|
|
984
|
+
mock_together_client.chat = Mock()
|
|
985
|
+
mock_together_client.chat.completions = Mock()
|
|
986
|
+
mock_together_client.chat.completions.create = AsyncMock(
|
|
987
|
+
side_effect=mock_create
|
|
988
|
+
)
|
|
989
|
+
|
|
990
|
+
generator = HybridLocustGenerator(ai_client=mock_together_client)
|
|
991
|
+
|
|
992
|
+
messages = [{"role": "user", "content": "test"}]
|
|
993
|
+
result = await generator._make_api_call(messages)
|
|
994
|
+
|
|
995
|
+
assert result is None
|
|
996
|
+
|
|
997
|
+
|
|
998
|
+
class TestCallAIService:
    """Tests for _call_ai_service: success path, timeout, auth failure, retries."""

    @pytest.mark.asyncio
    async def test_call_ai_service_success(self, mock_together_client):
        """A single successful completion is returned without any retries."""
        gen = HybridLocustGenerator(ai_client=mock_together_client)

        answer = await gen._call_ai_service("Test prompt")

        assert answer is not None
        assert "import locust" in answer
        # Exactly one API round-trip on the happy path.
        assert mock_together_client.chat.completions.create.call_count == 1

    @pytest.mark.asyncio
    async def test_call_ai_service_with_timeout(self, mock_together_client):
        """Timeouts exhaust the retry budget and yield an empty string."""

        async def never_answers(*args, **kwargs):
            # Sleeps longer than the configured 1s timeout on every attempt.
            await asyncio.sleep(10)
            raise asyncio.TimeoutError("Simulated timeout")

        mock_together_client.chat = Mock()
        mock_together_client.chat.completions = Mock()
        mock_together_client.chat.completions.create = AsyncMock(
            side_effect=never_answers
        )

        gen = HybridLocustGenerator(
            ai_client=mock_together_client,
            ai_config=AIEnhancementConfig(timeout=1),
        )

        answer = await gen._call_ai_service("test prompt")

        assert answer == ""
        # All three retry attempts were consumed.
        assert mock_together_client.chat.completions.create.call_count == 3

    @pytest.mark.asyncio
    async def test_call_ai_service_auth_error_no_retry(self, mock_together_client):
        """A 401 authentication failure aborts immediately — no retries."""

        async def rejects_auth(*args, **kwargs):
            await asyncio.sleep(0.01)
            raise Exception("401 Unauthorized")

        mock_together_client.chat = Mock()
        mock_together_client.chat.completions = Mock()
        mock_together_client.chat.completions.create = AsyncMock(
            side_effect=rejects_auth
        )

        gen = HybridLocustGenerator(ai_client=mock_together_client)

        answer = await gen._call_ai_service("test prompt")

        assert answer == ""
        # Auth errors are terminal: a single attempt only.
        assert mock_together_client.chat.completions.create.call_count == 1

    @pytest.mark.asyncio
    async def test_call_ai_service_rate_limit_retry(self, mock_together_client):
        """429 responses are retried until an attempt finally succeeds."""
        attempts = {"count": 0}

        # Canned successful payload handed back on the third attempt.
        ok_message = Mock()
        ok_message.content = "<code>success_code</code>"
        ok_choice = Mock()
        ok_choice.message = ok_message
        ok_response = Mock()
        ok_response.choices = [ok_choice]

        async def rate_limited_then_ok(*args, **kwargs):
            attempts["count"] += 1
            await asyncio.sleep(0.01)
            # First two calls hit the rate limit; the third succeeds.
            if attempts["count"] < 3:
                raise Exception("429 Rate limit exceeded")
            return ok_response

        mock_together_client.chat = Mock()
        mock_together_client.chat.completions = Mock()
        mock_together_client.chat.completions.create = AsyncMock(
            side_effect=rate_limited_then_ok
        )

        gen = HybridLocustGenerator(ai_client=mock_together_client)

        answer = await gen._call_ai_service("test prompt")

        # The <code> wrapper is stripped from the returned payload.
        assert answer == "success_code"
        assert attempts["count"] == 3
File without changes
|
|
File without changes
|
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/locust_generator.py
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/domain.j2
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/locust.j2
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/test_data.j2
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/validation.j2
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/prompt/workflow.j2
RENAMED
|
File without changes
|
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/schemas/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/config.py.j2
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/locust.py.j2
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/readme.md.j2
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/templates/utils.py.j2
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/__init__.py
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/file_creation.py
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/open_ai_parser.py
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust/utils/swagger_utils.py
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/SOURCES.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/requires.txt
RENAMED
|
File without changes
|
{devdox_ai_locust-0.1.2 → devdox_ai_locust-0.1.3.post1}/src/devdox_ai_locust.egg-info/top_level.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|