devdox-ai-locust 0.1.2__py3-none-any.whl → 0.1.3.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of devdox-ai-locust might be problematic.

devdox_ai_locust/cli.py CHANGED
@@ -6,7 +6,7 @@ from datetime import datetime, timezone
 from typing import Optional, Tuple, Union, List, Dict, Any
 from rich.console import Console
 from rich.table import Table
-from together import Together
+from together import AsyncTogether
 
 from .hybrid_loctus_generator import HybridLocustGenerator
 from .config import Settings
@@ -199,7 +199,7 @@ async def _generate_and_create_tests(
     auth: bool = False,
 ) -> List[Dict[Any, Any]]:
     """Generate tests using AI and create test files"""
-    together_client = Together(api_key=api_key)
+    together_client = AsyncTogether(api_key=api_key)
 
     with console.status("[bold green]Generating Locust tests with AI..."):
         generator = HybridLocustGenerator(ai_client=together_client)
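The practical effect of this change is that the Together client is now driven asynchronously: `AsyncTogether` exposes the same `chat.completions.create` call, but it returns an awaitable that must run inside an event loop. A minimal sketch of that usage pattern, outside this package (the API key, model name, and prompt below are placeholders, not values from the release):

```python
import asyncio

from together import AsyncTogether


async def main() -> None:
    # The async client mirrors the sync API, but calls must be awaited.
    client = AsyncTogether(api_key="YOUR_TOGETHER_API_KEY")  # placeholder key
    response = await client.chat.completions.create(
        model="meta-llama/Llama-3.3-70B-Instruct-Turbo",  # placeholder model
        messages=[{"role": "user", "content": "Write one Locust task."}],
        max_tokens=64,
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```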
devdox_ai_locust/config.py CHANGED

@@ -8,7 +8,7 @@ from pydantic_settings import BaseSettings
 class Settings(BaseSettings):
     """Application settings."""
 
-    VERSION: str = "0.1.2"
+    VERSION: str = "0.1.3.post1"
 
     API_KEY: str = ""  # Fallback for backward compatibility
 
devdox_ai_locust/hybrid_loctus_generator.py CHANGED

@@ -19,7 +19,7 @@ import shutil
 from devdox_ai_locust.utils.open_ai_parser import Endpoint
 from devdox_ai_locust.utils.file_creation import FileCreationConfig, SafeFileCreator
 from devdox_ai_locust.locust_generator import LocustTestGenerator, TestDataConfig
-from together import Together
+from together import AsyncTogether
 
 logger = logging.getLogger(__name__)
 
@@ -27,6 +27,15 @@ logger = logging.getLogger(__name__)
 test_data_file_path = "test_data.py"
 
 
+@dataclass
+class ErrorClassification:
+    """Classification of an error for retry logic"""
+
+    is_retryable: bool
+    backoff_seconds: float
+    error_type: str
+
+
 @dataclass
 class AIEnhancementConfig:
     """Configuration for AI enhancement"""
@@ -202,7 +211,7 @@ class HybridLocustGenerator:
 
     def __init__(
         self,
-        ai_client: Together,
+        ai_client: AsyncTogether,
        ai_config: Optional[AIEnhancementConfig] = None,
        test_config: Optional[TestDataConfig] = None,
        prompt_dir: str = "prompt",
@@ -211,7 +220,12 @@ class HybridLocustGenerator:
         self.ai_config = ai_config or AIEnhancementConfig()
         self.template_generator = LocustTestGenerator(test_config)
         self.prompt_dir = self._find_project_root() / prompt_dir
+        self._api_semaphore = asyncio.Semaphore(5)
         self._setup_jinja_env()
+        self.MAX_RETRIES = 3
+        self.RATE_LIMIT_BACKOFF = 10
+        self.NON_RETRYABLE_CODES = ["401", "403", "unauthorized", "forbidden"]
+        self.RATE_LIMIT_INDICATORS = ["429", "rate limit"]
 
     def _find_project_root(self) -> Path:
         """Find the project root by looking for setup.py, pyproject.toml, or .git"""
@@ -229,6 +243,45 @@ class HybridLocustGenerator:
             autoescape=False,
         )
 
+    def _classify_error(self, error: Exception, attempt: int) -> ErrorClassification:
+        """
+        Classify an error to determine retry behavior.
+
+        Args:
+            error: The exception that occurred
+            attempt: Current attempt number (0-indexed)
+
+        Returns:
+            ErrorClassification with retry decision and backoff time
+        """
+        error_str = str(error).lower()
+
+        # Non-retryable errors (auth/permission)
+        if any(code in error_str for code in self.NON_RETRYABLE_CODES):
+            logger.error(f"Authentication error, not retrying: {error}")
+            return ErrorClassification(
+                is_retryable=False, backoff_seconds=0, error_type="auth"
+            )
+
+        # Rate limit errors (retryable with longer backoff)
+        if any(indicator in error_str for indicator in self.RATE_LIMIT_INDICATORS):
+            logger.warning(f"Rate limit hit on attempt {attempt + 1}")
+            return ErrorClassification(
+                is_retryable=True,
+                backoff_seconds=self.RATE_LIMIT_BACKOFF,
+                error_type="rate_limit",
+            )
+
+        # Other retryable errors (exponential backoff)
+        logger.warning(
+            f"Retryable error on attempt {attempt + 1}: {type(error).__name__}"
+        )
+        return ErrorClassification(
+            is_retryable=True,
+            backoff_seconds=2**attempt,  # Exponential: 1s, 2s, 4s
+            error_type="retryable",
+        )
+
     async def generate_from_endpoints(
         self,
         endpoints: List[Endpoint],
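Read on its own, `_classify_error` buckets failures by substring-matching the error message: authentication and permission errors abort immediately, rate limits wait the fixed 10-second backoff, and everything else backs off exponentially (1 s, 2 s, 4 s). A standalone sketch of the same decision table, run against invented error messages to show where each lands:

```python
# Illustrative only: mirrors the string-matching rules above with made-up errors.
NON_RETRYABLE_CODES = ["401", "403", "unauthorized", "forbidden"]
RATE_LIMIT_INDICATORS = ["429", "rate limit"]


def classify(message: str, attempt: int) -> tuple[bool, float, str]:
    text = message.lower()
    if any(code in text for code in NON_RETRYABLE_CODES):
        return (False, 0.0, "auth")                   # do not retry
    if any(ind in text for ind in RATE_LIMIT_INDICATORS):
        return (True, 10.0, "rate_limit")             # fixed RATE_LIMIT_BACKOFF
    return (True, float(2**attempt), "retryable")     # 1 s, 2 s, 4 s


for msg in ("401 Unauthorized", "429 rate limit exceeded", "connection reset by peer"):
    print(msg, "->", classify(msg, attempt=0))
```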
@@ -573,10 +626,8 @@ class HybridLocustGenerator:
 
         return ""
 
-    async def _call_ai_service(self, prompt: str) -> Optional[str]:
-        """Call AI service with retry logic and validation"""
-
-        messages = [
+    def _build_messages(self, prompt: str) -> list[dict]:
+        return [
             {
                 "role": "system",
                 "content": "You are an expert Python developer specializing in Locust load testing. Generate clean, production-ready code with proper error handling. "
@@ -586,30 +637,42 @@ class HybridLocustGenerator:
             {"role": "user", "content": prompt},
         ]
 
-        for attempt in range(3):  # Retry logic
-            try:
-                response = await asyncio.wait_for(
-                    asyncio.to_thread(
-                        self.ai_client.chat.completions.create,
-                        model=self.ai_config.model,
-                        messages=messages,
-                        max_tokens=self.ai_config.max_tokens,
-                        temperature=self.ai_config.temperature,
-                        top_p=0.9,
-                        top_k=40,
-                        repetition_penalty=1.1,
-                    ),
-                    timeout=self.ai_config.timeout,
+    async def _make_api_call(self, messages: list[dict]) -> Optional[str]:
+        """Make API call - ONE job"""
+        async with self._api_semaphore:
+            api_call = self.ai_client.chat.completions.create(
+                model=self.ai_config.model,
+                messages=messages,
+                max_tokens=self.ai_config.max_tokens,
+                temperature=self.ai_config.temperature,
+                top_p=0.9,
+                top_k=40,
+                repetition_penalty=1.1,
+            )
+
+            # Wait for the API call with timeout
+            response = await asyncio.wait_for(
+                api_call,
+                timeout=self.ai_config.timeout,
+            )
+            if response.choices and response.choices[0].message:
+                content = response.choices[0].message.content.strip()
+                # Clean up the response
+                content = self._clean_ai_response(
+                    self.extract_code_from_response(content)
                 )
+                return content
 
-            if response.choices and response.choices[0].message:
-                content = response.choices[0].message.content.strip()
+        return None
 
-                # Clean up the response
-                content = self._clean_ai_response(
-                    self.extract_code_from_response(content)
-                )
+    async def _call_ai_service(self, prompt: str) -> Optional[str]:
+        """Call AI service with retry logic and validation"""
+        messages = self._build_messages(prompt)
 
+        for attempt in range(self.MAX_RETRIES):  # Retry logic
+            try:
+                async with self._api_semaphore:
+                    content = await self._make_api_call(messages)
                 if content:
                     return content
 
@@ -617,24 +680,41 @@ class HybridLocustGenerator:
                 logger.warning(f"AI service timeout on attempt {attempt + 1}")
 
             except Exception as e:
-                logger.warning(f"AI service error on attempt {attempt + 1}: {e}")
+                classification = self._classify_error(e, attempt)  # Helper 3
+
+                if not classification.is_retryable:
+                    return ""
+
+                if attempt < self.MAX_RETRIES - 1:
+                    await asyncio.sleep(classification.backoff_seconds)
+
+                continue
 
-            if attempt < 2:  # Wait before retry
+            if attempt < self.MAX_RETRIES - 1:
                 await asyncio.sleep(2**attempt)
 
         return ""
 
     def extract_code_from_response(self, response_text: str) -> str:
         # Extract content between <code> tags
+        pattern = r"<code>(.*?)</code>"
+        matches = re.findall(pattern, response_text, re.DOTALL)
 
-        code_match = re.search(r"<code>(.*?)</code>", response_text, re.DOTALL)
-        if code_match:
-            content = code_match.group(1).strip()
-            # Additional validation - ensure we got actual content
-            if content and len(content) > 0:
-                return content
+        if not matches:
+            logger.warning("No <code> tags found, using full response")
+            return response_text.strip()
+
+        content = max(matches, key=len).strip()
+
+        # Content too short - use full response
+        if not content or len(content) <= 10:
+            logger.warning(
+                f"Code in tags too short ({len(content)} chars), using full response"
+            )
+            return response_text.strip()
 
-        return response_text.strip()
+        logger.debug(f"Extracted {len(content)} chars from <code> tags")
+        return str(content)
 
     def _clean_ai_response(self, content: str) -> str:
         """Clean and validate AI response"""
devdox_ai_locust-0.1.2.dist-info/METADATA → devdox_ai_locust-0.1.3.post1.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: devdox_ai_locust
-Version: 0.1.2
+Version: 0.1.3.post1
 Summary: AI-powered Locust load test generator from API documentation
 Author-email: Hayat Bourji <hayat.bourgi@montyholding.com>
 Maintainer-email: Hayat Bourji <hayat.bourgi@montyholding.com>
@@ -72,6 +72,21 @@ Dynamic: license-file
 
 DevDox AI Locust automatically generates comprehensive Locust load testing scripts from your API documentation (OpenAPI/Swagger specs). Using advanced AI capabilities, it creates realistic test scenarios, handles complex authentication flows, and generates production-ready performance tests.
 
+
+## 🆕 What's New in 0.1.3.post1
+
+### Performance & Reliability Improvements
+
+- **🚀 Asynchronous API Calls**: Migrated from `Together` to `AsyncTogether` for non-blocking API interactions, significantly improving generation speed and responsiveness
+- **⚡ Enhanced Timeout Handling**: Implemented robust timeout logic with configurable retry mechanisms for better error resilience
+- **🔧 Improved Code Extraction**: Enhanced `<code>` block parsing with detailed validation, multiple fallback scenarios, and better error messages
+- **🛡️ Better Error Management**: Comprehensive error handling throughout the AI generation pipeline with graceful degradation
+
+### Bug Fixes & Stability
+
+- Fixed edge cases in code block extraction where malformed responses could cause generation failures
+- Improved retry logic to handle transient API errors without interrupting the generation process
+
 ## ✨ Features
 
 - 🤖 **AI-Enhanced Generation**: Uses Together AI to create intelligent, realistic load test scenarios
@@ -82,6 +97,7 @@ DevDox AI Locust automatically generates comprehensive Locust load testing scrip
 - 🛠️ **Template-Based**: Highly customizable Jinja2 templates for different testing needs
 - 🔄 **Hybrid Approach**: Combines rule-based generation with AI enhancement
 - 📈 **Comprehensive Coverage**: Handles various HTTP methods, content types, and response scenarios
+- ⚡ **Asynchronous Processing**: Fast, non-blocking test generation with async/await
 
 ## 🚀 Quick Start
 
@@ -121,12 +137,9 @@ echo "API_KEY=your_together_ai_api_key_here" > .env
 # Generate from OpenAPI URL
 devdox_ai_locust generate --openapi-url https://api.example.com/openapi.json --output ./tests
 
-# Generate from local file
-dal generate --openapi-file ./api-spec.yaml --output ./load-tests
-
 # Generate with custom configuration
 devdox_ai_locust generate \
-  https://petstore.swagger.io/v2/swagger.json \
+  https://petstore.swagger.io/v3/swagger.json \
   --output ./petstore-tests \
   --together-api-key your_api_key \
 
devdox_ai_locust-0.1.2.dist-info/RECORD → devdox_ai_locust-0.1.3.post1.dist-info/RECORD CHANGED

@@ -1,7 +1,7 @@
 devdox_ai_locust/__init__.py,sha256=LhG8nXZxLkyvWwJxB_OCe9t4TLa4udLtzAVfHpD3CkU,276
-devdox_ai_locust/cli.py,sha256=LefUjiboe05vvZAorWqqyAQUQf1WNKLeWEsSg4aCLfE,13834
-devdox_ai_locust/config.py,sha256=Ywf66At22QqKK8BtvfbEOnndCuxp7dIXZwWhyH2_9fs,447
-devdox_ai_locust/hybrid_loctus_generator.py,sha256=e3ZBZlewNiORygxYDnPf5SG-yRKRH0RYX7l7pGvgnr8,33363
+devdox_ai_locust/cli.py,sha256=DDSAbT37-Rtj9XPFMtfr9nF6JIqz3md12u2DmCTD4MU,13844
+devdox_ai_locust/config.py,sha256=UfyqyI63QYbylxp5PQdAa-MoIGWTxcxr86LQ9cBr12s,453
+devdox_ai_locust/hybrid_loctus_generator.py,sha256=npnNw8OleKjXfhZMuULL1qFwSEjuUzGVGVOUDlIvXpM,36199
 devdox_ai_locust/locust_generator.py,sha256=dt455ONBs91W4JxRipNX395PlDmHojigM79aKWHHHxs,27017
 devdox_ai_locust/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 devdox_ai_locust/prompt/domain.j2,sha256=ThK1mZK8bmDF6v1YsOB9wjvijljPRoseLy6lWEUG-vM,1570
@@ -26,9 +26,9 @@ devdox_ai_locust/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 devdox_ai_locust/utils/file_creation.py,sha256=sN6rW07VBKfzwUEAG298tCRvmnzx0886w_phdEnsQZg,3957
 devdox_ai_locust/utils/open_ai_parser.py,sha256=EJsPpPSM9RiTZ0iySYIJ66knREegS324Q7mSk_4CxGM,13158
 devdox_ai_locust/utils/swagger_utils.py,sha256=L2CV_5J4krCYyIcl-KYW_SAkBxzIKOsn2kcHhZ2CI7k,3191
-devdox_ai_locust-0.1.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-devdox_ai_locust-0.1.2.dist-info/METADATA,sha256=AVhpixM4Y5nwy-mWxdPWTYf2P2gNKauONCMMchG0dDU,13342
-devdox_ai_locust-0.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-devdox_ai_locust-0.1.2.dist-info/entry_points.txt,sha256=Eoq-gJd4WxkwwQ8pUMsqeSrfZG3yW-NmJ82iVxOc9JA,95
-devdox_ai_locust-0.1.2.dist-info/top_level.txt,sha256=ZIpK9RS5xc9RXgG8mw9xPs0kwln8Kggi_7VURxtERQE,17
-devdox_ai_locust-0.1.2.dist-info/RECORD,,
+devdox_ai_locust-0.1.3.post1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+devdox_ai_locust-0.1.3.post1.dist-info/METADATA,sha256=5z4bSGjdnkY3AznoJfsUFvMn6s3WJWgJ6JQBNh5RJ10,14239
+devdox_ai_locust-0.1.3.post1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+devdox_ai_locust-0.1.3.post1.dist-info/entry_points.txt,sha256=Eoq-gJd4WxkwwQ8pUMsqeSrfZG3yW-NmJ82iVxOc9JA,95
+devdox_ai_locust-0.1.3.post1.dist-info/top_level.txt,sha256=ZIpK9RS5xc9RXgG8mw9xPs0kwln8Kggi_7VURxtERQE,17
+devdox_ai_locust-0.1.3.post1.dist-info/RECORD,,