devdox-ai-locust 0.1.3.post1__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of devdox-ai-locust might be problematic.

devdox_ai_locust/cli.py CHANGED
@@ -197,19 +197,20 @@ async def _generate_and_create_tests(
  custom_requirement: Optional[str] = "",
  host: Optional[str] = "0.0.0.0",
  auth: bool = False,
+ db_type: str = "",
  ) -> List[Dict[Any, Any]]:
  """Generate tests using AI and create test files"""
  together_client = AsyncTogether(api_key=api_key)
 
  with console.status("[bold green]Generating Locust tests with AI..."):
  generator = HybridLocustGenerator(ai_client=together_client)
-
  test_files, test_directories = await generator.generate_from_endpoints(
  endpoints=endpoints,
  api_info=api_info,
  custom_requirement=custom_requirement,
  target_host=host,
  include_auth=auth,
+ db_type=db_type,
  )
 
  # Create test files
@@ -271,6 +272,12 @@ def cli(ctx: click.Context, verbose: bool) -> None:
  )
  @click.option("--host", "-H", type=str, help="Target host URL")
  @click.option("--auth/--no-auth", default=True, help="Include authentication in tests")
+ @click.option(
+ "--db-type",
+ type=click.Choice(["", "mongo", "postgresql"], case_sensitive=False),
+ default="",
+ help="Database type for testing (empty for no database, mongo, or postgresql)",
+ )
  @click.option("--dry-run", is_flag=True, help="Generate tests without running them")
  @click.option(
  "--custom-requirement", type=str, help="Custom requirements for test generation"
@@ -291,6 +298,7 @@ def generate(
  run_time: str,
  host: Optional[str],
  auth: bool,
+ db_type: str,
  dry_run: bool,
  custom_requirement: Optional[str],
  together_api_key: Optional[str],
@@ -309,6 +317,7 @@ def generate(
  run_time,
  host,
  auth,
+ db_type,
  dry_run,
  custom_requirement,
  together_api_key,
@@ -332,6 +341,7 @@ async def _async_generate(
  run_time: str,
  host: Optional[str],
  auth: bool,
+ db_type: str,
  dry_run: bool,
  custom_requirement: Optional[str],
  together_api_key: Optional[str],
@@ -343,7 +353,6 @@ async def _async_generate(
  try:
  _, api_key = _initialize_config(together_api_key)
  output_dir = _setup_output_directory(output)
-
  # Display configuration
  if ctx.obj["verbose"]:
  _display_configuration(
@@ -363,7 +372,14 @@ async def _async_generate(
  )
 
  created_files = await _generate_and_create_tests(
- api_key, endpoints, api_info, output_dir, custom_requirement, host, auth
+ api_key,
+ endpoints,
+ api_info,
+ output_dir,
+ custom_requirement,
+ host,
+ auth,
+ db_type,
  )
 
  # Show results
@@ -8,7 +8,7 @@ from pydantic_settings import BaseSettings
  class Settings(BaseSettings):
  """Application settings."""
 
- VERSION: str = "0.1.3.post1"
+ VERSION: str = "0.1.4"
 
  API_KEY: str = "" # Fallback for backward compatibility
 
@@ -25,6 +25,7 @@ logger = logging.getLogger(__name__)
 
 
  test_data_file_path = "test_data.py"
+ data_provider_path = "data_provider.py"
 
 
  @dataclass
@@ -118,6 +119,7 @@ class EnhancementProcessor:
  base_files: Dict[str, str],
  directory_files: List[Dict[str, Any]],
  grouped_endpoints: Dict[str, List[Endpoint]],
+ db_type: str = "",
  ) -> Tuple[List[Dict[str, Any]], List[str]]:
  """Process workflow enhancements"""
  enhanced_directory_files: List[Dict[str, Any]] = []
@@ -134,10 +136,13 @@ class EnhancementProcessor:
  first_workflow = base_workflow_files[0]
  # Get the content from the dictionary - adjust key name as needed
  base_workflow_content = first_workflow.get("base_workflow.py", "")
-
  for workflow_item in directory_files:
  enhanced_workflow_item = await self._enhance_single_workflow(
- workflow_item, base_files, base_workflow_content, grouped_endpoints
+ workflow_item,
+ base_files,
+ base_workflow_content,
+ grouped_endpoints,
+ db_type,
  )
  if enhanced_workflow_item:
  enhanced_directory_files.append(enhanced_workflow_item["files"])
@@ -151,6 +156,7 @@ class EnhancementProcessor:
  base_files: Dict[str, str],
  base_workflow_files: str,
  grouped_endpoints: Dict[str, List[Endpoint]],
+ db_type: str = "",
  ) -> Dict[str, Any] | None:
  """Enhance a single workflow file"""
  for key, value in workflow_item.items():
@@ -164,6 +170,7 @@ class EnhancementProcessor:
  base_workflow=base_workflow_files,
  grouped_enpoints=workflow_endpoints_dict,
  auth_endpoints=auth_endpoints,
+ db_type=db_type,
  )
  if enhanced_workflow:
  return {
@@ -174,14 +181,19 @@ class EnhancementProcessor:
  return None
 
  async def process_test_data_enhancement(
- self, base_files: Dict[str, str], endpoints: List[Endpoint]
+ self, base_files: Dict[str, str], endpoints: List[Endpoint], db_type: str = ""
  ) -> Tuple[Dict[str, str], List[str]]:
  """Process test data enhancement"""
  enhanced_files = {}
  enhancements = []
  if self.ai_config and self.ai_config.enhance_test_data:
  enhanced_test_data = await self.locust_generator.enhance_test_data_file(
- base_files.get(test_data_file_path, ""), endpoints
+ base_files.get(test_data_file_path, ""),
+ endpoints,
+ db_type,
+ base_files.get(data_provider_path, ""),
+ base_files.get("db_config.py", ""),
+ data_provider_path,
  )
  if enhanced_test_data:
  enhanced_files[test_data_file_path] = enhanced_test_data
@@ -289,6 +301,7 @@ class HybridLocustGenerator:
  custom_requirement: Optional[str] = None,
  target_host: Optional[str] = None,
  include_auth: bool = True,
+ db_type: str = "",
  ) -> Tuple[Dict[str, str], List[Dict[str, Any]]]:
  """
  Generate Locust tests using hybrid approach
@@ -308,6 +321,7 @@ class HybridLocustGenerator:
  api_info,
  include_auth=include_auth,
  target_host=target_host,
+ db_type=db_type,
  )
  )
 
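Callers pass the database selection through `generate_from_endpoints`. A hedged usage sketch of the updated signature follows; the module path, API key, and inputs are placeholders rather than values taken from this diff:

```python
# Sketch only: calling the updated generate_from_endpoints() with the new
# db_type keyword. Module path and all inputs are assumptions for illustration.
import asyncio

from together import AsyncTogether
from devdox_ai_locust.hybrid_loctus_generator import HybridLocustGenerator  # path assumed

async def main() -> None:
    generator = HybridLocustGenerator(ai_client=AsyncTogether(api_key="YOUR_KEY"))
    test_files, test_directories = await generator.generate_from_endpoints(
        endpoints=[],                    # parsed Endpoint objects in real use
        api_info={"title": "Demo API"},
        target_host="http://localhost:8000",
        include_auth=False,
        db_type="mongo",                 # "" (the default) skips database scaffolding
    )
    print(sorted(test_files))

asyncio.run(main())
```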
@@ -322,6 +336,7 @@ class HybridLocustGenerator:
  directory_files,
  grouped_enpoints,
  custom_requirement,
+ db_type,
  )
  if enhancement_result.success:
  logger.info(
@@ -422,6 +437,7 @@ class HybridLocustGenerator:
  directory_files: List[Dict[str, Any]],
  grouped_endpoints: Dict[str, List[Endpoint]],
  custom_requirement: Optional[str] = None,
+ db_type: str = "",
  ) -> EnhancementResult:
  """Enhance base files with AI - Refactored for reduced cognitive complexity"""
  start_time = asyncio.get_event_loop().time()
@@ -434,6 +450,7 @@ class HybridLocustGenerator:
  directory_files,
  grouped_endpoints,
  custom_requirement,
+ db_type,
  )
 
  processing_time = asyncio.get_event_loop().time() - start_time
@@ -462,6 +479,7 @@ class HybridLocustGenerator:
  directory_files: List[Dict[str, Any]],
  grouped_endpoints: Dict[str, List[Endpoint]],
  custom_requirement: Optional[str] = None,
+ db_type: str = "",
  ) -> EnhancementResult:
  """Process all enhancements using the enhancement processor"""
  processor = EnhancementProcessor(self.ai_config, self)
@@ -476,7 +494,7 @@ class HybridLocustGenerator:
  processor.process_domain_flows_enhancement(
  endpoints, api_info, custom_requirement
  ),
- processor.process_test_data_enhancement(base_files, endpoints),
+ processor.process_test_data_enhancement(base_files, endpoints, db_type),
  processor.process_validation_enhancement(base_files, endpoints),
  ]
 
@@ -501,7 +519,7 @@ class HybridLocustGenerator:
  workflow_files,
  workflow_enhancements,
  ) = await processor.process_workflow_enhancements(
- base_files, directory_files, grouped_endpoints
+ base_files, directory_files, grouped_endpoints, db_type
  )
  enhanced_directory_files.extend(workflow_files)
  enhancements_applied.extend(workflow_enhancements)
@@ -557,6 +575,7 @@ class HybridLocustGenerator:
  base_workflow: str,
  grouped_enpoints: Dict[str, List[Endpoint]],
  auth_endpoints: List[Endpoint],
+ db_type: str = "",
  ) -> Optional[str]:
  try:
  template = self.jinja_env.get_template("workflow.j2")
@@ -568,6 +587,7 @@ class HybridLocustGenerator:
  base_workflow=base_workflow,
  auth_endpoints=auth_endpoints,
  base_content=base_content,
+ db_type=db_type,
  )
  enhanced_content = await self._call_ai_service(prompt)
  return enhanced_content
@@ -577,7 +597,13 @@ class HybridLocustGenerator:
  return ""
 
  async def enhance_test_data_file(
- self, base_content: str, endpoints: List[Endpoint]
+ self,
+ base_content: str,
+ endpoints: List[Endpoint],
+ db_type: str = "",
+ data_provider: str = "",
+ db_config: str = "",
+ data_provider_path: str = "",
  ) -> Optional[str]:
  """Enhance test data generation with domain knowledge"""
 
@@ -592,11 +618,14 @@ class HybridLocustGenerator:
  "base_content": base_content,
  "schemas_info": schemas_info,
  "endpoints": endpoints,
+ "db_type": db_type,
+ "data_provider_content": data_provider,
+ "db_config": db_config,
+ "data_provider_path": data_provider_path,
  }
 
  # Render enhanced content
  prompt = template.render(**context)
-
  enhanced_content = await self._call_ai_service(prompt)
  if enhanced_content and self._validate_python_code(enhanced_content):
  return enhanced_content
@@ -12,7 +12,8 @@ import black
  from jinja2 import Environment, FileSystemLoader
  from pathlib import Path
  import logging
- from dataclasses import dataclass
+ from enum import Enum
+ from dataclasses import dataclass, asdict
  from datetime import datetime
 
 
@@ -21,6 +22,48 @@ from devdox_ai_locust.utils.open_ai_parser import Endpoint, Parameter
  logger = logging.getLogger(__name__)
 
 
+ class DatabaseType(Enum):
+ """Supported database types for testing"""
+
+ MONGO = "mongo"
+ POSTGRES = "postgres"
+
+
+ @dataclass
+ class MongoDBConfig:
+ """MongoDB-specific configuration"""
+
+ use_realistic_data: str = "true"
+ enable_mongodb: str = "false"
+ use_mongodb_for_test_data: str = "false"
+ mongodb_uri: str = "mongodb://localhost:27017/"
+ mongodb_database: str = "locust_test_data"
+ MONGODB_MAX_POOL_SIZE: int = 100
+ MONGODB_MIN_POOL_SIZE: int = 10
+
+ # MongoDB Timeout Settings
+ MONGODB_CONNECT_TIMEOUT_MS: int = 5000
+ MONGODB_SERVER_SELECTION_TIMEOUT_MS: int = 5000
+ MONGODB_SOCKET_TIMEOUT_MS: int = 10000
+ MONGODB_MAX_IDLE_TIME_MS: int = 60000
+ MONGODB_WAIT_QUEUE_TIMEOUT_MS: int = 10000
+
+ # MongoDB Collection Names to be added
+
+
+ @dataclass
+ class PostgreSQLConfig:
+ """PostgreSQL-specific configuration"""
+
+ host: str = "localhost"
+ port: str = "5432"
+ database: str = "test_db"
+ user: str = "test_user"
+ password: str = "test_password"
+ pool_size: str = "10"
+ max_overflow: str = "20"
+
+
  @dataclass
  class TestDataConfig:
  """Configuration for test data generation"""
@@ -100,6 +143,7 @@ class LocustTestGenerator:
  api_info: Dict[str, Any],
  include_auth: bool = True,
  target_host: Optional[str] = None,
+ db_type: str = "",
  ) -> Tuple[Dict[str, str], List[Dict[str, Any]], Dict[str, List[Endpoint]]]:
  """
  Generate complete Locust test suite from parsed endpoints
@@ -121,14 +165,23 @@ class LocustTestGenerator:
  "locustfile.py": self._generate_main_locustfile(
  endpoints, api_info, list(grouped_enpoint.keys())
  ),
- "test_data.py": self._generate_test_data_file(),
+ "test_data.py": self._generate_test_data_file(db_type),
  "config.py": self._generate_config_file(api_info),
  "utils.py": self._generate_utils_file(),
  "custom_flows.py": self._generate_custom_flows_file(),
  "requirements.txt": self._generate_requirements_file(),
- "README.md": self._generate_readme_file(api_info),
- ".env.example": self._generate_env_example(api_info, target_host),
+ "README.md": self._generate_readme_file(api_info, db_type),
+ ".env.example": self._generate_env_example(
+ api_info, target_host, db_type
+ ),
  }
+ if db_type != "":
+ self.generated_files["db_config.py"] = self._generate_db_file(
+ db_type, "db_config.py.j2"
+ )
+ self.generated_files["data_provider.py"] = self._generate_db_file(
+ db_type, "data_provider.py.j2"
+ )
 
  return self.generated_files, workflows_files, grouped_enpoint
  except Exception as e:
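The pattern introduced here is: always emit the base file map, then append database scaffolding only for a non-empty `db_type`. Distilled into a standalone sketch, with a stub renderer standing in for the generator's Jinja environment:

```python
# Distilled sketch of the conditional file-generation pattern above; render()
# stands in for self.jinja_env.get_template(...).render().
from typing import Callable, Dict

def build_files(db_type: str, render: Callable[[str], str]) -> Dict[str, str]:
    files = {"test_data.py": render("test_data.py.j2")}
    if db_type != "":  # same guard as the generator method above
        files["db_config.py"] = render(db_type + "/db_config.py.j2")
        files["data_provider.py"] = render(db_type + "/data_provider.py.j2")
    return files

print(sorted(build_files("mongo", lambda name: f"# rendered from {name}\n")))
# ['data_provider.py', 'db_config.py', 'test_data.py']
```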
@@ -639,10 +692,13 @@ class LocustTestGenerator:
 
 
  '''
- def _generate_test_data_file(self) -> str:
+ def _generate_test_data_file(self, db_type: str = "") -> str:
  """Generate test_data.py file content"""
+ data_provider_content = None
+ if db_type == DatabaseType.MONGO.value:
+ data_provider_content = "mongo_data_provider"
  template = self.jinja_env.get_template("test_data.py.j2")
- return template.render()
+ return template.render(data_provider_content=data_provider_content)
 
  def _generate_config_file(self, api_info: Dict[str, Any]) -> str:
  """Generate config.py file content"""
@@ -665,8 +721,16 @@ class LocustTestGenerator:
  content = template.render()
  return content
 
+ def _generate_db_file(self, db_type: str, file_name: str) -> str:
+ """Generate db file content"""
+ template = self.jinja_env.get_template(db_type + "/" + file_name)
+ return template.render()
+
  def _generate_env_example(
- self, api_info: Dict[str, Any], target_host: Optional[str] = None
+ self,
+ api_info: Dict[str, Any],
+ target_host: Optional[str] = None,
+ db_type: str = "",
  ) -> str:
  """Generate .env.example file content"""
  try:
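`_generate_db_file` builds the template name as `db_type + "/" + file_name`, so Jinja resolves the database templates from a per-database subdirectory of the loader's search path. A minimal lookup sketch with an in-memory loader; the template body and variable are illustrative, not the package's real template:

```python
# Sketch: Jinja2 resolves "mongo/db_config.py.j2" relative to the loader's
# search path, so db templates live in a subdirectory named after db_type.
# DictLoader stands in for the package's FileSystemLoader.
from jinja2 import DictLoader, Environment

env = Environment(loader=DictLoader({
    "mongo/db_config.py.j2": "MONGODB_URI = '{{ uri | default('mongodb://localhost:27017/') }}'",
}))
print(env.get_template("mongo" + "/" + "db_config.py.j2").render())
# MONGODB_URI = 'mongodb://localhost:27017/'
```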
@@ -686,12 +750,15 @@ class LocustTestGenerator:
  "LOCUST_SPAWN_RATE": "5",
  "LOCUST_RUN_TIME": "10m",
  "LOCUST_HOST": locust_host,
- "USE_REALISTIC_DATA": "true",
  "DATA_SEED": "42",
  "REQUEST_TIMEOUT": "30",
  "MAX_RETRIES": "3",
  }
 
+ if db_type == DatabaseType.MONGO.value:
+ mongodb_config = MongoDBConfig()
+ environment_vars.update(asdict(mongodb_config))
+
  context = {
  "environment_vars": environment_vars,
  "api_info": api_info,
@@ -706,15 +773,23 @@ class LocustTestGenerator:
  logger.error(f"❌ Failed to generate .env.example from template: {e}")
  return ""
 
- def _generate_readme_file(self, api_info: Dict[str, Any]) -> str:
+ def _generate_readme_file(self, api_info: Dict[str, Any], db_type: str = "") -> str:
  try:
  # Get the template
  template = self.jinja_env.get_template("readme.md.j2")
+ db_using = ""
+ if db_type == DatabaseType.MONGO.value:
+ template_db = self.jinja_env.get_template(
+ DatabaseType.MONGO.value + "/db_integration.j2"
+ )
+
+ db_using = template_db.render()
 
  # Prepare template context
  context = {
  "api_info": api_info,
  "generated_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+ "db_using": db_using,
  }
 
  # Render the template
@@ -1,62 +1,186 @@
- Enhance this test data generator with domain-specific realistic data while preserving ALL existing methods and their signatures:
+ Evolve this Test Data Generator into a fully production-ready, intelligent data generation framework.
+ {% if data_provider_content %}Incorporate MongoDB integration for live data enrichment, delivering a complete, robust, and realistic test data engine with domain awareness, caching, and smart relationships.{% endif %}
 
- Current File:
+ **CURRENT STATE ANALYSIS**:
+ ```python
  {{base_content}}
+ ```
+
+
+ {% if data_provider_content %}
+ Database config file content with path to DB {{ data_provider_path }}:
+ {{ db_config }}
+
+
+ **MONGODB DATA PROVIDER** (Available for Integration):
+ ```python
+ {{data_provider_content}}
+ ```
+ {% endif %}
 
- API Schemas Found:
- {{ schemas_info }}
 
  API Endpoints Context:
- [
- {% for ep in endpoints -%}
- "Path: {{ ep.path }}, Method: {{ ep.method }}, Tags: {{ ep.tags }}"{% if not loop.last %},{% endif %}
- {% endfor -%}
- ]
+ - **Schemas**: {schemas_info}
+ - **Endpoints**: [{% for ep in endpoints -%}
+ "Path: {{ ep.path }}, Method: {{ ep.method }}, Tags: {{ ep.tags }}"{% if not loop.last %},{% endif %}{% endfor -%}]
+
+
 
- REQUIREMENTS - Enhance by adding NEW functionality while keeping ALL existing methods intact:
+ **CRITICAL REQUIREMENT**: Every method must have COMPLETE, FUNCTIONAL implementation. NO stubs, NO pass statements, NO placeholder comments.
 
- 1. **Domain-Specific Data Generators**:
+ **IMPLEMENTATION SPECIFICATIONS**:
+ {% set counter = namespace(value=1) %}
+
+ {% if data_provider_content %}
+ {{ counter.value }}. **MongoDB Integration** (COMPLETE Implementation Required)
+ ```python
+ def get_from_mongodb_or_generate(self, collection_name: str, entity_type: str, fallback_generator: callable = None, **kwargs) -> Dict[str, Any]:
+ \"\"\"COMPLETE implementation with full error handling, logging, and statistics\"\"\"
+ # IMPLEMENT: Full MongoDB integration with:
+ # - Statistics tracking (mongo_queries, cache_hits, fallback_generations)
+ # - Comprehensive error handling with specific exception types
+ # - Logging with appropriate levels (DEBUG, WARNING, ERROR)
+ # - Performance metrics collection
+ # - Graceful degradation when MongoDB unavailable
+ # - Return type validation and data sanitization
+ ```
+
+ {% set counter.value = counter.value + 1 %}
+ {% endif %}
+
+ {{ counter.value }}. **Domain-Specific Data Generators** (COMPLETE Implementation Required)
  - Add methods like `generate_affiliate_data()`, `generate_user_credentials()`, `generate_product_data()`
  - Create realistic data based on API endpoint patterns (affiliate, user, product, etc.)
  - Add specific payload generators for common API patterns
+ {% if data_provider_content %}- Use MongoDB data as templates when available{% endif %}
+ - Add `generate_realistic_data(entity_type: str)` for entity-specific data
+ {% set counter.value = counter.value + 1 %}
+
+
+
+ {{ counter.value }}. **Realistic Data Generation**:
+ - Add `generate_realistic_data(entity_type: str, **kwargs)` for entity-specific data
+ - Create realistic data based on API endpoint patterns (user, product, order, etc.)
+ {% if data_provider_content %}- Integrate MongoDB data patterns into generated data {% endif %}
+ - Add business logic validation for generated data
+ {% set counter.value = counter.value + 1 %}
 
- 2. **Realistic ID Generation**:
+
+ {{ counter.value }}. **Realistic ID Generation**:
  - Add `generate_realistic_id(entity_type: str)` for entity-specific IDs
  - Create correlated IDs (user_id -> session_id -> transaction_id)
  - Add methods like `generate_affiliate_id()`, `generate_partner_id()`, etc.
+ {% set counter.value = counter.value + 1 %}
 
- 3. **Payload Templates**:
+ {{ counter.value }}. **Payload Templates**:
  - Add `get_payload_template(endpoint_path: str, method: str)`
  - Create endpoint-specific payload generators
  - Add `generate_login_payload()`, `generate_registration_payload()`, etc.
+ {% set counter.value = counter.value + 1 %}
 
- 4. **Data Relationships & Correlation**:
+ {{ counter.value }}. **Data Relationships & Correlation**:
  - Add session management: `create_user_session()`, `get_session_data()`
  - Create data dependency chains (parent-child relationships)
  - Add `link_related_entities(parent_id, child_type)` for realistic relationships
+ {% set counter.value = counter.value + 1 %}
+
+
+ {{ counter.value }}. **Smart Caching & Performance** (COMPLETE Implementation Required)
+ ```python
+ def cache_generated_data(self, key: str, data: Any, ttl_seconds: int = 300) -> None:
+ \"\"\"COMPLETE thread-safe caching implementation\"\"\"
+ # IMPLEMENT: Full caching system with:
+ # - Thread-safe operations using threading.Lock()
+ # - TTL (time-to-live) management with automatic expiration
+ # - Cache size limits with LRU eviction
+ # - Memory usage tracking and optimization
+ # - Cache hit/miss statistics
+ # - Data serialization/deserialization if needed
 
- 5. **Smart Caching & Performance**:
- - Add `cache_generated_data(key, data)` and `get_cached_data(key)`
- - Create reusable entity pools for better performance
- - Add `get_or_create_entity(entity_type, **kwargs)`
+ def get_cached_data(self, key: str) -> Optional[Any]:
+ \"\"\"COMPLETE cache retrieval with validation\"\"\"
+ # IMPLEMENT: Full cache retrieval with:
+ # - TTL validation and automatic cleanup
+ # - Thread-safe access
+ # - Statistics updating (cache_hits counter)
+ # - Data integrity validation
+ # - Proper None handling for cache misses
 
- 6. **Specialized Pattern Generators**:
+ def get_or_create_entity(self, entity_type: str, **kwargs) -> Dict[str, Any]:
+ \"\"\"COMPLETE entity management with intelligent caching\"\"\"
+ # IMPLEMENT: Full entity lifecycle management:
+ # - Cache key generation from entity_type and kwargs
+ # - Cache lookup with TTL validation
+ # - MongoDB integration for real data
+ # - Fallback to realistic generation
+ # - Automatic caching of generated/retrieved data
+ # - Relationship tracking and linking
+ ```
+ {% set counter.value = counter.value + 1 %}
+
+ {{ counter.value }}. **Specialized Pattern Generators**:
  - Add `generate_api_key_data()`, `generate_webhook_payload()`
  - Create `generate_pagination_data()`, `generate_filter_data()`
  - Add `generate_error_scenarios()` for negative testing
+ {% set counter.value = counter.value + 1 %}
 
- 7. **Validation & Constraints**:
+ {{ counter.value }}. **Validation & Constraints**:
  - Add `validate_generated_data(data, schema)`
  - Create constraint-aware generation
  - Add business rule validation
+ {% set counter.value = counter.value + 1 %}
+
+ {% if data_provider_content %}
+ {{ counter.value }}. **MongoDB Integration Specifics**:
+ - **MANDATORY**: Use `mongo_data_provider.get_multiple_documents(collection_name, count, query)` when retrieving multiple realistic records (it returns a list) from MongoDB.
+ - Use `mongo_data_provider.get_document(collection_name,query,projection)` when retrieving a single realistic record from MongoDB or fallback generation.
+ - Add `mongo_data_provider.preload_cache()` integration
+ - Use `mongo_config.enable_mongodb` for availability checking
+ - Implement proper connection error handling
+ - Add MongoDB query optimization
+ - Include data freshness validation
+ {% set counter.value = counter.value + 1 %}
+ {% endif %}
+
+ {% if data_provider_content %}
+
+ **🚨 FINAL VALIDATION REQUIREMENTS**:
+
+ The generated code MUST contain these exact function calls:
+ 1. `mongo_data_provider.get_multiple_documents()` - Used at least 5 times
+ 2. `mongo_data_provider.get_random_document()` - Used for single items only
+ 3. Both methods must have proper error handling and fallback generation
+
+
+ **VERIFICATION CHECKLIST**:
+ - [ ] `get_multiple_documents()` appears in at least 5 different methods
+ - [ ] `get_document()` is used for single entity retrieval
+ - [ ] No method uses only `get_document()` when batch data is needed
+ - [ ] All MongoDB calls have try/catch error handling
+ - [ ] All MongoDB calls have fallback generation
+ **FAILURE CRITERIA**: If `get_multiple_documents()` appears less than 5 times in the generated code, the implementation is incomplete and must be regenerated.
+ {% endif %}
+
+ **OUTPUT REQUIREMENTS**:
+
+ 1. **ZERO Placeholder Methods**: Every method must be FULLY implemented
+ 2. **Production Quality**: Include proper error handling, logging, and documentation
+ 3. **Thread Safety**: All caching and state management must be thread-safe
+ 4. **Performance Optimized**: Efficient algorithms and memory usage
+ 5. **Comprehensive Testing**: Generate data that covers edge cases and business scenarios
+ 6. **Backward Compatibility**: All existing functionality must continue working
+ 7. **Rich Data**: Generate realistic, coherent data with proper relationships
+ 8. **Extensive Logging**: Use logging module with appropriate levels
+ 9. **Statistics Tracking**: Detailed metrics for monitoring and optimization
+ 10. **MongoDB Integration**: Seamless integration with provided data provider
 
- CRITICAL CONSTRAINTS:
- - MUST preserve ALL existing method signatures exactly as they are
- - MUST keep all existing functionality working
- - MUST maintain backward compatibility
- - ADD new methods, don't modify existing ones
- - Keep the global `test_data_generator` instance
+ 🚨 **CRITICAL SUCCESS CRITERIA**:
+ - NO methods should contain `pass` or placeholder comments
+ - ALL caching methods must be fully functional
+ - ALL data generation must produce realistic, coherent data
+ - ALL MongoDB integration must handle errors gracefully
+ - ALL relationships must be properly tracked and maintained
 
- Focus on making the data generation smarter and more realistic while ensuring all existing code that imports and uses this generator continues to work without changes.
+ Return the COMPLETE, PRODUCTION-READY Python file with every method fully implemented and tested-quality code.
 
- Output: Complete enhanced Python file with ALL existing content preserved + new intelligent features
+ **Format**: Return ONLY the complete Python code wrapped in ```python``` tags with NO explanations outside the code block.
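The rewritten prompt keeps its section numbers sequential with Jinja's `namespace` idiom, so the numbering stays correct whether or not the optional MongoDB blocks render. A minimal sketch of that idiom, using an illustrative template string rather than the real file:

```python
# Sketch of the {% set counter = namespace(value=1) %} idiom used above:
# namespace() gives a mutable holder that survives block scopes, so optional
# sections can bump the counter only when they actually render.
from jinja2 import Template

tmpl = Template(
    "{% set counter = namespace(value=1) %}"
    "{% if data_provider_content %}{{ counter.value }}. MongoDB Integration\n"
    "{% set counter.value = counter.value + 1 %}{% endif %}"
    "{{ counter.value }}. Domain-Specific Data Generators"
)
print(tmpl.render(data_provider_content="..."))  # 1. MongoDB..., 2. Domain-Specific...
print(tmpl.render(data_provider_content=""))     # 1. Domain-Specific...
```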