api-mocker 0.1.2__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
api_mocker/testing.py ADDED
@@ -0,0 +1,699 @@
1
+ """
2
+ Advanced Testing Framework for API-Mocker.
3
+ """
4
+
5
+ import json
6
+ import asyncio
7
+ import time
8
+ import statistics
9
+ from typing import Dict, List, Optional, Any, Callable
10
+ from pathlib import Path
11
+ import logging
12
+ from dataclasses import dataclass, asdict
13
+ from datetime import datetime
14
+ import concurrent.futures
15
+ import requests
16
+ import yaml
17
+
18
# ai_generator is an optional dependency: when it cannot be imported, the
# AI-backed helpers in this module are disabled (AI_AVAILABLE = False) and
# managers fall back to None instead of failing at import time.
try:
    from .ai_generator import AIGenerationManager
    AI_AVAILABLE = True
except ImportError:
    AI_AVAILABLE = False

# Module-level logger, named after the module per logging convention.
logger = logging.getLogger(__name__)
25
+
26
@dataclass
class TestCase:
    """One HTTP request plus the expectations asserted on its response.

    Mutable collection fields default to None and are normalized to fresh
    empty containers in ``__post_init__`` so instances never share state.
    """
    name: str
    description: str
    method: str                              # HTTP verb, e.g. "GET"
    url: str                                 # path (joined to a base URL) or absolute URL
    headers: Optional[Dict] = None           # extra request headers
    body: Optional[Dict] = None              # sent as the JSON request body
    expected_status: int = 200               # asserted against the response status
    expected_schema: Optional[Dict] = None
    assertions: Optional[List[Dict]] = None  # extra assertion specs (see TestRunner)
    timeout: int = 30                        # per-request timeout, seconds
    retries: int = 0                         # extra attempts on transport failure

    def __post_init__(self):
        # Replace None defaults with per-instance containers.
        if self.headers is None:
            self.headers = {}
        if self.body is None:
            self.body = {}
        if self.assertions is None:
            self.assertions = []
48
+
49
@dataclass
class TestSuite:
    """A named group of test cases sharing a base URL, hooks and variables."""
    name: str
    description: str
    base_url: str                             # prefix joined onto each case's url
    # Quoted forward reference keeps the dataclass importable regardless of
    # definition order.
    test_cases: List["TestCase"]
    setup_hooks: Optional[List[Dict]] = None     # run once before the cases
    teardown_hooks: Optional[List[Dict]] = None  # run once after the cases
    variables: Optional[Dict] = None             # seed values for TestRunner.variables

    def __post_init__(self):
        # Normalize None defaults to fresh containers (no shared mutables).
        if self.setup_hooks is None:
            self.setup_hooks = []
        if self.teardown_hooks is None:
            self.teardown_hooks = []
        if self.variables is None:
            self.variables = {}
67
+
68
@dataclass
class TestResult:
    """Outcome of one executed test case."""
    test_name: str
    status: str                              # one of: passed, failed, skipped, error
    duration: float                          # wall-clock seconds for the attempt(s)
    response: Optional[Dict] = None          # status_code / headers / body snapshot
    error: Optional[str] = None              # exception text when status == "error"
    assertions: Optional[List[Dict]] = None  # per-assertion evaluation records
    timestamp: Optional[str] = None          # ISO-8601; auto-filled when omitted

    def __post_init__(self):
        # Fresh container per instance, and a creation timestamp by default.
        if self.assertions is None:
            self.assertions = []
        if self.timestamp is None:
            self.timestamp = datetime.now().isoformat()
84
+
85
@dataclass
class PerformanceTest:
    """Represents a performance test.

    Describes a closed-loop load run: ``test_case`` is fired by
    ``concurrent_users`` workers within a ``duration_seconds`` budget.
    """
    name: str
    # The request template each simulated user sends.
    test_case: TestCase
    # Number of worker threads issuing requests concurrently.
    concurrent_users: int
    # Overall time budget for collecting results, in seconds.
    duration_seconds: int
    # Seconds over which workers are started; 0 starts them all at once.
    ramp_up_seconds: int = 0
    # Target request rate; not read anywhere in this module — presumably
    # informational for callers. TODO confirm intended use.
    target_rps: Optional[float] = None
94
+
95
@dataclass
class PerformanceResult:
    """Aggregated metrics from one performance test run.

    All response times are in seconds; ``error_rate`` is a percentage.
    """
    test_name: str
    total_requests: int
    successful_requests: int
    failed_requests: int
    average_response_time: float
    min_response_time: float
    max_response_time: float
    p95_response_time: float
    p99_response_time: float
    requests_per_second: float
    error_rate: float                # failed / total, as a percentage
    duration: float                  # wall-clock seconds of the whole run
    timestamp: Optional[str] = None  # ISO-8601; auto-filled when omitted

    def __post_init__(self):
        if self.timestamp is None:
            self.timestamp = datetime.now().isoformat()
115
+
116
class TestRunner:
    """Executes test suites and individual test cases over HTTP.

    A single requests.Session is reused for connection pooling.  Values
    produced by hooks and by each case's response are accumulated in
    ``variables`` for later cases/hooks to consume.
    """

    def __init__(self):
        self.variables: Dict[str, Any] = {}
        self.session = requests.Session()
        # AI helper is optional; stays None when the extra isn't installed.
        self.ai_manager = AIGenerationManager() if AI_AVAILABLE else None

    def run_test_case(self, test_case: "TestCase", base_url: str = "") -> "TestResult":
        """Run a single test case and return its TestResult.

        Transport-level failures (exceptions while sending the request) are
        retried up to ``test_case.retries`` extra times; assertion failures
        are never retried.  The reported duration spans all attempts.
        """
        start_time = time.time()
        last_error = None

        for _attempt in range(max(0, test_case.retries) + 1):
            try:
                url = base_url + test_case.url if base_url else test_case.url
                headers = test_case.headers.copy()
                body = test_case.body.copy() if test_case.body else None

                response = self.session.request(
                    method=test_case.method,
                    url=url,
                    headers=headers,
                    json=body,
                    timeout=test_case.timeout,
                )

                duration = time.time() - start_time
                response_dict = {
                    "status_code": response.status_code,
                    "headers": dict(response.headers),
                    "body": self._decode_body(response),
                }

                assertions = self._run_assertions(test_case, response_dict)
                status = "failed" if any(not a["passed"] for a in assertions) else "passed"

                return TestResult(
                    test_name=test_case.name,
                    status=status,
                    duration=duration,
                    response=response_dict,
                    assertions=assertions,
                )
            except Exception as e:  # network error, timeout, malformed spec, ...
                last_error = e

        return TestResult(
            test_name=test_case.name,
            status="error",
            duration=time.time() - start_time,
            error=str(last_error),
        )

    @staticmethod
    def _decode_body(response) -> Any:
        """Best-effort body decode: parsed JSON when declared AND valid, else text."""
        if response.headers.get('content-type', '').startswith('application/json'):
            try:
                return response.json()
            except ValueError:
                # Content-type header lied; fall back to raw text.
                return response.text
        return response.text

    def run_test_suite(self, test_suite: "TestSuite") -> List["TestResult"]:
        """Run every case in the suite, with setup/teardown hooks around them."""
        self._run_hooks(test_suite.setup_hooks)
        self.variables.update(test_suite.variables)

        results = []
        for test_case in test_suite.test_cases:
            result = self.run_test_case(test_case, test_suite.base_url)
            results.append(result)
            # Expose each case's response to later cases/hooks.
            if result.response:
                self.variables[f"{test_case.name}_response"] = result.response

        self._run_hooks(test_suite.teardown_hooks)
        return results

    def _run_assertions(self, test_case: "TestCase", response: Dict) -> List[Dict]:
        """Evaluate the implicit status-code check plus all custom assertions."""
        assertions = [{
            "name": "Status Code",
            "expected": test_case.expected_status,
            "actual": response["status_code"],
            "passed": response["status_code"] == test_case.expected_status,
        }]
        for assertion in test_case.assertions:
            assertions.append(self._evaluate_assertion(assertion, response))
        return assertions

    def _evaluate_assertion(self, assertion: Dict, response: Dict) -> Dict:
        """Dispatch an assertion spec to its type-specific evaluator."""
        handlers = {
            "json_path": self._evaluate_json_path_assertion,
            "header": self._evaluate_header_assertion,
            "contains": self._evaluate_contains_assertion,
            "regex": self._evaluate_regex_assertion,
        }
        assertion_type = assertion.get("type", "json_path")
        handler = handlers.get(assertion_type)
        if handler is None:
            return {
                "name": assertion.get("name", "Unknown"),
                "passed": False,
                "error": f"Unknown assertion type: {assertion_type}"
            }
        return handler(assertion, response)

    def _evaluate_json_path_assertion(self, assertion: Dict, response: Dict) -> Dict:
        """Evaluate a dotted-path assertion against the JSON body.

        Simplified resolver (use jsonpath-ng in production): supports the
        root path ("$" or ""), an optional "$." prefix, dict keys and
        numeric list indices separated by dots.
        """
        try:
            json_path = assertion["path"]
            expected_value = assertion.get("value")
            operator = assertion.get("operator", "equals")

            # Normalize: strip a leading "$" root marker and empty segments
            # so "$", "" and "$.a.b" all resolve (previously "$" never matched).
            path = json_path[1:] if json_path.startswith("$") else json_path
            parts = [p for p in path.split(".") if p]

            current = response.get("body", {})
            for part in parts:
                if isinstance(current, dict):
                    current = current.get(part)
                elif isinstance(current, list) and part.isdigit():
                    current = current[int(part)]
                else:
                    current = None
                    break

            comparisons = {
                "equals": lambda: current == expected_value,
                "not_equals": lambda: current != expected_value,
                "exists": lambda: current is not None,
                "not_exists": lambda: current is None,
                "greater_than": lambda: current > expected_value,
                "less_than": lambda: current < expected_value,
            }
            # Unknown operators evaluate to False, matching prior behavior.
            passed = comparisons.get(operator, lambda: False)()

            return {
                "name": assertion.get("name", f"JSON Path: {json_path}"),
                "expected": expected_value,
                "actual": current,
                "passed": passed,
            }
        except Exception as e:
            return {
                "name": assertion.get("name", "JSON Path Assertion"),
                "passed": False,
                "error": str(e),
            }

    def _evaluate_header_assertion(self, assertion: Dict, response: Dict) -> Dict:
        """Evaluate an assertion against a single response header."""
        header_name = assertion["header"]
        expected_value = assertion.get("value")
        operator = assertion.get("operator", "equals")

        actual_value = response.get("headers", {}).get(header_name)

        passed = False
        if operator == "equals":
            passed = actual_value == expected_value
        elif operator == "contains":
            passed = expected_value in actual_value if actual_value else False
        elif operator == "exists":
            passed = actual_value is not None

        return {
            "name": assertion.get("name", f"Header: {header_name}"),
            "expected": expected_value,
            "actual": actual_value,
            "passed": passed,
        }

    def _evaluate_contains_assertion(self, assertion: Dict, response: Dict) -> Dict:
        """Check that the stringified body contains the expected text."""
        expected_text = assertion["text"]
        response_text = str(response.get("body", ""))

        return {
            "name": assertion.get("name", f"Contains: {expected_text}"),
            "expected": expected_text,
            # Truncate long bodies in the report for readability.
            "actual": response_text[:100] + "..." if len(response_text) > 100 else response_text,
            "passed": expected_text in response_text,
        }

    def _evaluate_regex_assertion(self, assertion: Dict, response: Dict) -> Dict:
        """Check that a regex pattern matches somewhere in the stringified body."""
        import re

        pattern = assertion["pattern"]
        response_text = str(response.get("body", ""))

        return {
            "name": assertion.get("name", f"Regex: {pattern}"),
            "expected": pattern,
            "actual": response_text[:100] + "..." if len(response_text) > 100 else response_text,
            "passed": re.search(pattern, response_text) is not None,
        }

    def _run_hooks(self, hooks: List[Dict]):
        """Run setup/teardown hooks in order; unknown hook types are ignored."""
        for hook in hooks:
            hook_type = hook.get("type", "http")
            if hook_type == "http":
                self._run_http_hook(hook)
            elif hook_type == "variable":
                self._run_variable_hook(hook)

    def _run_http_hook(self, hook: Dict):
        """Fire an HTTP request hook; failures are logged, never raised."""
        try:
            response = self.session.request(
                method=hook.get("method", "GET"),
                url=hook["url"],
                headers=hook.get("headers", {}),
                json=hook.get("body"),
            )
            # Optionally capture the response under hook["store_as"].
            if "store_as" in hook:
                self.variables[hook["store_as"]] = {
                    "status_code": response.status_code,
                    "headers": dict(response.headers),
                    "body": self._decode_body(response),
                }
        except Exception as e:
            logger.error(f"HTTP hook failed: {e}")

    def _run_variable_hook(self, hook: Dict):
        """Set a suite variable from a hook definition."""
        self.variables[hook["name"]] = hook["value"]
389
+
390
class PerformanceTester:
    """Runs closed-loop load tests against an endpoint."""

    def __init__(self):
        # Shared session gives connection pooling across worker requests.
        self.session = requests.Session()

    def run_performance_test(self, perf_test: "PerformanceTest") -> "PerformanceResult":
        """Fire ``concurrent_users`` requests and aggregate their timings.

        When ramp_up_seconds > 0, worker submissions are spaced by a constant
        gap so the last user starts ~ramp_up_seconds after the first.  (The
        previous code slept a cumulative i/users_per_second each iteration,
        stretching the ramp quadratically.)  If the duration budget expires
        before all requests finish, pending ones are counted as failures
        instead of aborting the whole run.
        """
        start_time = time.time()
        results = []

        # Constant inter-arrival gap between worker starts during ramp-up.
        if perf_test.ramp_up_seconds > 0 and perf_test.concurrent_users > 0:
            submit_gap = perf_test.ramp_up_seconds / perf_test.concurrent_users
        else:
            submit_gap = 0.0

        with concurrent.futures.ThreadPoolExecutor(
            max_workers=perf_test.concurrent_users
        ) as executor:
            futures = []
            for i in range(perf_test.concurrent_users):
                if submit_gap and i:
                    time.sleep(submit_gap)
                futures.append(executor.submit(self._make_request, perf_test.test_case))

            try:
                for future in concurrent.futures.as_completed(
                    futures, timeout=perf_test.duration_seconds
                ):
                    try:
                        results.append(future.result())
                    except Exception as e:
                        results.append({"error": str(e), "duration": 0})
            except concurrent.futures.TimeoutError:
                # Budget exhausted: record still-pending requests as failures
                # rather than losing everything collected so far.
                for future in futures:
                    if not future.done():
                        future.cancel()
                        results.append({"error": "timed out", "duration": 0, "success": False})

        duration = time.time() - start_time
        return self._calculate_performance_metrics(results, duration)

    def _make_request(self, test_case: "TestCase") -> Dict:
        """Issue one timed request; success means an HTTP status below 400."""
        start_time = time.time()
        try:
            response = self.session.request(
                method=test_case.method,
                url=test_case.url,
                headers=test_case.headers,
                json=test_case.body,
                timeout=test_case.timeout,
            )
            return {
                "status_code": response.status_code,
                "duration": time.time() - start_time,
                "success": response.status_code < 400,
            }
        except Exception as e:
            return {
                "error": str(e),
                "duration": time.time() - start_time,
                "success": False,
            }

    def _calculate_performance_metrics(self, results: List[Dict], duration: float) -> "PerformanceResult":
        """Aggregate raw per-request dicts into a PerformanceResult."""
        successful = [r for r in results if r.get("success", False)]
        failed = [r for r in results if not r.get("success", False)]
        durations = [r["duration"] for r in results if "duration" in r]

        if not durations:
            # Nothing measurable came back — report a fully failed run.
            return PerformanceResult(
                test_name="Performance Test",
                total_requests=len(results),
                successful_requests=0,
                failed_requests=len(results),
                average_response_time=0,
                min_response_time=0,
                max_response_time=0,
                p95_response_time=0,
                p99_response_time=0,
                requests_per_second=0,
                error_rate=100.0,
                duration=duration,
            )

        sorted_durations = sorted(durations)
        last = len(sorted_durations) - 1
        # Nearest-rank percentiles, clamped to the slowest sample.
        p95 = sorted_durations[min(int(len(sorted_durations) * 0.95), last)]
        p99 = sorted_durations[min(int(len(sorted_durations) * 0.99), last)]

        return PerformanceResult(
            test_name="Performance Test",
            total_requests=len(results),
            successful_requests=len(successful),
            failed_requests=len(failed),
            average_response_time=statistics.mean(durations),
            min_response_time=sorted_durations[0],
            max_response_time=sorted_durations[-1],
            p95_response_time=p95,
            p99_response_time=p99,
            # Guard against a zero-length run (previously ZeroDivisionError).
            requests_per_second=len(results) / duration if duration > 0 else 0.0,
            error_rate=(len(failed) / len(results)) * 100,
            duration=duration,
        )
503
+
504
class TestGenerator:
    """Generates test cases automatically."""

    def __init__(self):
        # AI-backed generation is optional; None when unavailable.
        self.ai_manager = AIGenerationManager() if AI_AVAILABLE else None

    def generate_tests_from_config(self, config: Dict) -> "TestSuite":
        """Build a TestSuite with one basic test case per configured route."""
        cases = [
            self._create_test_case_from_route(route)
            for route in config.get("routes", [])
        ]
        server = config.get("server", {})
        return TestSuite(
            name="Auto-generated Test Suite",
            description="Automatically generated tests from API configuration",
            base_url=server.get("base_url", "http://localhost:8000"),
            test_cases=cases,
        )

    def _create_test_case_from_route(self, route: Dict) -> "TestCase":
        """Derive a 200-status test case (plus a JSON-body check) from a route."""
        method = route.get("method", "GET")
        path = route.get("path", "/")
        response = route.get("response", {})

        # Flatten the path into an identifier-friendly test name.
        sanitized = path.replace('/', '_').replace('{', '').replace('}', '')

        assertions = []
        if isinstance(response, dict):
            assertions.append({
                "type": "json_path",
                "path": "$",
                "operator": "exists",
                "name": "Response is JSON object"
            })

        return TestCase(
            name=f"{method}_{sanitized}",
            description=f"Test for {method} {path}",
            method=method,
            url=path,
            expected_status=200,
            assertions=assertions,
        )

    def generate_ai_tests(self, api_description: str, endpoints: List[str]) -> "TestSuite":
        """Generate test cases using AI.

        Raises ValueError when the AI manager is unavailable.  The AI response
        is currently not parsed (simplified implementation): one GET test per
        endpoint is emitted regardless of the model output.
        """
        if not self.ai_manager:
            raise ValueError("AI generation not available")

        prompt = f"""
        Generate comprehensive test cases for the following API:
        
        Description: {api_description}
        Endpoints: {', '.join(endpoints)}
        
        Generate test cases that cover:
        1. Happy path scenarios
        2. Error cases
        3. Edge cases
        4. Validation tests
        
        Return the test cases in JSON format.
        """

        result = self.ai_manager.generate_mock_data(
            prompt=prompt,
            endpoint="/generate-tests",
            count=1
        )

        # Parse AI response and convert to test cases
        # This is a simplified implementation
        cases = [
            TestCase(
                name=f"AI_Generated_Test_{index + 1}",
                description=f"AI-generated test for {endpoint}",
                method="GET",
                url=endpoint,
                expected_status=200,
            )
            for index, endpoint in enumerate(endpoints)
        ]

        return TestSuite(
            name="AI-Generated Test Suite",
            description="Test cases generated using AI",
            base_url="http://localhost:8000",
            test_cases=cases,
        )
598
+
599
class TestingFramework:
    """Facade tying together test running, performance testing and generation."""

    def __init__(self):
        self.test_runner = TestRunner()
        self.performance_tester = PerformanceTester()
        self.test_generator = TestGenerator()

    @staticmethod
    def _read_structured_file(path: str) -> Dict:
        """Parse a YAML or JSON file, choosing the parser by file extension."""
        with open(path, 'r') as f:
            if path.endswith(('.yaml', '.yml')):
                return yaml.safe_load(f)
            return json.load(f)

    def run_tests_from_file(self, test_file: str) -> List["TestResult"]:
        """Run tests from a test file."""
        data = self._read_structured_file(test_file)
        suite = self._load_test_suite_from_data(data)
        return self.test_runner.run_test_suite(suite)

    def run_performance_test_from_file(self, perf_file: str) -> "PerformanceResult":
        """Run performance test from a file."""
        data = self._read_structured_file(perf_file)
        perf_test = self._load_performance_test_from_data(data)
        return self.performance_tester.run_performance_test(perf_test)

    def generate_tests(self, config_file: str, output_file: str):
        """Generate tests from an API configuration and write them to disk."""
        config = self._read_structured_file(config_file)
        suite = self.test_generator.generate_tests_from_config(config)

        payload = asdict(suite)
        with open(output_file, 'w') as f:
            if output_file.endswith(('.yaml', '.yml')):
                yaml.dump(payload, f, indent=2)
            else:
                json.dump(payload, f, indent=2)

    @staticmethod
    def _build_test_case(td: Dict, default_name: str) -> "TestCase":
        """Materialize one TestCase from its dict form, filling defaults."""
        return TestCase(
            name=td.get("name", default_name),
            description=td.get("description", ""),
            method=td.get("method", "GET"),
            url=td.get("url", "/"),
            headers=td.get("headers", {}),
            body=td.get("body", {}),
            expected_status=td.get("expected_status", 200),
            expected_schema=td.get("expected_schema"),
            assertions=td.get("assertions", []),
            timeout=td.get("timeout", 30),
            retries=td.get("retries", 0),
        )

    def _load_test_suite_from_data(self, data: Dict) -> "TestSuite":
        """Load test suite from data dictionary."""
        cases = [
            self._build_test_case(td, "Unnamed Test")
            for td in data.get("test_cases", [])
        ]
        return TestSuite(
            name=data.get("name", "Test Suite"),
            description=data.get("description", ""),
            base_url=data.get("base_url", ""),
            test_cases=cases,
            setup_hooks=data.get("setup_hooks", []),
            teardown_hooks=data.get("teardown_hooks", []),
            variables=data.get("variables", {}),
        )

    def _load_performance_test_from_data(self, data: Dict) -> "PerformanceTest":
        """Load performance test from data dictionary."""
        case = self._build_test_case(data.get("test_case", {}), "Performance Test Case")
        return PerformanceTest(
            name=data.get("name", "Performance Test"),
            test_case=case,
            concurrent_users=data.get("concurrent_users", 10),
            duration_seconds=data.get("duration_seconds", 60),
            ramp_up_seconds=data.get("ramp_up_seconds", 0),
            target_rps=data.get("target_rps"),
        )