devdox-ai-locust 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of devdox-ai-locust might be problematic. Click here for more details.
- devdox_ai_locust/__init__.py +9 -0
- devdox_ai_locust/cli.py +452 -0
- devdox_ai_locust/config.py +24 -0
- devdox_ai_locust/hybrid_loctus_generator.py +904 -0
- devdox_ai_locust/locust_generator.py +732 -0
- devdox_ai_locust/py.typed +0 -0
- devdox_ai_locust/schemas/__init__.py +0 -0
- devdox_ai_locust/schemas/processing_result.py +24 -0
- devdox_ai_locust/templates/base_workflow.py.j2 +180 -0
- devdox_ai_locust/templates/config.py.j2 +173 -0
- devdox_ai_locust/templates/custom_flows.py.j2 +95 -0
- devdox_ai_locust/templates/endpoint_template.py.j2 +34 -0
- devdox_ai_locust/templates/env.example.j2 +3 -0
- devdox_ai_locust/templates/fallback_locust.py.j2 +25 -0
- devdox_ai_locust/templates/locust.py.j2 +70 -0
- devdox_ai_locust/templates/readme.md.j2 +46 -0
- devdox_ai_locust/templates/requirement.txt.j2 +31 -0
- devdox_ai_locust/templates/test_data.py.j2 +276 -0
- devdox_ai_locust/templates/utils.py.j2 +335 -0
- devdox_ai_locust/utils/__init__.py +0 -0
- devdox_ai_locust/utils/file_creation.py +120 -0
- devdox_ai_locust/utils/open_ai_parser.py +431 -0
- devdox_ai_locust/utils/swagger_utils.py +94 -0
- devdox_ai_locust-0.1.1.dist-info/METADATA +424 -0
- devdox_ai_locust-0.1.1.dist-info/RECORD +29 -0
- devdox_ai_locust-0.1.1.dist-info/WHEEL +5 -0
- devdox_ai_locust-0.1.1.dist-info/entry_points.txt +3 -0
- devdox_ai_locust-0.1.1.dist-info/licenses/LICENSE +201 -0
- devdox_ai_locust-0.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Test Data Generator for Locust Performance Tests
|
|
3
|
+
|
|
4
|
+
Provides realistic test data generation for API endpoints.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import random
|
|
8
|
+
import string
|
|
9
|
+
import uuid
|
|
10
|
+
from datetime import datetime, timedelta
|
|
11
|
+
from typing import Any, Dict, List, Optional, Union
|
|
12
|
+
from faker import Faker
|
|
13
|
+
import json
|
|
14
|
+
|
|
15
|
+
fake = Faker()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class TestDataGenerator:
    """Generates realistic test data for API testing.

    Values are produced either from a JSON-Schema fragment
    (``generate_json_data``) or through simple typed helpers
    (``generate_string``, ``generate_integer``, ...).  Passing a seed makes
    every generated value reproducible.
    """

    def __init__(self, seed: Optional[int] = None):
        # BUG FIX: use `is not None` so that seed=0 (falsy but valid)
        # still seeds both RNGs.
        if seed is not None:
            random.seed(seed)
            Faker.seed(seed)

        self.generated_ids = set()  # every id handed out by generate_id()
        self.user_sessions = {}  # reserved for per-user session state

    def generate_json_data(self, schema: Dict[str, Any], required_only: bool = False) -> Any:
        """
        Generate realistic data based on a JSON Schema fragment.

        Args:
            schema: JSON Schema dictionary
            required_only: If True, only generate required object fields

        Returns:
            Generated test data whose Python type mirrors the schema's
            ``type`` (dict, list, str, int, float, bool); None for an
            unrecognised type, {} for a non-dict schema.
        """
        if not isinstance(schema, dict):
            return {}

        schema_type = schema.get('type', 'object')

        if schema_type == 'object':
            return self._generate_object_data(schema, required_only)
        elif schema_type == 'array':
            return self._generate_array_data(schema)
        elif schema_type == 'string':
            return self._generate_string_value(schema)
        elif schema_type == 'integer':
            return self._generate_integer_value(schema)
        elif schema_type == 'number':
            return self._generate_number_value(schema)
        elif schema_type == 'boolean':
            return self._generate_boolean_value(schema)
        else:
            return None

    def _generate_object_data(self, schema: Dict[str, Any], required_only: bool = False) -> Dict[str, Any]:
        """Generate object data from schema properties."""
        result: Dict[str, Any] = {}
        properties = schema.get('properties', {})
        required = schema.get('required', [])

        for prop_name, prop_schema in properties.items():
            if required_only and prop_name not in required:
                continue
            if not isinstance(prop_schema, dict):
                continue

            if '$ref' in prop_schema:
                result[prop_name] = self._handle_reference(prop_schema['$ref'])
            elif prop_schema.get('type') == 'string':
                # Pass the property name so string generation can pick a
                # realistic value (email, name, token, url, ...).
                result[prop_name] = self._generate_string_value(prop_schema, prop_name)
            else:
                # BUG FIX: non-$ref properties were previously skipped
                # entirely, so plain object schemas always produced {}.
                result[prop_name] = self.generate_json_data(prop_schema, required_only)

        return result

    def _generate_array_data(self, schema: Dict[str, Any]) -> List[Any]:
        """Generate a list of minItems..maxItems items from the item schema."""
        items_schema = schema.get('items', {})
        min_items = schema.get('minItems', 1)
        # Guard against schemas where maxItems < minItems (randint would raise).
        max_items = max(min_items, schema.get('maxItems', 3))

        array_length = random.randint(min_items, max_items)
        result = []

        for _ in range(array_length):
            if '$ref' in items_schema:
                result.append(self._handle_reference(items_schema['$ref']))
            else:
                result.append(self.generate_json_data(items_schema))

        return result

    def _generate_integer_value(self, schema: Dict[str, Any]) -> int:
        """Generate an integer honouring minimum/maximum/multipleOf."""
        minimum = schema.get('minimum', 0)
        maximum = schema.get('maximum', 1000)
        multiple_of = schema.get('multipleOf')

        value = random.randint(minimum, maximum)

        if multiple_of:
            # Snap down to the nearest multiple; BUG FIX: never drop below
            # the schema minimum while doing so.
            value = max(minimum, (value // multiple_of) * multiple_of)

        return value

    def _generate_number_value(self, schema: Dict[str, Any]) -> float:
        """Generate a float honouring minimum/maximum/multipleOf."""
        minimum = schema.get('minimum', 0.0)
        maximum = schema.get('maximum', 1000.0)
        multiple_of = schema.get('multipleOf')

        value = random.uniform(minimum, maximum)

        if multiple_of:
            value = round(value / multiple_of) * multiple_of
        else:
            value = round(value, 2)

        return value

    def _generate_boolean_value(self, schema: Dict[str, Any]) -> bool:
        """Generate a random boolean (schema accepted for signature symmetry)."""
        return random.choice([True, False])

    def _handle_reference(self, ref: str) -> Any:
        """Resolve a $ref heuristically by its final path component.

        Well-known enum-like names get a plausible choice; anything else
        falls back to a synthetic "<name>_value_N" string.
        """
        ref_name = ref.split('/')[-1]
        ref_lower = ref_name.lower()

        if 'hosting' in ref_lower or 'provider' in ref_lower:
            return random.choice(['github', 'gitlab', 'bitbucket'])
        elif 'role' in ref_lower:
            return random.choice(['admin', 'user', 'viewer'])
        elif 'status' in ref_lower:
            return random.choice(['active', 'inactive', 'pending'])
        elif 'type' in ref_lower:
            return random.choice(['primary', 'secondary', 'tertiary'])
        else:
            return f"{ref_name.lower()}_value_{random.randint(1, 100)}"

    def _generate_by_name_pattern(self, prop_name: str) -> str:
        """Generate a value based on property-name patterns as a fallback."""
        prop_name_lower = prop_name.lower()

        if 'email' in prop_name_lower:
            return fake.email()
        elif any(keyword in prop_name_lower for keyword in ['name', 'label']):
            return fake.catch_phrase()
        elif any(keyword in prop_name_lower for keyword in ['token', 'key']):
            return ''.join(random.choices(string.ascii_letters + string.digits, k=32))
        elif 'id' in prop_name_lower:
            return str(uuid.uuid4())
        elif 'url' in prop_name_lower:
            return fake.url()
        else:
            return self.random_string(10)

    def _generate_string_value(self, schema: Dict[str, Any], prop_name: str = "") -> str:
        """Generate a string honouring schema constraints and name hints.

        Enum values win outright; otherwise the property name is used to
        pick a realistic fake (email, name, token, url, phone, address,
        text, uuid) before falling back to a random alphanumeric string.
        """
        min_length = schema.get('minLength', 1)
        max_length = schema.get('maxLength', 50)
        enum_values = schema.get('enum')
        prop_name_lower = prop_name.lower()

        if enum_values:
            return random.choice(enum_values)

        if any(keyword in prop_name_lower for keyword in ['email', 'mail']):
            return fake.email()

        if any(keyword in prop_name_lower for keyword in ['name', 'label', 'title']):
            if 'first' in prop_name_lower:
                return fake.first_name()
            elif 'last' in prop_name_lower:
                return fake.last_name()
            elif 'company' in prop_name_lower:
                return fake.company()
            else:
                return fake.catch_phrase()[:max_length]

        if any(keyword in prop_name_lower for keyword in ['token', 'key', 'secret', 'password']):
            token_length = min(max_length, 32)
            return ''.join(random.choices(string.ascii_letters + string.digits, k=token_length))

        if any(keyword in prop_name_lower for keyword in ['url', 'uri', 'endpoint']):
            return fake.url()

        if 'phone' in prop_name_lower:
            return fake.phone_number()

        if 'address' in prop_name_lower:
            # BUG FIX: faker addresses contain real newlines ('\n'); the
            # previous escaped pattern ('\\n') never matched anything.
            return fake.address().replace('\n', ', ')

        if any(keyword in prop_name_lower for keyword in ['description', 'comment', 'note', 'text']):
            return fake.text(max_nb_chars=max_length)

        if any(keyword in prop_name_lower for keyword in ['id', 'uuid']):
            return str(uuid.uuid4())

        actual_length = min(max_length, max(min_length, random.randint(5, 20)))
        return ''.join(random.choices(string.ascii_letters + string.digits, k=actual_length))

    def generate_string(self, length: int = 10, pattern: Optional[str] = None, default: Optional[str] = None) -> str:
        """Return *default* when given, a pattern-matched fake, or a random string."""
        # BUG FIX: `is not None` so an explicit empty-string default is honoured.
        if default is not None:
            return default

        if pattern:
            pattern_lower = pattern.lower()
            if 'email' in pattern_lower:
                return fake.email()
            elif 'name' in pattern_lower:
                return fake.name()
            elif 'phone' in pattern_lower:
                return fake.phone_number()
            elif 'address' in pattern_lower:
                return fake.address()
            elif 'url' in pattern_lower:
                return fake.url()
            elif 'uuid' in pattern_lower:
                return str(uuid.uuid4())

        return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

    def generate_integer(self, min_val: int = 1, max_val: int = 1000, default: Optional[int] = None) -> int:
        """Return *default* when given, else a random int in [min_val, max_val]."""
        if default is not None:
            return default
        return random.randint(min_val, max_val)

    def generate_float(self, min_val: float = 0.0, max_val: float = 1000.0, default: Optional[float] = None) -> float:
        """Return *default* when given, else a random float rounded to 2 places."""
        if default is not None:
            return default
        return round(random.uniform(min_val, max_val), 2)

    def generate_boolean(self, default: Optional[bool] = None) -> bool:
        """Return *default* when given, else a random boolean."""
        if default is not None:
            return default
        return random.choice([True, False])

    def generate_id(self, prefix: str = "", id_type: str = "uuid") -> str:
        """Generate an id ("uuid", "incremental", or a random 8-char string).

        Every id is remembered in ``self.generated_ids``; "incremental"
        derives the next number from that set's current size.
        """
        if id_type == "uuid":
            new_id = str(uuid.uuid4())
        elif id_type == "incremental":
            new_id = f"{len(self.generated_ids) + 1}"
        else:
            new_id = ''.join(random.choices(string.ascii_letters + string.digits, k=8))

        if prefix:
            new_id = f"{prefix}_{new_id}"

        self.generated_ids.add(new_id)
        return new_id

    def generate_email(self) -> str:
        """Return a realistic fake email address."""
        return fake.email()

    def random_string(self, length: int = 10) -> str:
        """Return a random ASCII-letter string of the given length."""
        return ''.join(random.choices(string.ascii_letters, k=length))

    def random_int(self, min_val: int = 0, max_val: int = 1000) -> int:
        """Return a random integer in [min_val, max_val]."""
        return random.randint(min_val, max_val)

    def random_float(self, min_val: float = 0.0, max_val: float = 1000.0) -> float:
        """Return a random float in [min_val, max_val] (unrounded)."""
        return random.uniform(min_val, max_val)

    def random_bool(self) -> bool:
        """Return a random boolean."""
        return random.choice([True, False])

    def random_uuid(self) -> str:
        """Return a fresh UUID4 as a string."""
        return str(uuid.uuid4())

    def random_date(self, start_days_ago: int = 365, end_days_ahead: int = 365) -> str:
        """Return an ISO date between start_days_ago in the past and end_days_ahead ahead."""
        start_date = datetime.now() - timedelta(days=start_days_ago)
        end_date = datetime.now() + timedelta(days=end_days_ahead)
        return fake.date_between(start_date=start_date, end_date=end_date).isoformat()
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
# Global, unseeded instance shared for convenience by generated test code.
test_data_generator = TestDataGenerator()
|
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Utility classes for Locust load testing
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import logging
|
|
7
|
+
import time
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import Dict, List, Any, Optional
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
import csv
|
|
12
|
+
from dataclasses import dataclass, asdict
|
|
13
|
+
import statistics
|
|
14
|
+
from collections import defaultdict
|
|
15
|
+
|
|
16
|
+
import requests
|
|
17
|
+
from locust.runners import MasterRunner
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class ResponseMetric:
    """Response metric data structure"""
    # NOTE: field order is part of the positional-constructor interface.
    method: str  # HTTP verb (upper-cased by the recorder)
    endpoint: str  # endpoint path the request targeted
    response_time: float  # elapsed request time in milliseconds
    status_code: int  # HTTP status code of the response
    response_size: int  # body size in bytes (0 when empty)
    timestamp: datetime  # wall-clock time the metric was recorded
    success: bool  # True when status_code < 400
    error_message: Optional[str] = None  # truncated body text on failure
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class ResponseValidator:
    """Validates HTTP responses against per-method expectations.

    Checks status code, response time, forbidden (debug) headers, basic
    JSON body structure, and any validator registered through
    ``add_custom_validation``.
    """

    def __init__(self):
        # Per-HTTP-method validation rules.
        self.validation_rules = {
            'GET': {
                'expected_status': [200, 202, 206],
                'max_response_time_ms': 2000,
                'required_headers': ['content-type'],
                'forbidden_headers': ['x-debug', 'x-error-detail']
            },
            'POST': {
                'expected_status': [200, 201, 202],
                'max_response_time_ms': 3000,
                'required_headers': ['content-type']
            },
            'PUT': {
                'expected_status': [200, 202, 204],
                'max_response_time_ms': 3000,
                'required_headers': ['content-type']
            },
            'PATCH': {
                'expected_status': [200, 202, 204],
                'max_response_time_ms': 3000,
                'required_headers': ['content-type']
            },
            'DELETE': {
                'expected_status': [200, 202, 204],
                'max_response_time_ms': 2000
            }
        }
        # BUG FIX: custom validators used to be created lazily inside
        # add_custom_validation and were never consulted anywhere.
        # Initialise eagerly and apply them in validate_response.
        self.custom_validators: Dict[str, Any] = {}

    def validate_response(self, response, method: str, endpoint: str) -> bool:
        """Validate an HTTP response; return True when every check passes.

        Args:
            response: requests-style response (status_code, headers,
                elapsed, json()).
            method: HTTP method used for the request (any case).
            endpoint: endpoint path, used only in log messages.
        """
        is_valid = True
        method_upper = method.upper()
        rules = self.validation_rules.get(method_upper, {})

        # Validate status code
        expected_status = rules.get('expected_status', [200])
        if response.status_code not in expected_status:
            logger.warning(f"Unexpected status code {response.status_code} for {method_upper} {endpoint}")
            is_valid = False

        # Validate response time
        max_time = rules.get('max_response_time_ms', 5000)
        if response.elapsed.total_seconds() * 1000 > max_time:
            logger.warning(f"Slow response {response.elapsed.total_seconds() * 1000:.2f}ms for {method_upper} {endpoint}")
            is_valid = False

        # Check forbidden headers (these would leak debug/error internals)
        forbidden_headers = rules.get('forbidden_headers', [])
        for header in forbidden_headers:
            if header.lower() in [h.lower() for h in response.headers.keys()]:
                logger.warning(f"Forbidden header '{header}' present for {method_upper} {endpoint}")
                is_valid = False

        # Validate JSON response structure (only for successful JSON bodies)
        if response.status_code < 400 and 'application/json' in response.headers.get('content-type', ''):
            try:
                json_data = response.json()
                if not self._validate_json_structure(json_data, method_upper, endpoint):
                    is_valid = False
            except json.JSONDecodeError:
                logger.warning(f"Invalid JSON response for {method_upper} {endpoint}")
                is_valid = False

        # BUG FIX: apply any validator registered for this method (they
        # were previously stored but never executed).
        custom = self.custom_validators.get(method_upper)
        if custom is not None and not custom(response):
            is_valid = False

        return is_valid

    def _validate_json_structure(self, json_data: Any, method: str, endpoint: str) -> bool:
        """Shallow sanity check of a decoded JSON body for the given method."""
        if method == 'GET' and isinstance(json_data, list):
            # List responses should contain non-empty objects.
            if json_data and isinstance(json_data[0], dict):
                return 'id' in json_data[0] or len(json_data[0]) > 0
        elif method in ['POST', 'PUT', 'PATCH'] and isinstance(json_data, dict):
            # Mutations should echo back a non-empty object.
            return len(json_data) > 0
        return True

    def add_custom_validation(self, method: str, validation_func):
        """Register *validation_func* (response -> bool) for an HTTP method.

        The method name is normalised to upper case so that registration
        and lookup agree regardless of caller casing.
        """
        if not hasattr(self, 'custom_validators'):
            # Defensive: tolerate instances built before __init__ defined it.
            self.custom_validators = {}
        self.custom_validators[method.upper()] = validation_func
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class RequestLogger:
    """Logs HTTP requests/responses to a dedicated file for later analysis."""

    def __init__(self, log_file: str = "requests.log"):
        self.log_file = log_file
        self.requests_logged = 0  # responses written by this instance

        self.request_logger = logging.getLogger('request_logger')
        self.request_logger.setLevel(logging.INFO)
        # Keep the structured file records out of the root logger's output.
        self.request_logger.propagate = False

        # BUG FIX: 'request_logger' is a process-wide singleton, so every
        # RequestLogger() used to attach one more FileHandler, writing each
        # log line once per instance.  Reuse an already-attached handler.
        existing = [h for h in self.request_logger.handlers
                    if isinstance(h, logging.FileHandler)]
        if existing:
            self.file_handler = existing[0]
        else:
            self.file_handler = logging.FileHandler(log_file)
            self.file_handler.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            self.file_handler.setFormatter(formatter)
            self.request_logger.addHandler(self.file_handler)

    @staticmethod
    def log_request(method: str, url: str, kwargs: Dict):
        """Log an outgoing request; params/bodies are logged at DEBUG level."""
        logger.info(f"REQUEST: {method.upper()} {url}")
        if 'params' in kwargs and kwargs['params']:
            logger.debug(f"Query params: {kwargs['params']}")
        if 'json' in kwargs and kwargs['json']:
            logger.debug(f"JSON body: {json.dumps(kwargs['json'], indent=2)}")
        if 'data' in kwargs and kwargs['data']:
            logger.debug(f"Form data: {kwargs['data']}")

    def log_response(self, response, method: str, url: str):
        """Write one JSON line per response; errors also go to the app log."""
        self.requests_logged += 1
        log_entry = {
            'timestamp': datetime.now().isoformat(),
            'method': method.upper(),
            'url': url,
            'status_code': response.status_code,
            'response_time_ms': response.elapsed.total_seconds() * 1000,
            'response_size': len(response.content) if response.content else 0
        }
        self.request_logger.info(json.dumps(log_entry))
        if response.status_code >= 400:
            logger.error(f"ERROR RESPONSE: {response.status_code} for {method.upper()} {url}")
            if response.text:
                # Truncate to keep error logs bounded.
                logger.error(f"Error details: {response.text[:500]}")
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class PerformanceMonitor:
    """Collects per-request metrics and produces aggregate reports."""

    def __init__(self):
        # Forward-ref string avoids a hard runtime dependency on the
        # ResponseMetric class at annotation-evaluation time.
        self.metrics: List["ResponseMetric"] = []  # one entry per recorded response
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None
        self.request_counts = defaultdict(int)  # "METHOD /path" -> request count
        self.error_counts = defaultdict(int)    # "METHOD /path" -> failure count

    def test_start(self):
        """Mark the beginning of the measured window."""
        self.start_time = datetime.now()
        logger.info("Performance monitoring started")

    def test_stop(self):
        """Mark the end of the measured window."""
        self.end_time = datetime.now()
        logger.info("Performance monitoring stopped")

    def record_response(self, response, method: str, endpoint: str):
        """Record one response; status < 400 counts as success."""
        metric = ResponseMetric(
            method=method.upper(),
            endpoint=endpoint,
            response_time=response.elapsed.total_seconds() * 1000,
            status_code=response.status_code,
            response_size=len(response.content) if response.content else 0,
            timestamp=datetime.now(),
            success=response.status_code < 400
        )
        if not metric.success:
            # Keep only a bounded excerpt of the error body.
            metric.error_message = response.text[:200]
        self.metrics.append(metric)
        endpoint_key = f"{method.upper()} {endpoint}"
        self.request_counts[endpoint_key] += 1
        if not metric.success:
            self.error_counts[endpoint_key] += 1

    def on_request_event(self, request_type, name, response_time, response_length, exception, context):
        """Locust request-event hook; only logs failures."""
        if exception:
            logger.error(f"Request failed: {request_type} {name} - {exception}")

    def get_statistics(self) -> Dict[str, Any]:
        """Aggregate recorded metrics; returns {} when nothing was recorded."""
        if not self.metrics:
            return {}
        response_times = [m.response_time for m in self.metrics]
        successful_requests = [m for m in self.metrics if m.success]
        stats = {
            'total_requests': len(self.metrics),
            'successful_requests': len(successful_requests),
            'failed_requests': len(self.metrics) - len(successful_requests),
            'error_rate': (len(self.metrics) - len(successful_requests)) / len(self.metrics) * 100,
            'response_times': {
                'min': min(response_times),
                'max': max(response_times),
                'mean': statistics.mean(response_times),
                'median': statistics.median(response_times),
                'p95': self._percentile(response_times, 95),
                'p99': self._percentile(response_times, 99)
            }
        }
        if self.start_time and self.end_time:
            duration = (self.end_time - self.start_time).total_seconds()
            stats['test_duration_seconds'] = duration
            # BUG FIX: guard against a zero-length window (ZeroDivisionError).
            stats['requests_per_second'] = len(self.metrics) / duration if duration > 0 else 0.0
        stats['endpoints'] = self._get_endpoint_stats()
        return stats

    def _percentile(self, data: List[float], percentile: int) -> float:
        """Nearest-rank percentile of *data* (data need not be sorted)."""
        sorted_data = sorted(data)
        index = int((percentile / 100) * len(sorted_data))
        return sorted_data[min(index, len(sorted_data) - 1)]

    def _get_endpoint_stats(self) -> Dict[str, Dict]:
        """Per-endpoint request counts, error rates, and latency summaries."""
        endpoint_stats = {}
        for endpoint_key in self.request_counts.keys():
            endpoint_metrics = [m for m in self.metrics if f"{m.method} {m.endpoint}" == endpoint_key]
            if endpoint_metrics:
                response_times = [m.response_time for m in endpoint_metrics]
                successful = len([m for m in endpoint_metrics if m.success])
                endpoint_stats[endpoint_key] = {
                    'total_requests': len(endpoint_metrics),
                    'successful_requests': successful,
                    'error_rate': (len(endpoint_metrics) - successful) / len(endpoint_metrics) * 100,
                    'avg_response_time': statistics.mean(response_times),
                    'p95_response_time': self._percentile(response_times, 95)
                }
        return endpoint_stats

    def generate_report(self, output_dir: str = "./reports"):
        """Write JSON, CSV, and text-summary reports into *output_dir*."""
        # BUG FIX: parents=True so nested report directories are created.
        Path(output_dir).mkdir(parents=True, exist_ok=True)
        stats = self.get_statistics()

        json_file = Path(output_dir) / f"performance_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        with open(json_file, 'w') as f:
            json.dump(stats, f, indent=2, default=str)

        csv_file = Path(output_dir) / f"performance_metrics_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
        with open(csv_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['timestamp', 'method', 'endpoint', 'response_time_ms', 'status_code', 'success'])
            for metric in self.metrics:
                writer.writerow([
                    metric.timestamp,
                    metric.method,
                    metric.endpoint,
                    metric.response_time,
                    metric.status_code,
                    metric.success
                ])

        self._generate_summary_report(stats, output_dir)
        logger.info(f"Performance reports generated in {output_dir}")

    def _generate_summary_report(self, stats: Dict, output_dir: str):
        """Write the human-readable text summary for *stats*."""
        summary_file = Path(output_dir) / f"summary_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"
        with open(summary_file, 'w') as f:
            f.write("PERFORMANCE TEST SUMMARY\n")
            f.write("=" * 50 + "\n\n")
            f.write(f"Test Duration: {stats.get('test_duration_seconds', 0):.2f} seconds\n")
            f.write(f"Total Requests: {stats.get('total_requests', 0)}\n")
            f.write(f"Successful Requests: {stats.get('successful_requests', 0)}\n")
            f.write(f"Failed Requests: {stats.get('failed_requests', 0)}\n")
            f.write(f"Error Rate: {stats.get('error_rate', 0):.2f}%\n")
            f.write(f"Requests/Second: {stats.get('requests_per_second', 0):.2f}\n\n")

            response_times = stats.get('response_times', {})
            f.write("RESPONSE TIMES\n")
            f.write("-" * 20 + "\n")
            f.write(f"Min: {response_times.get('min', 0):.2f}ms\n")
            f.write(f"Max: {response_times.get('max', 0):.2f}ms\n")
            f.write(f"Mean: {response_times.get('mean', 0):.2f}ms\n")
            f.write(f"Median: {response_times.get('median', 0):.2f}ms\n")
            f.write(f"95th Percentile: {response_times.get('p95', 0):.2f}ms\n")
            f.write(f"99th Percentile: {response_times.get('p99', 0):.2f}ms\n\n")

            endpoints = stats.get('endpoints', {})
            if endpoints:
                f.write("PER-ENDPOINT STATISTICS\n")
                f.write("-" * 30 + "\n")
                for endpoint, endpoint_stats in endpoints.items():
                    f.write(f"\n{endpoint}:\n")
                    f.write(f"  Requests: {endpoint_stats['total_requests']}\n")
                    f.write(f"  Error Rate: {endpoint_stats['error_rate']:.2f}%\n")
                    f.write(f"  Avg Response Time: {endpoint_stats['avg_response_time']:.2f}ms\n")
                    f.write(f"  95th Percentile: {endpoint_stats['p95_response_time']:.2f}ms\n")
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
class DataManager:
    """Holds test state shared between simulated users.

    Two scopes are provided: a single map visible to every user, and a
    per-user map keyed by user id.  Lookups for unknown keys/users return
    None rather than raising.
    """

    def __init__(self):
        self.shared_data = {}  # global key/value store
        self.user_data = {}    # user_id -> {key: value}

    def store_shared_data(self, key: str, value: Any):
        """Put *value* into the globally shared store."""
        self.shared_data[key] = value

    def get_shared_data(self, key: str) -> Any:
        """Fetch a shared value; None when absent."""
        return self.shared_data.get(key)

    def store_user_data(self, user_id: str, key: str, value: Any):
        """Put *value* into the per-user store for *user_id*."""
        self.user_data.setdefault(user_id, {})[key] = value

    def get_user_data(self, user_id: str, key: str) -> Any:
        """Fetch a per-user value; None when the user or key is unknown."""
        return self.user_data.get(user_id, {}).get(key)

    def cleanup_user_data(self, user_id: str):
        """Drop all state kept for *user_id* (no-op when unknown)."""
        self.user_data.pop(user_id, None)
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
# Global instance shared by the generated Locust test modules.
data_manager = DataManager()
|
|
File without changes
|