setu-trafficmonitor 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- setu_trafficmonitor-2.0.0.dist-info/LICENSE +21 -0
- setu_trafficmonitor-2.0.0.dist-info/METADATA +401 -0
- setu_trafficmonitor-2.0.0.dist-info/RECORD +42 -0
- setu_trafficmonitor-2.0.0.dist-info/WHEEL +5 -0
- setu_trafficmonitor-2.0.0.dist-info/top_level.txt +1 -0
- trafficmonitor/__init__.py +11 -0
- trafficmonitor/admin.py +217 -0
- trafficmonitor/analytics/__init__.py +0 -0
- trafficmonitor/analytics/enhanced_queries.py +286 -0
- trafficmonitor/analytics/serializers.py +238 -0
- trafficmonitor/analytics/tests.py +757 -0
- trafficmonitor/analytics/urls.py +18 -0
- trafficmonitor/analytics/views.py +694 -0
- trafficmonitor/apps.py +7 -0
- trafficmonitor/circuit_breaker.py +63 -0
- trafficmonitor/conf.py +154 -0
- trafficmonitor/dashboard_security.py +111 -0
- trafficmonitor/db_utils.py +37 -0
- trafficmonitor/exceptions.py +93 -0
- trafficmonitor/health.py +66 -0
- trafficmonitor/load_test.py +423 -0
- trafficmonitor/load_test_api.py +307 -0
- trafficmonitor/management/__init__.py +1 -0
- trafficmonitor/management/commands/__init__.py +1 -0
- trafficmonitor/management/commands/cleanup_request_logs.py +77 -0
- trafficmonitor/middleware.py +383 -0
- trafficmonitor/migrations/0001_initial.py +93 -0
- trafficmonitor/migrations/__init__.py +0 -0
- trafficmonitor/models.py +206 -0
- trafficmonitor/monitoring.py +104 -0
- trafficmonitor/permissions.py +64 -0
- trafficmonitor/security.py +180 -0
- trafficmonitor/settings_production.py +105 -0
- trafficmonitor/static/analytics/css/dashboard.css +99 -0
- trafficmonitor/static/analytics/js/dashboard-production.js +339 -0
- trafficmonitor/static/analytics/js/dashboard-v2.js +697 -0
- trafficmonitor/static/analytics/js/dashboard.js +693 -0
- trafficmonitor/tasks.py +137 -0
- trafficmonitor/templates/analytics/dashboard.html +500 -0
- trafficmonitor/tests.py +246 -0
- trafficmonitor/views.py +3 -0
- trafficmonitor/websocket_consumers.py +128 -0
|
@@ -0,0 +1,307 @@
|
|
|
1
|
+
"""
|
|
2
|
+
API Load Testing Script - Tests the actual dashboard API endpoints
|
|
3
|
+
Hammers the analytics API to find breaking points.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python trafficmonitor/load_test_api.py --concurrent 50 --duration 30
|
|
7
|
+
python trafficmonitor/load_test_api.py --preset extreme
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import argparse
import concurrent.futures
import json
import random
import sys
import threading
import time
from datetime import datetime, timedelta

import requests
from requests.auth import HTTPBasicAuth
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class APILoadTester:
    """Load-tests the dashboard analytics HTTP API endpoints.

    Spawns worker threads that hammer randomly chosen endpoints with random
    query parameters, aggregates latency/error statistics, and prints a
    summary with percentile latencies plus performance recommendations.
    """

    def __init__(self, base_url='http://localhost:8000', username=None, password=None):
        """Create a tester.

        Args:
            base_url: Root URL of the Django deployment under test.
            username/password: Optional admin credentials for setup_auth().
        """
        self.base_url = base_url
        # NOTE(review): requests.Session is not documented as thread-safe;
        # sharing one session across workers is acceptable for rough load
        # generation but a per-worker session would be cleaner — confirm.
        self.session = requests.Session()
        self.username = username
        self.password = password
        # Guards self.stats: worker threads perform read-modify-write updates
        # (counter increments, dict updates) that are not atomic in CPython,
        # so without this lock counts can be silently lost under load.
        self._lock = threading.Lock()
        self.stats = {
            'total': 0,
            'success': 0,
            'failed': 0,
            'timeouts': 0,
            'errors': {},
            'response_times': [],
        }

    def setup_auth(self):
        """Log in via the Django admin to obtain an authenticated session cookie."""
        if self.username and self.password:
            login_url = f"{self.base_url}/admin/login/"
            csrf_url = f"{self.base_url}/admin/"

            # Fetch the admin page first so the server sets a CSRF cookie.
            resp = self.session.get(csrf_url)
            csrftoken = resp.cookies.get('csrftoken')

            login_data = {
                'username': self.username,
                'password': self.password,
                'csrfmiddlewaretoken': csrftoken,
                'next': '/admin/',
            }
            # NOTE(review): login success is not verified; a wrong password
            # silently degrades every subsequent call to 403/redirects.
            self.session.post(login_url, data=login_data, headers={'Referer': login_url})

            print(f"✓ Authenticated as {self.username}")
        else:
            print("⚠ Warning: No credentials provided, API calls may fail")

    def test_endpoint(self, endpoint, params=None, timeout=10):
        """GET a single endpoint and record the outcome in self.stats.

        Args:
            endpoint: Path (joined onto base_url) to request.
            params: Optional query-string dict.
            timeout: Per-request timeout in seconds.

        Returns:
            (success, elapsed_seconds, status) where status is the HTTP
            status code, the string 'TIMEOUT', or the stringified exception.
        """
        url = f"{self.base_url}{endpoint}"

        start = time.time()
        try:
            response = self.session.get(url, params=params, timeout=timeout)
            elapsed = time.time() - start

            ok = response.status_code == 200
            # All stats mutations happen under the lock — this method is
            # called concurrently from many worker threads.
            with self._lock:
                self.stats['total'] += 1
                self.stats['response_times'].append(elapsed)
                if ok:
                    self.stats['success'] += 1
                else:
                    self.stats['failed'] += 1
                    error_key = f"HTTP_{response.status_code}"
                    self.stats['errors'][error_key] = self.stats['errors'].get(error_key, 0) + 1

            return ok, elapsed, response.status_code

        except requests.Timeout:
            elapsed = time.time() - start
            with self._lock:
                # Timeouts are tracked separately and intentionally excluded
                # from response_times so they don't skew the percentiles.
                self.stats['total'] += 1
                self.stats['timeouts'] += 1
                self.stats['errors']['TIMEOUT'] = self.stats['errors'].get('TIMEOUT', 0) + 1
            return False, elapsed, 'TIMEOUT'

        except Exception as e:
            elapsed = time.time() - start
            with self._lock:
                self.stats['total'] += 1
                self.stats['failed'] += 1
                error_key = type(e).__name__
                self.stats['errors'][error_key] = self.stats['errors'].get(error_key, 0) + 1
            return False, elapsed, str(e)

    def generate_random_params(self):
        """Return a random query-parameter dict ('range' always present;
        'method'/'status' each included with 50% probability, possibly empty)."""
        ranges = ['today', 'yesterday', 'last_7_days', 'last_30_days']
        methods = ['', 'GET', 'POST', 'PUT', 'DELETE']
        statuses = ['', '200', '400', '404', '500']

        params = {'range': random.choice(ranges)}

        if random.random() > 0.5:
            params['method'] = random.choice(methods)
        if random.random() > 0.5:
            params['status'] = random.choice(statuses)

        return params

    def run_concurrent_load(self, num_workers=10, duration=30, endpoints=None):
        """Run the load test with num_workers threads for duration seconds.

        Args:
            num_workers: Concurrent worker thread count.
            duration: Wall-clock test length in seconds.
            endpoints: Optional endpoint path list; defaults to the eight
                analytics dashboard API endpoints.
        """
        if endpoints is None:
            endpoints = [
                '/api/analytics/overview/',
                '/api/analytics/chart/time-series/',
                '/api/analytics/chart/status-codes/',
                '/api/analytics/chart/methods/',
                '/api/analytics/chart/endpoints/',
                '/api/analytics/chart/performance/',
                '/api/analytics/chart/heatmap/',
                '/api/analytics/chart/errors/',
            ]

        print("=" * 70)
        print("API LOAD TEST")
        print("=" * 70)
        print(f"Base URL: {self.base_url}")
        print(f"Workers: {num_workers}")
        print(f"Duration: {duration}s")
        print(f"Endpoints: {len(endpoints)}")
        print("=" * 70)

        start_time = time.time()
        end_time = start_time + duration

        def worker():
            """Request random endpoints until the deadline passes."""
            requests_made = 0
            while time.time() < end_time:
                endpoint = random.choice(endpoints)
                params = self.generate_random_params()

                success, elapsed, status = self.test_endpoint(endpoint, params)

                requests_made += 1

                # Print progress every tenth request per worker.
                if requests_made % 10 == 0:
                    print(f"Worker: {requests_made} requests, last: {status} in {elapsed*1000:.0f}ms")

                # Small delay to avoid overwhelming the server.
                time.sleep(random.uniform(0.01, 0.1))

        with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = [executor.submit(worker) for _ in range(num_workers)]

            # Main thread prints a progress line every 5 seconds.
            try:
                while time.time() < end_time:
                    time.sleep(5)
                    elapsed = time.time() - start_time
                    rps = self.stats['total'] / elapsed if elapsed > 0 else 0
                    success_rate = (self.stats['success'] / self.stats['total'] * 100) if self.stats['total'] > 0 else 0

                    print(f"\n[{elapsed:.0f}s] Requests: {self.stats['total']}, "
                          f"RPS: {rps:.1f}, Success: {success_rate:.1f}%")

            except KeyboardInterrupt:
                print("\n\nStopping load test...")

            # Give stragglers a grace period to finish their last request.
            concurrent.futures.wait(futures, timeout=10)

        total_duration = time.time() - start_time
        self._print_results(total_duration)

    def _print_results(self, duration):
        """Print request counts, throughput and latency percentiles."""
        if not self.stats['response_times']:
            print("\n⚠ No requests completed!")
            return

        sorted_times = sorted(self.stats['response_times'])
        # Rough percentiles: fall back to the max for small samples.
        p50 = sorted_times[len(sorted_times) // 2]
        p95 = sorted_times[int(len(sorted_times) * 0.95)] if len(sorted_times) > 20 else sorted_times[-1]
        p99 = sorted_times[int(len(sorted_times) * 0.99)] if len(sorted_times) > 100 else sorted_times[-1]
        avg = sum(sorted_times) / len(sorted_times)

        print("\n" + "=" * 70)
        print("API LOAD TEST RESULTS")
        print("=" * 70)
        print(f"Duration: {duration:.2f}s")
        print(f"Total Requests: {self.stats['total']:,}")
        print(f"Successful: {self.stats['success']:,}")
        print(f"Failed: {self.stats['failed']:,}")
        print(f"Timeouts: {self.stats['timeouts']:,}")
        print(f"Requests/sec: {self.stats['total'] / duration:.2f}")
        print(f"Success Rate: {self.stats['success'] / self.stats['total'] * 100:.2f}%")

        print("\nResponse Times:")
        print(f"  Average: {avg * 1000:.2f}ms")
        print(f"  Median (p50): {p50 * 1000:.2f}ms")
        print(f"  95th percentile: {p95 * 1000:.2f}ms")
        print(f"  99th percentile: {p99 * 1000:.2f}ms")
        print(f"  Min: {sorted_times[0] * 1000:.2f}ms")
        print(f"  Max: {sorted_times[-1] * 1000:.2f}ms")

        if self.stats['errors']:
            print("\nErrors:")
            for error_type, count in sorted(self.stats['errors'].items(), key=lambda x: x[1], reverse=True):
                print(f"  {error_type:.<30} {count:>6}")

        print("=" * 70)

        self._print_recommendations(avg, p95, p99)

    def _print_recommendations(self, avg, p95, p99):
        """Print threshold-based verdicts for avg/p95/p99 latency and error rate.

        Args:
            avg, p95, p99: Latencies in seconds.
        """
        print("\nPERFORMANCE ANALYSIS:")

        if avg < 0.1:
            print("  ✓ Excellent: Average response time < 100ms")
        elif avg < 0.5:
            print("  ✓ Good: Average response time < 500ms")
        elif avg < 1.0:
            print("  ⚠ Warning: Average response time approaching 1s")
        else:
            print("  ✗ Critical: Average response time > 1s - optimization needed!")

        if p95 < 0.5:
            print("  ✓ Excellent: 95th percentile < 500ms")
        elif p95 < 1.0:
            print("  ⚠ Warning: 95th percentile approaching 1s")
        else:
            print("  ✗ Critical: 95th percentile > 1s - some requests are very slow")

        if p99 < 2.0:
            print("  ✓ Good: 99th percentile < 2s")
        else:
            print("  ✗ Critical: 99th percentile > 2s - tail latency is high")

        error_rate = self.stats['failed'] / self.stats['total'] * 100 if self.stats['total'] > 0 else 0
        if error_rate == 0:
            print("  ✓ Perfect: No errors!")
        elif error_rate < 1:
            print(f"  ✓ Good: Error rate < 1% ({error_rate:.2f}%)")
        elif error_rate < 5:
            print(f"  ⚠ Warning: Error rate {error_rate:.2f}%")
        else:
            print(f"  ✗ Critical: Error rate {error_rate:.2f}% - system unstable!")
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def main():
    """Parse command-line options and launch the API load test."""
    parser = argparse.ArgumentParser(description='API Load Testing')
    parser.add_argument('--url', default='http://localhost:8000',
                        help='Base URL (default: http://localhost:8000)')
    parser.add_argument('--username', '-u', help='Username for authentication')
    parser.add_argument('--password', '-p', help='Password for authentication')
    parser.add_argument('--concurrent', '-c', type=int, default=10,
                        help='Number of concurrent workers (default: 10)')
    parser.add_argument('--duration', '-d', type=int, default=30,
                        help='Test duration in seconds (default: 30)')
    parser.add_argument('--preset', choices=['light', 'medium', 'heavy', 'extreme'],
                        help='Use preset configuration')

    args = parser.parse_args()

    # A preset, when given, overrides --concurrent/--duration.
    preset_settings = {
        'light': (5, 10),
        'medium': (20, 30),
        'heavy': (50, 60),
        'extreme': (100, 120),
    }
    if args.preset in preset_settings:
        args.concurrent, args.duration = preset_settings[args.preset]

    tester = APILoadTester(
        base_url=args.url,
        username=args.username,
        password=args.password,
    )

    # Authenticate only when both credentials were supplied.
    if args.username and args.password:
        tester.setup_auth()

    tester.run_concurrent_load(
        num_workers=args.concurrent,
        duration=args.duration,
    )


if __name__ == '__main__':
    main()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Management commands package
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Management commands
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Management command to cleanup old request logs
|
|
3
|
+
"""
|
|
4
|
+
from django.core.management.base import BaseCommand
|
|
5
|
+
from django.utils import timezone
|
|
6
|
+
from datetime import timedelta
|
|
7
|
+
from trafficmonitor.models import RequestLog
|
|
8
|
+
from trafficmonitor.conf import TrafficMonitorConfig
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Command(BaseCommand):
    """Management command that purges RequestLog rows past the retention window."""

    help = 'Cleanup old request logs based on retention policy'

    def add_arguments(self, parser):
        """Register the --days, --batch-size and --dry-run options."""
        parser.add_argument(
            '--days',
            type=int,
            default=TrafficMonitorConfig.RETENTION_DAYS,
            help=f'Number of days to retain (default: {TrafficMonitorConfig.RETENTION_DAYS})'
        )
        parser.add_argument(
            '--batch-size',
            type=int,
            default=10000,
            help='Batch size for deletion (default: 10000)'
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Show what would be deleted without actually deleting'
        )

    def handle(self, *args, **options):
        """Delete logs older than the retention cutoff, in batches.

        Honors --dry-run (report only) and deletes in --batch-size chunks
        to keep individual DELETE statements bounded.
        """
        retention_days = options['days']
        chunk_size = options['batch_size']

        cutoff = timezone.now() - timedelta(days=retention_days)

        # Everything strictly older than the cutoff is eligible for deletion.
        stale_logs = RequestLog.objects.filter(timestamp__lt=cutoff)
        total = stale_logs.count()

        if not total:
            self.stdout.write(
                self.style.SUCCESS(f'No logs older than {retention_days} days found.')
            )
            return

        if options['dry_run']:
            self.stdout.write(
                self.style.WARNING(
                    f'DRY RUN: Would delete {total} logs older than {cutoff}'
                )
            )
            return

        # Delete in id batches; the queryset is re-evaluated each pass so the
        # slice always yields the next still-remaining chunk.
        removed = 0
        while True:
            batch = list(stale_logs.values_list('id', flat=True)[:chunk_size])
            if not batch:
                break

            count, _ = RequestLog.objects.filter(id__in=batch).delete()
            removed += count

            self.stdout.write(f'Deleted {count} logs (total: {removed}/{total})')

        self.stdout.write(
            self.style.SUCCESS(
                f'Successfully deleted {removed} logs older than {retention_days} days'
            )
        )
|