socrates_ai_api-1.3.0-py3-none-any.whl

@@ -0,0 +1,221 @@
+"""
+Monitoring and metrics collection for Socrates API.
+
+Provides:
+- Request/response metrics (latency, status codes)
+- Database query performance tracking
+- Error tracking and categorization
+- User activity metrics
+- Subscription usage tracking
+"""
+
+import logging
+import time
+from typing import Dict, Optional
+from functools import wraps
+
+from fastapi import Request, Response
+
+logger = logging.getLogger(__name__)
+
+
+class MetricsCollector:
+    """Collects API metrics for monitoring and observability."""
+
+    # In-memory metrics storage (for development/small scale)
+    # For production, integrate with Prometheus, DataDog, New Relic, etc.
+    _metrics = {
+        "requests": {},  # endpoint -> count
+        "latencies": {},  # endpoint -> list of latencies (ms)
+        "errors": {},  # error_type -> count
+        "db_queries": {},  # query_type -> list of latencies
+        "users": {},  # user_id -> activity_data
+        "subscriptions": {},  # tier -> usage_data
+    }
+
+    @classmethod
+    def record_request(
+        cls,
+        method: str,
+        path: str,
+        status_code: int,
+        latency_ms: float,
+        user_id: Optional[str] = None,
+    ) -> None:
+        """Record HTTP request metrics."""
+        endpoint = f"{method} {path}"
+
+        # Track request count
+        if endpoint not in cls._metrics["requests"]:
+            cls._metrics["requests"][endpoint] = 0
+        cls._metrics["requests"][endpoint] += 1
+
+        # Track latency
+        if endpoint not in cls._metrics["latencies"]:
+            cls._metrics["latencies"][endpoint] = []
+        cls._metrics["latencies"][endpoint].append(latency_ms)
+
+        # Track user activity
+        if user_id:
+            if user_id not in cls._metrics["users"]:
+                cls._metrics["users"][user_id] = {"requests": 0, "last_active": None}
+            cls._metrics["users"][user_id]["requests"] += 1
+            cls._metrics["users"][user_id]["last_active"] = time.time()
+
+        # Log slow requests
+        if latency_ms > 1000:  # > 1 second
+            logger.warning(
+                f"Slow request: {method} {path} took {latency_ms:.2f}ms (status: {status_code})"
+            )
+
+    @classmethod
+    def record_error(cls, error_type: str, error_message: str) -> None:
+        """Record error metrics."""
+        if error_type not in cls._metrics["errors"]:
+            cls._metrics["errors"][error_type] = 0
+        cls._metrics["errors"][error_type] += 1
+
+        logger.error(f"Error recorded: {error_type} - {error_message}")
+
+    @classmethod
+    def record_db_query(cls, query_type: str, latency_ms: float) -> None:
+        """Record database query metrics."""
+        if query_type not in cls._metrics["db_queries"]:
+            cls._metrics["db_queries"][query_type] = []
+        cls._metrics["db_queries"][query_type].append(latency_ms)
+
+        # Log slow queries
+        if latency_ms > 500:  # > 500ms
+            logger.warning(f"Slow database query: {query_type} took {latency_ms:.2f}ms")
+
+    @classmethod
+    def record_subscription_usage(
+        cls, tier: str, feature: str, usage_amount: int = 1
+    ) -> None:
+        """Record subscription usage metrics."""
+        if tier not in cls._metrics["subscriptions"]:
+            cls._metrics["subscriptions"][tier] = {}
+        if feature not in cls._metrics["subscriptions"][tier]:
+            cls._metrics["subscriptions"][tier][feature] = 0
+        cls._metrics["subscriptions"][tier][feature] += usage_amount
+
+    @classmethod
+    def get_metrics(cls) -> Dict:
+        """Get all collected metrics."""
+        return {
+            "requests": cls._metrics["requests"],
+            "latencies": {
+                k: {
+                    "count": len(v),
+                    "avg_ms": sum(v) / len(v) if v else 0,
+                    "min_ms": min(v) if v else 0,
+                    "max_ms": max(v) if v else 0,
+                }
+                for k, v in cls._metrics["latencies"].items()
+            },
+            "errors": cls._metrics["errors"],
+            "db_queries": {
+                k: {
+                    "count": len(v),
+                    "avg_ms": sum(v) / len(v) if v else 0,
+                    "min_ms": min(v) if v else 0,
+                    "max_ms": max(v) if v else 0,
+                }
+                for k, v in cls._metrics["db_queries"].items()
+            },
+            "active_users": len(cls._metrics["users"]),
+            "subscription_usage": cls._metrics["subscriptions"],
+        }
+
+    @classmethod
+    def reset_metrics(cls) -> None:
+        """Reset all metrics (for testing)."""
+        cls._metrics = {
+            "requests": {},
+            "latencies": {},
+            "errors": {},
+            "db_queries": {},
+            "users": {},
+            "subscriptions": {},
+        }
+
+
+async def metrics_middleware(request: Request, call_next) -> Response:
+    """FastAPI middleware for collecting request metrics."""
+    start_time = time.time()
+
+    try:
+        response = await call_next(request)
+        latency_ms = (time.time() - start_time) * 1000
+
+        # Get user ID if available
+        user_id = None
+        if hasattr(request.state, "user_id"):
+            user_id = request.state.user_id
+
+        # Record metrics
+        MetricsCollector.record_request(
+            method=request.method,
+            path=request.url.path,
+            status_code=response.status_code,
+            latency_ms=latency_ms,
+            user_id=user_id,
+        )
+
+        return response
+
+    except Exception as e:
+        MetricsCollector.record_error(type(e).__name__, str(e))
+        raise
+
+
+def track_db_query(query_type: str):
+    """Decorator to track database query performance."""
+
+    def decorator(func):
+        @wraps(func)
+        async def wrapper(*args, **kwargs):
+            start_time = time.time()
+            try:
+                result = await func(*args, **kwargs)
+                latency_ms = (time.time() - start_time) * 1000
+                MetricsCollector.record_db_query(query_type, latency_ms)
+                return result
+            except Exception:
+                latency_ms = (time.time() - start_time) * 1000
+                MetricsCollector.record_db_query(f"{query_type}_error", latency_ms)
+                raise
+
+        return wrapper
+
+    return decorator
+
+
+# Health check metrics
+class HealthMetrics:
+    """Health check metrics for monitoring."""
+
+    @staticmethod
+    def get_health_status() -> Dict:
+        """Get current API health status."""
+        metrics = MetricsCollector.get_metrics()
+
+        # Calculate health indicators
+        error_count = sum(metrics["errors"].values())
+        total_requests = sum(metrics["requests"].values())
+        error_rate = (error_count / total_requests * 100) if total_requests > 0 else 0
+
+        # Average latencies
+        latencies = metrics["latencies"]
+        avg_latencies = {k: v["avg_ms"] for k, v in latencies.items()}
+
+        return {
+            "status": "healthy" if error_rate < 5 else "degraded" if error_rate < 10 else "unhealthy",
+            "error_rate": error_rate,
+            "total_requests": total_requests,
+            "total_errors": error_count,
+            "active_users": metrics["active_users"],
+            "avg_latency_ms": sum(avg_latencies.values()) / len(avg_latencies)
+            if avg_latencies
+            else 0,
+        }
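
Usage note: the hunk above defines the collector, middleware, and decorator, but the diff does not show how they are wired into the application. The following is a minimal sketch of one plausible wiring, assuming a module path of socrates_api.monitoring (the wheel's actual layout is not visible here) and a made-up fetch_user query function:

    from fastapi import FastAPI

    # Assumed import path -- the wheel's real module layout is not shown in this diff.
    from socrates_api.monitoring import (
        HealthMetrics,
        MetricsCollector,
        metrics_middleware,
        track_db_query,
    )

    app = FastAPI()

    # Register the middleware so every request is timed and counted.
    app.middleware("http")(metrics_middleware)


    @track_db_query("fetch_user")
    async def fetch_user(user_id: str) -> dict:
        # Stand-in for a real database call; its latency is recorded under
        # "fetch_user" (or "fetch_user_error" if it raises).
        return {"id": user_id}


    @app.get("/metrics")
    async def metrics() -> dict:
        # Raw counters plus per-endpoint latency summaries.
        return MetricsCollector.get_metrics()


    @app.get("/health")
    async def health() -> dict:
        # "healthy" / "degraded" / "unhealthy" derived from the error rate.
        return HealthMetrics.get_health_status()

Since _metrics lives in process memory and the per-endpoint latency lists grow without bound, this wiring matches the scope the module's own comments state: development and small scale, with Prometheus or a similar backend for production.
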
@@ -0,0 +1,76 @@
+"""
+Testing Mode Utility
+
+Handles testing mode detection and application for subscription testing.
+When testing mode is enabled via the /subscription testing-mode on command,
+subscription limits are bypassed for development and testing purposes.
+"""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class TestingModeChecker:
+    """Check and apply testing mode rules."""
+
+    TESTING_MODE_HEADER = "x-testing-mode"
+
+    @staticmethod
+    def is_testing_mode_enabled(headers: dict) -> bool:
+        """
+        Check if testing mode is enabled via request headers.
+
+        Args:
+            headers: Request headers dict
+
+        Returns:
+            True if testing mode is enabled, False otherwise
+        """
+        if not headers:
+            return False
+
+        testing_mode = headers.get(TestingModeChecker.TESTING_MODE_HEADER, "").lower()
+        return testing_mode == "enabled"
+
+    @staticmethod
+    def bypass_subscription_check(headers: dict) -> bool:
+        """
+        Determine whether the subscription check should be bypassed by testing mode.
+
+        Args:
+            headers: Request headers dict
+
+        Returns:
+            True if the subscription check should be skipped, False otherwise
+        """
+        return TestingModeChecker.is_testing_mode_enabled(headers)
+
+    @staticmethod
+    def get_testing_tier(headers: dict) -> str:
+        """
+        Get the subscription tier for testing mode.
+
+        Args:
+            headers: Request headers dict
+
+        Returns:
+            "pro" if testing mode is enabled, otherwise the default "free" tier
+        """
+        if TestingModeChecker.is_testing_mode_enabled(headers):
+            logger.info("Testing mode detected - using pro subscription tier")
+            return "pro"
+        return "free"
+
+
+def get_testing_mode_from_request(request) -> bool:
+    """
+    Extract testing mode status from a FastAPI request.
+
+    Args:
+        request: FastAPI Request object
+
+    Returns:
+        True if testing mode is enabled, False otherwise
+    """
+    return TestingModeChecker.is_testing_mode_enabled(request.headers)
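
Usage note: a short sketch of how this checker would typically be consulted when resolving a caller's tier. The import path socrates_api.testing_mode and the resolve_tier dependency are illustrative assumptions; only the x-testing-mode header and the checker's behavior come from the hunk above:

    from fastapi import Request

    # Assumed import path -- the wheel's real module layout is not shown in this diff.
    from socrates_api.testing_mode import TestingModeChecker


    async def resolve_tier(request: Request) -> str:
        # A request carrying "x-testing-mode: enabled" skips the subscription
        # check entirely and is treated as the "pro" tier.
        if TestingModeChecker.bypass_subscription_check(request.headers):
            return TestingModeChecker.get_testing_tier(request.headers)
        # A real tier lookup (database, billing provider, etc.) would go here;
        # the module itself falls back to "free".
        return "free"

Note that the bypass is driven entirely by a client-supplied header, which is why the module docstring scopes it to development and testing.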