mcli-framework 7.10.0__py3-none-any.whl → 7.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic. Click here for more details.
- mcli/lib/custom_commands.py +10 -0
- mcli/lib/optional_deps.py +240 -0
- mcli/ml/backtesting/run.py +5 -3
- mcli/ml/models/ensemble_models.py +1 -0
- mcli/ml/models/recommendation_models.py +1 -0
- mcli/ml/optimization/optimize.py +6 -4
- mcli/ml/serving/serve.py +2 -2
- mcli/ml/training/train.py +14 -7
- mcli/self/completion_cmd.py +2 -2
- mcli/workflow/doc_convert.py +82 -112
- mcli/workflow/git_commit/ai_service.py +13 -2
- mcli/workflow/notebook/converter.py +375 -0
- mcli/workflow/notebook/notebook_cmd.py +441 -0
- mcli/workflow/notebook/schema.py +402 -0
- mcli/workflow/notebook/validator.py +313 -0
- mcli/workflow/workflow.py +14 -0
- {mcli_framework-7.10.0.dist-info → mcli_framework-7.10.2.dist-info}/METADATA +37 -3
- {mcli_framework-7.10.0.dist-info → mcli_framework-7.10.2.dist-info}/RECORD +22 -37
- mcli/ml/features/political_features.py +0 -677
- mcli/ml/preprocessing/politician_trading_preprocessor.py +0 -570
- mcli/workflow/politician_trading/config.py +0 -134
- mcli/workflow/politician_trading/connectivity.py +0 -492
- mcli/workflow/politician_trading/data_sources.py +0 -654
- mcli/workflow/politician_trading/database.py +0 -412
- mcli/workflow/politician_trading/demo.py +0 -249
- mcli/workflow/politician_trading/models.py +0 -327
- mcli/workflow/politician_trading/monitoring.py +0 -413
- mcli/workflow/politician_trading/scrapers.py +0 -1074
- mcli/workflow/politician_trading/scrapers_california.py +0 -434
- mcli/workflow/politician_trading/scrapers_corporate_registry.py +0 -797
- mcli/workflow/politician_trading/scrapers_eu.py +0 -376
- mcli/workflow/politician_trading/scrapers_free_sources.py +0 -509
- mcli/workflow/politician_trading/scrapers_third_party.py +0 -373
- mcli/workflow/politician_trading/scrapers_uk.py +0 -378
- mcli/workflow/politician_trading/scrapers_us_states.py +0 -471
- mcli/workflow/politician_trading/seed_database.py +0 -520
- mcli/workflow/politician_trading/supabase_functions.py +0 -354
- mcli/workflow/politician_trading/workflow.py +0 -879
- {mcli_framework-7.10.0.dist-info → mcli_framework-7.10.2.dist-info}/WHEEL +0 -0
- {mcli_framework-7.10.0.dist-info → mcli_framework-7.10.2.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.10.0.dist-info → mcli_framework-7.10.2.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.10.0.dist-info → mcli_framework-7.10.2.dist-info}/top_level.txt +0 -0
|
"""
Continuous Supabase connectivity validation and monitoring.

Provides :class:`SupabaseConnectivityValidator`, which runs a battery of
async connectivity tests (REST reachability, read/write access, table
schema, job tracking, immediate read-after-write) against the configured
Supabase project and renders the results with ``rich``.
"""

import asyncio
import json
import logging
import time
from datetime import datetime, timedelta
from typing import Any, Dict, Optional

from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
from rich.table import Table

from .config import WorkflowConfig
from .database import PoliticianTradingDB

logger = logging.getLogger(__name__)
console = Console()


class SupabaseConnectivityValidator:
    """Validates and monitors Supabase connectivity in real-time."""

    def __init__(self, config: Optional[WorkflowConfig] = None):
        """Create a validator.

        Args:
            config: Workflow configuration; falls back to
                ``WorkflowConfig.default()`` when omitted.
        """
        self.config = config or WorkflowConfig.default()
        self.db = PoliticianTradingDB(self.config)
        # Most recent results from validate_connectivity(), kept so callers
        # can inspect the last run without re-testing.
        self.last_test_results: Dict[str, Any] = {}

    async def validate_connectivity(self) -> Dict[str, Any]:
        """Run every connectivity test and aggregate the results.

        Returns:
            A dict with per-test results under ``"tests"``, an overall
            status label, a 0-100 ``"connectivity_score"``, and timing info.
        """
        validation_start = time.time()
        results: Dict[str, Any] = {
            # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
            # kept for now so stored timestamps keep their naive-UTC format.
            "timestamp": datetime.utcnow().isoformat(),
            "overall_status": "unknown",
            "tests": {},
            "duration_ms": 0,
            "supabase_url": self.config.supabase.url,
            "connectivity_score": 0,
        }

        tests = [
            ("basic_connection", self._test_basic_connection),
            ("read_operations", self._test_read_operations),
            ("write_operations", self._test_write_operations),
            ("table_access", self._test_table_access),
            ("job_tracking", self._test_job_tracking),
            ("real_time_sync", self._test_real_time_sync),
        ]

        passed_tests = 0

        for test_name, test_func in tests:
            try:
                test_start = time.time()
                test_result = await test_func()
                test_duration = (time.time() - test_start) * 1000

                results["tests"][test_name] = {
                    "status": "passed" if test_result["success"] else "failed",
                    "duration_ms": round(test_duration, 2),
                    "details": test_result,
                    "timestamp": datetime.utcnow().isoformat(),
                }

                if test_result["success"]:
                    passed_tests += 1

            except Exception as e:
                # A test helper raising (instead of returning success=False)
                # is recorded as "error" but must not abort the other tests.
                results["tests"][test_name] = {
                    "status": "error",
                    "duration_ms": 0,
                    "error": str(e),
                    "timestamp": datetime.utcnow().isoformat(),
                }

        # Score is simply the percentage of passing tests.
        connectivity_score = (passed_tests / len(tests)) * 100
        results["connectivity_score"] = round(connectivity_score, 1)

        if connectivity_score >= 90:
            results["overall_status"] = "excellent"
        elif connectivity_score >= 75:
            results["overall_status"] = "good"
        elif connectivity_score >= 50:
            results["overall_status"] = "degraded"
        else:
            results["overall_status"] = "critical"

        results["duration_ms"] = round((time.time() - validation_start) * 1000, 2)
        self.last_test_results = results

        return results

    async def _test_basic_connection(self) -> Dict[str, Any]:
        """Test basic database connection via the REST root endpoint."""
        try:
            # Test basic REST API connectivity instead of RPC.
            import httpx

            async with httpx.AsyncClient() as client:
                response = await client.get(
                    self.config.supabase.url + "/rest/v1/",
                    headers={"apikey": self.config.supabase.key},
                    timeout=30.0,
                )

            if response.status_code == 200:
                return {
                    "success": True,
                    "message": "Basic connection successful",
                    "status_code": response.status_code,
                }
            else:
                return {
                    "success": False,
                    "error": f"HTTP {response.status_code}: {response.text[:100]}",
                }
        except Exception as e:
            return {"success": False, "error": str(e)}

    async def _test_read_operations(self) -> Dict[str, Any]:
        """Test read access to the workflow's core tables."""
        try:
            # Try reading from multiple tables.
            tables_to_test = [
                "politicians",
                "trading_disclosures",
                "data_pull_jobs",
                "data_sources",
            ]
            read_results: Dict[str, str] = {}
            schema_missing = False

            for table in tables_to_test:
                try:
                    # Only the side effect matters: an inaccessible table raises.
                    self.db.client.table(table).select("*").limit(1).execute()
                    read_results[table] = "accessible"
                except Exception as e:
                    error_msg = str(e)
                    # PostgREST reports a missing table as a schema-cache miss.
                    if "Could not find" in error_msg and "schema cache" in error_msg:
                        read_results[table] = "table_missing"
                        schema_missing = True
                    else:
                        read_results[table] = f"error: {error_msg[:50]}..."

            accessible_count = sum(1 for status in read_results.values() if status == "accessible")
            missing_count = sum(1 for status in read_results.values() if status == "table_missing")

            if schema_missing and accessible_count == 0:
                # Nothing readable and at least one table missing: the schema
                # was never applied, so point the user at the setup command.
                return {
                    "success": False,
                    "tables_tested": read_results,
                    "accessible_tables": accessible_count,
                    "missing_tables": missing_count,
                    "message": "Database schema not set up. Run 'mcli workflow politician-trading setup --generate-schema' to get setup instructions.",
                }
            else:
                success = accessible_count > 0
                return {
                    "success": success,
                    "tables_tested": read_results,
                    "accessible_tables": accessible_count,
                    "missing_tables": missing_count,
                }
        except Exception as e:
            return {"success": False, "error": str(e)}

    async def _test_write_operations(self) -> Dict[str, Any]:
        """Test insert/update/select/delete using a throwaway job record."""
        try:
            # Create a test job record.
            try:
                insert_result = (
                    self.db.client.table("data_pull_jobs")
                    .insert(
                        {
                            "job_type": "connectivity_test",
                            "status": "running",
                            "started_at": datetime.utcnow().isoformat(),
                            "config_snapshot": {
                                "test": True,
                                "validator": "SupabaseConnectivityValidator",
                            },
                        }
                    )
                    .execute()
                )
            except Exception as e:
                if "Could not find" in str(e) and "schema cache" in str(e):
                    return {
                        "success": False,
                        "error": "Table 'data_pull_jobs' not found",
                        "message": "Database schema not set up. Run schema setup first.",
                    }
                else:
                    # Re-raise without re-wrapping to preserve the traceback.
                    raise

            # Get the inserted record ID.
            if insert_result.data and len(insert_result.data) > 0:
                inserted_id = insert_result.data[0]["id"]

                # Update the record (result intentionally ignored; a failure raises).
                (
                    self.db.client.table("data_pull_jobs")
                    .update(
                        {
                            "status": "completed",
                            "completed_at": datetime.utcnow().isoformat(),
                            "records_processed": 1,
                        }
                    )
                    .eq("id", inserted_id)
                    .execute()
                )

                # Read it back.
                read_result = (
                    self.db.client.table("data_pull_jobs")
                    .select("*")
                    .eq("id", inserted_id)
                    .execute()
                )
            else:
                return {"success": False, "error": "Failed to get inserted record ID"}

            # Clean up test record.
            self.db.client.table("data_pull_jobs").delete().eq("id", inserted_id).execute()

            return {
                "success": True,
                "message": "Write operations successful",
                "operations_tested": ["insert", "update", "select", "delete"],
                # Fix: report the id of the row actually written; the old
                # synthetic "connectivity_test_<ts>" string was never stored.
                "test_record_id": inserted_id,
                "record_retrieved": len(read_result.data) > 0 if read_result.data else False,
            }
        except Exception as e:
            return {"success": False, "error": str(e)}

    async def _test_table_access(self) -> Dict[str, Any]:
        """Test access to all required tables and their expected columns."""
        try:
            # Columns selected per table double as a cheap structure check:
            # selecting a missing column raises.
            required_tables = {
                "politicians": ["id", "full_name", "role"],
                "trading_disclosures": ["id", "politician_id", "transaction_date"],
                "data_pull_jobs": ["id", "job_type", "status"],
                "data_sources": ["id", "name", "url"],
            }

            table_access: Dict[str, Dict[str, Any]] = {}

            for table_name, required_columns in required_tables.items():
                try:
                    # Test table structure.
                    result = (
                        self.db.client.table(table_name)
                        .select(",".join(required_columns))
                        .limit(1)
                        .execute()
                    )
                    table_access[table_name] = {
                        "accessible": True,
                        "columns_verified": required_columns,
                        "record_count_sample": len(result.data) if result.data else 0,
                    }
                except Exception as e:
                    table_access[table_name] = {"accessible": False, "error": str(e)}

            accessible_count = sum(
                1 for info in table_access.values() if info.get("accessible", False)
            )

            return {
                # This test only passes when every required table is reachable.
                "success": accessible_count == len(required_tables),
                "tables_tested": len(required_tables),
                "tables_accessible": accessible_count,
                "table_details": table_access,
            }
        except Exception as e:
            return {"success": False, "error": str(e)}

    async def _test_job_tracking(self) -> Dict[str, Any]:
        """Test job tracking functionality on the data_pull_jobs table."""
        try:
            # Get recent jobs.
            recent_jobs = (
                self.db.client.table("data_pull_jobs")
                .select("*")
                .order("created_at", desc=True)
                .limit(5)
                .execute()
            )

            # Get job statistics (status column only, to keep the payload small).
            job_stats = self.db.client.table("data_pull_jobs").select("status").execute()

            status_counts: Dict[str, int] = {}
            if job_stats.data:
                for job in job_stats.data:
                    status = job.get("status", "unknown")
                    status_counts[status] = status_counts.get(status, 0) + 1

            return {
                "success": True,
                "recent_jobs_count": len(recent_jobs.data) if recent_jobs.data else 0,
                "total_jobs": len(job_stats.data) if job_stats.data else 0,
                "status_distribution": status_counts,
                "job_tracking_functional": True,
            }
        except Exception as e:
            return {"success": False, "error": str(e)}

    async def _test_real_time_sync(self) -> Dict[str, Any]:
        """Test immediate read-after-write consistency on data_sources."""
        try:
            # Create a timestamped record and verify immediate retrieval.
            timestamp = datetime.utcnow().isoformat()

            # Insert.
            insert_result = (
                self.db.client.table("data_sources")
                .insert(
                    {
                        "name": "Real-time Test Source",
                        "url": "https://test.example.com",
                        "source_type": "test",
                        "region": "test",
                        "is_active": True,
                        "created_at": timestamp,
                    }
                )
                .execute()
            )

            if insert_result.data and len(insert_result.data) > 0:
                inserted_id = insert_result.data[0]["id"]

                # Immediate read-back.
                result = (
                    self.db.client.table("data_sources").select("*").eq("id", inserted_id).execute()
                )

                # Clean up.
                self.db.client.table("data_sources").delete().eq("id", inserted_id).execute()
            else:
                return {"success": False, "error": "Failed to insert test record"}

            sync_successful = len(result.data) > 0 if result.data else False

            return {
                "success": sync_successful,
                "message": "Real-time sync test completed",
                "record_immediately_available": sync_successful,
                "test_timestamp": timestamp,
            }
        except Exception as e:
            return {"success": False, "error": str(e)}

    def display_connectivity_report(self, results: Dict[str, Any]):
        """Display a formatted connectivity report for a validation run.

        Args:
            results: The dict returned by :meth:`validate_connectivity`.
        """
        console.print(
            f"\n🔗 Supabase Connectivity Report - {results['timestamp']}", style="bold cyan"
        )

        # Overall status panel, color-coded by severity.
        status_colors = {
            "excellent": "bright_green",
            "good": "green",
            "degraded": "yellow",
            "critical": "red",
        }

        status_color = status_colors.get(results["overall_status"], "white")

        overall_panel = Panel(
            f"Status: [{status_color}]{results['overall_status'].upper()}[/{status_color}]\n"
            f"Connectivity Score: {results['connectivity_score']}%\n"
            f"Test Duration: {results['duration_ms']}ms\n"
            f"Supabase URL: {results['supabase_url']}",
            title="🎯 Overall Connectivity",
            border_style=status_color,
        )
        console.print(overall_panel)

        # Per-test results table.
        test_table = Table(title="Test Results")
        test_table.add_column("Test", style="cyan")
        test_table.add_column("Status", style="bold")
        test_table.add_column("Duration", justify="right")
        test_table.add_column("Details")

        for test_name, test_result in results["tests"].items():
            status = test_result["status"]
            status_style = {"passed": "green", "failed": "red", "error": "red"}.get(status, "white")

            # Pick the most informative detail string available; an error
            # message (truncated to 50 chars) always wins.
            details = ""
            if "details" in test_result:
                if "message" in test_result["details"]:
                    details = test_result["details"]["message"]
                elif "operations_tested" in test_result["details"]:
                    details = f"Ops: {', '.join(test_result['details']['operations_tested'])}"
                elif "tables_accessible" in test_result["details"]:
                    details = f"{test_result['details']['tables_accessible']}/{test_result['details']['tables_tested']} tables"

            if "error" in test_result:
                details = (
                    test_result["error"][:50] + "..."
                    if len(test_result["error"]) > 50
                    else test_result["error"]
                )

            test_table.add_row(
                test_name.replace("_", " ").title(),
                f"[{status_style}]{status.upper()}[/{status_style}]",
                f"{test_result['duration_ms']:.1f}ms",
                details,
            )

        console.print(test_table)

    async def continuous_monitoring(self, interval_seconds: int = 30, duration_minutes: int = 0):
        """Run continuous connectivity monitoring.

        Args:
            interval_seconds: Delay between validation runs.
            duration_minutes: Stop after this many minutes; 0 means run
                until interrupted (Ctrl+C).
        """
        console.print(
            f"🔄 Starting continuous Supabase connectivity monitoring (interval: {interval_seconds}s)",
            style="bold blue",
        )

        start_time = time.time()
        check_count = 0

        try:
            while True:
                check_count += 1
                console.print(
                    f"\n📊 Check #{check_count} - {datetime.now().strftime('%H:%M:%S')}",
                    style="dim",
                )

                # Run validation.
                results = await self.validate_connectivity()
                self.display_connectivity_report(results)

                # Check duration limit.
                if duration_minutes > 0:
                    elapsed_minutes = (time.time() - start_time) / 60
                    if elapsed_minutes >= duration_minutes:
                        console.print(
                            f"\n⏰ Monitoring completed after {duration_minutes} minutes",
                            style="green",
                        )
                        break

                # Wait for next check.
                console.print(
                    f"\n⏱️ Next check in {interval_seconds} seconds... (Ctrl+C to stop)", style="dim"
                )
                await asyncio.sleep(interval_seconds)

        except KeyboardInterrupt:
            console.print("\n👋 Monitoring stopped by user", style="yellow")
        except Exception as e:
            console.print(f"\n❌ Monitoring error: {e}", style="red")


async def run_connectivity_validation() -> Dict[str, Any]:
    """Standalone function to run connectivity validation."""
    validator = SupabaseConnectivityValidator()
    return await validator.validate_connectivity()


async def run_continuous_monitoring(interval: int = 30, duration: int = 0):
    """Standalone function for continuous monitoring."""
    validator = SupabaseConnectivityValidator()
    await validator.continuous_monitoring(interval, duration)


if __name__ == "__main__":
    # Allow running this file directly for testing.
    async def main():
        validator = SupabaseConnectivityValidator()

        console.print("🧪 Running Supabase connectivity validation...", style="bold blue")
        results = await validator.validate_connectivity()
        validator.display_connectivity_report(results)

    asyncio.run(main())