mcli-framework 7.10.1-py3-none-any.whl → 7.10.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic.
- mcli/lib/custom_commands.py +10 -0
- mcli/lib/optional_deps.py +240 -0
- mcli/workflow/git_commit/ai_service.py +13 -2
- mcli/workflow/notebook/converter.py +375 -0
- mcli/workflow/notebook/notebook_cmd.py +441 -0
- mcli/workflow/notebook/schema.py +402 -0
- mcli/workflow/notebook/validator.py +313 -0
- mcli/workflow/workflow.py +14 -0
- {mcli_framework-7.10.1.dist-info → mcli_framework-7.10.2.dist-info}/METADATA +36 -2
- {mcli_framework-7.10.1.dist-info → mcli_framework-7.10.2.dist-info}/RECORD +14 -94
- mcli/__init__.py +0 -160
- mcli/__main__.py +0 -14
- mcli/app/__init__.py +0 -23
- mcli/app/model/__init__.py +0 -0
- mcli/app/video/__init__.py +0 -5
- mcli/chat/__init__.py +0 -34
- mcli/lib/__init__.py +0 -0
- mcli/lib/api/__init__.py +0 -0
- mcli/lib/auth/__init__.py +0 -1
- mcli/lib/config/__init__.py +0 -1
- mcli/lib/erd/__init__.py +0 -25
- mcli/lib/files/__init__.py +0 -0
- mcli/lib/fs/__init__.py +0 -1
- mcli/lib/logger/__init__.py +0 -3
- mcli/lib/performance/__init__.py +0 -17
- mcli/lib/pickles/__init__.py +0 -1
- mcli/lib/secrets/__init__.py +0 -10
- mcli/lib/shell/__init__.py +0 -0
- mcli/lib/toml/__init__.py +0 -1
- mcli/lib/watcher/__init__.py +0 -0
- mcli/ml/__init__.py +0 -16
- mcli/ml/api/__init__.py +0 -30
- mcli/ml/api/routers/__init__.py +0 -27
- mcli/ml/auth/__init__.py +0 -41
- mcli/ml/backtesting/__init__.py +0 -33
- mcli/ml/cli/__init__.py +0 -5
- mcli/ml/config/__init__.py +0 -33
- mcli/ml/configs/__init__.py +0 -16
- mcli/ml/dashboard/__init__.py +0 -12
- mcli/ml/dashboard/components/__init__.py +0 -7
- mcli/ml/dashboard/pages/__init__.py +0 -6
- mcli/ml/data_ingestion/__init__.py +0 -29
- mcli/ml/database/__init__.py +0 -40
- mcli/ml/experimentation/__init__.py +0 -29
- mcli/ml/features/__init__.py +0 -39
- mcli/ml/features/political_features.py +0 -677
- mcli/ml/mlops/__init__.py +0 -19
- mcli/ml/models/__init__.py +0 -90
- mcli/ml/monitoring/__init__.py +0 -25
- mcli/ml/optimization/__init__.py +0 -27
- mcli/ml/predictions/__init__.py +0 -5
- mcli/ml/preprocessing/__init__.py +0 -24
- mcli/ml/preprocessing/politician_trading_preprocessor.py +0 -570
- mcli/ml/scripts/__init__.py +0 -1
- mcli/ml/serving/__init__.py +0 -1
- mcli/ml/trading/__init__.py +0 -63
- mcli/ml/training/__init__.py +0 -7
- mcli/mygroup/__init__.py +0 -3
- mcli/public/__init__.py +0 -1
- mcli/public/commands/__init__.py +0 -2
- mcli/self/__init__.py +0 -3
- mcli/workflow/__init__.py +0 -0
- mcli/workflow/daemon/__init__.py +0 -15
- mcli/workflow/dashboard/__init__.py +0 -5
- mcli/workflow/docker/__init__.py +0 -0
- mcli/workflow/file/__init__.py +0 -0
- mcli/workflow/gcloud/__init__.py +0 -1
- mcli/workflow/git_commit/__init__.py +0 -0
- mcli/workflow/interview/__init__.py +0 -0
- mcli/workflow/politician_trading/__init__.py +0 -4
- mcli/workflow/politician_trading/config.py +0 -134
- mcli/workflow/politician_trading/connectivity.py +0 -492
- mcli/workflow/politician_trading/data_sources.py +0 -654
- mcli/workflow/politician_trading/database.py +0 -412
- mcli/workflow/politician_trading/demo.py +0 -249
- mcli/workflow/politician_trading/models.py +0 -327
- mcli/workflow/politician_trading/monitoring.py +0 -413
- mcli/workflow/politician_trading/scrapers.py +0 -1074
- mcli/workflow/politician_trading/scrapers_california.py +0 -434
- mcli/workflow/politician_trading/scrapers_corporate_registry.py +0 -797
- mcli/workflow/politician_trading/scrapers_eu.py +0 -376
- mcli/workflow/politician_trading/scrapers_free_sources.py +0 -509
- mcli/workflow/politician_trading/scrapers_third_party.py +0 -373
- mcli/workflow/politician_trading/scrapers_uk.py +0 -378
- mcli/workflow/politician_trading/scrapers_us_states.py +0 -471
- mcli/workflow/politician_trading/seed_database.py +0 -520
- mcli/workflow/politician_trading/supabase_functions.py +0 -354
- mcli/workflow/politician_trading/workflow.py +0 -879
- mcli/workflow/registry/__init__.py +0 -0
- mcli/workflow/repo/__init__.py +0 -0
- mcli/workflow/scheduler/__init__.py +0 -25
- mcli/workflow/search/__init__.py +0 -0
- mcli/workflow/sync/__init__.py +0 -5
- mcli/workflow/videos/__init__.py +0 -1
- mcli/workflow/wakatime/__init__.py +0 -80
- {mcli_framework-7.10.1.dist-info → mcli_framework-7.10.2.dist-info}/WHEEL +0 -0
- {mcli_framework-7.10.1.dist-info → mcli_framework-7.10.2.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.10.1.dist-info → mcli_framework-7.10.2.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.10.1.dist-info → mcli_framework-7.10.2.dist-info}/top_level.txt +0 -0
mcli/workflow/file/__init__.py
DELETED (file without changes)
mcli/workflow/gcloud/__init__.py
DELETED

@@ -1 +0,0 @@
-from .gcloud import gcloud

mcli/workflow/git_commit/__init__.py
DELETED (file without changes)

mcli/workflow/interview/__init__.py
DELETED (file without changes)
mcli/workflow/politician_trading/config.py
DELETED

@@ -1,134 +0,0 @@
-"""
-Configuration for politician trading data workflow
-"""
-
-import os
-from dataclasses import dataclass
-from typing import Optional
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-
-@dataclass
-class SupabaseConfig:
-    """Supabase database configuration"""
-
-    url: str
-    key: str
-    service_role_key: Optional[str] = None
-
-    @classmethod
-    def from_env(cls) -> "SupabaseConfig":
-        """Load configuration from environment or use provided values"""
-        # Your provided Supabase details
-        url = os.getenv("SUPABASE_URL", "https://uljsqvwkomdrlnofmlad.supabase.co")
-        key = os.getenv(
-            "SUPABASE_ANON_KEY",
-            "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InVsanNxdndrb21kcmxub2ZtbGFkIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTY4MDIyNDQsImV4cCI6MjA3MjM3ODI0NH0.QCpfcEpxGX_5Wn8ljf_J2KWjJLGdF8zRsV_7OatxmHI",
-        )
-        service_role_key = os.getenv("SUPABASE_SERVICE_ROLE_KEY")
-
-        return cls(url=url, key=key, service_role_key=service_role_key)
-
-
-@dataclass
-class ScrapingConfig:
-    """Web scraping configuration with comprehensive data sources"""
-
-    # Rate limiting
-    request_delay: float = 1.0  # seconds between requests
-    max_retries: int = 3
-    timeout: int = 30
-
-    # User agent for requests
-    user_agent: str = "Mozilla/5.0 (compatible; MCLI-PoliticianTracker/1.0)"
-
-    # Enable/disable source categories
-    enable_us_federal: bool = True
-    enable_us_states: bool = True
-    enable_eu_parliament: bool = True
-    enable_eu_national: bool = True
-    enable_third_party: bool = True
-
-    # Legacy properties for backward compatibility
-    us_congress_sources: list = None
-    eu_sources: list = None
-
-    def __post_init__(self):
-        # Maintain backward compatibility
-        if self.us_congress_sources is None:
-            self.us_congress_sources = [
-                "https://disclosures-clerk.house.gov/FinancialDisclosure",
-                "https://efd.senate.gov",
-                "https://api.quiverquant.com/beta/live/congresstrading",
-            ]
-
-        if self.eu_sources is None:
-            self.eu_sources = [
-                "https://www.europarl.europa.eu/meps/en/declarations",
-            ]
-
-    def get_active_sources(self):
-        """Get all active data sources based on configuration"""
-        from .data_sources import ALL_DATA_SOURCES
-
-        active_sources = []
-
-        if self.enable_us_federal:
-            active_sources.extend(ALL_DATA_SOURCES["us_federal"])
-
-        if self.enable_us_states:
-            active_sources.extend(ALL_DATA_SOURCES["us_states"])
-
-        if self.enable_eu_parliament:
-            active_sources.extend(ALL_DATA_SOURCES["eu_parliament"])
-
-        if self.enable_eu_national:
-            active_sources.extend(ALL_DATA_SOURCES["eu_national"])
-
-        if self.enable_third_party:
-            active_sources.extend(ALL_DATA_SOURCES["third_party"])
-
-        # Filter to only active status sources
-        return [source for source in active_sources if source.status == "active"]
-
-
-@dataclass
-class WorkflowConfig:
-    """Overall workflow configuration"""
-
-    supabase: SupabaseConfig
-    scraping: ScrapingConfig
-
-    # Cron schedule (for reference, actual scheduling done in Supabase)
-    cron_schedule: str = "0 */6 * * *"  # Every 6 hours
-
-    # Data retention
-    retention_days: int = 365  # Keep data for 1 year
-
-    @classmethod
-    def default(cls) -> "WorkflowConfig":
-        """Create default configuration"""
-        return cls(supabase=SupabaseConfig.from_env(), scraping=ScrapingConfig())
-
-    def to_serializable_dict(self) -> dict:
-        """Convert to a JSON-serializable dictionary"""
-        return {
-            "supabase": {
-                "url": self.supabase.url,
-                "has_service_key": bool(self.supabase.service_role_key),
-                # Don't include actual keys for security
-            },
-            "scraping": {
-                "request_delay": self.scraping.request_delay,
-                "max_retries": self.scraping.max_retries,
-                "timeout": self.scraping.timeout,
-                "user_agent": self.scraping.user_agent,
-                "us_congress_sources": self.scraping.us_congress_sources,
-                "eu_sources": self.scraping.eu_sources,
-            },
-            "cron_schedule": self.cron_schedule,
-            "retention_days": self.retention_days,
-        }
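For context, a minimal usage sketch of the deleted configuration module, exercising only names visible in the diff above. It assumes an mcli-framework 7.10.1 install, where mcli.workflow.politician_trading still ships; these imports no longer resolve in 7.10.2.

# Sketch only (7.10.1-era usage): build the default workflow config and
# inspect its key-redacted serialization.
from mcli.workflow.politician_trading.config import WorkflowConfig

config = WorkflowConfig.default()             # SupabaseConfig.from_env() + stock ScrapingConfig
print(config.to_serializable_dict())          # actual keys are omitted from this dict by design
print(config.scraping.us_congress_sources)    # legacy source list filled in __post_init__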
mcli/workflow/politician_trading/connectivity.py
DELETED

@@ -1,492 +0,0 @@
-"""
-Continuous Supabase connectivity validation and monitoring
-"""
-
-import asyncio
-import json
-import logging
-import time
-from datetime import datetime, timedelta
-from typing import Any, Dict, Optional
-
-from rich.console import Console
-from rich.live import Live
-from rich.panel import Panel
-from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
-from rich.table import Table
-
-from .config import WorkflowConfig
-from .database import PoliticianTradingDB
-
-logger = logging.getLogger(__name__)
-console = Console()
-
-
-class SupabaseConnectivityValidator:
-    """Validates and monitors Supabase connectivity in real-time"""
-
-    def __init__(self, config: WorkflowConfig = None):
-        self.config = config or WorkflowConfig.default()
-        self.db = PoliticianTradingDB(self.config)
-        self.last_test_results = {}
-
-    async def validate_connectivity(self) -> Dict[str, Any]:
-        """Comprehensive connectivity validation"""
-        validation_start = time.time()
-        results = {
-            "timestamp": datetime.utcnow().isoformat(),
-            "overall_status": "unknown",
-            "tests": {},
-            "duration_ms": 0,
-            "supabase_url": self.config.supabase.url,
-            "connectivity_score": 0,
-        }
-
-        tests = [
-            ("basic_connection", self._test_basic_connection),
-            ("read_operations", self._test_read_operations),
-            ("write_operations", self._test_write_operations),
-            ("table_access", self._test_table_access),
-            ("job_tracking", self._test_job_tracking),
-            ("real_time_sync", self._test_real_time_sync),
-        ]
-
-        passed_tests = 0
-
-        for test_name, test_func in tests:
-            try:
-                test_start = time.time()
-                test_result = await test_func()
-                test_duration = (time.time() - test_start) * 1000
-
-                results["tests"][test_name] = {
-                    "status": "passed" if test_result["success"] else "failed",
-                    "duration_ms": round(test_duration, 2),
-                    "details": test_result,
-                    "timestamp": datetime.utcnow().isoformat(),
-                }
-
-                if test_result["success"]:
-                    passed_tests += 1
-
-            except Exception as e:
-                results["tests"][test_name] = {
-                    "status": "error",
-                    "duration_ms": 0,
-                    "error": str(e),
-                    "timestamp": datetime.utcnow().isoformat(),
-                }
-
-        # Calculate overall status
-        connectivity_score = (passed_tests / len(tests)) * 100
-        results["connectivity_score"] = round(connectivity_score, 1)
-
-        if connectivity_score >= 90:
-            results["overall_status"] = "excellent"
-        elif connectivity_score >= 75:
-            results["overall_status"] = "good"
-        elif connectivity_score >= 50:
-            results["overall_status"] = "degraded"
-        else:
-            results["overall_status"] = "critical"
-
-        results["duration_ms"] = round((time.time() - validation_start) * 1000, 2)
-        self.last_test_results = results
-
-        return results
-
-    async def _test_basic_connection(self) -> Dict[str, Any]:
-        """Test basic database connection"""
-        try:
-            # Test basic REST API connectivity instead of RPC
-            import httpx
-
-            async with httpx.AsyncClient() as client:
-                response = await client.get(
-                    self.config.supabase.url + "/rest/v1/",
-                    headers={"apikey": self.config.supabase.key},
-                    timeout=30.0,
-                )
-
-                if response.status_code == 200:
-                    return {
-                        "success": True,
-                        "message": "Basic connection successful",
-                        "status_code": response.status_code,
-                    }
-                else:
-                    return {
-                        "success": False,
-                        "error": f"HTTP {response.status_code}: {response.text[:100]}",
-                    }
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-
-    async def _test_read_operations(self) -> Dict[str, Any]:
-        """Test read operations"""
-        try:
-            # Try reading from multiple tables
-            tables_to_test = [
-                "politicians",
-                "trading_disclosures",
-                "data_pull_jobs",
-                "data_sources",
-            ]
-            read_results = {}
-            schema_missing = False
-
-            for table in tables_to_test:
-                try:
-                    result = self.db.client.table(table).select("*").limit(1).execute()
-                    read_results[table] = "accessible"
-                except Exception as e:
-                    error_msg = str(e)
-                    if "Could not find" in error_msg and "schema cache" in error_msg:
-                        read_results[table] = "table_missing"
-                        schema_missing = True
-                    else:
-                        read_results[table] = f"error: {error_msg[:50]}..."
-
-            accessible_count = sum(1 for status in read_results.values() if status == "accessible")
-            missing_count = sum(1 for status in read_results.values() if status == "table_missing")
-
-            if schema_missing and accessible_count == 0:
-                return {
-                    "success": False,
-                    "tables_tested": read_results,
-                    "accessible_tables": accessible_count,
-                    "missing_tables": missing_count,
-                    "message": "Database schema not set up. Run 'mcli workflow politician-trading setup --generate-schema' to get setup instructions.",
-                }
-            else:
-                success = accessible_count > 0
-                return {
-                    "success": success,
-                    "tables_tested": read_results,
-                    "accessible_tables": accessible_count,
-                    "missing_tables": missing_count,
-                }
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-
-    async def _test_write_operations(self) -> Dict[str, Any]:
-        """Test write operations with a connectivity test record"""
-        try:
-            test_job_id = f"connectivity_test_{int(time.time())}"
-
-            # Create a test job record
-            try:
-                insert_result = (
-                    self.db.client.table("data_pull_jobs")
-                    .insert(
-                        {
-                            "job_type": "connectivity_test",
-                            "status": "running",
-                            "started_at": datetime.utcnow().isoformat(),
-                            "config_snapshot": {
-                                "test": True,
-                                "validator": "SupabaseConnectivityValidator",
-                            },
-                        }
-                    )
-                    .execute()
-                )
-            except Exception as e:
-                if "Could not find" in str(e) and "schema cache" in str(e):
-                    return {
-                        "success": False,
-                        "error": "Table 'data_pull_jobs' not found",
-                        "message": "Database schema not set up. Run schema setup first.",
-                    }
-                else:
-                    raise e
-
-            # Get the inserted record ID
-            if insert_result.data and len(insert_result.data) > 0:
-                inserted_id = insert_result.data[0]["id"]
-
-                # Update the record
-                update_result = (
-                    self.db.client.table("data_pull_jobs")
-                    .update(
-                        {
-                            "status": "completed",
-                            "completed_at": datetime.utcnow().isoformat(),
-                            "records_processed": 1,
-                        }
-                    )
-                    .eq("id", inserted_id)
-                    .execute()
-                )
-
-                # Read it back
-                read_result = (
-                    self.db.client.table("data_pull_jobs")
-                    .select("*")
-                    .eq("id", inserted_id)
-                    .execute()
-                )
-            else:
-                return {"success": False, "error": "Failed to get inserted record ID"}
-
-            # Clean up test record
-            self.db.client.table("data_pull_jobs").delete().eq("id", inserted_id).execute()
-
-            return {
-                "success": True,
-                "message": "Write operations successful",
-                "operations_tested": ["insert", "update", "select", "delete"],
-                "test_record_id": test_job_id,
-                "record_retrieved": len(read_result.data) > 0 if read_result.data else False,
-            }
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-
-    async def _test_table_access(self) -> Dict[str, Any]:
-        """Test access to all required tables"""
-        try:
-            required_tables = {
-                "politicians": ["id", "full_name", "role"],
-                "trading_disclosures": ["id", "politician_id", "transaction_date"],
-                "data_pull_jobs": ["id", "job_type", "status"],
-                "data_sources": ["id", "name", "url"],
-            }
-
-            table_access = {}
-
-            for table_name, required_columns in required_tables.items():
-                try:
-                    # Test table structure
-                    result = (
-                        self.db.client.table(table_name)
-                        .select(",".join(required_columns))
-                        .limit(1)
-                        .execute()
-                    )
-                    table_access[table_name] = {
-                        "accessible": True,
-                        "columns_verified": required_columns,
-                        "record_count_sample": len(result.data) if result.data else 0,
-                    }
-                except Exception as e:
-                    table_access[table_name] = {"accessible": False, "error": str(e)}
-
-            accessible_count = sum(
-                1 for info in table_access.values() if info.get("accessible", False)
-            )
-
-            return {
-                "success": accessible_count == len(required_tables),
-                "tables_tested": len(required_tables),
-                "tables_accessible": accessible_count,
-                "table_details": table_access,
-            }
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-
-    async def _test_job_tracking(self) -> Dict[str, Any]:
-        """Test job tracking functionality"""
-        try:
-            # Get recent jobs
-            recent_jobs = (
-                self.db.client.table("data_pull_jobs")
-                .select("*")
-                .order("created_at", desc=True)
-                .limit(5)
-                .execute()
-            )
-
-            # Get job statistics
-            job_stats = self.db.client.table("data_pull_jobs").select("status").execute()
-
-            status_counts = {}
-            if job_stats.data:
-                for job in job_stats.data:
-                    status = job.get("status", "unknown")
-                    status_counts[status] = status_counts.get(status, 0) + 1
-
-            return {
-                "success": True,
-                "recent_jobs_count": len(recent_jobs.data) if recent_jobs.data else 0,
-                "total_jobs": len(job_stats.data) if job_stats.data else 0,
-                "status_distribution": status_counts,
-                "job_tracking_functional": True,
-            }
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-
-    async def _test_real_time_sync(self) -> Dict[str, Any]:
-        """Test real-time synchronization capabilities"""
-        try:
-            # Create a timestamped record and verify immediate retrieval
-            timestamp = datetime.utcnow().isoformat()
-            test_source_id = f"rt_test_{int(time.time())}"
-
-            # Insert
-            insert_result = (
-                self.db.client.table("data_sources")
-                .insert(
-                    {
-                        "name": "Real-time Test Source",
-                        "url": "https://test.example.com",
-                        "source_type": "test",
-                        "region": "test",
-                        "is_active": True,
-                        "created_at": timestamp,
-                    }
-                )
-                .execute()
-            )
-
-            if insert_result.data and len(insert_result.data) > 0:
-                inserted_id = insert_result.data[0]["id"]
-
-                # Immediate read-back
-                result = (
-                    self.db.client.table("data_sources").select("*").eq("id", inserted_id).execute()
-                )
-
-                # Clean up
-                self.db.client.table("data_sources").delete().eq("id", inserted_id).execute()
-            else:
-                return {"success": False, "error": "Failed to insert test record"}
-
-            sync_successful = len(result.data) > 0 if result.data else False
-
-            return {
-                "success": sync_successful,
-                "message": "Real-time sync test completed",
-                "record_immediately_available": sync_successful,
-                "test_timestamp": timestamp,
-            }
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-
-    def display_connectivity_report(self, results: Dict[str, Any]):
-        """Display a formatted connectivity report"""
-        console.print(
-            f"\n🔗 Supabase Connectivity Report - {results['timestamp']}", style="bold cyan"
-        )
-
-        # Overall status
-        status_colors = {
-            "excellent": "bright_green",
-            "good": "green",
-            "degraded": "yellow",
-            "critical": "red",
-        }
-
-        status_color = status_colors.get(results["overall_status"], "white")
-
-        overall_panel = Panel(
-            f"Status: [{status_color}]{results['overall_status'].upper()}[/{status_color}]\n"
-            f"Connectivity Score: {results['connectivity_score']}%\n"
-            f"Test Duration: {results['duration_ms']}ms\n"
-            f"Supabase URL: {results['supabase_url']}",
-            title="🎯 Overall Connectivity",
-            border_style=status_color,
-        )
-        console.print(overall_panel)
-
-        # Test results table
-        test_table = Table(title="Test Results")
-        test_table.add_column("Test", style="cyan")
-        test_table.add_column("Status", style="bold")
-        test_table.add_column("Duration", justify="right")
-        test_table.add_column("Details")
-
-        for test_name, test_result in results["tests"].items():
-            status = test_result["status"]
-            status_style = {"passed": "green", "failed": "red", "error": "red"}.get(status, "white")
-
-            details = ""
-            if "details" in test_result:
-                if "message" in test_result["details"]:
-                    details = test_result["details"]["message"]
-                elif "operations_tested" in test_result["details"]:
-                    details = f"Ops: {', '.join(test_result['details']['operations_tested'])}"
-                elif "tables_accessible" in test_result["details"]:
-                    details = f"{test_result['details']['tables_accessible']}/{test_result['details']['tables_tested']} tables"
-
-            if "error" in test_result:
-                details = (
-                    test_result["error"][:50] + "..."
-                    if len(test_result["error"]) > 50
-                    else test_result["error"]
-                )
-
-            test_table.add_row(
-                test_name.replace("_", " ").title(),
-                f"[{status_style}]{status.upper()}[/{status_style}]",
-                f"{test_result['duration_ms']:.1f}ms",
-                details,
-            )
-
-        console.print(test_table)
-
-    async def continuous_monitoring(self, interval_seconds: int = 30, duration_minutes: int = 0):
-        """Run continuous connectivity monitoring"""
-        console.print(
-            f"🔄 Starting continuous Supabase connectivity monitoring (interval: {interval_seconds}s)",
-            style="bold blue",
-        )
-
-        start_time = time.time()
-        check_count = 0
-
-        try:
-            while True:
-                check_count += 1
-                console.print(
-                    f"\n📊 Check #{check_count} - {datetime.now().strftime('%H:%M:%S')}",
-                    style="dim",
-                )
-
-                # Run validation
-                results = await self.validate_connectivity()
-                self.display_connectivity_report(results)
-
-                # Check duration limit
-                if duration_minutes > 0:
-                    elapsed_minutes = (time.time() - start_time) / 60
-                    if elapsed_minutes >= duration_minutes:
-                        console.print(
-                            f"\n⏰ Monitoring completed after {duration_minutes} minutes",
-                            style="green",
-                        )
-                        break
-
-                # Wait for next check
-                console.print(
-                    f"\n⏱️ Next check in {interval_seconds} seconds... (Ctrl+C to stop)", style="dim"
-                )
-                await asyncio.sleep(interval_seconds)
-
-        except KeyboardInterrupt:
-            console.print("\n👋 Monitoring stopped by user", style="yellow")
-        except Exception as e:
-            console.print(f"\n❌ Monitoring error: {e}", style="red")
-
-
-async def run_connectivity_validation() -> Dict[str, Any]:
-    """Standalone function to run connectivity validation"""
-    validator = SupabaseConnectivityValidator()
-    return await validator.validate_connectivity()
-
-
-async def run_continuous_monitoring(interval: int = 30, duration: int = 0):
-    """Standalone function for continuous monitoring"""
-    validator = SupabaseConnectivityValidator()
-    await validator.continuous_monitoring(interval, duration)
-
-
-if __name__ == "__main__":
-    # Allow running this file directly for testing
-    async def main():
-        validator = SupabaseConnectivityValidator()
-
-        console.print("🧪 Running Supabase connectivity validation...", style="bold blue")
-        results = await validator.validate_connectivity()
-        validator.display_connectivity_report(results)
-
-    asyncio.run(main())
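Again for context, a minimal sketch of how this deleted validator was driven, mirroring the file's own __main__ block and the standalone helpers above. It assumes a 7.10.1 install and configured Supabase credentials; the module does not exist in 7.10.2.

# Sketch only (7.10.1-era usage): one-shot validation, then the rich report
# and a bounded monitoring loop.
import asyncio

from mcli.workflow.politician_trading.connectivity import (
    SupabaseConnectivityValidator,
    run_connectivity_validation,
)


async def main():
    # One-shot validation: returns the results dict built by validate_connectivity()
    results = await run_connectivity_validation()
    print(results["overall_status"], results["connectivity_score"])

    # Render the formatted report, then poll every 60s for 5 minutes
    validator = SupabaseConnectivityValidator()
    validator.display_connectivity_report(results)
    await validator.continuous_monitoring(interval_seconds=60, duration_minutes=5)


asyncio.run(main())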