mcli-framework 7.1.1__py3-none-any.whl → 7.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcli-framework might be problematic; consult the registry's advisory page for details.

Files changed (94)
  1. mcli/app/completion_cmd.py +59 -49
  2. mcli/app/completion_helpers.py +60 -138
  3. mcli/app/logs_cmd.py +6 -2
  4. mcli/app/main.py +17 -14
  5. mcli/app/model_cmd.py +19 -4
  6. mcli/chat/chat.py +3 -2
  7. mcli/lib/search/cached_vectorizer.py +1 -0
  8. mcli/lib/services/data_pipeline.py +12 -5
  9. mcli/lib/services/lsh_client.py +68 -57
  10. mcli/ml/api/app.py +28 -36
  11. mcli/ml/api/middleware.py +8 -16
  12. mcli/ml/api/routers/admin_router.py +3 -1
  13. mcli/ml/api/routers/auth_router.py +32 -56
  14. mcli/ml/api/routers/backtest_router.py +3 -1
  15. mcli/ml/api/routers/data_router.py +3 -1
  16. mcli/ml/api/routers/model_router.py +35 -74
  17. mcli/ml/api/routers/monitoring_router.py +3 -1
  18. mcli/ml/api/routers/portfolio_router.py +3 -1
  19. mcli/ml/api/routers/prediction_router.py +60 -65
  20. mcli/ml/api/routers/trade_router.py +6 -2
  21. mcli/ml/api/routers/websocket_router.py +12 -9
  22. mcli/ml/api/schemas.py +10 -2
  23. mcli/ml/auth/auth_manager.py +49 -114
  24. mcli/ml/auth/models.py +30 -15
  25. mcli/ml/auth/permissions.py +12 -19
  26. mcli/ml/backtesting/backtest_engine.py +134 -108
  27. mcli/ml/backtesting/performance_metrics.py +142 -108
  28. mcli/ml/cache.py +12 -18
  29. mcli/ml/cli/main.py +37 -23
  30. mcli/ml/config/settings.py +29 -12
  31. mcli/ml/dashboard/app.py +122 -130
  32. mcli/ml/dashboard/app_integrated.py +216 -150
  33. mcli/ml/dashboard/app_supabase.py +176 -108
  34. mcli/ml/dashboard/app_training.py +212 -206
  35. mcli/ml/dashboard/cli.py +14 -5
  36. mcli/ml/data_ingestion/api_connectors.py +51 -81
  37. mcli/ml/data_ingestion/data_pipeline.py +127 -125
  38. mcli/ml/data_ingestion/stream_processor.py +72 -80
  39. mcli/ml/database/migrations/env.py +3 -2
  40. mcli/ml/database/models.py +112 -79
  41. mcli/ml/database/session.py +6 -5
  42. mcli/ml/experimentation/ab_testing.py +149 -99
  43. mcli/ml/features/ensemble_features.py +9 -8
  44. mcli/ml/features/political_features.py +6 -5
  45. mcli/ml/features/recommendation_engine.py +15 -14
  46. mcli/ml/features/stock_features.py +7 -6
  47. mcli/ml/features/test_feature_engineering.py +8 -7
  48. mcli/ml/logging.py +10 -15
  49. mcli/ml/mlops/data_versioning.py +57 -64
  50. mcli/ml/mlops/experiment_tracker.py +49 -41
  51. mcli/ml/mlops/model_serving.py +59 -62
  52. mcli/ml/mlops/pipeline_orchestrator.py +203 -149
  53. mcli/ml/models/base_models.py +8 -7
  54. mcli/ml/models/ensemble_models.py +6 -5
  55. mcli/ml/models/recommendation_models.py +7 -6
  56. mcli/ml/models/test_models.py +18 -14
  57. mcli/ml/monitoring/drift_detection.py +95 -74
  58. mcli/ml/monitoring/metrics.py +10 -22
  59. mcli/ml/optimization/portfolio_optimizer.py +172 -132
  60. mcli/ml/predictions/prediction_engine.py +62 -50
  61. mcli/ml/preprocessing/data_cleaners.py +6 -5
  62. mcli/ml/preprocessing/feature_extractors.py +7 -6
  63. mcli/ml/preprocessing/ml_pipeline.py +3 -2
  64. mcli/ml/preprocessing/politician_trading_preprocessor.py +11 -10
  65. mcli/ml/preprocessing/test_preprocessing.py +4 -4
  66. mcli/ml/scripts/populate_sample_data.py +36 -16
  67. mcli/ml/tasks.py +82 -83
  68. mcli/ml/tests/test_integration.py +86 -76
  69. mcli/ml/tests/test_training_dashboard.py +169 -142
  70. mcli/mygroup/test_cmd.py +2 -1
  71. mcli/self/self_cmd.py +31 -16
  72. mcli/self/test_cmd.py +2 -1
  73. mcli/workflow/dashboard/dashboard_cmd.py +13 -6
  74. mcli/workflow/lsh_integration.py +46 -58
  75. mcli/workflow/politician_trading/commands.py +576 -427
  76. mcli/workflow/politician_trading/config.py +7 -7
  77. mcli/workflow/politician_trading/connectivity.py +35 -33
  78. mcli/workflow/politician_trading/data_sources.py +72 -71
  79. mcli/workflow/politician_trading/database.py +18 -16
  80. mcli/workflow/politician_trading/demo.py +4 -3
  81. mcli/workflow/politician_trading/models.py +5 -5
  82. mcli/workflow/politician_trading/monitoring.py +13 -13
  83. mcli/workflow/politician_trading/scrapers.py +332 -224
  84. mcli/workflow/politician_trading/scrapers_california.py +116 -94
  85. mcli/workflow/politician_trading/scrapers_eu.py +70 -71
  86. mcli/workflow/politician_trading/scrapers_uk.py +118 -90
  87. mcli/workflow/politician_trading/scrapers_us_states.py +125 -92
  88. mcli/workflow/politician_trading/workflow.py +98 -71
  89. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.2.dist-info}/METADATA +1 -1
  90. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.2.dist-info}/RECORD +94 -94
  91. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.2.dist-info}/WHEEL +0 -0
  92. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.2.dist-info}/entry_points.txt +0 -0
  93. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.2.dist-info}/licenses/LICENSE +0 -0
  94. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.2.dist-info}/top_level.txt +0 -0
mcli/app/main.py CHANGED
@@ -251,11 +251,11 @@ class LazyCommand(click.Command):
251
251
  """Get parameters from the lazily loaded command."""
252
252
  cmd = self._load_command()
253
253
  return cmd.get_params(ctx)
254
-
254
+
255
255
  def shell_complete(self, ctx, param, incomplete):
256
256
  """Provide shell completion for the lazily loaded command."""
257
257
  cmd = self._load_command()
258
- if hasattr(cmd, 'shell_complete'):
258
+ if hasattr(cmd, "shell_complete"):
259
259
  return cmd.shell_complete(ctx, param, incomplete)
260
260
  return []
261
261
 
@@ -305,11 +305,11 @@ class LazyGroup(click.Group):
305
305
  """Get parameters from the lazily loaded group."""
306
306
  group = self._load_group()
307
307
  return group.get_params(ctx)
308
-
308
+
309
309
  def shell_complete(self, ctx, param, incomplete):
310
310
  """Provide shell completion for the lazily loaded group."""
311
311
  group = self._load_group()
312
- if hasattr(group, 'shell_complete'):
312
+ if hasattr(group, "shell_complete"):
313
313
  return group.shell_complete(ctx, param, incomplete)
314
314
  return []
315
315
 
@@ -333,11 +333,11 @@ def _add_lazy_commands(app: click.Group):
333
333
  logger.debug("Added self management commands")
334
334
  except Exception as e:
335
335
  logger.debug(f"Could not load self commands: {e}")
336
-
336
+
337
337
  # Shell completion - load immediately as it's lightweight and useful
338
338
  try:
339
339
  from mcli.app.completion_cmd import completion
340
-
340
+
341
341
  app.add_command(completion, name="completion")
342
342
  logger.debug("Added completion commands")
343
343
  except ImportError as e:
@@ -346,18 +346,22 @@ def _add_lazy_commands(app: click.Group):
346
346
  # Add workflow with completion-aware lazy loading
347
347
  try:
348
348
  from mcli.app.completion_helpers import create_completion_aware_lazy_group
349
+
349
350
  workflow_group = create_completion_aware_lazy_group(
350
- "workflow",
351
+ "workflow",
351
352
  "mcli.workflow.workflow.workflow",
352
- "Workflow commands for automation, video processing, and daemon management"
353
+ "Workflow commands for automation, video processing, and daemon management",
353
354
  )
354
355
  app.add_command(workflow_group, name="workflow")
355
356
  logger.debug("Added completion-aware workflow group")
356
357
  except ImportError as e:
357
358
  logger.debug(f"Could not load completion helpers, using standard lazy group: {e}")
358
359
  # Fallback to standard lazy group
359
- workflow_group = LazyGroup("workflow", "mcli.workflow.workflow.workflow",
360
- help="Workflow commands for automation, video processing, and daemon management")
360
+ workflow_group = LazyGroup(
361
+ "workflow",
362
+ "mcli.workflow.workflow.workflow",
363
+ help="Workflow commands for automation, video processing, and daemon management",
364
+ )
361
365
  app.add_command(workflow_group, name="workflow")
362
366
 
363
367
  # Lazy load other heavy commands that are used less frequently
@@ -392,15 +396,14 @@ def _add_lazy_commands(app: click.Group):
392
396
  # Skip workflow since we already added it with completion support
393
397
  if cmd_name == "workflow":
394
398
  continue
395
-
399
+
396
400
  if cmd_name in ["model", "redis", "logs"]:
397
401
  # Use completion-aware LazyGroup for commands that have subcommands
398
402
  try:
399
403
  from mcli.app.completion_helpers import create_completion_aware_lazy_group
404
+
400
405
  lazy_cmd = create_completion_aware_lazy_group(
401
- cmd_name,
402
- cmd_info["import_path"],
403
- cmd_info["help"]
406
+ cmd_name, cmd_info["import_path"], cmd_info["help"]
404
407
  )
405
408
  except ImportError:
406
409
  # Fallback to standard LazyGroup
mcli/app/model_cmd.py CHANGED
@@ -100,7 +100,9 @@ def download(model_name: str):
100
100
 
101
101
  @model.command()
102
102
  @click.option("--model", "-m", help="Specific model to use")
103
- @click.option("--port", "-p", default=None, help="Port to run server on (default: from config or 51234)")
103
+ @click.option(
104
+ "--port", "-p", default=None, help="Port to run server on (default: from config or 51234)"
105
+ )
104
106
  @click.option(
105
107
  "--auto-download",
106
108
  is_flag=True,
@@ -113,6 +115,7 @@ def start(model: Optional[str], port: Optional[int], auto_download: bool):
113
115
  if port is None:
114
116
  try:
115
117
  from mcli.lib.config.config import load_config
118
+
116
119
  config = load_config()
117
120
  port = config.get("model", {}).get("server_port", 51234)
118
121
  except Exception:
@@ -201,13 +204,19 @@ def recommend():
201
204
 
202
205
 
203
206
  @model.command()
204
- @click.option("--port", "-p", default=None, help="Port where server is running (default: from config or 51234)")
207
+ @click.option(
208
+ "--port",
209
+ "-p",
210
+ default=None,
211
+ help="Port where server is running (default: from config or 51234)",
212
+ )
205
213
  def status(port: Optional[int]):
206
214
  """Check status of the lightweight model server."""
207
215
  # Load port from config if not specified
208
216
  if port is None:
209
217
  try:
210
218
  from mcli.lib.config.config import load_config
219
+
211
220
  config = load_config()
212
221
  port = config.get("model", {}).get("server_port", 51234)
213
222
  except Exception:
@@ -243,20 +252,26 @@ def status(port: Optional[int]):
243
252
 
244
253
 
245
254
  @model.command()
246
- @click.option("--port", "-p", default=None, help="Port where server is running (default: from config or 51234)")
255
+ @click.option(
256
+ "--port",
257
+ "-p",
258
+ default=None,
259
+ help="Port where server is running (default: from config or 51234)",
260
+ )
247
261
  def stop(port: Optional[int]):
248
262
  """Stop the lightweight model server."""
249
263
  # Load port from config if not specified
250
264
  if port is None:
251
265
  try:
252
266
  from mcli.lib.config.config import load_config
267
+
253
268
  config = load_config()
254
269
  port = config.get("model", {}).get("server_port", 51234)
255
270
  except Exception:
256
271
  port = 51234 # Default ephemeral port
257
272
 
258
- import requests
259
273
  import psutil
274
+ import requests
260
275
 
261
276
  try:
262
277
  # First check if server is running
mcli/chat/chat.py CHANGED
@@ -7,6 +7,7 @@ import requests
7
7
  # Optional ollama import - gracefully handle if not installed
8
8
  try:
9
9
  import ollama
10
+
10
11
  OLLAMA_AVAILABLE = True
11
12
  except ImportError:
12
13
  OLLAMA_AVAILABLE = False
@@ -937,8 +938,8 @@ Respond naturally and helpfully, considering both MCLI commands and system contr
937
938
  console.print("[yellow]For local model support, install ollama:[/yellow]")
938
939
  console.print(" pip install ollama")
939
940
  console.print("\n[yellow]Or switch to OpenAI by configuring:[/yellow]")
940
- console.print(" provider = \"openai\"")
941
- console.print(" openai_api_key = \"your-key-here\"")
941
+ console.print(' provider = "openai"')
942
+ console.print(' openai_api_key = "your-key-here"')
942
943
  return
943
944
 
944
945
  # Use Ollama SDK for local model inference
@@ -14,6 +14,7 @@ import numpy as np
14
14
  # Optional redis import - gracefully handle if not installed
15
15
  try:
16
16
  import redis.asyncio as redis
17
+
17
18
  REDIS_AVAILABLE = True
18
19
  except ImportError:
19
20
  REDIS_AVAILABLE = False
@@ -6,11 +6,12 @@ Handles ETL processes for data received from LSH daemon
6
6
  import asyncio
7
7
  import json
8
8
  import time
9
- from typing import Any, Dict, List, Optional, Callable
10
9
  from datetime import datetime, timezone
11
10
  from pathlib import Path
11
+ from typing import Any, Callable, Dict, List, Optional
12
12
 
13
13
  from mcli.lib.logger.logger import get_logger
14
+
14
15
  from .lsh_client import LSHClient, LSHEventProcessor
15
16
 
16
17
  logger = get_logger(__name__)
@@ -186,7 +187,9 @@ class DataProcessor:
186
187
  # Validate
187
188
  if self.config.enable_validation:
188
189
  if not await self.validator.validate_trading_record(record):
189
- self.logger.warning(f"Validation failed for record: {record.get('id', 'unknown')}")
190
+ self.logger.warning(
191
+ f"Validation failed for record: {record.get('id', 'unknown')}"
192
+ )
190
193
  continue
191
194
 
192
195
  # Enrich
@@ -208,7 +211,9 @@ class DataProcessor:
208
211
  self.logger.info(f"Processed {len(processed_records)}/{len(records)} trading records")
209
212
  return processed_records
210
213
 
211
- async def process_supabase_sync(self, table: str, operation: str, data: Dict[str, Any]) -> Dict[str, Any]:
214
+ async def process_supabase_sync(
215
+ self, table: str, operation: str, data: Dict[str, Any]
216
+ ) -> Dict[str, Any]:
212
217
  """Process Supabase sync data"""
213
218
  try:
214
219
  # Validate
@@ -231,7 +236,9 @@ class DataProcessor:
231
236
  self.logger.error(f"Error processing Supabase sync: {e}")
232
237
  return {}
233
238
 
234
- async def _transform_supabase_data(self, table: str, operation: str, data: Dict[str, Any]) -> Dict[str, Any]:
239
+ async def _transform_supabase_data(
240
+ self, table: str, operation: str, data: Dict[str, Any]
241
+ ) -> Dict[str, Any]:
235
242
  """Transform Supabase data based on table schema"""
236
243
  transformed = data.copy()
237
244
 
@@ -457,4 +464,4 @@ class LSHDataPipeline:
457
464
  "batch_timeout": self.config.batch_timeout,
458
465
  "output_dir": str(self.config.output_dir),
459
466
  },
460
- }
467
+ }
@@ -8,7 +8,7 @@ import json
8
8
  import logging
9
9
  import os
10
10
  import time
11
- from typing import Any, Dict, List, Optional, Callable
11
+ from typing import Any, Callable, Dict, List, Optional
12
12
  from urllib.parse import urljoin
13
13
 
14
14
  import aiohttp
@@ -51,9 +51,7 @@ class LSHClient:
51
51
  """Initialize aiohttp session"""
52
52
  if not self.session:
53
53
  connector = aiohttp.TCPConnector(limit=10)
54
- self.session = aiohttp.ClientSession(
55
- connector=connector, timeout=self.timeout
56
- )
54
+ self.session = aiohttp.ClientSession(connector=connector, timeout=self.timeout)
57
55
  logger.info(f"Connected to LSH API at {self.base_url}")
58
56
 
59
57
  async def disconnect(self):
@@ -81,9 +79,7 @@ class LSHClient:
81
79
  headers = self._get_headers()
82
80
 
83
81
  try:
84
- async with self.session.request(
85
- method, url, headers=headers, json=data
86
- ) as response:
82
+ async with self.session.request(method, url, headers=headers, json=data) as response:
87
83
  if response.status == 401:
88
84
  raise ValueError("LSH API authentication failed - check API key")
89
85
 
@@ -104,9 +100,7 @@ class LSHClient:
104
100
  endpoint = "/api/jobs"
105
101
  if filter_params:
106
102
  # Convert filter to query params
107
- endpoint += "?" + "&".join(
108
- f"{k}={v}" for k, v in filter_params.items()
109
- )
103
+ endpoint += "?" + "&".join(f"{k}={v}" for k, v in filter_params.items())
110
104
  return await self._request("GET", endpoint)
111
105
 
112
106
  async def get_job(self, job_id: str) -> Dict[str, Any]:
@@ -261,12 +255,15 @@ class LSHEventProcessor:
261
255
  self.logger.info(f"LSH job started: {job_name} ({job_id})")
262
256
 
263
257
  # Emit mcli-specific event
264
- await self._emit_mcli_event("lsh.job.started", {
265
- "job_id": job_id,
266
- "job_name": job_name,
267
- "timestamp": data.get("timestamp"),
268
- "job_data": job_data
269
- })
258
+ await self._emit_mcli_event(
259
+ "lsh.job.started",
260
+ {
261
+ "job_id": job_id,
262
+ "job_name": job_name,
263
+ "timestamp": data.get("timestamp"),
264
+ "job_data": job_data,
265
+ },
266
+ )
270
267
 
271
268
  async def _handle_job_completed(self, data: Dict[str, Any]):
272
269
  """Handle job completion event"""
@@ -289,14 +286,17 @@ class LSHEventProcessor:
289
286
  await self._process_supabase_job(job_data)
290
287
 
291
288
  # Emit mcli-specific event
292
- await self._emit_mcli_event("lsh.job.completed", {
293
- "job_id": job_id,
294
- "job_name": job_name,
295
- "timestamp": data.get("timestamp"),
296
- "job_data": job_data,
297
- "stdout": stdout,
298
- "stderr": stderr
299
- })
289
+ await self._emit_mcli_event(
290
+ "lsh.job.completed",
291
+ {
292
+ "job_id": job_id,
293
+ "job_name": job_name,
294
+ "timestamp": data.get("timestamp"),
295
+ "job_data": job_data,
296
+ "stdout": stdout,
297
+ "stderr": stderr,
298
+ },
299
+ )
300
300
 
301
301
  async def _handle_job_failed(self, data: Dict[str, Any]):
302
302
  """Handle job failure event"""
@@ -308,13 +308,16 @@ class LSHEventProcessor:
308
308
  self.logger.error(f"LSH job failed: {job_name} ({job_id}) - {error}")
309
309
 
310
310
  # Emit mcli-specific event
311
- await self._emit_mcli_event("lsh.job.failed", {
312
- "job_id": job_id,
313
- "job_name": job_name,
314
- "timestamp": data.get("timestamp"),
315
- "error": error,
316
- "job_data": job_data
317
- })
311
+ await self._emit_mcli_event(
312
+ "lsh.job.failed",
313
+ {
314
+ "job_id": job_id,
315
+ "job_name": job_name,
316
+ "timestamp": data.get("timestamp"),
317
+ "error": error,
318
+ "job_data": job_data,
319
+ },
320
+ )
318
321
 
319
322
  async def _handle_supabase_sync(self, data: Dict[str, Any]):
320
323
  """Handle Supabase data sync event"""
@@ -329,12 +332,15 @@ class LSHEventProcessor:
329
332
  await self._process_politician_data(table, operation, sync_data)
330
333
 
331
334
  # Emit mcli-specific event
332
- await self._emit_mcli_event("lsh.supabase.sync", {
333
- "table": table,
334
- "operation": operation,
335
- "data": sync_data,
336
- "timestamp": data.get("timestamp")
337
- })
335
+ await self._emit_mcli_event(
336
+ "lsh.supabase.sync",
337
+ {
338
+ "table": table,
339
+ "operation": operation,
340
+ "data": sync_data,
341
+ "timestamp": data.get("timestamp"),
342
+ },
343
+ )
338
344
 
339
345
  async def _process_trading_data(self, job_data: Dict, stdout: str):
340
346
  """Process politician trading data from job output"""
@@ -343,7 +349,7 @@ class LSHEventProcessor:
343
349
  if stdout.strip():
344
350
  # Assuming JSON output format
345
351
  trading_records = []
346
- for line in stdout.strip().split('\n'):
352
+ for line in stdout.strip().split("\n"):
347
353
  try:
348
354
  record = json.loads(line)
349
355
  trading_records.append(record)
@@ -354,12 +360,15 @@ class LSHEventProcessor:
354
360
  self.logger.info(f"Processed {len(trading_records)} trading records")
355
361
 
356
362
  # Emit processed data event
357
- await self._emit_mcli_event("trading.data.processed", {
358
- "records": trading_records,
359
- "count": len(trading_records),
360
- "job_id": job_data.get("id"),
361
- "timestamp": time.time()
362
- })
363
+ await self._emit_mcli_event(
364
+ "trading.data.processed",
365
+ {
366
+ "records": trading_records,
367
+ "count": len(trading_records),
368
+ "job_id": job_data.get("id"),
369
+ "timestamp": time.time(),
370
+ },
371
+ )
363
372
 
364
373
  except Exception as e:
365
374
  self.logger.error(f"Error processing trading data: {e}")
@@ -373,11 +382,10 @@ class LSHEventProcessor:
373
382
  self.logger.info(f"Processing Supabase sync job: {job_data.get('name')}")
374
383
 
375
384
  # Emit database sync event
376
- await self._emit_mcli_event("database.sync.completed", {
377
- "job_id": job_data.get("id"),
378
- "sync_info": sync_info,
379
- "timestamp": time.time()
380
- })
385
+ await self._emit_mcli_event(
386
+ "database.sync.completed",
387
+ {"job_id": job_data.get("id"), "sync_info": sync_info, "timestamp": time.time()},
388
+ )
381
389
 
382
390
  except Exception as e:
383
391
  self.logger.error(f"Error processing Supabase job: {e}")
@@ -391,13 +399,16 @@ class LSHEventProcessor:
391
399
  processed_data = await self._transform_politician_data(table, operation, data)
392
400
 
393
401
  # Emit transformed data event
394
- await self._emit_mcli_event("politician.data.updated", {
395
- "table": table,
396
- "operation": operation,
397
- "original_data": data,
398
- "processed_data": processed_data,
399
- "timestamp": time.time()
400
- })
402
+ await self._emit_mcli_event(
403
+ "politician.data.updated",
404
+ {
405
+ "table": table,
406
+ "operation": operation,
407
+ "original_data": data,
408
+ "processed_data": processed_data,
409
+ "timestamp": time.time(),
410
+ },
411
+ )
401
412
 
402
413
  except Exception as e:
403
414
  self.logger.error(f"Error processing politician data: {e}")
@@ -438,4 +449,4 @@ class LSHEventProcessor:
438
449
  async def start_processing(self):
439
450
  """Start processing LSH events"""
440
451
  self.logger.info("Starting LSH event processing...")
441
- await self.client.stream_events()
452
+ await self.client.stream_events()
mcli/ml/api/app.py CHANGED
@@ -1,37 +1,38 @@
1
1
  """FastAPI application factory and configuration"""
2
2
 
3
3
  from contextlib import asynccontextmanager
4
- from typing import Dict, Any
4
+ from typing import Any, Dict
5
5
 
6
+ import uvicorn
6
7
  from fastapi import FastAPI, Request, Response
7
8
  from fastapi.middleware.cors import CORSMiddleware
8
9
  from fastapi.middleware.gzip import GZipMiddleware
9
10
  from fastapi.middleware.trustedhost import TrustedHostMiddleware
10
11
  from fastapi.responses import JSONResponse
11
12
  from starlette.middleware.sessions import SessionMiddleware
12
- import uvicorn
13
13
 
14
+ from mcli.ml.cache import init_cache
14
15
  from mcli.ml.config import settings
15
16
  from mcli.ml.database.session import init_db
16
- from mcli.ml.cache import init_cache
17
- from mcli.ml.logging import setup_logging, get_logger
17
+ from mcli.ml.logging import get_logger, setup_logging
18
+
19
+ from .middleware import (
20
+ ErrorHandlingMiddleware,
21
+ RateLimitMiddleware,
22
+ RequestLoggingMiddleware,
23
+ )
18
24
  from .routers import (
25
+ admin_router,
19
26
  auth_router,
27
+ backtest_router,
28
+ data_router,
20
29
  model_router,
21
- prediction_router,
30
+ monitoring_router,
22
31
  portfolio_router,
23
- data_router,
32
+ prediction_router,
24
33
  trade_router,
25
- backtest_router,
26
- monitoring_router,
27
- admin_router,
28
34
  websocket_router,
29
35
  )
30
- from .middleware import (
31
- RequestLoggingMiddleware,
32
- RateLimitMiddleware,
33
- ErrorHandlingMiddleware,
34
- )
35
36
 
36
37
  logger = get_logger(__name__)
37
38
 
@@ -52,6 +53,7 @@ async def lifespan(app: FastAPI):
52
53
 
53
54
  # Initialize ML models
54
55
  from mcli.ml.models import load_production_models
56
+
55
57
  await load_production_models()
56
58
  logger.info("ML models loaded")
57
59
 
@@ -62,10 +64,12 @@ async def lifespan(app: FastAPI):
62
64
 
63
65
  # Cleanup cache connections
64
66
  from mcli.ml.cache import close_cache
67
+
65
68
  await close_cache()
66
69
 
67
70
  # Cleanup database connections
68
71
  from mcli.ml.database.session import async_engine
72
+
69
73
  await async_engine.dispose()
70
74
 
71
75
 
@@ -109,10 +113,7 @@ def create_app() -> FastAPI:
109
113
 
110
114
  # Trusted host middleware
111
115
  if settings.is_production:
112
- app.add_middleware(
113
- TrustedHostMiddleware,
114
- allowed_hosts=["*.mcli-ml.com", "mcli-ml.com"]
115
- )
116
+ app.add_middleware(TrustedHostMiddleware, allowed_hosts=["*.mcli-ml.com", "mcli-ml.com"])
116
117
 
117
118
  # Include routers
118
119
  app.include_router(auth_router.router, prefix="/api/v1/auth", tags=["Authentication"])
@@ -130,18 +131,14 @@ def create_app() -> FastAPI:
130
131
  @app.get("/health", tags=["Health"])
131
132
  async def health_check():
132
133
  """Health check endpoint"""
133
- return {
134
- "status": "healthy",
135
- "environment": settings.environment,
136
- "version": "1.0.0"
137
- }
134
+ return {"status": "healthy", "environment": settings.environment, "version": "1.0.0"}
138
135
 
139
136
  # Ready check endpoint
140
137
  @app.get("/ready", tags=["Health"])
141
138
  async def ready_check():
142
139
  """Readiness check endpoint"""
143
- from mcli.ml.database.session import check_database_health
144
140
  from mcli.ml.cache import check_cache_health
141
+ from mcli.ml.database.session import check_database_health
145
142
 
146
143
  db_healthy = await check_database_health()
147
144
  cache_healthy = await check_cache_health()
@@ -154,8 +151,8 @@ def create_app() -> FastAPI:
154
151
  content={
155
152
  "status": "not ready",
156
153
  "database": "healthy" if db_healthy else "unhealthy",
157
- "cache": "healthy" if cache_healthy else "unhealthy"
158
- }
154
+ "cache": "healthy" if cache_healthy else "unhealthy",
155
+ },
159
156
  )
160
157
 
161
158
  # Metrics endpoint (Prometheus format)
@@ -163,6 +160,7 @@ def create_app() -> FastAPI:
163
160
  async def metrics():
164
161
  """Prometheus metrics endpoint"""
165
162
  from mcli.ml.monitoring.metrics import get_metrics
163
+
166
164
  return Response(content=get_metrics(), media_type="text/plain")
167
165
 
168
166
  # Root endpoint
@@ -172,24 +170,18 @@ def create_app() -> FastAPI:
172
170
  return {
173
171
  "message": "MCLI ML System API",
174
172
  "version": "1.0.0",
175
- "docs": "/docs" if settings.debug else None
173
+ "docs": "/docs" if settings.debug else None,
176
174
  }
177
175
 
178
176
  # Exception handlers
179
177
  @app.exception_handler(404)
180
178
  async def not_found_handler(request: Request, exc):
181
- return JSONResponse(
182
- status_code=404,
183
- content={"detail": "Resource not found"}
184
- )
179
+ return JSONResponse(status_code=404, content={"detail": "Resource not found"})
185
180
 
186
181
  @app.exception_handler(500)
187
182
  async def internal_server_error_handler(request: Request, exc):
188
183
  logger.error(f"Internal server error: {exc}")
189
- return JSONResponse(
190
- status_code=500,
191
- content={"detail": "Internal server error"}
192
- )
184
+ return JSONResponse(status_code=500, content={"detail": "Internal server error"})
193
185
 
194
186
  return app
195
187
 
@@ -212,4 +204,4 @@ if __name__ == "__main__":
212
204
  workers=settings.api.workers,
213
205
  reload=settings.debug,
214
206
  log_level="debug" if settings.debug else "info",
215
- )
207
+ )
mcli/ml/api/middleware.py CHANGED
@@ -2,11 +2,11 @@
2
2
 
3
3
  import time
4
4
  import uuid
5
- from typing import Callable
6
5
  from collections import defaultdict
7
6
  from datetime import datetime, timedelta
7
+ from typing import Callable
8
8
 
9
- from fastapi import Request, Response, HTTPException
9
+ from fastapi import HTTPException, Request, Response
10
10
  from fastapi.responses import JSONResponse
11
11
  from starlette.middleware.base import BaseHTTPMiddleware
12
12
  from starlette.types import ASGIApp
@@ -34,8 +34,7 @@ class RequestLoggingMiddleware(BaseHTTPMiddleware):
34
34
  # Log response
35
35
  process_time = time.time() - start_time
36
36
  logger.info(
37
- f"Response {request_id}: status={response.status_code} "
38
- f"duration={process_time:.3f}s"
37
+ f"Response {request_id}: status={response.status_code} " f"duration={process_time:.3f}s"
39
38
  )
40
39
 
41
40
  # Add headers
@@ -67,8 +66,7 @@ class RateLimitMiddleware(BaseHTTPMiddleware):
67
66
 
68
67
  # Clean old requests
69
68
  self.clients[client_ip] = [
70
- req_time for req_time in self.clients[client_ip]
71
- if req_time > minute_ago
69
+ req_time for req_time in self.clients[client_ip] if req_time > minute_ago
72
70
  ]
73
71
 
74
72
  # Check if limit exceeded
@@ -77,7 +75,7 @@ class RateLimitMiddleware(BaseHTTPMiddleware):
77
75
  return JSONResponse(
78
76
  status_code=429,
79
77
  content={"detail": "Rate limit exceeded. Please try again later."},
80
- headers={"Retry-After": "60"}
78
+ headers={"Retry-After": "60"},
81
79
  )
82
80
 
83
81
  # Record request
@@ -102,18 +100,12 @@ class ErrorHandlingMiddleware(BaseHTTPMiddleware):
102
100
  except Exception as e:
103
101
  # Log unexpected errors
104
102
  request_id = getattr(request.state, "request_id", "unknown")
105
- logger.error(
106
- f"Unhandled exception in request {request_id}: {str(e)}",
107
- exc_info=True
108
- )
103
+ logger.error(f"Unhandled exception in request {request_id}: {str(e)}", exc_info=True)
109
104
 
110
105
  # Return generic error response
111
106
  return JSONResponse(
112
107
  status_code=500,
113
- content={
114
- "detail": "An internal error occurred",
115
- "request_id": request_id
116
- }
108
+ content={"detail": "An internal error occurred", "request_id": request_id},
117
109
  )
118
110
 
119
111
 
@@ -221,4 +213,4 @@ class MetricsMiddleware(BaseHTTPMiddleware):
221
213
  "min_duration": min(durations),
222
214
  "max_duration": max(durations),
223
215
  }
224
- return metrics
216
+ return metrics