mcli_framework-7.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mcli-framework might be problematic.
- mcli/app/chat_cmd.py +42 -0
- mcli/app/commands_cmd.py +226 -0
- mcli/app/completion_cmd.py +216 -0
- mcli/app/completion_helpers.py +288 -0
- mcli/app/cron_test_cmd.py +697 -0
- mcli/app/logs_cmd.py +419 -0
- mcli/app/main.py +492 -0
- mcli/app/model/model.py +1060 -0
- mcli/app/model_cmd.py +227 -0
- mcli/app/redis_cmd.py +269 -0
- mcli/app/video/video.py +1114 -0
- mcli/app/visual_cmd.py +303 -0
- mcli/chat/chat.py +2409 -0
- mcli/chat/command_rag.py +514 -0
- mcli/chat/enhanced_chat.py +652 -0
- mcli/chat/system_controller.py +1010 -0
- mcli/chat/system_integration.py +1016 -0
- mcli/cli.py +25 -0
- mcli/config.toml +20 -0
- mcli/lib/api/api.py +586 -0
- mcli/lib/api/daemon_client.py +203 -0
- mcli/lib/api/daemon_client_local.py +44 -0
- mcli/lib/api/daemon_decorator.py +217 -0
- mcli/lib/api/mcli_decorators.py +1032 -0
- mcli/lib/auth/auth.py +85 -0
- mcli/lib/auth/aws_manager.py +85 -0
- mcli/lib/auth/azure_manager.py +91 -0
- mcli/lib/auth/credential_manager.py +192 -0
- mcli/lib/auth/gcp_manager.py +93 -0
- mcli/lib/auth/key_manager.py +117 -0
- mcli/lib/auth/mcli_manager.py +93 -0
- mcli/lib/auth/token_manager.py +75 -0
- mcli/lib/auth/token_util.py +1011 -0
- mcli/lib/config/config.py +47 -0
- mcli/lib/discovery/__init__.py +1 -0
- mcli/lib/discovery/command_discovery.py +274 -0
- mcli/lib/erd/erd.py +1345 -0
- mcli/lib/erd/generate_graph.py +453 -0
- mcli/lib/files/files.py +76 -0
- mcli/lib/fs/fs.py +109 -0
- mcli/lib/lib.py +29 -0
- mcli/lib/logger/logger.py +611 -0
- mcli/lib/performance/optimizer.py +409 -0
- mcli/lib/performance/rust_bridge.py +502 -0
- mcli/lib/performance/uvloop_config.py +154 -0
- mcli/lib/pickles/pickles.py +50 -0
- mcli/lib/search/cached_vectorizer.py +479 -0
- mcli/lib/services/data_pipeline.py +460 -0
- mcli/lib/services/lsh_client.py +441 -0
- mcli/lib/services/redis_service.py +387 -0
- mcli/lib/shell/shell.py +137 -0
- mcli/lib/toml/toml.py +33 -0
- mcli/lib/ui/styling.py +47 -0
- mcli/lib/ui/visual_effects.py +634 -0
- mcli/lib/watcher/watcher.py +185 -0
- mcli/ml/api/app.py +215 -0
- mcli/ml/api/middleware.py +224 -0
- mcli/ml/api/routers/admin_router.py +12 -0
- mcli/ml/api/routers/auth_router.py +244 -0
- mcli/ml/api/routers/backtest_router.py +12 -0
- mcli/ml/api/routers/data_router.py +12 -0
- mcli/ml/api/routers/model_router.py +302 -0
- mcli/ml/api/routers/monitoring_router.py +12 -0
- mcli/ml/api/routers/portfolio_router.py +12 -0
- mcli/ml/api/routers/prediction_router.py +267 -0
- mcli/ml/api/routers/trade_router.py +12 -0
- mcli/ml/api/routers/websocket_router.py +76 -0
- mcli/ml/api/schemas.py +64 -0
- mcli/ml/auth/auth_manager.py +425 -0
- mcli/ml/auth/models.py +154 -0
- mcli/ml/auth/permissions.py +302 -0
- mcli/ml/backtesting/backtest_engine.py +502 -0
- mcli/ml/backtesting/performance_metrics.py +393 -0
- mcli/ml/cache.py +400 -0
- mcli/ml/cli/main.py +398 -0
- mcli/ml/config/settings.py +394 -0
- mcli/ml/configs/dvc_config.py +230 -0
- mcli/ml/configs/mlflow_config.py +131 -0
- mcli/ml/configs/mlops_manager.py +293 -0
- mcli/ml/dashboard/app.py +532 -0
- mcli/ml/dashboard/app_integrated.py +738 -0
- mcli/ml/dashboard/app_supabase.py +560 -0
- mcli/ml/dashboard/app_training.py +615 -0
- mcli/ml/dashboard/cli.py +51 -0
- mcli/ml/data_ingestion/api_connectors.py +501 -0
- mcli/ml/data_ingestion/data_pipeline.py +567 -0
- mcli/ml/data_ingestion/stream_processor.py +512 -0
- mcli/ml/database/migrations/env.py +94 -0
- mcli/ml/database/models.py +667 -0
- mcli/ml/database/session.py +200 -0
- mcli/ml/experimentation/ab_testing.py +845 -0
- mcli/ml/features/ensemble_features.py +607 -0
- mcli/ml/features/political_features.py +676 -0
- mcli/ml/features/recommendation_engine.py +809 -0
- mcli/ml/features/stock_features.py +573 -0
- mcli/ml/features/test_feature_engineering.py +346 -0
- mcli/ml/logging.py +85 -0
- mcli/ml/mlops/data_versioning.py +518 -0
- mcli/ml/mlops/experiment_tracker.py +377 -0
- mcli/ml/mlops/model_serving.py +481 -0
- mcli/ml/mlops/pipeline_orchestrator.py +614 -0
- mcli/ml/models/base_models.py +324 -0
- mcli/ml/models/ensemble_models.py +675 -0
- mcli/ml/models/recommendation_models.py +474 -0
- mcli/ml/models/test_models.py +487 -0
- mcli/ml/monitoring/drift_detection.py +676 -0
- mcli/ml/monitoring/metrics.py +45 -0
- mcli/ml/optimization/portfolio_optimizer.py +834 -0
- mcli/ml/preprocessing/data_cleaners.py +451 -0
- mcli/ml/preprocessing/feature_extractors.py +491 -0
- mcli/ml/preprocessing/ml_pipeline.py +382 -0
- mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
- mcli/ml/preprocessing/test_preprocessing.py +294 -0
- mcli/ml/scripts/populate_sample_data.py +200 -0
- mcli/ml/tasks.py +400 -0
- mcli/ml/tests/test_integration.py +429 -0
- mcli/ml/tests/test_training_dashboard.py +387 -0
- mcli/public/oi/oi.py +15 -0
- mcli/public/public.py +4 -0
- mcli/self/self_cmd.py +1246 -0
- mcli/workflow/daemon/api_daemon.py +800 -0
- mcli/workflow/daemon/async_command_database.py +681 -0
- mcli/workflow/daemon/async_process_manager.py +591 -0
- mcli/workflow/daemon/client.py +530 -0
- mcli/workflow/daemon/commands.py +1196 -0
- mcli/workflow/daemon/daemon.py +905 -0
- mcli/workflow/daemon/daemon_api.py +59 -0
- mcli/workflow/daemon/enhanced_daemon.py +571 -0
- mcli/workflow/daemon/process_cli.py +244 -0
- mcli/workflow/daemon/process_manager.py +439 -0
- mcli/workflow/daemon/test_daemon.py +275 -0
- mcli/workflow/dashboard/dashboard_cmd.py +113 -0
- mcli/workflow/docker/docker.py +0 -0
- mcli/workflow/file/file.py +100 -0
- mcli/workflow/gcloud/config.toml +21 -0
- mcli/workflow/gcloud/gcloud.py +58 -0
- mcli/workflow/git_commit/ai_service.py +328 -0
- mcli/workflow/git_commit/commands.py +430 -0
- mcli/workflow/lsh_integration.py +355 -0
- mcli/workflow/model_service/client.py +594 -0
- mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
- mcli/workflow/model_service/lightweight_embedder.py +397 -0
- mcli/workflow/model_service/lightweight_model_server.py +714 -0
- mcli/workflow/model_service/lightweight_test.py +241 -0
- mcli/workflow/model_service/model_service.py +1955 -0
- mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
- mcli/workflow/model_service/pdf_processor.py +386 -0
- mcli/workflow/model_service/test_efficient_runner.py +234 -0
- mcli/workflow/model_service/test_example.py +315 -0
- mcli/workflow/model_service/test_integration.py +131 -0
- mcli/workflow/model_service/test_new_features.py +149 -0
- mcli/workflow/openai/openai.py +99 -0
- mcli/workflow/politician_trading/commands.py +1790 -0
- mcli/workflow/politician_trading/config.py +134 -0
- mcli/workflow/politician_trading/connectivity.py +490 -0
- mcli/workflow/politician_trading/data_sources.py +395 -0
- mcli/workflow/politician_trading/database.py +410 -0
- mcli/workflow/politician_trading/demo.py +248 -0
- mcli/workflow/politician_trading/models.py +165 -0
- mcli/workflow/politician_trading/monitoring.py +413 -0
- mcli/workflow/politician_trading/scrapers.py +966 -0
- mcli/workflow/politician_trading/scrapers_california.py +412 -0
- mcli/workflow/politician_trading/scrapers_eu.py +377 -0
- mcli/workflow/politician_trading/scrapers_uk.py +350 -0
- mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
- mcli/workflow/politician_trading/supabase_functions.py +354 -0
- mcli/workflow/politician_trading/workflow.py +852 -0
- mcli/workflow/registry/registry.py +180 -0
- mcli/workflow/repo/repo.py +223 -0
- mcli/workflow/scheduler/commands.py +493 -0
- mcli/workflow/scheduler/cron_parser.py +238 -0
- mcli/workflow/scheduler/job.py +182 -0
- mcli/workflow/scheduler/monitor.py +139 -0
- mcli/workflow/scheduler/persistence.py +324 -0
- mcli/workflow/scheduler/scheduler.py +679 -0
- mcli/workflow/sync/sync_cmd.py +437 -0
- mcli/workflow/sync/test_cmd.py +314 -0
- mcli/workflow/videos/videos.py +242 -0
- mcli/workflow/wakatime/wakatime.py +11 -0
- mcli/workflow/workflow.py +37 -0
- mcli_framework-7.0.0.dist-info/METADATA +479 -0
- mcli_framework-7.0.0.dist-info/RECORD +186 -0
- mcli_framework-7.0.0.dist-info/WHEEL +5 -0
- mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
- mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
- mcli_framework-7.0.0.dist-info/top_level.txt +1 -0

@@ -0,0 +1,59 @@
from typing import List, Optional

from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel

from mcli.workflow.daemon.daemon import DaemonService

app = FastAPI(title="MCLI Daemon API")
service = DaemonService()


class CommandOut(BaseModel):
    id: str
    name: str
    description: Optional[str]
    language: str
    group: Optional[str]
    tags: List[str]
    created_at: Optional[str]
    updated_at: Optional[str]
    execution_count: int
    last_executed: Optional[str]
    is_active: bool


class ExecuteRequest(BaseModel):
    command_name: str
    args: Optional[List[str]] = []


@app.get("/commands", response_model=List[CommandOut])
def list_commands(all: bool = Query(False, description="Show all commands, including inactive")):
    commands = service.db.get_all_commands(include_inactive=all)
    return [
        CommandOut(
            id=cmd.id,
            name=cmd.name,
            description=cmd.description,
            language=cmd.language,
            group=cmd.group,
            tags=cmd.tags,
            created_at=cmd.created_at.isoformat() if cmd.created_at else None,
            updated_at=cmd.updated_at.isoformat() if cmd.updated_at else None,
            execution_count=cmd.execution_count,
            last_executed=cmd.last_executed.isoformat() if cmd.last_executed else None,
            is_active=cmd.is_active,
        )
        for cmd in commands
    ]


@app.post("/execute")
def execute_command(req: ExecuteRequest):
    commands = service.db.get_all_commands()
    cmd = next((c for c in commands if c.name == req.command_name), None)
    if not cmd:
        raise HTTPException(status_code=404, detail=f"Command '{req.command_name}' not found.")
    result = service.executor.execute_command(cmd, req.args or [])
    return result
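
This 59-line hunk matches the mcli/workflow/daemon/daemon_api.py entry in the file list above: a small FastAPI wrapper around DaemonService with a read endpoint (GET /commands) and an execution endpoint (POST /execute). A minimal client sketch follows; the httpx dependency, the uvicorn launch command, and the host/port are illustrative assumptions, not something the diff specifies.

# Hypothetical client for the daemon API above. Assumes the app is served
# locally, e.g.:  uvicorn mcli.workflow.daemon.daemon_api:app --port 8000
import httpx

BASE_URL = "http://127.0.0.1:8000"  # assumed host/port


def list_commands(include_inactive: bool = False) -> list[dict]:
    # GET /commands?all=true returns the CommandOut records as JSON
    resp = httpx.get(f"{BASE_URL}/commands", params={"all": include_inactive})
    resp.raise_for_status()
    return resp.json()


def run_command(name: str, args: list[str] | None = None) -> dict:
    # POST /execute takes the ExecuteRequest payload defined in the hunk
    resp = httpx.post(f"{BASE_URL}/execute", json={"command_name": name, "args": args or []})
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    for cmd in list_commands(include_inactive=True):
        print(cmd["name"], "-", cmd.get("description") or "")
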
@@ -0,0 +1,571 @@
"""
Enhanced async daemon with Rust extensions and performance optimizations
"""

import asyncio
import json
import os
import signal
import uuid
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

import aiosqlite
import redis.asyncio as redis

from mcli.lib.logger.logger import get_logger
from mcli.lib.performance.rust_bridge import (
    get_command_matcher,
    get_file_watcher,
    get_process_manager,
    get_tfidf_vectorizer,
)
from mcli.lib.search.cached_vectorizer import CachedTfIdfVectorizer, SmartVectorizerManager
from mcli.workflow.daemon.async_command_database import (
    AsyncCommandDatabase,
    Command,
    ExecutionRecord,
)
from mcli.workflow.daemon.async_process_manager import AsyncProcessManager

logger = get_logger(__name__)


class EnhancedDaemon:
    """High-performance async daemon with Rust extensions"""

    def __init__(
        self, db_path: Optional[str] = None, redis_url: Optional[str] = None, use_rust: bool = True
    ):

        self.use_rust = use_rust
        self.running = False
        self.shutdown_event = asyncio.Event()

        # Core components
        self.command_db: Optional[AsyncCommandDatabase] = None
        self.process_manager: Optional[AsyncProcessManager] = None
        self.vectorizer_manager: Optional[SmartVectorizerManager] = None
        self.file_watcher = None
        self.command_matcher = None

        # Configuration
        self.db_path = db_path or Path.home() / ".local" / "mcli" / "daemon" / "enhanced.db"
        self.redis_url = redis_url or "redis://localhost:6379"

        # Performance metrics
        self.metrics = {
            "commands_executed": 0,
            "search_queries": 0,
            "cache_hits": 0,
            "cache_misses": 0,
            "start_time": None,
        }

    async def initialize(self):
        """Initialize all daemon components"""
        logger.info("Initializing Enhanced Daemon...")

        try:
            # Initialize database
            self.command_db = AsyncCommandDatabase(
                db_path=str(self.db_path), redis_url=self.redis_url
            )
            await self.command_db.initialize()

            # Initialize process manager
            self.process_manager = AsyncProcessManager(
                db_path=str(self.db_path.parent / "processes.db"), redis_url=self.redis_url
            )
            await self.process_manager.initialize()

            # Initialize vectorizer manager
            self.vectorizer_manager = SmartVectorizerManager(redis_url=self.redis_url)

            # Initialize Rust components if available
            await self._initialize_rust_components()

            # Set up signal handlers
            self._setup_signal_handlers()

            logger.info("Enhanced Daemon initialized successfully")

        except Exception as e:
            logger.error(f"Failed to initialize Enhanced Daemon: {e}")
            raise

    async def _initialize_rust_components(self):
        """Initialize Rust-based components"""
        try:
            # File watcher
            self.file_watcher = get_file_watcher(use_rust=self.use_rust)
            if self.file_watcher:
                # Start watching command directories
                watch_dirs = [
                    str(Path.home() / ".local" / "mcli" / "commands"),
                    str(Path.cwd() / "commands"),
                ]

                existing_dirs = [d for d in watch_dirs if Path(d).exists()]
                if existing_dirs:
                    self.file_watcher.start_watching(existing_dirs)
                    logger.info(f"File watcher monitoring: {existing_dirs}")

            # Command matcher
            self.command_matcher = get_command_matcher(use_rust=self.use_rust)
            if self.command_matcher:
                # Load existing commands into matcher
                commands = await self.command_db.get_all_commands()
                if commands:
                    command_dicts = [
                        {
                            "id": cmd.id,
                            "name": cmd.name,
                            "description": cmd.description,
                            "tags": cmd.tags,
                            "execution_count": cmd.execution_count,
                        }
                        for cmd in commands
                    ]

                    if hasattr(self.command_matcher, "add_commands"):
                        self.command_matcher.add_commands(command_dicts)

                    logger.info(f"Command matcher loaded {len(commands)} commands")

        except Exception as e:
            logger.warning(f"Failed to initialize some Rust components: {e}")

    def _setup_signal_handlers(self):
        """Setup signal handlers for graceful shutdown"""

        def signal_handler():
            logger.info("Received shutdown signal")
            asyncio.create_task(self.shutdown())

        for sig in (signal.SIGTERM, signal.SIGINT):
            try:
                asyncio.get_event_loop().add_signal_handler(sig, signal_handler)
            except NotImplementedError:
                # Windows doesn't support add_signal_handler
                signal.signal(sig, lambda s, f: asyncio.create_task(self.shutdown()))

    async def start(self):
        """Start the daemon"""
        if self.running:
            logger.warning("Daemon is already running")
            return

        logger.info("Starting Enhanced Daemon...")
        self.running = True
        self.metrics["start_time"] = datetime.now()

        # Start background tasks
        tasks = [
            asyncio.create_task(self._file_watcher_loop()),
            asyncio.create_task(self._maintenance_loop()),
            asyncio.create_task(self._metrics_loop()),
        ]

        try:
            # Wait for shutdown signal
            await self.shutdown_event.wait()

        finally:
            # Cancel background tasks
            for task in tasks:
                task.cancel()

            await asyncio.gather(*tasks, return_exceptions=True)

            logger.info("Enhanced Daemon stopped")

    async def shutdown(self):
        """Graceful shutdown"""
        if not self.running:
            return

        logger.info("Shutting down Enhanced Daemon...")
        self.running = False
        self.shutdown_event.set()

        # Stop file watcher
        if self.file_watcher and hasattr(self.file_watcher, "stop_watching"):
            try:
                self.file_watcher.stop_watching()
            except Exception as e:
                logger.warning(f"Error stopping file watcher: {e}")

        # Close components
        if self.command_db:
            await self.command_db.close()

        if self.process_manager:
            await self.process_manager.close()

        if self.vectorizer_manager:
            await self.vectorizer_manager.close_all()

    async def _file_watcher_loop(self):
        """Background loop for processing file system events"""
        if not self.file_watcher:
            return

        while self.running:
            try:
                if hasattr(self.file_watcher, "get_events"):
                    events = self.file_watcher.get_events()

                    for event in events:
                        await self._handle_file_event(event)

                await asyncio.sleep(1)  # Check for events every second

            except Exception as e:
                logger.error(f"Error in file watcher loop: {e}")
                await asyncio.sleep(5)  # Back off on error

    async def _handle_file_event(self, event):
        """Handle a file system event"""
        try:
            event_type = event.get("event_type") or event.get("type", "unknown")
            path = event.get("path", "")

            if not path.endswith(".json"):
                return  # Only process JSON command files

            if event_type in ["created", "modified"]:
                await self._reload_command_file(path)
            elif event_type == "deleted":
                await self._remove_command_file(path)

        except Exception as e:
            logger.error(f"Error handling file event {event}: {e}")

    async def _reload_command_file(self, file_path: str):
        """Reload a command from a JSON file"""
        try:
            with open(file_path, "r") as f:
                data = json.load(f)

            command = Command(
                id=data.get("id") or str(uuid.uuid4()),
                name=data["name"],
                description=data.get("description", ""),
                code=data["code"],
                language=data["language"],
                group=data.get("group"),
                tags=data.get("tags", []),
                version=data.get("version", "1.0"),
                author=data.get("author"),
                dependencies=data.get("dependencies", []),
            )

            # Add/update in database
            existing = await self.command_db.get_command(command.id)
            if existing:
                await self.command_db.update_command(command)
            else:
                await self.command_db.add_command(command)

            # Update command matcher
            if self.command_matcher and hasattr(self.command_matcher, "add_command"):
                command_dict = {
                    "id": command.id,
                    "name": command.name,
                    "description": command.description,
                    "tags": command.tags,
                    "execution_count": command.execution_count,
                }
                self.command_matcher.add_command(command_dict)

            logger.info(f"Reloaded command: {command.name}")

        except Exception as e:
            logger.error(f"Failed to reload command file {file_path}: {e}")

    async def _remove_command_file(self, file_path: str):
        """Remove a command when its file is deleted"""
        try:
            # Extract command ID from filename
            command_id = Path(file_path).stem

            # Soft delete from database
            await self.command_db.delete_command(command_id)

            logger.info(f"Removed command: {command_id}")

        except Exception as e:
            logger.error(f"Failed to remove command file {file_path}: {e}")

    async def _maintenance_loop(self):
        """Background maintenance tasks"""
        while self.running:
            try:
                # Clean up finished processes
                if self.process_manager:
                    cleaned = await self.process_manager.cleanup_finished()
                    if cleaned:
                        logger.debug(f"Cleaned up {len(cleaned)} finished processes")

                # Update search indexes
                await self._update_search_indexes()

                # Wait 5 minutes before next maintenance
                await asyncio.sleep(300)

            except Exception as e:
                logger.error(f"Error in maintenance loop: {e}")
                await asyncio.sleep(60)

    async def _metrics_loop(self):
        """Background metrics collection"""
        while self.running:
            try:
                # Log performance metrics every 10 minutes
                uptime = datetime.now() - self.metrics["start_time"]

                logger.info(
                    f"Daemon metrics - Uptime: {uptime}, "
                    f"Commands executed: {self.metrics['commands_executed']}, "
                    f"Search queries: {self.metrics['search_queries']}"
                )

                await asyncio.sleep(600)  # 10 minutes

            except Exception as e:
                logger.error(f"Error in metrics loop: {e}")
                await asyncio.sleep(60)

    async def _update_search_indexes(self):
        """Update search indexes for better performance"""
        try:
            # Get all commands
            commands = await self.command_db.get_all_commands()

            if not commands:
                return

            # Prepare documents for vectorization
            documents = []
            for cmd in commands:
                text_parts = [cmd.name, cmd.description or ""]
                text_parts.extend(cmd.tags or [])
                documents.append(" ".join(text_parts))

            # Update vectorizer cache
            vectorizer = await self.vectorizer_manager.get_vectorizer("commands")
            await vectorizer.fit_transform(documents)

            logger.debug(f"Updated search indexes for {len(commands)} commands")

        except Exception as e:
            logger.error(f"Failed to update search indexes: {e}")

    # Public API methods

    async def add_command(self, command_data: Dict[str, Any]) -> str:
        """Add a new command"""
        command = Command(
            id=command_data.get("id") or str(uuid.uuid4()),
            name=command_data["name"],
            description=command_data.get("description", ""),
            code=command_data["code"],
            language=command_data["language"],
            group=command_data.get("group"),
            tags=command_data.get("tags", []),
            version=command_data.get("version", "1.0"),
            author=command_data.get("author"),
            dependencies=command_data.get("dependencies", []),
        )

        command_id = await self.command_db.add_command(command)

        # Update command matcher
        if self.command_matcher and hasattr(self.command_matcher, "add_command"):
            command_dict = {
                "id": command.id,
                "name": command.name,
                "description": command.description,
                "tags": command.tags,
                "execution_count": command.execution_count,
            }
            self.command_matcher.add_command(command_dict)

        return command_id

    async def search_commands(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Search for commands"""
        self.metrics["search_queries"] += 1

        # Try Rust command matcher first
        if self.command_matcher and hasattr(self.command_matcher, "search"):
            try:
                results = self.command_matcher.search(query, limit)
                return [
                    {
                        "command": result.get("command", {}),
                        "score": result.get("score", 0.0),
                        "match_type": result.get("match_type", "rust"),
                        "matched_fields": result.get("matched_fields", []),
                    }
                    for result in results
                ]
            except Exception as e:
                logger.warning(f"Rust command matcher failed: {e}")

        # Fallback to database search
        commands = await self.command_db.search_commands(query, limit)

        # Use vectorizer for similarity scoring
        if commands and self.vectorizer_manager:
            try:
                command_dicts = [
                    {
                        "id": cmd.id,
                        "name": cmd.name,
                        "description": cmd.description,
                        "tags": cmd.tags,
                    }
                    for cmd in commands
                ]

                results = await self.vectorizer_manager.search_commands(query, command_dicts, limit)

                return [
                    {
                        "command": cmd_dict,
                        "score": score,
                        "match_type": "vectorized",
                        "matched_fields": ["name", "description", "tags"],
                    }
                    for cmd_dict, score in results
                ]

            except Exception as e:
                logger.warning(f"Vectorized search failed: {e}")

        # Basic fallback
        return [
            {
                "command": {
                    "id": cmd.id,
                    "name": cmd.name,
                    "description": cmd.description,
                    "tags": cmd.tags,
                },
                "score": 1.0,
                "match_type": "database",
                "matched_fields": ["name"],
            }
            for cmd in commands
        ]

    async def execute_command(
        self, command_id: str, context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Execute a command"""
        self.metrics["commands_executed"] += 1

        # Get command
        command = await self.command_db.get_command(command_id)
        if not command:
            raise ValueError(f"Command not found: {command_id}")

        # Start process
        process_id = await self.process_manager.start_process(
            name=f"{command.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
            command=self._get_command_executor(command.language),
            args=self._prepare_command_args(command),
            working_dir=context.get("working_dir") if context else None,
            environment=context.get("environment") if context else None,
            timeout=context.get("timeout") if context else None,
        )

        # Record execution
        execution = ExecutionRecord(
            id=str(uuid.uuid4()),
            command_id=command_id,
            executed_at=datetime.now(),
            status="started",
            user=context.get("user") if context else None,
            context=context,
        )

        await self.command_db.record_execution(execution)

        return process_id

    def _get_command_executor(self, language: str) -> str:
        """Get the appropriate executor for a language"""
        executors = {
            "python": "python",
            "node": "node",
            "shell": "bash",
            "lua": "lua",
            "rust": "cargo",
        }
        return executors.get(language, "bash")

    def _prepare_command_args(self, command: Command) -> List[str]:
        """Prepare command arguments based on language"""
        if command.language == "python":
            return ["-c", command.code]
        elif command.language == "node":
            return ["-e", command.code]
        elif command.language == "shell":
            return ["-c", command.code]
        elif command.language == "lua":
            return ["-e", command.code]
        else:
            return [command.code]

    async def get_status(self) -> Dict[str, Any]:
        """Get daemon status"""
        return {
            "running": self.running,
            "uptime": (
                (datetime.now() - self.metrics["start_time"]).total_seconds()
                if self.metrics["start_time"]
                else 0
            ),
            "metrics": self.metrics.copy(),
            "components": {
                "command_db": self.command_db is not None,
                "process_manager": self.process_manager is not None,
                "vectorizer_manager": self.vectorizer_manager is not None,
                "file_watcher": self.file_watcher is not None,
                "command_matcher": self.command_matcher is not None,
            },
        }


# Daemon instance management
_daemon_instance: Optional[EnhancedDaemon] = None


async def get_daemon() -> EnhancedDaemon:
    """Get the global daemon instance"""
    global _daemon_instance

    if _daemon_instance is None:
        _daemon_instance = EnhancedDaemon()
        await _daemon_instance.initialize()

    return _daemon_instance


async def start_daemon():
    """Start the global daemon"""
    daemon = await get_daemon()
    await daemon.start()


async def stop_daemon():
    """Stop the global daemon"""
    global _daemon_instance

    if _daemon_instance:
        await _daemon_instance.shutdown()
        _daemon_instance = None
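
This 571-line hunk matches the mcli/workflow/daemon/enhanced_daemon.py entry in the file list: an asyncio daemon that wires together the async command database, the async process manager, the cached TF-IDF vectorizer, and optional Rust-backed file watching and command matching, and it ends with module-level helpers (get_daemon, start_daemon, stop_daemon) that manage a single global instance. A minimal lifecycle sketch follows, assuming a local Redis at redis://localhost:6379 and a writable ~/.local/mcli/daemon directory; the sample command registered here is made up for illustration.

# Hypothetical driver for the EnhancedDaemon module above; the module path and
# method signatures are taken from the hunk, everything else is assumed.
import asyncio

from mcli.workflow.daemon.enhanced_daemon import get_daemon, stop_daemon


async def main() -> None:
    daemon = await get_daemon()  # lazily creates and initializes the singleton

    # Register a throwaway command and search for it. The payload keys mirror
    # the add_command()/Command usage in the diff; the command itself is made up.
    command_id = await daemon.add_command(
        {
            "name": "hello",
            "description": "Print a greeting",
            "code": "print('hello from mcli')",
            "language": "python",
            "tags": ["demo"],
        }
    )
    matches = await daemon.search_commands("greeting", limit=5)
    print(command_id, [m["score"] for m in matches])

    # daemon.start() blocks until shutdown_event is set, so run it as a task
    # and stop it shortly afterwards for this demonstration.
    runner = asyncio.create_task(daemon.start())
    await asyncio.sleep(1)
    await stop_daemon()
    await runner


if __name__ == "__main__":
    asyncio.run(main())

Because start() blocks on shutdown_event.wait(), the sketch runs it as a background task and relies on stop_daemon() to set the event, close the database, process manager, and vectorizer components, and let the background loops wind down.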