dtSpark 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dtSpark/__init__.py +0 -0
- dtSpark/_description.txt +1 -0
- dtSpark/_full_name.txt +1 -0
- dtSpark/_licence.txt +21 -0
- dtSpark/_metadata.yaml +6 -0
- dtSpark/_name.txt +1 -0
- dtSpark/_version.txt +1 -0
- dtSpark/aws/__init__.py +7 -0
- dtSpark/aws/authentication.py +296 -0
- dtSpark/aws/bedrock.py +578 -0
- dtSpark/aws/costs.py +318 -0
- dtSpark/aws/pricing.py +580 -0
- dtSpark/cli_interface.py +2645 -0
- dtSpark/conversation_manager.py +3050 -0
- dtSpark/core/__init__.py +12 -0
- dtSpark/core/application.py +3355 -0
- dtSpark/core/context_compaction.py +735 -0
- dtSpark/daemon/__init__.py +104 -0
- dtSpark/daemon/__main__.py +10 -0
- dtSpark/daemon/action_monitor.py +213 -0
- dtSpark/daemon/daemon_app.py +730 -0
- dtSpark/daemon/daemon_manager.py +289 -0
- dtSpark/daemon/execution_coordinator.py +194 -0
- dtSpark/daemon/pid_file.py +169 -0
- dtSpark/database/__init__.py +482 -0
- dtSpark/database/autonomous_actions.py +1191 -0
- dtSpark/database/backends.py +329 -0
- dtSpark/database/connection.py +122 -0
- dtSpark/database/conversations.py +520 -0
- dtSpark/database/credential_prompt.py +218 -0
- dtSpark/database/files.py +205 -0
- dtSpark/database/mcp_ops.py +355 -0
- dtSpark/database/messages.py +161 -0
- dtSpark/database/schema.py +673 -0
- dtSpark/database/tool_permissions.py +186 -0
- dtSpark/database/usage.py +167 -0
- dtSpark/files/__init__.py +4 -0
- dtSpark/files/manager.py +322 -0
- dtSpark/launch.py +39 -0
- dtSpark/limits/__init__.py +10 -0
- dtSpark/limits/costs.py +296 -0
- dtSpark/limits/tokens.py +342 -0
- dtSpark/llm/__init__.py +17 -0
- dtSpark/llm/anthropic_direct.py +446 -0
- dtSpark/llm/base.py +146 -0
- dtSpark/llm/context_limits.py +438 -0
- dtSpark/llm/manager.py +177 -0
- dtSpark/llm/ollama.py +578 -0
- dtSpark/mcp_integration/__init__.py +5 -0
- dtSpark/mcp_integration/manager.py +653 -0
- dtSpark/mcp_integration/tool_selector.py +225 -0
- dtSpark/resources/config.yaml.template +631 -0
- dtSpark/safety/__init__.py +22 -0
- dtSpark/safety/llm_service.py +111 -0
- dtSpark/safety/patterns.py +229 -0
- dtSpark/safety/prompt_inspector.py +442 -0
- dtSpark/safety/violation_logger.py +346 -0
- dtSpark/scheduler/__init__.py +20 -0
- dtSpark/scheduler/creation_tools.py +599 -0
- dtSpark/scheduler/execution_queue.py +159 -0
- dtSpark/scheduler/executor.py +1152 -0
- dtSpark/scheduler/manager.py +395 -0
- dtSpark/tools/__init__.py +4 -0
- dtSpark/tools/builtin.py +833 -0
- dtSpark/web/__init__.py +20 -0
- dtSpark/web/auth.py +152 -0
- dtSpark/web/dependencies.py +37 -0
- dtSpark/web/endpoints/__init__.py +17 -0
- dtSpark/web/endpoints/autonomous_actions.py +1125 -0
- dtSpark/web/endpoints/chat.py +621 -0
- dtSpark/web/endpoints/conversations.py +353 -0
- dtSpark/web/endpoints/main_menu.py +547 -0
- dtSpark/web/endpoints/streaming.py +421 -0
- dtSpark/web/server.py +578 -0
- dtSpark/web/session.py +167 -0
- dtSpark/web/ssl_utils.py +195 -0
- dtSpark/web/static/css/dark-theme.css +427 -0
- dtSpark/web/static/js/actions.js +1101 -0
- dtSpark/web/static/js/chat.js +614 -0
- dtSpark/web/static/js/main.js +496 -0
- dtSpark/web/static/js/sse-client.js +242 -0
- dtSpark/web/templates/actions.html +408 -0
- dtSpark/web/templates/base.html +93 -0
- dtSpark/web/templates/chat.html +814 -0
- dtSpark/web/templates/conversations.html +350 -0
- dtSpark/web/templates/goodbye.html +81 -0
- dtSpark/web/templates/login.html +90 -0
- dtSpark/web/templates/main_menu.html +983 -0
- dtSpark/web/templates/new_conversation.html +191 -0
- dtSpark/web/web_interface.py +137 -0
- dtspark-1.0.4.dist-info/METADATA +187 -0
- dtspark-1.0.4.dist-info/RECORD +96 -0
- dtspark-1.0.4.dist-info/WHEEL +5 -0
- dtspark-1.0.4.dist-info/entry_points.txt +3 -0
- dtspark-1.0.4.dist-info/licenses/LICENSE +21 -0
- dtspark-1.0.4.dist-info/top_level.txt +1 -0
dtSpark/daemon/daemon_app.py

@@ -0,0 +1,730 @@

"""
Daemon application for autonomous action execution.

Extends AbstractApp from dtPyAppFramework to provide a long-running
background process for executing scheduled autonomous actions.
"""

import os
import sys
import logging
import socket
import uuid
from typing import Optional, Dict, Any

from dtPyAppFramework.application import AbstractApp
from dtPyAppFramework.process import ProcessManager
from dtPyAppFramework.settings import Settings

from .pid_file import PIDFile
from .action_monitor import ActionChangeMonitor
from .execution_coordinator import ExecutionCoordinator

# Import version info
from dtSpark.core.application import version, full_name, agent_type

logger = logging.getLogger(__name__)


class DaemonApplication(AbstractApp):
    """
    Daemon process for executing autonomous actions.

    Runs independently of CLI/Web interface, polls database for changes,
    and executes scheduled actions.
    """

    def __init__(self):
        """Initialise the daemon application."""
        # Use same short_name as main app to share secret store (user_guid, etc.)
        super().__init__(
            short_name=agent_type(),
            full_name=f"{full_name()} (Daemon Mode)",
            version=version(),
            description="Background daemon for autonomous action execution",
            console_app=True
        )

        # Core components
        self.settings: Optional[Settings] = None
        self.database = None
        self.llm_manager = None
        self.mcp_manager = None

        # Scheduler components
        self.action_scheduler = None
        self.execution_queue = None
        self.action_executor = None

        # Daemon-specific components
        self.action_monitor = None
        self.execution_coordinator = None
        self.pid_file = None

        # Identifiers
        self.daemon_id = None
        self.user_guid = None
        self.hostname = socket.gethostname()

    def define_args(self, arg_parser):
        """Define daemon-specific command-line arguments."""
        arg_parser.add_argument(
            '--poll-interval',
            type=int,
            default=30,
            help='Seconds between database polls for changes (default: 30)'
        )
        arg_parser.add_argument(
            '--daemon-id',
            type=str,
            default=None,
            help='Unique daemon identifier (default: auto-generated)'
        )
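Given these two flags and the package's dtSpark/daemon/__main__.py entry module, the daemon can presumably be launched in module form; the console-script name in entry_points.txt is not shown in this diff, so treat the exact invocation as an assumption:

```
python -m dtSpark.daemon --poll-interval 60 --daemon-id worker-1
```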

    def main(self, args):
        """
        Main daemon loop.

        Implements the long-running application pattern.
        """
        import threading
        self._shutdown_event = threading.Event()

        print("=" * 60)
        print(f"Starting {self.full_name} v{self.version}")
        print("Running in DAEMON MODE")
        print("=" * 60)
        logger.info("=" * 60)
        logger.info(f"Starting {self.full_name} v{self.version}")
        logger.info("Running in DAEMON MODE - background autonomous action execution")
        logger.info("=" * 60)

        # Load settings
        print("Loading settings...")
        self.settings = Settings()
        print("Settings loaded successfully")

        # Get daemon configuration
        poll_interval = args.poll_interval if hasattr(args, 'poll_interval') else 30
        poll_interval = self.settings.get('daemon.poll_interval', poll_interval)

        # Generate or use provided daemon ID
        self.daemon_id = args.daemon_id if hasattr(args, 'daemon_id') and args.daemon_id else str(uuid.uuid4())[:8]

        # Get or create user GUID from secret manager
        # Since daemon uses same short_name as main app, they share the secret store
        self.user_guid = self.settings.secret_manager.get_secret('user_guid', None, 'User_Local_Store')
        if self.user_guid is None:
            # Generate new GUID for this user (only if main app hasn't run yet)
            self.user_guid = str(uuid.uuid4())
            self.settings.secret_manager.set_secret('user_guid', self.user_guid, 'User_Local_Store')
            print(f"Generated new user GUID: {self.user_guid}")
            logger.info(f"Generated new user GUID: {self.user_guid}")
        else:
            print(f"Using existing user GUID: {self.user_guid}")
            logger.info(f"Using existing user GUID: {self.user_guid}")
        print(f"Daemon ID: {self.daemon_id}")
        logger.info(f"Daemon ID: {self.daemon_id}")

        # Set up PID file
        pid_file_path = self.settings.get('daemon.pid_file', './daemon.pid')
        self.pid_file = PIDFile(pid_file_path)

        if not self.pid_file.acquire():
            print("Failed to acquire PID file - another daemon may be running")
            logger.error("Failed to acquire PID file - another daemon may be running")
            return 1

        try:
            # Initialise all components
            print("Initialising components...")
            self._initialise_components()
            print("Components initialised")

            # Register daemon in database
            self._register_daemon()

            # Set up action monitor with callbacks
            lock_timeout = self.settings.get('daemon.lock_timeout', 300)
            self.execution_coordinator = ExecutionCoordinator(
                database=self.database,
                process_id=self.daemon_id,
                user_guid=self.user_guid,
                lock_timeout_seconds=lock_timeout
            )

            self.action_monitor = ActionChangeMonitor(
                database=self.database,
                user_guid=self.user_guid,
                poll_interval=poll_interval,
                on_action_added=self._on_action_added,
                on_action_modified=self._on_action_modified,
                on_action_deleted=self._on_action_deleted,
            )

            # Start all components
            self.execution_queue.start()
            self.action_scheduler.start()
            self.action_monitor.start()

            # Load and schedule existing actions
            self._load_existing_actions()

            # Start heartbeat thread
            self._start_heartbeat()

            print("=" * 60)
            print("Daemon started successfully")
            print(f" PID: {os.getpid()}")
            print(f" Daemon ID: {self.daemon_id}")
            print(f" Poll interval: {poll_interval}s")
            print("Waiting for shutdown signal (SIGTERM/SIGINT)...")
            print("=" * 60)
            logger.info("=" * 60)
            logger.info("Daemon started successfully")
            logger.info(f" PID: {os.getpid()}")
            logger.info(f" Daemon ID: {self.daemon_id}")
            logger.info(f" Poll interval: {poll_interval}s")
            logger.info("Waiting for shutdown signal (SIGTERM/SIGINT)...")
            logger.info("=" * 60)

            # Set up signal handlers for graceful shutdown
            import signal

            def signal_handler(signum, frame):
                print(f"\nReceived signal {signum}, initiating shutdown...")
                logger.info(f"Received signal {signum}, initiating shutdown...")
                self._shutdown_event.set()

            signal.signal(signal.SIGINT, signal_handler)
            signal.signal(signal.SIGTERM, signal_handler)
            if sys.platform == 'win32':
                signal.signal(signal.SIGBREAK, signal_handler)

            # Block until shutdown signal
            # Use polling with timeout to allow signal processing on Windows
            # Also check for stop signal file (Windows cross-console shutdown)
            stop_signal_file = pid_file_path + '.stop'

            while not self._shutdown_event.is_set():
                try:
                    # Check for stop signal file (used by 'daemon stop' on Windows)
                    if os.path.exists(stop_signal_file):
                        print("\nStop signal file detected, initiating shutdown...")
                        logger.info("Stop signal file detected, initiating shutdown...")
                        # Remove the signal file
                        try:
                            os.remove(stop_signal_file)
                        except Exception:
                            pass
                        self._shutdown_event.set()
                        break

                    self._shutdown_event.wait(timeout=1.0)
                except KeyboardInterrupt:
                    print("\nKeyboard interrupt received, initiating shutdown...")
                    logger.info("Keyboard interrupt received, initiating shutdown...")
                    self._shutdown_event.set()
                    break

            print("Shutdown signal received")
            logger.info("Shutdown signal received")

        except Exception as e:
            print(f"Daemon error: {e}")
            import traceback
            traceback.print_exc()
            logger.error(f"Daemon error: {e}", exc_info=True)
            return 1

        finally:
            # Graceful shutdown
            self._shutdown()

        return 0
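The stop-file branch in the loop above exists because a second console on Windows cannot deliver SIGTERM to the daemon. The sender side presumably lives in the `daemon stop` handling in dtSpark/daemon/daemon_manager.py, which this hunk does not show; a minimal sketch, with the function name assumed:

```python
import os

def request_daemon_stop(pid_file_path: str) -> None:
    """Ask a running daemon to shut down by creating '<pid_file>.stop'.

    The daemon's main loop checks for this file roughly once per second,
    treats its existence as a shutdown signal, and deletes it.
    """
    stop_signal_file = pid_file_path + '.stop'  # must match the daemon's naming
    with open(stop_signal_file, 'w') as handle:
        handle.write(str(os.getpid()))  # contents are irrelevant; existence signals stop
```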

    def _initialise_components(self):
        """Initialise database, LLM manager, and scheduler components."""
        print(" - Initialising daemon components...")
        logger.info("Initialising daemon components...")

        # Initialise database
        print(" - Initialising database...")
        from dtSpark.database import ConversationDatabase
        db_type, db_credentials = self._load_database_configuration()
        self.database = ConversationDatabase(
            db_type=db_type,
            credentials=db_credentials,
            user_guid=self.user_guid
        )
        print(" - Database initialised")
        logger.info("Database initialised")

        # Initialise LLM manager
        print(" - Initialising LLM manager...")
        from dtSpark.llm import LLMManager
        self.llm_manager = LLMManager()
        self._configure_llm_providers()
        print(" - LLM manager initialised")
        logger.info("LLM manager initialised")

        # Optionally initialise MCP manager
        mcp_enabled = self.settings.get('mcp_config.enabled', False)
        print(f" - MCP enabled: {mcp_enabled}")
        if mcp_enabled:
            print(" - Initialising MCP manager...")
            self._initialise_mcp()
            print(" - MCP manager initialised")

        # Initialise scheduler components
        print(" - Initialising scheduler components...")
        self._initialise_scheduler()
        print(" - Scheduler components initialised")

        print(" - All daemon components initialised")
        logger.info("All daemon components initialised")

    def _configure_llm_providers(self):
        """Configure LLM providers based on settings."""
        # AWS Bedrock
        aws_enabled = self._get_nested_setting('llm_providers.aws_bedrock.enabled', True)
        if aws_enabled:
            try:
                from dtSpark.llm import BedrockService
                from dtSpark.aws.authentication import AWSAuthenticator

                aws_region = self._get_nested_setting('llm_providers.aws_bedrock.region', 'us-east-1')
                aws_profile = self._get_nested_setting('llm_providers.aws_bedrock.sso_profile', 'default')
                request_timeout = self.settings.get('bedrock.request_timeout', 300)

                # Check for API key authentication
                aws_access_key_id = self._get_nested_setting('llm_providers.aws_bedrock.access_key_id', None)
                aws_secret_access_key = self._get_nested_setting('llm_providers.aws_bedrock.secret_access_key', None)

                authenticator = AWSAuthenticator(
                    region=aws_region,
                    sso_profile=aws_profile,
                    access_key_id=aws_access_key_id,
                    secret_access_key=aws_secret_access_key
                )

                if authenticator.authenticate():
                    bedrock_service = BedrockService(
                        session=authenticator.session,
                        region=aws_region,
                        request_timeout=request_timeout
                    )
                    self.llm_manager.register_provider(bedrock_service)
                    logger.info("AWS Bedrock provider configured")

            except Exception as e:
                logger.warning(f"Failed to configure AWS Bedrock: {e}")

        # Anthropic Direct
        anthropic_enabled = self._get_nested_setting('llm_providers.anthropic.enabled', False)
        logger.debug(f"Anthropic Direct enabled: {anthropic_enabled}")
        if anthropic_enabled:
            try:
                from dtSpark.llm import AnthropicService

                api_key = self._get_nested_setting('llm_providers.anthropic.api_key', None)
                max_tokens = self.settings.get('bedrock.max_tokens', 8192)

                # Log whether API key was found (don't log the actual key)
                if api_key:
                    logger.info(f"Anthropic API key found (starts with: {api_key[:10] if len(api_key) > 10 else 'SHORT'}...)")
                else:
                    logger.warning("Anthropic API key not found in settings")

                anthropic_service = AnthropicService(
                    api_key=api_key,
                    default_max_tokens=max_tokens
                )
                self.llm_manager.register_provider(anthropic_service)
                print(" - Anthropic Direct provider configured")
                logger.info("Anthropic Direct provider configured")

            except Exception as e:
                print(f" - Warning: Failed to configure Anthropic Direct: {e}")
                logger.warning(f"Failed to configure Anthropic Direct: {e}")

        # Ollama
        ollama_enabled = self._get_nested_setting('llm_providers.ollama.enabled', False)
        if ollama_enabled:
            try:
                from dtSpark.llm import OllamaService

                base_url = self._get_nested_setting('llm_providers.ollama.base_url', 'http://localhost:11434')
                verify_ssl = self._get_nested_setting('llm_providers.ollama.verify_ssl', True)

                ollama_service = OllamaService(base_url=base_url, verify_ssl=verify_ssl)
                self.llm_manager.register_provider(ollama_service)
                logger.info("Ollama provider configured")

            except Exception as e:
                logger.warning(f"Failed to configure Ollama: {e}")

        # Log summary of configured providers
        providers = list(self.llm_manager.providers.keys())
        if providers:
            print(f" - LLM providers configured: {', '.join(providers)}")
            logger.info(f"LLM providers configured: {providers}")
        else:
            print(" - Warning: No LLM providers configured!")
            logger.warning("No LLM providers configured - actions will fail to execute")

    def _initialise_mcp(self):
        """Initialise MCP manager if enabled."""
        import asyncio

        try:
            from dtSpark.mcp_integration import MCPManager

            servers_config = self.settings.get('mcp_config.servers', [])
            if not servers_config:
                print(" No MCP servers configured")
                logger.info("No MCP servers configured")
                return

            # Create MCP manager from config
            config_dict = {
                'mcp_config': {
                    'servers': servers_config
                }
            }
            self.mcp_manager = MCPManager.from_config(config_dict)

            num_servers = len(self.mcp_manager.clients)
            print(f" Found {num_servers} MCP server(s) in configuration")
            logger.info(f"Found {num_servers} MCP server(s) in configuration")

            # Connect to all MCP servers
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                results = loop.run_until_complete(self.mcp_manager.connect_all())

                # Count successful connections
                connected_count = sum(1 for success in results.values() if success)
                failed_count = num_servers - connected_count

                # Log each server's status
                for server_name, success in results.items():
                    status = "connected" if success else "FAILED"
                    print(f" - {server_name}: {status}")
                    logger.info(f"MCP server '{server_name}': {status}")

                print(f" MCP servers: {connected_count} connected, {failed_count} failed")
                logger.info(f"MCP servers: {connected_count} connected, {failed_count} failed")

                # Fetch and cache tools if any servers connected
                if connected_count > 0:
                    try:
                        tools = loop.run_until_complete(
                            asyncio.wait_for(
                                self.mcp_manager.list_all_tools(),
                                timeout=15.0
                            )
                        )
                        print(f" MCP tools available: {len(tools)}")
                        logger.info(f"MCP tools available: {len(tools)}")
                    except asyncio.TimeoutError:
                        print(" Warning: Timeout fetching MCP tools")
                        logger.warning("Timeout fetching MCP tools")
                    except Exception as e:
                        print(f" Warning: Failed to fetch MCP tools: {e}")
                        logger.warning(f"Failed to fetch MCP tools: {e}")

            finally:
                # Store the loop for later use
                self.mcp_manager._initialization_loop = loop

        except Exception as e:
            print(f" Failed to initialise MCP manager: {e}")
            logger.warning(f"Failed to initialise MCP manager: {e}")
            self.mcp_manager = None

    def _initialise_scheduler(self):
        """Initialise scheduler components."""
        from dtSpark.scheduler import (
            ActionSchedulerManager,
            ActionExecutionQueue,
            ActionExecutor
        )

        # Get database path
        db_path = self.database.db_path or ':memory:'

        # Build config for executor
        config = {}
        if self.settings:
            config = {
                'conversation': {
                    'max_tool_result_tokens': self.settings.get('conversation.max_tool_result_tokens', 10000),
                    'max_tool_iterations': self.settings.get('conversation.max_tool_iterations', 25),
                },
                'embedded_tools': self.settings.get('embedded_tools', {}),
            }

        # Create get_tools function for MCP tools
        get_tools_func = None
        if self.mcp_manager:
            def get_tools_func():
                import asyncio
                loop = getattr(self.mcp_manager, '_initialization_loop', None)
                if loop and not loop.is_closed():
                    return loop.run_until_complete(self.mcp_manager.list_all_tools())
                return []

        # Create executor
        self.action_executor = ActionExecutor(
            database=self.database,
            llm_manager=self.llm_manager,
            mcp_manager=self.mcp_manager,
            get_tools_func=get_tools_func,
            config=config
        )

        # Create execution queue
        self.execution_queue = ActionExecutionQueue(
            executor_func=self._execute_with_coordination
        )

        # Create scheduler manager
        self.action_scheduler = ActionSchedulerManager(
            db_path=db_path,
            execution_callback=lambda action_id, user_guid: self.execution_queue.enqueue(
                action_id, user_guid, is_manual=False
            )
        )

        self.action_scheduler.initialise()
        logger.info("Scheduler components initialised")

    def _execute_with_coordination(self, action_id: int, user_guid: str, is_manual: bool = False):
        """
        Execute an action with coordination to prevent conflicts.

        Args:
            action_id: Action ID to execute
            user_guid: User GUID
            is_manual: Whether this is a manual execution
        """
        # Try to acquire lock
        if not self.execution_coordinator.try_acquire_lock(action_id):
            lock_holder = self.execution_coordinator.get_lock_holder(action_id)
            logger.info(f"Skipping action {action_id} - locked by {lock_holder}")
            return None

        try:
            return self.action_executor.execute(action_id, user_guid, is_manual)
        finally:
            self.execution_coordinator.release_lock(action_id)
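The try/finally above gives at-most-one execution of an action at a time across the daemon and any CLI/Web process sharing the database, with `lock_timeout_seconds` (from `daemon.lock_timeout`) letting a crashed holder's lock expire. The real ExecutionCoordinator lives in execution_coordinator.py and is not shown in this hunk; a minimal sketch of the pattern, assuming a SQLite `action_locks` table with `action_id` as its primary key:

```python
import sqlite3
import time

def try_acquire_lock(conn: sqlite3.Connection, action_id: int,
                     process_id: str, timeout_s: int = 300) -> bool:
    """Insert a row as an advisory lock; steal the lock only once it is stale."""
    now = time.time()
    with conn:  # one transaction, so two processes cannot both win the insert
        # Expire a stale lock left behind by a crashed holder
        conn.execute("DELETE FROM action_locks WHERE action_id = ? AND locked_at < ?",
                     (action_id, now - timeout_s))
        try:
            conn.execute("INSERT INTO action_locks (action_id, holder, locked_at) "
                         "VALUES (?, ?, ?)", (action_id, process_id, now))
            return True  # this process now holds the lock
        except sqlite3.IntegrityError:
            return False  # PRIMARY KEY(action_id) already held by another process
```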

    def _load_existing_actions(self):
        """Load and schedule existing actions from database."""
        try:
            actions = self.database.get_all_actions(include_disabled=False)
            self.action_scheduler.reload_all_actions(actions)
            logger.info(f"Loaded {len(actions)} existing actions")
        except Exception as e:
            logger.error(f"Failed to load existing actions: {e}")

    def _register_daemon(self):
        """Register daemon in database."""
        try:
            from dtSpark.database.autonomous_actions import register_daemon
            register_daemon(
                conn=self.database.conn,
                daemon_id=self.daemon_id,
                hostname=self.hostname,
                pid=os.getpid(),
                user_guid=self.user_guid
            )
        except Exception as e:
            logger.warning(f"Failed to register daemon: {e}")

    def _unregister_daemon(self):
        """Unregister daemon from database."""
        try:
            from dtSpark.database.autonomous_actions import unregister_daemon
            unregister_daemon(
                conn=self.database.conn,
                daemon_id=self.daemon_id
            )
        except Exception as e:
            logger.warning(f"Failed to unregister daemon: {e}")

    def _start_heartbeat(self):
        """Start heartbeat thread to update daemon registry."""
        import threading

        heartbeat_interval = self.settings.get('daemon.heartbeat_interval', 60)

        def heartbeat_loop():
            while not self._shutdown_event.is_set():
                try:
                    from dtSpark.database.autonomous_actions import update_daemon_heartbeat
                    update_daemon_heartbeat(
                        conn=self.database.conn,
                        daemon_id=self.daemon_id
                    )
                except Exception as e:
                    logger.warning(f"Heartbeat failed: {e}")

                # Wait for the next heartbeat, waking early if the daemon stops
                self._shutdown_event.wait(timeout=heartbeat_interval)

        heartbeat_thread = threading.Thread(
            target=heartbeat_loop,
            name="DaemonHeartbeat",
            daemon=True  # Thread will stop when main process exits
        )
        heartbeat_thread.start()
        logger.info(f"Heartbeat thread started (interval: {heartbeat_interval}s)")
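The heartbeat row is what lets other processes distinguish a live daemon from one that crashed before reaching `_unregister_daemon()`. The real queries live in dtSpark/database/autonomous_actions.py and are not shown in this hunk; a sketch of the reader side, with the table and column names assumed:

```python
import sqlite3
import time

def is_daemon_alive(conn: sqlite3.Connection, daemon_id: str,
                    heartbeat_interval: int = 60, grace: int = 3) -> bool:
    """Treat a daemon as dead once it has missed `grace` consecutive heartbeats."""
    row = conn.execute(
        "SELECT last_heartbeat FROM daemon_registry WHERE daemon_id = ?",
        (daemon_id,),
    ).fetchone()
    if row is None:
        return False  # never registered, or already unregistered cleanly
    return (time.time() - row[0]) < heartbeat_interval * grace
```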

    def _on_action_added(self, action: Dict[str, Any]):
        """Handle new action detected by monitor."""
        logger.info(f"Scheduling new action: {action['name']} (ID: {action['id']})")
        try:
            self.action_scheduler.schedule_action(action)
        except Exception as e:
            logger.error(f"Failed to schedule action {action['id']}: {e}")

    def _on_action_modified(self, action: Dict[str, Any]):
        """Handle modified action detected by monitor."""
        logger.info(f"Rescheduling modified action: {action['name']} (ID: {action['id']})")
        try:
            # Unschedule and reschedule
            self.action_scheduler.unschedule_action(action['id'])
            if action.get('is_enabled', True):
                self.action_scheduler.schedule_action(action)
        except Exception as e:
            logger.error(f"Failed to reschedule action {action['id']}: {e}")

    def _on_action_deleted(self, action_id: int):
        """Handle deleted action detected by monitor."""
        logger.info(f"Unscheduling deleted action: {action_id}")
        try:
            self.action_scheduler.unschedule_action(action_id)
        except Exception as e:
            logger.error(f"Failed to unschedule action {action_id}: {e}")

    def _shutdown(self):
        """Graceful shutdown of all components."""
        logger.info("Shutting down daemon components...")

        # Stop action monitor
        if self.action_monitor:
            self.action_monitor.stop()

        # Stop scheduler
        if self.action_scheduler:
            self.action_scheduler.stop()

        # Stop execution queue
        if self.execution_queue:
            self.execution_queue.stop()

        # Unregister daemon
        self._unregister_daemon()

        # Close database
        if self.database:
            self.database.close()

        # Release PID file
        if self.pid_file:
            self.pid_file.release()

        logger.info("Daemon shutdown complete")

    def _load_database_configuration(self):
        """
        Load database configuration from settings.

        Returns:
            Tuple of (db_type, credentials)
        """
        from dtSpark.database.backends import DatabaseCredentials

        # Get database type from configuration
        db_type = self.settings.get('database.type', 'sqlite')
        print(f" - Database type: {db_type}")
        logger.info(f"Database type: {db_type}")

        # Load credentials from configuration
        if db_type.lower() == 'sqlite':
            db_path = self.settings.get('database.sqlite.path', './data/conversations.db')
            # Expand path relative to current working directory (app root)
            if not os.path.isabs(db_path):
                db_path = os.path.abspath(db_path)

            credentials = DatabaseCredentials(path=db_path)
            print(f" - SQLite database path: {db_path}")
            logger.info(f"SQLite database path: {db_path}")

        else:
            # Remote database - load credentials from config
            db_config_key = f'database.{db_type.lower()}'

            credentials = DatabaseCredentials(
                host=self.settings.get(f'{db_config_key}.host'),
                port=self.settings.get(f'{db_config_key}.port'),
                database=self.settings.get(f'{db_config_key}.database'),
                username=self.settings.get(f'{db_config_key}.username'),
                password=self.settings.get(f'{db_config_key}.password'),
                ssl=self.settings.get(f'{db_config_key}.ssl', False),
                driver=self.settings.get(f'{db_config_key}.driver')  # For MSSQL
            )

            # For daemon mode, credentials must be fully configured
            if not all([credentials.host, credentials.database, credentials.username, credentials.password]):
                raise RuntimeError(
                    f"Database credentials incomplete for {db_type}. "
                    f"Daemon mode requires fully configured database credentials in config.yaml."
                )

            print(f" - Database: {db_type} at {credentials.host}:{credentials.port}/{credentials.database}")
            logger.info(f"Database configured: {db_type} at {credentials.host}:{credentials.port}/{credentials.database}")

        return db_type, credentials
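Collecting the settings keys this file reads, a config.yaml fragment that would satisfy `_load_database_configuration` and the daemon defaults might look like the sketch below. It is assembled from the `settings.get()` calls above, not copied from the shipped resources/config.yaml.template; host, credential, and driver values are placeholders, and any non-sqlite `type` is looked up under `database.<type>`:

```yaml
database:
  type: sqlite                # any other value is read from database.<type> below
  sqlite:
    path: ./data/conversations.db
  # mssql:                    # remote example; daemon mode requires all fields
  #   host: db.example.com
  #   port: 1433
  #   database: dtspark
  #   username: dtspark
  #   password: "change-me"
  #   ssl: true
  #   driver: "ODBC Driver 18 for SQL Server"

daemon:
  poll_interval: 30           # seconds between database polls
  pid_file: ./daemon.pid
  lock_timeout: 300           # seconds before an execution lock is considered stale
  heartbeat_interval: 60      # seconds between daemon-registry heartbeats
```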

    def _get_nested_setting(self, key: str, default=None):
        """
        Get a nested setting value, handling both dot notation and dict navigation.

        Args:
            key: Dot-separated key
            default: Default value if not found

        Returns:
            The setting value, or default if not found
        """
        value = self.settings.get(key, None)
        if value is not None:
            return value

        # Fallback: Navigate the dict manually
        parts = key.split('.')
        if len(parts) > 1:
            # Try getting the root key as a dict
            root_value = self.settings.get(parts[0], None)
            if isinstance(root_value, dict):
                current = root_value
                for part in parts[1:]:
                    if isinstance(current, dict) and part in current:
                        current = current[part]
                    else:
                        return default
                return current

        return default
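Concretely: for `llm_providers.ollama.base_url` the method first asks Settings for the full dotted key, and only when that returns None does it fetch `llm_providers` as a dict and walk the remaining parts by hand. The fallback path, stripped of the Settings dependency for illustration:

```python
settings_data = {'llm_providers': {'ollama': {'base_url': 'http://localhost:11434'}}}

def walk(data, key, default=None):
    """Mirror of the manual dict navigation in _get_nested_setting."""
    current = data
    for part in key.split('.'):
        if isinstance(current, dict) and part in current:
            current = current[part]
        else:
            return default
    return current

assert walk(settings_data, 'llm_providers.ollama.base_url') == 'http://localhost:11434'
assert walk(settings_data, 'llm_providers.ollama.missing', 'n/a') == 'n/a'
```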

    def exiting(self):
        """Called when application is exiting."""
        logger.info("Daemon exiting...")