dtSpark 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. dtSpark/__init__.py +0 -0
  2. dtSpark/_description.txt +1 -0
  3. dtSpark/_full_name.txt +1 -0
  4. dtSpark/_licence.txt +21 -0
  5. dtSpark/_metadata.yaml +6 -0
  6. dtSpark/_name.txt +1 -0
  7. dtSpark/_version.txt +1 -0
  8. dtSpark/aws/__init__.py +7 -0
  9. dtSpark/aws/authentication.py +296 -0
  10. dtSpark/aws/bedrock.py +578 -0
  11. dtSpark/aws/costs.py +318 -0
  12. dtSpark/aws/pricing.py +580 -0
  13. dtSpark/cli_interface.py +2645 -0
  14. dtSpark/conversation_manager.py +3050 -0
  15. dtSpark/core/__init__.py +12 -0
  16. dtSpark/core/application.py +3355 -0
  17. dtSpark/core/context_compaction.py +735 -0
  18. dtSpark/daemon/__init__.py +104 -0
  19. dtSpark/daemon/__main__.py +10 -0
  20. dtSpark/daemon/action_monitor.py +213 -0
  21. dtSpark/daemon/daemon_app.py +730 -0
  22. dtSpark/daemon/daemon_manager.py +289 -0
  23. dtSpark/daemon/execution_coordinator.py +194 -0
  24. dtSpark/daemon/pid_file.py +169 -0
  25. dtSpark/database/__init__.py +482 -0
  26. dtSpark/database/autonomous_actions.py +1191 -0
  27. dtSpark/database/backends.py +329 -0
  28. dtSpark/database/connection.py +122 -0
  29. dtSpark/database/conversations.py +520 -0
  30. dtSpark/database/credential_prompt.py +218 -0
  31. dtSpark/database/files.py +205 -0
  32. dtSpark/database/mcp_ops.py +355 -0
  33. dtSpark/database/messages.py +161 -0
  34. dtSpark/database/schema.py +673 -0
  35. dtSpark/database/tool_permissions.py +186 -0
  36. dtSpark/database/usage.py +167 -0
  37. dtSpark/files/__init__.py +4 -0
  38. dtSpark/files/manager.py +322 -0
  39. dtSpark/launch.py +39 -0
  40. dtSpark/limits/__init__.py +10 -0
  41. dtSpark/limits/costs.py +296 -0
  42. dtSpark/limits/tokens.py +342 -0
  43. dtSpark/llm/__init__.py +17 -0
  44. dtSpark/llm/anthropic_direct.py +446 -0
  45. dtSpark/llm/base.py +146 -0
  46. dtSpark/llm/context_limits.py +438 -0
  47. dtSpark/llm/manager.py +177 -0
  48. dtSpark/llm/ollama.py +578 -0
  49. dtSpark/mcp_integration/__init__.py +5 -0
  50. dtSpark/mcp_integration/manager.py +653 -0
  51. dtSpark/mcp_integration/tool_selector.py +225 -0
  52. dtSpark/resources/config.yaml.template +631 -0
  53. dtSpark/safety/__init__.py +22 -0
  54. dtSpark/safety/llm_service.py +111 -0
  55. dtSpark/safety/patterns.py +229 -0
  56. dtSpark/safety/prompt_inspector.py +442 -0
  57. dtSpark/safety/violation_logger.py +346 -0
  58. dtSpark/scheduler/__init__.py +20 -0
  59. dtSpark/scheduler/creation_tools.py +599 -0
  60. dtSpark/scheduler/execution_queue.py +159 -0
  61. dtSpark/scheduler/executor.py +1152 -0
  62. dtSpark/scheduler/manager.py +395 -0
  63. dtSpark/tools/__init__.py +4 -0
  64. dtSpark/tools/builtin.py +833 -0
  65. dtSpark/web/__init__.py +20 -0
  66. dtSpark/web/auth.py +152 -0
  67. dtSpark/web/dependencies.py +37 -0
  68. dtSpark/web/endpoints/__init__.py +17 -0
  69. dtSpark/web/endpoints/autonomous_actions.py +1125 -0
  70. dtSpark/web/endpoints/chat.py +621 -0
  71. dtSpark/web/endpoints/conversations.py +353 -0
  72. dtSpark/web/endpoints/main_menu.py +547 -0
  73. dtSpark/web/endpoints/streaming.py +421 -0
  74. dtSpark/web/server.py +578 -0
  75. dtSpark/web/session.py +167 -0
  76. dtSpark/web/ssl_utils.py +195 -0
  77. dtSpark/web/static/css/dark-theme.css +427 -0
  78. dtSpark/web/static/js/actions.js +1101 -0
  79. dtSpark/web/static/js/chat.js +614 -0
  80. dtSpark/web/static/js/main.js +496 -0
  81. dtSpark/web/static/js/sse-client.js +242 -0
  82. dtSpark/web/templates/actions.html +408 -0
  83. dtSpark/web/templates/base.html +93 -0
  84. dtSpark/web/templates/chat.html +814 -0
  85. dtSpark/web/templates/conversations.html +350 -0
  86. dtSpark/web/templates/goodbye.html +81 -0
  87. dtSpark/web/templates/login.html +90 -0
  88. dtSpark/web/templates/main_menu.html +983 -0
  89. dtSpark/web/templates/new_conversation.html +191 -0
  90. dtSpark/web/web_interface.py +137 -0
  91. dtspark-1.0.4.dist-info/METADATA +187 -0
  92. dtspark-1.0.4.dist-info/RECORD +96 -0
  93. dtspark-1.0.4.dist-info/WHEEL +5 -0
  94. dtspark-1.0.4.dist-info/entry_points.txt +3 -0
  95. dtspark-1.0.4.dist-info/licenses/LICENSE +21 -0
  96. dtspark-1.0.4.dist-info/top_level.txt +1 -0
dtSpark/core/application.py
@@ -0,0 +1,3355 @@
+ import os
+ import socket
+ import sys
+ import logging
+ import time
+ import subprocess
+ from datetime import datetime
+ from typing import List, Optional
+
+ from argparse import ArgumentParser
+
+ from dtPyAppFramework.misc.packaging import load_module_package, ModulePackage
+ from dtPyAppFramework.application import AbstractApp
+ from dtPyAppFramework.settings import Settings
+ from dtPyAppFramework.paths import ApplicationPaths
+ from dtPyAppFramework.process import ProcessManager
+ from dtPyAppFramework.resources import ResourceManager
+
+ # Add the parent 'src' directory to sys.path to enable relative imports
+ src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ if src_dir not in sys.path:
+     sys.path.insert(0, src_dir)
+
+ dir_path = os.path.dirname(os.path.realpath(__file__))
+ # _metadata.yaml is in the parent directory (dtSpark/)
+ parent_dir = os.path.dirname(dir_path)
+ module_package: ModulePackage = load_module_package(os.path.join(parent_dir, '_metadata.yaml'))
+
+ # Force OpenTelemetry to use contextvars context
+ os.environ['OTEL_PYTHON_CONTEXT'] = 'contextvars_context'
+
+ def version():
+     """Returns the version of the module."""
+     return module_package.version
+
+ def description():
+     """Returns the description of the module."""
+     return module_package.description
+
+ def agent_type():
+     return module_package.short_name
+
+ def full_name():
+     return module_package.full_name
+
+ def agent_name():
+     return socket.gethostname()
+
+ def copy_to_clipboard(text: str) -> bool:
+     """
+     Copy text to system clipboard using platform-specific commands.
+
+     Args:
+         text: Text to copy to clipboard
+
+     Returns:
+         True if successful, False otherwise
+     """
+     try:
+         if sys.platform == 'win32':
+             # Windows: use clip.exe
+             process = subprocess.Popen(['clip'], stdin=subprocess.PIPE, shell=True)
+             process.communicate(text.encode('utf-16le'))
+             return process.returncode == 0
+         elif sys.platform == 'darwin':
+             # macOS: use pbcopy
+             process = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)
+             process.communicate(text.encode('utf-8'))
+             return process.returncode == 0
+         else:
+             # Linux: try xclip or xsel
+             try:
+                 process = subprocess.Popen(['xclip', '-selection', 'clipboard'], stdin=subprocess.PIPE)
+                 process.communicate(text.encode('utf-8'))
+                 return process.returncode == 0
+             except FileNotFoundError:
+                 try:
+                     process = subprocess.Popen(['xsel', '--clipboard', '--input'], stdin=subprocess.PIPE)
+                     process.communicate(text.encode('utf-8'))
+                     return process.returncode == 0
+                 except FileNotFoundError:
+                     return False
+     except Exception as e:
+         logging.error(f"Failed to copy to clipboard: {e}")
+         return False
+
+ class AWSBedrockCLI(AbstractApp):
+     def __init__(self):
+         super().__init__(short_name=agent_type(), full_name=full_name(), version=version(),
+                          description='AWS Bedrock CLI for GenAI Chat',
+                          console_app=True)
+         self.settings: Settings = None
+         self.authenticator = None
+         self.bedrock_service = None
+         self.database = None
+         self.conversation_manager = None
+         self.cli = None
+         self.mcp_manager = None
+         self.auth_failed = False
+         self.action_scheduler = None
+         self.execution_queue = None
+         self.action_executor = None
+         self.configured_model_id = None  # Model locked via config.yaml
+         self.configured_provider = None  # Provider for mandatory model
+         self.cost_tracker = None  # Cost tracker for Bedrock usage
+         self.token_manager = None  # Token manager for usage limit enforcement
+
+     def _get_nested_setting(self, key: str, default=None):
+         """
+         Get a nested setting value, trying both dot notation and dict navigation.
+
+         The dtPyAppFramework Settings class may not fully support dot notation
+         for deeply nested YAML keys. This method provides a fallback.
+
+         Args:
+             key: Dot-separated key (e.g., 'llm_providers.aws_bedrock.enabled')
+             default: Default value if not found
+
+         Returns:
+             The setting value, or default if not found
+         """
+         # First try direct dot notation access
+         value = self.settings.get(key, None)
+         if value is not None:
+             return value
+
+         # Fallback: Navigate the dict manually
+         parts = key.split('.')
+         if len(parts) > 1:
+             # Get the top-level key
+             top_level = self.settings.get(parts[0], None)
+             if isinstance(top_level, dict):
+                 # Navigate through remaining parts
+                 current = top_level
+                 for part in parts[1:]:
+                     if isinstance(current, dict):
+                         current = current.get(part, None)
+                     else:
+                         current = None
+                         break
+                 if current is not None:
+                     return current
+
+         return default
+
+     def _load_database_configuration(self):
+         """
+         Load database configuration and prompt for credentials if needed.
+
+         Returns:
+             Tuple of (db_type, credentials)
+         """
+         from dtSpark.database.backends import DatabaseCredentials
+         from dtSpark.database.credential_prompt import prompt_and_validate_credentials
+
+         # Get database type from configuration
+         db_type = self.settings.get('database.type', 'sqlite')
+         logging.info(f"Database type: {db_type}")
+
+         # Load credentials from configuration
+         if db_type.lower() == 'sqlite':
+             db_path = self.settings.get('database.sqlite.path', './data/conversations.db')
+             # Expand path relative to current working directory (app root)
+             if not os.path.isabs(db_path):
+                 db_path = os.path.abspath(db_path)
+
+             credentials = DatabaseCredentials(path=db_path)
+             logging.info(f"SQLite database path: {db_path}")
+
+         else:
+             # Remote database - load credentials from config
+             db_config_key = f'database.{db_type.lower()}'
+
+             credentials = DatabaseCredentials(
+                 host=self.settings.get(f'{db_config_key}.host'),
+                 port=self.settings.get(f'{db_config_key}.port'),
+                 database=self.settings.get(f'{db_config_key}.database'),
+                 username=self.settings.get(f'{db_config_key}.username'),
+                 password=self.settings.get(f'{db_config_key}.password'),
+                 ssl=self.settings.get(f'{db_config_key}.ssl', False),
+                 driver=self.settings.get(f'{db_config_key}.driver')  # For MSSQL
+             )
+
+             # Check if credentials need prompting (any null values)
+             needs_prompt = not all([
+                 credentials.host,
+                 credentials.database,
+                 credentials.username,
+                 credentials.password
+             ])
+
+             if needs_prompt:
+                 logging.info("Database credentials incomplete - prompting user")
+                 credentials = prompt_and_validate_credentials(db_type, credentials, max_retries=3)
+
+                 if credentials is None:
+                     raise RuntimeError(
+                         f"Failed to establish database connection after multiple attempts. "
+                         f"Please check your configuration in config.yaml and ensure the database server is accessible."
+                     )
+
+             logging.info(f"Database configured: {db_type} at {credentials.host}:{credentials.port}/{credentials.database}")
+
+         return db_type, credentials
+
+     def _build_model_context_limits(self) -> dict:
+         """
+         Build model_context_limits dictionary from Settings object.
+
+         Settings uses dot notation, so we need to manually construct the
+         nested dictionary structure that ContextLimitResolver expects.
+
+         Returns:
+             Dictionary with provider sections containing model limits
+         """
+         limits = {}
+
+         # Define providers and their known models
+         providers_and_models = {
+             'anthropic': [
+                 'claude-opus-4', 'claude-sonnet-4', 'claude-opus-4.5', 'claude-sonnet-4.5',
+                 'claude-3-5-sonnet', 'claude-3-5-haiku', 'claude-3-opus', 'claude-3-sonnet',
+                 'claude-3-haiku', 'default'
+             ],
+             'aws_bedrock': [
+                 'amazon.titan-text-express', 'amazon.titan-text-lite',
+                 'meta.llama3-1', 'mistral.mistral-large', 'default'
+             ],
+             'ollama': [
+                 'llama3.2', 'mistral', 'codellama', 'default'
+             ]
+         }
+
+         # Debug: Test if we can access any model_context_limits values
+         test_key = 'model_context_limits.anthropic.claude-sonnet-4.context_window'
+         test_value = self.settings.get(test_key, None)
+         logging.info(f"Settings test: '{test_key}' = {test_value}")
+
+         for provider, models in providers_and_models.items():
+             provider_limits = {}
+
+             for model in models:
+                 # Build the dot-notation keys for this model
+                 base_key = f'model_context_limits.{provider}.{model}'
+
+                 context_window = self.settings.get(f'{base_key}.context_window', None)
+                 max_output = self.settings.get(f'{base_key}.max_output', None)
+
+                 # Log attempts for debugging
+                 if model in ['claude-sonnet-4', 'default']:
+                     logging.info(f"Settings lookup: '{base_key}.context_window' = {context_window}")
+
+                 if context_window is not None and max_output is not None:
+                     provider_limits[model] = {
+                         'context_window': int(context_window),
+                         'max_output': int(max_output)
+                     }
+                     logging.debug(f"Loaded model limits: {provider}.{model} = {context_window}/{max_output}")
+
+             if provider_limits:
+                 limits[provider] = provider_limits
+                 logging.info(f"Loaded {len(provider_limits)} model configurations for provider '{provider}'")
+
+         # Try global default
+         global_context = self.settings.get('model_context_limits.default.context_window', None)
+         global_output = self.settings.get('model_context_limits.default.max_output', None)
+         if global_context is not None and global_output is not None:
+             limits['default'] = {
+                 'context_window': int(global_context),
+                 'max_output': int(global_output)
+             }
+
+         if not limits:
+             logging.warning("No model_context_limits found in settings, using hardcoded defaults")
+
+         return limits
+
+     def _check_daemon_running(self) -> bool:
+         """
+         Check if a daemon process is currently running.
+
+         Uses the PID file to determine if a daemon is active.
+         This is used to decide whether the UI should start its own
+         scheduler or defer to the daemon.
+
+         Returns:
+             True if daemon is running, False otherwise
+         """
+         try:
+             from dtSpark.daemon.pid_file import PIDFile
+
+             # Get PID file path from settings (same as daemon uses)
+             pid_file_path = self.settings.get('daemon.pid_file', './daemon.pid')
+             pid_file = PIDFile(pid_file_path)
+
+             is_running = pid_file.is_running()
+
+             if is_running:
+                 pid = pid_file.read_pid()
+                 logging.info(f"Daemon detected running with PID {pid}")
+             else:
+                 logging.debug("No daemon running (PID file absent or process not found)")
+
+             return is_running
+
+         except ImportError:
+             logging.debug("Daemon module not available - assuming no daemon running")
+             return False
+         except Exception as e:
+             logging.warning(f"Error checking daemon status: {e} - assuming no daemon running")
+             return False
+
+     def initialise_singletons(self):
+         """Initialise application components."""
+         import asyncio
+         from dtSpark.aws import AWSAuthenticator
+         from dtSpark.aws import BedrockService
+         from dtSpark.database import ConversationDatabase
+         from dtSpark.conversation_manager import ConversationManager
+         from dtSpark.cli_interface import CLIInterface
+         from dtSpark.mcp_integration import MCPManager
+         from dtSpark.aws import CostTracker
+         from dtSpark.limits import TokenManager
+         from dtSpark.llm import LLMManager, OllamaService, AnthropicService
+         from dtSpark.safety import PromptInspector, ViolationLogger
+         from dtSpark.safety.llm_service import InspectionLLMService
+
+         logging.info('Initialising application components')
+
+         # Debug: Diagnose settings loading for llm_providers
+         # Try different access patterns to understand how Settings class works
+         logging.info("=== Settings Diagnostics ===")
+
+         # Try accessing the whole llm_providers section
+         llm_providers_raw = self.settings.get('llm_providers', None)
+         logging.info(f"settings.get('llm_providers'): {llm_providers_raw} (type: {type(llm_providers_raw).__name__ if llm_providers_raw else 'None'})")
+
+         # Try accessing aws_bedrock under llm_providers
+         aws_bedrock_raw = self.settings.get('llm_providers.aws_bedrock', None)
+         logging.info(f"settings.get('llm_providers.aws_bedrock'): {aws_bedrock_raw} (type: {type(aws_bedrock_raw).__name__ if aws_bedrock_raw else 'None'})")
+
+         # If llm_providers is a dict, try to access nested values directly
+         if isinstance(llm_providers_raw, dict):
+             logging.info(f"llm_providers keys: {list(llm_providers_raw.keys())}")
+             aws_bedrock_dict = llm_providers_raw.get('aws_bedrock', {})
+             logging.info(f"llm_providers['aws_bedrock']: {aws_bedrock_dict}")
+             if isinstance(aws_bedrock_dict, dict):
+                 logging.info(f"aws_bedrock['enabled']: {aws_bedrock_dict.get('enabled', 'NOT_FOUND')}")
+
+         # Check if settings has a _settings or similar internal dict
+         if hasattr(self.settings, '_settings'):
+             logging.info(f"settings._settings type: {type(self.settings._settings)}")
+         if hasattr(self.settings, 'settings'):
+             logging.info(f"settings.settings type: {type(self.settings.settings)}")
+
+         logging.info("=== End Settings Diagnostics ===")
+
+         # Initialise CLI interface
+         self.cli = CLIInterface()
+
+         # Display splash screen with metadata
+         self.cli.print_splash_screen(
+             full_name=full_name(),
+             description=module_package.description,
+             version=version()
+         )
+
+         # Create progress tracker
+         progress = self.cli.create_progress()
+
+         with progress:
+             # Task 1: Load configuration
+             task_config = progress.add_task("[cyan]Loading configuration...", total=100)
+             # AWS Bedrock configuration - now under llm_providers.aws_bedrock
+             # Uses _get_nested_setting helper which handles both dot notation and dict navigation
+             # Also checks legacy 'aws.' paths for backwards compatibility
+             aws_region = self._get_nested_setting('llm_providers.aws_bedrock.region', None)
+             if aws_region is None:
+                 aws_region = self.settings.get('aws.region', 'us-east-1')
+
+             aws_profile = self._get_nested_setting('llm_providers.aws_bedrock.sso_profile', None)
+             if aws_profile is None:
+                 aws_profile = self.settings.get('aws.sso_profile', 'default')
+
+             bedrock_request_timeout = self.settings.get('bedrock.request_timeout', 300)
+
+             # AWS API key configuration (optional - takes precedence over SSO if provided)
+             aws_access_key_id = self._get_nested_setting('llm_providers.aws_bedrock.access_key_id', None)
+             if aws_access_key_id is None:
+                 aws_access_key_id = self.settings.get('aws.access_key_id', None)
+
+             aws_secret_access_key = self._get_nested_setting('llm_providers.aws_bedrock.secret_access_key', None)
+             if aws_secret_access_key is None:
+                 aws_secret_access_key = self.settings.get('aws.secret_access_key', None)
+
+             aws_session_token = self._get_nested_setting('llm_providers.aws_bedrock.session_token', None)
+             if aws_session_token is None:
+                 aws_session_token = self.settings.get('aws.session_token', None)
+
+             # Configure CLI cost tracking display
+             cost_tracking_enabled = self._get_nested_setting('llm_providers.aws_bedrock.cost_tracking.enabled', None)
+             if cost_tracking_enabled is None:
+                 cost_tracking_enabled = self.settings.get('aws.cost_tracking.enabled', False)
+             self.cli.cost_tracking_enabled = cost_tracking_enabled
+
+             progress.update(task_config, advance=100)
+
+             # Task 2: Initialise LLM Providers
+             task_llm = progress.add_task("[cyan]Initialising LLM providers...", total=100)
+
+             self.llm_manager = LLMManager()
+             provider_count = 0
+
+             # Check AWS Bedrock configuration using helper that handles both dot notation and dict navigation
+             aws_enabled_raw = self._get_nested_setting('llm_providers.aws_bedrock.enabled', None)
+             logging.info(f"AWS Bedrock enabled raw value: {aws_enabled_raw} (type: {type(aws_enabled_raw).__name__ if aws_enabled_raw is not None else 'NoneType'})")
+
+             # Handle missing or various value types
+             if aws_enabled_raw is None:
+                 # Setting not found via any method - check if other providers are enabled
+                 ollama_check = self._get_nested_setting('llm_providers.ollama.enabled', False)
+                 anthropic_check = self._get_nested_setting('llm_providers.anthropic.enabled', False)
+                 if ollama_check or anthropic_check:
+                     # Other providers configured, don't default to AWS
+                     logging.info("AWS Bedrock not explicitly configured, other providers available - skipping AWS")
+                     aws_enabled = False
+                 else:
+                     # No providers configured - default to AWS for backwards compatibility
+                     logging.warning("No LLM provider explicitly configured - defaulting to AWS Bedrock")
+                     aws_enabled = True
+             elif isinstance(aws_enabled_raw, str):
+                 # Handle string 'false'/'true' from YAML parsing
+                 aws_enabled = aws_enabled_raw.lower() not in ('false', 'no', '0', 'off', '')
+             else:
+                 aws_enabled = bool(aws_enabled_raw)
+
+             logging.info(f"AWS Bedrock final enabled state: {aws_enabled}")
+
+             if aws_enabled:
+                 # Task 2a: AWS Authentication
+                 progress.update(task_llm, advance=10, description="[cyan]Authenticating with AWS...")
+
+                 # Suppress stdout/stderr during authentication
+                 import contextlib
+                 with contextlib.redirect_stdout(open(os.devnull, 'w')), \
+                      contextlib.redirect_stderr(open(os.devnull, 'w')):
+                     self.authenticator = AWSAuthenticator(
+                         profile_name=aws_profile,
+                         region=aws_region,
+                         bedrock_request_timeout=bedrock_request_timeout,
+                         access_key_id=aws_access_key_id,
+                         secret_access_key=aws_secret_access_key,
+                         session_token=aws_session_token
+                     )
+                     auth_result = self.authenticator.authenticate()
+
+                 if not auth_result:
+                     progress.stop()
+                     self.cli.print_warning("Failed to authenticate with AWS Bedrock")
+
+                     # Check if Ollama is available as fallback
+                     ollama_enabled = self._get_nested_setting('llm_providers.ollama.enabled', False)
+                     if not ollama_enabled:
+                         self.cli.print_error("AWS authentication required (Ollama not configured)")
+                         self.cli.print_info(f"Run: aws sso login --profile {aws_profile}")
+                         self.auth_failed = True
+                         return
+                     else:
+                         self.cli.print_info("Continuing with Ollama only...")
+                         aws_enabled = False
+                 else:
+                     # AWS auth succeeded, initialize Bedrock
+                     progress.update(task_llm, advance=20, description="[cyan]Initialising AWS Bedrock...")
+                     bedrock_client = self.authenticator.get_client('bedrock')
+                     bedrock_runtime_client = self.authenticator.get_client('bedrock-runtime')
+                     bedrock_service = BedrockService(bedrock_client, bedrock_runtime_client)
+                     self.llm_manager.register_provider(bedrock_service)
+                     provider_count += 1
+                     logging.info("AWS Bedrock provider registered")
+             else:
+                 # AWS disabled - skip AWS progress (10% auth + 20% init = 30%)
+                 progress.update(task_llm, advance=30)
+
+             progress.update(task_llm, advance=20, description="[cyan]Checking Ollama...")
+
+             # Check Ollama configuration
+             ollama_enabled = self._get_nested_setting('llm_providers.ollama.enabled', False)
+             if ollama_enabled:
+                 try:
+                     ollama_url = self._get_nested_setting(
+                         'llm_providers.ollama.base_url',
+                         'http://localhost:11434'
+                     )
+                     # Get SSL verification setting (default True, set to False for self-signed certs)
+                     ollama_verify_ssl = self._get_nested_setting(
+                         'llm_providers.ollama.verify_ssl',
+                         True
+                     )
+                     ollama_service = OllamaService(
+                         base_url=ollama_url,
+                         verify_ssl=ollama_verify_ssl
+                     )
+                     self.llm_manager.register_provider(ollama_service)
+                     provider_count += 1
+                     logging.info("Ollama provider registered")
+                 except Exception as e:
+                     logging.error(f"Failed to initialise Ollama: {e}")
+
+             progress.update(task_llm, advance=20, description="[cyan]Checking Anthropic...")
+
+             # Check Anthropic configuration
+             anthropic_enabled = self._get_nested_setting('llm_providers.anthropic.enabled', False)
+             if anthropic_enabled:
+                 try:
+                     api_key = self._get_nested_setting('llm_providers.anthropic.api_key', '')
+
+                     # Get default max_tokens from bedrock config
+                     default_max_tokens = self.settings.get('bedrock.max_tokens', 8192)
+
+                     # Get rate limit configuration
+                     rate_limit_max_retries = self._get_nested_setting('llm_providers.anthropic.rate_limit_max_retries', 5)
+                     rate_limit_base_delay = self._get_nested_setting('llm_providers.anthropic.rate_limit_base_delay', 2.0)
+
+                     # Allow empty API key if environment variable is set
+                     if not api_key:
+                         api_key = os.environ.get('ANTHROPIC_API_KEY')
+
+                     if api_key:
+                         anthropic_service = AnthropicService(
+                             api_key=api_key,
+                             default_max_tokens=default_max_tokens,
+                             rate_limit_max_retries=rate_limit_max_retries,
+                             rate_limit_base_delay=rate_limit_base_delay
+                         )
+                         self.llm_manager.register_provider(anthropic_service)
+                         provider_count += 1
+                         logging.info("Anthropic Direct API provider registered")
+                     else:
+                         logging.warning("Anthropic enabled but no API key provided")
+                 except Exception as e:
+                     logging.error(f"Failed to initialise Anthropic: {e}")
+
+             progress.update(task_llm, advance=30)
+
+             # Verify at least one provider is available
+             if provider_count == 0:
+                 progress.stop()
+                 self.cli.print_error("No LLM providers available")
+                 self.cli.print_info("Configure AWS Bedrock or Ollama in config.yaml")
+                 raise RuntimeError("No LLM providers available")
+
+             logging.info(f"Initialised {provider_count} LLM provider(s)")
+
+             # Set bedrock_service for backward compatibility
+             self.bedrock_service = self.llm_manager.get_active_service()
+
+             # Task 3.5: Retrieve Bedrock cost information (silently, display later)
+             # Only if cost tracking is enabled in configuration
+             cost_tracking_enabled = self._get_nested_setting('llm_providers.aws_bedrock.cost_tracking.enabled', None)
+             if cost_tracking_enabled is None:
+                 cost_tracking_enabled = self.settings.get('aws.cost_tracking.enabled', False)
+             if cost_tracking_enabled and self.authenticator:
+                 task_costs = progress.add_task("[cyan]Retrieving usage costs...", total=100)
+                 self.bedrock_costs = None
+                 try:
+                     cost_explorer_client = self.authenticator.get_client('ce')
+                     self.cost_tracker = CostTracker(cost_explorer_client)
+                     self.bedrock_costs = self.cost_tracker.get_bedrock_costs()
+                     progress.update(task_costs, advance=100)
+                 except Exception as e:
+                     logging.debug(f"Could not retrieve cost information: {e}")
+                     progress.update(task_costs, advance=100)
+                     # Continue silently - cost tracking is optional
+             elif cost_tracking_enabled and not self.authenticator:
+                 logging.debug("Cost tracking enabled but AWS not configured - skipping")
+             else:
+                 logging.debug("Cost tracking disabled in configuration")
+                 self.bedrock_costs = None
+                 self.cost_tracker = None
+
+             # Task 3.8: Initialise or retrieve user GUID
+             task_user = progress.add_task("[cyan]Initialising user identity...", total=100)
+             import uuid
+
+             # Get or create user GUID from secret manager
+             user_guid = self.settings.secret_manager.get_secret('user_guid', None, 'User_Local_Store')
+             if user_guid is None:
+                 # Generate new GUID for this user
+                 user_guid = str(uuid.uuid4())
+                 # Store in secret manager for future use
+                 self.settings.secret_manager.set_secret('user_guid', user_guid, 'User_Local_Store')
+                 logging.info(f"Generated new user GUID: {user_guid}")
+             else:
+                 logging.info(f"Using existing user GUID: {user_guid}")
+
+             self.user_guid = user_guid
+             progress.update(task_user, advance=100)
+
+             # Task 4: Initialise database
+             task_db = progress.add_task("[cyan]Initialising conversation database...", total=100)
+
+             # Get database configuration
+             db_type, db_credentials = self._load_database_configuration()
+
+             # Create database connection with appropriate backend
+             self.database = ConversationDatabase(
+                 db_type=db_type,
+                 credentials=db_credentials,
+                 user_guid=self.user_guid
+             )
+             progress.update(task_db, advance=100)
+
+             # Task 4.5: Initialise token management (if enabled)
+             token_mgmt_enabled = self.settings.get('token_management.enabled', False)
+             if token_mgmt_enabled:
+                 task_token = progress.add_task("[cyan]Initialising token management...", total=100)
+                 try:
+                     # Create token manager
+                     token_config = {
+                         'enabled': True,
+                         'max_input_tokens': self.settings.get('token_management.max_input_tokens', 100000),
+                         'max_output_tokens': self.settings.get('token_management.max_output_tokens', 50000),
+                         'period_hours': self.settings.get('token_management.period_hours', 24),
+                         'allow_override': self.settings.get('token_management.allow_override', True)
+                     }
+                     self.token_manager = TokenManager(self.database, token_config)
+                     progress.update(task_token, advance=100)
+
+                     logging.info(
+                         f"Token management enabled: {token_config['max_input_tokens']:,} input tokens, "
+                         f"{token_config['max_output_tokens']:,} output tokens per {token_config['period_hours']}h"
+                     )
+
+                 except Exception as e:
+                     logging.error(f"Failed to initialise token management: {e}")
+                     progress.update(task_token, advance=100)
+                     self.token_manager = None
+             else:
+                 self.token_manager = None
+
+             # Task 5: MCP Initialisation (if enabled)
+             mcp_enabled = self.settings.get('mcp_config.enabled', False)
+             if mcp_enabled:
+                 task_mcp = progress.add_task("[cyan]Initialising MCP servers...", total=100)
+                 try:
+                     # Create MCP manager from config
+                     config_dict = {
+                         'mcp_config': {
+                             'servers': self.settings.get('mcp_config.servers', [])
+                         }
+                     }
+                     self.mcp_manager = MCPManager.from_config(config_dict)
+                     progress.update(task_mcp, advance=10)
+
+                     # Calculate progress per server (70% of progress for connecting servers)
+                     num_servers = len(self.mcp_manager.clients)
+                     progress_per_server = 70.0 / num_servers if num_servers > 0 else 0
+
+                     # Define progress callback to update as each server connects
+                     def on_server_connected(server_name: str, success: bool):
+                         status = "OK" if success else "FAIL"
+                         progress.update(task_mcp, advance=progress_per_server,
+                                         description=f"[cyan]Initialising MCP servers... [{status}] {server_name}")
+                         progress.refresh()  # Force display refresh
+                         time.sleep(0.1)  # Small delay to make progress visible
+
+                     # Connect to all MCP servers with progress callback
+                     loop = asyncio.new_event_loop()
+                     asyncio.set_event_loop(loop)
+                     results = loop.run_until_complete(self.mcp_manager.connect_all(on_server_connected))
+
+                     # Reset description after all servers connected
+                     progress.update(task_mcp, description="[cyan]Initialising MCP servers...")
+
+                     connected_count = sum(1 for success in results.values() if success)
+                     if connected_count > 0:
+                         # Fetch and cache tools
+                         try:
+                             tools = loop.run_until_complete(
+                                 asyncio.wait_for(
+                                     self.mcp_manager.list_all_tools(),
+                                     timeout=15.0
+                                 )
+                             )
+                             progress.update(task_mcp, advance=20)
+
+                             # Store the loop for reuse
+                             self.mcp_manager._initialization_loop = loop
+
+                         except asyncio.TimeoutError:
+                             logging.error("Timeout during initial MCP tool fetch")
+                             progress.update(task_mcp, advance=20)
+                         except Exception as tool_err:
+                             logging.error(f"Error during initial MCP tool fetch: {tool_err}")
+                             progress.update(task_mcp, advance=20)
+                     else:
+                         progress.update(task_mcp, advance=90)
+
+                 except Exception as e:
+                     logging.exception("MCP initialisation failed")
+                     self.mcp_manager = None
+                     progress.update(task_mcp, advance=100)
+
+             # Task 6: Initialise conversation manager
+             task_conv = progress.add_task("[cyan]Initialising conversation manager...", total=100)
+             max_tokens = self.settings.get('bedrock.max_tokens', 4096)
+             rollup_threshold = self.settings.get('conversation.rollup_threshold', 0.8)
+             rollup_summary_ratio = self.settings.get('conversation.rollup_summary_ratio', 0.3)
+             max_tool_result_tokens = self.settings.get('conversation.max_tool_result_tokens', 10000)
+             max_tool_iterations = self.settings.get('conversation.max_tool_iterations', 25)
+             max_tool_selections = self.settings.get('conversation.max_tool_selections', 30)
+             emergency_rollup_threshold = self.settings.get('conversation.emergency_rollup_threshold', 0.95)
+
+             # Load global instructions if configured
+             global_instructions = None
+             global_instructions_path = self.settings.get('conversation.global_instructions_path', None)
+             if global_instructions_path:
+                 try:
+                     logging.info(f"Loading global instructions from: {global_instructions_path}")
+                     resource_manager = ResourceManager()
+                     global_instructions = resource_manager.load_resource(global_instructions_path)
+                     if global_instructions:
+                         logging.info(f"Global instructions loaded successfully ({len(global_instructions)} characters)")
+                     else:
+                         logging.warning(f"Global instructions file is empty: {global_instructions_path}")
+                 except Exception as e:
+                     logging.warning(f"Failed to load global instructions from {global_instructions_path}: {e}")
+                     logging.warning("Continuing without global instructions")
+
+             # Initialise prompt inspector for Cyber Security
+             prompt_inspector = None
+             prompt_inspection_config = self.settings.get('prompt_inspection', {})
+             if prompt_inspection_config.get('enabled', True):
+                 logging.info("Initialising prompt inspection system")
+
+                 # Create violation logger
+                 violation_logger = ViolationLogger(
+                     self.database.conn,
+                     prompt_inspection_config
+                 )
+
+                 # Create LLM service for inspection if strict mode
+                 inspection_llm_service = None
+                 llm_config = prompt_inspection_config.get('llm_inspection', {})
+                 if llm_config.get('enabled', False):
+                     try:
+                         # Create inspection LLM service with provider manager
+                         inspection_llm_service = InspectionLLMService(
+                             config=llm_config,
+                             provider_manager=self.llm_manager  # Use existing LLM manager
+                         )
+
+                         if inspection_llm_service.is_available():
+                             logging.info(f"LLM inspection available: {llm_config.get('model')} via {llm_config.get('provider', 'auto-detect')}")
+                         else:
+                             logging.warning("LLM inspection requested but not available")
+                     except Exception as e:
+                         logging.warning(f"Failed to initialise LLM inspection: {e}")
+                         inspection_llm_service = None
+
+                 # Create prompt inspector
+                 prompt_inspector = PromptInspector(
+                     config=prompt_inspection_config,
+                     llm_service=inspection_llm_service,
+                     violation_logger=violation_logger
+                 )
+
+                 logging.info(f"Prompt inspector initialised: level={prompt_inspector.inspection_level}, action={prompt_inspector.action}")
+             else:
+                 logging.info("Prompt inspection disabled")
+
+             # Build config dictionary with model_context_limits and embedded_tools
+             # Settings uses dot notation, so we need to build the nested dict structure
+             config_for_manager = {
+                 'model_context_limits': self._build_model_context_limits(),
+                 'embedded_tools': {
+                     'filesystem': {
+                         'enabled': self.settings.get('embedded_tools.filesystem.enabled', False),
+                         'allowed_path': self.settings.get('embedded_tools.filesystem.allowed_path', './'),
+                         'access_mode': self.settings.get('embedded_tools.filesystem.access_mode', 'read')
+                     }
+                 }
+             }
+
+             self.conversation_manager = ConversationManager(
+                 self.database,
+                 self.bedrock_service,
+                 max_tokens=max_tokens,
+                 rollup_threshold=rollup_threshold,
+                 rollup_summary_ratio=rollup_summary_ratio,
+                 max_tool_result_tokens=max_tool_result_tokens,
+                 max_tool_iterations=max_tool_iterations,
+                 max_tool_selections=max_tool_selections,
+                 emergency_rollup_threshold=emergency_rollup_threshold,
+                 mcp_manager=self.mcp_manager,
+                 cli_interface=self.cli,
+                 global_instructions=global_instructions,
+                 token_manager=self.token_manager,
+                 prompt_inspector=prompt_inspector,
+                 user_guid=self.user_guid,
+                 config=config_for_manager
+             )
+             progress.update(task_conv, advance=100)
+
+             # Task 7: Initialise autonomous action scheduler
+             task_scheduler = progress.add_task("[cyan]Initialising action scheduler...", total=100)
+             try:
+                 from dtSpark.scheduler import (
+                     ActionSchedulerManager,
+                     ActionExecutionQueue,
+                     ActionExecutor
+                 )
+
+                 # Get database path for scheduler job store
+                 db_path = self.database.db_path or ':memory:'
+
+                 # Create executor with LLM manager and optional MCP manager
+                 get_tools_func = None
+                 if self.mcp_manager:
+                     def get_tools_func():
+                         import asyncio
+                         loop = getattr(self.mcp_manager, '_initialization_loop', None)
+                         if loop and not loop.is_closed():
+                             return loop.run_until_complete(self.mcp_manager.list_all_tools())
+                         return []
+
+                 self.action_executor = ActionExecutor(
+                     database=self.database,
+                     llm_manager=self.llm_manager,
+                     mcp_manager=self.mcp_manager,
+                     get_tools_func=get_tools_func,
+                     config=config_for_manager
+                 )
+
+                 # Create execution queue
+                 self.execution_queue = ActionExecutionQueue(
+                     executor_func=self.action_executor.execute
+                 )
+
+                 # Create scheduler manager
+                 self.action_scheduler = ActionSchedulerManager(
+                     db_path=db_path,
+                     execution_callback=lambda action_id, user_guid: self.execution_queue.enqueue(
+                         action_id, user_guid, is_manual=False
+                     )
+                 )
+
+                 # Check if daemon is running (for warning display later)
+                 self.daemon_is_running = self._check_daemon_running()
+
+                 # Initialise execution components (for manual "Run Now" from UI)
+                 # Note: Scheduled execution is ONLY handled by the daemon process
+                 self.action_scheduler.initialise()
+                 self.execution_queue.start()
+
+                 # UI never starts the scheduler - daemon handles all scheduled execution
+                 if self.daemon_is_running:
+                     logging.info("Daemon is running - scheduled actions will be executed by daemon")
+                 else:
+                     logging.warning("Daemon is not running - scheduled actions will NOT execute until daemon is started")
+
+                 progress.update(task_scheduler, advance=100)
+
+             except ImportError as e:
+                 logging.warning(f"Action scheduler not available (APScheduler not installed): {e}")
+                 self.action_scheduler = None
+                 self.execution_queue = None
+                 self.action_executor = None
+                 progress.update(task_scheduler, advance=100)
+             except Exception as e:
+                 logging.error(f"Failed to initialise action scheduler: {e}")
+                 self.action_scheduler = None
+                 self.execution_queue = None
+                 self.action_executor = None
+                 progress.update(task_scheduler, advance=100)
+
+         # Display application info first (user identification)
+         self.cli.display_application_info(self.user_guid)
+
+         # Display authentication info after progress completes (only if AWS is enabled)
+         if self.authenticator:
+             account_info = self.authenticator.get_account_info()
+             if account_info:
+                 self.cli.display_aws_account_info(account_info)
+
+         # Display MCP info if enabled
+         if mcp_enabled and self.mcp_manager:
+             self.cli.display_mcp_status(self.mcp_manager)
+
+         # Display daemon status warning if daemon is not running
+         if hasattr(self, 'daemon_is_running') and not self.daemon_is_running:
+             # Check if there are any scheduled actions
+             try:
+                 actions = self.database.get_all_actions(include_disabled=False)
+                 scheduled_count = sum(1 for a in actions if a.get('schedule_type') != 'manual')
+                 if scheduled_count > 0:
+                     self.cli.print_warning(
+                         f"Daemon is not running - {scheduled_count} scheduled action(s) will NOT execute. "
+                         f"Start the daemon with: spark daemon start"
+                     )
+             except Exception:
+                 pass  # Don't fail if we can't check actions
+
+         # Display AWS Bedrock Usage Costs (after all initialization)
+         self.display_bedrock_costs()
+
+         # Sync predefined conversations if enabled
+         self.sync_predefined_conversations()
+
+         logging.info('Application components initialised successfully')
+
+     def launch_web_interface(self):
+         """
+         Launch the web interface.
+
+         Starts a FastAPI web server on localhost with one-time authentication.
+         """
+         from dtSpark.web import WebServer
+
+         # Get web interface settings
+         host = self.settings.get('interface.web.host', '127.0.0.1')
+         port = self.settings.get('interface.web.port', 0)
+         session_timeout = self.settings.get('interface.web.session_timeout_minutes', 30)
+         dark_theme = self.settings.get('interface.web.dark_theme', True)
+
+         # Get SSL settings
+         ssl_enabled = self.settings.get('interface.web.ssl.enabled', False)
+         ssl_auto_generate = self.settings.get('interface.web.ssl.auto_generate_cert', True)
+         ssl_cert_file = self.settings.get('interface.web.ssl.cert_file', 'certs/ssl_cert.pem')
+         ssl_key_file = self.settings.get('interface.web.ssl.key_file', 'certs/ssl_key.pem')
+
+         # Get browser auto-open setting
+         auto_open_browser = self.settings.get('interface.web.auto_open_browser', True)
+
+         protocol = "HTTPS" if ssl_enabled else "HTTP"
+         logging.info(f"Launching web interface on {protocol}://{host}:{port if port != 0 else 'random port'}")
+         if ssl_enabled:
+             logging.info("SSL is enabled - self-signed certificate will be used")
+
+         # Create web server
+         server = WebServer(
+             app_instance=self,
+             host=host,
+             port=port,
+             session_timeout_minutes=session_timeout,
+             dark_theme=dark_theme,
+             ssl_enabled=ssl_enabled,
+             ssl_cert_file=ssl_cert_file,
+             ssl_key_file=ssl_key_file,
+             ssl_auto_generate=ssl_auto_generate,
+             auto_open_browser=auto_open_browser,
+         )
+
+         # Get access information
+         access_info = server.get_access_info()
+
+         # Display access information in CLI
+         self.cli.print_separator("═")
+         self.cli.print_info("Web Interface Started")
+         self.cli.print_separator("═")
+         self.cli.console.print()
+
+         # Show protocol indicator
+         if access_info.get('ssl_enabled'):
+             self.cli.console.print("[bold green]🔒 HTTPS Enabled[/bold green] (Self-signed certificate)")
+             self.cli.console.print()
+
+         self.cli.console.print(f"[bold cyan]URL:[/bold cyan] [bold]{access_info['url']}[/bold]")
+         self.cli.console.print(f"[bold cyan]Authentication Code:[/bold cyan] [bold yellow]{access_info['code']}[/bold yellow]")
+         self.cli.console.print()
+
+         # Update instructions based on auto-open setting
+         if auto_open_browser:
+             self.cli.console.print("[dim]Your browser should open automatically.[/dim]")
+             self.cli.console.print("[dim]If it doesn't, open the URL above and enter the authentication code.[/dim]")
+         else:
+             self.cli.console.print("[dim]Open the URL in your web browser and enter the authentication code.[/dim]")
+
+         self.cli.console.print("[dim]The code can only be used once for security.[/dim]")
+
+         # Add note about self-signed certificates
+         if access_info.get('ssl_enabled'):
+             self.cli.console.print()
+             self.cli.console.print("[bold yellow]Note:[/bold yellow] [dim]Your browser will show a security warning for the self-signed certificate.[/dim]")
+             self.cli.console.print("[dim]This is expected. You can safely proceed past this warning.[/dim]")
+
+         self.cli.console.print()
+         self.cli.console.print("[dim]Press Ctrl+C to stop the server.[/dim]")
+         self.cli.console.print()
+         self.cli.print_separator("═")
+
+         try:
+             # Start server (blocking)
+             server.run()
+         except KeyboardInterrupt:
+             self.cli.console.print()
+             self.cli.print_info("Shutting down web server...")
+         except Exception as e:
+             logging.error(f"Web server error: {e}")
+             self.cli.print_error(f"Web server error: {e}")
+
+     def sync_predefined_conversations(self):
+         """
+         Synchronise predefined conversations from config.
+
+         This method:
+         - Checks if predefined conversations are enabled in config
+         - For each enabled predefined conversation:
+           - Calculates a config hash to detect changes
+           - Creates the conversation if it doesn't exist
+           - Updates it if the config has changed
+           - Attaches configured files
+         """
+         import json
+         import hashlib
+         from dtSpark.files import FileManager
+
+         try:
+             # Check if predefined conversations are enabled
+             predef_enabled = self.settings.get('predefined_conversations.enabled', False)
+             if not predef_enabled:
+                 logging.debug("Predefined conversations not enabled in config")
+                 return
+
+             # Get the mandatory model setting
+             mandatory_model = self._get_nested_setting('llm_providers.mandatory_model', None)
+
+             # Get list of predefined conversations
+             predefined_convs = self.settings.get('predefined_conversations.conversations', [])
+
+             if not predefined_convs:
+                 logging.debug("No predefined conversations configured")
+                 return
+
+             logging.info(f"Synchronising {len(predefined_convs)} predefined conversation(s)")
+
+             for conv_config in predefined_convs:
+                 # Skip if not enabled
+                 if not conv_config.get('enabled', True):
+                     logging.debug(f"Skipping disabled predefined conversation: {conv_config.get('name')}")
+                     continue
+
+                 name = conv_config.get('name')
+                 if not name:
+                     logging.warning("Predefined conversation missing 'name', skipping")
+                     continue
+
+                 # Load instructions from file using ResourceManager, with fallback to direct path
+                 instructions_source = conv_config.get('instructions', '')
+                 instructions = self._load_text_resource(instructions_source, f"instructions for '{name}'")
+
+                 files = conv_config.get('files', [])
+
+                 # Determine which model and provider to use
+                 # If mandatory_model is set, use it regardless of conversation config
+                 if mandatory_model:
+                     model_id = mandatory_model
+                     provider_name = self.configured_provider  # Use mandatory provider if set
+                     logging.debug(f"Using mandatory model '{model_id}' for predefined conversation '{name}'")
+                 else:
+                     model_id = conv_config.get('model')
+                     provider_name = conv_config.get('provider')  # Get provider from config
+                     if not model_id:
+                         logging.warning(f"Predefined conversation '{name}' has no model specified and no mandatory model set, skipping")
+                         continue
+
+                 # Calculate config hash (hash of name, instructions, files, model, and provider)
+                 config_data = {
+                     'name': name,
+                     'instructions': instructions,
+                     'files': sorted(files),  # Sort for consistent hashing
+                     'model': model_id,
+                     'provider': provider_name  # Include provider in hash
+                 }
+                 config_json = json.dumps(config_data, sort_keys=True)
+                 config_hash = hashlib.sha256(config_json.encode()).hexdigest()
+
+                 # Check if conversation exists
+                 existing_conv = self.database.get_predefined_conversation_by_name(name)
+
+                 if existing_conv:
+                     # Conversation exists - check if config has changed
+                     if existing_conv['config_hash'] != config_hash:
+                         logging.info(f"Predefined conversation '{name}' config changed, updating...")
+
+                         # Update the conversation
+                         self.database.update_predefined_conversation(
+                             existing_conv['id'],
+                             model_id,
+                             instructions,
+                             config_hash
+                         )
+
+                         # Delete old files and re-attach
+                         self.database.delete_conversation_files(existing_conv['id'])
+
+                         # Attach new files
+                         if files:
+                             self._attach_files_to_conversation(existing_conv['id'], files)
+
+                         logging.info(f"Updated predefined conversation '{name}'")
+                     else:
+                         logging.debug(f"Predefined conversation '{name}' unchanged")
+                 else:
+                     # Create new predefined conversation
+                     logging.info(f"Creating predefined conversation '{name}'...")
+
+                     conversation_id = self.database.create_predefined_conversation(
+                         name,
+                         model_id,
+                         instructions,
+                         config_hash
+                     )
+
+                     # Attach files
+                     if files:
+                         self._attach_files_to_conversation(conversation_id, files)
+
+                     logging.info(f"Created predefined conversation '{name}' (ID: {conversation_id})")
+
+         except Exception as e:
+             logging.error(f"Error synchronising predefined conversations: {e}")
+             logging.exception(e)
+
+     def _load_text_resource(self, source: str, description: str = "resource") -> str:
+         """
+         Load text content from a resource file using ResourceManager with fallback to direct path.
+
+         Attempts to load the resource in this order:
+         1. Via ResourceManager (for resources in package)
+         2. Direct file path (if ResourceManager returns None)
+         3. If both fail or source is empty, returns the source string as-is (inline text)
+
+         Args:
+             source: Resource path/name, file path, or inline text
+             description: Description of what's being loaded (for logging)
+
+         Returns:
+             The loaded text content or the original source string
+         """
+         if not source or not source.strip():
+             return source
+
+         # Try ResourceManager first
+         try:
+             resource_content = ResourceManager().load_resource(source)
+             if resource_content is not None:
+                 logging.info(f"Loaded {description} via ResourceManager from: {source}")
+                 return resource_content
+         except Exception as e:
+             logging.debug(f"ResourceManager could not load {description} from '{source}': {e}")
+
+         # Try direct file path
+         try:
+             import os
+             if os.path.isfile(source):
+                 with open(source, 'r', encoding='utf-8') as f:
+                     content = f.read()
+                 logging.info(f"Loaded {description} from file path: {source}")
+                 return content
+         except Exception as e:
+             logging.debug(f"Could not load {description} from file path '{source}': {e}")
+
+         # If both methods fail, assume it's inline text
+         logging.debug(f"Using inline text for {description}")
+         return source
+
+     def _attach_files_to_conversation(self, conversation_id: int, file_paths: List[str]):
+         """
+         Attach files to a conversation.
+
+         Tries to load files using ResourceManager first, then falls back to direct file path.
+
+         Args:
+             conversation_id: ID of the conversation
+             file_paths: List of file paths to attach
+         """
+         from dtSpark.files import FileManager
+         import tempfile
+         import os
+
+         file_manager = FileManager()
+
+         for file_path in file_paths:
+             try:
+                 resolved_path = file_path
+                 temp_file_path = None
+
+                 # Try ResourceManager first
+                 try:
+                     resource_content = ResourceManager().load_resource(file_path)
+                     if resource_content is not None:
+                         # ResourceManager returned content - create temp file
+                         # Extract filename from path
+                         filename = os.path.basename(file_path)
+                         # Create temporary file with same extension
+                         suffix = os.path.splitext(filename)[1] if '.' in filename else ''
+                         temp_fd, temp_file_path = tempfile.mkstemp(suffix=suffix, text=False)
+
+                         # Write content to temp file
+                         if isinstance(resource_content, str):
+                             os.write(temp_fd, resource_content.encode('utf-8'))
+                         else:
+                             os.write(temp_fd, resource_content)
+                         os.close(temp_fd)
+
+                         resolved_path = temp_file_path
+                         logging.info(f"Loaded file via ResourceManager from: {file_path}")
+                 except Exception as e:
+                     logging.debug(f"ResourceManager could not load file '{file_path}': {e}")
+                     # Will fall back to direct file path
+
+                 # Process the file (either from temp file or original path)
+                 file_info = file_manager.process_file(resolved_path)
+
+                 # Clean up temp file if created
+                 if temp_file_path and os.path.exists(temp_file_path):
+                     try:
+                         os.unlink(temp_file_path)
+                     except Exception as e:
+                         logging.warning(f"Could not delete temp file '{temp_file_path}': {e}")
+
+                 if file_info:
+                     # Add file to database
+                     self.database.add_file(
+                         conversation_id=conversation_id,
+                         filename=file_info['filename'],
+                         file_type=file_info['file_type'],
+                         file_size=file_info['file_size'],
+                         content_text=file_info.get('content_text'),
+                         content_base64=file_info.get('content_base64'),
+                         mime_type=file_info.get('mime_type'),
+                         token_count=file_info.get('token_count', 0)
+                     )
+                     logging.debug(f"Attached file '{file_info['filename']}' to conversation {conversation_id}")
+                 else:
+                     logging.warning(f"Failed to process file: {file_path}")
+             except Exception as e:
+                 logging.error(f"Error attaching file '{file_path}': {e}")
+
1241
+ def setup_wizard(self):
1242
+ """
1243
+ Interactive setup wizard to create config.yaml with all commentary.
1244
+
1245
+ Walks the user through:
1246
+ - LLM provider selection (AWS Bedrock, Ollama, Anthropic API)
1247
+ - Database selection (SQLite, MySQL, MariaDB, PostgreSQL, MSSQL)
1248
+ - Interface configuration (CLI or Web)
1249
+ - Additional features (MCP, security)
1250
+
1251
+ Creates a properly formatted config.yaml file in the user data directory.
1252
+ """
1253
+ import shutil
1254
+ from rich.panel import Panel
1255
+ from rich.prompt import Prompt, Confirm
1256
+
1257
+ # Initialize CLI interface for prompts
1258
+ from dtSpark.cli_interface import CLIInterface
1259
+ cli = CLIInterface()
1260
+
1261
+ # Initialize secrets manager for storing sensitive credentials
1262
+ from dtPyAppFramework.settings import Settings
1263
+ secret_manager = Settings().secret_manager
1264
+
1265
+ # Display SPARK splash screen
1266
+ cli.print_splash_screen(
1267
+ full_name=full_name(),
1268
+ description=module_package.description,
1269
+ version=version()
1270
+ )
1271
+
1272
+ # Display setup wizard context
1273
+ cli.console.print(Panel(
1274
+ "[bold cyan]Setup Wizard[/bold cyan]\n\n"
1275
+ "This wizard will guide you through configuring:\n"
1276
+ " • LLM Providers (AWS Bedrock, Ollama, Anthropic API)\n"
1277
+ " • Database (SQLite, MySQL, PostgreSQL, MSSQL)\n"
1278
+ " • Interface (CLI or Web)\n"
1279
+ " • Additional Features\n\n"
1280
+ "A config.yaml file will be created with all settings and documentation.",
1281
+ border_style="cyan",
1282
+ padding=(1, 2)
1283
+ ))
1284
+ cli.console.print()
1285
+
1286
+ # Destination: user data directory
1287
+ user_data_path = ApplicationPaths().usr_data_root_path
1288
+ dest_config_dir = os.path.join(user_data_path, 'config')
1289
+ dest_config = os.path.join(dest_config_dir, 'config.yaml')
1290
+
1291
+ cli.print_info(f"Configuration will be created at: {dest_config}")
1292
+ cli.console.print()
1293
+
1294
+ # Check if config already exists
1295
+ if os.path.exists(dest_config):
1296
+ cli.print_warning("A configuration file already exists!")
1297
+ if not Confirm.ask("[bold yellow]Do you want to overwrite it?[/bold yellow]", default=False):
1298
+ cli.print_info("Setup cancelled. Existing configuration unchanged.")
1299
+ return
1300
+ cli.console.print()
1301
+
1302
+ # ═══════════════════════════════════════════════════════════════
1303
+ # LLM Provider Selection
1304
+ # ═══════════════════════════════════════════════════════════════
1305
+ cli.print_separator("═")
1306
+ cli.console.print("[bold]LLM Provider Selection[/bold]")
1307
+ cli.print_separator("═")
1308
+ cli.console.print()
1309
+ cli.console.print("Select which LLM providers you want to configure:")
1310
+ cli.console.print()
1311
+
1312
+ # AWS Bedrock
1313
+ use_aws_bedrock = Confirm.ask("Do you wish to use AWS Bedrock?", default=True)
1314
+
1315
+ # Ollama
1316
+ use_ollama = Confirm.ask("Do you wish to use Ollama (local LLM server)?", default=False)
1317
+
1318
+ # Anthropic Direct API
1319
+ use_anthropic = Confirm.ask("Do you wish to use Anthropic Direct API?", default=False)
1320
+
1321
+ # Ensure at least one provider is selected
1322
+ if not (use_aws_bedrock or use_ollama or use_anthropic):
1323
+ cli.print_error("You must enable at least one LLM provider!")
1324
+ cli.print_info("Setup cancelled.")
1325
+ return
1326
+
1327
+ # ═══════════════════════════════════════════════════════════════
1328
+ # AWS Bedrock Configuration
1329
+ # ═══════════════════════════════════════════════════════════════
1330
+ aws_profile = "default"
1331
+ aws_region = "us-east-1"
1332
+ enable_cost_tracking = False
1333
+
1334
+ if use_aws_bedrock:
1335
+ cli.console.print()
1336
+ cli.print_separator("═")
1337
+ cli.console.print("[bold]AWS Bedrock Configuration[/bold]")
1338
+ cli.print_separator("═")
1339
+ cli.console.print()
1340
+
1341
+ aws_profile = Prompt.ask(
1342
+ "AWS SSO profile name",
1343
+ default="default"
1344
+ )
1345
+
1346
+ aws_region = Prompt.ask(
1347
+ "AWS region",
1348
+ default="us-east-1"
1349
+ )
1350
+
1351
+ enable_cost_tracking = Confirm.ask(
1352
+ "Enable AWS cost tracking?",
1353
+ default=False
1354
+ )
1355
+
1356
+ # ═══════════════════════════════════════════════════════════════
1357
+ # Ollama Configuration
1358
+ # ═══════════════════════════════════════════════════════════════
1359
+ ollama_base_url = "http://localhost:11434"
1360
+
1361
+ if use_ollama:
1362
+ cli.console.print()
1363
+ cli.print_separator("═")
1364
+ cli.console.print("[bold]Ollama Configuration[/bold]")
1365
+ cli.print_separator("═")
1366
+ cli.console.print()
1367
+
1368
+ ollama_base_url = Prompt.ask(
1369
+ "Ollama base URL",
1370
+ default="http://localhost:11434"
1371
+ )
1372
+
1373
+ # ═══════════════════════════════════════════════════════════════
1374
+ # Anthropic Direct API Configuration
1375
+ # ═══════════════════════════════════════════════════════════════
1376
+ anthropic_api_key = ""
1377
+
1378
+ if use_anthropic:
1379
+ cli.console.print()
1380
+ cli.print_separator("═")
1381
+ cli.console.print("[bold]Anthropic Direct API Configuration[/bold]")
1382
+ cli.print_separator("═")
1383
+ cli.console.print()
1384
+
1385
+ anthropic_api_key_input = Prompt.ask(
1386
+ "Anthropic API key (or press Enter to set via environment variable later)",
1387
+ default=""
1388
+ )
1389
+
1390
+ # Store API key in secrets manager if provided
1391
+ if anthropic_api_key_input:
1392
+ secret_manager.set_secret("anthropic_api_key", anthropic_api_key_input)
1393
+ anthropic_api_key = "SEC/anthropic_api_key"
1394
+ cli.print_success("✓ Anthropic API key securely stored in secrets manager")
1395
+ else:
1396
+ anthropic_api_key = ""
1397
+
1398
+ # ═══════════════════════════════════════════════════════════════
1399
+ # Database Configuration
1400
+ # ═══════════════════════════════════════════════════════════════
1401
+ cli.console.print()
1402
+ cli.print_separator("═")
1403
+ cli.console.print("[bold]Database Configuration[/bold]")
1404
+ cli.print_separator("═")
1405
+ cli.console.print()
1406
+
1407
+ database_choices = {
1408
+ "1": "sqlite",
1409
+ "2": "mysql",
1410
+ "3": "mariadb",
1411
+ "4": "postgresql",
1412
+ "5": "mssql"
1413
+ }
1414
+ cli.console.print(" [1] SQLite (local file, no setup required) [default]")
1415
+ cli.console.print(" [2] MySQL (remote database server)")
1416
+ cli.console.print(" [3] MariaDB (remote database server)")
1417
+ cli.console.print(" [4] PostgreSQL (remote database server)")
1418
+ cli.console.print(" [5] Microsoft SQL Server (remote database server)")
1419
+ cli.console.print()
1420
+ database_choice = Prompt.ask("Select database type", choices=["1", "2", "3", "4", "5"], default="1")
1421
+ database_type = database_choices[database_choice]
1422
+
1423
+ # Remote database configuration (MySQL, MariaDB, PostgreSQL, MSSQL)
1424
+ db_host = "localhost"
1425
+ db_port = "3306"
1426
+ db_database = "dtawsbedrockcli"
1427
+ db_username = "null"
1428
+ db_password = "null"
1429
+ db_ssl = False
1430
+ db_driver = "ODBC Driver 17 for SQL Server"
1431
+
1432
+ if database_type != "sqlite":
1433
+ cli.console.print()
1434
+ cli.console.print("[dim]Configure connection details for remote database:[/dim]")
1435
+ cli.console.print()
1436
+
1437
+ # Default ports
1438
+ default_ports = {
1439
+ "mysql": "3306",
1440
+ "mariadb": "3306",
1441
+ "postgresql": "5432",
1442
+ "mssql": "1433"
1443
+ }
1444
+
1445
+ db_host = Prompt.ask("Database host", default="localhost")
1446
+ db_port = Prompt.ask("Database port", default=default_ports.get(database_type, "3306"))
1447
+ db_database = Prompt.ask("Database name", default="dtawsbedrockcli")
1448
+
1449
+ cli.console.print()
1450
+ cli.console.print("[dim]Leave username/password empty (null) to be prompted on startup (more secure)[/dim]")
1451
+ db_username_input = Prompt.ask("Database username (or press Enter for null)", default="")
1452
+
1453
+ # Store database username in secrets manager if provided
1454
+ if db_username_input:
1455
+ secret_key = f"db_{database_type}_username"
1456
+ secret_manager.set_secret(secret_key, db_username_input)
1457
+ db_username = f"SEC/{secret_key}"
1458
+ cli.print_success(f"✓ Database username securely stored in secrets manager")
1459
+ else:
1460
+ db_username = "null"
1461
+
1462
+ db_password_input = Prompt.ask("Database password (or press Enter for null)", default="", password=True)
1463
+
1464
+ # Store database password in secrets manager if provided
1465
+ if db_password_input:
1466
+ secret_key = f"db_{database_type}_password"
1467
+ secret_manager.set_secret(secret_key, db_password_input)
1468
+ db_password = f"SEC/{secret_key}"
1469
+ cli.print_success(f"✓ Database password securely stored in secrets manager")
1470
+ else:
1471
+ db_password = "null"
1472
+
1473
+ db_ssl = Confirm.ask("Use SSL/TLS connection?", default=False)
1474
+
1475
+ # MSSQL-specific: ODBC driver
1476
+ if database_type == "mssql":
1477
+ cli.console.print()
1478
+ db_driver = Prompt.ask(
1479
+ "ODBC driver name",
1480
+ default="ODBC Driver 17 for SQL Server"
1481
+ )
1482
+
1483
+ cli.console.print()
1484
+ cli.print_separator("═")
1485
+ cli.console.print("[bold]Interface Configuration[/bold]")
1486
+ cli.print_separator("═")
1487
+ cli.console.print()
1488
+
1489
+ interface_choices = {
1490
+ "1": "cli",
1491
+ "2": "web"
1492
+ }
1493
+ cli.console.print(" [1] CLI (Command-line interface)")
1494
+ cli.console.print(" [2] Web (Browser-based interface)")
1495
+ interface_choice = Prompt.ask("Select interface type", choices=["1", "2"], default="1")
1496
+ interface_type = interface_choices[interface_choice]
1497
+
1498
+ # Web-specific settings
1499
+ web_ssl_enabled = False
1500
+ web_dark_theme = True
1501
+ if interface_type == "web":
1502
+ cli.console.print()
1503
+ web_ssl_enabled = Confirm.ask("Enable HTTPS with self-signed certificate?", default=True)
1504
+ web_dark_theme = Confirm.ask("Use dark theme?", default=True)
1505
+
1506
+ cli.console.print()
1507
+ cli.print_separator("═")
1508
+ cli.console.print("[bold]Model Configuration[/bold]")
1509
+ cli.print_separator("═")
1510
+ cli.console.print()
1511
+
1512
+ max_tokens = Prompt.ask(
1513
+ "Maximum tokens per response",
1514
+ default="8192"
1515
+ )
1516
+
1517
+ temperature = Prompt.ask(
1518
+ "Temperature (0.0-1.0, higher = more creative)",
1519
+ default="0.7"
1520
+ )
1521
+
1522
+ cli.console.print()
1523
+ cli.print_separator("═")
1524
+ cli.console.print("[bold]Additional Features[/bold]")
1525
+ cli.print_separator("═")
1526
+ cli.console.print()
1527
+
1528
+ enable_mcp = Confirm.ask("Enable MCP (Model Context Protocol) integration?", default=True)
1529
+
1530
+ # ═══════════════════════════════════════════════════════════════
1531
+ # Embedded Filesystem Tools
1532
+ # ═══════════════════════════════════════════════════════════════
1533
+ cli.console.print()
1534
+ enable_filesystem_tools = Confirm.ask("Enable embedded filesystem tools?", default=False)
1535
+
1536
+ # Default values
1537
+ filesystem_allowed_path = "./running"
1538
+ filesystem_access_mode = "read_write"
1539
+
1540
+ if enable_filesystem_tools:
1541
+ cli.console.print()
1542
+ cli.console.print("[dim]Embedded filesystem tools provide LLM access to local files.[/dim]")
1543
+ cli.console.print()
1544
+
1545
+ filesystem_allowed_path = Prompt.ask(
1546
+ "Allowed directory path (tools can only access files within this directory)",
1547
+ default="./running"
1548
+ )
1549
+
1550
+ # Access mode
1551
+ access_mode_choices = {
1552
+ "1": "read",
1553
+ "2": "read_write"
1554
+ }
1555
+ cli.console.print()
1556
+ cli.console.print(" [1] Read - Read-only access (list, search, read files)")
1557
+ cli.console.print(" [2] Read/Write - Full access (read + write files, create directories)")
1558
+ cli.console.print()
1559
+ access_mode_choice = Prompt.ask(
1560
+ "Select access mode",
1561
+ choices=["1", "2"],
1562
+ default="2"
1563
+ )
1564
+ filesystem_access_mode = access_mode_choices[access_mode_choice]
1565
+
1566
+ # ═══════════════════════════════════════════════════════════════
1567
+ # Tool Permissions
1568
+ # ═══════════════════════════════════════════════════════════════
1569
+ cli.console.print()
1570
+ cli.console.print("[dim]Tool permissions control how the LLM uses tools (MCP servers, embedded tools).[/dim]")
1571
+ auto_approve_tools = Confirm.ask(
1572
+ "Auto-approve all tools without prompting (recommended for trusted environments only)?",
1573
+ default=False
1574
+ )
1575
+
1576
+ # ═══════════════════════════════════════════════════════════════
1577
+ # Prompt Inspection (Cyber Security)
1578
+ # ═══════════════════════════════════════════════════════════════
1579
+ cli.console.print()
1580
+ enable_prompt_inspection = Confirm.ask("Enable prompt security inspection (Cyber Security)?", default=False)
1581
+
1582
+ # Default values
1583
+ inspection_level = "basic"
1584
+ inspection_action = "warn"
1585
+ llm_inspection_enabled = False
1586
+ llm_inspection_model = "anthropic.claude-3-haiku-20240307-v1:0"
1587
+ llm_inspection_provider = "AWS Bedrock"
1588
+ llm_inspection_confidence = "0.7"
1589
+
1590
+ if enable_prompt_inspection:
1591
+ cli.console.print()
1592
+ cli.console.print("[dim]Prompt inspection detects and mitigates security risks in user prompts.[/dim]")
1593
+ cli.console.print()
1594
+
1595
+ # Inspection level
1596
+ inspection_level_choices = {
1597
+ "1": "basic",
1598
+ "2": "standard",
1599
+ "3": "strict"
1600
+ }
1601
+ cli.console.print(" [1] Basic - Fast pattern matching only")
1602
+ cli.console.print(" [2] Standard - Pattern matching + keyword analysis")
1603
+ cli.console.print(" [3] Strict - Pattern matching + LLM semantic analysis")
1604
+ cli.console.print()
1605
+ inspection_level_choice = Prompt.ask(
1606
+ "Select inspection level",
1607
+ choices=["1", "2", "3"],
1608
+ default="1"
1609
+ )
1610
+ inspection_level = inspection_level_choices[inspection_level_choice]
1611
+
1612
+ # Action on violations
1613
+ cli.console.print()
1614
+ inspection_action_choices = {
1615
+ "1": "warn",
1616
+ "2": "block",
1617
+ "3": "sanitise",
1618
+ "4": "log_only"
1619
+ }
1620
+ cli.console.print(" [1] Warn - Show warning and ask for confirmation [default]")
1621
+ cli.console.print(" [2] Block - Reject prompt completely")
1622
+ cli.console.print(" [3] Sanitise - Attempt to clean the prompt (with confirmation)")
1623
+ cli.console.print(" [4] Log only - Log violation but allow prompt")
1624
+ cli.console.print()
1625
+ inspection_action_choice = Prompt.ask(
1626
+ "Action when violations detected",
1627
+ choices=["1", "2", "3", "4"],
1628
+ default="1"
1629
+ )
1630
+ inspection_action = inspection_action_choices[inspection_action_choice]
1631
+
1632
+ # LLM-based inspection (for strict level)
1633
+ if inspection_level == "strict":
1634
+ cli.console.print()
1635
+ cli.console.print("[dim]Strict level can use LLM for semantic analysis of prompts.[/dim]")
1636
+ llm_inspection_enabled = Confirm.ask(
1637
+ "Enable LLM-based semantic analysis?",
1638
+ default=True
1639
+ )
1640
+
1641
+ if llm_inspection_enabled:
1642
+ cli.console.print()
1643
+ llm_inspection_model = Prompt.ask(
1644
+ "LLM model for analysis (fast, cheap model recommended)",
1645
+ default="anthropic.claude-3-haiku-20240307-v1:0"
1646
+ )
1647
+
1648
+ # Provider selection
1649
+ llm_provider_choices = {
1650
+ "1": "AWS Bedrock",
1651
+ "2": "Ollama",
1652
+ "3": "Anthropic Direct"
1653
+ }
1654
+ cli.console.print()
1655
+ cli.console.print(" [1] AWS Bedrock [default]")
1656
+ cli.console.print(" [2] Ollama")
1657
+ cli.console.print(" [3] Anthropic Direct")
1658
+ cli.console.print()
1659
+ llm_provider_choice = Prompt.ask(
1660
+ "Select provider for LLM inspection",
1661
+ choices=["1", "2", "3"],
1662
+ default="1"
1663
+ )
1664
+ llm_inspection_provider = llm_provider_choices[llm_provider_choice]
1665
+
1666
+ cli.console.print()
1667
+ llm_inspection_confidence = Prompt.ask(
1668
+ "Confidence threshold (0.0-1.0, higher = more strict)",
1669
+ default="0.7"
1670
+ )
1671
+
1672
+ # Load template config from package resources
1673
+ cli.console.print()
1674
+ cli.print_info("Generating configuration file...")
1675
+
1676
+ try:
1677
+ # Load template from package resources
1678
+ config_content = ResourceManager().load_resource('config.yaml.template')
1679
+ if config_content is None:
1680
+ raise FileNotFoundError("Configuration template not found in package resources")
1681
+
1682
+ # Replace placeholders with user values
1683
+ import re
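+ # Each pattern below captures the YAML key (and, where needed, its section header)
+ # in group 1 and re-emits it with \g<1>, so only the value is rewritten, e.g.
+ # re.sub(r'(max_tokens:\s+)\d+', '\\g<1>8192', text) replaces just the number.
+ # The non-greedy (?:.*\n)*? scans forward from a section header such as mysql:
+ # to the first matching key that follows it.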
1684
+
1685
+ # Database settings
1686
+ config_content = re.sub(
1687
+ r'(type:\s+)(sqlite|mysql|mariadb|postgresql|mssql)(\s+#)',
1688
+ f'\\g<1>{database_type}\\g<3>',
1689
+ config_content
1690
+ )
1691
+
1692
+ # MySQL/MariaDB settings
1693
+ if database_type in ["mysql", "mariadb"]:
1694
+ config_content = re.sub(
1695
+ r'(mysql:\s*\n\s+host:\s+)[^\n]+',
1696
+ f'\\g<1>{db_host}',
1697
+ config_content
1698
+ )
1699
+ config_content = re.sub(
1700
+ r'(mysql:\s*\n(?:.*\n)*?\s+port:\s+)\d+',
1701
+ f'\\g<1>{db_port}',
1702
+ config_content
1703
+ )
1704
+ config_content = re.sub(
1705
+ r'(mysql:\s*\n(?:.*\n)*?\s+database:\s+)[^\n]+',
1706
+ f'\\g<1>{db_database}',
1707
+ config_content
1708
+ )
1709
+ config_content = re.sub(
1710
+ r'(mysql:\s*\n(?:.*\n)*?\s+username:\s+)[^\s#]+',
1711
+ f'\\g<1>{db_username}',
1712
+ config_content
1713
+ )
1714
+ config_content = re.sub(
1715
+ r'(mysql:\s*\n(?:.*\n)*?\s+password:\s+)[^\s#]+',
1716
+ f'\\g<1>{db_password}',
1717
+ config_content
1718
+ )
1719
+ config_content = re.sub(
1720
+ r'(mysql:\s*\n(?:.*\n)*?\s+ssl:\s+)(true|false)',
1721
+ f'\\g<1>{str(db_ssl).lower()}',
1722
+ config_content
1723
+ )
1724
+
1725
+ # PostgreSQL settings
1726
+ if database_type == "postgresql":
1727
+ config_content = re.sub(
1728
+ r'(postgresql:\s*\n\s+host:\s+)[^\n]+',
1729
+ f'\\g<1>{db_host}',
1730
+ config_content
1731
+ )
1732
+ config_content = re.sub(
1733
+ r'(postgresql:\s*\n(?:.*\n)*?\s+port:\s+)\d+',
1734
+ f'\\g<1>{db_port}',
1735
+ config_content
1736
+ )
1737
+ config_content = re.sub(
1738
+ r'(postgresql:\s*\n(?:.*\n)*?\s+database:\s+)[^\n]+',
1739
+ f'\\g<1>{db_database}',
1740
+ config_content
1741
+ )
1742
+ config_content = re.sub(
1743
+ r'(postgresql:\s*\n(?:.*\n)*?\s+username:\s+)[^\s#]+',
1744
+ f'\\g<1>{db_username}',
1745
+ config_content
1746
+ )
1747
+ config_content = re.sub(
1748
+ r'(postgresql:\s*\n(?:.*\n)*?\s+password:\s+)[^\s#]+',
1749
+ f'\\g<1>{db_password}',
1750
+ config_content
1751
+ )
1752
+ config_content = re.sub(
1753
+ r'(postgresql:\s*\n(?:.*\n)*?\s+ssl:\s+)(true|false)',
1754
+ f'\\g<1>{str(db_ssl).lower()}',
1755
+ config_content
1756
+ )
1757
+
1758
+ # MSSQL settings
1759
+ if database_type == "mssql":
1760
+ config_content = re.sub(
1761
+ r'(mssql:\s*\n\s+host:\s+)[^\n]+',
1762
+ f'\\g<1>{db_host}',
1763
+ config_content
1764
+ )
1765
+ config_content = re.sub(
1766
+ r'(mssql:\s*\n(?:.*\n)*?\s+port:\s+)\d+',
1767
+ f'\\g<1>{db_port}',
1768
+ config_content
1769
+ )
1770
+ config_content = re.sub(
1771
+ r'(mssql:\s*\n(?:.*\n)*?\s+database:\s+)[^\n]+',
1772
+ f'\\g<1>{db_database}',
1773
+ config_content
1774
+ )
1775
+ config_content = re.sub(
1776
+ r'(mssql:\s*\n(?:.*\n)*?\s+username:\s+)[^\s#]+',
1777
+ f'\\g<1>{db_username}',
1778
+ config_content
1779
+ )
1780
+ config_content = re.sub(
1781
+ r'(mssql:\s*\n(?:.*\n)*?\s+password:\s+)[^\s#]+',
1782
+ f'\\g<1>{db_password}',
1783
+ config_content
1784
+ )
1785
+ config_content = re.sub(
1786
+ r'(mssql:\s*\n(?:.*\n)*?\s+ssl:\s+)(true|false)',
1787
+ f'\\g<1>{str(db_ssl).lower()}',
1788
+ config_content
1789
+ )
1790
+ config_content = re.sub(
1791
+ r'(mssql:\s*\n(?:.*\n)*?\s+driver:\s+")[^"]+(")',
1792
+ f'\\g<1>{db_driver}\\g<2>',
1793
+ config_content
1794
+ )
1795
+
1796
+ # LLM Provider settings
1797
+ config_content = re.sub(
1798
+ r'(aws_bedrock:\s*\n\s+enabled:\s+)(true|false)',
1799
+ f'\\g<1>{str(use_aws_bedrock).lower()}',
1800
+ config_content
1801
+ )
1802
+ config_content = re.sub(
1803
+ r'(ollama:\s*\n\s+enabled:\s+)(true|false)',
1804
+ f'\\g<1>{str(use_ollama).lower()}',
1805
+ config_content
1806
+ )
1807
+ config_content = re.sub(
1808
+ r'(anthropic:\s*\n\s+enabled:\s+)(true|false)',
1809
+ f'\\g<1>{str(use_anthropic).lower()}',
1810
+ config_content
1811
+ )
1812
+
1813
+ # AWS settings
1814
+ config_content = re.sub(
1815
+ r'(sso_profile:\s+)[^\s#]+',
1816
+ f'\\g<1>{aws_profile}',
1817
+ config_content
1818
+ )
1819
+ config_content = re.sub(
1820
+ r'(region:\s+)[^\s#]+',
1821
+ f'\\g<1>{aws_region}',
1822
+ config_content
1823
+ )
1824
+ config_content = re.sub(
1825
+ r'(cost_tracking:\s*\n\s+enabled:\s+)(true|false)',
1826
+ f'\\g<1>{str(enable_cost_tracking).lower()}',
1827
+ config_content
1828
+ )
1829
+
1830
+ # Ollama settings
1831
+ if use_ollama:
1832
+ config_content = re.sub(
1833
+ r'(base_url:\s+")[^"]+(")',
1834
+ f'\\g<1>{ollama_base_url}\\g<2>',
1835
+ config_content
1836
+ )
1837
+
1838
+ # Anthropic settings
1839
+ if use_anthropic and anthropic_api_key:
1840
+ config_content = re.sub(
1841
+ r'(api_key:\s+")[^"]*(")',
1842
+ f'\\g<1>{anthropic_api_key}\\g<2>',
1843
+ config_content
1844
+ )
1845
+
1846
+ # Interface settings
1847
+ config_content = re.sub(
1848
+ r'(type:\s+)(cli|web)(\s+#)',
1849
+ f'\\g<1>{interface_type}\\g<3>',
1850
+ config_content
1851
+ )
1852
+
1853
+ # Web settings
1854
+ if interface_type == "web":
1855
+ config_content = re.sub(
1856
+ r'(ssl:\s*\n\s+enabled:\s+)(true|false)',
1857
+ f'\\g<1>{str(web_ssl_enabled).lower()}',
1858
+ config_content
1859
+ )
1860
+ config_content = re.sub(
1861
+ r'(dark_theme:\s+)(true|false)',
1862
+ f'\\g<1>{str(web_dark_theme).lower()}',
1863
+ config_content
1864
+ )
1865
+
1866
+ # Model settings
1867
+ config_content = re.sub(
1868
+ r'(max_tokens:\s+)\d+',
1869
+ f'\\g<1>{max_tokens}',
1870
+ config_content
1871
+ )
1872
+ config_content = re.sub(
1873
+ r'(temperature:\s+)[\d.]+',
1874
+ f'\\g<1>{temperature}',
1875
+ config_content
1876
+ )
1877
+
1878
+ # Features
1879
+ config_content = re.sub(
1880
+ r'(mcp_config:\s*\n\s+enabled:\s+)(true|false)',
1881
+ f'\\g<1>{str(enable_mcp).lower()}',
1882
+ config_content
1883
+ )
1884
+
1885
+ # Embedded Filesystem Tools
1886
+ config_content = re.sub(
1887
+ r'(filesystem:\s*\n\s+enabled:\s+)(true|false)',
1888
+ f'\\g<1>{str(enable_filesystem_tools).lower()}',
1889
+ config_content
1890
+ )
1891
+ if enable_filesystem_tools:
1892
+ # Allowed path - need to handle both Unix and Windows paths
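+ # Converting backslashes to forward slashes keeps Windows paths from introducing
+ # escape sequences into the re.sub replacement string below.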
1893
+ escaped_path = filesystem_allowed_path.replace('\\', '/')
1894
+ config_content = re.sub(
1895
+ r'(allowed_path:\s+)[^\s#]+',
1896
+ f'\\g<1>{escaped_path}',
1897
+ config_content
1898
+ )
1899
+ # Access mode
1900
+ config_content = re.sub(
1901
+ r'(access_mode:\s+)(read|read_write)',
1902
+ f'\\g<1>{filesystem_access_mode}',
1903
+ config_content
1904
+ )
1905
+
1906
+ # Tool Permissions
1907
+ config_content = re.sub(
1908
+ r'(tool_permissions:\s*\n\s+auto_approve:\s+)(true|false)',
1909
+ f'\\g<1>{str(auto_approve_tools).lower()}',
1910
+ config_content
1911
+ )
1912
+
1913
+ # Prompt Inspection settings
1914
+ config_content = re.sub(
1915
+ r'(prompt_inspection:\s*\n\s+enabled:\s+)(true|false)',
1916
+ f'\\g<1>{str(enable_prompt_inspection).lower()}',
1917
+ config_content
1918
+ )
1919
+
1920
+ if enable_prompt_inspection:
1921
+ # Inspection level
1922
+ config_content = re.sub(
1923
+ r'(inspection_level:\s+)(basic|standard|strict)',
1924
+ f'\\g<1>{inspection_level}',
1925
+ config_content
1926
+ )
1927
+ # Action
1928
+ config_content = re.sub(
1929
+ r'(action:\s+)(warn|block|sanitise|log_only)',
1930
+ f'\\g<1>{inspection_action}',
1931
+ config_content
1932
+ )
1933
+ # LLM inspection enabled
1934
+ config_content = re.sub(
1935
+ r'(llm_inspection:\s*\n\s+enabled:\s+)(true|false)',
1936
+ f'\\g<1>{str(llm_inspection_enabled).lower()}',
1937
+ config_content
1938
+ )
1939
+ # LLM inspection settings (only if enabled)
1940
+ if llm_inspection_enabled:
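+ # re.DOTALL lets .*? match across newlines so these patterns can reach the
+ # model, provider and confidence_threshold keys nested under llm_inspection:.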
1941
+ config_content = re.sub(
1942
+ r'(llm_inspection:.*?model:\s+)[^\n]+',
1943
+ f'\\g<1>{llm_inspection_model}',
1944
+ config_content,
1945
+ flags=re.DOTALL
1946
+ )
1947
+ config_content = re.sub(
1948
+ r'(llm_inspection:.*?provider:\s+)[^\n]+',
1949
+ f'\\g<1>{llm_inspection_provider}',
1950
+ config_content,
1951
+ flags=re.DOTALL
1952
+ )
1953
+ config_content = re.sub(
1954
+ r'(llm_inspection:.*?confidence_threshold:\s+)[\d.]+',
1955
+ f'\\g<1>{llm_inspection_confidence}',
1956
+ config_content,
1957
+ flags=re.DOTALL
1958
+ )
1959
+
1960
+ # Create destination directory if it doesn't exist
1961
+ os.makedirs(dest_config_dir, exist_ok=True)
1962
+
1963
+ # Write config file
1964
+ with open(dest_config, 'w', encoding='utf-8') as f:
1965
+ f.write(config_content)
1966
+
1967
+ cli.console.print()
1968
+ cli.print_success(f"✓ Configuration file created successfully!")
1969
+ cli.console.print()
1970
+ cli.print_info(f"Location: {dest_config}")
1971
+ cli.console.print()
1972
+
1973
+ # Display summary of secrets stored
1974
+ secrets_stored = []
1975
+ if use_anthropic and anthropic_api_key.startswith("SEC/"):
1976
+ secrets_stored.append("• Anthropic API key")
1977
+ if database_type != "sqlite":
1978
+ if db_username.startswith("SEC/"):
1979
+ secrets_stored.append(f"• {database_type.upper()} database username")
1980
+ if db_password.startswith("SEC/"):
1981
+ secrets_stored.append(f"• {database_type.upper()} database password")
1982
+
1983
+ if secrets_stored:
1984
+ cli.console.print()
1985
+ cli.print_separator("─")
1986
+ cli.console.print("[bold green]Secrets Securely Stored:[/bold green]")
1987
+ for secret in secrets_stored:
1988
+ cli.console.print(f" {secret}")
1989
+ cli.console.print()
1990
+ cli.console.print("[dim]These credentials are stored in the secrets manager and[/dim]")
1991
+ cli.console.print("[dim]referenced in config.yaml as SEC/<key_name> for security.[/dim]")
1992
+ cli.print_separator("─")
1993
+ cli.console.print()
1994
+
1995
+ cli.console.print("[dim]You can manually edit this file to customise additional settings.[/dim]")
1996
+ cli.console.print("[dim]All configuration options are documented with comments in the file.[/dim]")
1997
+ cli.console.print()
1998
+
1999
+ except Exception as e:
2000
+ cli.print_error(f"Failed to create configuration: {e}")
2001
+ logging.exception("Setup wizard error")
2002
+ return
2003
+
2004
+ def define_args(self, arg_parser: ArgumentParser):
2005
+ """Define command-line arguments."""
2006
+ arg_parser.add_argument(
2007
+ '--setup',
2008
+ action='store_true',
2009
+ help='Run interactive setup wizard to create config.yaml'
2010
+ )
2011
+
2012
+ def display_bedrock_costs(self):
2013
+ """Display AWS Bedrock usage costs if available."""
2014
+ if self.bedrock_costs and self.cost_tracker:
2015
+ self.cli.display_bedrock_costs(self.bedrock_costs)
2016
+ else:
2017
+ logging.debug("No Bedrock cost information available to display")
2018
+
2019
+ def regather_and_display_costs(self):
2020
+ """Re-gather AWS Bedrock cost information and display it."""
2021
+ # Check if cost tracking is enabled (new path with legacy fallback)
2022
+ cost_tracking_enabled = self._get_nested_setting('llm_providers.aws_bedrock.cost_tracking.enabled', None)
2023
+ if cost_tracking_enabled is None:
2024
+ cost_tracking_enabled = self.settings.get('aws.cost_tracking.enabled', False)
2025
+ if not cost_tracking_enabled:
2026
+ self.cli.print_warning("Cost tracking is disabled in configuration")
2027
+ return
2028
+
2029
+ if not self.authenticator:
2030
+ self.cli.print_warning("AWS Bedrock is not configured - cannot retrieve costs")
2031
+ return
2032
+
2033
+ self.cli.print_info("Re-gathering AWS Bedrock cost information...")
2034
+
2035
+ try:
2036
+ if not hasattr(self, 'cost_tracker') or self.cost_tracker is None:
2037
+ cost_explorer_client = self.authenticator.get_client('ce')
2038
+ from dtSpark.aws import CostTracker
2039
+ self.cost_tracker = CostTracker(cost_explorer_client)
2040
+
2041
+ with self.cli.status_indicator("Retrieving usage costs..."):
2042
+ self.bedrock_costs = self.cost_tracker.get_bedrock_costs()
2043
+
2044
+ if self.bedrock_costs:
2045
+ self.display_bedrock_costs()
2046
+ self.cli.print_success("Cost information updated successfully")
2047
+ else:
2048
+ self.cli.print_warning("No cost information available")
2049
+ except Exception as e:
2050
+ logging.error(f"Error retrieving cost information: {e}")
2051
+ self.cli.print_error(f"Failed to retrieve cost information: {e}")
2052
+
2053
+ def start_new_conversation(self) -> bool:
2054
+ """
2055
+ Create a new conversation.
2056
+
2057
+ Returns:
2058
+ True if conversation was created successfully, False otherwise
2059
+ """
2060
+ # Get conversation name
2061
+ conv_name = self.cli.get_input("\nEnter a name for this conversation")
2062
+ if not conv_name:
2063
+ self.cli.print_error("Conversation name cannot be empty")
2064
+ return False
2065
+
2066
+ # Select model for this conversation (or use configured model)
2067
+ if self.configured_model_id:
2068
+ # Model is locked via configuration
2069
+ model_id = self.configured_model_id
2070
+ try:
2071
+ self.llm_manager.set_model(model_id, self.configured_provider)
2072
+ # Update references after model change
2073
+ self.bedrock_service = self.llm_manager.get_active_service()
2074
+ self.conversation_manager.bedrock_service = self.bedrock_service
2075
+ provider_info = f" via {self.configured_provider}" if self.configured_provider else ""
2076
+ self.cli.print_info(f"Using configured model: {model_id}{provider_info}")
2077
+ except ValueError as e:
2078
+ self.cli.print_error(f"Failed to set configured model: {e}")
2079
+ if self.configured_provider:
2080
+ self.cli.print_info(f"Provider '{self.configured_provider}' may not be enabled or model not available")
2081
+ return False
2082
+ else:
2083
+ # Allow user to select model
2084
+ model_id = self.select_model()
2085
+ if not model_id or model_id == 'QUIT':
2086
+ self.cli.print_error("Model selection cancelled")
2087
+ return False
2088
+
2089
+ # Ask if user wants to provide instructions
2090
+ provide_instructions = self.cli.confirm("Would you like to provide instructions for this conversation?")
2091
+ instructions = None
2092
+
2093
+ if provide_instructions:
2094
+ instructions = self.cli.get_multiline_input("Enter instructions/system prompt for this conversation")
2095
+ if not instructions.strip():
2096
+ instructions = None
2097
+
2098
+ # Ask for compaction threshold
2099
+ default_threshold = self.settings.get('conversation.rollup_threshold', 0.3)
2100
+ compaction_threshold = None # None means use global default
2101
+
2102
+ self.cli.print_info(f"\nContext compaction triggers when token usage reaches a percentage of the model's context window.")
2103
+ self.cli.print_info(f"Lower values compact sooner (reduces costs), higher values preserve more context.")
2104
+ self.cli.print_info(f"Default: {default_threshold:.0%} of context window")
2105
+
2106
+ custom_threshold = self.cli.confirm("Would you like to set a custom compaction threshold?")
2107
+
2108
+ if custom_threshold:
2109
+ while True:
2110
+ threshold_input = self.cli.get_input(f"Enter threshold (0.1-1.0) [default: {default_threshold}]")
2111
+ if not threshold_input.strip():
2112
+ # User pressed enter, use default
2113
+ compaction_threshold = default_threshold
2114
+ self.cli.print_info(f"Using default threshold: {default_threshold:.0%}")
2115
+ break
2116
+ try:
2117
+ threshold_value = float(threshold_input)
2118
+ if 0.1 <= threshold_value <= 1.0:
2119
+ compaction_threshold = threshold_value
2120
+ self.cli.print_success(f"Compaction threshold set to: {threshold_value:.0%}")
2121
+ break
2122
+ else:
2123
+ self.cli.print_error("Threshold must be between 0.1 and 1.0")
2124
+ except ValueError:
2125
+ self.cli.print_error("Please enter a valid number between 0.1 and 1.0")
2126
+
2127
+ # Ask if user wants to attach files
2128
+ from dtSpark.files import FileManager
2129
+ supported_extensions = FileManager.get_supported_extensions()
2130
+ file_paths = self.cli.get_file_attachments(supported_extensions)
2131
+
2132
+ # Create the conversation
2133
+ conversation_id = self.conversation_manager.create_conversation(
2134
+ name=conv_name,
2135
+ model_id=model_id,
2136
+ instructions=instructions,
2137
+ compaction_threshold=compaction_threshold
2138
+ )
2139
+
2140
+ if conversation_id:
2141
+ self.conversation_manager.load_conversation(conversation_id)
2142
+
2143
+ # Attach files if any
2144
+ if file_paths:
2145
+ self.conversation_manager.attach_files(file_paths)
2146
+ files = self.conversation_manager.get_attached_files()
2147
+ if files:
2148
+ self.cli.display_attached_files(files)
2149
+
2150
+ self.cli.print_success(f"Created new conversation: {conv_name}")
2151
+ return True
2152
+ else:
2153
+ self.cli.print_error("Failed to create conversation")
2154
+ return False
2155
+
2156
+ def select_existing_conversation(self) -> bool:
2157
+ """
2158
+ List and select an existing conversation.
2159
+
2160
+ Returns:
2161
+ True if conversation was loaded successfully, False otherwise
2162
+ """
2163
+ conversations = self.conversation_manager.get_active_conversations()
2164
+ conversation_id = self.cli.display_conversations(conversations)
2165
+
2166
+ if conversation_id:
2167
+ # Load existing conversation
2168
+ if self.conversation_manager.load_conversation(conversation_id):
2169
+ conv_info = self.conversation_manager.get_current_conversation_info()
2170
+
2171
+ # Set the model from the conversation
2172
+ model_id = conv_info['model_id']
2173
+ self.llm_manager.set_model(model_id)
2174
+ # Update references after model change
2175
+ self.bedrock_service = self.llm_manager.get_active_service()
2176
+ self.conversation_manager.bedrock_service = self.bedrock_service
2177
+
2178
+ self.cli.print_success(f"Loaded conversation: {conv_info['name']}")
2179
+ self.cli.print_info(f"Using model: {model_id}")
2180
+
2181
+ # Display attached files if any
2182
+ files = self.conversation_manager.get_attached_files()
2183
+ if files:
2184
+ self.cli.display_attached_files(files)
2185
+
2186
+ return True
2187
+ else:
2188
+ self.cli.print_error("Failed to load conversation")
2189
+ return False
2190
+ else:
2191
+ return False
2192
+
2193
+ def manage_autonomous_actions(self):
2194
+ """
2195
+ Manage autonomous actions - display menu and handle user choices.
2196
+ """
2197
+ import asyncio
2198
+
2199
+ while True:
2200
+ # Get count of failed actions for display
2201
+ failed_count = self.database.get_failed_action_count()
2202
+
2203
+ choice = self.cli.display_autonomous_actions_menu(failed_count)
2204
+
2205
+ if choice == 'back':
2206
+ break
2207
+
2208
+ elif choice == 'list':
2209
+ # List all actions
2210
+ actions = self.database.get_all_actions()
2211
+ self.cli.display_actions_list(actions)
2212
+
2213
+ elif choice == 'create':
2214
+ # Choose creation method
2215
+ creation_method = self.cli.select_action_creation_method()
2216
+
2217
+ if creation_method == 'manual':
2218
+ # Manual wizard
2219
+ models = self.llm_manager.list_all_models()
2220
+
2221
+ # Get available tools
2222
+ tools = []
2223
+ if self.mcp_manager:
2224
+ try:
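+ # Reuse the event loop kept by the MCP manager from initialisation (rather
+ # than creating a new one); if that loop is closed the tools list stays empty.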
2225
+ loop = getattr(self.mcp_manager, '_initialization_loop', None)
2226
+ if loop and not loop.is_closed():
2227
+ tools = loop.run_until_complete(self.mcp_manager.list_all_tools())
2228
+ except Exception as e:
2229
+ logging.warning(f"Could not get tools list: {e}")
2230
+
2231
+ action_config = self.cli.create_action_wizard(models, tools)
2232
+
2233
+ if action_config:
2234
+ try:
2235
+ # Create the action
2236
+ action_id = self.database.create_action(
2237
+ name=action_config['name'],
2238
+ description=action_config['description'],
2239
+ action_prompt=action_config['action_prompt'],
2240
+ model_id=action_config['model_id'],
2241
+ schedule_type=action_config['schedule_type'],
2242
+ schedule_config=action_config['schedule_config'],
2243
+ context_mode=action_config['context_mode'],
2244
+ max_failures=action_config['max_failures'],
2245
+ max_tokens=action_config.get('max_tokens', 8192)
2246
+ )
2247
+
2248
+ # Set tool permissions
2249
+ if action_config.get('tool_permissions'):
2250
+ self.database.set_action_tool_permissions_batch(
2251
+ action_id, action_config['tool_permissions']
2252
+ )
2253
+
2254
+ # Schedule the action
2255
+ if self.action_scheduler:
2256
+ self.action_scheduler.schedule_action(
2257
+ action_id=action_id,
2258
+ action_name=action_config['name'],
2259
+ schedule_type=action_config['schedule_type'],
2260
+ schedule_config=action_config['schedule_config'],
2261
+ user_guid=self.user_guid
2262
+ )
2263
+
2264
+ self.cli.print_success(f"Created action: {action_config['name']}")
2265
+
2266
+ except Exception as e:
2267
+ self.cli.print_error(f"Failed to create action: {e}")
2268
+
2269
+ elif creation_method == 'prompt_driven':
2270
+ # Prompt-driven creation with LLM
2271
+ self._create_action_prompt_driven()
2272
+
2273
+ elif choice == 'runs':
2274
+ # View action runs
2275
+ actions = self.database.get_all_actions()
2276
+ action_id = self.cli.select_action(actions, "Select action to view runs")
2277
+
2278
+ if action_id:
2279
+ action = self.database.get_action(action_id)
2280
+ runs = self.database.get_action_runs(action_id)
2281
+ self.cli.display_action_runs(runs, action['name'] if action else None)
2282
+
2283
+ # Option to view run details
2284
+ if runs:
2285
+ run_id = self.cli.select_run(runs, "Select a run for details (or 0 to go back)")
2286
+ if run_id:
2287
+ run = self.database.get_action_run(run_id)
2288
+ if run:
2289
+ self.cli.display_run_details(run)
2290
+
2291
+ elif choice == 'run_now':
2292
+ # Run action immediately
2293
+ actions = self.database.get_all_actions()
2294
+ action_id = self.cli.select_action(actions, "Select action to run now")
2295
+
2296
+ if action_id and self.action_scheduler:
2297
+ success = self.action_scheduler.run_action_now(action_id, self.user_guid)
2298
+ if success:
2299
+ self.cli.print_success("Action triggered for immediate execution")
2300
+ else:
2301
+ self.cli.print_error("Failed to trigger action")
2302
+ elif not self.action_scheduler:
2303
+ self.cli.print_error("Action scheduler not available")
2304
+
2305
+ elif choice == 'toggle':
2306
+ # Enable/disable action
2307
+ actions = self.database.get_all_actions()
2308
+ action_id = self.cli.select_action(actions, "Select action to toggle")
2309
+
2310
+ if action_id:
2311
+ action = self.database.get_action(action_id)
2312
+ if action:
2313
+ if action['is_enabled']:
2314
+ self.database.disable_action(action_id)
2315
+ if self.action_scheduler:
2316
+ self.action_scheduler.unschedule_action(action_id)
2317
+ self.cli.print_success(f"Disabled action: {action['name']}")
2318
+ else:
2319
+ self.database.enable_action(action_id)
2320
+ if self.action_scheduler:
2321
+ self.action_scheduler.schedule_action(
2322
+ action_id=action_id,
2323
+ action_name=action['name'],
2324
+ schedule_type=action['schedule_type'],
2325
+ schedule_config=action['schedule_config'],
2326
+ user_guid=self.user_guid
2327
+ )
2328
+ self.cli.print_success(f"Enabled action: {action['name']}")
2329
+
2330
+ elif choice == 'delete':
2331
+ # Delete action
2332
+ actions = self.database.get_all_actions()
2333
+ action_id = self.cli.select_action(actions, "Select action to delete")
2334
+
2335
+ if action_id:
2336
+ action = self.database.get_action(action_id)
2337
+ if action and self.cli.confirm(f"Delete action '{action['name']}'?"):
2338
+ if self.action_scheduler:
2339
+ self.action_scheduler.unschedule_action(action_id)
2340
+ self.database.delete_action(action_id)
2341
+ self.cli.print_success(f"Deleted action: {action['name']}")
2342
+
2343
+ elif choice == 'export':
2344
+ # Export run results
2345
+ actions = self.database.get_all_actions()
2346
+ action_id = self.cli.select_action(actions, "Select action")
2347
+
2348
+ if action_id:
2349
+ runs = self.database.get_action_runs(action_id)
2350
+ run_id = self.cli.select_run(runs, "Select run to export")
2351
+
2352
+ if run_id:
2353
+ export_format = self.cli.select_export_format()
2354
+ if export_format:
2355
+ run = self.database.get_action_run(run_id)
2356
+ if run:
2357
+ if export_format == 'html':
2358
+ content = run.get('result_html') or f"<pre>{run.get('result_text', 'No result')}</pre>"
2359
+ elif export_format == 'markdown':
2360
+ content = f"# Action Run {run_id}\n\n"
2361
+ content += f"**Status:** {run['status']}\n\n"
2362
+ content += f"## Result\n\n{run.get('result_text', 'No result')}"
2363
+ else:
2364
+ content = run.get('result_text', 'No result')
2365
+
2366
+ # Copy to clipboard
2367
+ if copy_to_clipboard(content):
2368
+ self.cli.print_success(f"Exported {export_format.upper()} to clipboard")
2369
+ else:
2370
+ # Show content if clipboard failed
2371
+ self.cli.console.print(f"\n{content}\n")
2372
+
2373
+ else:
2374
+ self.cli.print_error("Invalid option")
2375
+
2376
+ def _create_action_prompt_driven(self):
2377
+ """
2378
+ Create an autonomous action using conversational LLM approach.
2379
+
2380
+ The user describes what they want to schedule in natural language,
2381
+ and the LLM guides them through the creation process.
2382
+ """
2383
+ import json
2384
+ from dtSpark.scheduler.creation_tools import (
2385
+ get_action_creation_tools,
2386
+ execute_creation_tool,
2387
+ ACTION_CREATION_SYSTEM_PROMPT
2388
+ )
2389
+
2390
+ # Step 1: Select model (will be used for both creation and execution)
2391
+ models = self.llm_manager.list_all_models()
2392
+ if not models:
2393
+ self.cli.print_error("No models available")
2394
+ return
2395
+
2396
+ self.cli.console.print("\n[bold cyan]Select Model[/bold cyan]")
2397
+ self.cli.console.print("[dim]This model will be used for the creation process AND the scheduled task.[/dim]\n")
2398
+
2399
+ for i, model in enumerate(models, 1):
2400
+ from dtSpark.cli_interface import extract_friendly_model_name
2401
+ friendly_name = extract_friendly_model_name(model.get('id', ''))
2402
+ self.cli.console.print(f" [{i}] {friendly_name}")
2403
+
2404
+ model_choice = self.cli.get_input("Enter choice")
2405
+ try:
2406
+ model_idx = int(model_choice) - 1
2407
+ if model_idx < 0 or model_idx >= len(models):
2408
+ self.cli.print_error("Invalid model selection")
2409
+ return
2410
+ model_id = models[model_idx]['id']
2411
+ except ValueError:
2412
+ self.cli.print_error("Invalid input")
2413
+ return
2414
+
2415
+ # Step 2: Get creation tools
2416
+ creation_tools = get_action_creation_tools()
2417
+
2418
+ # Build config for tool listing (includes embedded_tools settings)
2419
+ creation_config = {
2420
+ 'embedded_tools': {
2421
+ 'filesystem': {
2422
+ 'enabled': self.settings.get('embedded_tools.filesystem.enabled', False),
2423
+ 'allowed_path': self.settings.get('embedded_tools.filesystem.allowed_path', './'),
2424
+ 'access_mode': self.settings.get('embedded_tools.filesystem.access_mode', 'read')
2425
+ }
2426
+ }
2427
+ }
2428
+
2429
+ # Step 3: Set the model for the creation conversation
2430
+ # Store the previous model to restore after creation
2431
+ previous_model = None
2432
+ previous_provider = self.llm_manager.get_active_provider()
2433
+ if self.llm_manager.active_service:
2434
+ previous_model = getattr(self.llm_manager.active_service, 'model_id', None)
2435
+
2436
+ try:
2437
+ self.llm_manager.set_model(model_id)
2438
+ except Exception as e:
2439
+ self.cli.print_error(f"Failed to set model: {e}")
2440
+ return
2441
+
2442
+ # Step 4: Display header and start conversation
2443
+ self.cli.display_creation_prompt_header()
2444
+
2445
+ # Conversation loop
2446
+ messages = []
2447
+ action_created = False
2448
+
2449
+ try:
2450
+ while not action_created:
2451
+ # Get user input
2452
+ user_input = self.cli.get_multiline_input("Your message")
2453
+
2454
+ if not user_input:
2455
+ continue
2456
+
2457
+ # Check for cancel command
2458
+ if user_input.lower().strip() == 'cancel':
2459
+ self.cli.print_warning("Action creation cancelled.")
2460
+ return
2461
+
2462
+ # Add user message to history
2463
+ messages.append({'role': 'user', 'content': user_input})
2464
+ self.cli.display_creation_conversation_message('user', user_input)
2465
+
2466
+ # Continue processing tool results until we get a final response
2467
+ while True:
2468
+ try:
2469
+ # Invoke LLM with creation tools
2470
+ response = self.llm_manager.invoke_model(
2471
+ messages=messages,
2472
+ system=ACTION_CREATION_SYSTEM_PROMPT,
2473
+ tools=creation_tools,
2474
+ max_tokens=4096
2475
+ )
2476
+ except Exception as e:
2477
+ self.cli.print_error(f"LLM invocation failed: {e}")
2478
+ break
2479
+
2480
+ # Check for error response
2481
+ if response.get('error'):
2482
+ error_msg = response.get('error_message', 'Unknown error')
2483
+ self.cli.print_error(f"LLM error: {error_msg}")
2484
+ break
2485
+
2486
+ # Process response
2487
+ # Note: Bedrock service returns 'content' as text string and
2488
+ # 'content_blocks' as the list of content blocks (text, tool_use)
2489
+ assistant_content = ""
2490
+ tool_calls = []
2491
+
2492
+ content_blocks = response.get('content_blocks', [])
2493
+ for block in content_blocks:
2494
+ if block.get('type') == 'text':
2495
+ assistant_content += block.get('text', '')
2496
+ elif block.get('type') == 'tool_use':
2497
+ tool_calls.append(block)
2498
+
2499
+ # Display assistant text if any
2500
+ if assistant_content:
2501
+ self.cli.display_creation_conversation_message('assistant', assistant_content)
2502
+
2503
+ # If no tool calls, we're done with this turn
2504
+ if not tool_calls:
2505
+ # Add assistant message to history
2506
+ messages.append({
2507
+ 'role': 'assistant',
2508
+ 'content': content_blocks
2509
+ })
2510
+ break
2511
+
2512
+ # Process tool calls
2513
+ tool_results = []
2514
+ for tool_call in tool_calls:
2515
+ tool_name = tool_call.get('name')
2516
+ tool_input = tool_call.get('input', {})
2517
+ tool_id = tool_call.get('id')
2518
+
2519
+ # Execute the creation tool
2520
+ try:
2521
+ result = execute_creation_tool(
2522
+ tool_name=tool_name,
2523
+ tool_input=tool_input,
2524
+ mcp_manager=self.mcp_manager,
2525
+ database=self.database,
2526
+ scheduler_manager=self.action_scheduler,
2527
+ model_id=model_id,
2528
+ user_guid=self.user_guid,
2529
+ config=creation_config
2530
+ )
2531
+
2532
+ # Display tool call result
2533
+ self.cli.display_creation_tool_call(tool_name, result)
2534
+
2535
+ # Check if action was created successfully
2536
+ if tool_name == 'create_autonomous_action' and result.get('success'):
2537
+ action_created = True
2538
+
2539
+ tool_results.append({
2540
+ 'type': 'tool_result',
2541
+ 'tool_use_id': tool_id,
2542
+ 'content': json.dumps(result)
2543
+ })
2544
+
2545
+ except Exception as e:
2546
+ logging.error(f"Tool execution error: {e}")
2547
+ tool_results.append({
2548
+ 'type': 'tool_result',
2549
+ 'tool_use_id': tool_id,
2550
+ 'content': json.dumps({'error': str(e)})
2551
+ })
2552
+
2553
+ # Add assistant message and tool results to history
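+ # (tool_result blocks go back to the model inside a 'user' role message, the
+ # shape expected by the Anthropic-style messages/tool-use format)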
2554
+ messages.append({
2555
+ 'role': 'assistant',
2556
+ 'content': content_blocks
2557
+ })
2558
+ messages.append({
2559
+ 'role': 'user',
2560
+ 'content': tool_results
2561
+ })
2562
+
2563
+ # If action was created, we can exit
2564
+ if action_created:
2565
+ break
2566
+
2567
+ finally:
2568
+ # Restore previous model if one was active
2569
+ if previous_model:
2570
+ try:
2571
+ self.llm_manager.set_model(previous_model)
2572
+ except Exception:
2573
+ pass # Silently ignore if we can't restore
2574
+
2575
+ if action_created:
2576
+ self.cli.console.print("\n[bold green]━━━ Action created successfully ━━━[/bold green]\n")
2577
+
2578
+ def select_model(self):
2579
+ """Handle model selection."""
2580
+ # Show progress while fetching models
2581
+ with self.cli.create_progress() as progress:
2582
+ task = progress.add_task("[cyan]Fetching available models...", total=100)
2583
+ models = self.llm_manager.list_all_models()
2584
+ progress.update(task, advance=100)
2585
+
2586
+ if not models:
2587
+ self.cli.print_error("No models available")
2588
+ return None
2589
+
2590
+ model_id = self.cli.display_models(models)
2591
+
2592
+ if model_id:
2593
+ # Check for quit command
2594
+ if model_id == 'QUIT':
2595
+ return None
2596
+
2597
+ # LLM manager will automatically determine the provider for this model
2598
+ self.llm_manager.set_model(model_id)
2599
+ # Update references after model change
2600
+ self.bedrock_service = self.llm_manager.get_active_service()
2601
+ self.conversation_manager.bedrock_service = self.bedrock_service
2602
+ self.cli.print_success(f"Selected model: {model_id}")
2603
+ return model_id
2604
+ else:
2605
+ self.cli.print_error("No model selected")
2606
+ return None
2607
+
2608
+ def setup_conversation(self):
2609
+ """Setup conversation - either load existing or create new."""
2610
+ conversations = self.conversation_manager.get_active_conversations()
2611
+
2612
+ conversation_id = self.cli.display_conversations(conversations)
2613
+
2614
+ if conversation_id:
2615
+ # Load existing conversation
2616
+ if self.conversation_manager.load_conversation(conversation_id):
2617
+ conv_info = self.conversation_manager.get_current_conversation_info()
2618
+
2619
+ # Set the model from the conversation
2620
+ model_id = conv_info['model_id']
2621
+ self.llm_manager.set_model(model_id)
2622
+ # Update references after model change
2623
+ self.bedrock_service = self.llm_manager.get_active_service()
2624
+ self.conversation_manager.bedrock_service = self.bedrock_service
2625
+
2626
+ self.cli.print_success(f"Loaded conversation: {conv_info['name']}")
2627
+ self.cli.print_info(f"Using model: {model_id}")
2628
+
2629
+ # Display attached files if any
2630
+ files = self.conversation_manager.get_attached_files()
2631
+ if files:
2632
+ self.cli.display_attached_files(files)
2633
+
2634
+ return True
2635
+ else:
2636
+ self.cli.print_error("Failed to load conversation")
2637
+ return False
2638
+ else:
2639
+ # Create new conversation
2640
+ # Step 1: Get conversation name
2641
+ conv_name = self.cli.get_input("\nEnter a name for this conversation")
2642
+ if not conv_name:
2643
+ self.cli.print_error("Conversation name cannot be empty")
2644
+ return False
2645
+
2646
+ # Step 2: Select model for this conversation (or use configured model)
2647
+ if self.configured_model_id:
2648
+ # Model is locked via configuration
2649
+ model_id = self.configured_model_id
2650
+ try:
2651
+ self.llm_manager.set_model(model_id, self.configured_provider)
2652
+ # Update references after model change
2653
+ self.bedrock_service = self.llm_manager.get_active_service()
2654
+ self.conversation_manager.bedrock_service = self.bedrock_service
2655
+ provider_info = f" via {self.configured_provider}" if self.configured_provider else ""
2656
+ self.cli.print_info(f"Using configured model: {model_id}{provider_info}")
2657
+ except ValueError as e:
2658
+ self.cli.print_error(f"Failed to set configured model: {e}")
2659
+ if self.configured_provider:
2660
+ self.cli.print_info(f"Provider '{self.configured_provider}' may not be enabled or model not available")
2661
+ return False
2662
+ else:
2663
+ # Allow user to select model
2664
+ model_id = self.select_model()
2665
+ if not model_id or model_id == 'QUIT':
2666
+ self.cli.print_error("Model selection cancelled")
2667
+ return False
2668
+
2669
+ # Step 3: Ask if user wants to provide instructions
2670
+ provide_instructions = self.cli.confirm("Would you like to provide instructions for this conversation?")
2671
+ instructions = None
2672
+
2673
+ if provide_instructions:
2674
+ instructions = self.cli.get_multiline_input("Enter instructions/system prompt for this conversation")
2675
+ if not instructions.strip():
2676
+ instructions = None
2677
+
2678
+ # Step 4: Ask if user wants to attach files
2679
+ from dtSpark.files import FileManager
2680
+ supported_extensions = FileManager.get_supported_extensions()
2681
+ file_paths = self.cli.get_file_attachments(supported_extensions)
2682
+
2683
+ # Create conversation with selected model
2684
+ self.conversation_manager.create_conversation(conv_name, model_id, instructions)
2685
+ self.cli.print_success(f"Created new conversation: {conv_name}")
2686
+ self.cli.print_info(f"Using model: {model_id}")
2687
+ if instructions:
2688
+ self.cli.print_info("Instructions have been set for this conversation")
2689
+
2690
+ # Attach files if any were selected
2691
+ if file_paths:
2692
+ self.conversation_manager.attach_files(file_paths)
2693
+
2694
+ return True
2695
+
2696
+ def chat_loop(self):
2697
+ """Main chat loop."""
2698
+ while True:
2699
+ # Display conversation info
2700
+ conv_info = self.conversation_manager.get_current_conversation_info()
2701
+ token_count = self.conversation_manager.get_current_token_count()
2702
+ attached_files = self.conversation_manager.get_attached_files()
2703
+ # Get context window (actual input limit) instead of max_tokens (output limit)
2704
+ context_window = self.conversation_manager.get_context_window()
2705
+
2706
+ # Get access method from active service
2707
+ access_method = self.llm_manager.get_active_service().get_access_info() if self.llm_manager.get_active_service() else None
2708
+
2709
+ if conv_info:
2710
+ self.cli.display_conversation_info(conv_info, token_count, context_window, attached_files, access_method=access_method)
2711
+
2712
+ # Get user input
2713
+ user_message = self.cli.chat_prompt()
2714
+
2715
+ if user_message is None:
2716
+ # User wants to quit
2717
+ if self.cli.confirm("Are you sure you want to exit?"):
2718
+ break
2719
+ else:
2720
+ continue
2721
+
2722
+ if user_message == 'END_CHAT':
2723
+ # End current chat and return to conversation/model selection
2724
+ self.cli.print_info("Ending current chat session")
2725
+ break
2726
+
2727
+ if user_message == 'SHOW_HISTORY':
2728
+ # Show conversation history
2729
+ messages = self.conversation_manager.get_conversation_history()
2730
+ self.cli.display_conversation_history(messages)
2731
+ self.cli.wait_for_enter()
2732
+ continue
2733
+
2734
+ if user_message == 'SHOW_INFO':
2735
+ # Show detailed info with model usage breakdown
2736
+ model_usage = self.conversation_manager.get_model_usage_breakdown()
2737
+ self.cli.display_conversation_info(conv_info, token_count, context_window, attached_files, model_usage, detailed=True, access_method=access_method)
2738
+ # Also show attached files details if any
2739
+ if attached_files:
2740
+ self.cli.display_attached_files(attached_files)
2741
+ # Show MCP server states if MCP is enabled
2742
+ if self.mcp_manager:
2743
+ try:
2744
+ server_states = self.conversation_manager.get_mcp_server_states()
2745
+ if server_states:
2746
+ self.cli.display_mcp_server_states(server_states)
2747
+ except Exception as e:
2748
+ logging.error(f"Failed to get MCP server states for info display: {e}")
2749
+ self.cli.print_warning("Could not retrieve MCP server states")
2750
+
2751
+ # Show AWS Bedrock usage costs if available
2752
+ if self.cost_tracker:
2753
+ try:
2754
+ bedrock_costs = self.cost_tracker.get_bedrock_costs()
2755
+ self.cli.print_separator("─")
2756
+ self.cli.print_info("💰 AWS Bedrock Usage Costs")
2757
+ self.cli.print_separator("─")
2758
+
2759
+ cost_lines = self.cost_tracker.format_cost_report(bedrock_costs)
2760
+ for line in cost_lines:
2761
+ if line.startswith(' •'):
2762
+ # Indent breakdown items
2763
+ self.cli.print_info(f" {line}")
2764
+ else:
2765
+ self.cli.print_info(line)
2766
+ except Exception as e:
2767
+ logging.debug(f"Could not retrieve cost information for /info: {e}")
2768
+ self.cli.print_info("Cost information temporarily unavailable")
2769
+
2770
+ self.cli.wait_for_enter()
2771
+ continue
2772
+
2773
+ if user_message == 'EXPORT_CONVERSATION':
2774
+ # Export conversation with format and tool inclusion options
2775
+ self.cli.print_separator("═")
2776
+ self.cli.print_info("Export Conversation")
2777
+ self.cli.print_separator("═")
2778
+
2779
+ # Select export format
2780
+ format_choice = self.cli.get_input("Select export format:\n"
2781
+ " [1] Markdown (.md)\n"
2782
+ " [2] HTML (.html)\n"
2783
+ " [3] CSV (.csv)\n"
2784
+ "Enter choice")
2785
+
2786
+ format_map = {
2787
+ '1': ('markdown', '.md'),
2788
+ '2': ('html', '.html'),
2789
+ '3': ('csv', '.csv')
2790
+ }
2791
+
2792
+ if format_choice not in format_map:
2793
+ self.cli.print_error("Invalid choice")
2794
+ self.cli.wait_for_enter()
2795
+ continue
2796
+
2797
+ export_format, file_extension = format_map[format_choice]
2798
+
2799
+ # Ask about tool inclusion
2800
+ include_tools = self.cli.confirm("Include tool use details in export?")
2801
+
2802
+ # Get filename
2803
+ base_name = conv_info['name'].replace(' ', '_')
2804
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
2805
+ default_filename = f"{base_name}_{timestamp}{file_extension}"
2806
+
2807
+ file_path = self.cli.get_input(f"Enter export file path (default: ./{default_filename})")
2808
+
2809
+ if not file_path.strip():
2810
+ file_path = default_filename
2811
+
2812
+ # Ensure correct extension
2813
+ if not file_path.endswith(file_extension):
2814
+ # Remove any existing extension and add correct one
2815
+ if '.' in file_path:
2816
+ file_path = file_path.rsplit('.', 1)[0]
2817
+ file_path += file_extension
2818
+
2819
+ # Export conversation
2820
+ if self.conversation_manager.export_conversation(file_path, export_format, include_tools):
2821
+ self.cli.print_success(f"Conversation exported to: {file_path}")
2822
+ self.cli.print_info(f"Format: {export_format.upper()}")
2823
+ self.cli.print_info(f"Tool details: {'Included' if include_tools else 'Excluded'}")
2824
+ else:
2825
+ self.cli.print_error("Failed to export conversation")
2826
+
2827
+ self.cli.wait_for_enter()
2828
+ continue
2829
+
2830
+ if user_message == 'DELETE_CONVERSATION':
2831
+ # Delete current conversation
2832
+ if self.cli.confirm(f"Are you sure you want to delete '{conv_info['name']}'? This cannot be undone"):
2833
+ if self.conversation_manager.delete_current_conversation():
2834
+ self.cli.print_success("Conversation deleted")
2835
+ break # Exit chat loop
2836
+ else:
2837
+ self.cli.print_error("Failed to delete conversation")
2838
+ else:
2839
+ self.cli.print_info("Deletion cancelled")
2840
+ continue
2841
+
2842
+            if user_message == 'ATTACH_FILES':
+                # Attach files to current conversation
+                from dtSpark.files import FileManager
+                supported_extensions = FileManager.get_supported_extensions()
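+                # Descriptive note (not in the original source): the supported-extension
+                # list is assumed to drive filtering/validation of the paths gathered by
+                # get_file_attachments() below.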
+                file_paths = self.cli.get_file_attachments(supported_extensions)
+
+                if file_paths:
+                    # Process and attach files
+                    success = self.conversation_manager.attach_files_with_message(file_paths)
+                    if success:
+                        self.cli.print_success(f"Attached {len(file_paths)} file(s) to conversation")
+                        # Refresh attached_files list
+                        attached_files = self.conversation_manager.get_attached_files()
+                        self.cli.display_attached_files(attached_files)
+                    else:
+                        self.cli.print_error("Some files failed to attach")
+                else:
+                    self.cli.print_info("No files attached")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if user_message == 'DELETE_FILES':
+                # Check if conversation is predefined - if so, block file deletion
+                if self.database.is_conversation_predefined(self.conversation_manager.current_conversation_id):
+                    self.cli.print_error("Cannot delete files from predefined conversations")
+                    self.cli.print_info("This conversation is managed by configuration")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Delete files from current conversation
+                if not attached_files:
+                    self.cli.print_info("No files attached to this conversation")
+                    self.cli.wait_for_enter()
+                    continue
+
+                self.cli.print_separator("═")
+                self.cli.print_info("Delete Attached Files")
+                self.cli.print_separator("═")
+                self.cli.console.print()
+
+                # Display current attached files
+                self.cli.display_attached_files(attached_files)
+                self.cli.console.print()
+
+                # Ask user for file IDs to delete
+                self.cli.print_info("Enter file IDs to delete (comma-separated), or 'all' to delete all files")
+                file_ids_input = self.cli.get_input("File IDs to delete").strip()
+
+                if not file_ids_input:
+                    self.cli.print_info("Delete operation cancelled")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Parse file IDs
+                if file_ids_input.lower() == 'all':
+                    # Confirm deleting all files
+                    if self.cli.confirm(f"Are you sure you want to delete all {len(attached_files)} file(s)?"):
+                        deleted_count = 0
+                        for file_info in attached_files:
+                            if self.conversation_manager.database.delete_file(file_info['id']):
+                                deleted_count += 1
+
+                        if deleted_count > 0:
+                            self.cli.print_success(f"Deleted {deleted_count} file(s)")
+                            # Refresh attached_files list
+                            attached_files = self.conversation_manager.get_attached_files()
+                        else:
+                            self.cli.print_error("Failed to delete files")
+                    else:
+                        self.cli.print_info("Delete operation cancelled")
+                else:
+                    # Parse comma-separated IDs
+                    try:
+                        file_ids = [int(id.strip()) for id in file_ids_input.split(',') if id.strip()]
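+                        # Illustrative example (not in the original source): an input of
+                        # "3, 5,7" parses to [3, 5, 7]; any non-numeric token raises
+                        # ValueError, which is reported below.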
+
+                        if not file_ids:
+                            self.cli.print_error("No valid file IDs provided")
+                            self.cli.wait_for_enter()
+                            continue
+
+                        # Confirm deletion
+                        if self.cli.confirm(f"Are you sure you want to delete {len(file_ids)} file(s)?"):
+                            deleted_count = 0
+                            failed_ids = []
+
+                            for file_id in file_ids:
+                                if self.conversation_manager.database.delete_file(file_id):
+                                    deleted_count += 1
+                                else:
+                                    failed_ids.append(file_id)
+
+                            if deleted_count > 0:
+                                self.cli.print_success(f"Deleted {deleted_count} file(s)")
+                                # Refresh attached_files list
+                                attached_files = self.conversation_manager.get_attached_files()
+
+                            if failed_ids:
+                                self.cli.print_error(f"Failed to delete files with IDs: {', '.join(map(str, failed_ids))}")
+                        else:
+                            self.cli.print_info("Delete operation cancelled")
+
+                    except ValueError:
+                        self.cli.print_error("Invalid file IDs. Please enter comma-separated numbers or 'all'")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if user_message == 'CHANGE_MODEL':
+                # Check if conversation is predefined - if so, block model changes
+                if self.database.is_conversation_predefined(self.conversation_manager.current_conversation_id):
+                    self.cli.print_error("Cannot change model for predefined conversations")
+                    self.cli.print_info("This conversation is managed by configuration")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Check if model is locked via configuration
+                if self.configured_model_id:
+                    self.cli.print_error("Model changing is disabled - model is locked via configuration")
+                    self.cli.print_info(f"Configured model: {self.configured_model_id}")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Change the model for the current conversation - show ALL available models from ALL providers
+                with self.cli.create_progress() as progress:
+                    task = progress.add_task("[cyan]Fetching available models from all providers...", total=100)
+                    models = self.llm_manager.list_all_models()
+                    progress.update(task, advance=100)
+
+                if not models:
+                    self.cli.print_error("No models available")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Let user select new model
+                new_model_id = self.cli.display_models(models)
+
+                if new_model_id:
+                    # Update the model via LLM manager (which will switch providers if needed)
+                    self.llm_manager.set_model(new_model_id)
+                    # Update references after model change
+                    self.bedrock_service = self.llm_manager.get_active_service()
+                    self.conversation_manager.bedrock_service = self.bedrock_service
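+                    # Descriptive note (not in the original source): set_model() may have
+                    # switched the active provider, so the cached service references are
+                    # refreshed before the conversation record is updated below.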
+
+                    # Change the model in the conversation
+                    if self.conversation_manager.change_model(new_model_id):
+                        self.cli.print_success(f"Changed model to: {new_model_id}")
+                    else:
+                        self.cli.print_error("Failed to change model")
+                else:
+                    self.cli.print_info("Model change cancelled")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if user_message == 'COPY_LAST':
+                # Copy last assistant response to clipboard
+                last_message = self.conversation_manager.get_last_assistant_message()
+
+                if last_message:
+                    if copy_to_clipboard(last_message):
+                        char_count = len(last_message)
+                        self.cli.print_success(f"✓ Last assistant response copied to clipboard ({char_count:,} characters)")
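+                        # The ':,' format spec groups digits with commas, e.g. 12345 renders
+                        # as "12,345" (descriptive note, not in the original source).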
+                    else:
+                        self.cli.print_error("Failed to copy to clipboard. Please ensure clipboard utilities are installed.")
+                        if sys.platform not in ['win32', 'darwin']:
+                            self.cli.print_info("Linux users: Install xclip or xsel (e.g., 'sudo apt install xclip')")
+                else:
+                    self.cli.print_warning("No assistant response found to copy")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if user_message == 'CHANGE_INSTRUCTIONS':
+                # Check if conversation is predefined - if so, block instruction changes
+                if self.database.is_conversation_predefined(self.conversation_manager.current_conversation_id):
+                    self.cli.print_error("Cannot change instructions for predefined conversations")
+                    self.cli.print_info("This conversation is managed by configuration")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Change/update the instructions for the current conversation
+                self.cli.print_separator("═")
+                self.cli.print_info("Update Conversation Instructions")
+                self.cli.print_separator("═")
+
+                # Show current instructions if any
+                if conv_info.get('instructions'):
+                    self.cli.console.print()
+                    self.cli.console.print("[bold cyan]Current Instructions:[/bold cyan]")
+                    self.cli.console.print(f"[dim italic]{conv_info['instructions']}[/dim italic]")
+                    self.cli.console.print()
+                else:
+                    self.cli.console.print()
+                    self.cli.print_info("No instructions currently set for this conversation")
+                    self.cli.console.print()
+
+                # Ask if user wants to set new instructions or clear them
+                if self.cli.confirm("Would you like to set new instructions?"):
+                    new_instructions = self.cli.get_multiline_input("Enter new instructions/system prompt (press Enter twice to finish)")
+
+                    if new_instructions.strip():
+                        # Set new instructions
+                        if self.conversation_manager.update_instructions(new_instructions.strip()):
+                            self.cli.print_success("Instructions updated successfully")
+                        else:
+                            self.cli.print_error("Failed to update instructions")
+                    else:
+                        self.cli.print_info("Instructions update cancelled (empty input)")
+                elif conv_info.get('instructions') and self.cli.confirm("Would you like to clear the existing instructions?"):
+                    # Clear instructions
+                    if self.conversation_manager.update_instructions(None):
+                        self.cli.print_success("Instructions cleared successfully")
+                    else:
+                        self.cli.print_error("Failed to clear instructions")
+                else:
+                    self.cli.print_info("Instructions unchanged")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if user_message == 'MCP_AUDIT':
+                # Show MCP transaction audit information
+                self.cli.print_separator("═")
+                self.cli.print_info("MCP Transaction Audit")
+                self.cli.print_separator("═")
+
+                # Display options
+                audit_choice = self.cli.get_input("Options:\n"
+                                                  " [1] View transactions for this conversation\n"
+                                                  " [2] View all recent transactions (last 50)\n"
+                                                  " [3] View statistics\n"
+                                                  " [4] Export to CSV\n"
+                                                  "Enter choice")
+
+                if audit_choice == '1':
+                    # Show transactions for current conversation
+                    transactions = self.database.get_mcp_transactions(
+                        conversation_id=self.conversation_manager.current_conversation_id,
+                        limit=50
+                    )
+                    self.cli.display_mcp_transactions(transactions, "Conversation MCP Transactions")
+
+                elif audit_choice == '2':
+                    # Show all recent transactions
+                    transactions = self.database.get_mcp_transactions(limit=50)
+                    self.cli.display_mcp_transactions(transactions, "Recent MCP Transactions")
+
+                elif audit_choice == '3':
+                    # Show statistics
+                    stats = self.database.get_mcp_transaction_stats()
+                    self.cli.display_mcp_stats(stats)
+
+                elif audit_choice == '4':
+                    # Export to CSV
+                    default_filename = f"mcp_audit_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
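+                    # Illustrative example (not in the original source): this yields names
+                    # such as "mcp_audit_20250101_093000.csv".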
+                    file_path = self.cli.get_input(f"Enter export file path (default: ./{default_filename})")
+
+                    if not file_path.strip():
+                        file_path = default_filename
+
+                    # Ensure .csv extension
+                    if not file_path.endswith('.csv'):
+                        file_path += '.csv'
+
+                    # Ask if user wants to export all or just this conversation
+                    export_all = self.cli.confirm("Export all transactions? (No = current conversation only)")
+
+                    conversation_id = None if export_all else self.conversation_manager.current_conversation_id
+
+                    if self.database.export_mcp_transactions_to_csv(file_path, conversation_id):
+                        self.cli.print_success(f"MCP transactions exported to: {file_path}")
+                    else:
+                        self.cli.print_error("Failed to export MCP transactions")
+
+                else:
+                    self.cli.print_error("Invalid choice")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if user_message == 'MCP_SERVERS':
+                # Manage MCP server enabled/disabled states
+                if not self.mcp_manager:
+                    self.cli.print_error("MCP is not enabled")
+                    self.cli.wait_for_enter()
+                    continue
+
+                try:
+                    server_states = self.conversation_manager.get_mcp_server_states()
+
+                    if not server_states:
+                        self.cli.print_error("No MCP servers available")
+                        self.cli.wait_for_enter()
+                        continue
+                except Exception as e:
+                    logging.error(f"Failed to get MCP server states: {e}")
+                    self.cli.print_error(f"Failed to retrieve MCP server states: {e}")
+                    self.cli.wait_for_enter()
+                    continue
+
+                # Display current state
+                self.cli.display_mcp_server_states(server_states)
+
+                # Ask user if they want to toggle a server
+                if self.cli.confirm("\nWould you like to enable/disable a server?"):
+                    selected_server = self.cli.select_mcp_server(server_states, "toggle")
+
+                    if selected_server:
+                        try:
+                            # Find current state
+                            current_state = next((s for s in server_states if s['server_name'] == selected_server), None)
+
+                            if current_state:
+                                # Toggle the state
+                                new_state = not current_state['enabled']
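+                                # e.g. a server currently enabled (True) is toggled to disabled
+                                # (False) before being persisted below (descriptive note, not in
+                                # the original source).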
+                                if self.conversation_manager.set_mcp_server_enabled(selected_server, new_state):
+                                    action = "enabled" if new_state else "disabled"
+                                    self.cli.print_success(f"Server '{selected_server}' {action}")
+                                else:
+                                    self.cli.print_error("Failed to update server state")
+
+                                # Show updated states
+                                self.cli.print_separator("─")
+                                updated_states = self.conversation_manager.get_mcp_server_states()
+                                self.cli.display_mcp_server_states(updated_states)
+                            else:
+                                self.cli.print_error("Server not found")
+                        except Exception as e:
+                            logging.error(f"Failed to toggle MCP server state: {e}")
+                            self.cli.print_error(f"Failed to update server state: {e}")
+                else:
+                    self.cli.print_info("Cancelled")
+
+                self.cli.wait_for_enter()
+                continue
+
+            if not user_message.strip():
+                continue
+
+            # Send message and get response
+            self.cli.display_message('user', user_message)
+
+            # Show animated status indicator during processing
+            with self.cli.status_indicator("Generating response..."):
+                assistant_response = self.conversation_manager.send_message(user_message)
+
+            if assistant_response:
+                self.cli.display_message('assistant', assistant_response)
+            # Note: If None is returned, it could be because:
+            # 1. Prompt was blocked by security (already displayed violation message)
+            # 2. Model failed (error already logged)
+            # In both cases, the user has already been notified, so no additional error needed
+
+    def main(self, args):
+        """Main application entry point."""
+
+        ResourceManager().add_resource_path(os.path.join(parent_dir, 'resources'))
+
+        try:
+            # Check if --setup flag was provided
+            if hasattr(args, 'setup') and args.setup:
+                # Run setup wizard and exit
+                self.setup_wizard()
+                ProcessManager().call_shutdown()
+                return
+
+            # Initialise settings
+            # Point to the correct config location
+            self.settings = Settings()
+            self.settings.init_settings_readers()
+
+            # Check if model is locked via configuration
+            # Priority 1: Check for mandatory_model (forces model for ALL conversations)
+            # Priority 2: Check for bedrock.model_id (legacy configuration)
+            self.configured_model_id = self._get_nested_setting('llm_providers.mandatory_model', None)
+            self.configured_provider = self._get_nested_setting('llm_providers.mandatory_provider', None)
+
+            if not self.configured_model_id:
+                self.configured_model_id = self.settings.get('bedrock.model_id', None)
+                # Legacy config doesn't support provider specification
+
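+            # Illustrative config sketch (an assumption about the YAML layout; only the
+            # setting keys read above are taken from the source):
+            #
+            #   llm_providers:
+            #     mandatory_provider: <provider name>
+            #     mandatory_model: <model id>    # forces this model for ALL conversations
+            #   bedrock:
+            #     model_id: <model id>           # legacy fallback
+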
+            # Initialise components
+            self.initialise_singletons()
+
+            # If model is locked via config, disable model changing in CLI
+            if self.configured_model_id:
+                logging.info(f"Model locked via configuration: {self.configured_model_id}")
+                self.cli.model_changing_enabled = False
+
+            if not self.auth_failed:
+
+                # Check interface type from configuration
+                interface_type = self.settings.get('interface.type', 'cli')
+
+                if interface_type == 'web':
+                    # Launch web interface
+                    self.launch_web_interface()
+                    return  # Web server is blocking, so return after it exits
+
+                # Main application loop - menu-driven interface (CLI)
+                while True:
+                    # Display main menu
+                    choice = self.cli.display_main_menu()
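+                    # display_main_menu() is expected to return one of 'costs', 'new',
+                    # 'list', 'autonomous' or 'quit', handled in turn below
+                    # (descriptive note, not in the original source).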
+
+                    if choice == 'costs':
+                        # Re-gather and display AWS Bedrock costs
+                        self.regather_and_display_costs()
+
+                    elif choice == 'new':
+                        # Start a new conversation
+                        if self.start_new_conversation():
+                            # Start chat loop
+                            self.chat_loop()
+                        else:
+                            self.cli.print_warning("Failed to create new conversation")
+
+                    elif choice == 'list':
+                        # List and select existing conversation
+                        if self.select_existing_conversation():
+                            # Start chat loop
+                            self.chat_loop()
+                        else:
+                            self.cli.print_warning("No conversation selected")
+
+                    elif choice == 'autonomous':
+                        # Manage autonomous actions
+                        self.manage_autonomous_actions()
+
+                    elif choice == 'quit':
+                        # User chose to quit
+                        break
+
+                    else:
+                        # Invalid choice
+                        self.cli.print_error("Invalid option. Please select 1-5.")
+
+                # Farewell message
+                self.cli.print_farewell(version())
+
+        except KeyboardInterrupt:
+            if self.cli:
+                self.cli.print_info("\nOperation cancelled by user")
+                self.cli.print_farewell(version())
+            else:
+                print("\nOperation cancelled by user")
+        except Exception as e:
+            logging.exception("Unexpected error in main application")
+            if self.cli:
+                self.cli.print_error(f"Unexpected error: {e}")
+            else:
+                print(f"Unexpected error: {e}", file=sys.stderr)
+            raise
+
+        ProcessManager().call_shutdown()
+
+    def exiting(self):
+        """Clean up resources on exit."""
+        import asyncio
+        logging.info('Shutting down application')
+        if not self.auth_failed:
+            # Stop action scheduler and execution queue
+            if self.action_scheduler:
+                try:
+                    self.action_scheduler.stop()
+                    logging.info('Action scheduler stopped')
+                except Exception as e:
+                    logging.warning(f'Error stopping action scheduler: {e}')
+
+            if self.execution_queue:
+                try:
+                    self.execution_queue.stop()
+                    logging.info('Execution queue stopped')
+                except Exception as e:
+                    logging.warning(f'Error stopping execution queue: {e}')
+
+            # Disconnect from MCP servers
+            if self.mcp_manager:
+                try:
+                    # Try to disconnect gracefully, but don't fail if it errors
+                    # The asyncio context manager cleanup can be tricky with reused loops
+                    if hasattr(self.mcp_manager, '_initialization_loop') and self.mcp_manager._initialization_loop:
+                        loop = self.mcp_manager._initialization_loop
+                        logging.debug('Using stored initialisation loop for MCP disconnection')
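+                        # Descriptive note (not in the original source): running the
+                        # disconnect on the same loop that created the MCP sessions avoids
+                        # asyncio "attached to a different loop" errors during shutdown.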
+                        try:
+                            # Try to disconnect, but catch any asyncio-specific errors
+                            loop.run_until_complete(self.mcp_manager.disconnect_all())
+                            logging.info('Disconnected from MCP servers')
+                        except RuntimeError as e:
+                            # Ignore asyncio context manager errors during shutdown
+                            logging.debug(f'Ignoring asyncio cleanup error during shutdown: {e}')
+                        finally:
+                            # Close the loop
+                            if not loop.is_closed():
+                                loop.close()
+                                logging.debug('Event loop closed')
+                    else:
+                        logging.debug('No stored event loop, skipping MCP disconnection')
+                except Exception as e:
+                    logging.warning(f'Error during MCP cleanup (non-critical): {e}')
+
+            if self.database:
+                self.database.close()
+                logging.info('Database connection closed')
+
+
+
+def main():
+    """Entry point for the console script."""
+    AWSBedrockCLI().run()
+
+if __name__ == '__main__':
+    main()
+