dtSpark-1.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. dtSpark/__init__.py +0 -0
  2. dtSpark/_description.txt +1 -0
  3. dtSpark/_full_name.txt +1 -0
  4. dtSpark/_licence.txt +21 -0
  5. dtSpark/_metadata.yaml +6 -0
  6. dtSpark/_name.txt +1 -0
  7. dtSpark/_version.txt +1 -0
  8. dtSpark/aws/__init__.py +7 -0
  9. dtSpark/aws/authentication.py +296 -0
  10. dtSpark/aws/bedrock.py +578 -0
  11. dtSpark/aws/costs.py +318 -0
  12. dtSpark/aws/pricing.py +580 -0
  13. dtSpark/cli_interface.py +2645 -0
  14. dtSpark/conversation_manager.py +3050 -0
  15. dtSpark/core/__init__.py +12 -0
  16. dtSpark/core/application.py +3355 -0
  17. dtSpark/core/context_compaction.py +735 -0
  18. dtSpark/daemon/__init__.py +104 -0
  19. dtSpark/daemon/__main__.py +10 -0
  20. dtSpark/daemon/action_monitor.py +213 -0
  21. dtSpark/daemon/daemon_app.py +730 -0
  22. dtSpark/daemon/daemon_manager.py +289 -0
  23. dtSpark/daemon/execution_coordinator.py +194 -0
  24. dtSpark/daemon/pid_file.py +169 -0
  25. dtSpark/database/__init__.py +482 -0
  26. dtSpark/database/autonomous_actions.py +1191 -0
  27. dtSpark/database/backends.py +329 -0
  28. dtSpark/database/connection.py +122 -0
  29. dtSpark/database/conversations.py +520 -0
  30. dtSpark/database/credential_prompt.py +218 -0
  31. dtSpark/database/files.py +205 -0
  32. dtSpark/database/mcp_ops.py +355 -0
  33. dtSpark/database/messages.py +161 -0
  34. dtSpark/database/schema.py +673 -0
  35. dtSpark/database/tool_permissions.py +186 -0
  36. dtSpark/database/usage.py +167 -0
  37. dtSpark/files/__init__.py +4 -0
  38. dtSpark/files/manager.py +322 -0
  39. dtSpark/launch.py +39 -0
  40. dtSpark/limits/__init__.py +10 -0
  41. dtSpark/limits/costs.py +296 -0
  42. dtSpark/limits/tokens.py +342 -0
  43. dtSpark/llm/__init__.py +17 -0
  44. dtSpark/llm/anthropic_direct.py +446 -0
  45. dtSpark/llm/base.py +146 -0
  46. dtSpark/llm/context_limits.py +438 -0
  47. dtSpark/llm/manager.py +177 -0
  48. dtSpark/llm/ollama.py +578 -0
  49. dtSpark/mcp_integration/__init__.py +5 -0
  50. dtSpark/mcp_integration/manager.py +653 -0
  51. dtSpark/mcp_integration/tool_selector.py +225 -0
  52. dtSpark/resources/config.yaml.template +631 -0
  53. dtSpark/safety/__init__.py +22 -0
  54. dtSpark/safety/llm_service.py +111 -0
  55. dtSpark/safety/patterns.py +229 -0
  56. dtSpark/safety/prompt_inspector.py +442 -0
  57. dtSpark/safety/violation_logger.py +346 -0
  58. dtSpark/scheduler/__init__.py +20 -0
  59. dtSpark/scheduler/creation_tools.py +599 -0
  60. dtSpark/scheduler/execution_queue.py +159 -0
  61. dtSpark/scheduler/executor.py +1152 -0
  62. dtSpark/scheduler/manager.py +395 -0
  63. dtSpark/tools/__init__.py +4 -0
  64. dtSpark/tools/builtin.py +833 -0
  65. dtSpark/web/__init__.py +20 -0
  66. dtSpark/web/auth.py +152 -0
  67. dtSpark/web/dependencies.py +37 -0
  68. dtSpark/web/endpoints/__init__.py +17 -0
  69. dtSpark/web/endpoints/autonomous_actions.py +1125 -0
  70. dtSpark/web/endpoints/chat.py +621 -0
  71. dtSpark/web/endpoints/conversations.py +353 -0
  72. dtSpark/web/endpoints/main_menu.py +547 -0
  73. dtSpark/web/endpoints/streaming.py +421 -0
  74. dtSpark/web/server.py +578 -0
  75. dtSpark/web/session.py +167 -0
  76. dtSpark/web/ssl_utils.py +195 -0
  77. dtSpark/web/static/css/dark-theme.css +427 -0
  78. dtSpark/web/static/js/actions.js +1101 -0
  79. dtSpark/web/static/js/chat.js +614 -0
  80. dtSpark/web/static/js/main.js +496 -0
  81. dtSpark/web/static/js/sse-client.js +242 -0
  82. dtSpark/web/templates/actions.html +408 -0
  83. dtSpark/web/templates/base.html +93 -0
  84. dtSpark/web/templates/chat.html +814 -0
  85. dtSpark/web/templates/conversations.html +350 -0
  86. dtSpark/web/templates/goodbye.html +81 -0
  87. dtSpark/web/templates/login.html +90 -0
  88. dtSpark/web/templates/main_menu.html +983 -0
  89. dtSpark/web/templates/new_conversation.html +191 -0
  90. dtSpark/web/web_interface.py +137 -0
  91. dtspark-1.0.4.dist-info/METADATA +187 -0
  92. dtspark-1.0.4.dist-info/RECORD +96 -0
  93. dtspark-1.0.4.dist-info/WHEEL +5 -0
  94. dtspark-1.0.4.dist-info/entry_points.txt +3 -0
  95. dtspark-1.0.4.dist-info/licenses/LICENSE +21 -0
  96. dtspark-1.0.4.dist-info/top_level.txt +1 -0
dtSpark/web/endpoints/streaming.py
@@ -0,0 +1,421 @@
+ """
+ SSE streaming endpoints for real-time updates.
+
+ Provides Server-Sent Events streaming for model responses, tool execution,
+ and progress updates.
+
+
+ """
+
+ import asyncio
+ import json
+ import logging
+ from typing import AsyncGenerator
+
+ from fastapi import APIRouter, Depends, Request
+ from sse_starlette.sse import EventSourceResponse
+
+ from ..dependencies import get_current_session
+
+
+ logger = logging.getLogger(__name__)
+
+ router = APIRouter()
+
+
+ class StreamingManager:
+     """
+     Manages Server-Sent Events streams for real-time updates.
+
+     Handles streaming for:
+     - Model response text (token by token)
+     - Tool execution progress
+     - Token limit warnings
+     - Progress bars and status updates
+     """
+
+     def __init__(self):
+         """Initialise the streaming manager."""
+         self._active_streams = {}
+
+     async def stream_chat_response(
+         self,
+         conversation_manager,
+         message: str,
+     ) -> AsyncGenerator[dict, None]:
+         """
+         Stream a chat response with real-time updates including tool calls.
+
+         Args:
+             conversation_manager: ConversationManager instance
+             message: User message to send
+
+         Yields:
+             Dictionary events for SSE streaming
+         """
+         import concurrent.futures
+         import threading
+
+         try:
+             # Send initial "processing" event
+             yield {
+                 "event": "status",
+                 "data": json.dumps({
+                     "type": "processing",
+                     "message": "",
+                 }),
+             }
+
+             # Get the current conversation ID and track starting message count
+             conversation_id = conversation_manager.current_conversation_id
+             database = conversation_manager.database
+
+             # Get initial message count (before sending)
+             try:
+                 initial_messages = database.get_conversation_messages(conversation_id)
+                 last_message_count = len(initial_messages)
+             except Exception as e:
+                 logger.error(f"Failed to get initial message count: {e}")
+                 last_message_count = 0
+
+             # Result container for the thread
+             result_container = {'response': None, 'error': None, 'done': False}
+
+             # Run send_message in a background thread
+             def run_send_message():
+                 try:
+                     result_container['response'] = conversation_manager.send_message(message)
+                 except Exception as e:
+                     import traceback
+                     logger.error(f"Error in send_message thread: {e}")
+                     logger.error(f"Traceback: {traceback.format_exc()}")
+                     result_container['error'] = str(e)
+                 finally:
+                     result_container['done'] = True
+
+             # Start the thread
+             thread = threading.Thread(target=run_send_message)
+             thread.start()
+
+             # Poll for new messages while thread is running
+             emitted_messages = set()  # Track which message IDs we've already emitted
+             emitted_permission_requests = set()  # Track which permission requests we've already emitted
+
+             while not result_container['done']:
+                 # Check for pending permission requests (if web interface is available)
+                 if hasattr(conversation_manager, 'web_interface') and conversation_manager.web_interface:
+                     pending_request = conversation_manager.web_interface.get_pending_permission_request()
+                     if pending_request:
+                         request_id = pending_request['request_id']
+                         if request_id not in emitted_permission_requests:
+                             emitted_permission_requests.add(request_id)
+                             yield {
+                                 "event": "permission_request",
+                                 "data": json.dumps({
+                                     "request_id": request_id,
+                                     "tool_name": pending_request['tool_name'],
+                                     "tool_description": pending_request.get('tool_description'),
+                                 }),
+                             }
+
+                 # Check for new messages
+                 try:
+                     current_messages = database.get_conversation_messages(conversation_id)
+                 except Exception as e:
+                     # Database might be locked, retry on next poll
+                     logger.warning(f"Database query failed during polling: {e}")
+                     await asyncio.sleep(0.2)
+                     continue
+
+                 # Find new messages since last check
+                 for msg in current_messages[last_message_count:]:
+                     msg_id = msg['id']
+                     if msg_id in emitted_messages:
+                         continue
+
+                     emitted_messages.add(msg_id)
+                     role = msg['role']
+                     content = msg['content']
+
+                     # Check message type and emit appropriate event
+                     if content.startswith('[TOOL_RESULTS]'):
+                         # Tool results
+                         try:
+                             json_content = content.replace('[TOOL_RESULTS]', '').strip()
+                             results = json.loads(json_content)
+                             for result in results:
+                                 yield {
+                                     "event": "tool_complete",
+                                     "data": json.dumps({
+                                         "tool_use_id": result.get('tool_use_id', 'unknown'),
+                                         "content": result.get('content', ''),
+                                     }),
+                                 }
+                         except:
+                             pass
+
+                     elif role == 'assistant' and content.strip().startswith('['):
+                         # Check if this is a tool call message (may contain text + tool_use)
+                         try:
+                             blocks = json.loads(content)
+                             if isinstance(blocks, list):
+                                 for block in blocks:
+                                     if block.get('type') == 'text' and block.get('text'):
+                                         # Emit text content that appears with tool calls
+                                         yield {
+                                             "event": "response",
+                                             "data": json.dumps({
+                                                 "type": "text",
+                                                 "content": block.get('text'),
+                                                 "final": False,
+                                             }),
+                                         }
+                                     elif block.get('type') == 'tool_use':
+                                         # Emit tool call
+                                         yield {
+                                             "event": "tool_start",
+                                             "data": json.dumps({
+                                                 "tool_name": block.get('name'),
+                                                 "input": block.get('input', {}),
+                                             }),
+                                         }
+                         except:
+                             pass
+
+                 last_message_count = len(current_messages)
+
+                 # Small delay before next poll
+                 await asyncio.sleep(0.2)
+
+             # Thread finished, check result
+             if result_container['error']:
+                 yield {
+                     "event": "error",
+                     "data": json.dumps({
+                         "message": result_container['error'],
+                     }),
+                 }
+             elif result_container['response']:
+                 # Emit final response
+                 yield {
+                     "event": "response",
+                     "data": json.dumps({
+                         "type": "text",
+                         "content": result_container['response'],
+                         "final": True,
+                     }),
+                 }
+
+                 # Send completion event
+                 yield {
+                     "event": "complete",
+                     "data": json.dumps({
+                         "status": "success",
+                     }),
+                 }
+             else:
+                 yield {
+                     "event": "error",
+                     "data": json.dumps({
+                         "message": "Failed to get response from model",
+                     }),
+                 }
+
+         except Exception as e:
+             logger.error(f"Error in stream_chat_response: {e}")
+             yield {
+                 "event": "error",
+                 "data": json.dumps({
+                     "message": str(e),
+                 }),
+             }
+
+     async def stream_tool_execution(
+         self,
+         tool_name: str,
+         tool_input: dict,
+     ) -> AsyncGenerator[dict, None]:
+         """
+         Stream tool execution progress.
+
+         Args:
+             tool_name: Name of the tool being executed
+             tool_input: Tool input parameters
+
+         Yields:
+             Dictionary events for SSE streaming
+         """
+         try:
+             # Send start event
+             yield {
+                 "event": "tool_start",
+                 "data": json.dumps({
+                     "tool_name": tool_name,
+                     "input": tool_input,
+                 }),
+             }
+
+             # Simulate tool execution
+             # In actual implementation, this would integrate with MCP manager
+             await asyncio.sleep(0.5)
+
+             # Send completion event
+             yield {
+                 "event": "tool_complete",
+                 "data": json.dumps({
+                     "tool_name": tool_name,
+                     "status": "success",
+                 }),
+             }
+
+         except Exception as e:
+             logger.error(f"Error in stream_tool_execution: {e}")
+             yield {
+                 "event": "tool_error",
+                 "data": json.dumps({
+                     "tool_name": tool_name,
+                     "error": str(e),
+                 }),
+             }
+
+     async def stream_progress(
+         self,
+         task_name: str,
+         total_steps: int,
+     ) -> AsyncGenerator[dict, None]:
+         """
+         Stream progress updates.
+
+         Args:
+             task_name: Name of the task
+             total_steps: Total number of steps
+
+         Yields:
+             Dictionary events for SSE streaming
+         """
+         try:
+             for step in range(total_steps + 1):
+                 yield {
+                     "event": "progress",
+                     "data": json.dumps({
+                         "task": task_name,
+                         "step": step,
+                         "total": total_steps,
+                         "percentage": int((step / total_steps) * 100) if total_steps > 0 else 100,
+                     }),
+                 }
+                 await asyncio.sleep(0.1)
+
+         except Exception as e:
+             logger.error(f"Error in stream_progress: {e}")
+             yield {
+                 "event": "error",
+                 "data": json.dumps({
+                     "message": str(e),
+                 }),
+             }
+
+
+ # Global streaming manager instance
+ streaming_manager = StreamingManager()
+
+
+ @router.get("/stream/chat")
+ async def stream_chat(
+     request: Request,
+     conversation_id: int,
+     message: str,
+     session_id: str = Depends(get_current_session),
+ ):
+     """
+     SSE endpoint for streaming chat responses.
+
+     Args:
+         request: FastAPI request object
+         conversation_id: Conversation ID
+         message: User message to send
+         session_id: Validated session ID from dependency
+
+     Returns:
+         EventSourceResponse with SSE stream
+     """
+     # Get app instance
+     app_instance = request.app.state.app_instance
+     conversation_manager = app_instance.conversation_manager
+
+     # Load conversation and set model with proper provider routing
+     conversation_manager.load_conversation(conversation_id)
+     conv = app_instance.database.get_conversation(conversation_id)
+     if conv:
+         app_instance.llm_manager.set_model(conv['model_id'])
+         # Update service references so conversation manager uses the correct provider
+         app_instance.bedrock_service = app_instance.llm_manager.get_active_service()
+         conversation_manager.update_service(app_instance.bedrock_service)
+
+     # Create streaming generator
+     async def event_generator():
+         async for event in streaming_manager.stream_chat_response(
+             conversation_manager=conversation_manager,
+             message=message,
+         ):
+             yield event
+
+     return EventSourceResponse(event_generator())
+
+
+ @router.get("/stream/tool")
+ async def stream_tool(
+     request: Request,
+     tool_name: str,
+     session_id: str = Depends(get_current_session),
+ ):
+     """
+     SSE endpoint for streaming tool execution.
+
+     Args:
+         request: FastAPI request object
+         tool_name: Name of the tool to execute
+         session_id: Validated session ID from dependency
+
+     Returns:
+         EventSourceResponse with SSE stream
+     """
+     # Create streaming generator
+     async def event_generator():
+         async for event in streaming_manager.stream_tool_execution(
+             tool_name=tool_name,
+             tool_input={},  # Placeholder
+         ):
+             yield event
+
+     return EventSourceResponse(event_generator())
+
+
+ @router.get("/stream/progress")
+ async def stream_progress(
+     request: Request,
+     task_name: str,
+     total_steps: int = 10,
+     session_id: str = Depends(get_current_session),
+ ):
+     """
+     SSE endpoint for streaming progress updates.
+
+     Args:
+         request: FastAPI request object
+         task_name: Name of the task
+         total_steps: Total number of steps
+         session_id: Validated session ID from dependency
+
+     Returns:
+         EventSourceResponse with SSE stream
+     """
+     # Create streaming generator
+     async def event_generator():
+         async for event in streaming_manager.stream_progress(
+             task_name=task_name,
+             total_steps=total_steps,
+         ):
+             yield event
+
+     return EventSourceResponse(event_generator())
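
Note: the /stream/chat endpoint added above emits named SSE events (status, permission_request, tool_start, tool_complete, response, complete, error), each carrying a JSON payload in its data field. The sketch below shows one way such a stream could be consumed. It is not part of the dtSpark package or of this diff; the base URL, route prefix, session cookie name, and TLS handling are assumptions that depend on how the dtSpark web server is actually configured.

# Hypothetical SSE consumer for the /stream/chat endpoint -- illustrative sketch only.
# BASE_URL, the route prefix, and SESSION_COOKIES are assumptions, not taken from the package.
import json
import requests

BASE_URL = "https://localhost:8443"        # assumed host/port
SESSION_COOKIES = {"session_id": "..."}    # assumed session cookie name

def consume_chat_stream(conversation_id: int, message: str):
    """Yield (event, data) pairs from the SSE stream until 'complete' or 'error'."""
    with requests.get(
        f"{BASE_URL}/stream/chat",
        params={"conversation_id": conversation_id, "message": message},
        cookies=SESSION_COOKIES,
        stream=True,
        verify=False,  # the server may use a self-signed certificate (see web/ssl_utils.py)
    ) as response:
        response.raise_for_status()
        event = None
        for line in response.iter_lines(decode_unicode=True):
            if not line:                 # blank line terminates one SSE event
                event = None
                continue
            if line.startswith("event:"):
                event = line[len("event:"):].strip()
            elif line.startswith("data:") and event:
                data = json.loads(line[len("data:"):].strip())
                yield event, data
                if event in ("complete", "error"):
                    return

if __name__ == "__main__":
    for event, data in consume_chat_stream(conversation_id=1, message="Hello"):
        print(event, data)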