dtSpark 1.1.0a3__py3-none-any.whl → 1.1.0a7__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- dtSpark/_version.txt +1 -1
- dtSpark/aws/authentication.py +1 -1
- dtSpark/aws/bedrock.py +238 -239
- dtSpark/aws/costs.py +9 -5
- dtSpark/aws/pricing.py +25 -21
- dtSpark/cli_interface.py +77 -68
- dtSpark/conversation_manager.py +54 -47
- dtSpark/core/application.py +114 -91
- dtSpark/core/context_compaction.py +241 -226
- dtSpark/daemon/__init__.py +36 -22
- dtSpark/daemon/action_monitor.py +46 -17
- dtSpark/daemon/daemon_app.py +126 -104
- dtSpark/daemon/daemon_manager.py +59 -23
- dtSpark/daemon/pid_file.py +3 -2
- dtSpark/database/autonomous_actions.py +3 -0
- dtSpark/database/credential_prompt.py +52 -54
- dtSpark/files/manager.py +6 -12
- dtSpark/limits/__init__.py +1 -1
- dtSpark/limits/tokens.py +2 -2
- dtSpark/llm/anthropic_direct.py +246 -141
- dtSpark/llm/ollama.py +3 -1
- dtSpark/mcp_integration/manager.py +4 -4
- dtSpark/mcp_integration/tool_selector.py +83 -77
- dtSpark/resources/config.yaml.template +11 -0
- dtSpark/safety/patterns.py +45 -46
- dtSpark/safety/prompt_inspector.py +8 -1
- dtSpark/scheduler/creation_tools.py +273 -181
- dtSpark/scheduler/executor.py +503 -221
- dtSpark/tools/builtin.py +70 -53
- dtSpark/web/endpoints/autonomous_actions.py +12 -9
- dtSpark/web/endpoints/chat.py +8 -6
- dtSpark/web/endpoints/conversations.py +18 -9
- dtSpark/web/endpoints/main_menu.py +132 -105
- dtSpark/web/endpoints/streaming.py +2 -2
- dtSpark/web/server.py +70 -5
- dtSpark/web/ssl_utils.py +3 -3
- dtSpark/web/static/css/dark-theme.css +8 -29
- dtSpark/web/static/js/chat.js +6 -8
- dtSpark/web/static/js/main.js +8 -8
- dtSpark/web/static/js/sse-client.js +130 -122
- dtSpark/web/templates/actions.html +5 -5
- dtSpark/web/templates/base.html +15 -0
- dtSpark/web/templates/chat.html +10 -10
- dtSpark/web/templates/conversations.html +6 -2
- dtSpark/web/templates/goodbye.html +2 -2
- dtSpark/web/templates/main_menu.html +19 -17
- dtSpark/web/web_interface.py +2 -2
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/METADATA +9 -2
- dtspark-1.1.0a7.dist-info/RECORD +96 -0
- dtspark-1.1.0a3.dist-info/RECORD +0 -96
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/WHEEL +0 -0
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/entry_points.txt +0 -0
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/licenses/LICENSE +0 -0
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/top_level.txt +0 -0
dtSpark/scheduler/executor.py
CHANGED
@@ -123,20 +123,48 @@ class ActionContextCompactor:
         """
         total = 0
         for msg in messages:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            total += self._estimate_message_tokens(msg)
+        return total
+
+    def _estimate_message_tokens(self, msg: Dict) -> int:
+        """
+        Estimate token count for a single message.
+
+        Args:
+            msg: Message dictionary
+
+        Returns:
+            Estimated token count
+        """
+        content = msg.get('content', '')
+        if isinstance(content, str):
+            # Rough estimate: ~4 chars per token
+            return len(content) // 4
+        if isinstance(content, list):
+            return self._estimate_content_blocks_tokens(content)
+        return 0
+
+    def _estimate_content_blocks_tokens(self, blocks: list) -> int:
+        """
+        Estimate token count for a list of content blocks.
+
+        Args:
+            blocks: List of content block dictionaries
+
+        Returns:
+            Estimated token count
+        """
+        total = 0
+        for block in blocks:
+            if not isinstance(block, dict):
+                continue
+            block_type = block.get('type')
+            if block_type == 'text':
+                total += len(block.get('text', '')) // 4
+            elif block_type == 'tool_use':
+                total += len(json.dumps(block.get('input', {}))) // 4
+            elif block_type == 'tool_result':
+                total += len(str(block.get('content', ''))) // 4
         return total

     def _check_rate_limits(self, prompt: str) -> Dict[str, Any]:
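Note on the hunk above: the new _estimate_message_tokens and _estimate_content_blocks_tokens helpers keep the package's rough heuristic of about 4 characters per token rather than calling a tokenizer. A minimal standalone sketch of that heuristic, assuming the same dict-shaped messages (illustrative names only, not the package's API):

import json

def estimate_tokens(messages: list) -> int:
    """Rough token estimate: ~4 characters per token, mirroring the helpers above."""
    total = 0
    for msg in messages:
        content = msg.get('content', '')
        if isinstance(content, str):
            total += len(content) // 4
        elif isinstance(content, list):
            for block in content:
                if not isinstance(block, dict):
                    continue
                if block.get('type') == 'text':
                    total += len(block.get('text', '')) // 4
                elif block.get('type') == 'tool_use':
                    total += len(json.dumps(block.get('input', {}))) // 4
                elif block.get('type') == 'tool_result':
                    total += len(str(block.get('content', ''))) // 4
    return total

# Example: a short user message plus a tool_result block
messages = [
    {'role': 'user', 'content': 'Summarise the latest build log.'},
    {'role': 'user', 'content': [{'type': 'tool_result', 'content': 'OK ' * 200}]},
]
print(estimate_tokens(messages))  # roughly 7 + 150 estimated tokens

The integer division means very short strings round down to zero tokens, which is acceptable for the compaction thresholds this estimate feeds.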
@@ -249,8 +277,9 @@ class ActionContextCompactor:
         compacted_tokens = len(compacted_content) // 4
         reduction = ((current_tokens - compacted_tokens) / current_tokens * 100) if current_tokens > 0 else 0

+        reduction_str = f"{reduction:.1f}% reduction"
         logging.info(f"Action compaction: {len(messages)} messages → 1 summary, "
-                     f"{current_tokens:,} → {compacted_tokens:,} tokens ({
+                     f"{current_tokens:,} → {compacted_tokens:,} tokens ({reduction_str})")

         # Return compacted context as single user message
         return [{
@@ -273,36 +302,65 @@ class ActionContextCompactor:
             Formatted string
         """
         lines = []
-        for
+        for msg in messages:
             role = msg.get('role', 'unknown').upper()
             content = msg.get('content', '')
-
-            if
-
-                if len(content) > 1500:
-                    content = content[:1500] + f"... [truncated, {len(content) - 1500} more chars]"
-                lines.append(f"[{role}]: {content}")
-            elif isinstance(content, list):
-                # Handle content blocks
-                parts = []
-                for block in content:
-                    if isinstance(block, dict):
-                        if block.get('type') == 'text':
-                            text = block.get('text', '')
-                            if len(text) > 500:
-                                text = text[:500] + "..."
-                            parts.append(text)
-                        elif block.get('type') == 'tool_use':
-                            parts.append(f"[Tool: {block.get('name', 'unknown')}]")
-                        elif block.get('type') == 'tool_result':
-                            result = str(block.get('content', ''))
-                            if len(result) > 300:
-                                result = result[:300] + "..."
-                            parts.append(f"[Result: {result}]")
-                lines.append(f"[{role}]: {' | '.join(parts)}")
+            formatted = self._format_single_message_content(role, content)
+            if formatted:
+                lines.append(formatted)

         return '\n\n'.join(lines)

+    def _format_single_message_content(self, role: str, content) -> str:
+        """
+        Format content of a single message for compaction.
+
+        Args:
+            role: Message role (uppercased)
+            content: Message content (string or list of blocks)
+
+        Returns:
+            Formatted string for this message
+        """
+        if isinstance(content, str):
+            if len(content) > 1500:
+                content = content[:1500] + f"... [truncated, {len(content) - 1500} more chars]"
+            return f"[{role}]: {content}"
+
+        if isinstance(content, list):
+            parts = [self._format_content_block(block) for block in content
+                     if isinstance(block, dict)]
+            parts = [p for p in parts if p]
+            return f"[{role}]: {' | '.join(parts)}"
+
+        return ''
+
+    @staticmethod
+    def _format_content_block(block: Dict) -> Optional[str]:
+        """
+        Format a single content block for compaction output.
+
+        Args:
+            block: Content block dictionary
+
+        Returns:
+            Formatted string or None
+        """
+        block_type = block.get('type')
+        if block_type == 'text':
+            text = block.get('text', '')
+            if len(text) > 500:
+                text = text[:500] + "..."
+            return text
+        if block_type == 'tool_use':
+            return f"[Tool: {block.get('name', 'unknown')}]"
+        if block_type == 'tool_result':
+            result = str(block.get('content', ''))
+            if len(result) > 300:
+                result = result[:300] + "..."
+            return f"[Result: {result}]"
+        return None
+

 class ActionExecutor:
     """
@@ -351,13 +409,13 @@ class ActionExecutor:

         logging.info("ActionExecutor initialised with context compaction support")

-    def execute(self, action_id: int,
+    def execute(self, action_id: int, _user_guid: str, is_manual: bool = False) -> Dict[str, Any]:
         """
         Execute an autonomous action.

         Args:
             action_id: ID of the action to execute
-
+            _user_guid: User GUID (reserved for future use)
             is_manual: Whether this is a manual "Run Now" execution

         Returns:
@@ -445,45 +503,88 @@ class ActionExecutor:
                 'error': error_message
             }

-    def _execute_action(self, action: Dict,
+    def _execute_action(self, action: Dict, _is_manual: bool = False) -> Dict[str, Any]:
         """
         Execute the actual LLM invocation for an action.

         Args:
             action: Action dictionary
-
+            _is_manual: Whether this is a manual execution (reserved for future use)

         Returns:
             Dict with 'text', 'html', 'input_tokens', 'output_tokens', 'context_snapshot'
         """
-        # Set the model
         model_id = action['model_id']
+        self._set_action_model(model_id)
+
+        # Prepare context, tools, system prompt, and max tokens
+        messages = self._prepare_context(action)
+        tools = self._get_filtered_tools(action['id'])
+        system_prompt = self._build_system_prompt(action)
+        action_max_tokens = action.get('max_tokens', 8192)
+
+        # Initial model invocation
+        response = self._invoke_initial_model(
+            action, messages, action_max_tokens, tools, system_prompt
+        )
+
+        # Run the tool use loop
+        loop_result = self._run_tool_loop(
+            action, model_id, messages, response,
+            action_max_tokens, tools, system_prompt
+        )
+
+        # Assemble final result
+        return self._assemble_action_result(
+            action, loop_result['messages'], loop_result['response'],
+            loop_result['all_text_responses'], loop_result['tool_calls_summary'],
+            loop_result['compaction_count']
+        )
+
+    def _set_action_model(self, model_id: str) -> None:
+        """
+        Set the LLM model for action execution.
+
+        Args:
+            model_id: Model identifier to set
+
+        Raises:
+            RuntimeError: If model cannot be set
+        """
         try:
             self.llm_manager.set_model(model_id)
         except Exception as e:
-
-            try:
-                available = self.llm_manager.list_all_models()
-                available_ids = [m.get('id', 'unknown') for m in available]
-                logging.error(f"Available models: {available_ids}")
-            except Exception as list_err:
-                logging.error(f"Failed to list available models: {list_err}")
+            self._log_available_models()
             raise RuntimeError(f"Failed to set model {model_id}: {e}")

-
-
-
-
-
+    def _log_available_models(self) -> None:
+        """Log available models for diagnostic purposes."""
+        try:
+            available = self.llm_manager.list_all_models()
+            available_ids = [m.get('id', 'unknown') for m in available]
+            logging.error(f"Available models: {available_ids}")
+        except Exception as list_err:
+            logging.error(f"Failed to list available models: {list_err}")
+
+    def _invoke_initial_model(self, action: Dict, messages: List[Dict],
+                              action_max_tokens: int, tools: Optional[List[Dict]],
+                              system_prompt: str) -> Dict:
+        """
+        Perform the initial model invocation and validate the response.

-
-
+        Args:
+            action: Action dictionary
+            messages: Prepared message context
+            action_max_tokens: Maximum tokens for output
+            tools: Filtered tool definitions
+            system_prompt: System prompt string

-
-
+        Returns:
+            Validated model response dictionary

-
-
+        Raises:
+            RuntimeError: If no response or LLM returns an error
+        """
         response = self.llm_manager.invoke_model(
             messages=messages,
             max_tokens=action_max_tokens,
@@ -491,7 +592,6 @@ class ActionExecutor:
             tools=tools if tools else None,
             system=system_prompt
         )
-        elapsed_time = time.time() - start_time

         if not response:
             raise RuntimeError("No response from LLM")
@@ -501,7 +601,6 @@ class ActionExecutor:
                 f"LLM error: {response.get('error_message', 'Unknown error')}"
             )

-        # Log response details for debugging token limit issues
         stop_reason = response.get('stop_reason', 'unknown')
         usage = response.get('usage', {})
         output_tokens = usage.get('output_tokens', 0)
@@ -509,25 +608,39 @@ class ActionExecutor:
             f"Action {action['id']} initial response: stop_reason={stop_reason}, "
             f"output_tokens={output_tokens}, max_tokens={action_max_tokens}"
         )
+        return response
+
+    def _run_tool_loop(self, action: Dict, model_id: str,
+                       messages: List[Dict], response: Dict,
+                       action_max_tokens: int, tools: Optional[List[Dict]],
+                       system_prompt: str) -> Dict[str, Any]:
+        """
+        Run the tool use loop until the LLM stops requesting tools.

-
-
-
+        Args:
+            action: Action dictionary
+            model_id: Model identifier (for context window lookup)
+            messages: Current message list
+            response: Initial model response
+            action_max_tokens: Maximum tokens for output
+            tools: Filtered tool definitions
+            system_prompt: System prompt string
+
+        Returns:
+            Dict with 'messages', 'response', 'all_text_responses',
+            'tool_calls_summary', 'compaction_count'
+        """
         max_tool_iterations = action.get('max_tool_iterations', None)
         if max_tool_iterations is None:
             max_tool_iterations = self.config.get('conversation', {}).get('max_tool_iterations', 25)
         logging.debug(f"Action {action['id']} max tool iterations: {max_tool_iterations}")
-        iteration = 0

-        # Accumulate all text responses and track tool calls
         all_text_responses = []
         tool_calls_summary = []
         compaction_count = 0
-
-        # Get context window for compaction checks
         context_window = self._get_context_window(model_id)
+        iteration = 0

-        # Extract any text from initial response
         initial_text = self._extract_text_response(response)
         if initial_text:
             all_text_responses.append(initial_text)
@@ -537,7 +650,6 @@ class ActionExecutor:
             tool_use_blocks = [b for b in content_blocks if b.get('type') == 'tool_use']

             if not tool_use_blocks:
-                # No more tool calls - we're done
                 break

             iteration += 1
@@ -551,73 +663,137 @@ class ActionExecutor:
                     'input': block.get('input', {})
                 })

-            # Execute tool calls
+            # Execute tool calls and add results to messages
             tool_results = self._execute_tool_calls(action['id'], tool_use_blocks)
-
-            # Add tool results to messages and get next response
             messages = self._add_tool_results(messages, response, tool_results)

-            #
-
-            messages,
-                messages=messages,
-                original_prompt=action['action_prompt'],
-                context_window=context_window,
-                in_tool_loop=True
-            )
-            if compacted:
-                compaction_count += 1
-                logging.info(f"Action {action['id']} context compacted (compaction #{compaction_count})")
-
-            response = self.llm_manager.invoke_model(
-                messages=messages,
-                max_tokens=action_max_tokens,
-                temperature=0.7,
-                tools=tools if tools else None,
-                system=system_prompt
+            # Periodic context compaction
+            messages, compaction_count = self._maybe_compact_context(
+                action, messages, iteration, context_window, compaction_count
             )

-
-
-
-
+            # Next model invocation
+            response = self._invoke_tool_iteration(
+                action, messages, action_max_tokens, tools, system_prompt, iteration
+            )

-            # Extract and accumulate text from this iteration
             iter_text = self._extract_text_response(response)
             if iter_text:
                 all_text_responses.append(iter_text)

-            # Log iteration response details
-            iter_stop_reason = response.get('stop_reason', 'unknown')
-            iter_usage = response.get('usage', {})
-            iter_output_tokens = iter_usage.get('output_tokens', 0)
-            logging.debug(
-                f"Action {action['id']} iteration {iteration} response: "
-                f"stop_reason={iter_stop_reason}, output_tokens={iter_output_tokens}"
-            )
-
             if iteration >= max_tool_iterations:
                 logging.warning(f"Action {action['id']} reached max tool iterations ({max_tool_iterations})")
-                # Add warning to output
                 all_text_responses.append(
                     f"\n\n---\n**Note:** Action reached maximum tool iterations ({max_tool_iterations}). "
                     f"The task may be incomplete. Consider increasing max_tool_iterations in config."
                 )

-
+        return {
+            'messages': messages,
+            'response': response,
+            'all_text_responses': all_text_responses,
+            'tool_calls_summary': tool_calls_summary,
+            'compaction_count': compaction_count
+        }
+
+    def _maybe_compact_context(self, action: Dict, messages: List[Dict],
+                               iteration: int, context_window: int,
+                               compaction_count: int) -> Tuple[List[Dict], int]:
+        """
+        Check and perform context compaction if needed.
+
+        Args:
+            action: Action dictionary
+            messages: Current message list
+            iteration: Current tool iteration number
+            context_window: Model context window size
+            compaction_count: Running count of compactions performed
+
+        Returns:
+            Tuple of (possibly compacted messages, updated compaction count)
+        """
+        if iteration % 3 != 0 or context_window <= 0:
+            return messages, compaction_count
+
+        messages, compacted = self.context_compactor.check_and_compact(
+            messages=messages,
+            original_prompt=action['action_prompt'],
+            context_window=context_window,
+            in_tool_loop=True
+        )
+        if compacted:
+            compaction_count += 1
+            logging.info(f"Action {action['id']} context compacted (compaction #{compaction_count})")
+
+        return messages, compaction_count
+
+    def _invoke_tool_iteration(self, action: Dict, messages: List[Dict],
+                               action_max_tokens: int, tools: Optional[List[Dict]],
+                               system_prompt: str, iteration: int) -> Dict:
+        """
+        Invoke the model during a tool use iteration.
+
+        Args:
+            action: Action dictionary
+            messages: Current message list
+            action_max_tokens: Maximum tokens for output
+            tools: Filtered tool definitions
+            system_prompt: System prompt string
+            iteration: Current iteration number
+
+        Returns:
+            Model response dictionary
+
+        Raises:
+            RuntimeError: If the model returns an error
+        """
+        response = self.llm_manager.invoke_model(
+            messages=messages,
+            max_tokens=action_max_tokens,
+            temperature=0.7,
+            tools=tools if tools else None,
+            system=system_prompt
+        )
+
+        if response.get('error'):
+            raise RuntimeError(
+                f"LLM error during tool iteration: {response.get('error_message', 'Unknown error')}"
+            )
+
+        iter_stop_reason = response.get('stop_reason', 'unknown')
+        iter_usage = response.get('usage', {})
+        iter_output_tokens = iter_usage.get('output_tokens', 0)
+        logging.debug(
+            f"Action {action['id']} iteration {iteration} response: "
+            f"stop_reason={iter_stop_reason}, output_tokens={iter_output_tokens}"
+        )
+        return response
+
+    def _assemble_action_result(self, action: Dict, messages: List[Dict],
+                                response: Dict, all_text_responses: List[str],
+                                tool_calls_summary: List[Dict],
+                                compaction_count: int) -> Dict[str, Any]:
+        """
+        Assemble the final action result from accumulated data.
+
+        Args:
+            action: Action dictionary
+            messages: Final message list
+            response: Final model response
+            all_text_responses: Accumulated text responses
+            tool_calls_summary: Summary of tool calls made
+            compaction_count: Number of compactions performed
+
+        Returns:
+            Dict with 'text', 'html', 'input_tokens', 'output_tokens', 'context_snapshot'
+        """
         text_response = '\n\n'.join(all_text_responses) if all_text_responses else ''

-        #
-
-
-
-            summary_parts.append(f"**Tools used ({len(tool_calls_summary)} calls):** {', '.join(sorted(tools_used))}")
-        if compaction_count > 0:
-            summary_parts.append(f"**Context compactions:** {compaction_count}")
-        if summary_parts:
-            text_response += "\n\n---\n" + " | ".join(summary_parts)
+        # Build execution summary
+        text_response = self._append_execution_summary(
+            text_response, tool_calls_summary, compaction_count
+        )

-        # Convert to HTML
         html_response = self._convert_to_html(text_response)

         # Update cumulative context if needed
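Note on the hunk above: the compaction check moved into _maybe_compact_context, which only runs on every third tool iteration and only when a context window size is known (the `iteration % 3 != 0 or context_window <= 0` guard short-circuits everything else). A minimal sketch of that cadence with a stubbed compactor (illustrative names only, not the package's API):

def maybe_compact(messages, iteration, context_window, compaction_count, compactor):
    """Compact on every third iteration, mirroring the cadence in the hunk above."""
    if iteration % 3 != 0 or context_window <= 0:
        return messages, compaction_count
    messages, compacted = compactor(messages)
    if compacted:
        compaction_count += 1
    return messages, compaction_count


def stub_compactor(messages):
    """Stand-in compactor: collapse long histories into one summary message."""
    if len(messages) <= 4:
        return messages, False
    return [{'role': 'user', 'content': f'[summary of {len(messages)} messages]'}], True


msgs = [{'role': 'user', 'content': 'step'} for _ in range(10)]
count = 0
for iteration in range(1, 7):
    msgs, count = maybe_compact(msgs, iteration, 200_000, count, stub_compactor)
print(count)  # compaction fires at iteration 3; by iteration 6 the history is already short

Checking only every few iterations keeps the overhead of the token estimate and summarisation off the hot path of each tool call.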
@@ -626,9 +802,7 @@ class ActionExecutor:
             self._update_cumulative_context(action['id'], messages, response)
         context_snapshot = json.dumps(self._cumulative_contexts.get(action['id'], []))

-        # Get token usage
         usage = response.get('usage', {})
-
         return {
             'text': text_response,
             'html': html_response,
@@ -637,6 +811,32 @@ class ActionExecutor:
             'context_snapshot': context_snapshot
         }

+    @staticmethod
+    def _append_execution_summary(text_response: str, tool_calls_summary: List[Dict],
+                                  compaction_count: int) -> str:
+        """
+        Append execution summary to the text response.
+
+        Args:
+            text_response: Current text response
+            tool_calls_summary: Summary of tool calls made
+            compaction_count: Number of compactions performed
+
+        Returns:
+            Text response with summary appended
+        """
+        summary_parts = []
+        if tool_calls_summary:
+            tools_used = set(tc['tool'] for tc in tool_calls_summary)
+            summary_parts.append(
+                f"**Tools used ({len(tool_calls_summary)} calls):** {', '.join(sorted(tools_used))}"
+            )
+        if compaction_count > 0:
+            summary_parts.append(f"**Context compactions:** {compaction_count}")
+        if summary_parts:
+            text_response += "\n\n---\n" + " | ".join(summary_parts)
+        return text_response
+
     def _prepare_context(self, action: Dict) -> List[Dict]:
         """
         Prepare the message context for the action.
@@ -755,116 +955,198 @@ class ActionExecutor:
         Returns:
             List of tool result dictionaries
         """
-        from dtSpark.tools import builtin
-
         results = []
-
-        # Get tool sources from instance (populated by _get_filtered_tools)
         tool_sources = getattr(self, '_tool_sources', {})

         for tool_block in tool_use_blocks:
-
-
-
+            result = self._execute_single_tool_call(
+                action_id, tool_block, tool_sources
+            )
+            results.append(result)

-
-            if tool_name == 'write_file':
-                logging.debug(f"write_file tool_block keys: {list(tool_block.keys())}")
-                logging.debug(f"write_file tool_input keys: {list(tool_input.keys()) if tool_input else 'None'}")
-                content_preview = str(tool_input.get('content', ''))[:100] if tool_input else ''
-                logging.debug(f"write_file content preview: '{content_preview}...' (len={len(tool_input.get('content', '') or '')})")
+        return results

-
-
-
-
-                p['tool_name'] == tool_name and p['permission_state'] == 'allowed'
-                for p in permissions
-            )
+    def _execute_single_tool_call(self, action_id: int, tool_block: Dict,
+                                  tool_sources: Dict[str, str]) -> Dict:
+        """
+        Execute a single tool call and return the result.

-
-
-
-
-                    'content': f"Tool '{tool_name}' is not permitted for this action"
-                })
-                continue
-
-            # Determine tool source and execute accordingly
-            tool_source = tool_sources.get(tool_name, 'mcp')
-
-            if tool_source == 'builtin':
-                # Execute builtin tool
-                logging.debug(f"Executing builtin tool: {tool_name}")
-                result = builtin.execute_builtin_tool(tool_name, tool_input, self.config)
-
-                if result.get('success'):
-                    result_data = result.get('result', {})
-                    if isinstance(result_data, dict):
-                        result_str = json.dumps(result_data, indent=2)
-                    else:
-                        result_str = str(result_data)
-                    results.append({
-                        'type': 'tool_result',
-                        'tool_use_id': tool_id,
-                        'content': result_str
-                    })
-                else:
-                    results.append({
-                        'type': 'tool_result',
-                        'tool_use_id': tool_id,
-                        'content': result.get('error', 'Builtin tool execution failed'),
-                        'is_error': True
-                    })
+        Args:
+            action_id: Action ID for permission checking
+            tool_block: Tool use block from model response
+            tool_sources: Mapping of tool names to their source ('builtin' or 'mcp')

-
-
-
-
-
-
-                    # Extract text content from result
-                    content_parts = []
-                    for content in result.get('content', []):
-                        if content.get('type') == 'text':
-                            content_parts.append(content.get('text', ''))
-
-                    result_str = '\n'.join(content_parts) if content_parts else 'Tool executed successfully (no output)'
-                    results.append({
-                        'type': 'tool_result',
-                        'tool_use_id': tool_id,
-                        'content': result_str
-                    })
-                else:
-                    error_msg = "Tool execution failed"
-                    if result:
-                        for content in result.get('content', []):
-                            if content.get('type') == 'text':
-                                error_msg = content.get('text', error_msg)
-                                break
-                    results.append({
-                        'type': 'tool_result',
-                        'tool_use_id': tool_id,
-                        'content': error_msg,
-                        'is_error': True
-                    })
-            else:
-                results.append({
-                    'type': 'tool_result',
-                    'tool_use_id': tool_id,
-                    'content': "No tool execution handler available"
-                })
+        Returns:
+            Tool result dictionary
+        """
+        tool_name = tool_block.get('name')
+        tool_id = tool_block.get('id')
+        tool_input = tool_block.get('input', {})

-
-
-
+        self._log_write_file_debug(tool_name, tool_block, tool_input)
+
+        try:
+            if not self._is_tool_permitted(action_id, tool_name):
+                return {
                     'type': 'tool_result',
                     'tool_use_id': tool_id,
-                    'content': f"
-
-                })
+                    'content': f"Tool '{tool_name}' is not permitted for this action"
+                }

-
+            tool_source = tool_sources.get(tool_name, 'mcp')
+
+            if tool_source == 'builtin':
+                return self._execute_builtin_tool_call(tool_name, tool_id, tool_input)
+
+            if self.mcp_manager:
+                return self._execute_mcp_tool_call(tool_name, tool_id, tool_input)
+
+            return {
+                'type': 'tool_result',
+                'tool_use_id': tool_id,
+                'content': "No tool execution handler available"
+            }
+
+        except Exception as e:
+            logging.error(f"Tool {tool_name} execution failed: {e}")
+            return {
+                'type': 'tool_result',
+                'tool_use_id': tool_id,
+                'content': f"Error executing tool: {str(e)}",
+                'is_error': True
+            }
+
+    @staticmethod
+    def _log_write_file_debug(tool_name: str, tool_block: Dict, tool_input: Dict) -> None:
+        """Log debug information for write_file tool calls."""
+        if tool_name != 'write_file':
+            return
+        logging.debug(f"write_file tool_block keys: {list(tool_block.keys())}")
+        logging.debug(f"write_file tool_input keys: {list(tool_input.keys()) if tool_input else 'None'}")
+        content_preview = str(tool_input.get('content', ''))[:100] if tool_input else ''
+        logging.debug(
+            f"write_file content preview: '{content_preview}...' "
+            f"(len={len(tool_input.get('content', '') or '')})"
+        )
+
+    def _is_tool_permitted(self, action_id: int, tool_name: str) -> bool:
+        """
+        Check whether a tool is permitted for the given action.
+
+        Args:
+            action_id: Action ID
+            tool_name: Name of the tool to check
+
+        Returns:
+            True if the tool is allowed
+        """
+        permissions = self.database.get_action_tool_permissions(action_id)
+        return any(
+            p['tool_name'] == tool_name and p['permission_state'] == 'allowed'
+            for p in permissions
+        )
+
+    def _execute_builtin_tool_call(self, tool_name: str, tool_id: str,
+                                   tool_input: Dict) -> Dict:
+        """
+        Execute a builtin tool call and return the formatted result.
+
+        Args:
+            tool_name: Name of the builtin tool
+            tool_id: Tool use ID for the response
+            tool_input: Tool input parameters
+
+        Returns:
+            Tool result dictionary
+        """
+        from dtSpark.tools import builtin
+
+        logging.debug(f"Executing builtin tool: {tool_name}")
+        result = builtin.execute_builtin_tool(tool_name, tool_input, self.config)
+
+        if result.get('success'):
+            result_data = result.get('result', {})
+            result_str = json.dumps(result_data, indent=2) if isinstance(result_data, dict) else str(result_data)
+            return {
+                'type': 'tool_result',
+                'tool_use_id': tool_id,
+                'content': result_str
+            }
+
+        return {
+            'type': 'tool_result',
+            'tool_use_id': tool_id,
+            'content': result.get('error', 'Builtin tool execution failed'),
+            'is_error': True
+        }
+
+    def _execute_mcp_tool_call(self, tool_name: str, tool_id: str,
+                               tool_input: Dict) -> Dict:
+        """
+        Execute an MCP tool call and return the formatted result.
+
+        Args:
+            tool_name: Name of the MCP tool
+            tool_id: Tool use ID for the response
+            tool_input: Tool input parameters
+
+        Returns:
+            Tool result dictionary
+        """
+        logging.debug(f"Executing MCP tool: {tool_name}")
+        result = self._call_mcp_tool_sync(tool_name, tool_input)
+
+        if result and not result.get('isError'):
+            return {
+                'type': 'tool_result',
+                'tool_use_id': tool_id,
+                'content': self._extract_mcp_text_content(result)
+            }
+
+        return {
+            'type': 'tool_result',
+            'tool_use_id': tool_id,
+            'content': self._extract_mcp_error_message(result),
+            'is_error': True
+        }
+
+    @staticmethod
+    def _extract_mcp_text_content(result: Dict) -> str:
+        """
+        Extract text content from an MCP tool result.
+
+        Args:
+            result: MCP tool result dictionary
+
+        Returns:
+            Concatenated text content or default message
+        """
+        content_parts = [
+            content.get('text', '')
+            for content in result.get('content', [])
+            if content.get('type') == 'text'
+        ]
+        return '\n'.join(content_parts) if content_parts else 'Tool executed successfully (no output)'
+
+    @staticmethod
+    def _extract_mcp_error_message(result: Optional[Dict]) -> str:
+        """
+        Extract error message from an MCP tool result.
+
+        Args:
+            result: MCP tool result dictionary (may be None)
+
+        Returns:
+            Error message string
+        """
+        if not result:
+            return "Tool execution failed"
+
+        for content in result.get('content', []):
+            if content.get('type') == 'text':
+                return content.get('text', 'Tool execution failed')
+
+        return "Tool execution failed"

     def _call_mcp_tool_sync(self, tool_name: str, tool_input: Dict) -> Optional[Dict]:
         """
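Note on the refactor above: every tool call now passes through a single permission check and is then dispatched by source (builtin vs MCP), with failures wrapped into an `is_error` tool result instead of raising out of the loop. A condensed sketch of that dispatch shape with stand-in executors (illustrative only, not the package's API):

def execute_tool_call(tool_block, allowed, sources, run_builtin, run_mcp):
    """Permission-check then dispatch a single tool_use block, as in the hunk above."""
    name, tool_id = tool_block.get('name'), tool_block.get('id')
    params = tool_block.get('input', {})
    try:
        if name not in allowed:
            return {'type': 'tool_result', 'tool_use_id': tool_id,
                    'content': f"Tool '{name}' is not permitted for this action"}
        if sources.get(name, 'mcp') == 'builtin':
            return {'type': 'tool_result', 'tool_use_id': tool_id,
                    'content': run_builtin(name, params)}
        return {'type': 'tool_result', 'tool_use_id': tool_id,
                'content': run_mcp(name, params)}
    except Exception as exc:
        return {'type': 'tool_result', 'tool_use_id': tool_id,
                'content': f'Error executing tool: {exc}', 'is_error': True}

# Example dispatch with trivial stand-ins
result = execute_tool_call(
    {'id': 'tu_1', 'name': 'write_file', 'input': {'path': 'out.txt'}},
    allowed={'write_file'}, sources={'write_file': 'builtin'},
    run_builtin=lambda n, p: f'{n} ok', run_mcp=lambda n, p: f'{n} via mcp',
)
print(result['content'])  # write_file ok

Returning an error-shaped tool result rather than raising keeps the surrounding tool loop alive, so the model can see the failure and decide how to proceed.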