dtSpark 1.1.0a3__py3-none-any.whl → 1.1.0a7__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- dtSpark/_version.txt +1 -1
- dtSpark/aws/authentication.py +1 -1
- dtSpark/aws/bedrock.py +238 -239
- dtSpark/aws/costs.py +9 -5
- dtSpark/aws/pricing.py +25 -21
- dtSpark/cli_interface.py +77 -68
- dtSpark/conversation_manager.py +54 -47
- dtSpark/core/application.py +114 -91
- dtSpark/core/context_compaction.py +241 -226
- dtSpark/daemon/__init__.py +36 -22
- dtSpark/daemon/action_monitor.py +46 -17
- dtSpark/daemon/daemon_app.py +126 -104
- dtSpark/daemon/daemon_manager.py +59 -23
- dtSpark/daemon/pid_file.py +3 -2
- dtSpark/database/autonomous_actions.py +3 -0
- dtSpark/database/credential_prompt.py +52 -54
- dtSpark/files/manager.py +6 -12
- dtSpark/limits/__init__.py +1 -1
- dtSpark/limits/tokens.py +2 -2
- dtSpark/llm/anthropic_direct.py +246 -141
- dtSpark/llm/ollama.py +3 -1
- dtSpark/mcp_integration/manager.py +4 -4
- dtSpark/mcp_integration/tool_selector.py +83 -77
- dtSpark/resources/config.yaml.template +11 -0
- dtSpark/safety/patterns.py +45 -46
- dtSpark/safety/prompt_inspector.py +8 -1
- dtSpark/scheduler/creation_tools.py +273 -181
- dtSpark/scheduler/executor.py +503 -221
- dtSpark/tools/builtin.py +70 -53
- dtSpark/web/endpoints/autonomous_actions.py +12 -9
- dtSpark/web/endpoints/chat.py +8 -6
- dtSpark/web/endpoints/conversations.py +18 -9
- dtSpark/web/endpoints/main_menu.py +132 -105
- dtSpark/web/endpoints/streaming.py +2 -2
- dtSpark/web/server.py +70 -5
- dtSpark/web/ssl_utils.py +3 -3
- dtSpark/web/static/css/dark-theme.css +8 -29
- dtSpark/web/static/js/chat.js +6 -8
- dtSpark/web/static/js/main.js +8 -8
- dtSpark/web/static/js/sse-client.js +130 -122
- dtSpark/web/templates/actions.html +5 -5
- dtSpark/web/templates/base.html +15 -0
- dtSpark/web/templates/chat.html +10 -10
- dtSpark/web/templates/conversations.html +6 -2
- dtSpark/web/templates/goodbye.html +2 -2
- dtSpark/web/templates/main_menu.html +19 -17
- dtSpark/web/web_interface.py +2 -2
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/METADATA +9 -2
- dtspark-1.1.0a7.dist-info/RECORD +96 -0
- dtspark-1.1.0a3.dist-info/RECORD +0 -96
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/WHEEL +0 -0
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/entry_points.txt +0 -0
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/licenses/LICENSE +0 -0
- {dtspark-1.1.0a3.dist-info → dtspark-1.1.0a7.dist-info}/top_level.txt +0 -0
dtSpark/core/context_compaction.py
CHANGED
@@ -183,8 +183,9 @@ class ContextCompactor:
 
         # Check emergency threshold (force compaction even during tool use)
         if current_tokens >= emergency_threshold_tokens:
+            usage_pct = current_tokens / context_window * 100
             logging.warning(f"EMERGENCY COMPACTION: {current_tokens:,}/{context_window:,} tokens "
-                            f"({
+                            f"({usage_pct:.1f}%% of context window)")
             if self.cli_interface:
                 self.cli_interface.print_warning(
                     f"Emergency compaction triggered at {current_tokens/context_window*100:.1f}% of context window"
@@ -199,13 +200,14 @@ class ContextCompactor:
 
         # Normal threshold check
         if current_tokens > compaction_threshold_tokens:
+            usage_pct = current_tokens / context_window * 100
             logging.info(f"Compaction triggered: {current_tokens:,}/{compaction_threshold_tokens:,} tokens "
-                         f"({
+                         f"({usage_pct:.1f}%% of context window)")
             return self._perform_compaction(conversation_id, model_id, provider, limits)
 
         return False
 
-    def _perform_compaction(self, conversation_id: int, model_id: str,
+    def _perform_compaction(self, conversation_id: int, model_id: str,  # noqa: S1172
                             provider: str, limits: Dict[str, int]) -> bool:
         """
         Perform the actual context compaction.
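Both threshold hunks make the same change: the inline percentage expression is hoisted into a `usage_pct` variable before it is logged. A minimal sketch of the trigger logic around these hunks, with illustrative threshold ratios (the real values come from dtSpark's configuration and are not shown in this diff):

```python
# Sketch of the compaction trigger; the 0.95 / 0.80 ratios are assumptions
# for illustration, not values taken from dtSpark.
def compaction_level(current_tokens: int, context_window: int) -> str:
    emergency_threshold_tokens = int(context_window * 0.95)
    compaction_threshold_tokens = int(context_window * 0.80)
    usage_pct = current_tokens / context_window * 100
    if current_tokens >= emergency_threshold_tokens:
        return f"emergency ({usage_pct:.1f}% of context window)"
    if current_tokens > compaction_threshold_tokens:
        return f"normal ({usage_pct:.1f}% of context window)"
    return "none"

print(compaction_level(190_000, 200_000))  # emergency (95.0% of context window)
print(compaction_level(170_000, 200_000))  # normal (85.0% of context window)
```

One detail worth noting: inside an f-string, `%%` is not collapsed to `%` (that only happens with %-style formatting), so the new log lines will show a literal doubled percent sign unless the message is later %-formatted.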
@@ -220,41 +222,30 @@ class ContextCompactor:
             True if successful, False otherwise
         """
         start_time = datetime.now()
-
-        # Display progress
         self._display_progress("🗜️ Starting intelligent context compaction...")
         self._display_separator()
 
         try:
-            # Get ALL messages (including previously compacted ones for full recompaction)
             messages = self.database.get_conversation_messages(
                 conversation_id, include_rolled_up=True
             )
-
            if len(messages) <= 4:
                logging.warning("Not enough messages to compact")
                self._display_warning("Not enough messages to compact")
                return False
 
-            # Calculate original metrics
            original_token_count = sum(msg.get('token_count', 0) for msg in messages)
            original_message_count = len(messages)
-
            self._display_info(
                f"Analysing {original_message_count} messages ({original_token_count:,} tokens)..."
            )
 
-            # Format conversation history for compaction
-            conversation_history = self._format_messages_for_compaction(messages)
-
-            # Build the compaction prompt
            compaction_prompt = self._build_compaction_prompt(
-                conversation_history,
+                self._format_messages_for_compaction(messages),
                original_message_count,
                original_token_count
            )
 
-            # Check provider rate limits before attempting compaction
            rate_limit_check = self._check_rate_limits_for_compaction(
                compaction_prompt, original_token_count
            )
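This hunk inlines the `conversation_history` temporary into the `_build_compaction_prompt` call. The prompt text itself is not part of the diff; a hypothetical sketch of such a builder, with invented wording, might look like:

```python
def build_compaction_prompt(history: str, message_count: int, token_count: int) -> str:
    # Hypothetical prompt wording; dtSpark's actual prompt is not shown in the diff.
    return (
        f"The conversation below contains {message_count} messages "
        f"(~{token_count:,} tokens). Summarise it into a structured context "
        f"that preserves decisions, open tasks, and key facts.\n\n{history}"
    )
```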
@@ -263,152 +254,157 @@ class ContextCompactor:
             logging.warning(f"Compaction skipped: {rate_limit_check['message']}")
             return False
 
-
-
-            max_compaction_tokens = min(
-                limits.get('max_output', 8192),
-                max(2000, int(original_token_count * self.compaction_ratio)),
-                16000  # Absolute cap
-            )
-
-            # Estimate prompt size and validate against context window
-            context_window = limits.get('context_window', 8192)
-            prompt_tokens = 0
-            if hasattr(self.bedrock_service, 'count_tokens'):
-                try:
-                    prompt_tokens = self.bedrock_service.count_tokens(compaction_prompt)
-                except Exception:
-                    prompt_tokens = len(compaction_prompt) // 4  # Fallback estimate
-            else:
-                prompt_tokens = len(compaction_prompt) // 4
-
-            # Check if prompt exceeds context window (need room for output too)
-            max_input_tokens = context_window - max_compaction_tokens - 1000  # Safety buffer
-            if prompt_tokens > max_input_tokens:
-                logging.warning(
-                    f"Compaction prompt ({prompt_tokens:,} tokens) too large for context window "
-                    f"({context_window:,} tokens with {max_compaction_tokens:,} reserved for output)"
-                )
-                self._display_warning(
-                    f"Conversation too large ({prompt_tokens:,} tokens) for compaction in a single pass. "
-                    f"Context window: {context_window:,} tokens"
-                )
-                # Still proceed - let the API handle it and return a proper error
-                # The model might still be able to handle it or provide partial results
-
-            logging.info(
-                f"Compaction: input={prompt_tokens:,} tokens, target_output={max_compaction_tokens:,} tokens, "
-                f"context_window={context_window:,} tokens"
+            max_compaction_tokens, _ = self._calculate_compaction_tokens(
+                compaction_prompt, original_token_count, limits
             )
 
             self._display_info(f"Generating compacted context (target: {max_compaction_tokens:,} tokens)...")
 
-
-            response = self.bedrock_service.invoke_model(
-                [{'role': 'user', 'content': compaction_prompt}],
-                max_tokens=max_compaction_tokens,
-                temperature=0.2  # Low temperature for consistent compaction
-            )
-
-            # Check for error response
-            if not response:
-                logging.error("Compaction failed - null response from model")
-                self._display_error("Compaction failed - no response from model")
-                return False
-
-            if response.get('error'):
-                error_msg = response.get('error_message', 'Unknown error')
-                error_type = response.get('error_type', 'Unknown')
-                logging.error(f"Compaction failed - {error_type}: {error_msg}")
-                self._display_error(f"Compaction failed: {error_msg}")
-                return False
-
-            # Get content from response (may be in 'content' or 'content_blocks')
-            content = response.get('content', '')
-            if not content and response.get('content_blocks'):
-                # Try to extract text from content_blocks
-                for block in response.get('content_blocks', []):
-                    if block.get('type') == 'text':
-                        content += block.get('text', '')
-
-            if not content:
-                logging.error(f"Compaction failed - empty response. Response keys: {list(response.keys())}")
-                self._display_error("Compaction failed - no content in model response")
+            compacted_content = self._invoke_compaction_model(compaction_prompt, max_compaction_tokens)
+            if compacted_content is None:
                 return False
 
-            compacted_content = content.strip()
             compacted_token_count = self.bedrock_service.count_tokens(compacted_content)
-
-            # Validate compaction quality
             if len(compacted_content) < 200:
                 logging.warning(f"Compacted content too brief ({len(compacted_content)} chars), aborting")
                 self._display_warning("Compacted content too brief, keeping original messages")
                 return False
 
-
-            compaction_marker = self._create_compaction_marker(
-                original_message_count=original_message_count,
-                original_token_count=original_token_count,
-                compacted_token_count=compacted_token_count,
-                model_id=model_id,
-                context_window=limits['context_window']
+            self._store_compaction_results(
+                conversation_id, messages, compacted_content,
+                original_message_count, original_token_count,
+                compacted_token_count, limits['context_window']
             )
 
-
-            self.database.add_message(
-                conversation_id,
-                'user',
-                f"[COMPACTED CONTEXT - {compaction_marker}]\n\n{compacted_content}",
-                compacted_token_count
+            self._report_compaction_success(
+                start_time, original_message_count,
+                original_token_count, compacted_token_count
             )
+            return True
 
-
-
-            self.
+        except Exception as e:
+            logging.error(f"Compaction failed with error: {e}", exc_info=True)
+            self._display_error(f"Compaction failed: {str(e)}")
+            return False
 
-
-
-
-
-
-
-
-
+    def _calculate_compaction_tokens(self, compaction_prompt: str,
+                                     original_token_count: int,
+                                     limits: Dict[str, int]) -> Tuple[int, int]:
+        """Calculate max compaction output tokens and estimate prompt size."""
+        max_compaction_tokens = min(
+            limits.get('max_output', 8192),
+            max(2000, int(original_token_count * self.compaction_ratio)),
+            16000
+        )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                         f"{original_token_count:,} → {compacted_token_count:,} tokens "
-                         f"({reduction_pct:.1f}% reduction)")
-
-            # Display completion
-            self._display_success(
-                f"✓ Compaction complete: {original_message_count} messages → structured context"
+        context_window = limits.get('context_window', 8192)
+        if hasattr(self.bedrock_service, 'count_tokens'):
+            try:
+                prompt_tokens = self.bedrock_service.count_tokens(compaction_prompt)
+            except Exception:
+                prompt_tokens = len(compaction_prompt) // 4
+        else:
+            prompt_tokens = len(compaction_prompt) // 4
+
+        max_input_tokens = context_window - max_compaction_tokens - 1000
+        if prompt_tokens > max_input_tokens:
+            logging.warning(
+                f"Compaction prompt ({prompt_tokens:,} tokens) too large for context window "
+                f"({context_window:,} tokens with {max_compaction_tokens:,} reserved for output)"
             )
-            self.
-                f"
-                f"
+            self._display_warning(
+                f"Conversation too large ({prompt_tokens:,} tokens) for compaction in a single pass. "
+                f"Context window: {context_window:,} tokens"
            )
-            self._display_info(f"Completed in {elapsed_time:.1f} seconds")
-            self._display_separator()
 
-
+        logging.info(
+            f"Compaction: input={prompt_tokens:,} tokens, target_output={max_compaction_tokens:,} tokens, "
+            f"context_window={context_window:,} tokens"
+        )
+        return max_compaction_tokens, prompt_tokens
+
+    def _invoke_compaction_model(self, compaction_prompt: str, max_tokens: int) -> Optional[str]:
+        """Invoke the LLM for compaction and return content, or None on failure."""
+        response = self.bedrock_service.invoke_model(
+            [{'role': 'user', 'content': compaction_prompt}],
+            max_tokens=max_tokens,
+            temperature=0.2
+        )
 
-
-            logging.error(
-            self._display_error(
-            return
+        if not response:
+            logging.error("Compaction failed - null response from model")
+            self._display_error("Compaction failed - no response from model")
+            return None
+
+        if response.get('error'):
+            error_msg = response.get('error_message', 'Unknown error')
+            error_type = response.get('error_type', 'Unknown')
+            logging.error(f"Compaction failed - {error_type}: {error_msg}")
+            self._display_error(f"Compaction failed: {error_msg}")
+            return None
+
+        content = response.get('content', '')
+        if not content and response.get('content_blocks'):
+            for block in response.get('content_blocks', []):
+                if block.get('type') == 'text':
+                    content += block.get('text', '')
+
+        if not content:
+            logging.error(f"Compaction failed - empty response. Response keys: {list(response.keys())}")
+            self._display_error("Compaction failed - no content in model response")
+            return None
+
+        return content.strip()
+
+    def _store_compaction_results(self, conversation_id: int, messages: List[Dict],
+                                  compacted_content: str, original_message_count: int,
+                                  original_token_count: int, compacted_token_count: int,
+                                  context_window: int):
+        """Store compaction results in the database."""
+        compaction_marker = self._create_compaction_marker(
+            original_message_count=original_message_count,
+            original_token_count=original_token_count,
+            compacted_token_count=compacted_token_count,
+            context_window=context_window
+        )
+
+        self.database.add_message(
+            conversation_id, 'user',
+            f"[COMPACTED CONTEXT - {compaction_marker}]\n\n{compacted_content}",
+            compacted_token_count
+        )
+
+        message_ids = [msg['id'] for msg in messages]
+        self.database.mark_messages_as_rolled_up(message_ids)
+        self.database.record_rollup(
+            conversation_id, original_message_count,
+            compacted_content, original_token_count, compacted_token_count
+        )
+
+        actual_token_count = self.database.recalculate_total_tokens(conversation_id)
+        logging.debug(f"Recalculated total_tokens after compaction: {actual_token_count:,}")
+
+    def _report_compaction_success(self, start_time, original_message_count: int,
+                                   original_token_count: int, compacted_token_count: int):
+        """Log and display compaction success metrics."""
+        elapsed_time = (datetime.now() - start_time).total_seconds()
+        reduction_pct = ((original_token_count - compacted_token_count) /
+                         original_token_count * 100) if original_token_count > 0 else 0
+
+        logging.info(f"Compaction completed in {elapsed_time:.1f}s: "
+                     f"{original_message_count} messages → structured context, "
+                     f"{original_token_count:,} → {compacted_token_count:,} tokens "
+                     f"({reduction_pct:.1f}%% reduction)")
+
+        self._display_success(
+            f"✓ Compaction complete: {original_message_count} messages → structured context"
+        )
+        self._display_info(
+            f"Token reduction: {original_token_count:,} → {compacted_token_count:,} "
+            f"({reduction_pct:.1f}%% reduction)"
+        )
+        self._display_info(f"Completed in {elapsed_time:.1f} seconds")
+        self._display_separator()
 
     def _check_rate_limits_for_compaction(
         self, compaction_prompt: str, original_token_count: int
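The extracted `_calculate_compaction_tokens` clamps the output budget to `min(max_output, max(2000, ratio * original), 16000)` and falls back to the rough four-characters-per-token estimate (`len(prompt) // 4`) when the provider cannot count tokens. A worked example of the clamp, assuming a `compaction_ratio` of 0.2 (the actual `self.compaction_ratio` is configuration-driven):

```python
def max_compaction_tokens(original_token_count: int,
                          max_output: int = 8192,
                          compaction_ratio: float = 0.2) -> int:
    # Same clamp as the diff: floor of 2000, ceilings of max_output and 16000.
    return min(max_output,
               max(2000, int(original_token_count * compaction_ratio)),
               16000)

print(max_compaction_tokens(5_000))    # 2000 (floor wins: 5000 * 0.2 = 1000)
print(max_compaction_tokens(30_000))   # 6000 (ratio wins: 30000 * 0.2)
print(max_compaction_tokens(120_000))  # 8192 (max_output wins: 24000 capped)
```

Splitting `_perform_compaction` into calculate / invoke / store / report helpers also changes the failure signalling: `_invoke_compaction_model` returns `Optional[str]`, so `None` cleanly distinguishes a failed model call from an empty-but-present string.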
@@ -502,99 +498,120 @@ class ContextCompactor:
             Formatted conversation history string
         """
         formatted_lines = []
-        message_number = 0
 
         for msg in messages:
-            message_number += 1
             role = msg.get('role', 'unknown').upper()
             content = msg.get('content', '')
-            timestamp = msg.get('timestamp', '')
-
-            # Format timestamp if available
-            time_str = ""
-            if timestamp:
-                try:
-                    if isinstance(timestamp, str):
-                        dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
-                    else:
-                        dt = timestamp
-                    time_str = f" [{dt.strftime('%Y-%m-%d %H:%M')}]"
-                except (ValueError, AttributeError):
-                    pass
-
-            # Check for previously compacted content
-            if content.startswith('[COMPACTED CONTEXT'):
-                formatted_lines.append(f"\n--- PREVIOUS COMPACTION{time_str} ---")
-                # Extract just the summary sections, not the full compacted content
-                formatted_lines.append("[Previous conversation was compacted - key points preserved below]")
-                # Include a truncated version of the compacted content
-                compacted_preview = content[:2000] + "..." if len(content) > 2000 else content
-                formatted_lines.append(compacted_preview)
-                formatted_lines.append("--- END PREVIOUS COMPACTION ---\n")
-                continue
+            time_str = self._format_timestamp(msg.get('timestamp', ''))
+            formatted_lines.extend(self._format_single_message(role, content, time_str))
 
-
-            if content.startswith('[TOOL_RESULTS]'):
-                formatted_lines.append(f"\n[{role}]{time_str} Tool Results:")
-                try:
-                    tool_results_json = content.replace('[TOOL_RESULTS]', '', 1)
-                    tool_results = json.loads(tool_results_json)
-                    if isinstance(tool_results, list):
-                        for i, result in enumerate(tool_results, 1):
-                            if isinstance(result, dict) and result.get('type') == 'tool_result':
-                                tool_id = result.get('tool_use_id', 'unknown')[:8]
-                                result_content = result.get('content', '')
-                                # Truncate long tool results
-                                if len(str(result_content)) > 500:
-                                    result_content = str(result_content)[:500] + "... [truncated]"
-                                formatted_lines.append(f"  Result {i} (tool:{tool_id}): {result_content}")
-                except json.JSONDecodeError:
-                    formatted_lines.append(f"  [Raw tool results - {len(content)} chars]")
-                continue
-
-            # Check for tool use blocks
-            if role == 'ASSISTANT' and content.startswith('['):
-                try:
-                    content_blocks = json.loads(content)
-                    if isinstance(content_blocks, list):
-                        text_parts = []
-                        tool_calls = []
-                        for block in content_blocks:
-                            if isinstance(block, dict):
-                                if block.get('type') == 'text':
-                                    text_parts.append(block.get('text', ''))
-                                elif block.get('type') == 'tool_use':
-                                    tool_name = block.get('name', 'unknown')
-                                    tool_input = block.get('input', {})
-                                    # Summarise tool input
-                                    input_summary = self._summarise_tool_input(tool_input)
-                                    tool_calls.append(f"{tool_name}({input_summary})")
-
-                        if text_parts:
-                            formatted_lines.append(f"\n[{role}]{time_str}")
-                            formatted_lines.append(''.join(text_parts))
-                        if tool_calls:
-                            formatted_lines.append(f"[Tool calls: {', '.join(tool_calls)}]")
-                        continue
-                except json.JSONDecodeError:
-                    pass  # Not JSON, treat as regular message
-
-            # Check for rollup summaries
-            if content.startswith('[Summary of previous conversation]'):
-                formatted_lines.append(f"\n--- PREVIOUS SUMMARY{time_str} ---")
-                formatted_lines.append(content)
-                formatted_lines.append("--- END PREVIOUS SUMMARY ---\n")
-                continue
+        return '\n'.join(formatted_lines)
 
-
-
-
-
-
+    @staticmethod
+    def _format_timestamp(timestamp) -> str:
+        """Format a message timestamp into a display string."""
+        if not timestamp:
+            return ""
+        try:
+            if isinstance(timestamp, str):
+                dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
             else:
-
+                dt = timestamp
+            return f" [{dt.strftime('%Y-%m-%d %H:%M')}]"
+        except (ValueError, AttributeError):
+            return ""
 
-
+    def _format_single_message(self, role: str, content: str, time_str: str) -> List[str]:
+        """Format a single message into lines. Returns a list of formatted lines."""
+        if content.startswith('[COMPACTED CONTEXT'):
+            return self._format_compacted_content(content, time_str)
+
+        if content.startswith('[TOOL_RESULTS]'):
+            return self._format_tool_results(role, content, time_str)
+
+        if role == 'ASSISTANT' and content.startswith('['):
+            result = self._format_tool_use_blocks(role, content, time_str)
+            if result is not None:
+                return result
+
+        if content.startswith('[Summary of previous conversation]'):
+            return [
+                f"\n--- PREVIOUS SUMMARY{time_str} ---",
+                content,
+                "--- END PREVIOUS SUMMARY ---\n",
+            ]
+
+        return self._format_regular_message(role, content, time_str)
+
+    @staticmethod
+    def _format_compacted_content(content: str, time_str: str) -> List[str]:
+        """Format a previously compacted context message."""
+        compacted_preview = content[:2000] + "..." if len(content) > 2000 else content
+        return [
+            f"\n--- PREVIOUS COMPACTION{time_str} ---",
+            "[Previous conversation was compacted - key points preserved below]",
+            compacted_preview,
+            "--- END PREVIOUS COMPACTION ---\n",
+        ]
+
+    @staticmethod
+    def _format_tool_results(role: str, content: str, time_str: str) -> List[str]:
+        """Format a tool results message."""
+        lines = [f"\n[{role}]{time_str} Tool Results:"]
+        try:
+            tool_results_json = content.replace('[TOOL_RESULTS]', '', 1)
+            tool_results = json.loads(tool_results_json)
+            if isinstance(tool_results, list):
+                for i, result in enumerate(tool_results, 1):
+                    if isinstance(result, dict) and result.get('type') == 'tool_result':
+                        tool_id = result.get('tool_use_id', 'unknown')[:8]
+                        result_content = str(result.get('content', ''))
+                        if len(result_content) > 500:
+                            result_content = result_content[:500] + "... [truncated]"
+                        lines.append(f"  Result {i} (tool:{tool_id}): {result_content}")
+        except json.JSONDecodeError:
+            lines.append(f"  [Raw tool results - {len(content)} chars]")
+        return lines
+
+    def _format_tool_use_blocks(self, role: str, content: str, time_str: str) -> Optional[List[str]]:
+        """Format assistant tool-use blocks. Returns None if content is not valid JSON blocks."""
+        try:
+            content_blocks = json.loads(content)
+        except json.JSONDecodeError:
+            return None
+
+        if not isinstance(content_blocks, list):
+            return None
+
+        lines = []
+        text_parts = []
+        tool_calls = []
+        for block in content_blocks:
+            if not isinstance(block, dict):
+                continue
+            if block.get('type') == 'text':
+                text_parts.append(block.get('text', ''))
+            elif block.get('type') == 'tool_use':
+                input_summary = self._summarise_tool_input(block.get('input', {}))
+                tool_calls.append(f"{block.get('name', 'unknown')}({input_summary})")
+
+        if text_parts:
+            lines.append(f"\n[{role}]{time_str}")
+            lines.append(''.join(text_parts))
+        if tool_calls:
+            lines.append(f"[Tool calls: {', '.join(tool_calls)}]")
+        return lines
+
+    @staticmethod
+    def _format_regular_message(role: str, content: str, time_str: str) -> List[str]:
+        """Format a regular text message."""
+        lines = [f"\n[{role}]{time_str}"]
+        if len(content) > 3000:
+            remaining = len(content) - 3000
+            lines.append(f"{content[:3000]}\n... [message truncated, {remaining} more chars]")
+        else:
+            lines.append(content)
+        return lines
 
     def _summarise_tool_input(self, tool_input: Dict) -> str:
         """
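This hunk replaces one long loop body with a prefix-based dispatch across small helpers. A self-contained sketch of the same dispatch pattern, reduced to its routing logic (the marker strings are taken from the diff; the helper bodies here are simplified stand-ins):

```python
import json
from typing import List, Optional

def format_single_message(role: str, content: str) -> List[str]:
    # Prefix markers as they appear in the diff; bodies simplified for the sketch.
    if content.startswith('[COMPACTED CONTEXT'):
        return ["--- PREVIOUS COMPACTION ---", content[:2000], "--- END ---"]
    if content.startswith('[TOOL_RESULTS]'):
        return [f"[{role}] Tool Results:", content[len('[TOOL_RESULTS]'):][:500]]
    if role == 'ASSISTANT' and content.startswith('['):
        blocks = try_parse_text_blocks(content)
        if blocks is not None:
            return blocks
    return [f"[{role}]", content[:3000]]

def try_parse_text_blocks(content: str) -> Optional[List[str]]:
    # Returning None signals "not valid JSON blocks, fall through to the
    # regular-message path", mirroring _format_tool_use_blocks in the diff.
    try:
        blocks = json.loads(content)
    except json.JSONDecodeError:
        return None
    if not isinstance(blocks, list):
        return None
    return [b.get('text', '') for b in blocks
            if isinstance(b, dict) and b.get('type') == 'text']

print(format_single_message('USER', 'hello'))
print(format_single_message('ASSISTANT', '[{"type": "text", "text": "hi"}]'))
```

The `Optional` return is the key design choice: invalid JSON no longer needs the old `try`/`pass`/`continue` dance inside the loop.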
@@ -644,7 +661,6 @@ class ContextCompactor:
     def _create_compaction_marker(self, original_message_count: int,
                                   original_token_count: int,
                                   compacted_token_count: int,
-                                  model_id: str,
                                   context_window: int) -> str:
         """
         Create a marker string for the compaction event.
@@ -653,7 +669,6 @@ class ContextCompactor:
             original_message_count: Number of messages compacted
             original_token_count: Original token count
             compacted_token_count: Compacted token count
-            model_id: Model used for compaction
             context_window: Model's context window size
 
         Returns:
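With `model_id` dropped from `_create_compaction_marker`, keyword-argument call sites must drop it as well (the updated call in `_store_compaction_results` above already has). A toy illustration of the narrowed signature; the marker layout is invented, since the function body is not shown in this diff:

```python
def create_compaction_marker(original_message_count: int,
                             original_token_count: int,
                             compacted_token_count: int,
                             context_window: int) -> str:
    # Hypothetical marker layout for illustration only.
    return (f"{original_message_count} msgs, "
            f"{original_token_count:,} -> {compacted_token_count:,} tokens, "
            f"window {context_window:,}")

# A call that still passes model_id= now raises
# TypeError: ... got an unexpected keyword argument 'model_id'
print(create_compaction_marker(12, 48_000, 6_500, 200_000))
```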
dtSpark/daemon/__init__.py
CHANGED
@@ -76,29 +76,43 @@ def daemon_main():
         time.sleep(2)
         sys.exit(manager.start(args))
     elif command == '--run':
-
-        # Clean up sys.argv - AbstractApp expects program name and valid args
-        sys.argv = ['dtSpark-daemon'] + args
-
-        # Set up error logging to file for background mode debugging
-        error_log_path = './daemon_error.log'
-
-        try:
-            from .daemon_app import DaemonApplication
-            app = DaemonApplication()
-            app.run()
-        except Exception as e:
-            import traceback
-            error_msg = f"Daemon failed to start: {e}\n{traceback.format_exc()}"
-            print(error_msg)
-            # Also write to error log file for background mode
-            try:
-                with open(error_log_path, 'w') as f:
-                    f.write(error_msg)
-            except Exception:
-                pass
-            sys.exit(1)
+        _run_daemon_internal(args)
     else:
         print(f"Unknown command: {command}")
         print("Use: dtSpark daemon {start|stop|status|restart}")
         sys.exit(1)
+
+
+def _run_daemon_internal(args):
+    """
+    Run the daemon application directly (called by start in background).
+
+    Sets up sys.argv for AbstractApp and handles error logging for
+    background mode debugging.
+
+    Args:
+        args: Additional command-line arguments for the daemon
+    """
+    import sys
+
+    # Clean up sys.argv - AbstractApp expects program name and valid args
+    sys.argv = ['dtSpark-daemon'] + args
+
+    # Set up error logging to file for background mode debugging
+    error_log_path = './daemon_error.log'
+
+    try:
+        from .daemon_app import DaemonApplication
+        app = DaemonApplication()
+        app.run()
+    except Exception as e:
+        import traceback
+        error_msg = f"Daemon failed to start: {e}\n{traceback.format_exc()}"
+        print(error_msg)
+        # Also write to error log file for background mode
+        try:
+            with open(error_log_path, 'w') as f:
+                f.write(error_msg)
+        except Exception:
+            pass
+        sys.exit(1)