kweaver-dolphin 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dolphin/cli/ui/console.py CHANGED
@@ -1738,14 +1738,14 @@ class ConsoleUI:
1738
1738
 
1739
1739
  # Block type icons and colors
1740
1740
  block_icons = {
1741
- "explore": ("🔍", Theme.SECONDARY),
1742
- "prompt": ("💬", Theme.SUCCESS),
1743
- "judge": ("⚖️", Theme.WARNING),
1744
- "assign": ("📝", Theme.PRIMARY),
1745
- "tool": ("", Theme.ACCENT),
1741
+ "explore": ("[?]", Theme.SECONDARY),
1742
+ "prompt": ("[>]", Theme.SUCCESS),
1743
+ "judge": ("[J]", Theme.WARNING),
1744
+ "assign": ("[=]", Theme.PRIMARY),
1745
+ "tool": ("[*]", Theme.ACCENT),
1746
1746
  }
1747
1747
 
1748
- icon, color = block_icons.get(block_type.lower(), ("📦", Theme.LABEL))
1748
+ icon, color = block_icons.get(block_type.lower(), ("[X]", Theme.LABEL))
1749
1749
 
1750
1750
  # Build output line
1751
1751
  header = f"{color}{Theme.BOLD}{icon} {block_type.upper()}{Theme.RESET}"
@@ -406,12 +406,34 @@ class BaseAgent(ABC):
406
406
  AgentState.PAUSED, "Agent paused due to tool interrupt"
407
407
  )
408
408
 
409
+ # Map interrupt_type: "tool_interrupt" (internal) -> "tool_confirmation" (API)
410
+ api_interrupt_type = self._pause_type.value
411
+ if run_result.resume_handle:
412
+ internal_type = run_result.resume_handle.interrupt_type
413
+ if internal_type == "tool_interrupt":
414
+ api_interrupt_type = "tool_confirmation"
415
+ elif internal_type == "user_interrupt":
416
+ api_interrupt_type = "user_interrupt"
417
+
409
418
  # 统一输出格式:status 固定为 "interrupted",通过 interrupt_type 区分
410
- yield {
419
+ interrupt_response = {
411
420
  "status": "interrupted",
412
421
  "handle": run_result.resume_handle,
413
- "interrupt_type": run_result.resume_handle.interrupt_type if run_result.resume_handle else self._pause_type.value,
422
+ "interrupt_type": api_interrupt_type,
414
423
  }
424
+
425
+ # For ToolInterrupt, include tool data from frame.error (same as step mode)
426
+ if run_result.is_tool_interrupted and self._current_frame and self._current_frame.error:
427
+ frame_error = self._current_frame.error
428
+ if frame_error.get("error_type") == "ToolInterrupt":
429
+ interrupt_response["data"] = {
430
+ "tool_name": frame_error.get("tool_name", ""),
431
+ "tool_description": "", # Can be added if available
432
+ "tool_args": frame_error.get("tool_args", []),
433
+ "interrupt_config": frame_error.get("tool_config", {}),
434
+ }
435
+
436
+ yield interrupt_response
415
437
  return
416
438
 
417
439
  elif run_result.is_completed:
@@ -463,11 +485,32 @@ class BaseAgent(ABC):
463
485
  )
464
486
 
465
487
  # 统一输出格式
466
- yield {
488
+ # Map interrupt_type: "tool_interrupt" (internal) -> "tool_confirmation" (API)
489
+ api_interrupt_type = self._pause_type.value
490
+ if step_result.resume_handle:
491
+ internal_type = step_result.resume_handle.interrupt_type
492
+ if internal_type == "tool_interrupt":
493
+ api_interrupt_type = "tool_confirmation"
494
+ elif internal_type == "user_interrupt":
495
+ api_interrupt_type = "user_interrupt"
496
+
497
+ interrupt_response = {
467
498
  "status": "interrupted",
468
499
  "handle": step_result.resume_handle,
469
- "interrupt_type": step_result.resume_handle.interrupt_type if step_result.resume_handle else self._pause_type.value,
500
+ "interrupt_type": api_interrupt_type,
470
501
  }
502
+
503
+ # For ToolInterrupt, include tool data from frame.error
504
+ if step_result.is_tool_interrupted and self._current_frame and self._current_frame.error:
505
+ frame_error = self._current_frame.error
506
+ if frame_error.get("error_type") == "ToolInterrupt":
507
+ interrupt_response["data"] = {
508
+ "tool_name": frame_error.get("tool_name", ""),
509
+ "tool_args": frame_error.get("tool_args", []),
510
+ "tool_config": frame_error.get("tool_config", {}),
511
+ }
512
+
513
+ yield interrupt_response
471
514
  break
472
515
 
473
516
  elif step_result.is_completed:
@@ -568,11 +611,22 @@ class BaseAgent(ABC):
568
611
  """Pause logic implemented by subclasses"""
569
612
  pass
570
613
 
571
- async def resume(self, updates: Optional[Dict[str, Any]] = None) -> bool:
614
+ async def resume(
615
+ self,
616
+ updates: Optional[Dict[str, Any]] = None,
617
+ resume_handle=None # External resume handle (for stateless scenarios)
618
+ ) -> bool:
572
619
  """Resume Agent (based on coroutine)
573
620
 
574
621
  Args:
575
622
  updates: Variable updates to inject (used to resume from tool interruption)
623
+ resume_handle: Optional external resume handle (for web apps/stateless scenarios)
624
+ If provided, will override internal _resume_handle
625
+ This allows resuming across different requests/processes
626
+
627
+ Usage Scenarios:
628
+ 1. Stateful (same process): resume(updates) - uses internal _resume_handle
629
+ 2. Stateless (web apps): resume(updates, resume_handle) - uses external handle
576
630
  """
577
631
  if self.state != AgentState.PAUSED:
578
632
  raise AgentLifecycleException(
@@ -580,9 +634,18 @@ class BaseAgent(ABC):
580
634
  )
581
635
 
582
636
  try:
583
- # Resume coroutine execution
584
- if self._resume_handle is not None:
637
+ # Use external handle if provided (for stateless scenarios like web apps)
638
+ # Otherwise use internal handle (for stateful scenarios like testing)
639
+ handle_to_use = resume_handle if resume_handle is not None else self._resume_handle
640
+
641
+ if handle_to_use is not None:
642
+ # Temporarily set internal handle for _on_resume_coroutine to use
643
+ original_handle = self._resume_handle
644
+ self._resume_handle = handle_to_use
645
+
585
646
  self._current_frame = await self._on_resume_coroutine(updates)
647
+
648
+ # Clear handles after resume
586
649
  self._resume_handle = None
587
650
  self._pause_type = None
588
651
 
@@ -1133,6 +1133,7 @@ class BasicCodeBlock:
1133
1133
  skill_params_json: Dict[str, Any] = {},
1134
1134
  props=None,
1135
1135
  ):
1136
+ from dolphin.core.utils.tools import ToolInterrupt
1136
1137
  if self.context.is_skillkit_empty():
1137
1138
  self.context.warn(f"skillkit is None, skill_name[{skill_name}]")
1138
1139
  return
@@ -1199,6 +1200,40 @@ class BasicCodeBlock:
1199
1200
  props = {}
1200
1201
  props.update({"gvp": self.context})
1201
1202
  try:
1203
+ # Check for tool interrupt configuration (ToolInterrupt mechanism)
1204
+ # Default: all tool calls support interrupt if tool has interrupt_config
1205
+ # Skip interrupt check if this is a resumed tool call (intervention=False)
1206
+ if props.get('intervention', True):
1207
+ interrupt_config = getattr(skill, 'interrupt_config', None)
1208
+ logger.debug(f"[DEBUG skill_run] skill_name={skill_name}, interrupt_config={interrupt_config}, intervention={props.get('intervention', True)}")
1209
+
1210
+ if interrupt_config and interrupt_config.get('requires_confirmation'):
1211
+ logger.debug(f"[DEBUG skill_run] Tool {skill_name} requires confirmation, raising ToolInterrupt")
1212
+ # Format confirmation message (support parameter interpolation)
1213
+ message = interrupt_config.get('confirmation_message', 'Tool requires confirmation')
1214
+ if message and skill_params_json:
1215
+ try:
1216
+ message = message.format(**skill_params_json)
1217
+ except (KeyError, ValueError):
1218
+ # If parameter interpolation fails, use original message
1219
+ pass
1220
+
1221
+ # Construct tool arguments list
1222
+ tool_args = [
1223
+ {"key": k, "value": v, "type": type(v).__name__}
1224
+ for k, v in skill_params_json.items()
1225
+ ]
1226
+
1227
+ # Throw ToolInterrupt (checked before execution)
1228
+ raise ToolInterrupt(
1229
+ message=message,
1230
+ tool_name=skill_name,
1231
+ tool_args=tool_args,
1232
+ tool_config=interrupt_config
1233
+ )
1234
+ else:
1235
+ logger.debug(f"[DEBUG skill_run] Tool {skill_name} does not require confirmation, proceeding with execution")
1236
+
1202
1237
  console_skill_call(
1203
1238
  skill_name, skill_params_json, verbose=self.context.verbose, skill=skill
1204
1239
  )
@@ -1410,6 +1445,12 @@ class BasicCodeBlock:
1410
1445
  if recorder:
1411
1446
  recorder.update(item=stream_item, raw_output=stream_item.answer)
1412
1447
 
1448
+ # Update tool call detection flags
1449
+ if stream_item.has_tool_call():
1450
+ tool_call_detected = True
1451
+ if stream_item.has_complete_tool_call():
1452
+ complete_tool_call = stream_item.get_tool_call()
1453
+
1413
1454
  yield stream_item
1414
1455
 
1415
1456
  # If a complete tool call is detected and early-stop is enabled, stop streaming.
@@ -558,10 +558,10 @@ Please reconsider your approach and improve your answer based on the feedback ab
558
558
  def _has_pending_tool_call(self) -> bool:
559
559
  """Check if there are pending tool calls (interrupt recovery)"""
560
560
  intervention_tmp_key = "intervention_explore_block_vars"
561
- return (
562
- intervention_tmp_key in self.context.get_all_variables().keys()
563
- and "tool" in self.context.get_all_variables().keys()
564
- )
561
+ has_intervention = intervention_tmp_key in self.context.get_all_variables().keys()
562
+ has_tool = "tool" in self.context.get_all_variables().keys()
563
+ logger.debug(f"[DEBUG _has_pending_tool_call] has_intervention={has_intervention}, has_tool={has_tool}")
564
+ return has_intervention and has_tool
565
565
 
566
566
  async def _handle_resumed_tool_call(self):
567
567
  """Tools for handling interrupt recovery calls """
@@ -571,17 +571,47 @@ Please reconsider your approach and improve your answer based on the feedback ab
571
571
  intervention_vars = self.context.get_var_value(intervention_tmp_key)
572
572
  self.context.delete_variable(intervention_tmp_key)
573
573
 
574
- # Restore complete message context
574
+ # Restore complete message context to context_manager buckets
575
575
  saved_messages = intervention_vars.get("prompt")
576
576
  if saved_messages is not None:
577
+ from dolphin.core.common.enums import MessageRole
578
+
579
+ # *** FIX: Filter out messages that are already in other buckets ***
580
+ # To avoid duplication, only restore messages generated during the conversation:
581
+ # - SYSTEM messages are already in SYSTEM bucket (from initial execute)
582
+ # - USER messages are already in QUERY/HISTORY buckets (initial query and history)
583
+ # - We only need to restore ASSISTANT and TOOL messages (conversation progress)
584
+ filtered_messages = [
585
+ msg for msg in saved_messages
586
+ if msg.get("role") in [MessageRole.ASSISTANT.value, MessageRole.TOOL.value]
587
+ ]
588
+
577
589
  msgs = Messages()
578
- msgs.extend_plain_messages(saved_messages)
579
- self.context.set_messages(msgs)
590
+ msgs.extend_plain_messages(filtered_messages)
591
+ # Use set_messages_batch to restore to context_manager buckets
592
+ # This ensures messages are available when to_dph_messages() is called
593
+ self.context.set_messages_batch(msgs, bucket=BuildInBucket.SCRATCHPAD.value)
580
594
 
581
595
  input_dict = self.context.get_var_value("tool")
582
596
  function_name = input_dict["tool_name"]
583
597
  raw_tool_args = input_dict["tool_args"]
584
598
  function_params_json = {arg["key"]: arg["value"] for arg in raw_tool_args}
599
+
600
+ # *** FIX: Update the last tool_call message with modified parameters ***
601
+ # This ensures LLM sees the actual parameters used, not the original ones
602
+ messages = self.context.get_messages()
603
+ if messages and len(messages.get_messages()) > 0:
604
+ last_message = messages.get_messages()[-1]
605
+ # Check if last message is an assistant message with tool_calls
606
+ if (hasattr(last_message, 'role') and last_message.role == "assistant" and
607
+ hasattr(last_message, 'tool_calls') and last_message.tool_calls):
608
+ # Find the matching tool_call
609
+ for tool_call in last_message.tool_calls:
610
+ if hasattr(tool_call, 'function') and tool_call.function.name == function_name:
611
+ # Update the arguments with modified parameters
612
+ import json
613
+ tool_call.function.arguments = json.dumps(function_params_json, ensure_ascii=False)
614
+ logger.debug(f"[FIX] Updated tool_call arguments from original to modified: {function_params_json}")
585
615
 
586
616
  if self.recorder:
587
617
  self.recorder.update(
@@ -591,9 +621,58 @@ Please reconsider your approach and improve your answer based on the feedback ab
591
621
  skill_type=self.context.get_skill_type(function_name),
592
622
  skill_args=function_params_json,
593
623
  )
624
+
625
+ # *** Handle skip action ***
626
+ skip_tool = self.context.get_var_value("__skip_tool__")
627
+ skip_message = self.context.get_var_value("__skip_message__")
628
+
629
+ # Clean up skip flags
630
+ if skip_tool:
631
+ self.context.delete_variable("__skip_tool__")
632
+ if skip_message:
633
+ self.context.delete_variable("__skip_message__")
634
+
594
635
  self.context.delete_variable("tool")
595
636
 
596
637
  return_answer = {}
638
+
639
+ # If user chose to skip, don't execute the tool
640
+ if skip_tool:
641
+ # Generate friendly skip message
642
+ params_str = ", ".join([f"{k}={v}" for k, v in function_params_json.items()])
643
+ default_skip_msg = f"Tool '{function_name}' was skipped by user"
644
+ if skip_message:
645
+ skip_response = f"[SKIPPED] {skip_message}"
646
+ else:
647
+ skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
648
+
649
+ return_answer["answer"] = skip_response
650
+ return_answer["think"] = skip_response
651
+ return_answer["status"] = "completed"
652
+
653
+ if self.recorder:
654
+ self.recorder.update(
655
+ item={"answer": skip_response, "block_answer": skip_response},
656
+ stage=TypeStage.SKILL,
657
+ source_type=SourceType.EXPLORE,
658
+ skill_name=function_name,
659
+ skill_type=self.context.get_skill_type(function_name),
660
+ skill_args=function_params_json,
661
+ )
662
+
663
+ yield [return_answer]
664
+
665
+ # Add tool response message with skip indicator
666
+ tool_call_id = self._extract_tool_call_id()
667
+ if not tool_call_id:
668
+ tool_call_id = f"call_{function_name}_{self.times}"
669
+
670
+ self.strategy.append_tool_response_message(
671
+ self.context, tool_call_id, skip_response, metadata={"skipped": True}
672
+ )
673
+ return
674
+
675
+ # Normal execution (not skipped)
597
676
  try:
598
677
  props = {"intervention": False}
599
678
  have_answer = False
@@ -1061,7 +1140,7 @@ Please reconsider your approach and improve your answer based on the feedback ab
1061
1140
 
1062
1141
  def _handle_tool_interrupt(self, e: Exception, tool_name: str):
1063
1142
  """Handling Tool Interruptions"""
1064
- self.context.info(f"tool interrupt in call {tool_name} tool")
1143
+ self.context.info(f"Tool interrupt in call {tool_name} tool")
1065
1144
  if "※tool" in self.context.get_all_variables().keys():
1066
1145
  self.context.delete_variable("※tool")
1067
1146
 
@@ -295,17 +295,48 @@ class ExploreBlockV2(BasicCodeBlock):
295
295
  intervention_vars = self.context.get_var_value(intervention_tmp_key)
296
296
  self.context.delete_variable(intervention_tmp_key)
297
297
 
298
- # restore the complete message context before tool execution
298
+ # restore the complete message context to context_manager buckets
299
299
  saved_messages = intervention_vars.get("prompt")
300
300
  if saved_messages is not None:
301
+ from dolphin.core.common.enums import MessageRole
302
+ from dolphin.core.context_engineer.config.settings import BuildInBucket
303
+
304
+ # *** FIX: Filter out messages that are already in other buckets ***
305
+ # To avoid duplication, only restore messages generated during the conversation:
306
+ # - SYSTEM messages are already in SYSTEM bucket (from initial execute)
307
+ # - USER messages are already in QUERY/HISTORY buckets (initial query and history)
308
+ # - We only need to restore ASSISTANT and TOOL messages (conversation progress)
309
+ filtered_messages = [
310
+ msg for msg in saved_messages
311
+ if msg.get("role") in [MessageRole.ASSISTANT.value, MessageRole.TOOL.value]
312
+ ]
313
+
301
314
  msgs = Messages()
302
- msgs.extend_plain_messages(saved_messages)
303
- self.context.set_messages(msgs)
315
+ msgs.extend_plain_messages(filtered_messages)
316
+ # Use set_messages_batch to restore to context_manager buckets
317
+ # This ensures messages are available when to_dph_messages() is called
318
+ self.context.set_messages_batch(msgs, bucket=BuildInBucket.SCRATCHPAD.value)
304
319
 
305
320
  input_dict = self.context.get_var_value("tool")
306
321
  function_name = input_dict["tool_name"]
307
322
  raw_tool_args = input_dict["tool_args"]
308
323
  function_params_json = {arg["key"]: arg["value"] for arg in raw_tool_args}
324
+
325
+ # *** FIX: Update the last tool_call message with modified parameters ***
326
+ # This ensures LLM sees the actual parameters used, not the original ones
327
+ messages = self.context.get_messages()
328
+ if messages and len(messages.get_messages()) > 0:
329
+ last_message = messages.get_messages()[-1]
330
+ # Check if last message is an assistant message with tool_calls
331
+ if (hasattr(last_message, 'role') and last_message.role == "assistant" and
332
+ hasattr(last_message, 'tool_calls') and last_message.tool_calls):
333
+ # Find the matching tool_call
334
+ for tool_call in last_message.tool_calls:
335
+ if hasattr(tool_call, 'function') and tool_call.function.name == function_name:
336
+ # Update the arguments with modified parameters
337
+ import json
338
+ tool_call.function.arguments = json.dumps(function_params_json, ensure_ascii=False)
339
+ logger.debug(f"[FIX] Updated tool_call arguments from original to modified: {function_params_json}")
309
340
 
310
341
  (
311
342
  self.recorder.update(
@@ -318,9 +349,59 @@ class ExploreBlockV2(BasicCodeBlock):
318
349
  if self.recorder
319
350
  else None
320
351
  )
352
+
353
+ # *** Handle skip action ***
354
+ skip_tool = self.context.get_var_value("__skip_tool__")
355
+ skip_message = self.context.get_var_value("__skip_message__")
356
+
357
+ # Clean up skip flags
358
+ if skip_tool:
359
+ self.context.delete_variable("__skip_tool__")
360
+ if skip_message:
361
+ self.context.delete_variable("__skip_message__")
362
+
321
363
  self.context.delete_variable("tool")
322
364
 
323
365
  return_answer = {}
366
+
367
+ # If user chose to skip, don't execute the tool
368
+ if skip_tool:
369
+ # Generate friendly skip message
370
+ params_str = ", ".join([f"{k}={v}" for k, v in function_params_json.items()])
371
+ default_skip_msg = f"Tool '{function_name}' was skipped by user"
372
+ if skip_message:
373
+ skip_response = f"[SKIPPED] {skip_message}"
374
+ else:
375
+ skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
376
+
377
+ return_answer["answer"] = skip_response
378
+ return_answer["think"] = skip_response
379
+ return_answer["status"] = "completed"
380
+
381
+ (
382
+ self.recorder.update(
383
+ item={"answer": skip_response, "block_answer": skip_response},
384
+ stage=TypeStage.SKILL,
385
+ source_type=SourceType.EXPLORE,
386
+ skill_name=function_name,
387
+ skill_type=self.context.get_skill_type(function_name),
388
+ skill_args=function_params_json,
389
+ )
390
+ if self.recorder
391
+ else None
392
+ )
393
+
394
+ yield [return_answer]
395
+
396
+ # Add tool response message with skip indicator
397
+ tool_call_id = self._extract_tool_call_id()
398
+ if not tool_call_id:
399
+ tool_call_id = f"call_{function_name}_{self.times}"
400
+
401
+ self._append_tool_message(tool_call_id, skip_response, metadata={"skipped": True})
402
+ return
403
+
404
+ # Normal execution (not skipped)
324
405
  try:
325
406
  props = {"intervention": False}
326
407
  have_answer = False
@@ -480,6 +561,7 @@ class ExploreBlockV2(BasicCodeBlock):
480
561
  return
481
562
 
482
563
  # Add assistant message containing tool calls
564
+ logger.debug(f"[DEBUG] Tool call detected, preparing to execute: {stream_item.tool_name}")
483
565
  tool_call_id = f"call_{stream_item.tool_name}_{self.times}"
484
566
  tool_call_openai_format = [
485
567
  {
@@ -506,13 +588,16 @@ class ExploreBlockV2(BasicCodeBlock):
506
588
  )
507
589
  self.deduplicator_skillcall.add(tool_call)
508
590
 
591
+ logger.debug(f"[DEBUG] Calling _execute_tool_call for: {stream_item.tool_name}")
509
592
  async for ret in self._execute_tool_call(stream_item, tool_call_id):
510
593
  yield ret
594
+ logger.debug(f"[DEBUG] _execute_tool_call completed for: {stream_item.tool_name}")
511
595
  else:
512
596
  await self._handle_duplicate_tool_call(tool_call, stream_item)
513
597
 
514
598
  async def _execute_tool_call(self, stream_item, tool_call_id: str):
515
599
  """Execute tool call"""
600
+ logger.debug(f"[DEBUG] _execute_tool_call ENTERED for: {stream_item.tool_name}")
516
601
  intervention_tmp_key = "intervention_explore_block_vars"
517
602
 
518
603
  try:
@@ -574,11 +574,12 @@ class ToolCallStrategy(ExploreStrategy):
574
574
  no_cache: bool = False,
575
575
  ) -> Dict[str, Any]:
576
576
  """Includes the tools parameter and an optional tool_choice"""
577
+ tools = skillkit.getSkillsSchema() if skillkit and not skillkit.isEmpty() else []
577
578
  llm_params = {
578
579
  "messages": messages,
579
580
  "model": model,
580
581
  "no_cache": no_cache,
581
- "tools": skillkit.getSkillsSchema() if skillkit and not skillkit.isEmpty() else [],
582
+ "tools": tools,
582
583
  }
583
584
 
584
585
  if tool_choice:
@@ -148,15 +148,39 @@ class JudgeBlock(BasicCodeBlock):
148
148
  new_tool_args = {arg["key"]: arg["value"] for arg in raw_tool_args}
149
149
 
150
150
  props = {"intervention": False, "gvp": self.context}
151
+
152
+ # *** Handle skip action ***
153
+ skip_tool = self.context.get_var_value("__skip_tool__")
154
+ skip_message = self.context.get_var_value("__skip_message__")
155
+
156
+ # Clean up skip flags
157
+ if skip_tool:
158
+ self.context.delete_variable("__skip_tool__")
159
+ if skip_message:
160
+ self.context.delete_variable("__skip_message__")
161
+
151
162
  self.context.delete_variable("tool")
152
163
 
153
- async for resp_item in self.skill_run(
154
- source_type=SourceType.SKILL,
155
- skill_name=tool_name,
156
- skill_params_json=new_tool_args,
157
- props=props,
158
- ):
159
- yield resp_item
164
+ # If user chose to skip, don't execute the tool
165
+ if skip_tool:
166
+ # Generate friendly skip message
167
+ params_str = ", ".join([f"{k}={v}" for k, v in new_tool_args.items()])
168
+ default_skip_msg = f"Tool '{tool_name}' was skipped by user"
169
+ if skip_message:
170
+ skip_response = f"[SKIPPED] {skip_message}"
171
+ else:
172
+ skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
173
+
174
+ yield {"answer": skip_response}
175
+ else:
176
+ # Normal execution (not skipped)
177
+ async for resp_item in self.skill_run(
178
+ source_type=SourceType.SKILL,
179
+ skill_name=tool_name,
180
+ skill_params_json=new_tool_args,
181
+ props=props,
182
+ ):
183
+ yield resp_item
160
184
  else:
161
185
  self.recorder.set_output_var(self.assign_type, self.output_var)
162
186
 
@@ -70,28 +70,65 @@ class ToolBlock(BasicCodeBlock):
70
70
  new_tool_args = {arg["key"]: arg["value"] for arg in raw_tool_args}
71
71
 
72
72
  props = {"intervention": False, "gvp": self.context}
73
+
74
+ # *** Handle skip action ***
75
+ skip_tool = self.context.get_var_value("__skip_tool__")
76
+ skip_message = self.context.get_var_value("__skip_message__")
77
+
78
+ # Clean up skip flags
79
+ if skip_tool:
80
+ self.context.delete_variable("__skip_tool__")
81
+ if skip_message:
82
+ self.context.delete_variable("__skip_message__")
83
+
73
84
  input_dict = self.context.delete_variable("tool")
74
85
 
75
- resp_item = None
76
- async for resp_item in self.skill_run(
77
- source_type=SourceType.SKILL,
78
- skill_name=tool_name,
79
- skill_params_json=new_tool_args,
80
- props=props,
81
- ):
82
- yield resp_item
83
-
84
- if self.recorder is not None:
85
- self.recorder.update(
86
- stage=TypeStage.SKILL,
87
- item=resp_item,
88
- skill_name=tool_name,
89
- skill_args=new_tool_args,
90
- skill_type=self.context.get_skill_type(tool_name),
86
+ # If user chose to skip, don't execute the tool
87
+ if skip_tool:
88
+ # Generate friendly skip message
89
+ params_str = ", ".join([f"{k}={v}" for k, v in new_tool_args.items()])
90
+ default_skip_msg = f"Tool '{tool_name}' was skipped by user"
91
+ if skip_message:
92
+ skip_response = f"[SKIPPED] {skip_message}"
93
+ else:
94
+ skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
95
+
96
+ resp_item = {"answer": skip_response}
97
+
98
+ if self.recorder is not None:
99
+ self.recorder.update(
100
+ stage=TypeStage.SKILL,
101
+ item=resp_item,
102
+ skill_name=tool_name,
103
+ skill_args=new_tool_args,
104
+ skill_type=self.context.get_skill_type(tool_name),
105
+ source_type=SourceType.SKILL,
106
+ is_completed=True,
107
+ )
108
+
109
+ yield {"data": resp_item}
110
+ else:
111
+ # Normal execution (not skipped)
112
+ resp_item = None
113
+ async for resp_item in self.skill_run(
91
114
  source_type=SourceType.SKILL,
92
- is_completed=True,
93
- )
94
- yield {"data": resp_item}
115
+ skill_name=tool_name,
116
+ skill_params_json=new_tool_args,
117
+ props=props,
118
+ ):
119
+ yield resp_item
120
+
121
+ if self.recorder is not None:
122
+ self.recorder.update(
123
+ stage=TypeStage.SKILL,
124
+ item=resp_item,
125
+ skill_name=tool_name,
126
+ skill_args=new_tool_args,
127
+ skill_type=self.context.get_skill_type(tool_name),
128
+ source_type=SourceType.SKILL,
129
+ is_completed=True,
130
+ )
131
+ yield {"data": resp_item}
95
132
  else:
96
133
  # step1: First parse, then retrieve the actual values from gvpool when actually calling the function (the actual variable values might be of type dict, list)
97
134
  tool_call_info = self.parse_tool_call()
@@ -969,10 +969,13 @@ class Context:
969
969
  if bucket is None:
970
970
  bucket = BuildInBucket.SCRATCHPAD.value
971
971
 
972
- # Empty the current bucket
973
- self.context_manager.clear_bucket(bucket)
974
- # Add new messages
975
- self.context_manager.add_bucket(bucket, messages)
972
+ # Replace bucket content (or create if not exists)
973
+ if self.context_manager.has_bucket(bucket):
974
+ # Use replace_bucket_content to directly replace existing bucket
975
+ self.context_manager.replace_bucket_content(bucket, messages)
976
+ else:
977
+ # Create new bucket if it doesn't exist
978
+ self.context_manager.add_bucket(bucket, messages)
976
979
 
977
980
  # Mark as dirty
978
981
  self.messages_dirty = True
@@ -484,6 +484,10 @@ class DolphinExecutor:
484
484
  if isinstance(e, UserInterrupt):
485
485
  frame.status = FrameStatus.WAITING_FOR_INTERVENTION
486
486
  frame.wait_reason = WaitReason.USER_INTERRUPT
487
+ # *** FIX: Update block_pointer to current block before saving snapshot ***
488
+ # This ensures resume will continue from the interrupted block, not restart from beginning
489
+ frame.block_pointer = block_pointer
490
+ self.state_registry.update_frame(frame) # Save updated pointer
487
491
  intervention_snapshot_id = self._save_frame_snapshot(frame)
488
492
  frame.error = {
489
493
  "error_type": "UserInterrupt",
@@ -504,12 +508,17 @@ class DolphinExecutor:
504
508
  if isinstance(e, ToolInterrupt):
505
509
  frame.status = FrameStatus.WAITING_FOR_INTERVENTION
506
510
  frame.wait_reason = WaitReason.TOOL_REQUEST
511
+ # *** FIX: Update block_pointer to current block before saving snapshot ***
512
+ # This ensures resume will continue from the interrupted block, not restart from beginning
513
+ frame.block_pointer = block_pointer
514
+ self.state_registry.update_frame(frame) # Save updated pointer
507
515
  intervention_snapshot_id = self._save_frame_snapshot(frame)
508
516
  frame.error = {
509
517
  "error_type": "ToolInterrupt",
510
518
  "message": str(e),
511
519
  "tool_name": getattr(e, "tool_name", ""),
512
520
  "tool_args": getattr(e, "tool_args", []),
521
+ "tool_config": getattr(e, "tool_config", {}),
513
522
  "at_block": block_pointer,
514
523
  "intervention_snapshot_id": intervention_snapshot_id,
515
524
  }
dolphin/core/llm/llm.py CHANGED
@@ -249,7 +249,6 @@ class LLMModelFactory(LLM):
249
249
  finish_reason = None
250
250
  # Use ToolCallsParser to handle tool calls parsing
251
251
  tool_parser = ToolCallsParser()
252
-
253
252
  timeout = aiohttp.ClientTimeout(
254
253
  total=1800, # Disable overall timeout (use with caution)
255
254
  sock_connect=30, # Keep connection timeout
@@ -320,7 +319,7 @@ class LLMModelFactory(LLM):
320
319
 
321
320
  accu_content += delta_content
322
321
  reasoning_content += delta_reasoning
323
-
322
+
324
323
  # Capture finish_reason
325
324
  chunk_finish_reason = line_json["choices"][0].get("finish_reason")
326
325
  if chunk_finish_reason:
@@ -461,7 +460,7 @@ class LLMOpenai(LLM):
461
460
  and delta.reasoning_content is not None
462
461
  ):
463
462
  accu_reasoning += delta.reasoning_content
464
-
463
+
465
464
  # Capture finish_reason
466
465
  chunk_finish_reason = chunk.choices[0].finish_reason
467
466
  if chunk_finish_reason:
@@ -182,6 +182,7 @@ class LLMClient:
182
182
  sock_connect=30, # Keep connection timeout
183
183
  sock_read=300, # Single read timeout (for slow streaming data)
184
184
  )
185
+ print(f"------------------------llm={payload}")
185
186
  async with aiohttp.ClientSession(timeout=timeout) as session:
186
187
  async with session.post(
187
188
  model_config.api,
@@ -1,15 +1,79 @@
1
- import fcntl
2
1
  import json
3
2
  import os
3
+ import sys
4
4
  import threading
5
5
  import time
6
6
  from typing import List, Dict, Any, Optional
7
7
  import uuid
8
8
  from dolphin.core.logging.logger import get_logger
9
9
 
10
+ # Cross-platform file locking support
11
+ _HAS_FCNTL = False
12
+ _HAS_MSVCRT = False
13
+
14
+ if sys.platform == 'win32':
15
+ # Windows: use msvcrt for file locking
16
+ try:
17
+ import msvcrt
18
+ _HAS_MSVCRT = True
19
+ except ImportError:
20
+ pass
21
+ else:
22
+ # Unix/Linux: use fcntl for file locking
23
+ try:
24
+ import fcntl
25
+ _HAS_FCNTL = True
26
+ except ImportError:
27
+ pass
28
+
10
29
  logger = get_logger("utils.cache_kv")
11
30
 
12
31
 
32
def _lock_file(f, exclusive=False):
    """Acquire a cross-platform advisory lock on an open file.

    On Unix/Linux the lock is taken with ``fcntl.flock`` (shared or
    exclusive as requested).  On Windows ``msvcrt.locking`` is used; it has
    no shared mode, so every Windows lock is effectively exclusive.  When
    neither backend is importable the call degrades to a no-op rather than
    failing.

    Args:
        f: An open file object.
        exclusive: Request an exclusive lock when True; otherwise a shared
            lock (honoured on Unix only).
    """
    if _HAS_FCNTL:
        # Unix/Linux: flock supports both shared and exclusive modes.
        mode = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
        fcntl.flock(f.fileno(), mode)
        return
    if _HAS_MSVCRT:
        # Windows: msvcrt locks a byte range starting at the current file
        # position, so rewind before locking the first byte.
        f.seek(0)
        try:
            msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 1)
        except OSError:
            # The file may be shorter than one byte; acceptable for this use.
            pass
    # No locking backend available: proceed unlocked (not ideal, won't crash).
55
+
56
+
57
def _unlock_file(f):
    """Release a lock previously taken by ``_lock_file``.

    Mirrors the platform dispatch of ``_lock_file``: ``fcntl.flock`` with
    ``LOCK_UN`` on Unix/Linux, ``msvcrt.locking`` with ``LK_UNLCK`` on
    Windows, and a no-op when neither backend exists.

    Args:
        f: The open file object that was locked.
    """
    if _HAS_FCNTL:
        # Unix/Linux: drop the flock.
        fcntl.flock(f.fileno(), fcntl.LOCK_UN)
        return
    if _HAS_MSVCRT:
        # Windows: unlock the same single byte at the start of the file.
        f.seek(0)
        try:
            msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, 1)
        except OSError:
            pass
    # Nothing was locked when no backend is available.
75
+
76
+
13
77
  class CacheKV:
14
78
  def __init__(
15
79
  self, filePath: str, dumpInterval: int = 5, expireTimeByDay: float = 1
@@ -28,8 +92,7 @@ class CacheKV:
28
92
 
29
93
  try:
30
94
  with open(self.filePath, "r", encoding="utf-8") as f:
31
- fd = f.fileno()
32
- fcntl.flock(fd, fcntl.LOCK_SH) # Shared lock for reading
95
+ _lock_file(f, exclusive=False) # Shared lock for reading
33
96
  try:
34
97
  loaded_cache = json.load(f)
35
98
  for key, value in loaded_cache.items():
@@ -46,7 +109,7 @@ class CacheKV:
46
109
  # Compatible with old formats
47
110
  self.cache[key] = {"value": value, "timestamp": time.time()}
48
111
  finally:
49
- fcntl.flock(fd, fcntl.LOCK_UN)
112
+ _unlock_file(f)
50
113
  except (json.JSONDecodeError, IOError) as e:
51
114
  logger.error(
52
115
  f"Error loading cache file {self.filePath}: {e} try to backup and reset cache"
@@ -77,14 +140,13 @@ class CacheKV:
77
140
  temp_path = f"{self.filePath}.tmp.{os.getpid()}.{uuid.uuid4().hex}"
78
141
  try:
79
142
  with open(temp_path, "w", encoding="utf-8") as f:
80
- fd = f.fileno()
81
- fcntl.flock(fd, fcntl.LOCK_EX) # Exclusive lock for writing
143
+ _lock_file(f, exclusive=True) # Exclusive lock for writing
82
144
  try:
83
145
  json.dump(self.cache, f, ensure_ascii=False)
84
146
  f.flush()
85
- os.fsync(fd) # Ensure data is written to disk
147
+ os.fsync(f.fileno()) # Ensure data is written to disk
86
148
  finally:
87
- fcntl.flock(fd, fcntl.LOCK_UN)
149
+ _unlock_file(f)
88
150
 
89
151
  # Atomic rename
90
152
  os.rename(temp_path, self.filePath)
@@ -332,9 +332,11 @@ class ToolInterrupt(Exception):
332
332
  message="The tool was interrupted.",
333
333
  tool_name: str = None,
334
334
  tool_args: List[Dict] = None,
335
+ tool_config: Dict = None,
335
336
  *args,
336
337
  **kwargs,
337
338
  ):
338
339
  super().__init__(message, *args, **kwargs)
339
340
  self.tool_name = tool_name if tool_name else ""
340
341
  self.tool_args = tool_args if tool_args else []
342
+ self.tool_config = tool_config if tool_config else {}
@@ -66,6 +66,10 @@ class TriditionalToolkit(Skillkit):
66
66
 
67
67
  # Add tool type information to the SkillFunction
68
68
  openai_function.original_tool = tool
69
+
70
+ # Copy interrupt_config from tool to SkillFunction if it exists
71
+ if hasattr(tool, 'interrupt_config'):
72
+ openai_function.interrupt_config = tool.interrupt_config
69
73
 
70
74
  openai_functions[tool_name] = openai_function
71
75
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kweaver-dolphin
3
- Version: 0.1.0
3
+ Version: 0.2.0
4
4
  Summary: Dolphin Language - An intelligent agent framework
5
5
  Author-email: AnyData <contact@anydata.com>
6
6
  License: Apache 2.0
@@ -17,7 +17,7 @@ dolphin/cli/multimodal/input_parser.py,sha256=gKbSnO2i_sNYamcYtVB638sLZnDCrp0ZJx
17
17
  dolphin/cli/runner/__init__.py,sha256=wUBuFqq_wm5wi8KpJvF3QDEnCWBiqrgi0ihI1EDe6Nw,153
18
18
  dolphin/cli/runner/runner.py,sha256=C-3qtWWkK_qHg_I5Qyq9n4bo1Ufp4RIotGuf5-d6aD8,36529
19
19
  dolphin/cli/ui/__init__.py,sha256=w1xrgYJ66ehCcdgeHMZWiCBRP_C64j5Alt0HYB0dTxM,228
20
- dolphin/cli/ui/console.py,sha256=KxUCg8WftmAEmgwLajHgd8GjRPDQ3J9mnIiexXuesf8,112346
20
+ dolphin/cli/ui/console.py,sha256=W1Mg8Lstq6PDX2P4-0ujWTCkNXuimTtmAfwnCQlfDf0,112339
21
21
  dolphin/cli/ui/input.py,sha256=qEtyOdNdipWHLWe7aZI1eOtcX7S34vQ_bytXumrZ1S0,11199
22
22
  dolphin/cli/ui/layout.py,sha256=Qhjp0a7ZvecYdifZDlO64PTHkSXb_amuWzCtYZYdods,14501
23
23
  dolphin/cli/ui/stream_renderer.py,sha256=qo5mIDE_dUxf9otwr34kjCDx9Y9If2sLXZ4twojSKKw,11950
@@ -28,18 +28,18 @@ dolphin/core/__init__.py,sha256=h41bEI_o9djjE4v00cUT8oTdtynsvtmus-mX1XT6TZg,2785
28
28
  dolphin/core/interfaces.py,sha256=XvyVO9LB9YbBxRlJXSdcqt7-6lH0GVTYEDYxY_zqQmE,1161
29
29
  dolphin/core/agent/__init__.py,sha256=mOligICB_byI5rZFqMvSJN0msodBvravbhSghpboDdU,221
30
30
  dolphin/core/agent/agent_state.py,sha256=3YsFJdG21FqoIoHTDMBx01KEJJDRXFToW0vDrzUWMOU,1949
31
- dolphin/core/agent/base_agent.py,sha256=PMXCq1dQIhipJhHAWI7nQtNT5l_qndxEQDhkK9idxBo,42496
31
+ dolphin/core/agent/base_agent.py,sha256=hlTgEhxWvnWIPhyZvL6RWymZQM9NtSk22AH5W3EhT-g,46259
32
32
  dolphin/core/code_block/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
33
33
  dolphin/core/code_block/agent_init_block.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
34
  dolphin/core/code_block/assign_block.py,sha256=xbPiQOkFAbrcRDnwkcNN2tfxy7OG8IXPPbjlCM1KJ9I,4124
35
- dolphin/core/code_block/basic_code_block.py,sha256=Ktqfmz4x5PudaE1gyynAJ4YZ_Vy4EPWODJYVU_e7TZk,74076
36
- dolphin/core/code_block/explore_block.py,sha256=DG5xRh9hP82ry6bPkb0aCL8G9rKDgTuyb40MaHHVsQI,54246
37
- dolphin/core/code_block/explore_block_v2.py,sha256=sHhPoG1hVpQ-3QopSn_iX20a5egqFYJllPFDZzEWkCo,27898
38
- dolphin/core/code_block/explore_strategy.py,sha256=PcgKvNOXh6B2udr2yhHfzCRcUNBSstC6-zTBigcm-g0,22794
39
- dolphin/core/code_block/judge_block.py,sha256=oOjgHEVLBfMnZP-l16DR13pcZITm2uKQaZM-T4W1t28,9318
35
+ dolphin/core/code_block/basic_code_block.py,sha256=X0dHzsQF3g9tFdgMtgbMtVp9qvN8goXKv5BKLce4-Gk,76438
36
+ dolphin/core/code_block/explore_block.py,sha256=dbQ6X4nIoogOu8Ytk1VEUAmmZVG6de_G66N5uTtjGI8,58495
37
+ dolphin/core/code_block/explore_block_v2.py,sha256=HWHNLWLiVcjphqOv7TNS1qfRpvJDeS1ROfM7pKXvtfM,32432
38
+ dolphin/core/code_block/explore_strategy.py,sha256=MXzJ8NhQyz3bt6PY02YboKjTWH_Vfwdy3UmlFYjDJqI,22816
39
+ dolphin/core/code_block/judge_block.py,sha256=ueMkiOs7vsdrZgrnrwZ6l79FvvnQOwd4pDWPEsRvtag,10525
40
40
  dolphin/core/code_block/prompt_block.py,sha256=lFun43lOQP9rbZzoNw2sYhwQCHRenRBZzL6vuXKunvo,1131
41
41
  dolphin/core/code_block/skill_call_deduplicator.py,sha256=6btnQU6AIUPnxl2JthYRdsmJkueDyxW9Iw-OIomjiaM,9879
42
- dolphin/core/code_block/tool_block.py,sha256=8h5XqLSPycUpQWz9BlFl7UBwXOexvgZ1JaQnNCeYU6Y,5360
42
+ dolphin/core/code_block/tool_block.py,sha256=Gh0NeW_Vk1Ue43ymi_FXX8MpEGJ3Br1G_88hHn6M7fs,7214
43
43
  dolphin/core/common/__init__.py,sha256=WUsyhGJljLFzpW_YEnkMsNn_Iw3WdqkFyuHa-mMKhNc,455
44
44
  dolphin/core/common/constants.py,sha256=tOJ31IupXIYfT8I8rmSh2YhWKitAJTf6iRn99MbxBwo,7348
45
45
  dolphin/core/common/enums.py,sha256=9QEzvCb7vbkQns1mtVlswsRZHKVwwvdRqrefSr-S68g,42282
@@ -52,7 +52,7 @@ dolphin/core/config/__init__.py,sha256=7PJH0qB8mTc8Tjuvlum8Y-RdomyaKWZQoA4XVzEd0
52
52
  dolphin/core/config/global_config.py,sha256=AiyVw3tOeA5xlsJvQ2O9zU2AEmjeIqYm-LXl_2JLdWY,46392
53
53
  dolphin/core/config/ontology_config.py,sha256=sGMkbcfFp5EdNIFazrY7Z2MsahCt0vC7GH_5-VleGcA,3899
54
54
  dolphin/core/context/__init__.py,sha256=2CKAzbIol_oZ611KSYLEk-Apyfez9tLXOYT9S6WMhW8,324
55
- dolphin/core/context/context.py,sha256=NGwYSim5LvqPJZlvjp_fX8fukhjy4fGk-d-lT9zK9Uk,63156
55
+ dolphin/core/context/context.py,sha256=6PV84YAuwZ4OtBDd1auRl6bjEC5egw-czPEee4aPgWo,63376
56
56
  dolphin/core/context/context_manager.py,sha256=3yo8sdWkfqq52cwhNIM0OlXPLZSLftTbFHuWBlqjOsE,6288
57
57
  dolphin/core/context/var_output.py,sha256=1Xb_JYv5L0f3JhUy8ZxmwF4EQEtn_Czh44s5nXjo2PA,2830
58
58
  dolphin/core/context/variable_pool.py,sha256=SgztIgALcIoNAMYf1VM7EzHeNNa2g61oLt5Ta32u81o,13212
@@ -82,7 +82,7 @@ dolphin/core/coroutine/resume_handle.py,sha256=6TyLNlKr99rXKOFBihUcUOr6zOSxjk66b
82
82
  dolphin/core/coroutine/step_result.py,sha256=uag0YhE-TbX6YoqtM2kQfWfBuoConH4ti4u_K_zdwxs,4017
83
83
  dolphin/core/executor/__init__.py,sha256=nogeHlssz4IPr5HD8nqjcf6ipSJCmQtelfayvg3a_oM,550
84
84
  dolphin/core/executor/debug_controller.py,sha256=15N4sFPBiZYK_DiTsrA3f0FdRYrbLdXV0IZ0qvnj9U8,26652
85
- dolphin/core/executor/dolphin_executor.py,sha256=C0v1j7mN3L_7Ic1dA2IE_HXekR8aLAj4Wni-OhQA6x0,42677
85
+ dolphin/core/executor/dolphin_executor.py,sha256=Eci_MzdHv0r7TFF9chCBzHxblxazJ48JVlQtEoFBkjw,43369
86
86
  dolphin/core/executor/executor.py,sha256=xoTf8tBVaWiuv-EmTXY94HW-Llgz2nWJDHtZ0yh5Fxo,25632
87
87
  dolphin/core/flags/__init__.py,sha256=9rek6speJJyfzJfy6MqlM4pTrSAB-kmAfrwgu2wCoB0,498
88
88
  dolphin/core/flags/definitions.py,sha256=O-15MCZCyC7NVMF0E8cWlr0k8gAyr36Acutb4i8UDNo,1378
@@ -93,9 +93,9 @@ dolphin/core/hook/hook_dispatcher.py,sha256=_V8zZJlgeJUDCgTnYYju1B0wYY7ADj5XXIF4
93
93
  dolphin/core/hook/hook_types.py,sha256=xGBhv8RgG21xEF2Z4IFACtKUGWn9-G5RzQTPgS9DPhU,8047
94
94
  dolphin/core/hook/isolated_variable_pool.py,sha256=CpIikUIDcl6_0V239oNBYLhsCDQKFITNFtubpAI1JJ4,9046
95
95
  dolphin/core/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
96
- dolphin/core/llm/llm.py,sha256=yG5yYZ5hSW1LFB8KgbnvaalJ_vw-fhlk8aJCg0eox9Y,20399
96
+ dolphin/core/llm/llm.py,sha256=l7N4xSoH6Y_Y14Qmbjsr2jTEV-ycC3VrGGCz2yQkHTw,20350
97
97
  dolphin/core/llm/llm_call.py,sha256=ccCRW5GxJ8_UiiWVtjuTx7Tattjlp7SbENSW50EbxAU,3408
98
- dolphin/core/llm/llm_client.py,sha256=OvFOpUirp2fdMCwZDG85ephlqcGZzKOrQqY4AOsu4LE,59090
98
+ dolphin/core/llm/llm_client.py,sha256=-YS5RskVjLF7HS0SlFnUwF-UMYNbtb_qS97fLU9HbBg,59150
99
99
  dolphin/core/llm/message_sanitizer.py,sha256=xGBattdBVYHDPHdZLxUmmerZYPNFe8HZwrGI5PycjFQ,4461
100
100
  dolphin/core/logging/__init__.py,sha256=ix3t_AONJVUaO2WQ5BeVUx762OBlFFYu8tyZ-8Q0jpU,316
101
101
  dolphin/core/logging/logger.py,sha256=FawfEAbgGHDlCsEBxH8zmeoknFNNRLgIlBXTz8dp-JM,18859
@@ -116,8 +116,8 @@ dolphin/core/trajectory/__init__.py,sha256=5FGGDE48K_OMKcca4usSyv1z_hvfaDzearur-
116
116
  dolphin/core/trajectory/recorder.py,sha256=xA5ZOyoRxgIgFZkF5EzyNzP0nJexDjDnnC_pDskzg4g,6441
117
117
  dolphin/core/trajectory/trajectory.py,sha256=eEYmokzaskeWx0MfSBuMYWrfBouXpt4HwvCl73BGmNI,21577
118
118
  dolphin/core/utils/__init__.py,sha256=8IhOF81ct7TYiVoc9SWbyO6YUWaSlcZH2dVcF0qy46Y,176
119
- dolphin/core/utils/cache_kv.py,sha256=hWGz3j-vtGd317Eo9o2lDBhNdCEef4Eo5TZCUmadUXw,7412
120
- dolphin/core/utils/tools.py,sha256=Cn0KpG_pqlXzpkud16JRGpY4VO5eG0GMxPtLC2gWowU,9779
119
+ dolphin/core/utils/cache_kv.py,sha256=UnuUKE_1sL0glWJ3qq3TLN4MpKxNqzdw_72sxfUPXig,8968
120
+ dolphin/core/utils/tools.py,sha256=-Ezo7daUFII_VGsPY0WWvLt1SWdXUzir2nEL0UxWISA,9875
121
121
  dolphin/lib/__init__.py,sha256=tm_qFRc7NEuWUYYYFq9PKuJ-CXCG8WCjLwYut_3yPYc,2557
122
122
  dolphin/lib/debug/__init__.py,sha256=LXMTcUqnSnvEd_aCtSjaxs7PVUSf0pVJ0BiMozGESP0,154
123
123
  dolphin/lib/debug/visualizer.py,sha256=zoweYgmbZW2KOoJe3Y63C6-G1uDZi23srUDpxngtWbQ,18024
@@ -190,10 +190,10 @@ dolphin/sdk/runtime/__init__.py,sha256=W90bnFZ_l6ukuZ3l1iBRWN0oPetdNisjj6dxBMk1N
190
190
  dolphin/sdk/runtime/env.py,sha256=OU0CEZuVXgdQPeNsYq6BhFETcc-Q2vcAp6jLvOrEsbU,12337
191
191
  dolphin/sdk/skill/__init__.py,sha256=zzfHgrxIWxUEFH1HMiwb08U1M1ukHcdNtFqKi3Sg1ug,246
192
192
  dolphin/sdk/skill/global_skills.py,sha256=2gebgdAKEgPWGhv1_pliI0uqbbkEMA9ZaphJj7A_eeE,27531
193
- dolphin/sdk/skill/traditional_toolkit.py,sha256=fYVxX0e8nHr0d9UQwq4FKVsyPmP7VpTB9ufsNzquAcI,9143
194
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
195
- kweaver_dolphin-0.1.0.dist-info/METADATA,sha256=XCZFEnIUXbvnafcjiC8k-N4IStUSw73fwmGQYjSawfk,15420
196
- kweaver_dolphin-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
197
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt,sha256=0xA4Hy1bkr0V7CTaWvswGeHZ0tXdAtCNlGa3ZUDdBg0,1519
198
- kweaver_dolphin-0.1.0.dist-info/top_level.txt,sha256=vwNhnr4e0NgKRTfM56xcCfo9t2ZebTbbPF38xpBYiq0,27
199
- kweaver_dolphin-0.1.0.dist-info/RECORD,,
193
+ dolphin/sdk/skill/traditional_toolkit.py,sha256=Yyejm1LCUDVfXb1MCiaiT2tyFsrW0bj0uvKa5VjA8FM,9355
194
+ kweaver_dolphin-0.2.0.dist-info/licenses/LICENSE.txt,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
195
+ kweaver_dolphin-0.2.0.dist-info/METADATA,sha256=aOKfCCkkYlH6C-zQBIk8145oFmrxqBT4DrXsbkwcPbw,15420
196
+ kweaver_dolphin-0.2.0.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
197
+ kweaver_dolphin-0.2.0.dist-info/entry_points.txt,sha256=0xA4Hy1bkr0V7CTaWvswGeHZ0tXdAtCNlGa3ZUDdBg0,1519
198
+ kweaver_dolphin-0.2.0.dist-info/top_level.txt,sha256=vwNhnr4e0NgKRTfM56xcCfo9t2ZebTbbPF38xpBYiq0,27
199
+ kweaver_dolphin-0.2.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.1)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5