kweaver-dolphin 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. dolphin/cli/runner/runner.py +20 -0
  2. dolphin/cli/ui/console.py +35 -17
  3. dolphin/cli/utils/helpers.py +4 -4
  4. dolphin/core/agent/base_agent.py +70 -7
  5. dolphin/core/code_block/basic_code_block.py +162 -26
  6. dolphin/core/code_block/explore_block.py +438 -35
  7. dolphin/core/code_block/explore_block_v2.py +105 -16
  8. dolphin/core/code_block/explore_strategy.py +3 -1
  9. dolphin/core/code_block/judge_block.py +41 -8
  10. dolphin/core/code_block/skill_call_deduplicator.py +32 -10
  11. dolphin/core/code_block/tool_block.py +69 -23
  12. dolphin/core/common/constants.py +25 -1
  13. dolphin/core/config/global_config.py +35 -0
  14. dolphin/core/context/context.py +175 -9
  15. dolphin/core/context/cow_context.py +392 -0
  16. dolphin/core/executor/dolphin_executor.py +9 -0
  17. dolphin/core/flags/definitions.py +2 -2
  18. dolphin/core/llm/llm.py +2 -3
  19. dolphin/core/llm/llm_client.py +1 -0
  20. dolphin/core/runtime/runtime_instance.py +31 -0
  21. dolphin/core/skill/context_retention.py +3 -3
  22. dolphin/core/task_registry.py +404 -0
  23. dolphin/core/utils/cache_kv.py +70 -8
  24. dolphin/core/utils/tools.py +2 -0
  25. dolphin/lib/__init__.py +0 -2
  26. dolphin/lib/skillkits/__init__.py +2 -2
  27. dolphin/lib/skillkits/plan_skillkit.py +756 -0
  28. dolphin/lib/skillkits/system_skillkit.py +103 -30
  29. dolphin/sdk/skill/global_skills.py +43 -3
  30. dolphin/sdk/skill/traditional_toolkit.py +4 -0
  31. {kweaver_dolphin-0.1.0.dist-info → kweaver_dolphin-0.2.1.dist-info}/METADATA +1 -1
  32. {kweaver_dolphin-0.1.0.dist-info → kweaver_dolphin-0.2.1.dist-info}/RECORD +36 -34
  33. {kweaver_dolphin-0.1.0.dist-info → kweaver_dolphin-0.2.1.dist-info}/WHEEL +1 -1
  34. kweaver_dolphin-0.2.1.dist-info/entry_points.txt +15 -0
  35. dolphin/lib/skillkits/plan_act_skillkit.py +0 -452
  36. kweaver_dolphin-0.1.0.dist-info/entry_points.txt +0 -27
  37. {kweaver_dolphin-0.1.0.dist-info → kweaver_dolphin-0.2.1.dist-info}/licenses/LICENSE.txt +0 -0
  38. {kweaver_dolphin-0.1.0.dist-info → kweaver_dolphin-0.2.1.dist-info}/top_level.txt +0 -0
@@ -295,34 +295,121 @@ class ExploreBlockV2(BasicCodeBlock):
     intervention_vars = self.context.get_var_value(intervention_tmp_key)
     self.context.delete_variable(intervention_tmp_key)

-    # restore the complete message context before tool execution
+    # restore the complete message context to context_manager buckets
     saved_messages = intervention_vars.get("prompt")
     if saved_messages is not None:
+        from dolphin.core.common.enums import MessageRole
+        from dolphin.core.context_engineer.config.settings import BuildInBucket
+
+        # *** FIX: Filter out messages that are already in other buckets ***
+        # To avoid duplication, only restore messages generated during the conversation:
+        # - SYSTEM messages are already in SYSTEM bucket (from initial execute)
+        # - USER messages are already in QUERY/HISTORY buckets (initial query and history)
+        # - We only need to restore ASSISTANT and TOOL messages (conversation progress)
+        filtered_messages = [
+            msg for msg in saved_messages
+            if msg.get("role") in [MessageRole.ASSISTANT.value, MessageRole.TOOL.value]
+        ]
+
         msgs = Messages()
-        msgs.extend_plain_messages(saved_messages)
-        self.context.set_messages(msgs)
+        msgs.extend_plain_messages(filtered_messages)
+        # Use set_messages_batch to restore to context_manager buckets
+        # This ensures messages are available when to_dph_messages() is called
+        self.context.set_messages_batch(msgs, bucket=BuildInBucket.SCRATCHPAD.value)

     input_dict = self.context.get_var_value("tool")
     function_name = input_dict["tool_name"]
     raw_tool_args = input_dict["tool_args"]
     function_params_json = {arg["key"]: arg["value"] for arg in raw_tool_args}
-
-    (
-        self.recorder.update(
-            stage=TypeStage.SKILL,
-            source_type=SourceType.EXPLORE,
-            skill_name=function_name,
-            skill_type=self.context.get_skill_type(function_name),
-            skill_args=function_params_json,
-        )
-        if self.recorder
-        else None
-    )
+
+    # Get saved stage_id for resume
+    saved_stage_id = intervention_vars.get("stage_id")
+    logger.debug(f"Resuming tool call for {input_dict['tool_name']}, saved_stage_id: {saved_stage_id}")
+
+    # *** FIX: Update the last tool_call message with modified parameters ***
+    # This ensures LLM sees the actual parameters used, not the original ones
+    messages = self.context.get_messages()
+    if messages and len(messages.get_messages()) > 0:
+        last_message = messages.get_messages()[-1]
+        # Check if last message is an assistant message with tool_calls
+        if (hasattr(last_message, 'role') and last_message.role == "assistant" and
+                hasattr(last_message, 'tool_calls') and last_message.tool_calls):
+            # Find the matching tool_call
+            for tool_call in last_message.tool_calls:
+                if hasattr(tool_call, 'function') and tool_call.function.name == function_name:
+                    # Update the arguments with modified parameters
+                    import json
+                    tool_call.function.arguments = json.dumps(function_params_json, ensure_ascii=False)
+
+    # *** FIX: Don't call recorder.update() here during resume ***
+    # skill_run() will create the stage with the correct saved_stage_id
+    # Calling update() here would create an extra stage with a new ID
+    # (
+    #     self.recorder.update(
+    #         stage=TypeStage.SKILL,
+    #         source_type=SourceType.EXPLORE,
+    #         skill_name=function_name,
+    #         skill_type=self.context.get_skill_type(function_name),
+    #         skill_args=function_params_json,
+    #     )
+    #     if self.recorder
+    #     else None
+    # )
+
+    # *** Handle skip action ***
+    skip_tool = self.context.get_var_value("__skip_tool__")
+    skip_message = self.context.get_var_value("__skip_message__")
+
+    # Clean up skip flags
+    if skip_tool:
+        self.context.delete_variable("__skip_tool__")
+    if skip_message:
+        self.context.delete_variable("__skip_message__")
+
     self.context.delete_variable("tool")

     return_answer = {}
+
+    # If user chose to skip, don't execute the tool
+    if skip_tool:
+        # Generate friendly skip message
+        params_str = ", ".join([f"{k}={v}" for k, v in function_params_json.items()])
+        default_skip_msg = f"Tool '{function_name}' was skipped by user"
+        if skip_message:
+            skip_response = f"[SKIPPED] {skip_message}"
+        else:
+            skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
+
+        return_answer["answer"] = skip_response
+        return_answer["think"] = skip_response
+        return_answer["status"] = "completed"
+
+        (
+            self.recorder.update(
+                item={"answer": skip_response, "block_answer": skip_response},
+                stage=TypeStage.SKILL,
+                source_type=SourceType.EXPLORE,
+                skill_name=function_name,
+                skill_type=self.context.get_skill_type(function_name),
+                skill_args=function_params_json,
+            )
+            if self.recorder
+            else None
+        )
+
+        yield [return_answer]
+
+        # Add tool response message with skip indicator
+        tool_call_id = self._extract_tool_call_id()
+        if not tool_call_id:
+            tool_call_id = f"call_{function_name}_{self.times}"
+
+        self._append_tool_message(tool_call_id, skip_response, metadata={"skipped": True})
+        return
+
+    # Normal execution (not skipped)
     try:
-        props = {"intervention": False}
+        props = {"intervention": False, "saved_stage_id": saved_stage_id}
         have_answer = False

         async for resp in self.skill_run(
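
For reference, the role filter introduced above keeps only conversation-progress messages when the saved prompt is restored. A minimal sketch of its effect on plain dict messages, assuming MessageRole.ASSISTANT.value and MessageRole.TOOL.value are the standard "assistant" and "tool" role strings (this is not test code from the package):

```python
# Minimal sketch; the role strings are assumed, not taken from the package's enums.
saved_messages = [
    {"role": "system", "content": "You are an agent."},          # already in SYSTEM bucket
    {"role": "user", "content": "Deploy the service."},          # already in QUERY/HISTORY buckets
    {"role": "assistant", "content": "Calling the deploy tool."},
    {"role": "tool", "content": "deploy: ok"},
]

filtered_messages = [
    msg for msg in saved_messages
    if msg.get("role") in ["assistant", "tool"]
]
# Only the assistant and tool messages are restored into the SCRATCHPAD bucket,
# so SYSTEM/USER content is not duplicated across buckets on resume.
print(filtered_messages)
```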
@@ -516,11 +603,13 @@ class ExploreBlockV2(BasicCodeBlock):
     intervention_tmp_key = "intervention_explore_block_vars"

     try:
+        # Save intervention vars (stage_id will be filled by skill_run after creating the stage)
         intervention_vars = {
             "prompt": self.context.get_messages().get_messages_as_dict(),
             "tool_name": stream_item.tool_name,
             "cur_llm_stream_answer": stream_item.answer,
             "all_answer": stream_item.answer,
+            "stage_id": None,  # Will be updated by skill_run() after stage creation
         }

         self.context.set_variable(intervention_tmp_key, intervention_vars)
@@ -574,11 +574,12 @@ class ToolCallStrategy(ExploreStrategy):
         no_cache: bool = False,
     ) -> Dict[str, Any]:
         """Includes the tools parameter and an optional tool_choice"""
+        tools = skillkit.getSkillsSchema() if skillkit and not skillkit.isEmpty() else []
         llm_params = {
             "messages": messages,
             "model": model,
             "no_cache": no_cache,
-            "tools": skillkit.getSkillsSchema() if skillkit and not skillkit.isEmpty() else [],
+            "tools": tools,
         }

         if tool_choice:
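
The refactor above only hoists the tools schema into a local variable; the request payload is unchanged. A rough sketch of the resulting shape, assuming getSkillsSchema() returns OpenAI-style function-tool entries (the schema format is not confirmed by this diff):

```python
# Illustrative payload only; the tool schema shape is an assumption.
tools = [
    {
        "type": "function",
        "function": {
            "name": "search",
            "description": "Search the web",
            "parameters": {"type": "object", "properties": {"query": {"type": "string"}}},
        },
    }
]

llm_params = {
    "messages": [{"role": "user", "content": "Find the latest release notes"}],
    "model": "gpt-4o",
    "no_cache": False,
    "tools": tools,  # [] when the skillkit is missing or empty
}
# tool_choice is only added to llm_params when it is explicitly provided.
```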
@@ -667,6 +668,7 @@ class ToolCallStrategy(ExploreStrategy):
                f"Tool call {info.name} (id={info.id}) skipped: "
                f"Stream ended but JSON arguments incomplete or invalid. "
                f"Raw arguments: '{info.raw_arguments}'"
+               f"finish_reason: {stream_item.finish_reason}"
            )

        return result
@@ -7,6 +7,9 @@ from dolphin.core.context.context import Context
 from dolphin.core.llm.llm_client import LLMClient
 from dolphin.core.utils.tools import ToolInterrupt
 from dolphin.core.context.var_output import SourceType
+from dolphin.core.logging.logger import get_logger
+
+logger = get_logger()


 class JudgeBlock(BasicCodeBlock):
@@ -129,6 +132,9 @@ class JudgeBlock(BasicCodeBlock):

             tool_name = intervention_vars["tool_name"]
             judge_call_info = intervention_vars["judge_call_info"]
+
+            # *** FIX: Get saved stage_id for resume ***
+            saved_stage_id = intervention_vars.get("stage_id")

             self.recorder.set_output_var(
                 judge_call_info["assign_type"], judge_call_info["output_var"]
@@ -147,16 +153,41 @@ class JudgeBlock(BasicCodeBlock):
             raw_tool_args = input_dict["tool_args"]
             new_tool_args = {arg["key"]: arg["value"] for arg in raw_tool_args}

-            props = {"intervention": False, "gvp": self.context}
+            # *** FIX: Pass saved_stage_id to skill_run ***
+            props = {"intervention": False, "saved_stage_id": saved_stage_id, "gvp": self.context}
+
+            # *** Handle skip action ***
+            skip_tool = self.context.get_var_value("__skip_tool__")
+            skip_message = self.context.get_var_value("__skip_message__")
+
+            # Clean up skip flags
+            if skip_tool:
+                self.context.delete_variable("__skip_tool__")
+            if skip_message:
+                self.context.delete_variable("__skip_message__")
+
             self.context.delete_variable("tool")

-            async for resp_item in self.skill_run(
-                source_type=SourceType.SKILL,
-                skill_name=tool_name,
-                skill_params_json=new_tool_args,
-                props=props,
-            ):
-                yield resp_item
+            # If user chose to skip, don't execute the tool
+            if skip_tool:
+                # Generate friendly skip message
+                params_str = ", ".join([f"{k}={v}" for k, v in new_tool_args.items()])
+                default_skip_msg = f"Tool '{tool_name}' was skipped by user"
+                if skip_message:
+                    skip_response = f"[SKIPPED] {skip_message}"
+                else:
+                    skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
+
+                yield {"answer": skip_response}
+            else:
+                # Normal execution (not skipped)
+                async for resp_item in self.skill_run(
+                    source_type=SourceType.SKILL,
+                    skill_name=tool_name,
+                    skill_params_json=new_tool_args,
+                    props=props,
+                ):
+                    yield resp_item
         else:
             self.recorder.set_output_var(self.assign_type, self.output_var)

@@ -178,6 +209,7 @@ class JudgeBlock(BasicCodeBlock):
             if self.recorder and hasattr(self.recorder, "set_output_var"):
                 self.recorder.set_output_var(self.assign_type, self.output_var)

+            # Save intervention vars (stage_id will be filled by skill_run after creating the stage)
             intervention_vars = {
                 "tool_name": tool_name,
                 "judge_call_info": {
@@ -186,6 +218,7 @@ class JudgeBlock(BasicCodeBlock):
                     "output_var": self.output_var,
                     "params": self.params,
                 },
+                "stage_id": None,  # Will be updated by skill_run() after stage creation
             }

             try:
@@ -111,14 +111,16 @@ class DefaultSkillCallDeduplicator(SkillCallDeduplicator):
     def __init__(self):
         self.skillcalls: Dict[str, int] = {}
         self.call_results: Dict[str, str] = {}
-        # Cache call_key to avoid duplicate serialization
-        self._call_key_cache: Dict[int, str] = {}
+        # Import polling tools from constants to avoid hardcoding.
+        # These tools are expected to be called repeatedly (polling-style).
+        # Do NOT count these towards duplicate-call termination.
+        from dolphin.core.common.constants import POLLING_TOOLS
+        self._always_allow_duplicate_skills = POLLING_TOOLS

     def clear(self):
         """Clear all records"""
         self.skillcalls.clear()
         self.call_results.clear()
-        self._call_key_cache.clear()

     def get_history(self) -> list:
         """Get the history of all recorded skill calls.
@@ -153,11 +155,6 @@ class DefaultSkillCallDeduplicator(SkillCallDeduplicator):

         Uses the normalized JSON string of the skill name and arguments as the unique identifier.
         """
-        # Use object id as cache key
-        cache_key = id(skill_call)
-        if cache_key in self._call_key_cache:
-            return self._call_key_cache[cache_key]
-
         skill_name, arguments = self._extract_skill_info(skill_call)

         # Normalized parameters: sorting keys, ensuring consistency
@@ -175,7 +172,6 @@ class DefaultSkillCallDeduplicator(SkillCallDeduplicator):
             normalized_args = str(arguments).strip()

         call_key = f"{skill_name}:{normalized_args}"
-        self._call_key_cache[cache_key] = call_key
         return call_key

     def _extract_skill_info(self, skill_call: Any) -> Tuple[str, Any]:
@@ -200,7 +196,29 @@ class DefaultSkillCallDeduplicator(SkillCallDeduplicator):
             skill_name = str(skill_call)
             arguments = {}

-        return skill_name, arguments
+        return skill_name, self._normalize_arguments(arguments)
+
+    @staticmethod
+    def _normalize_arguments(arguments: Any) -> Any:
+        """Normalize arguments to improve deduplication stability.
+
+        Some callers may pass JSON strings (e.g., "{}") instead of dicts.
+        This method converts JSON strings into Python objects when possible.
+        """
+        if arguments is None:
+            return {}
+        if isinstance(arguments, str):
+            raw = arguments.strip()
+            if raw == "":
+                return {}
+            # Fast-path common empty payloads.
+            if raw in ("{}", "[]", "null"):
+                return {} if raw != "[]" else []
+            try:
+                return json.loads(raw)
+            except Exception:
+                return raw
+        return arguments

     def add(self, skill_call: Any, result: Optional[str] = None):
         """Add skill call record
@@ -248,6 +266,10 @@ class DefaultSkillCallDeduplicator(SkillCallDeduplicator):
         """
         skill_name, arguments = self._extract_skill_info(skill_call)

+        # Polling tools are expected to be invoked repeatedly.
+        if skill_name in self._always_allow_duplicate_skills:
+            return True
+
         # Calls without arguments are not specially handled
         if not arguments:
             return False
@@ -2,10 +2,12 @@ from dolphin.core.code_block.basic_code_block import BasicCodeBlock
 from dolphin.core.utils.tools import ToolInterrupt
 from dolphin.core.common.enums import CategoryBlock, TypeStage
 from dolphin.core.context.context import Context
-from dolphin.core.logging.logger import console
+from dolphin.core.logging.logger import console, get_logger
 from dolphin.core.context.var_output import SourceType
 from typing import Optional, AsyncGenerator, Dict, Any

+logger = get_logger()
+


 class ToolBlock(BasicCodeBlock):
@@ -52,6 +54,10 @@ class ToolBlock(BasicCodeBlock):

             tool_name = intervention_vars["tool_name"]
             tool_call_info = intervention_vars["tool_call_info"]
+
+            # *** FIX: Get saved stage_id for resume ***
+            saved_stage_id = intervention_vars.get("stage_id")
+
             self.context.delete_variable("intervention_tool_block_vars")
             if self.recorder is not None:
                 self.recorder.set_output_var(
@@ -69,29 +75,67 @@ class ToolBlock(BasicCodeBlock):
             raw_tool_args = input_dict["tool_args"]
             new_tool_args = {arg["key"]: arg["value"] for arg in raw_tool_args}

-            props = {"intervention": False, "gvp": self.context}
-            input_dict = self.context.delete_variable("tool")
-
-            resp_item = None
-            async for resp_item in self.skill_run(
-                source_type=SourceType.SKILL,
-                skill_name=tool_name,
-                skill_params_json=new_tool_args,
-                props=props,
-            ):
-                yield resp_item
-
-            if self.recorder is not None:
-                self.recorder.update(
-                    stage=TypeStage.SKILL,
-                    item=resp_item,
-                    skill_name=tool_name,
-                    skill_args=new_tool_args,
-                    skill_type=self.context.get_skill_type(tool_name),
+            # *** FIX: Pass saved_stage_id to skill_run ***
+            props = {"intervention": False, "saved_stage_id": saved_stage_id, "gvp": self.context}
+
+            # *** Handle skip action ***
+            skip_tool = self.context.get_var_value("__skip_tool__")
+            skip_message = self.context.get_var_value("__skip_message__")
+
+            # Clean up skip flags
+            if skip_tool:
+                self.context.delete_variable("__skip_tool__")
+            if skip_message:
+                self.context.delete_variable("__skip_message__")
+
+            self.context.delete_variable("tool")
+
+            # If user chose to skip, don't execute the tool
+            if skip_tool:
+                # Generate friendly skip message
+                params_str = ", ".join([f"{k}={v}" for k, v in new_tool_args.items()])
+                default_skip_msg = f"Tool '{tool_name}' was skipped by user"
+                if skip_message:
+                    skip_response = f"[SKIPPED] {skip_message}"
+                else:
+                    skip_response = f"[SKIPPED] {default_skip_msg} (parameters: {params_str})"
+
+                resp_item = {"answer": skip_response}
+
+                if self.recorder is not None:
+                    self.recorder.update(
+                        stage=TypeStage.SKILL,
+                        item=resp_item,
+                        skill_name=tool_name,
+                        skill_args=new_tool_args,
+                        skill_type=self.context.get_skill_type(tool_name),
+                        source_type=SourceType.SKILL,
+                        is_completed=True,
+                    )
+
+                yield {"data": resp_item}
+            else:
+                # Normal execution (not skipped)
+                resp_item = None
+                async for resp_item in self.skill_run(
                     source_type=SourceType.SKILL,
-                    is_completed=True,
-                )
-                yield {"data": resp_item}
+                    skill_name=tool_name,
+                    skill_params_json=new_tool_args,
+                    props=props,
+                ):
+                    yield resp_item
+
+                if self.recorder is not None:
+                    self.recorder.update(
+                        stage=TypeStage.SKILL,
+                        item=resp_item,
+                        skill_name=tool_name,
+                        skill_args=new_tool_args,
+                        skill_type=self.context.get_skill_type(tool_name),
+                        source_type=SourceType.SKILL,
+                        is_completed=True,
+                    )
+                yield {"data": resp_item}
         else:
             # step1: First parse, then retrieve the actual values from gvpool when actually calling the function (the actual variable values might be of type dict, list)
             tool_call_info = self.parse_tool_call()
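
The skip path above (mirrored in JudgeBlock and ExploreBlockV2) is driven entirely by two context variables. A hypothetical caller-side sketch of requesting a skip before the interrupted block resumes; the resume entry point itself is not part of this diff and is assumed:

```python
# Hypothetical intervention handler (not part of this diff). Only the two
# variable names and context.set_variable() come from the diff itself.
def decline_pending_tool(context, reason=None):
    # Tell the resuming block not to execute the pending tool call.
    context.set_variable("__skip_tool__", True)
    if reason:
        # Optional human-readable reason, surfaced as "[SKIPPED] <reason>".
        context.set_variable("__skip_message__", reason)

# On resume, the block clears both flags, bypasses skill_run(), records a
# completed SKILL stage containing the skip text, and yields it as the answer.
```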
@@ -103,9 +147,11 @@ class ToolBlock(BasicCodeBlock):
             # step2: Obtain the tool object and execute the tool call
             tool_name = tool_call_info["tool_name"]

+            # Save intervention vars (stage_id will be filled by skill_run after creating the stage)
             intervention_vars = {
                 "tool_name": tool_call_info["tool_name"],
                 "tool_call_info": tool_call_info,
+                "stage_id": None,  # Will be updated by skill_run() after stage creation
             }

             self.context.set_variable(
@@ -144,7 +144,31 @@ SEARCH_TIMEOUT = 10 # seconds for search API calls

 SEARCH_RETRY_COUNT = 2  # number of retries for failed search API calls

-MAX_SKILL_CALL_TIMES = 100
+MAX_SKILL_CALL_TIMES = 500
+
+# Plan orchestration tools (used for task management in plan mode)
+# These tools should be excluded from subtask contexts to prevent infinite recursion.
+PLAN_ORCHESTRATION_TOOLS = frozenset({
+    "_plan_tasks",        # Create and register subtasks
+    "_check_progress",    # Check task execution status
+    "_get_task_output",   # Retrieve task results
+    "_wait",              # Wait for a specified duration
+    "_kill_task",         # Cancel a running task
+    "_retry_task",        # Retry a failed task
+})
+
+# Polling tools that are expected to be called repeatedly (excluded from deduplication)
+# These tools are used to check status/wait for async operations and should not trigger
+# duplicate-call termination in ExploreBlock.
+POLLING_TOOLS = frozenset({
+    "_check_progress",    # Plan mode: check task execution status
+    "_wait",              # Plan mode: wait for a specified duration
+})
+
+# Plan mode: maximum consecutive rounds without task status progress.
+# This only applies when an active plan exists and the agent is not using plan-related tools
+# (e.g., _wait / _check_progress). Set to 0 to disable.
+MAX_PLAN_SILENT_ROUNDS = 50

 # Compression constants
 MAX_ANSWER_COMPRESSION_LENGTH = 100
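
A short sketch of how these new constants are meant to be consumed, based on the comments above: orchestration tools stay out of subtask tool lists, and polling tools bypass duplicate-call detection. The filtering helper is hypothetical; only the constant names and import path come from this diff:

```python
from dolphin.core.common.constants import PLAN_ORCHESTRATION_TOOLS, POLLING_TOOLS

# Hypothetical helper: drop plan-orchestration tools from a subtask's tool list
# so a subtask cannot recursively plan further subtasks.
def filter_subtask_tools(tool_names):
    return [name for name in tool_names if name not in PLAN_ORCHESTRATION_TOOLS]

print(filter_subtask_tools(["_plan_tasks", "_wait", "search", "read_file"]))
# -> ['search', 'read_file']

# POLLING_TOOLS is consumed by DefaultSkillCallDeduplicator (see the
# skill_call_deduplicator.py hunks above): repeated "_check_progress"/"_wait"
# calls are allowed instead of counting toward duplicate-call termination.
print("_check_progress" in POLLING_TOOLS)  # True
```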
@@ -1017,6 +1017,17 @@ class GlobalConfig:

     @staticmethod
     def from_dict(config_dict: dict, base_dir: str = None) -> "GlobalConfig":
+        # Load and apply flags configuration if present
+        if "flags" in config_dict:
+            from dolphin.core import flags
+            flags_config = config_dict.get("flags", {})
+            for flag_name, flag_value in flags_config.items():
+                try:
+                    flags.set_flag(flag_name, bool(flag_value))
+                except Exception as e:
+                    import logging
+                    logging.warning(f"Failed to set flag '{flag_name}': {e}")
+
         is_new_config_format = "llms" in config_dict and "default" in config_dict
         if is_new_config_format:
             default_llm = config_dict.get("default")
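
A sketch of a config dict carrying the new "flags" section, using the keys from_dict inspects above ("flags", "llms", "default"). The flag name and the LLM entry shape are illustrative assumptions, not the documented schema:

```python
from dolphin.core.config.global_config import GlobalConfig

config_dict = {
    "flags": {
        "enable_cow_context": True,  # hypothetical flag name
    },
    "llms": {
        "gpt-4o": {"api_key": "<your-key>", "base_url": "https://example.invalid/v1"},
    },
    "default": "gpt-4o",
}

# Each flag value is coerced to bool and applied via flags.set_flag(); a flag
# that fails to set is logged as a warning rather than raised, and the rest of
# the config is still parsed.
config = GlobalConfig.from_dict(config_dict)
```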
@@ -1228,6 +1239,30 @@ class GlobalConfig:
         result = {
             "default": self.default_llm,
         }
+
+        # Add flags configuration
+        from dolphin.core import flags
+        from dolphin.core.flags.definitions import DEFAULT_VALUES
+        import logging
+
+        flags_dict = flags.get_all()
+        non_default_flags = {}
+
+        for name, value in flags_dict.items():
+            if name in DEFAULT_VALUES:
+                # Only include flags that differ from their known defaults
+                if value != DEFAULT_VALUES[name]:
+                    non_default_flags[name] = value
+            else:
+                # Unknown flag (possibly user-defined) - include unconditionally with warning
+                logging.warning(
+                    f"Flag '{name}' is not in DEFAULT_VALUES, serializing unconditionally. "
+                    f"Consider adding it to dolphin.core.flags.definitions."
+                )
+                non_default_flags[name] = value
+
+        if non_default_flags:
+            result["flags"] = non_default_flags

         # Add fast_llm (if different from default_llm)
         if self.fast_llm and self.fast_llm != self.default_llm: