aiecs 1.7.6__py3-none-any.whl → 1.8.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs might be problematic.

Files changed (35)
  1. aiecs/__init__.py +1 -1
  2. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +5 -1
  3. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +7 -5
  4. aiecs/config/config.py +3 -0
  5. aiecs/config/tool_config.py +55 -19
  6. aiecs/domain/agent/base_agent.py +79 -0
  7. aiecs/domain/agent/hybrid_agent.py +552 -175
  8. aiecs/domain/agent/knowledge_aware_agent.py +3 -2
  9. aiecs/domain/agent/llm_agent.py +2 -0
  10. aiecs/domain/agent/models.py +10 -0
  11. aiecs/domain/agent/tools/schema_generator.py +17 -4
  12. aiecs/llm/callbacks/custom_callbacks.py +9 -4
  13. aiecs/llm/client_factory.py +20 -7
  14. aiecs/llm/clients/base_client.py +50 -5
  15. aiecs/llm/clients/google_function_calling_mixin.py +46 -88
  16. aiecs/llm/clients/googleai_client.py +183 -9
  17. aiecs/llm/clients/openai_client.py +12 -0
  18. aiecs/llm/clients/openai_compatible_mixin.py +42 -2
  19. aiecs/llm/clients/openrouter_client.py +272 -0
  20. aiecs/llm/clients/vertex_client.py +385 -22
  21. aiecs/llm/clients/xai_client.py +41 -3
  22. aiecs/llm/protocols.py +19 -1
  23. aiecs/llm/utils/image_utils.py +179 -0
  24. aiecs/main.py +2 -2
  25. aiecs/tools/docs/document_creator_tool.py +143 -2
  26. aiecs/tools/docs/document_parser_tool.py +9 -4
  27. aiecs/tools/docs/document_writer_tool.py +179 -0
  28. aiecs/tools/task_tools/image_tool.py +49 -14
  29. aiecs/tools/task_tools/scraper_tool.py +39 -2
  30. {aiecs-1.7.6.dist-info → aiecs-1.8.4.dist-info}/METADATA +4 -2
  31. {aiecs-1.7.6.dist-info → aiecs-1.8.4.dist-info}/RECORD +35 -33
  32. {aiecs-1.7.6.dist-info → aiecs-1.8.4.dist-info}/WHEEL +0 -0
  33. {aiecs-1.7.6.dist-info → aiecs-1.8.4.dist-info}/entry_points.txt +0 -0
  34. {aiecs-1.7.6.dist-info → aiecs-1.8.4.dist-info}/licenses/LICENSE +0 -0
  35. {aiecs-1.7.6.dist-info → aiecs-1.8.4.dist-info}/top_level.txt +0 -0
@@ -43,6 +43,67 @@ class HybridAgent(BaseAIAgent):
  - BaseLLMClient: Standard LLM clients (OpenAI, xAI, etc.)
  - Custom clients: Any object implementing LLMClientProtocol (duck typing)

+ **ReAct Format Reference (for callers to include in their prompts):**
+
+ The caller is responsible for ensuring the LLM follows the correct format.
+ Below are the standard formats that HybridAgent expects:
+
+ CORRECT FORMAT EXAMPLE::
+
+ <THOUGHT>
+ I need to search for information about the weather. Let me use the search tool.
+ </THOUGHT>
+
+ TOOL: search
+ OPERATION: query
+ PARAMETERS: {"q": "weather today"}
+
+ <OBSERVATION>
+ The search tool returned: Today's weather is sunny, 72°F.
+ </OBSERVATION>
+
+ <THOUGHT>
+ I have the weather information. Now I can provide the final response.
+ </THOUGHT>
+
+ FINAL RESPONSE: Today's weather is sunny, 72°F. finish
+
+ INCORRECT FORMAT (DO NOT DO THIS)::
+
+ <THOUGHT>
+ I need to search.
+ TOOL: search
+ OPERATION: query
+ </THOUGHT>
+ ❌ Tool calls must be OUTSIDE the <THOUGHT> and <OBSERVATION> tags
+
+ <THOUGHT>
+ I know the answer.
+ FINAL RESPONSE: The answer is... finish
+ </THOUGHT>
+ ❌ Final responses must be OUTSIDE the <THOUGHT> and <OBSERVATION> tags
+ ❌ FINAL RESPONSE must end with 'finish' suffix to indicate completion
+
+ TOOL CALL FORMAT::
+
+ TOOL: <tool_name>
+ OPERATION: <operation_name>
+ PARAMETERS: <json_parameters>
+
+ FINAL RESPONSE FORMAT::
+
+ FINAL RESPONSE: <your_response> finish
+
+ **Important Notes for Callers:**
+
+ - FINAL RESPONSE MUST end with 'finish' to indicate completion
+ - If no 'finish' suffix, the system assumes response is incomplete and will continue iteration
+ - LLM can output JSON or any text format - it will be passed through unchanged
+ - Each iteration will inform LLM of current iteration number and remaining iterations
+ - If LLM generation is incomplete, it will be asked to continue from where it left off
+ - Callers can customize max_iterations to control loop behavior
+ - Callers are responsible for parsing and handling LLM output format
+
  Examples:
  # Example 1: Basic usage with tool names (backward compatible)
  agent = HybridAgent(
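The format reference above is documentation for callers; HybridAgent itself hands back the raw model output unchanged. A minimal caller-side sketch of enforcing that contract (the helper names here are hypothetical, not part of the aiecs API; the agent's own detection helpers appear later in this diff):

import re

# Illustrative only: caller-side check for the documented output contract.
_TAG_RE = re.compile(r"<(THOUGHT|OBSERVATION)>.*?</\1>", re.DOTALL)

def is_complete_final_response(llm_output: str) -> bool:
    """True if a FINAL RESPONSE with the 'finish' suffix appears outside the tags."""
    visible = _TAG_RE.sub("", llm_output)
    idx = visible.lower().find("final response:")
    return idx != -1 and "finish" in visible[idx:].lower()

def extract_final_response_text(llm_output: str) -> str:
    """Pull the text after 'FINAL RESPONSE:' and drop the trailing 'finish' marker."""
    visible = _TAG_RE.sub("", llm_output)
    _, _, tail = visible.partition("FINAL RESPONSE:")
    return tail.rsplit("finish", 1)[0].strip() if tail else ""

output = (
    "<THOUGHT>\nI have the weather information.\n</THOUGHT>\n\n"
    "FINAL RESPONSE: Today's weather is sunny, 72°F. finish"
)
assert is_complete_final_response(output)
print(extract_final_response_text(output))  # Today's weather is sunny, 72°F.

Unlike the agent's own extraction (which returns the raw text untouched), this sketch also trims the 'finish' marker, purely for display.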
@@ -181,7 +242,7 @@ class HybridAgent(BaseAIAgent):
  config: AgentConfiguration,
  description: Optional[str] = None,
  version: str = "1.0.0",
- max_iterations: int = 10,
+ max_iterations: Optional[int] = None,
  config_manager: Optional["ConfigManagerProtocol"] = None,
  checkpointer: Optional["CheckpointerProtocol"] = None,
  context_engine: Optional[Any] = None,
@@ -201,7 +262,7 @@ class HybridAgent(BaseAIAgent):
  config: Agent configuration
  description: Optional description
  version: Agent version
- max_iterations: Maximum ReAct iterations
+ max_iterations: Maximum ReAct iterations (if None, uses config.max_iterations)
  config_manager: Optional configuration manager for dynamic config
  checkpointer: Optional checkpointer for state persistence
  context_engine: Optional context engine for persistent storage
@@ -255,7 +316,17 @@ class HybridAgent(BaseAIAgent):

  # Store LLM client reference (from BaseAIAgent or local)
  self.llm_client = self._llm_client if self._llm_client else llm_client
- self._max_iterations = max_iterations
+
+ # Use config.max_iterations if constructor parameter is None
+ # This makes max_iterations consistent with max_tokens (both configurable via config)
+ # If max_iterations is explicitly provided, it takes precedence over config
+ if max_iterations is None:
+ # Use config value (defaults to 10 if not set in config)
+ self._max_iterations = config.max_iterations
+ else:
+ # Constructor parameter explicitly provided, use it
+ self._max_iterations = max_iterations
+
  self._system_prompt: Optional[str] = None
  self._conversation_history: List[LLMMessage] = []
  self._tool_schemas: List[Dict[str, Any]] = []
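A minimal sketch of the precedence rule encoded above, assuming only that the configuration value defaults to 10 as the inline comment states (the stand-in class below is illustrative, not package code):

class DemoConfig:
    max_iterations = 10  # stand-in for AgentConfiguration.max_iterations

def resolve_max_iterations(config, max_iterations=None):
    # Explicit constructor argument wins; otherwise fall back to config.
    return config.max_iterations if max_iterations is None else max_iterations

print(resolve_max_iterations(DemoConfig()))       # 10, taken from config
print(resolve_max_iterations(DemoConfig(), 25))   # 25, explicit argument wins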
@@ -339,17 +410,29 @@ class HybridAgent(BaseAIAgent):

  # Add ReAct instructions (always required for HybridAgent)
  parts.append(
- "You are a reasoning agent that can use tools to complete tasks. "
- "Follow the ReAct pattern:\n"
+ "Within the given identity framework, you are also a highly intelligent, responsive, and accurate reasoning agent. that can use tools to complete tasks. "
+ "Follow the ReAct (Reasoning + Acting) pattern to achieve best results:\n"
  "1. THOUGHT: Analyze the task and decide what to do\n"
  "2. ACTION: Use a tool if needed, or provide final answer\n"
  "3. OBSERVATION: Review the tool result and continue reasoning\n\n"
- "When you need to use a tool, respond with:\n"
- "TOOL: <tool_name>\n"
- "OPERATION: <operation_name>\n"
- "PARAMETERS: <json_parameters>\n\n"
- "When you have the final answer, respond with:\n"
- "FINAL ANSWER: <your_answer>"
+ "RESPONSE FORMAT REQUIREMENTS:\n"
+ "- Wrap your thinking process in <THOUGHT>...</THOUGHT> tags\n"
+ "- Wrap your insight about tool result in <OBSERVATION>...</OBSERVATION> tags\n"
+ "- Tool calls (TOOL:, OPERATION:, PARAMETERS:) MUST be OUTSIDE <THOUGHT> and <OBSERVATION> tags\n"
+ "- Final responses (FINAL RESPONSE:) MUST be OUTSIDE <THOUGHT> and <OBSERVATION> tags\n\n"
+ "THINKING GUIDANCE:\n"
+ "When writing <THOUGHT> sections, consider:\n"
+ "- What is the core thing to do?\n"
+ "- What information do I already have?\n"
+ "- What information do I need to gather?\n"
+ "- Which tools would be most helpful?\n"
+ "- What action should I take?\n\n"
+ "OBSERVATION GUIDANCE:\n"
+ "When writing <OBSERVATION> sections, consider:\n"
+ "- What did I learn from the tool results?\n"
+ "- How does this information inform my next work?\n"
+ "- Do I need additional information?\n"
+ "- Am I ready to provide a final response?"
  )

  # Add available tools (always required for HybridAgent)
@@ -383,6 +466,24 @@ class HybridAgent(BaseAIAgent):
  agent_id=self.agent_id,
  )

+ # Extract images from task dict and merge into context
+ task_images = task.get("images")
+ if task_images:
+ # Merge images from task into context
+ # If context already has images, combine them
+ if "images" in context:
+ existing_images = context["images"]
+ if isinstance(existing_images, list) and isinstance(task_images, list):
+ context["images"] = existing_images + task_images
+ elif isinstance(existing_images, list):
+ context["images"] = existing_images + [task_images]
+ elif isinstance(task_images, list):
+ context["images"] = [existing_images] + task_images
+ else:
+ context["images"] = [existing_images, task_images]
+ else:
+ context["images"] = task_images
+
  # Transition to busy state
  self._transition_state(self.state.__class__.BUSY)
  self._current_task_id = task.get("task_id")
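The branching above flattens any mix of single images and image lists into one list. A rough, hypothetical equivalent for illustration (not the package's code path, which also assigns task_images directly when the context has no images yet):

def _as_list(value):
    # Normalise a single item or a list into a list.
    return value if isinstance(value, list) else [value]

def merge_images(existing, incoming):
    return _as_list(existing) + _as_list(incoming)

context = {"images": "https://example.com/a.png"}   # single item
task_images = ["https://example.com/b.png"]          # list
context["images"] = merge_images(context["images"], task_images)
print(context["images"])  # ['https://example.com/a.png', 'https://example.com/b.png']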
@@ -408,7 +509,7 @@ class HybridAgent(BaseAIAgent):

  return {
  "success": True,
- "output": result.get("final_answer"),
+ "output": result.get("final_response"), # Changed from final_answer
  "reasoning_steps": result.get("steps"),
  "tool_calls_count": result.get("tool_calls_count"),
  "iterations": result.get("iterations"),
@@ -499,6 +600,24 @@ class HybridAgent(BaseAIAgent):
  }
  return

+ # Extract images from task dict and merge into context
+ task_images = task.get("images")
+ if task_images:
+ # Merge images from task into context
+ # If context already has images, combine them
+ if "images" in context:
+ existing_images = context["images"]
+ if isinstance(existing_images, list) and isinstance(task_images, list):
+ context["images"] = existing_images + task_images
+ elif isinstance(existing_images, list):
+ context["images"] = existing_images + [task_images]
+ elif isinstance(task_images, list):
+ context["images"] = [existing_images] + task_images
+ else:
+ context["images"] = [existing_images, task_images]
+ else:
+ context["images"] = task_images
+
  # Transition to busy state
  self._transition_state(self.state.__class__.BUSY)
  self._current_task_id = task.get("task_id")
@@ -605,11 +724,23 @@ class HybridAgent(BaseAIAgent):
  for iteration in range(self._max_iterations):
  logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")

+ # Add iteration info to messages (except first iteration which has task context)
+ if iteration > 0:
+ iteration_info = (
+ f"[Iteration {iteration + 1}/{self._max_iterations}, "
+ f"remaining: {self._max_iterations - iteration - 1}]"
+ )
+ # Only add if the last message is not already an iteration info
+ if messages and not messages[-1].content.startswith("[Iteration"):
+ messages.append(LLMMessage(role="user", content=iteration_info))
+
  # Yield iteration status
  yield {
  "type": "status",
  "status": "thinking",
  "iteration": iteration + 1,
+ "max_iterations": self._max_iterations,
+ "remaining": self._max_iterations - iteration - 1,
  "timestamp": datetime.utcnow().isoformat(),
  }

@@ -627,6 +758,7 @@ class HybridAgent(BaseAIAgent):
  model=self._config.llm_model,
  temperature=self._config.temperature,
  max_tokens=self._config.max_tokens,
+ context=context,
  tools=tools,
  tool_choice="auto",
  return_chunks=True, # Enable tool_calls accumulation
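A hedged usage sketch for the streaming loop above. The entry-point name stream_task and the exact event payloads are assumptions inferred from the dicts yielded in this diff, not documented API:

import asyncio

async def run(agent, task):
    # `agent.stream_task` is an assumed name for the async generator driving the loop above.
    async for event in agent.stream_task(task):
        if event["type"] == "status":
            print(f"iteration {event['iteration']}/{event['max_iterations']}, remaining {event['remaining']}")
        elif event["type"] == "tool_call":
            print("calling tool:", event.get("tool_name"))
        elif event["type"] == "tool_result":
            print("tool returned:", event.get("result"))
        elif event["type"] == "result":
            return event["output"]

# asyncio.run(run(agent, {"description": "What's the weather today?"}))  # agent construction omitted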
@@ -638,6 +770,7 @@ class HybridAgent(BaseAIAgent):
  model=self._config.llm_model,
  temperature=self._config.temperature,
  max_tokens=self._config.max_tokens,
+ context=context,
  )

  # Stream tokens and collect tool calls
@@ -677,7 +810,16 @@ class HybridAgent(BaseAIAgent):
  "timestamp": datetime.utcnow().isoformat(),
  }

- thought = "".join(thought_tokens)
+ thought_raw = "".join(thought_tokens)
+
+ # Store raw output in steps (no format processing)
+ steps.append(
+ {
+ "type": "thought",
+ "content": thought_raw.strip(), # Return raw output without processing
+ "iteration": iteration + 1,
+ }
+ )

  # Process tool_calls if received from stream
  if tool_calls_from_stream:
@@ -688,19 +830,30 @@ class HybridAgent(BaseAIAgent):
  func_args = tool_call["function"]["arguments"]

  # Parse function name to extract tool and operation
- parts = func_name.split("_", 1)
- if len(parts) == 2:
- tool_name, operation = parts
- else:
- tool_name = parts[0]
+ # CRITICAL: Try exact match first, then fall back to underscore parsing
+ if self._tool_instances and func_name in self._tool_instances:
+ # Exact match found - use full function name as tool name
+ tool_name = func_name
  operation = None
+ elif self._available_tools and func_name in self._available_tools:
+ # Exact match in available tools list
+ tool_name = func_name
+ operation = None
+ else:
+ # Fallback: try underscore parsing for legacy compatibility
+ parts = func_name.split("_", 1)
+ if len(parts) == 2:
+ tool_name, operation = parts
+ else:
+ tool_name = parts[0]
+ operation = None

  # Parse arguments JSON
  import json
  if isinstance(func_args, str):
  parameters = json.loads(func_args)
  else:
- parameters = func_args
+ parameters = func_args if func_args else {}

  # Yield tool call event
  yield {
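A standalone sketch of the resolution order introduced above: an exact match against the registered tool instances or the available-tools list wins, and splitting on the first underscore is only a legacy fallback (hypothetical helper, not aiecs code):

def resolve_tool_name(func_name, tool_instances, available_tools):
    if func_name in tool_instances or func_name in available_tools:
        return func_name, None                      # exact match: no separate operation
    tool, _, operation = func_name.partition("_")   # legacy "tool_operation" form
    return tool, (operation or None)

print(resolve_tool_name("image_tool", {"image_tool": object()}, []))  # ('image_tool', None)
print(resolve_tool_name("search_query", {}, []))                      # ('search', 'query')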
@@ -715,17 +868,19 @@ class HybridAgent(BaseAIAgent):
  tool_result = await self._execute_tool(tool_name, operation, parameters)
  tool_calls_count += 1

+ # Wrap tool call and result in step
  steps.append(
  {
  "type": "action",
  "tool": tool_name,
  "operation": operation,
  "parameters": parameters,
+ "result": str(tool_result), # Include result in step
  "iteration": iteration + 1,
  }
  )

- # Yield tool result event
+ # Yield tool result event (streaming)
  yield {
  "type": "tool_result",
  "tool_name": tool_name,
@@ -733,15 +888,9 @@ class HybridAgent(BaseAIAgent):
  "timestamp": datetime.utcnow().isoformat(),
  }

- # Add tool result to messages
- observation = f"Tool '{tool_name}' returned: {tool_result}"
- steps.append(
- {
- "type": "observation",
- "content": observation,
- "iteration": iteration + 1,
- }
- )
+ # Add tool result to messages (for LLM consumption)
+ observation_content = f"Tool '{tool_name}' returned: {tool_result}"
+ observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"

  # Add assistant message with tool call and tool result
  messages.append(
@@ -760,13 +909,14 @@ class HybridAgent(BaseAIAgent):
  )

  except Exception as e:
- error_msg = f"Tool execution failed: {str(e)}"
+ error_content = f"Tool execution failed: {str(e)}"
+ error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
  steps.append(
  {
  "type": "observation",
  "content": error_msg,
  "iteration": iteration + 1,
- "error": True,
+ "has_error": True,
  }
  )
  yield {
@@ -786,21 +936,13 @@ class HybridAgent(BaseAIAgent):
  # Continue to next iteration
  continue

- steps.append(
- {
- "type": "thought",
- "content": thought,
- "iteration": iteration + 1,
- }
- )
-
- # Check if final answer
- if "FINAL ANSWER:" in thought:
- final_answer = self._extract_final_answer(thought)
+ # Check for final response (outside tags only)
+ if self._has_final_response(thought_raw):
+ final_response = self._extract_final_response(thought_raw)
  yield {
  "type": "result",
  "success": True,
- "output": final_answer,
+ "output": final_response, # Return raw output without processing
  "reasoning_steps": steps,
  "tool_calls_count": tool_calls_count,
  "iterations": iteration + 1,
@@ -809,11 +951,11 @@ class HybridAgent(BaseAIAgent):
  }
  return

- # Check if tool call
- if "TOOL:" in thought:
+ # Check if tool call (ReAct mode, outside tags only)
+ if self._has_tool_call(thought_raw):
  # ACT: Execute tool
  try:
- tool_info = self._parse_tool_call(thought)
+ tool_info = self._parse_tool_call(thought_raw) # Parse from raw text
  tool_name = tool_info.get("tool", "")
  if not tool_name:
  raise ValueError("Tool name not found in tool call")
@@ -834,27 +976,19 @@ class HybridAgent(BaseAIAgent):
  )
  tool_calls_count += 1

+ # Wrap tool call and result in step
  steps.append(
  {
  "type": "action",
  "tool": tool_info["tool"],
  "operation": tool_info.get("operation"),
  "parameters": tool_info.get("parameters"),
+ "result": str(tool_result), # Include result in step
  "iteration": iteration + 1,
  }
  )

- # OBSERVE: Add tool result to conversation
- observation = f"OBSERVATION: Tool '{tool_info['tool']}' returned: {tool_result}"
- steps.append(
- {
- "type": "observation",
- "content": observation,
- "iteration": iteration + 1,
- }
- )
-
- # Yield tool result event
+ # Yield tool result event (streaming)
  yield {
  "type": "tool_result",
  "tool_name": tool_name,
@@ -862,16 +996,22 @@ class HybridAgent(BaseAIAgent):
  "timestamp": datetime.utcnow().isoformat(),
  }

+ # OBSERVE: Add tool result to conversation (for LLM consumption)
+ observation_content = f"Tool '{tool_info['tool']}' returned: {tool_result}"
+ observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"
+
  # Add to messages for next iteration
- messages.append(LLMMessage(role="assistant", content=thought))
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
  messages.append(LLMMessage(role="user", content=observation))

  except Exception as e:
- error_msg = f"OBSERVATION: Tool execution failed: {str(e)}"
+ error_content = f"Tool execution failed: {str(e)}"
+ error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
  steps.append(
  {
- "type": "observation",
- "content": error_msg,
+ "type": "action",
+ "tool": tool_name if "tool_name" in locals() else "unknown",
+ "error": str(e),
  "iteration": iteration + 1,
  "error": True,
  }
@@ -885,22 +1025,37 @@ class HybridAgent(BaseAIAgent):
  "timestamp": datetime.utcnow().isoformat(),
  }

- messages.append(LLMMessage(role="assistant", content=thought))
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
  messages.append(LLMMessage(role="user", content=error_msg))

  else:
- # LLM didn't provide clear action - treat as final answer
- yield {
- "type": "result",
- "success": True,
- "output": thought,
- "reasoning_steps": steps,
- "tool_calls_count": tool_calls_count,
- "iterations": iteration + 1,
- "total_tokens": total_tokens,
- "timestamp": datetime.utcnow().isoformat(),
- }
- return
+ # Check if there's an incomplete final response (has FINAL RESPONSE but no finish)
+ if self._has_incomplete_final_response(thought_raw):
+ # Incomplete final response - ask LLM to continue
+ continue_message = (
+ f"[Iteration {iteration + 1}/{self._max_iterations}, "
+ f"remaining: {self._max_iterations - iteration - 1}]\n"
+ "Your FINAL RESPONSE appears incomplete (missing 'finish' suffix). "
+ "Please continue your response from where you left off and end with 'finish' "
+ "to indicate completion. If no 'finish' suffix, the system will continue iteration."
+ )
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
+ messages.append(LLMMessage(role="user", content=continue_message))
+ else:
+ # No tool call or final response detected - ask LLM to continue
+ continue_message = (
+ f"[Iteration {iteration + 1}/{self._max_iterations}, "
+ f"remaining: {self._max_iterations - iteration - 1}]\n"
+ "Continuing from your previous output. "
+ "If your generation is incomplete, please continue from where you left off. "
+ "If you decide to take action, ensure proper format:\n"
+ "- Tool call: TOOL:, OPERATION:, PARAMETERS: (outside tags)\n"
+ "- Final response: FINAL RESPONSE: <content> finish (outside tags)"
+ )
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
+ messages.append(LLMMessage(role="user", content=continue_message))
+ # Continue to next iteration
+ continue

  # Max iterations reached
  logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
@@ -937,6 +1092,16 @@ class HybridAgent(BaseAIAgent):
  for iteration in range(self._max_iterations):
  logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")

+ # Add iteration info to messages (except first iteration which has task context)
+ if iteration > 0:
+ iteration_info = (
+ f"[Iteration {iteration + 1}/{self._max_iterations}, "
+ f"remaining: {self._max_iterations - iteration - 1}]"
+ )
+ # Only add if the last message is not already an iteration info
+ if messages and not messages[-1].content.startswith("[Iteration"):
+ messages.append(LLMMessage(role="user", content=iteration_info))
+
  # THINK: LLM reasons about next action
  # Use Function Calling if supported, otherwise use ReAct mode
  if self._use_function_calling and self._tool_schemas:
@@ -947,6 +1112,7 @@ class HybridAgent(BaseAIAgent):
  model=self._config.llm_model,
  temperature=self._config.temperature,
  max_tokens=self._config.max_tokens,
+ context=context,
  tools=tools,
  tool_choice="auto",
  )
@@ -957,11 +1123,32 @@ class HybridAgent(BaseAIAgent):
  model=self._config.llm_model,
  temperature=self._config.temperature,
  max_tokens=self._config.max_tokens,
+ context=context,
  )

- thought = response.content or ""
+ thought_raw = response.content or ""
  total_tokens += getattr(response, "total_tokens", 0)

+ # Update prompt cache metrics from LLM response
+ cache_read_tokens = getattr(response, "cache_read_tokens", None)
+ cache_creation_tokens = getattr(response, "cache_creation_tokens", None)
+ cache_hit = getattr(response, "cache_hit", None)
+ if cache_read_tokens is not None or cache_creation_tokens is not None or cache_hit is not None:
+ self.update_cache_metrics(
+ cache_read_tokens=cache_read_tokens,
+ cache_creation_tokens=cache_creation_tokens,
+ cache_hit=cache_hit,
+ )
+
+ # Store raw output in steps (no format processing)
+ steps.append(
+ {
+ "type": "thought",
+ "content": thought_raw.strip(), # Return raw output without processing
+ "iteration": iteration + 1,
+ }
+ )
+
  # Check for Function Calling response
  tool_calls = getattr(response, "tool_calls", None)
  function_call = getattr(response, "function_call", None)
@@ -989,52 +1176,50 @@ class HybridAgent(BaseAIAgent):
  func_args = tool_call["function"]["arguments"]

  # Parse function name to extract tool and operation
- # Format: tool_name_operation or tool_name
- parts = func_name.split("_", 1)
- if len(parts) == 2:
- tool_name, operation = parts
- else:
- tool_name = parts[0]
+ # CRITICAL: Try exact match first, then fall back to underscore parsing
+ if self._tool_instances and func_name in self._tool_instances:
+ # Exact match found - use full function name as tool name
+ tool_name = func_name
  operation = None
+ elif self._available_tools and func_name in self._available_tools:
+ # Exact match in available tools list
+ tool_name = func_name
+ operation = None
+ else:
+ # Fallback: try underscore parsing for legacy compatibility
+ parts = func_name.split("_", 1)
+ if len(parts) == 2:
+ tool_name, operation = parts
+ else:
+ tool_name = parts[0]
+ operation = None

  # Parse arguments JSON
  import json
  if isinstance(func_args, str):
  parameters = json.loads(func_args)
  else:
- parameters = func_args
-
- steps.append(
- {
- "type": "thought",
- "content": f"Calling tool {func_name}",
- "iteration": iteration + 1,
- }
- )
+ parameters = func_args if func_args else {}

  # Execute tool
  tool_result = await self._execute_tool(tool_name, operation, parameters)
  tool_calls_count += 1

+ # Wrap tool call and result in step
  steps.append(
  {
  "type": "action",
  "tool": tool_name,
  "operation": operation,
  "parameters": parameters,
+ "result": str(tool_result), # Include result in step
  "iteration": iteration + 1,
  }
  )

- # Add tool result to messages
- observation = f"Tool '{tool_name}' returned: {tool_result}"
- steps.append(
- {
- "type": "observation",
- "content": observation,
- "iteration": iteration + 1,
- }
- )
+ # Add tool result to messages (for LLM consumption)
+ observation_content = f"Tool '{tool_name}' returned: {tool_result}"
+ observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"

  # Add assistant message with tool call and tool result
  messages.append(
@@ -1053,13 +1238,14 @@ class HybridAgent(BaseAIAgent):
  )

  except Exception as e:
- error_msg = f"Tool execution failed: {str(e)}"
+ error_content = f"Tool execution failed: {str(e)}"
+ error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
  steps.append(
  {
  "type": "observation",
  "content": error_msg,
  "iteration": iteration + 1,
- "error": True,
+ "has_error": True,
  }
  )
  # Add error to messages
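Taken together, the step-recording changes in these hunks alter the shapes stored in the reasoning-steps list: tool results now ride on the "action" entry itself and error flags use "has_error". An illustrative (made-up) example of the resulting structure:

# Values below are invented; only the keys mirror the step dicts shown in this diff.
steps = [
    {"type": "thought", "content": "<THOUGHT>\nI should search first.\n</THOUGHT>", "iteration": 1},
    {
        "type": "action",
        "tool": "search",
        "operation": "query",
        "parameters": {"q": "weather today"},
        "result": "Today's weather is sunny, 72°F.",
        "iteration": 1,
    },
    {
        "type": "observation",
        "content": "<OBSERVATION>\nTool execution failed: timeout\n</OBSERVATION>",
        "iteration": 2,
        "has_error": True,
    },
]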
@@ -1074,41 +1260,22 @@ class HybridAgent(BaseAIAgent):
  # Continue to next iteration
  continue

- # If using Function Calling and no tool calls, check if we have a final answer
- if self._use_function_calling and thought:
- # LLM provided a text response without tool calls - treat as final answer
+ # Check for final response (outside tags only)
+ if self._has_final_response(thought_raw):
+ final_response = self._extract_final_response(thought_raw)
  return {
- "final_answer": thought,
+ "final_response": final_response, # Return raw output without processing
  "steps": steps,
  "iterations": iteration + 1,
  "tool_calls_count": tool_calls_count,
  "total_tokens": total_tokens,
  }

- steps.append(
- {
- "type": "thought",
- "content": thought,
- "iteration": iteration + 1,
- }
- )
-
- # Check if final answer (ReAct mode)
- if "FINAL ANSWER:" in thought:
- final_answer = self._extract_final_answer(thought)
- return {
- "final_answer": final_answer,
- "steps": steps,
- "iterations": iteration + 1,
- "tool_calls_count": tool_calls_count,
- "total_tokens": total_tokens,
- }
-
- # Check if tool call (ReAct mode)
- if "TOOL:" in thought:
+ # Check if tool call (ReAct mode, outside tags only)
+ if self._has_tool_call(thought_raw):
  # ACT: Execute tool
  try:
- tool_info = self._parse_tool_call(thought)
+ tool_info = self._parse_tool_call(thought_raw) # Parse from raw text
  tool_name = tool_info.get("tool", "")
  if not tool_name:
  raise ValueError("Tool name not found in tool call")
@@ -1119,57 +1286,74 @@ class HybridAgent(BaseAIAgent):
  )
  tool_calls_count += 1

+ # Wrap tool call and result in step
  steps.append(
  {
  "type": "action",
  "tool": tool_info["tool"],
  "operation": tool_info.get("operation"),
  "parameters": tool_info.get("parameters"),
+ "result": str(tool_result), # Include result in step
  "iteration": iteration + 1,
  }
  )

- # OBSERVE: Add tool result to conversation
- observation = f"OBSERVATION: Tool '{tool_info['tool']}' returned: {tool_result}"
- steps.append(
- {
- "type": "observation",
- "content": observation,
- "iteration": iteration + 1,
- }
- )
+ # OBSERVE: Add tool result to conversation (for LLM consumption)
+ observation_content = f"Tool '{tool_info['tool']}' returned: {tool_result}"
+ observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"

  # Add to messages for next iteration
- messages.append(LLMMessage(role="assistant", content=thought))
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
  messages.append(LLMMessage(role="user", content=observation))

  except Exception as e:
- error_msg = f"OBSERVATION: Tool execution failed: {str(e)}"
+ error_content = f"Tool execution failed: {str(e)}"
+ error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
  steps.append(
  {
- "type": "observation",
- "content": error_msg,
+ "type": "action",
+ "tool": tool_name if "tool_name" in locals() else "unknown",
+ "error": str(e),
  "iteration": iteration + 1,
- "error": True,
+ "has_error": True,
  }
  )
- messages.append(LLMMessage(role="assistant", content=thought))
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
  messages.append(LLMMessage(role="user", content=error_msg))

  else:
- # LLM didn't provide clear action - treat as final answer
- return {
- "final_answer": thought,
- "steps": steps,
- "iterations": iteration + 1,
- "tool_calls_count": tool_calls_count,
- "total_tokens": total_tokens,
- }
+ # Check if there's an incomplete final response (has FINAL RESPONSE but no finish)
+ if self._has_incomplete_final_response(thought_raw):
+ # Incomplete final response - ask LLM to continue
+ continue_message = (
+ f"[Iteration {iteration + 1}/{self._max_iterations}, "
+ f"remaining: {self._max_iterations - iteration - 1}]\n"
+ "Your FINAL RESPONSE appears incomplete (missing 'finish' suffix). "
+ "Please continue your response from where you left off and end with 'finish' "
+ "to indicate completion. If no 'finish' suffix, the system will continue iteration."
+ )
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
+ messages.append(LLMMessage(role="user", content=continue_message))
+ else:
+ # No tool call or final response detected - ask LLM to continue
+ continue_message = (
+ f"[Iteration {iteration + 1}/{self._max_iterations}, "
+ f"remaining: {self._max_iterations - iteration - 1}]\n"
+ "Continuing from your previous output. "
+ "If your generation is incomplete, please continue from where you left off. "
+ "If you decide to take action, ensure proper format:\n"
+ "- Tool call: TOOL:, OPERATION:, PARAMETERS: (outside tags)\n"
+ "- Final response: FINAL RESPONSE: <content> finish (outside tags)"
+ )
+ messages.append(LLMMessage(role="assistant", content=thought_raw))
+ messages.append(LLMMessage(role="user", content=continue_message))
+ # Continue to next iteration
+ continue

  # Max iterations reached
  logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
  return {
- "final_answer": "Max iterations reached. Unable to complete task fully.",
+ "final_response": "Max iterations reached. Unable to complete task fully.",
  "steps": steps,
  "iterations": self._max_iterations,
  "tool_calls_count": tool_calls_count,
@@ -1196,19 +1380,75 @@ class HybridAgent(BaseAIAgent):
  )
  )

+ # Collect images from context to attach to task message
+ task_images = []
+
  # Add context if provided
  if context:
- context_str = self._format_context(context)
- if context_str:
- messages.append(
- LLMMessage(
- role="system",
- content=f"Additional Context:\n{context_str}",
+ # Special handling: if context contains 'history' as a list of messages,
+ # add them as separate user/assistant messages instead of formatting
+ history = context.get("history")
+ if isinstance(history, list) and len(history) > 0:
+ # Check if history contains message-like dictionaries
+ for msg in history:
+ if isinstance(msg, dict) and "role" in msg and "content" in msg:
+ # Valid message format - add as separate message
+ # Extract images if present
+ msg_images = msg.get("images", [])
+ if msg_images:
+ messages.append(
+ LLMMessage(
+ role=msg["role"],
+ content=msg["content"],
+ images=msg_images if isinstance(msg_images, list) else [msg_images],
+ )
+ )
+ else:
+ messages.append(
+ LLMMessage(
+ role=msg["role"],
+ content=msg["content"],
+ )
+ )
+ elif isinstance(msg, LLMMessage):
+ # Already an LLMMessage instance (may already have images)
+ messages.append(msg)
+
+ # Extract images from context if present
+ context_images = context.get("images")
+ if context_images:
+ if isinstance(context_images, list):
+ task_images.extend(context_images)
+ else:
+ task_images.append(context_images)
+
+ # Format remaining context fields (excluding history and images) as Additional Context
+ context_without_history = {
+ k: v for k, v in context.items()
+ if k not in ("history", "images")
+ }
+ if context_without_history:
+ context_str = self._format_context(context_without_history)
+ if context_str:
+ messages.append(
+ LLMMessage(
+ role="user",
+ content=f"Additional Context:\n{context_str}",
+ )
  )
- )

- # Add task
- messages.append(LLMMessage(role="user", content=f"Task: {task}"))
+ # Add task with iteration info
+ task_message = (
+ f"Task: {task}\n\n"
+ f"[Iteration 1/{self._max_iterations}, remaining: {self._max_iterations - 1}]"
+ )
+ messages.append(
+ LLMMessage(
+ role="user",
+ content=task_message,
+ images=task_images if task_images else [],
+ )
+ )

  return messages
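A hedged example of the context argument accepted by the message-building code above: "history" entries become individual chat messages (optionally with per-message images), top-level "images" are attached to the task message, and any remaining keys are rendered as "Additional Context". The dict below is illustrative only; the enclosing call is assumed since the method signature is not shown in this hunk:

context = {
    "history": [
        {"role": "user", "content": "Here is the chart.", "images": ["https://example.com/chart.png"]},
        {"role": "assistant", "content": "Got it, I can see the chart."},
    ],
    "images": ["https://example.com/extra.png"],   # merged into the task message
    "project": "quarterly-report",                 # formatted as Additional Context
}
# messages = <message-building method>(task="Summarise the chart", context=context)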
@@ -1220,15 +1460,147 @@ class HybridAgent(BaseAIAgent):
  relevant_fields.append(f"{key}: {value}")
  return "\n".join(relevant_fields) if relevant_fields else ""

- def _extract_final_answer(self, thought: str) -> str:
- """Extract final answer from thought."""
- if "FINAL ANSWER:" in thought:
- return thought.split("FINAL ANSWER:", 1)[1].strip()
- return thought
+ def _extract_thought_content(self, text: str) -> str:
+ """
+ Extract content from <THOUGHT>...</THOUGHT> tags.
+
+ DEPRECATED: This method is kept for backward compatibility but no longer
+ extracts content. Returns original text as-is per new design.
+
+ Args:
+ text: Text that may contain THOUGHT tags
+
+ Returns:
+ Original text (no extraction performed)
+ """
+ # Return original text without processing (new design)
+ return text.strip()
+
+ def _extract_observation_content(self, text: str) -> str:
+ """
+ Extract content from <OBSERVATION>...</OBSERVATION> tags.
+
+ DEPRECATED: This method is kept for backward compatibility but no longer
+ extracts content. Returns original text as-is per new design.
+
+ Args:
+ text: Text that may contain OBSERVATION tags
+
+ Returns:
+ Original text (no extraction performed)
+ """
+ # Return original text without processing (new design)
+ return text.strip()
+
+ def _has_final_response(self, text: str) -> bool:
+ """
+ Check if text contains complete FINAL RESPONSE with 'finish' suffix.
+
+ The FINAL RESPONSE must end with 'finish' to be considered complete.
+ If FINAL RESPONSE is present but without 'finish', it's considered incomplete
+ and the loop will continue to let LLM complete the response.
+
+ Args:
+ text: Text to check
+
+ Returns:
+ True if complete FINAL RESPONSE (with finish suffix) found outside tags
+ """
+ import re
+
+ # Remove content inside THOUGHT and OBSERVATION tags
+ text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+ text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+ # Check for FINAL RESPONSE marker with 'finish' suffix in remaining text
+ # The 'finish' must appear after FINAL RESPONSE: content
+ if "FINAL RESPONSE:" not in text_without_tags:
+ return False
+
+ # Check if 'finish' appears after FINAL RESPONSE:
+ # Use case-insensitive search for 'finish' at the end
+ text_lower = text_without_tags.lower()
+ final_response_idx = text_lower.find("final response:")
+ if final_response_idx == -1:
+ return False
+
+ # Check if 'finish' appears after the FINAL RESPONSE marker
+ remaining_text = text_without_tags[final_response_idx:]
+ return "finish" in remaining_text.lower()
+
+ def _has_incomplete_final_response(self, text: str) -> bool:
+ """
+ Check if text contains FINAL RESPONSE marker but without 'finish' suffix.
+
+ Args:
+ text: Text to check
+
+ Returns:
+ True if FINAL RESPONSE marker found but without finish suffix
+ """
+ import re
+
+ # Remove content inside THOUGHT and OBSERVATION tags
+ text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+ text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+ # Check for FINAL RESPONSE marker without 'finish' suffix
+ if "FINAL RESPONSE:" not in text_without_tags:
+ return False
+
+ # Check if 'finish' is missing
+ text_lower = text_without_tags.lower()
+ final_response_idx = text_lower.find("final response:")
+ remaining_text = text_without_tags[final_response_idx:]
+ return "finish" not in remaining_text.lower()
+
+ def _extract_final_response(self, text: str) -> str:
+ """
+ Extract final response from text, preserving original format.
+ Only extracts from outside THOUGHT/OBSERVATION tags.
+
+ Args:
+ text: Text that may contain FINAL RESPONSE marker
+
+ Returns:
+ Original text if FINAL RESPONSE found, otherwise empty string
+ """
+ import re
+
+ # Remove content inside THOUGHT and OBSERVATION tags
+ text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+ text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+ # Check for FINAL RESPONSE marker
+ if "FINAL RESPONSE:" in text_without_tags:
+ # Return original text without any processing
+ return text.strip()
+
+ return ""

- def _parse_tool_call(self, thought: str) -> Dict[str, Any]:
+ def _has_tool_call(self, text: str) -> bool:
+ """
+ Check if text contains TOOL call marker outside of THOUGHT/OBSERVATION tags.
+
+ Args:
+ text: Text to check
+
+ Returns:
+ True if TOOL marker found outside tags
+ """
+ import re
+
+ # Remove content inside THOUGHT and OBSERVATION tags
+ text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+ text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+ # Check for TOOL marker in remaining text
+ return "TOOL:" in text_without_tags
+
+ def _parse_tool_call(self, text: str) -> Dict[str, Any]:
  """
- Parse tool call from LLM thought.
+ Parse tool call from LLM output.
+ Only parses from outside THOUGHT/OBSERVATION tags.

  Expected format:
  TOOL: <tool_name>
@@ -1236,28 +1608,33 @@ class HybridAgent(BaseAIAgent):
  PARAMETERS: <json_parameters>

  Args:
- thought: LLM thought containing tool call
+ text: LLM output that may contain tool call

  Returns:
  Dictionary with 'tool', 'operation', 'parameters'
  """
  import json
+ import re

  result = {}
+
+ # Remove content inside THOUGHT and OBSERVATION tags
+ text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+ text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)

- # Extract tool
- if "TOOL:" in thought:
- tool_line = [line for line in thought.split("\n") if line.startswith("TOOL:")][0]
+ # Extract tool from text outside tags
+ if "TOOL:" in text_without_tags:
+ tool_line = [line for line in text_without_tags.split("\n") if line.strip().startswith("TOOL:")][0]
  result["tool"] = tool_line.split("TOOL:", 1)[1].strip()

  # Extract operation (optional)
- if "OPERATION:" in thought:
- op_line = [line for line in thought.split("\n") if line.startswith("OPERATION:")][0]
+ if "OPERATION:" in text_without_tags:
+ op_line = [line for line in text_without_tags.split("\n") if line.strip().startswith("OPERATION:")][0]
  result["operation"] = op_line.split("OPERATION:", 1)[1].strip()

  # Extract parameters (optional)
- if "PARAMETERS:" in thought:
- param_line = [line for line in thought.split("\n") if line.startswith("PARAMETERS:")][0]
+ if "PARAMETERS:" in text_without_tags:
+ param_line = [line for line in text_without_tags.split("\n") if line.strip().startswith("PARAMETERS:")][0]
  param_str = param_line.split("PARAMETERS:", 1)[1].strip()
  try:
  result["parameters"] = json.loads(param_str)
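A self-contained rerun of the parsing rules shown above, for illustration only (the real logic lives in the _has_tool_call and _parse_tool_call methods in this diff): strip THOUGHT/OBSERVATION blocks first, then read the TOOL/OPERATION/PARAMETERS lines from what remains.

import json
import re

llm_output = (
    "<THOUGHT>\nThe user wants the weather, so I should call the search tool.\n</THOUGHT>\n"
    "TOOL: search\n"
    "OPERATION: query\n"
    'PARAMETERS: {"q": "weather today"}\n'
)

# Remove reasoning/observation blocks so only the action lines are inspected.
visible = re.sub(r"<THOUGHT>.*?</THOUGHT>", "", llm_output, flags=re.DOTALL)
visible = re.sub(r"<OBSERVATION>.*?</OBSERVATION>", "", visible, flags=re.DOTALL)

call = {}
for line in visible.split("\n"):
    line = line.strip()
    for key, marker in (("tool", "TOOL:"), ("operation", "OPERATION:"), ("parameters", "PARAMETERS:")):
        if line.startswith(marker) and key not in call:
            call[key] = line.split(marker, 1)[1].strip()
if "parameters" in call:
    call["parameters"] = json.loads(call["parameters"])

print(call)  # {'tool': 'search', 'operation': 'query', 'parameters': {'q': 'weather today'}}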