mojentic 0.8.4__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. _examples/async_dispatcher_example.py +12 -4
  2. _examples/async_llm_example.py +1 -2
  3. _examples/broker_as_tool.py +42 -17
  4. _examples/broker_examples.py +5 -7
  5. _examples/broker_image_examples.py +1 -1
  6. _examples/characterize_ollama.py +3 -3
  7. _examples/characterize_openai.py +1 -1
  8. _examples/chat_session.py +2 -2
  9. _examples/chat_session_with_tool.py +2 -2
  10. _examples/coding_file_tool.py +16 -18
  11. _examples/current_datetime_tool_example.py +2 -2
  12. _examples/embeddings.py +1 -1
  13. _examples/ephemeral_task_manager_example.py +15 -11
  14. _examples/fetch_openai_models.py +10 -3
  15. _examples/file_deduplication.py +6 -6
  16. _examples/file_tool.py +5 -5
  17. _examples/image_analysis.py +2 -3
  18. _examples/image_broker.py +1 -1
  19. _examples/image_broker_splat.py +1 -1
  20. _examples/iterative_solver.py +3 -3
  21. _examples/model_characterization.py +2 -0
  22. _examples/openai_gateway_enhanced_demo.py +15 -5
  23. _examples/raw.py +1 -1
  24. _examples/react/agents/decisioning_agent.py +173 -15
  25. _examples/react/agents/summarization_agent.py +89 -0
  26. _examples/react/agents/thinking_agent.py +84 -14
  27. _examples/react/agents/tool_call_agent.py +83 -0
  28. _examples/react/formatters.py +38 -4
  29. _examples/react/models/base.py +60 -11
  30. _examples/react/models/events.py +76 -8
  31. _examples/react.py +71 -21
  32. _examples/recursive_agent.py +2 -2
  33. _examples/simple_llm.py +3 -3
  34. _examples/simple_llm_repl.py +1 -1
  35. _examples/simple_structured.py +1 -1
  36. _examples/simple_tool.py +2 -2
  37. _examples/solver_chat_session.py +5 -11
  38. _examples/streaming.py +36 -18
  39. _examples/tell_user_example.py +4 -4
  40. _examples/tracer_demo.py +18 -20
  41. _examples/tracer_qt_viewer.py +49 -46
  42. _examples/working_memory.py +1 -1
  43. mojentic/__init__.py +3 -3
  44. mojentic/agents/__init__.py +26 -8
  45. mojentic/agents/{agent_broker.py → agent_event_adapter.py} +3 -3
  46. mojentic/agents/async_aggregator_agent_spec.py +32 -33
  47. mojentic/agents/async_llm_agent.py +9 -5
  48. mojentic/agents/async_llm_agent_spec.py +21 -22
  49. mojentic/agents/base_async_agent.py +2 -2
  50. mojentic/agents/base_llm_agent.py +6 -2
  51. mojentic/agents/iterative_problem_solver.py +11 -5
  52. mojentic/agents/simple_recursive_agent.py +11 -10
  53. mojentic/agents/simple_recursive_agent_spec.py +423 -0
  54. mojentic/async_dispatcher.py +0 -1
  55. mojentic/async_dispatcher_spec.py +1 -1
  56. mojentic/context/__init__.py +0 -2
  57. mojentic/dispatcher.py +7 -8
  58. mojentic/llm/__init__.py +5 -5
  59. mojentic/llm/gateways/__init__.py +19 -18
  60. mojentic/llm/gateways/anthropic.py +1 -0
  61. mojentic/llm/gateways/anthropic_messages_adapter.py +0 -1
  62. mojentic/llm/gateways/llm_gateway.py +1 -1
  63. mojentic/llm/gateways/ollama.py +23 -18
  64. mojentic/llm/gateways/openai.py +243 -44
  65. mojentic/llm/gateways/openai_message_adapter_spec.py +3 -3
  66. mojentic/llm/gateways/openai_model_registry.py +7 -6
  67. mojentic/llm/gateways/openai_model_registry_spec.py +1 -2
  68. mojentic/llm/gateways/openai_temperature_handling_spec.py +2 -2
  69. mojentic/llm/llm_broker.py +162 -2
  70. mojentic/llm/llm_broker_spec.py +76 -2
  71. mojentic/llm/message_composers.py +6 -3
  72. mojentic/llm/message_composers_spec.py +5 -1
  73. mojentic/llm/registry/__init__.py +0 -3
  74. mojentic/llm/registry/populate_registry_from_ollama.py +2 -2
  75. mojentic/llm/tools/__init__.py +0 -9
  76. mojentic/llm/tools/ask_user_tool.py +11 -5
  77. mojentic/llm/tools/current_datetime.py +9 -6
  78. mojentic/llm/tools/date_resolver.py +10 -4
  79. mojentic/llm/tools/date_resolver_spec.py +0 -1
  80. mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +4 -1
  81. mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +1 -1
  82. mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +4 -1
  83. mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +5 -2
  84. mojentic/llm/tools/file_manager.py +131 -28
  85. mojentic/llm/tools/file_manager_spec.py +0 -3
  86. mojentic/llm/tools/llm_tool.py +1 -1
  87. mojentic/llm/tools/llm_tool_spec.py +0 -2
  88. mojentic/llm/tools/organic_web_search.py +4 -2
  89. mojentic/llm/tools/tell_user_tool.py +6 -2
  90. mojentic/llm/tools/tool_wrapper.py +2 -2
  91. mojentic/tracer/__init__.py +1 -10
  92. mojentic/tracer/event_store.py +7 -8
  93. mojentic/tracer/event_store_spec.py +1 -2
  94. mojentic/tracer/null_tracer.py +37 -43
  95. mojentic/tracer/tracer_events.py +8 -2
  96. mojentic/tracer/tracer_events_spec.py +6 -7
  97. mojentic/tracer/tracer_system.py +37 -36
  98. mojentic/tracer/tracer_system_spec.py +21 -6
  99. mojentic/utils/__init__.py +1 -1
  100. mojentic/utils/formatting.py +1 -0
  101. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/METADATA +76 -27
  102. mojentic-1.0.0.dist-info/RECORD +149 -0
  103. mojentic-0.8.4.dist-info/RECORD +0 -146
  104. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/WHEEL +0 -0
  105. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/licenses/LICENSE.md +0 -0
  106. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/top_level.txt +0 -0
@@ -6,11 +6,11 @@ for reasoning models vs chat models, provides detailed logging, and offers bette
6
6
  error handling.
7
7
  """
8
8
 
9
- import os
10
9
  from mojentic.llm.gateways.openai import OpenAIGateway
11
10
  from mojentic.llm.gateways.openai_model_registry import get_model_registry
12
11
  from mojentic.llm.gateways.models import LLMMessage, MessageRole
13
12
 
13
+
14
14
  def demonstrate_model_registry():
15
15
  """Demonstrate the model registry capabilities."""
16
16
  print("=== Model Registry Demonstration ===")
@@ -38,6 +38,7 @@ def demonstrate_model_registry():
38
38
  token_param = capabilities.get_token_limit_param()
39
39
  print(f" → Defaulted to: type={capabilities.model_type.value}, token_param={token_param}")
40
40
 
41
+
41
42
  def demonstrate_parameter_adaptation():
42
43
  """Demonstrate parameter adaptation for different model types."""
43
44
  print("\n=== Parameter Adaptation Demonstration ===")
@@ -54,8 +55,14 @@ def demonstrate_parameter_adaptation():
54
55
  }
55
56
 
56
57
  adapted_args = gateway._adapt_parameters_for_model('o1-mini', original_args)
57
- print(f" Original: max_tokens={original_args.get('max_tokens')}, has_tools={'tools' in original_args}")
58
- print(f" Adapted: max_completion_tokens={adapted_args.get('max_completion_tokens')}, has_tools={'tools' in adapted_args}")
58
+ print(
59
+ f" Original: max_tokens={original_args.get('max_tokens')}, "
60
+ f"has_tools={'tools' in original_args}"
61
+ )
62
+ print(
63
+ f" Adapted: max_completion_tokens={adapted_args.get('max_completion_tokens')}, "
64
+ f"has_tools={'tools' in adapted_args}"
65
+ )
59
66
 
60
67
  print("\n2. Chat model parameter adaptation (gpt-4o):")
61
68
  original_args = {
@@ -69,6 +76,7 @@ def demonstrate_parameter_adaptation():
69
76
  print(f" Original: max_tokens={original_args.get('max_tokens')}, has_tools={'tools' in original_args}")
70
77
  print(f" Adapted: max_tokens={adapted_args.get('max_tokens')}, has_tools={'tools' in adapted_args}")
71
78
 
79
+
72
80
  def demonstrate_model_validation():
73
81
  """Demonstrate model parameter validation."""
74
82
  print("\n=== Model Validation Demonstration ===")
@@ -89,6 +97,7 @@ def demonstrate_model_validation():
89
97
  except Exception as e:
90
98
  print(f" Validation error: {e}")
91
99
 
100
+
92
101
  def demonstrate_registry_extensibility():
93
102
  """Demonstrate how to extend the registry with new models."""
94
103
  print("\n=== Registry Extensibility Demonstration ===")
@@ -106,7 +115,7 @@ def demonstrate_registry_extensibility():
106
115
  )
107
116
 
108
117
  registry.register_model("o5-preview", new_capabilities)
109
- print(f" Registered o5-preview as reasoning model")
118
+ print(" Registered o5-preview as reasoning model")
110
119
 
111
120
  # Test the new model
112
121
  capabilities = registry.get_model_capabilities("o5-preview")
@@ -120,6 +129,7 @@ def demonstrate_registry_extensibility():
120
129
  capabilities = registry.get_model_capabilities("claude-3-opus")
121
130
  print(f" claude-3-opus (inferred): type={capabilities.model_type.value}")
122
131
 
132
+
123
133
  if __name__ == "__main__":
124
134
  print("OpenAI Gateway Enhanced Infrastructure Demo")
125
135
  print("=" * 50)
@@ -137,4 +147,4 @@ if __name__ == "__main__":
137
147
  print("✓ Enhanced logging for debugging")
138
148
  print("✓ Parameter validation with helpful warnings")
139
149
  print("✓ Pattern matching for unknown models")
140
- print("✓ Comprehensive test coverage")
150
+ print("✓ Comprehensive test coverage")
_examples/raw.py CHANGED
@@ -18,4 +18,4 @@ anthropic_args = {
18
18
 
19
19
  response = client.messages.create(**anthropic_args)
20
20
 
21
- print(response.content[0].text)
21
+ print(response.content[0].text)
@@ -1,32 +1,190 @@
1
- from mojentic.utils import format_block
2
- from ..formatters import format_current_context, format_available_tools
3
- from ..models.events import InvokeDecisioning, InvokeToolCall
1
+ """Decision-making agent for the ReAct pattern.
2
+
3
+ This agent evaluates the current context and decides on the next action to take.
4
+ """
5
+ from typing import List
6
+
7
+ from pydantic import BaseModel, Field
8
+
4
9
  from mojentic.agents.base_llm_agent import BaseLLMAgent
10
+ from mojentic.event import Event
5
11
  from mojentic.llm import LLMBroker
12
+ from mojentic.llm.gateways.models import LLMMessage
6
13
  from mojentic.llm.tools.date_resolver import ResolveDateTool
14
+ from mojentic.utils import format_block
15
+
16
+ from ..formatters import format_available_tools, format_current_context
17
+ from ..models.base import NextAction
18
+ from ..models.events import (
19
+ FailureOccurred,
20
+ FinishAndSummarize,
21
+ InvokeDecisioning,
22
+ InvokeThinking,
23
+ InvokeToolCall,
24
+ )
25
+
26
+
27
+ class DecisionResponse(BaseModel):
28
+ """Structured response from the decisioning agent."""
29
+
30
+ thought: str = Field(
31
+ ...,
32
+ description="The reasoning behind the decision"
33
+ )
34
+ next_action: NextAction = Field(
35
+ ...,
36
+ description="What should happen next: PLAN, ACT, or FINISH"
37
+ )
38
+ tool_name: str | None = Field(
39
+ None,
40
+ description="Name of tool to use if next_action is ACT"
41
+ )
42
+ tool_arguments: dict = Field(
43
+ default_factory=dict,
44
+ description=("Arguments for the tool if next_action is ACT. "
45
+ "IMPORTANT: Use the exact parameter names from the tool's descriptor. "
46
+ "For resolve_date, use 'relative_date_found' not 'date_text'.")
47
+ )
7
48
 
8
49
 
9
50
  class DecisioningAgent(BaseLLMAgent):
51
+ """Agent responsible for deciding the next action in the ReAct loop.
52
+
53
+ This agent evaluates the current context, plan, and history to determine
54
+ whether to continue planning, take an action, or finish and summarize.
55
+ """
56
+
57
+ MAX_ITERATIONS = 10
58
+
10
59
  def __init__(self, llm: LLMBroker):
11
- super().__init__(llm,
12
- "You are a careful decision maker, weighing the situation and making the best choice based on the information available.")
60
+ """Initialize the decisioning agent.
61
+
62
+ Args:
63
+ llm: The LLM broker to use for generating decisions.
64
+ """
65
+ super().__init__(
66
+ llm,
67
+ ("You are a careful decision maker, "
68
+ "weighing the situation and making the best choice "
69
+ "based on the information available.")
70
+ )
13
71
  self.tools = [ResolveDateTool()]
14
72
 
15
- def receive_event(self, event: InvokeDecisioning):
16
- prompt = self.prompt(event)
17
- print(format_block(prompt))
18
- tool_to_call = self.tools[0]
19
- return [InvokeToolCall(source=type(self), context=event.context, tool=tool_to_call)]
73
+ def receive_event(self, event: Event) -> List[Event]:
74
+ """Process a decisioning event and determine the next action.
75
+
76
+ Args:
77
+ event: The decisioning event containing current context.
78
+
79
+ Returns:
80
+ List containing one of: InvokeToolCall, FinishAndSummarize,
81
+ InvokeThinking, or FailureOccurred event.
82
+ """
83
+ if not isinstance(event, InvokeDecisioning):
84
+ return []
20
85
 
21
- def prompt(self, event: InvokeDecisioning):
22
- prompt = f"""
86
+ # Check iteration limit
87
+ if event.context.iteration >= self.MAX_ITERATIONS:
88
+ return [FailureOccurred(
89
+ source=type(self),
90
+ context=event.context,
91
+ reason=f"Maximum iterations ({self.MAX_ITERATIONS}) exceeded",
92
+ correlation_id=event.correlation_id
93
+ )]
94
+
95
+ # Increment iteration counter
96
+ event.context.iteration += 1
97
+
98
+ prompt = self.prompt(event)
99
+ print(format_block(prompt))
100
+
101
+ try:
102
+ decision = self.llm.generate_object(
103
+ [LLMMessage(content=prompt)],
104
+ object_model=DecisionResponse
105
+ )
106
+ print(format_block(f"Decision: {decision}"))
107
+
108
+ # Route based on decision
109
+ if decision.next_action == NextAction.FINISH:
110
+ return [FinishAndSummarize(
111
+ source=type(self),
112
+ context=event.context,
113
+ thought=decision.thought,
114
+ correlation_id=event.correlation_id
115
+ )]
116
+
117
+ if decision.next_action == NextAction.ACT:
118
+ if not decision.tool_name:
119
+ return [FailureOccurred(
120
+ source=type(self),
121
+ context=event.context,
122
+ reason="ACT decision made but no tool specified",
123
+ correlation_id=event.correlation_id
124
+ )]
125
+
126
+ # Find the requested tool
127
+ tool = next(
128
+ (t for t in self.tools
129
+ if t.descriptor["function"]["name"] == decision.tool_name),
130
+ None
131
+ )
132
+
133
+ if not tool:
134
+ return [FailureOccurred(
135
+ source=type(self),
136
+ context=event.context,
137
+ reason=f"Tool '{decision.tool_name}' not found",
138
+ correlation_id=event.correlation_id
139
+ )]
140
+
141
+ return [InvokeToolCall(
142
+ source=type(self),
143
+ context=event.context,
144
+ thought=decision.thought,
145
+ action=NextAction.ACT,
146
+ tool=tool,
147
+ tool_arguments=decision.tool_arguments,
148
+ correlation_id=event.correlation_id
149
+ )]
150
+
151
+ # PLAN action - go back to thinking
152
+ return [InvokeThinking(
153
+ source=type(self),
154
+ context=event.context,
155
+ correlation_id=event.correlation_id
156
+ )]
157
+
158
+ except Exception as e:
159
+ return [FailureOccurred(
160
+ source=type(self),
161
+ context=event.context,
162
+ reason=f"Error during decision making: {str(e)}",
163
+ correlation_id=event.correlation_id
164
+ )]
165
+
166
+ def prompt(self, event: InvokeDecisioning):
167
+ """Generate the prompt for the decision-making LLM.
168
+
169
+ Args:
170
+ event: The decisioning event containing current context.
171
+
172
+ Returns:
173
+ The formatted prompt string.
174
+ """
175
+ return f"""
23
176
  You are to solve a problem by reasoning and acting on the information you have. Here is the current context:
24
177
 
25
178
  {format_current_context(event.context)}
26
179
  {format_available_tools(self.tools)}
27
180
 
28
181
  Your Instructions:
29
- Given our context and what we've done so far, and the tools available, create a step-by-step plan to answer the query.
30
- """.strip()
182
+ Review the current plan and history. Decide what to do next:
183
+
184
+ 1. PLAN - If the plan is incomplete or needs refinement
185
+ 2. ACT - If you should take an action using one of the available tools
186
+ 3. FINISH - If you have enough information to answer the user's query
31
187
 
32
- return prompt
188
+ If you choose ACT, specify which tool to use and what arguments to pass.
189
+ Think carefully about whether each step in the plan has been completed.
190
+ """.strip()
@@ -0,0 +1,89 @@
1
+ """Summarization agent for the ReAct pattern.
2
+
3
+ This agent generates the final answer based on accumulated context.
4
+ """
5
+ from typing import List
6
+
7
+ from mojentic.agents.base_llm_agent import BaseLLMAgent
8
+ from mojentic.event import Event
9
+ from mojentic.llm import LLMBroker
10
+ from mojentic.llm.gateways.models import LLMMessage
11
+ from mojentic.utils import format_block
12
+
13
+ from ..formatters import format_current_context
14
+ from ..models.events import FailureOccurred, FinishAndSummarize
15
+
16
+
17
+ class SummarizationAgent(BaseLLMAgent):
18
+ """Agent responsible for generating the final answer.
19
+
20
+ This agent reviews the context, plan, and history to synthesize
21
+ a complete answer to the user's original query.
22
+ """
23
+
24
+ def __init__(self, llm: LLMBroker):
25
+ """Initialize the summarization agent.
26
+
27
+ Args:
28
+ llm: The LLM broker to use for generating summaries.
29
+ """
30
+ super().__init__(
31
+ llm,
32
+ ("You are a helpful assistant who provides clear, "
33
+ "accurate answers based on the information gathered.")
34
+ )
35
+
36
+ def receive_event(self, event: Event) -> List[Event]:
37
+ """Generate a final answer based on the context.
38
+
39
+ Args:
40
+ event: The finish event containing the complete context.
41
+
42
+ Returns:
43
+ Empty list (terminal event) or list with FailureOccurred on error.
44
+ """
45
+ if not isinstance(event, FinishAndSummarize):
46
+ return []
47
+
48
+ try:
49
+ prompt = self.prompt(event)
50
+ print(format_block(prompt))
51
+
52
+ response = self.llm.generate([LLMMessage(content=prompt)])
53
+
54
+ print("\n" + "=" * 80)
55
+ print("FINAL ANSWER:")
56
+ print("=" * 80)
57
+ print(response)
58
+ print("=" * 80 + "\n")
59
+
60
+ # This is a terminal event - return empty list to stop the loop
61
+ return []
62
+
63
+ except Exception as e:
64
+ return [FailureOccurred(
65
+ source=type(self),
66
+ context=event.context,
67
+ reason=f"Error during summarization: {str(e)}",
68
+ correlation_id=event.correlation_id
69
+ )]
70
+
71
+ def prompt(self, event: FinishAndSummarize):
72
+ """Generate the prompt for the summarization LLM.
73
+
74
+ Args:
75
+ event: The finish event containing the complete context.
76
+
77
+ Returns:
78
+ The formatted prompt string.
79
+ """
80
+ return f"""
81
+ Based on the following context, provide a clear and concise answer to the user's query.
82
+
83
+ {format_current_context(event.context)}
84
+
85
+ Your task:
86
+ Review what we've learned and provide a direct answer to: "{event.context.user_query}"
87
+
88
+ Be specific and use the information gathered during our process.
89
+ """.strip()
@@ -1,31 +1,100 @@
1
+ """Planning agent for the ReAct pattern.
2
+
3
+ This agent creates structured plans for solving user queries.
4
+ """
5
+ from typing import List
6
+
1
7
  from mojentic.agents.base_llm_agent import BaseLLMAgent
8
+ from mojentic.event import Event
2
9
  from mojentic.llm import LLMBroker
3
10
  from mojentic.llm.gateways.models import LLMMessage
4
11
  from mojentic.llm.tools.date_resolver import ResolveDateTool
5
12
  from mojentic.utils import format_block
6
- from ..formatters import format_current_context, format_available_tools
7
13
 
14
+ from ..formatters import format_available_tools, format_current_context
8
15
  from ..models.base import Plan, ThoughtActionObservation
9
- from ..models.events import InvokeThinking, InvokeDecisioning
16
+ from ..models.events import FailureOccurred, InvokeDecisioning, InvokeThinking
10
17
 
11
18
 
12
19
  class ThinkingAgent(BaseLLMAgent):
20
+ """Agent responsible for creating plans in the ReAct loop.
21
+
22
+ This agent analyzes the user query and available tools to create
23
+ a step-by-step plan for answering the query.
24
+ """
25
+
13
26
  def __init__(self, llm: LLMBroker):
14
- super().__init__(llm,
15
- "You are a task coordinator, who breaks down tasks into component steps to be performed by others.")
27
+ """Initialize the thinking agent.
28
+
29
+ Args:
30
+ llm: The LLM broker to use for generating plans.
31
+ """
32
+ super().__init__(
33
+ llm,
34
+ ("You are a task coordinator, "
35
+ "who breaks down tasks into component steps "
36
+ "to be performed by others.")
37
+ )
16
38
  self.tools = [ResolveDateTool()]
17
39
 
18
- def receive_event(self, event: InvokeThinking):
19
- prompt = self.prompt(event)
20
- print(format_block(prompt))
21
- plan: Plan = self.llm.generate_object([LLMMessage(content=prompt)], object_model=Plan)
22
- print(format_block(str(plan)))
23
- event.context.plan = plan
24
- event.context.history.append(ThoughtActionObservation(thought="I have no plan yet.", action="Create a plan.",
25
- observation="Ready for next step."))
26
- return InvokeDecisioning(source=type(self), context=event.context)
40
+ def receive_event(self, event: Event) -> List[Event]:
41
+ """Process a thinking event and generate a plan.
42
+
43
+ Args:
44
+ event: The thinking event containing current context.
45
+
46
+ Returns:
47
+ List containing InvokeDecisioning event with updated plan,
48
+ or FailureOccurred on error.
49
+ """
50
+ if not isinstance(event, InvokeThinking):
51
+ return []
52
+
53
+ try:
54
+ prompt = self.prompt(event)
55
+ print(format_block(prompt))
56
+
57
+ plan: Plan = self.llm.generate_object(
58
+ [LLMMessage(content=prompt)],
59
+ object_model=Plan
60
+ )
61
+ print(format_block(str(plan)))
62
+
63
+ # Update context with new plan
64
+ event.context.plan = plan
65
+
66
+ # Add planning step to history
67
+ event.context.history.append(
68
+ ThoughtActionObservation(
69
+ thought="I need to create a plan to solve this query.",
70
+ action="Created a step-by-step plan.",
71
+ observation=f"Plan has {len(plan.steps)} steps."
72
+ )
73
+ )
74
+
75
+ return [InvokeDecisioning(
76
+ source=type(self),
77
+ context=event.context,
78
+ correlation_id=event.correlation_id
79
+ )]
80
+
81
+ except Exception as e:
82
+ return [FailureOccurred(
83
+ source=type(self),
84
+ context=event.context,
85
+ reason=f"Error during planning: {str(e)}",
86
+ correlation_id=event.correlation_id
87
+ )]
27
88
 
28
89
  def prompt(self, event: InvokeThinking):
90
+ """Generate the prompt for the planning LLM.
91
+
92
+ Args:
93
+ event: The thinking event containing current context.
94
+
95
+ Returns:
96
+ The formatted prompt string.
97
+ """
29
98
  return f"""
30
99
  You are to solve a problem by reasoning and acting on the information you have. Here is the current context:
31
100
 
@@ -33,5 +102,6 @@ You are to solve a problem by reasoning and acting on the information you have.
33
102
  {format_available_tools(self.tools)}
34
103
 
35
104
  Your Instructions:
36
- Given our context and what we've done so far, and the tools available, create a step-by-step plan to answer the query.
105
+ Given our context and what we've done so far, and the tools available, create a step-by-step plan to answer the query.
106
+ Each step should be concrete and actionable. Consider which tools you'll need to use.
37
107
  """.strip()
@@ -0,0 +1,83 @@
1
+ """Tool execution agent for the ReAct pattern.
2
+
3
+ This agent handles the actual execution of tools and captures the results.
4
+ """
5
+ from typing import List
6
+
7
+ from mojentic.agents.base_agent import BaseAgent
8
+ from mojentic.event import Event
9
+
10
+ from ..models.base import ThoughtActionObservation
11
+ from ..models.events import FailureOccurred, InvokeDecisioning, InvokeToolCall
12
+
13
+
14
+ class ToolCallAgent(BaseAgent):
15
+ """Agent responsible for executing tool calls.
16
+
17
+ This agent receives tool call events, executes the specified tool,
18
+ and updates the context with the results before continuing to the
19
+ decisioning phase.
20
+ """
21
+
22
+ def receive_event(self, event: Event) -> List[Event]:
23
+ """Execute a tool and update the context.
24
+
25
+ Args:
26
+ event: The tool call event containing the tool and arguments.
27
+
28
+ Returns:
29
+ List containing InvokeDecisioning event with updated context,
30
+ or FailureOccurred on error.
31
+ """
32
+ if not isinstance(event, InvokeToolCall):
33
+ return []
34
+
35
+ try:
36
+ tool = event.tool
37
+ tool_name = tool.name
38
+ arguments = event.tool_arguments
39
+
40
+ print(f"\nExecuting tool: {tool_name}")
41
+ print(f"Arguments: {arguments}")
42
+
43
+ # Execute the tool using call_tool method
44
+ result = tool.call_tool(
45
+ correlation_id=event.correlation_id,
46
+ **arguments
47
+ )
48
+
49
+ print(f"Result: {result}")
50
+
51
+ # Extract the text content from the result
52
+ result_text = result
53
+ if isinstance(result, dict) and "content" in result:
54
+ # Extract text from content array
55
+ content_items = result["content"]
56
+ if content_items and isinstance(content_items, list):
57
+ result_text = content_items[0].get("text", str(result))
58
+
59
+ # Add to history
60
+ event.context.history.append(
61
+ ThoughtActionObservation(
62
+ thought=event.thought,
63
+ action=f"Called {tool_name} with {arguments}",
64
+ observation=str(result_text)
65
+ )
66
+ )
67
+
68
+ # Continue to decisioning
69
+ return [InvokeDecisioning(
70
+ source=type(self),
71
+ context=event.context,
72
+ correlation_id=event.correlation_id
73
+ )]
74
+
75
+ except Exception as e:
76
+ import traceback
77
+ traceback.print_exc()
78
+ return [FailureOccurred(
79
+ source=type(self),
80
+ context=event.context,
81
+ reason=f"Tool execution failed: {str(e)}",
82
+ correlation_id=event.correlation_id
83
+ )]
@@ -1,7 +1,20 @@
1
+ """Formatting utilities for the ReAct pattern implementation.
2
+
3
+ This module provides helper functions for formatting context and tool information
4
+ into human-readable strings for LLM prompts.
5
+ """
1
6
  from .models.base import CurrentContext
2
7
 
3
8
 
4
- def format_current_context(context: CurrentContext):
9
+ def format_current_context(context: CurrentContext) -> str:
10
+ """Format the current context into a readable string.
11
+
12
+ Args:
13
+ context: The current context containing query, plan, and history.
14
+
15
+ Returns:
16
+ A formatted multi-line string describing the current context.
17
+ """
5
18
  user_query = f"The user has asked us to answer the following query:\n> {context.user_query}\n"
6
19
 
7
20
  plan = "You have not yet made a plan.\n"
@@ -12,7 +25,7 @@ def format_current_context(context: CurrentContext):
12
25
 
13
26
  history = "No steps have yet been taken.\n"
14
27
  if context.history:
15
- history = "What's been done so far':\n"
28
+ history = "What's been done so far:\n"
16
29
  history += "\n".join(
17
30
  f"{i + 1}.\n Thought: {step.thought}\n Action: {step.action}\n Observation: {step.observation}"
18
31
  for i, step in enumerate(context.history))
@@ -20,11 +33,32 @@ def format_current_context(context: CurrentContext):
20
33
 
21
34
  return f"Current Context:\n{user_query}{plan}{history}\n"
22
35
 
23
- def format_available_tools(tools):
36
+
37
+ def format_available_tools(tools) -> str:
38
+ """Format the available tools into a readable list.
39
+
40
+ Args:
41
+ tools: A list of tool objects with descriptor dictionaries.
42
+
43
+ Returns:
44
+ A formatted string listing available tools and their descriptions.
45
+ """
24
46
  output = ""
25
47
  if tools:
26
48
  output += "Tools available:\n"
27
49
  for tool in tools:
28
- output += f"- {tool.descriptor["function"]["name"]}: {tool.descriptor["function"]["description"]}\n"
50
+ func_descriptor = tool.descriptor['function']
51
+ output += f"- {func_descriptor['name']}: {func_descriptor['description']}\n"
52
+
53
+ # Add parameter information
54
+ if 'parameters' in func_descriptor:
55
+ params = func_descriptor['parameters']
56
+ if 'properties' in params:
57
+ output += " Parameters:\n"
58
+ for param_name, param_info in params['properties'].items():
59
+ param_desc = param_info.get('description', '')
60
+ is_required = param_name in params.get('required', [])
61
+ req_str = " (required)" if is_required else " (optional)"
62
+ output += f" - {param_name}{req_str}: {param_desc}\n"
29
63
 
30
64
  return output