mojentic 0.9.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff compares publicly available package versions that were released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- _examples/async_dispatcher_example.py +12 -4
- _examples/async_llm_example.py +1 -2
- _examples/broker_as_tool.py +39 -14
- _examples/broker_examples.py +4 -6
- _examples/characterize_ollama.py +1 -1
- _examples/characterize_openai.py +1 -1
- _examples/chat_session.py +1 -1
- _examples/chat_session_with_tool.py +1 -1
- _examples/coding_file_tool.py +1 -3
- _examples/current_datetime_tool_example.py +1 -1
- _examples/embeddings.py +1 -1
- _examples/ephemeral_task_manager_example.py +13 -9
- _examples/fetch_openai_models.py +10 -3
- _examples/file_deduplication.py +6 -6
- _examples/image_analysis.py +2 -3
- _examples/image_broker.py +1 -1
- _examples/image_broker_splat.py +1 -1
- _examples/iterative_solver.py +2 -2
- _examples/model_characterization.py +2 -0
- _examples/openai_gateway_enhanced_demo.py +15 -5
- _examples/raw.py +1 -1
- _examples/react/agents/decisioning_agent.py +173 -15
- _examples/react/agents/summarization_agent.py +89 -0
- _examples/react/agents/thinking_agent.py +84 -14
- _examples/react/agents/tool_call_agent.py +83 -0
- _examples/react/formatters.py +38 -4
- _examples/react/models/base.py +60 -11
- _examples/react/models/events.py +76 -8
- _examples/react.py +71 -21
- _examples/recursive_agent.py +1 -1
- _examples/solver_chat_session.py +1 -7
- _examples/streaming.py +7 -5
- _examples/tell_user_example.py +3 -3
- _examples/tracer_demo.py +15 -17
- _examples/tracer_qt_viewer.py +49 -46
- mojentic/__init__.py +3 -3
- mojentic/agents/__init__.py +26 -8
- mojentic/agents/{agent_broker.py → agent_event_adapter.py} +3 -3
- mojentic/agents/async_aggregator_agent_spec.py +32 -33
- mojentic/agents/async_llm_agent.py +9 -5
- mojentic/agents/async_llm_agent_spec.py +21 -22
- mojentic/agents/base_async_agent.py +2 -2
- mojentic/agents/base_llm_agent.py +6 -2
- mojentic/agents/iterative_problem_solver.py +11 -5
- mojentic/agents/simple_recursive_agent.py +11 -10
- mojentic/agents/simple_recursive_agent_spec.py +423 -0
- mojentic/async_dispatcher.py +0 -1
- mojentic/async_dispatcher_spec.py +1 -1
- mojentic/context/__init__.py +0 -2
- mojentic/dispatcher.py +7 -8
- mojentic/llm/__init__.py +5 -5
- mojentic/llm/chat_session.py +24 -1
- mojentic/llm/chat_session_spec.py +40 -0
- mojentic/llm/gateways/__init__.py +19 -18
- mojentic/llm/gateways/anthropic.py +1 -0
- mojentic/llm/gateways/anthropic_messages_adapter.py +0 -1
- mojentic/llm/gateways/llm_gateway.py +1 -1
- mojentic/llm/gateways/ollama.py +2 -0
- mojentic/llm/gateways/openai.py +62 -58
- mojentic/llm/gateways/openai_message_adapter_spec.py +3 -3
- mojentic/llm/gateways/openai_model_registry.py +7 -6
- mojentic/llm/gateways/openai_model_registry_spec.py +1 -2
- mojentic/llm/gateways/openai_temperature_handling_spec.py +2 -2
- mojentic/llm/llm_broker.py +7 -5
- mojentic/llm/llm_broker_spec.py +7 -2
- mojentic/llm/message_composers.py +6 -3
- mojentic/llm/message_composers_spec.py +5 -1
- mojentic/llm/registry/__init__.py +0 -3
- mojentic/llm/tools/__init__.py +0 -9
- mojentic/llm/tools/ask_user_tool.py +11 -5
- mojentic/llm/tools/current_datetime.py +9 -6
- mojentic/llm/tools/date_resolver.py +10 -4
- mojentic/llm/tools/date_resolver_spec.py +0 -1
- mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +4 -1
- mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +1 -1
- mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +4 -1
- mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +5 -2
- mojentic/llm/tools/file_manager.py +131 -28
- mojentic/llm/tools/file_manager_spec.py +0 -3
- mojentic/llm/tools/llm_tool.py +1 -1
- mojentic/llm/tools/llm_tool_spec.py +0 -2
- mojentic/llm/tools/organic_web_search.py +4 -2
- mojentic/llm/tools/tell_user_tool.py +6 -2
- mojentic/llm/tools/tool_wrapper.py +2 -2
- mojentic/tracer/__init__.py +1 -10
- mojentic/tracer/event_store.py +7 -8
- mojentic/tracer/event_store_spec.py +1 -2
- mojentic/tracer/null_tracer.py +37 -43
- mojentic/tracer/tracer_events.py +8 -2
- mojentic/tracer/tracer_events_spec.py +6 -7
- mojentic/tracer/tracer_system.py +37 -36
- mojentic/tracer/tracer_system_spec.py +21 -6
- mojentic/utils/__init__.py +1 -1
- mojentic/utils/formatting.py +1 -0
- {mojentic-0.9.0.dist-info → mojentic-1.0.1.dist-info}/METADATA +47 -29
- mojentic-1.0.1.dist-info/RECORD +149 -0
- {mojentic-0.9.0.dist-info → mojentic-1.0.1.dist-info}/WHEEL +1 -1
- mojentic-0.9.0.dist-info/RECORD +0 -146
- {mojentic-0.9.0.dist-info → mojentic-1.0.1.dist-info}/licenses/LICENSE.md +0 -0
- {mojentic-0.9.0.dist-info → mojentic-1.0.1.dist-info}/top_level.txt +0 -0
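The largest functional change in this release is the reworked ReAct example under `_examples/react/`, which splits the loop into four event-driven agents (ThinkingAgent, DecisioningAgent, ToolCallAgent, SummarizationAgent) connected by typed events. The sketch below shows one way those pieces could be driven by hand, based only on the `receive_event` signatures in the diffs that follow; the `LLMBroker` constructor arguments, the no-argument `ToolCallAgent` construction, the uuid correlation id, the `source=object` placeholder, and the `_examples.react` import paths are assumptions, not taken from the package (the real `_examples/react.py` wires these agents up itself).

```python
# Hypothetical driver for the reworked ReAct example agents (a sketch, not package code).
from uuid import uuid4

from mojentic.llm import LLMBroker

from _examples.react.agents.decisioning_agent import DecisioningAgent   # assumed import paths
from _examples.react.agents.summarization_agent import SummarizationAgent
from _examples.react.agents.thinking_agent import ThinkingAgent
from _examples.react.agents.tool_call_agent import ToolCallAgent
from _examples.react.models.base import CurrentContext
from _examples.react.models.events import (
    FinishAndSummarize, InvokeDecisioning, InvokeThinking, InvokeToolCall,
)

llm = LLMBroker(model="qwen2.5")                 # assumed constructor arguments
context = CurrentContext(user_query="What date is next Friday?")

# Each event type is handled by one agent; receive_event returns the follow-up events.
handlers = {
    InvokeThinking: ThinkingAgent(llm),
    InvokeDecisioning: DecisioningAgent(llm),
    InvokeToolCall: ToolCallAgent(),             # assumed no-argument construction
    FinishAndSummarize: SummarizationAgent(llm),
}

events = [InvokeThinking(source=object, context=context, correlation_id=uuid4())]
while events:
    event = events.pop(0)
    agent = handlers.get(type(event))
    if agent is None:                            # e.g. FailureOccurred ends the run
        break
    events.extend(agent.receive_event(event))
```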
_examples/react/agents/decisioning_agent.py
CHANGED
@@ -1,32 +1,190 @@
-
-
-
+"""Decision-making agent for the ReAct pattern.
+
+This agent evaluates the current context and decides on the next action to take.
+"""
+from typing import List
+
+from pydantic import BaseModel, Field
+
 from mojentic.agents.base_llm_agent import BaseLLMAgent
+from mojentic.event import Event
 from mojentic.llm import LLMBroker
+from mojentic.llm.gateways.models import LLMMessage
 from mojentic.llm.tools.date_resolver import ResolveDateTool
+from mojentic.utils import format_block
+
+from ..formatters import format_available_tools, format_current_context
+from ..models.base import NextAction
+from ..models.events import (
+    FailureOccurred,
+    FinishAndSummarize,
+    InvokeDecisioning,
+    InvokeThinking,
+    InvokeToolCall,
+)
+
+
+class DecisionResponse(BaseModel):
+    """Structured response from the decisioning agent."""
+
+    thought: str = Field(
+        ...,
+        description="The reasoning behind the decision"
+    )
+    next_action: NextAction = Field(
+        ...,
+        description="What should happen next: PLAN, ACT, or FINISH"
+    )
+    tool_name: str | None = Field(
+        None,
+        description="Name of tool to use if next_action is ACT"
+    )
+    tool_arguments: dict = Field(
+        default_factory=dict,
+        description=("Arguments for the tool if next_action is ACT. "
+                     "IMPORTANT: Use the exact parameter names from the tool's descriptor. "
+                     "For resolve_date, use 'relative_date_found' not 'date_text'.")
+    )
 
 
 class DecisioningAgent(BaseLLMAgent):
+    """Agent responsible for deciding the next action in the ReAct loop.
+
+    This agent evaluates the current context, plan, and history to determine
+    whether to continue planning, take an action, or finish and summarize.
+    """
+
+    MAX_ITERATIONS = 10
+
     def __init__(self, llm: LLMBroker):
-
-
+        """Initialize the decisioning agent.
+
+        Args:
+            llm: The LLM broker to use for generating decisions.
+        """
+        super().__init__(
+            llm,
+            ("You are a careful decision maker, "
+             "weighing the situation and making the best choice "
+             "based on the information available.")
+        )
         self.tools = [ResolveDateTool()]
 
-
-
-
-
-
+    def receive_event(self, event: Event) -> List[Event]:
+        """Process a decisioning event and determine the next action.
+
+        Args:
+            event: The decisioning event containing current context.
+
+        Returns:
+            List containing one of: InvokeToolCall, FinishAndSummarize,
+            InvokeThinking, or FailureOccurred event.
+        """
+        if not isinstance(event, InvokeDecisioning):
+            return []
 
-
-
+        # Check iteration limit
+        if event.context.iteration >= self.MAX_ITERATIONS:
+            return [FailureOccurred(
+                source=type(self),
+                context=event.context,
+                reason=f"Maximum iterations ({self.MAX_ITERATIONS}) exceeded",
+                correlation_id=event.correlation_id
+            )]
+
+        # Increment iteration counter
+        event.context.iteration += 1
+
+        prompt = self.prompt(event)
+        print(format_block(prompt))
+
+        try:
+            decision = self.llm.generate_object(
+                [LLMMessage(content=prompt)],
+                object_model=DecisionResponse
+            )
+            print(format_block(f"Decision: {decision}"))
+
+            # Route based on decision
+            if decision.next_action == NextAction.FINISH:
+                return [FinishAndSummarize(
+                    source=type(self),
+                    context=event.context,
+                    thought=decision.thought,
+                    correlation_id=event.correlation_id
+                )]
+
+            if decision.next_action == NextAction.ACT:
+                if not decision.tool_name:
+                    return [FailureOccurred(
+                        source=type(self),
+                        context=event.context,
+                        reason="ACT decision made but no tool specified",
+                        correlation_id=event.correlation_id
+                    )]
+
+                # Find the requested tool
+                tool = next(
+                    (t for t in self.tools
+                     if t.descriptor["function"]["name"] == decision.tool_name),
+                    None
+                )
+
+                if not tool:
+                    return [FailureOccurred(
+                        source=type(self),
+                        context=event.context,
+                        reason=f"Tool '{decision.tool_name}' not found",
+                        correlation_id=event.correlation_id
+                    )]
+
+                return [InvokeToolCall(
+                    source=type(self),
+                    context=event.context,
+                    thought=decision.thought,
+                    action=NextAction.ACT,
+                    tool=tool,
+                    tool_arguments=decision.tool_arguments,
+                    correlation_id=event.correlation_id
+                )]
+
+            # PLAN action - go back to thinking
+            return [InvokeThinking(
+                source=type(self),
+                context=event.context,
+                correlation_id=event.correlation_id
+            )]
+
+        except Exception as e:
+            return [FailureOccurred(
+                source=type(self),
+                context=event.context,
+                reason=f"Error during decision making: {str(e)}",
+                correlation_id=event.correlation_id
+            )]
+
+    def prompt(self, event: InvokeDecisioning):
+        """Generate the prompt for the decision-making LLM.
+
+        Args:
+            event: The decisioning event containing current context.
+
+        Returns:
+            The formatted prompt string.
+        """
+        return f"""
 You are to solve a problem by reasoning and acting on the information you have. Here is the current context:
 
 {format_current_context(event.context)}
 {format_available_tools(self.tools)}
 
 Your Instructions:
-
-
+Review the current plan and history. Decide what to do next:
+
+1. PLAN - If the plan is incomplete or needs refinement
+2. ACT - If you should take an action using one of the available tools
+3. FINISH - If you have enough information to answer the user's query
 
-
+If you choose ACT, specify which tool to use and what arguments to pass.
+Think carefully about whether each step in the plan has been completed.
+""".strip()
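Because `DecisionResponse` is an ordinary pydantic model, the routing above can be reasoned about (or unit-tested) without an LLM by constructing decisions directly. A small sketch using only field names and defaults from the diff; the sample text is invented and the import path is assumed:

```python
from _examples.react.agents.decisioning_agent import DecisionResponse   # assumed import path
from _examples.react.models.base import NextAction

# An ACT decision, shaped the way generate_object() is asked to produce it.
act = DecisionResponse(
    thought="The query mentions 'next Friday', so resolve that date first.",
    next_action=NextAction.ACT,
    tool_name="resolve_date",
    tool_arguments={"relative_date_found": "next Friday"},
)

# A FINISH decision needs only the reasoning; the tool fields fall back to their defaults.
done = DecisionResponse(thought="The date is now known.", next_action=NextAction.FINISH)
assert done.tool_name is None
assert done.tool_arguments == {}
```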
_examples/react/agents/summarization_agent.py
ADDED
@@ -0,0 +1,89 @@
+"""Summarization agent for the ReAct pattern.
+
+This agent generates the final answer based on accumulated context.
+"""
+from typing import List
+
+from mojentic.agents.base_llm_agent import BaseLLMAgent
+from mojentic.event import Event
+from mojentic.llm import LLMBroker
+from mojentic.llm.gateways.models import LLMMessage
+from mojentic.utils import format_block
+
+from ..formatters import format_current_context
+from ..models.events import FailureOccurred, FinishAndSummarize
+
+
+class SummarizationAgent(BaseLLMAgent):
+    """Agent responsible for generating the final answer.
+
+    This agent reviews the context, plan, and history to synthesize
+    a complete answer to the user's original query.
+    """
+
+    def __init__(self, llm: LLMBroker):
+        """Initialize the summarization agent.
+
+        Args:
+            llm: The LLM broker to use for generating summaries.
+        """
+        super().__init__(
+            llm,
+            ("You are a helpful assistant who provides clear, "
+             "accurate answers based on the information gathered.")
+        )
+
+    def receive_event(self, event: Event) -> List[Event]:
+        """Generate a final answer based on the context.
+
+        Args:
+            event: The finish event containing the complete context.
+
+        Returns:
+            Empty list (terminal event) or list with FailureOccurred on error.
+        """
+        if not isinstance(event, FinishAndSummarize):
+            return []
+
+        try:
+            prompt = self.prompt(event)
+            print(format_block(prompt))
+
+            response = self.llm.generate([LLMMessage(content=prompt)])
+
+            print("\n" + "=" * 80)
+            print("FINAL ANSWER:")
+            print("=" * 80)
+            print(response)
+            print("=" * 80 + "\n")
+
+            # This is a terminal event - return empty list to stop the loop
+            return []
+
+        except Exception as e:
+            return [FailureOccurred(
+                source=type(self),
+                context=event.context,
+                reason=f"Error during summarization: {str(e)}",
+                correlation_id=event.correlation_id
+            )]
+
+    def prompt(self, event: FinishAndSummarize):
+        """Generate the prompt for the summarization LLM.
+
+        Args:
+            event: The finish event containing the complete context.
+
+        Returns:
+            The formatted prompt string.
+        """
+        return f"""
+Based on the following context, provide a clear and concise answer to the user's query.
+
+{format_current_context(event.context)}
+
+Your task:
+Review what we've learned and provide a direct answer to: "{event.context.user_query}"
+
+Be specific and use the information gathered during our process.
+""".strip()
_examples/react/agents/thinking_agent.py
CHANGED
@@ -1,31 +1,100 @@
+"""Planning agent for the ReAct pattern.
+
+This agent creates structured plans for solving user queries.
+"""
+from typing import List
+
 from mojentic.agents.base_llm_agent import BaseLLMAgent
+from mojentic.event import Event
 from mojentic.llm import LLMBroker
 from mojentic.llm.gateways.models import LLMMessage
 from mojentic.llm.tools.date_resolver import ResolveDateTool
 from mojentic.utils import format_block
-from ..formatters import format_current_context, format_available_tools
 
+from ..formatters import format_available_tools, format_current_context
 from ..models.base import Plan, ThoughtActionObservation
-from ..models.events import
+from ..models.events import FailureOccurred, InvokeDecisioning, InvokeThinking
 
 
 class ThinkingAgent(BaseLLMAgent):
+    """Agent responsible for creating plans in the ReAct loop.
+
+    This agent analyzes the user query and available tools to create
+    a step-by-step plan for answering the query.
+    """
+
     def __init__(self, llm: LLMBroker):
-
-
+        """Initialize the thinking agent.
+
+        Args:
+            llm: The LLM broker to use for generating plans.
+        """
+        super().__init__(
+            llm,
+            ("You are a task coordinator, "
+             "who breaks down tasks into component steps "
+             "to be performed by others.")
+        )
         self.tools = [ResolveDateTool()]
 
-    def receive_event(self, event:
-
-
-
-
-
-
-
+    def receive_event(self, event: Event) -> List[Event]:
+        """Process a thinking event and generate a plan.
+
+        Args:
+            event: The thinking event containing current context.
+
+        Returns:
+            List containing InvokeDecisioning event with updated plan,
+            or FailureOccurred on error.
+        """
+        if not isinstance(event, InvokeThinking):
+            return []
+
+        try:
+            prompt = self.prompt(event)
+            print(format_block(prompt))
+
+            plan: Plan = self.llm.generate_object(
+                [LLMMessage(content=prompt)],
+                object_model=Plan
+            )
+            print(format_block(str(plan)))
+
+            # Update context with new plan
+            event.context.plan = plan
+
+            # Add planning step to history
+            event.context.history.append(
+                ThoughtActionObservation(
+                    thought="I need to create a plan to solve this query.",
+                    action="Created a step-by-step plan.",
+                    observation=f"Plan has {len(plan.steps)} steps."
+                )
+            )
+
+            return [InvokeDecisioning(
+                source=type(self),
+                context=event.context,
+                correlation_id=event.correlation_id
+            )]
+
+        except Exception as e:
+            return [FailureOccurred(
+                source=type(self),
+                context=event.context,
+                reason=f"Error during planning: {str(e)}",
+                correlation_id=event.correlation_id
+            )]
 
     def prompt(self, event: InvokeThinking):
+        """Generate the prompt for the planning LLM.
+
+        Args:
+            event: The thinking event containing current context.
+
+        Returns:
+            The formatted prompt string.
+        """
         return f"""
 You are to solve a problem by reasoning and acting on the information you have. Here is the current context:
 
@@ -33,5 +102,6 @@ You are to solve a problem by reasoning and acting on the information you have.
 {format_available_tools(self.tools)}
 
 Your Instructions:
-Given our context and what we've done so far, and the tools available, create a step-by-step plan to answer the query.
+Given our context and what we've done so far, and the tools available, create a step-by-step plan to answer the query.
+Each step should be concrete and actionable. Consider which tools you'll need to use.
 """.strip()
_examples/react/agents/tool_call_agent.py
ADDED
@@ -0,0 +1,83 @@
+"""Tool execution agent for the ReAct pattern.
+
+This agent handles the actual execution of tools and captures the results.
+"""
+from typing import List
+
+from mojentic.agents.base_agent import BaseAgent
+from mojentic.event import Event
+
+from ..models.base import ThoughtActionObservation
+from ..models.events import FailureOccurred, InvokeDecisioning, InvokeToolCall
+
+
+class ToolCallAgent(BaseAgent):
+    """Agent responsible for executing tool calls.
+
+    This agent receives tool call events, executes the specified tool,
+    and updates the context with the results before continuing to the
+    decisioning phase.
+    """
+
+    def receive_event(self, event: Event) -> List[Event]:
+        """Execute a tool and update the context.
+
+        Args:
+            event: The tool call event containing the tool and arguments.
+
+        Returns:
+            List containing InvokeDecisioning event with updated context,
+            or FailureOccurred on error.
+        """
+        if not isinstance(event, InvokeToolCall):
+            return []
+
+        try:
+            tool = event.tool
+            tool_name = tool.name
+            arguments = event.tool_arguments
+
+            print(f"\nExecuting tool: {tool_name}")
+            print(f"Arguments: {arguments}")
+
+            # Execute the tool using call_tool method
+            result = tool.call_tool(
+                correlation_id=event.correlation_id,
+                **arguments
+            )
+
+            print(f"Result: {result}")
+
+            # Extract the text content from the result
+            result_text = result
+            if isinstance(result, dict) and "content" in result:
+                # Extract text from content array
+                content_items = result["content"]
+                if content_items and isinstance(content_items, list):
+                    result_text = content_items[0].get("text", str(result))
+
+            # Add to history
+            event.context.history.append(
+                ThoughtActionObservation(
+                    thought=event.thought,
+                    action=f"Called {tool_name} with {arguments}",
+                    observation=str(result_text)
+                )
+            )
+
+            # Continue to decisioning
+            return [InvokeDecisioning(
+                source=type(self),
+                context=event.context,
+                correlation_id=event.correlation_id
+            )]
+
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            return [FailureOccurred(
+                source=type(self),
+                context=event.context,
+                reason=f"Tool execution failed: {str(e)}",
+                correlation_id=event.correlation_id
+            )]
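The result-handling branch in `receive_event` accepts either a plain return value or a dict carrying a `content` list of text items. That behaviour can be isolated in a few lines; the following is a standalone restatement of the logic shown above with invented sample payloads, not additional package code:

```python
def extract_result_text(result):
    """Mirror the normalization performed in ToolCallAgent.receive_event."""
    result_text = result
    if isinstance(result, dict) and "content" in result:
        content_items = result["content"]
        if content_items and isinstance(content_items, list):
            result_text = content_items[0].get("text", str(result))
    return str(result_text)

# Plain values pass through unchanged...
assert extract_result_text("2025-06-06") == "2025-06-06"
# ...while dict results with a content array collapse to their first text entry.
assert extract_result_text({"content": [{"text": "2025-06-06"}]}) == "2025-06-06"
```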
_examples/react/formatters.py
CHANGED
@@ -1,7 +1,20 @@
+"""Formatting utilities for the ReAct pattern implementation.
+
+This module provides helper functions for formatting context and tool information
+into human-readable strings for LLM prompts.
+"""
 from .models.base import CurrentContext
 
 
-def format_current_context(context: CurrentContext):
+def format_current_context(context: CurrentContext) -> str:
+    """Format the current context into a readable string.
+
+    Args:
+        context: The current context containing query, plan, and history.
+
+    Returns:
+        A formatted multi-line string describing the current context.
+    """
     user_query = f"The user has asked us to answer the following query:\n> {context.user_query}\n"
 
     plan = "You have not yet made a plan.\n"
@@ -12,7 +25,7 @@ def format_current_context(context: CurrentContext):
 
     history = "No steps have yet been taken.\n"
     if context.history:
-        history = "What's been done so far
+        history = "What's been done so far:\n"
         history += "\n".join(
             f"{i + 1}.\n  Thought: {step.thought}\n  Action: {step.action}\n  Observation: {step.observation}"
             for i, step in enumerate(context.history))
@@ -20,11 +33,32 @@ def format_current_context(context: CurrentContext):
 
     return f"Current Context:\n{user_query}{plan}{history}\n"
 
-
+
+def format_available_tools(tools) -> str:
+    """Format the available tools into a readable list.
+
+    Args:
+        tools: A list of tool objects with descriptor dictionaries.
+
+    Returns:
+        A formatted string listing available tools and their descriptions.
+    """
     output = ""
     if tools:
         output += "Tools available:\n"
         for tool in tools:
-
+            func_descriptor = tool.descriptor['function']
+            output += f"- {func_descriptor['name']}: {func_descriptor['description']}\n"
+
+            # Add parameter information
+            if 'parameters' in func_descriptor:
+                params = func_descriptor['parameters']
+                if 'properties' in params:
+                    output += "    Parameters:\n"
+                    for param_name, param_info in params['properties'].items():
+                        param_desc = param_info.get('description', '')
+                        is_required = param_name in params.get('required', [])
+                        req_str = " (required)" if is_required else " (optional)"
+                        output += f"      - {param_name}{req_str}: {param_desc}\n"
 
     return output
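`format_available_tools` reads only the OpenAI-style `descriptor` dict each tool exposes, so its output can be previewed with a stub tool. The descriptor below is illustrative (it is not copied from `ResolveDateTool`), and the import path is assumed:

```python
from _examples.react.formatters import format_available_tools   # assumed import path


class StubTool:
    # Minimal stand-in exposing the descriptor shape the formatter reads.
    descriptor = {
        "function": {
            "name": "resolve_date",
            "description": "Resolve a relative date expression to a calendar date.",
            "parameters": {
                "type": "object",
                "properties": {
                    "relative_date_found": {"description": "The relative date text, e.g. 'next Friday'."},
                },
                "required": ["relative_date_found"],
            },
        }
    }


print(format_available_tools([StubTool()]))
# Tools available:
# - resolve_date: Resolve a relative date expression to a calendar date.
#     Parameters:
#       - relative_date_found (required): The relative date text, e.g. 'next Friday'.
```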
_examples/react/models/base.py
CHANGED
@@ -1,29 +1,78 @@
+"""Base data models for the ReAct pattern.
+
+This module defines the core data structures used throughout the ReAct
+implementation, including actions, plans, observations, and context.
+"""
 from enum import Enum
 from typing import List
 
-from pydantic import
+from pydantic import BaseModel, Field
 
 
 class NextAction(str, Enum):
+    """Enumeration of possible next actions in the ReAct loop."""
+
     PLAN = "PLAN"
     ACT = "ACT"
     FINISH = "FINISH"
 
 
 class ThoughtActionObservation(BaseModel):
-
-
-
+    """A single step in the ReAct loop capturing thought, action, and observation.
+
+    This model represents one iteration of the ReAct pattern where the agent:
+    1. Thinks about what to do
+    2. Takes an action
+    3. Observes the result
+    """
+
+    thought: str = Field(
+        ...,
+        description="The thought process behind the action taken in the current context."
+    )
+    action: str = Field(
+        ...,
+        description="The action taken in the current context."
+    )
+    observation: str = Field(
+        ...,
+        description="The observation made after the action taken in the current context."
+    )
 
 
 class Plan(BaseModel):
-
-
+    """A structured plan for solving a user query.
+
+    Contains a list of steps that outline how to approach answering the query.
+    """
+
+    steps: List[str] = Field(
+        [],
+        description="How to answer the query, step by step, each step outlining an action to take."
+    )
 
 
 class CurrentContext(BaseModel):
-
-
-
-
-
+    """The complete context for a ReAct session.
+
+    This model tracks everything needed to maintain state throughout the
+    reasoning and acting loop, including the user's query, the plan,
+    the history of actions, and the iteration count.
+    """
+
+    user_query: str = Field(
+        ...,
+        description="The user query to which we are responding."
+    )
+    plan: Plan = Field(
+        Plan(steps=[]),
+        description="The current plan of action for the current context."
+    )
+    history: List[ThoughtActionObservation] = Field(
+        [],
+        description="The history of actions taken and observations made in the current context."
+    )
+    iteration: int = Field(
+        0,
+        description="The number of iterations taken in the current context."
+    )