quantalogic 0.80-py3-none-any.whl → 0.93-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/flow/__init__.py +16 -34
- quantalogic/main.py +11 -6
- quantalogic/tools/tool.py +8 -922
- quantalogic-0.93.dist-info/METADATA +475 -0
- {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/RECORD +8 -54
- quantalogic/codeact/TODO.md +0 -14
- quantalogic/codeact/__init__.py +0 -0
- quantalogic/codeact/agent.py +0 -478
- quantalogic/codeact/cli.py +0 -50
- quantalogic/codeact/cli_commands/__init__.py +0 -0
- quantalogic/codeact/cli_commands/create_toolbox.py +0 -45
- quantalogic/codeact/cli_commands/install_toolbox.py +0 -20
- quantalogic/codeact/cli_commands/list_executor.py +0 -15
- quantalogic/codeact/cli_commands/list_reasoners.py +0 -15
- quantalogic/codeact/cli_commands/list_toolboxes.py +0 -47
- quantalogic/codeact/cli_commands/task.py +0 -215
- quantalogic/codeact/cli_commands/tool_info.py +0 -24
- quantalogic/codeact/cli_commands/uninstall_toolbox.py +0 -43
- quantalogic/codeact/config.yaml +0 -21
- quantalogic/codeact/constants.py +0 -9
- quantalogic/codeact/events.py +0 -85
- quantalogic/codeact/examples/README.md +0 -342
- quantalogic/codeact/examples/agent_sample.yaml +0 -29
- quantalogic/codeact/executor.py +0 -186
- quantalogic/codeact/history_manager.py +0 -94
- quantalogic/codeact/llm_util.py +0 -57
- quantalogic/codeact/plugin_manager.py +0 -92
- quantalogic/codeact/prompts/error_format.j2 +0 -11
- quantalogic/codeact/prompts/generate_action.j2 +0 -77
- quantalogic/codeact/prompts/generate_program.j2 +0 -52
- quantalogic/codeact/prompts/response_format.j2 +0 -11
- quantalogic/codeact/react_agent.py +0 -318
- quantalogic/codeact/reasoner.py +0 -185
- quantalogic/codeact/templates/toolbox/README.md.j2 +0 -10
- quantalogic/codeact/templates/toolbox/pyproject.toml.j2 +0 -16
- quantalogic/codeact/templates/toolbox/tools.py.j2 +0 -6
- quantalogic/codeact/templates.py +0 -7
- quantalogic/codeact/tools_manager.py +0 -258
- quantalogic/codeact/utils.py +0 -62
- quantalogic/codeact/xml_utils.py +0 -126
- quantalogic/flow/flow.py +0 -1070
- quantalogic/flow/flow_extractor.py +0 -783
- quantalogic/flow/flow_generator.py +0 -322
- quantalogic/flow/flow_manager.py +0 -676
- quantalogic/flow/flow_manager_schema.py +0 -287
- quantalogic/flow/flow_mermaid.py +0 -365
- quantalogic/flow/flow_validator.py +0 -479
- quantalogic/flow/flow_yaml.linkedin.md +0 -31
- quantalogic/flow/flow_yaml.md +0 -767
- quantalogic/flow/templates/prompt_check_inventory.j2 +0 -1
- quantalogic/flow/templates/system_check_inventory.j2 +0 -1
- quantalogic-0.80.dist-info/METADATA +0 -900
- {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/LICENSE +0 -0
- {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/WHEEL +0 -0
- {quantalogic-0.80.dist-info → quantalogic-0.93.dist-info}/entry_points.txt +0 -0
quantalogic/codeact/llm_util.py
DELETED
@@ -1,57 +0,0 @@
-from typing import Callable, List, Optional
-
-import litellm
-
-
-async def litellm_completion(
-    model: str,
-    messages: List[dict],
-    max_tokens: int,
-    temperature: float,
-    stream: bool = False,
-    step: Optional[int] = None,
-    notify_event: Optional[Callable] = None,
-    **kwargs
-) -> str:
-    """A wrapper for litellm.acompletion that supports streaming and non-streaming modes."""
-    from .events import StreamTokenEvent
-
-    if stream:
-        if notify_event is None:
-            raise ValueError("notify_event callback is required when streaming is enabled.")
-
-        full_response = ""
-        try:
-            response = await litellm.acompletion(
-                model=model,
-                messages=messages,
-                max_tokens=max_tokens,
-                temperature=temperature,
-                stream=True,
-                **kwargs
-            )
-            async for chunk in response:
-                if chunk.choices[0].delta.content:
-                    token = chunk.choices[0].delta.content
-                    full_response += token
-                    await notify_event(StreamTokenEvent(
-                        event_type="StreamToken",
-                        token=token,
-                        step_number=step
-                    ))
-            return full_response
-        except Exception as e:
-            raise Exception(f"Streaming completion failed: {e}")
-    else:
-        try:
-            response = await litellm.acompletion(
-                model=model,
-                messages=messages,
-                max_tokens=max_tokens,
-                temperature=temperature,
-                stream=False,
-                **kwargs
-            )
-            return response.choices[0].message.content
-        except Exception as e:
-            raise Exception(f"Completion failed: {e}")
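
For reference, the deleted litellm_completion helper wrapped litellm.acompletion behind a single async call. A minimal usage sketch, assuming the module were still importable from quantalogic.codeact.llm_util; the model id and the print_token callback are illustrative placeholders, not part of the package:

import asyncio

from quantalogic.codeact.llm_util import litellm_completion


async def print_token(event) -> None:
    # Illustrative observer: StreamTokenEvent exposes the emitted token.
    print(event.token, end="", flush=True)


async def demo() -> str:
    messages = [{"role": "user", "content": "Say hello in one word."}]
    # Non-streaming: returns the full completion text in one piece.
    text = await litellm_completion(
        model="gpt-4o-mini",  # placeholder model id
        messages=messages,
        max_tokens=50,
        temperature=0.2,
    )
    # Streaming: requires a notify_event callback for per-token events.
    streamed = await litellm_completion(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=50,
        temperature=0.2,
        stream=True,
        step=1,
        notify_event=print_token,
    )
    return text + "\n" + streamed


asyncio.run(demo())

In streaming mode the wrapper pushes each token to the callback as a StreamTokenEvent and still returns the accumulated text, so callers get the same return value either way.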
quantalogic/codeact/plugin_manager.py
DELETED
@@ -1,92 +0,0 @@
-"""Plugin management module for dynamically loading components."""
-
-from importlib.metadata import entry_points
-from typing import List
-
-from loguru import logger
-
-from quantalogic.tools import Tool
-
-from .executor import Executor
-from .reasoner import Reasoner
-from .tools_manager import ToolRegistry
-
-
-class PluginManager:
-    """Manages dynamic loading of plugins for tools, reasoners, executors, and CLI commands."""
-    _instance = None
-
-    def __new__(cls):
-        if cls._instance is None:
-            cls._instance = super(PluginManager, cls).__new__(cls)
-            cls._instance.tools = ToolRegistry()
-            cls._instance.reasoners = {"default": Reasoner}
-            cls._instance.executors = {"default": Executor}
-            cls._instance.cli_commands = {}
-            cls._instance._plugins_loaded = False
-        return cls._instance
-
-    def load_plugins(self, force: bool = False) -> None:
-        """Load all plugins from registered entry points, handling duplicates gracefully.
-
-        Args:
-            force: If True, reload plugins even if they were already loaded
-        """
-        if self._plugins_loaded and not force:
-            logger.debug("Plugins already loaded, skipping entire load process")
-            return
-
-        # Clear existing plugins only if forcing reload
-        if force:
-            logger.info("Forcing plugin reload, clearing existing registrations")
-            self.tools = ToolRegistry()
-            self.reasoners = {"default": Reasoner}
-            self.executors = {"default": Executor}
-            self.cli_commands = {}
-            self._plugins_loaded = False
-
-        logger.debug("Loading plugins")
-        for group, store in [
-            ("quantalogic.tools", self.tools.load_toolboxes),
-            ("quantalogic.reasoners", self.reasoners),
-            ("quantalogic.executors", self.executors),
-            ("quantalogic.cli", self.cli_commands),
-        ]:
-            try:
-                eps = entry_points(group=group)
-                logger.debug(f"Found {len(eps)} entry points for group {group}")
-                for ep in eps:
-                    try:
-                        loaded = ep.load()
-                        if group == "quantalogic.tools":
-                            # Load static tools first
-                            store()
-                            # Then initialize dynamic MCP tools if applicable
-                            if ep.name == "quantalogic_toolbox_mcp":
-                                import asyncio
-
-                                from quantalogic_toolbox_mcp.tools import initialize_toolbox
-                                asyncio.run(initialize_toolbox(self.tools))
-                        else:
-                            store[ep.name] = loaded
-                        logger.info(f"Loaded plugin {ep.name} for {group}")
-                    except Exception as e:
-                        logger.warning(f"Skipping plugin {ep.name} for {group} due to error: {e}")
-            except Exception as e:
-                logger.error(f"Failed to retrieve entry points for {group}: {e}")
-        self._plugins_loaded = True
-        logger.info("Plugin loading completed")
-
-    def get_tools(self, force_reload: bool = False) -> List[Tool]:
-        """Return all registered tools.
-
-        Args:
-            force_reload: If True, reload plugins before getting tools
-        """
-        if force_reload:
-            self.load_plugins(force=True)
-        else:
-            # Ensure plugins are loaded at least once, but don’t reload
-            if not self._plugins_loaded:
-                self.load_plugins()
-        return self.tools.get_tools()
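
The deleted PluginManager was a process-wide singleton that pulled tools, reasoners, executors, and CLI commands from the setuptools entry-point groups quantalogic.tools, quantalogic.reasoners, quantalogic.executors, and quantalogic.cli. A minimal usage sketch, assuming the module were still available at quantalogic.codeact.plugin_manager:

from quantalogic.codeact.plugin_manager import PluginManager

# Singleton: constructing it twice returns the same instance.
pm = PluginManager()

# Load everything advertised under the quantalogic.* entry-point groups;
# plugins that were already loaded are skipped unless force=True.
pm.load_plugins()

# get_tools() lazily calls load_plugins() once if needed and returns Tool objects.
for tool in pm.get_tools():
    print(tool)

A toolbox package would have advertised itself under the quantalogic.tools group in its packaging metadata (for example via a [project.entry-points."quantalogic.tools"] table in pyproject.toml), which is what the entry_points(group=...) lookup above discovers.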
quantalogic/codeact/prompts/error_format.j2
DELETED
@@ -1,11 +0,0 @@
-<Action>
-<Thought>Failed to generate a valid action due to: {{ error }}</Thought>
-<Error>
-<Message>{{ error }}</Message>
-</Error>
-<Code><![CDATA[
-import asyncio
-async def main():
-    print("Error: Action generation failed")
-]]></Code>
-</Action>
quantalogic/codeact/prompts/generate_action.j2
DELETED
@@ -1,77 +0,0 @@
-Before generating the code, follow these steps:
-
-1. **Review the History**: Examine the <History> section below to understand previous thoughts, actions, and results.
-2. **Identify Available Variables**: Check the 'Currently available variables' list and 'Available variables' in the history. These are stored in the `context_vars` dictionary and can be accessed using `context_vars.get('variable_name', default_value)`.
-3. **Understand Variable Types**: Variables in `context_vars` are objects returned by tools, not just their string representations. Refer to tool documentation (e.g., `create_project_plan` returns `PlanResult` with `task_id`, `task_description`, `subtasks`) to access attributes directly (e.g., `plan.task_id`) rather than parsing strings.
-4. **Use Previous Results**: Incorporate relevant variables from prior steps to avoid redundant calculations and ensure continuity. For example, if `step1_plan` is a `PlanResult`, retrieve it with `plan = context_vars.get('step1_plan')` and use `plan.task_id`.
-5. **Plan Your Approach**: Based on the history and variables, determine the next logical step toward solving the task.
-
-Currently available variables from previous steps: {{ available_vars | join(', ') }}
-
-Solve the following task:
-
-<Task>
-{{ task }}
-</Task>
-
-This is step {{ current_step }} out of {{ max_iterations }} allowed steps.
-
-Previous steps:
-
-<History>
-{{ history_str }}
-</History>
-
-Task:
-
-Generate a Python program with an async main() function that uses the available tools to take the next step toward solving the task.
-
-Essential Requirements:
-
-- **Always check the 'Available variables' from the history** before proceeding. These are listed in the <History> section and stored in the `context_vars` dictionary as objects. Access them using `context_vars.get("variable_name", default_value)` to retrieve results from previous steps.
-- **Handle Variable Types Correctly**: Variables in `context_vars` are the actual objects returned by tools (e.g., `PlanResult` from `create_project_plan`). Use attribute access (e.g., `plan.task_id`) instead of string methods like `split()` unless the variable is explicitly a string. Check tool documentation for return types.
-- **Build upon previous steps** by using these variables when they are relevant. This avoids redundant work and ensures continuity. For example, if a previous step stored a `PlanResult` as `step1_plan`, retrieve it with `plan = context_vars.get('step1_plan')` and use `plan.task_id`.
-- When defining new variables, **prefix them with 'step{{ current_step }}_'** to ensure uniqueness (e.g., `step{{ current_step }}_result`). This prevents overwriting variables from earlier steps.
-- Return `"Task completed: [final answer]"` if the task is fully solved in this step.
-- Otherwise, return intermediate results as a string to be stored for future steps.
-- Handle potential errors in tool arguments gracefully, using try/except blocks if needed.
-- Use proper async/await syntax for all asynchronous operations.
-
-Variable Naming Convention:
-- Prefix all new variables with 'step{{ current_step }}_' (e.g., `step{{ current_step }}_result = ...`).
-- This ensures variables are unique across steps and traceable to their origin.
-
-Example of using variables from previous steps:
-
-Task: "Retrieve the plan from the previous step"
-History:
-"===== Step 1 of 5 max =====
-Thought: Create a project plan
-Action: <code>import asyncio\nasync def main():\n step1_plan = await quantalogic_planning_toolbox.create_project_plan(task_description='Write an article')\n return f'{step1_plan}'</code>
-Result: task_id='abc123' task_description='Write an article' subtasks=[...]
-Available variables: step1_plan"
-
-Code:
-import asyncio
-
-async def main():
-    # Retrieve previous plan as an object, not a string
-    step1_plan = context_vars.get('step1_plan')
-    if step1_plan:
-        step2_task_id: str = step1_plan.task_id # Access attribute directly
-        step2_retrieved_plan = await quantalogic_planning_toolbox.retrieve_project_plan(task_id=step2_task_id)
-        return f"Retrieved plan: {step2_retrieved_plan}"
-    return "Error: No plan found from previous step"
-
-Error Reflection:
-- If there were errors in previous steps, review the error messages in the <History> section and adjust your code to avoid similar issues.
-- For example, if a variable was missing, check `context_vars` with a default value.
-- If a tool call failed, verify the arguments and ensure they match the tool's requirements.
-- If an error like "'PlanResult' object has no attribute 'split'" occurred, it means you tried to treat an object as a string. Use attribute access instead (e.g., `.task_id`).
-
-The program must:
-
-1. Use only the provided async tools
-2. Follow a subset of Python 3.10+ syntax, doesn't allow unsafe operations such as eval() or exec()
-3. Include type annotations for variables, give very explicit names to variables
-4. Output progress information
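
generate_action.j2 above is an ordinary Jinja2 template. A minimal rendering sketch, assuming the .j2 files are available in a local prompts/ directory; the variable values are illustrative only:

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("prompts"))
template = env.get_template("generate_action.j2")

prompt = template.render(
    task="Summarize the latest report",
    history_str="===== Step 1 of 5 max =====\nThought: ...\nResult: ...",
    current_step=2,
    max_iterations=5,
    available_vars=["step1_summary"],  # rendered through the join(', ') filter
)
print(prompt)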
quantalogic/codeact/prompts/generate_program.j2
DELETED
@@ -1,52 +0,0 @@
-You are a Python code generator. Your task is to create a Python program that solves the following task:
-
-"{{ task_description }}"
-
-You have access to the following pre-defined async tool functions, grouped by toolbox:
-
-{% for toolbox_name, docstrings in tools_by_toolbox.items() %}
-### {{ toolbox_name }}
-{% for docstring in docstrings %}
-{{ docstring }}
-
-{% endfor %}
-{% endfor %}
-
-If applicable, use tools to assess the situation and generate a Python program that solves the task step by step.
-
-If applicable, use tools to verify the task is completed.
-
-Instructions:
-1. Generate a very simple Python program, avoid complex logic, return the program as a single string. No more than 3 functions called.
-2. Include only the import for asyncio (import asyncio).
-3. Define an async function named main() that solves the task.
-4. Use the pre-defined tool functions by calling them with await and prefixing them with their toolbox name (e.g., `await {{ toolbox_name }}.tool_name(arg1, arg2)`). For core tools, use `default.tool_name` (e.g., `await default.agent_tool(...)`).
-5. Do not redefine the tool functions within the program; assume they are already available in the namespace under their toolbox names (e.g., `default.agent_tool` for core tools).
-6. Return the program as a plain string (no markdown or extra text).
-7. Strictly exclude asyncio.run(main()) or any code outside the main() function definition, including any 'if __name__ == "__main__":' block, as the runtime will handle execution of main().
-8. Express all string variables as multiline strings, always start a string at the beginning of a line.
-9. Always return a string from main(); use "Task completed: [result]" if the task is solved, otherwise return intermediate results.
-10. Access variables from previous steps using the `context_vars` dictionary:
-    - Use `context_vars.get("variable_name", default_value)` to safely retrieve variables (e.g., `previous_sum = context_vars.get("step1_sum", 0)`).
-    - Always specify a default value to handle cases where the variable might not exist.
-    - Check the history for 'Available variables' to identify relevant previous results.
-    - Use these variables to build on prior work rather than starting from scratch.
-11. Be careful to avoid programs that cannot terminate.
-12. When defining new variables, prefix them with 'step<current_step>_' (e.g., `step1_result`) to ensure uniqueness across steps.
-13. Never use dangerous functions like eval, or any other unsafe operations.
-14. If a return result is tool complex use an intermediate result to store it.
-15. VERY IMPORTANT: If the return type of a function is Any or not specified don't call another function after this just return the result, the result will be handled by the runtime.
-
-Example task: "Translate the poem 'The sun rises' into Spanish using the agent_tool"
-Example output:
-import asyncio
-
-async def main():
-    step1_poem: str = "The sun rises"
-    step1_system_prompt: str = "You are a translation expert."
-    step1_translation: str = await default.agent_tool(
-        system_prompt=step1_system_prompt,
-        prompt=f"Translate '{step1_poem}' into Spanish",
-        temperature=0.7
-    )
-    return f"Task completed: {step1_translation}"
quantalogic/codeact/prompts/response_format.j2
DELETED
@@ -1,11 +0,0 @@
-<Action>
-<Thought>Generated a Python program to take the next step toward solving: {{ task }} Based on history: {{ history_str }}</Thought>
-<Code><![CDATA[
-{{ program }}
-]]></Code>
-<Metadata>
-<StepNumber>{{ current_step }}</StepNumber>
-<MaxIterations>{{ max_iterations }}</MaxIterations>
-<Task>{{ task }}</Task>
-</Metadata>
-</Action>
quantalogic/codeact/react_agent.py
DELETED
@@ -1,318 +0,0 @@
-"""Core implementation of the ReAct framework for reasoning and acting."""
-
-import asyncio
-import time
-from typing import Callable, Dict, List, Optional, Tuple
-
-from loguru import logger
-from lxml import etree
-
-from quantalogic.tools import Tool
-
-from .events import (
-    ActionExecutedEvent,
-    ActionGeneratedEvent,
-    ErrorOccurredEvent,
-    StepCompletedEvent,
-    StepStartedEvent,
-    TaskCompletedEvent,
-    TaskStartedEvent,
-    ThoughtGeneratedEvent,
-)
-from .executor import BaseExecutor, Executor
-from .history_manager import HistoryManager
-from .llm_util import litellm_completion
-from .reasoner import BaseReasoner, Reasoner
-from .tools_manager import ToolRegistry
-from .xml_utils import XMLResultHandler
-
-
-class ReActAgent:
-    """Implements the ReAct framework for reasoning and acting with enhanced memory management."""
-
-    def __init__(
-        self,
-        model: str,
-        tools: List[Tool],
-        max_iterations: int = 5,
-        max_history_tokens: int = 2000,
-        system_prompt: str = "", # New parameter for persistent context
-        task_description: str = "", # New parameter for persistent context
-        reasoner: Optional[BaseReasoner] = None,
-        executor: Optional[BaseExecutor] = None,
-        tool_registry: Optional[ToolRegistry] = None,
-        history_manager: Optional[HistoryManager] = None,
-        error_handler: Optional[Callable[[Exception, int], bool]] = None
-    ) -> None:
-        """
-        Initialize the ReActAgent with tools, reasoning, execution, and memory components.
-
-        Args:
-            model (str): Language model identifier.
-            tools (List[Tool]): List of available tools.
-            max_iterations (int): Maximum reasoning steps (default: 5).
-            max_history_tokens (int): Max tokens for history (default: 2000).
-            system_prompt (str): Persistent system instructions (default: "").
-            task_description (str): Persistent task context (default: "").
-            reasoner (Optional[BaseReasoner]): Custom reasoner instance.
-            executor (Optional[BaseExecutor]): Custom executor instance.
-            tool_registry (Optional[ToolRegistry]): Custom tool registry.
-            history_manager (Optional[HistoryManager]): Custom history manager.
-            error_handler (Optional[Callable[[Exception, int], bool]]): Error handler callback.
-        """
-        self.tool_registry = tool_registry or ToolRegistry()
-        for tool in tools:
-            self.tool_registry.register(tool)
-        self.reasoner: BaseReasoner = reasoner or Reasoner(model, self.tool_registry.get_tools())
-        self.executor: BaseExecutor = executor or Executor(self.tool_registry.get_tools(), notify_event=self._notify_observers)
-        self.max_iterations: int = max_iterations
-        self.max_history_tokens: int = max_history_tokens
-        self.history_manager: HistoryManager = history_manager or HistoryManager(
-            max_tokens=max_history_tokens,
-            system_prompt=system_prompt,
-            task_description=task_description
-        )
-        self.context_vars: Dict = {}
-        self._observers: List[Tuple[Callable, List[str]]] = []
-        self.error_handler = error_handler or (lambda e, step: False) # Default: no retry
-
-    def add_observer(self, observer: Callable, event_types: List[str]) -> 'ReActAgent':
-        """Add an observer for specific event types."""
-        self._observers.append((observer, event_types))
-        return self
-
-    async def _notify_observers(self, event: object) -> None:
-        """Notify all subscribed observers of an event."""
-        await asyncio.gather(
-            *(observer(event) for observer, types in self._observers if event.event_type in types),
-            return_exceptions=True
-        )
-
-    async def generate_action(
-        self,
-        task: str,
-        history: List[Dict],
-        step: int,
-        max_iterations: int,
-        system_prompt: Optional[str] = None,
-        streaming: bool = False
-    ) -> str:
-        """
-        Generate an action using the Reasoner, passing available variables.
-
-        Args:
-            task (str): The task to address.
-            history (List[Dict]): Stored step history.
-            step (int): Current step number.
-            max_iterations (int): Maximum allowed steps.
-            system_prompt (Optional[str]): Override system prompt (optional).
-            streaming (bool): Whether to stream the response.
-
-        Returns:
-            str: Generated action in XML format.
-        """
-        history_str: str = self.history_manager.format_history(max_iterations)
-        available_vars: List[str] = list(self.context_vars.keys())
-        start: float = time.perf_counter()
-        response: str = await self.reasoner.generate_action(
-            task,
-            history_str,
-            step,
-            max_iterations,
-            system_prompt or self.history_manager.system_prompt,
-            self._notify_observers,
-            streaming=streaming,
-            available_vars=available_vars
-        )
-        thought, code = XMLResultHandler.parse_action_response(response)
-        gen_time: float = time.perf_counter() - start
-        await self._notify_observers(ThoughtGeneratedEvent(
-            event_type="ThoughtGenerated", step_number=step, thought=thought, generation_time=gen_time
-        ))
-        await self._notify_observers(ActionGeneratedEvent(
-            event_type="ActionGenerated", step_number=step, action_code=code, generation_time=gen_time
-        ))
-        if not response.endswith("</Code>"):
-            logger.warning(f"Response might be truncated at step {step}")
-        return response
-
-    async def execute_action(self, code: str, step: int, timeout: int = 300) -> str:
-        """
-        Execute an action using the Executor.
-
-        Args:
-            code (str): Code to execute.
-            step (int): Current step number.
-            timeout (int): Execution timeout in seconds (default: 300).
-
-        Returns:
-            str: Execution result in XML format.
-        """
-        start: float = time.perf_counter()
-        result_xml: str = await self.executor.execute_action(code, self.context_vars, step, timeout)
-        execution_time: float = time.perf_counter() - start
-        await self._notify_observers(ActionExecutedEvent(
-            event_type="ActionExecuted", step_number=step, result_xml=result_xml, execution_time=execution_time
-        ))
-        return result_xml
-
-    async def is_task_complete(self, task: str, history: List[Dict], result: str, success_criteria: Optional[str]) -> Tuple[bool, str]:
-        """
-        Check if the task is complete based on the result.
-
-        Args:
-            task (str): The task being solved.
-            history (List[Dict]): Step history.
-            result (str): Result of the latest action.
-            success_criteria (Optional[str]): Optional success criteria.
-
-        Returns:
-            Tuple[bool, str]: (is_complete, final_answer).
-        """
-        try:
-            root = etree.fromstring(result)
-            if root.findtext("Completed") == "true":
-                final_answer: str = root.findtext("FinalAnswer") or ""
-                verification: str = await litellm_completion(
-                    model=self.reasoner.model,
-                    messages=[{
-                        "role": "user",
-                        "content": f"Does '{final_answer}' solve '{task}' given history:\n{self.history_manager.format_history(self.max_iterations)}?"
-                    }],
-                    max_tokens=100,
-                    temperature=0.1,
-                    stream=False
-                )
-                if verification and "yes" in verification.lower():
-                    return True, final_answer
-                return True, final_answer
-        except etree.XMLSyntaxError:
-            pass
-
-        if success_criteria and (result_value := XMLResultHandler.extract_result_value(result)) and success_criteria in result_value:
-            return True, result_value
-        return False, ""
-
-    async def _run_step(self, task: str, step: int, max_iters: int,
-                        system_prompt: Optional[str], streaming: bool) -> Dict:
-        """
-        Execute a single step of the ReAct loop with retry logic.
-
-        Args:
-            task (str): The task to address.
-            step (int): Current step number.
-            max_iters (int): Maximum allowed steps.
-            system_prompt (Optional[str]): System prompt override.
-            streaming (bool): Whether to stream responses.
-
-        Returns:
-            Dict: Step data (step_number, thought, action, result).
-        """
-        await self._notify_observers(StepStartedEvent(
-            event_type="StepStarted",
-            step_number=step,
-            system_prompt=self.history_manager.system_prompt,
-            task_description=self.history_manager.task_description
-        ))
-        for attempt in range(3):
-            try:
-                response: str = await self.generate_action(task, self.history_manager.store, step, max_iters, system_prompt, streaming)
-                thought, code = XMLResultHandler.parse_action_response(response)
-                result: str = await self.execute_action(code, step)
-                step_data = {"step_number": step, "thought": thought, "action": code, "result": result}
-                self.history_manager.add_step(step_data)
-                return step_data
-            except Exception as e:
-                if not self.error_handler(e, step) or attempt == 2:
-                    await self._notify_observers(ErrorOccurredEvent(
-                        event_type="ErrorOccurred", error_message=str(e), step_number=step
-                    ))
-                    raise
-                await asyncio.sleep(2 ** attempt) # Exponential backoff
-
-    async def _finalize_step(self, task: str, step_data: Dict,
-                             success_criteria: Optional[str]) -> Tuple[bool, Dict]:
-        """
-        Check completion and notify observers for a step.
-
-        Args:
-            task (str): The task being solved.
-            step_data (Dict): Current step data.
-            success_criteria (Optional[str]): Optional success criteria.
-
-        Returns:
-            Tuple[bool, Dict]: (is_complete, updated_step_data).
-        """
-        is_complete, final_answer = await self.is_task_complete(task, self.history_manager.store, step_data["result"], success_criteria)
-        if is_complete:
-            try:
-                root = etree.fromstring(step_data["result"])
-                if root.find("FinalAnswer") is None:
-                    final_answer_elem = etree.Element("FinalAnswer")
-                    final_answer_elem.text = etree.CDATA(final_answer)
-                    root.append(final_answer_elem)
-                    step_data["result"] = etree.tostring(root, pretty_print=True, encoding="unicode")
-            except etree.XMLSyntaxError as e:
-                logger.error(f"Failed to parse result XML for appending FinalAnswer: {e}")
-                if "<FinalAnswer>" not in step_data["result"]:
-                    step_data["result"] += f"\n<FinalAnswer><![CDATA[\n{final_answer}\n]]></FinalAnswer>"
-        await self._notify_observers(StepCompletedEvent(
-            event_type="StepCompleted", step_number=step_data["step_number"],
-            thought=step_data["thought"], action=step_data["action"], result=step_data["result"],
-            is_complete=is_complete, final_answer=final_answer if is_complete else None
-        ))
-        return is_complete, step_data
-
-    async def solve(
-        self,
-        task: str,
-        success_criteria: Optional[str] = None,
-        system_prompt: Optional[str] = None,
-        max_iterations: Optional[int] = None,
-        streaming: bool = False
-    ) -> List[Dict]:
-        """
-        Solve a task using the ReAct framework with persistent memory.
-
-        Args:
-            task (str): The task to solve.
-            success_criteria (Optional[str]): Criteria for success.
-            system_prompt (Optional[str]): System prompt override.
-            max_iterations (Optional[int]): Override for max steps.
-            streaming (bool): Whether to stream responses.
-
-        Returns:
-            List[Dict]: History of steps taken.
-        """
-        max_iters: int = max_iterations if max_iterations is not None else self.max_iterations
-        self.history_manager.store.clear() # Reset history for new task
-        if system_prompt is not None:
-            self.history_manager.system_prompt = system_prompt
-        self.history_manager.task_description = task
-        await self._notify_observers(TaskStartedEvent(
-            event_type="TaskStarted",
-            task_description=task,
-            system_prompt=self.history_manager.system_prompt
-        ))
-
-        for step in range(1, max_iters + 1):
-            try:
-                step_data: Dict = await self._run_step(task, step, max_iters, system_prompt, streaming)
-                is_complete, step_data = await self._finalize_step(task, step_data, success_criteria)
-                if is_complete:
-                    await self._notify_observers(TaskCompletedEvent(
-                        event_type="TaskCompleted", final_answer=step_data["result"], reason="success"
-                    ))
-                    break
-            except Exception as e:
-                await self._notify_observers(ErrorOccurredEvent(
-                    event_type="ErrorOccurred", error_message=str(e), step_number=step
-                ))
-                break
-
-        if not any("<FinalAnswer>" in step["result"] for step in self.history_manager.store):
-            await self._notify_observers(TaskCompletedEvent(
-                event_type="TaskCompleted", final_answer=None,
-                reason="max_iterations_reached" if len(self.history_manager.store) == max_iters else "error"
-            ))
-        return self.history_manager.store
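
Taken together, the deleted ReActAgent drove an async reason/act loop and reported progress through observer events. A minimal sketch of how it was presumably used, assuming the codeact package were still installed; the model id and the empty tool list are placeholders:

import asyncio

from quantalogic.codeact.react_agent import ReActAgent


async def log_event(event) -> None:
    # Every event object carries an event_type string (TaskStarted, StepCompleted, ...).
    print(f"[{event.event_type}]")


async def main() -> None:
    agent = ReActAgent(
        model="gpt-4o-mini",  # placeholder litellm model id
        tools=[],             # Tool instances from installed toolboxes would go here
        max_iterations=3,
        system_prompt="You are a careful assistant.",
    )
    agent.add_observer(log_event, ["TaskStarted", "StepCompleted", "TaskCompleted", "ErrorOccurred"])

    # solve() returns the step history: dicts with step_number, thought, action, result.
    history = await agent.solve("Compute 2 + 2 and report the result.")
    for step in history:
        print(step["step_number"], step["thought"])


asyncio.run(main())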