praisonaiagents 0.0.28__py3-none-any.whl → 0.0.29__py3-none-any.whl
- praisonaiagents/__init__.py +2 -0
- praisonaiagents/agent/agent.py +2 -2
- praisonaiagents/agents/agents.py +169 -35
- praisonaiagents/memory/memory.py +928 -0
- praisonaiagents/task/task.py +157 -9
- {praisonaiagents-0.0.28.dist-info → praisonaiagents-0.0.29.dist-info}/METADATA +4 -2
- {praisonaiagents-0.0.28.dist-info → praisonaiagents-0.0.29.dist-info}/RECORD +9 -16
- {praisonaiagents-0.0.28.dist-info → praisonaiagents-0.0.29.dist-info}/WHEEL +1 -1
- praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -1
- praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -4
- praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -350
- praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -4
- praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -318
- praisonaiagents/build/lib/praisonaiagents/main.py +0 -112
- praisonaiagents/build/lib/praisonaiagents/task/__init__.py +0 -4
- praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -48
- {praisonaiagents-0.0.28.dist-info → praisonaiagents-0.0.29.dist-info}/top_level.txt +0 -0
praisonaiagents/task/task.py
CHANGED
@@ -5,12 +5,17 @@ from pydantic import BaseModel
 from ..main import TaskOutput
 from ..agent.agent import Agent
 import uuid
+import os
+import time
+
+# Set up logger
+logger = logging.getLogger(__name__)
 
 class Task:
     def __init__(
         self,
         description: str,
-        expected_output: str,
+        expected_output: Optional[str] = None,
         agent: Optional[Agent] = None,
         name: Optional[str] = None,
         tools: Optional[List[Any]] = None,
@@ -30,12 +35,14 @@ class Task:
         task_type: str = "task",
         condition: Optional[Dict[str, List[str]]] = None,
         is_start: bool = False,
-        loop_state: Optional[Dict[str, Union[str, int]]] = None
+        loop_state: Optional[Dict[str, Union[str, int]]] = None,
+        memory=None,
+        quality_check=True
     ):
         self.id = str(uuid.uuid4()) if id is None else str(id)
         self.name = name
         self.description = description
-        self.expected_output = expected_output
+        self.expected_output = expected_output if expected_output is not None else "Complete the task successfully"
         self.agent = agent
         self.tools = tools if tools else []
         self.context = context if context else []
@@ -54,6 +61,21 @@
         self.condition = condition if condition else {}
         self.is_start = is_start
         self.loop_state = loop_state if loop_state else {}
+        self.memory = memory
+        self.quality_check = quality_check
+
+        # Set logger level based on config verbose level
+        verbose = self.config.get("verbose", 0)
+        if verbose >= 5:
+            logger.setLevel(logging.INFO)
+        else:
+            logger.setLevel(logging.WARNING)
+
+        # Also set third-party loggers to WARNING
+        logging.getLogger('chromadb').setLevel(logging.WARNING)
+        logging.getLogger('openai').setLevel(logging.WARNING)
+        logging.getLogger('httpx').setLevel(logging.WARNING)
+        logging.getLogger('httpcore').setLevel(logging.WARNING)
 
         if self.output_json and self.output_pydantic:
             raise ValueError("Only one output type can be defined")
@@ -64,11 +86,137 @@
     def __str__(self):
         return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
 
+    def initialize_memory(self):
+        """Initialize memory if config exists but memory doesn't"""
+        if not self.memory and self.config.get('memory_config'):
+            try:
+                from ..memory.memory import Memory
+                logger.info(f"Task {self.id}: Initializing memory from config: {self.config['memory_config']}")
+                self.memory = Memory(config=self.config['memory_config'])
+                logger.info(f"Task {self.id}: Memory initialized successfully")
+
+                # Verify database was created
+                if os.path.exists(self.config['memory_config']['storage']['path']):
+                    logger.info(f"Task {self.id}: Memory database exists after initialization")
+                else:
+                    logger.error(f"Task {self.id}: Failed to create memory database!")
+                return self.memory
+            except Exception as e:
+                logger.error(f"Task {self.id}: Failed to initialize memory: {e}")
+                logger.exception(e)
+                return None
+
+    def store_in_memory(self, content: str, agent_name: str = None, task_id: str = None):
+        """Store content in memory with metadata"""
+        if self.memory:
+            try:
+                logger.info(f"Task {self.id}: Storing content in memory...")
+                self.memory.store_long_term(
+                    text=content,
+                    metadata={
+                        "agent_name": agent_name or "Agent",
+                        "task_id": task_id or self.id,
+                        "timestamp": time.time()
+                    }
+                )
+                logger.info(f"Task {self.id}: Content stored in memory")
+            except Exception as e:
+                logger.error(f"Task {self.id}: Failed to store content in memory: {e}")
+                logger.exception(e)
+
     async def execute_callback(self, task_output: TaskOutput) -> None:
-        """Execute
+        """Execute callback and store quality metrics if enabled"""
+        logger.info(f"Task {self.id}: execute_callback called")
+        logger.info(f"Quality check enabled: {self.quality_check}")
+
+        # Initialize memory if not already initialized
+        if not self.memory:
+            self.memory = self.initialize_memory()
+
+        logger.info(f"Memory object exists: {self.memory is not None}")
+        if self.memory:
+            logger.info(f"Memory config: {self.memory.cfg}")
+            # Store task output in memory
+            try:
+                logger.info(f"Task {self.id}: Storing task output in memory...")
+                self.store_in_memory(
+                    content=task_output.raw,
+                    agent_name=self.agent.name if self.agent else "Agent",
+                    task_id=self.id
+                )
+                logger.info(f"Task {self.id}: Task output stored in memory")
+            except Exception as e:
+                logger.error(f"Task {self.id}: Failed to store task output in memory: {e}")
+                logger.exception(e)
+
+        logger.info(f"Task output: {task_output.raw[:100]}...")
+
+        if self.quality_check and self.memory:
+            try:
+                logger.info(f"Task {self.id}: Starting memory operations")
+                logger.info(f"Task {self.id}: Calculating quality metrics for output: {task_output.raw[:100]}...")
+
+                # Get quality metrics from LLM
+                metrics = self.memory.calculate_quality_metrics(
+                    task_output.raw,
+                    self.expected_output
+                )
+                logger.info(f"Task {self.id}: Quality metrics calculated: {metrics}")
+
+                quality_score = metrics.get("accuracy", 0.0)
+                logger.info(f"Task {self.id}: Quality score: {quality_score}")
+
+                # Store in both short and long-term memory with higher threshold
+                logger.info(f"Task {self.id}: Finalizing task output in memory...")
+                self.memory.finalize_task_output(
+                    content=task_output.raw,
+                    agent_name=self.agent.name if self.agent else "Agent",
+                    quality_score=quality_score,
+                    threshold=0.7,  # Only high quality outputs in long-term memory
+                    metrics=metrics,
+                    task_id=self.id
+                )
+                logger.info(f"Task {self.id}: Finalized task output in memory")
+
+                # Store quality metrics separately
+                logger.info(f"Task {self.id}: Storing quality metrics...")
+                self.memory.store_quality(
+                    text=task_output.raw,
+                    quality_score=quality_score,
+                    task_id=self.id,
+                    metrics=metrics
+                )
+
+                # Store in both short and long-term memory with higher threshold
+                self.memory.finalize_task_output(
+                    content=task_output.raw,
+                    agent_name=self.agent.name if self.agent else "Agent",
+                    quality_score=quality_score,
+                    threshold=0.7  # Only high quality outputs in long-term memory
+                )
+
+                # Build context for next tasks
+                if self.next_tasks:
+                    logger.info(f"Task {self.id}: Building context for next tasks...")
+                    context = self.memory.build_context_for_task(
+                        task_descr=task_output.raw,
+                        max_items=5
+                    )
+                    logger.info(f"Task {self.id}: Built context for next tasks: {len(context)} items")
+
+                logger.info(f"Task {self.id}: Memory operations complete")
+            except Exception as e:
+                logger.error(f"Task {self.id}: Failed to process memory operations: {e}")
+                logger.exception(e)  # Print full stack trace
+                # Continue execution even if memory operations fail
+
+        # Execute original callback
         if self.callback:
-
-
-
-
-
+            try:
+                if asyncio.iscoroutinefunction(self.callback):
+                    await self.callback(task_output)
+                else:
+                    self.callback(task_output)
+            except Exception as e:
+                logger.error(f"Task {self.id}: Failed to execute callback: {e}")
+                logger.exception(e)
{praisonaiagents-0.0.28.dist-info → praisonaiagents-0.0.29.dist-info}/METADATA
CHANGED
@@ -1,8 +1,10 @@
-Metadata-Version: 2.
+Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.28
+Version: 0.0.29
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
 Requires-Dist: rich
 Requires-Dist: openai
+Provides-Extra: memory
+Requires-Dist: chromadb>=0.6.0; extra == "memory"
{praisonaiagents-0.0.28.dist-info → praisonaiagents-0.0.29.dist-info}/RECORD
CHANGED
@@ -1,22 +1,15 @@
-praisonaiagents/__init__.py,sha256=
+praisonaiagents/__init__.py,sha256=Pm_HNlIsenf5zIstcVNk6nteJmOEnI4nB-zB-YL0Jgo,1160
 praisonaiagents/main.py,sha256=7Phfe0gdxHzbhPb3WRzBTfq9CaLq0K31M5DM_4oCiCQ,12451
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=r4Bfe_R5CZlPEKt6DQqgFa9EGuaaMTr9suiiy_qbsnc,33361
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=
+praisonaiagents/agents/agents.py,sha256=N55Ae3JkjNgGQ6pXBaBpI73sA80Y-pPUZLOd1NgtiWU,29523
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
-praisonaiagents/
-praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
-praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/build/lib/praisonaiagents/agent/agent.py,sha256=PwbeW6v4Ldcl10JQr9_7TBfg4_FskQh-mGoFUdGxg8w,15483
-praisonaiagents/build/lib/praisonaiagents/agents/__init__.py,sha256=cgCLFLFcLp9SizmFSHUkH5aX-1seAAsRtQbtIHBBso4,101
-praisonaiagents/build/lib/praisonaiagents/agents/agents.py,sha256=P2FAtlfD3kPib5a1oLVYanxlU6e4-GhBMQ0YDY5MHY4,13473
-praisonaiagents/build/lib/praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/build/lib/praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
+praisonaiagents/memory/memory.py,sha256=pKQT2tKQpSeStZvW8BT83ree-hqJBvm-tGUwCwOsjDI,35504
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=4qXdrCDQPH5MtvHvdJVURXKNgSl6ae3OYTiqAF_A2ZU,24295
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=
+praisonaiagents/task/task.py,sha256=mwmk98nesfz102qTnHSE5VuuPIgHiPDxjeEX7b7g2BA,10023
 praisonaiagents/tools/__init__.py,sha256=-0lV5n5cG54vYW6REjXIfuJnCLKnfQIDlXsySCaPB9s,7347
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
@@ -37,7 +30,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=nmzjS7G_5GqMQD4r867mt17dHg5xvtsYDDfOPh68SgE,8105
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.29.dist-info/METADATA,sha256=HUWedStiXMzTbrkdz61JBYXN5x8K7xRRA1q6C2LLhlU,306
+praisonaiagents-0.0.29.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.29.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.29.dist-info/RECORD,,
praisonaiagents/build/lib/praisonaiagents/__init__.py
DELETED
@@ -1 +0,0 @@
-
praisonaiagents/build/lib/praisonaiagents/agent/agent.py
DELETED
@@ -1,350 +0,0 @@
-import logging
-import json
-import time
-from typing import List, Optional, Any, Dict, Union, Literal
-from rich.console import Console
-from rich.live import Live
-from ..main import (
-    display_error,
-    display_tool_call,
-    display_instruction,
-    display_interaction,
-    display_generating,
-    ReflectionOutput,
-    client,
-    error_logs
-)
-
-class Agent:
-    def __init__(
-        self,
-        name: str,
-        role: str,
-        goal: str,
-        backstory: str,
-        llm: Optional[Union[str, Any]] = "gpt-4o-mini",
-        tools: Optional[List[Any]] = None,
-        function_calling_llm: Optional[Any] = None,
-        max_iter: int = 20,
-        max_rpm: Optional[int] = None,
-        max_execution_time: Optional[int] = None,
-        memory: bool = True,
-        verbose: bool = False,
-        allow_delegation: bool = False,
-        step_callback: Optional[Any] = None,
-        cache: bool = True,
-        system_template: Optional[str] = None,
-        prompt_template: Optional[str] = None,
-        response_template: Optional[str] = None,
-        allow_code_execution: Optional[bool] = False,
-        max_retry_limit: int = 2,
-        respect_context_window: bool = True,
-        code_execution_mode: Literal["safe", "unsafe"] = "safe",
-        embedder_config: Optional[Dict[str, Any]] = None,
-        knowledge_sources: Optional[List[Any]] = None,
-        use_system_prompt: Optional[bool] = True,
-        markdown: bool = True,
-        self_reflect: bool = True,
-        max_reflection_iter: int = 3
-    ):
-        self.name = name
-        self.role = role
-        self.goal = goal
-        self.backstory = backstory
-        self.llm = llm
-        self.tools = tools if tools else []
-        self.function_calling_llm = function_calling_llm
-        self.max_iter = max_iter
-        self.max_rpm = max_rpm
-        self.max_execution_time = max_execution_time
-        self.memory = memory
-        self.verbose = verbose
-        self.allow_delegation = allow_delegation
-        self.step_callback = step_callback
-        self.cache = cache
-        self.system_template = system_template
-        self.prompt_template = prompt_template
-        self.response_template = response_template
-        self.allow_code_execution = allow_code_execution
-        self.max_retry_limit = max_retry_limit
-        self.respect_context_window = respect_context_window
-        self.code_execution_mode = code_execution_mode
-        self.embedder_config = embedder_config
-        self.knowledge_sources = knowledge_sources
-        self.use_system_prompt = use_system_prompt
-        self.chat_history = []
-        self.markdown = markdown
-        self.self_reflect = self_reflect
-        self.max_reflection_iter = max_reflection_iter
-
-    def execute_tool(self, function_name, arguments):
-        logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")
-        if function_name == "get_weather":
-            location = arguments.get("location", "Unknown Location")
-            return {"temperature": "25C", "condition": "Sunny", "location": location}
-        elif function_name == "search_tool":
-            query = arguments.get("query", "AI trends in 2024")
-            return {"results": [
-                {"title": "AI advancements in 2024", "link": "url1", "summary": "Lots of advancements"},
-                {"title": "New trends in AI", "link": "url2", "summary": "New trends being found"}
-            ]}
-        else:
-            return f"Tool '{function_name}' is not recognized"
-
-    def clear_history(self):
-        self.chat_history = []
-
-    def __str__(self):
-        return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
-
-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
-        console = Console()
-        start_time = time.time()
-        logging.debug(f"{self.name} sending messages to LLM: {messages}")
-
-        formatted_tools = []
-        if tools:
-            for tool in tools:
-                if isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif isinstance(tool, str):
-                    formatted_tools.append({
-                        "type": "function",
-                        "function": {
-                            "name": tool,
-                            "description": f"This is a tool called {tool}",
-                            "parameters": {
-                                "type": "object",
-                                "properties": {},
-                            },
-                        }
-                    })
-                else:
-                    display_error(f"Warning: Tool {tool} not recognized")
-
-        try:
-            initial_response = client.chat.completions.create(
-                model=self.llm,
-                messages=messages,
-                temperature=temperature,
-                tools=formatted_tools if formatted_tools else None,
-                stream=False
-            )
-
-            tool_calls = getattr(initial_response.choices[0].message, 'tool_calls', None)
-
-            if tool_calls:
-                messages.append({
-                    "role": "assistant",
-                    "content": initial_response.choices[0].message.content,
-                    "tool_calls": tool_calls
-                })
-
-                for tool_call in tool_calls:
-                    function_name = tool_call.function.name
-                    arguments = json.loads(tool_call.function.arguments)
-
-                    if self.verbose:
-                        display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
-
-                    tool_result = self.execute_tool(function_name, arguments)
-                    results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
-
-                    if self.verbose:
-                        display_tool_call(f"Function '{function_name}' returned: {results_str}")
-
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call.id,
-                        "content": results_str
-                    })
-
-            if stream:
-                response_stream = client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=True
-                )
-                full_response_text = ""
-                with Live(display_generating("", start_time), refresh_per_second=4) as live:
-                    for chunk in response_stream:
-                        if chunk.choices[0].delta.content:
-                            full_response_text += chunk.choices[0].delta.content
-                            live.update(display_generating(full_response_text, start_time))
-
-                final_response = client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False
-                )
-                return final_response
-            else:
-                if tool_calls:
-                    final_response = client.chat.completions.create(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        stream=False
-                    )
-                    return final_response
-                else:
-                    return initial_response
-
-        except Exception as e:
-            display_error(f"Error in chat completion: {e}")
-            return None
-
-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
-        if self.use_system_prompt:
-            system_prompt = f"""{self.backstory}\n
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-            """
-        else:
-            system_prompt = None
-
-        messages = []
-        if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
-        messages.extend(self.chat_history)
-        messages.append({"role": "user", "content": prompt})
-
-        final_response_text = None
-        reflection_count = 0
-        start_time = time.time()
-
-        while True:
-            try:
-                if self.verbose:
-                    display_instruction(f"Agent {self.name} is processing prompt: {prompt}")
-
-                formatted_tools = []
-                if tools:
-                    for tool in tools:
-                        if isinstance(tool, dict):
-                            formatted_tools.append(tool)
-                        elif hasattr(tool, "to_openai_tool"):
-                            formatted_tools.append(tool.to_openai_tool())
-                        elif isinstance(tool, str):
-                            formatted_tools.append({
-                                "type": "function",
-                                "function": {
-                                    "name": tool,
-                                    "description": f"This is a tool called {tool}",
-                                    "parameters": {
-                                        "type": "object",
-                                        "properties": {},
-                                    },
-                                }
-                            })
-                        else:
-                            display_error(f"Warning: Tool {tool} not recognized")
-
-                response = self._chat_completion(messages, temperature=temperature, tools=formatted_tools if formatted_tools else None)
-                if not response:
-                    return None
-
-                tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
-
-                if tool_calls:
-                    messages.append({
-                        "role": "assistant",
-                        "content": response.choices[0].message.content,
-                        "tool_calls": tool_calls
-                    })
-
-                    for tool_call in tool_calls:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-
-                        if self.verbose:
-                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
-
-                        tool_result = self.execute_tool(function_name, arguments)
-
-                        if tool_result:
-                            if self.verbose:
-                                display_tool_call(f"Function '{function_name}' returned: {tool_result}")
-                            messages.append({
-                                "role": "tool",
-                                "tool_call_id": tool_call.id,
-                                "content": json.dumps(tool_result)
-                            })
-                        else:
-                            messages.append({
-                                "role": "tool",
-                                "tool_call_id": tool_call.id,
-                                "content": "Function returned an empty output"
-                            })
-
-                    response = self._chat_completion(messages, temperature=temperature)
-                    if not response:
-                        return None
-                    response_text = response.choices[0].message.content.strip()
-                else:
-                    response_text = response.choices[0].message.content.strip()
-
-                if not self.self_reflect:
-                    self.chat_history.append({"role": "user", "content": prompt})
-                    self.chat_history.append({"role": "assistant", "content": response_text})
-                    if self.verbose:
-                        logging.info(f"Agent {self.name} final response: {response_text}")
-                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
-                    return response_text
-
-                reflection_prompt = f"""
-Reflect on your previous response: '{response_text}'.
-Identify any flaws, improvements, or actions.
-Provide a "satisfactory" status ('yes' or 'no').
-Output MUST be JSON with 'reflection' and 'satisfactory'.
-                """
-                logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
-                messages.append({"role": "user", "content": reflection_prompt})
-
-                try:
-                    reflection_response = client.beta.chat.completions.parse(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        response_format=ReflectionOutput
-                    )
-
-                    reflection_output = reflection_response.choices[0].message.parsed
-
-                    if self.verbose:
-                        display_self_reflection(f"Agent {self.name} self reflection: reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
-
-                    messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
-
-                    if reflection_output.satisfactory == "yes":
-                        if self.verbose:
-                            display_self_reflection("Agent marked the response as satisfactory")
-                        self.chat_history.append({"role": "assistant", "content": response_text})
-                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
-                        return response_text
-
-                    logging.debug(f"{self.name} reflection not satisfactory, requesting regeneration.")
-                    messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
-                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
-                    response_text = response.choices[0].message.content.strip()
-                except Exception as e:
-                    display_error(f"Error in parsing self-reflection json {e}. Retrying")
-                    logging.error("Reflection parsing failed.", exc_info=True)
-                    messages.append({"role": "assistant", "content": f"Self Reflection failed."})
-
-                reflection_count += 1
-
-                self.chat_history.append({"role": "user", "content": prompt})
-                self.chat_history.append({"role": "assistant", "content": response_text})
-
-                if self.verbose:
-                    logging.info(f"Agent {self.name} final response: {response_text}")
-                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
-                return response_text
-            except Exception as e:
-                display_error(f"Error in chat: {e}")
-                return None