praisonaiagents 0.0.29__py3-none-any.whl → 0.0.53__py3-none-any.whl
This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- praisonaiagents/__init__.py +4 -2
- praisonaiagents/agent/agent.py +679 -235
- praisonaiagents/agents/agents.py +169 -34
- praisonaiagents/knowledge/__init__.py +8 -0
- praisonaiagents/knowledge/chunking.py +182 -0
- praisonaiagents/knowledge/knowledge.py +321 -0
- praisonaiagents/llm/__init__.py +20 -0
- praisonaiagents/llm/llm.py +1023 -0
- praisonaiagents/main.py +46 -9
- praisonaiagents/memory/memory.py +6 -3
- praisonaiagents/process/process.py +206 -90
- praisonaiagents/task/task.py +104 -4
- praisonaiagents/tools/pandas_tools.py +3 -0
- praisonaiagents/tools/yfinance_tools.py +9 -1
- praisonaiagents-0.0.53.dist-info/METADATA +22 -0
- {praisonaiagents-0.0.29.dist-info → praisonaiagents-0.0.53.dist-info}/RECORD +18 -13
- praisonaiagents-0.0.29.dist-info/METADATA +0 -10
- {praisonaiagents-0.0.29.dist-info → praisonaiagents-0.0.53.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.29.dist-info → praisonaiagents-0.0.53.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -16,13 +16,142 @@ from ..main import (
     display_self_reflection,
     ReflectionOutput,
     client,
-    error_logs
+    error_logs,
+    adisplay_instruction
 )
 import inspect
+import uuid
+from dataclasses import dataclass
 
 if TYPE_CHECKING:
     from ..task.task import Task
 
+@dataclass
+class ChatCompletionMessage:
+    content: str
+    role: str = "assistant"
+    refusal: Optional[str] = None
+    audio: Optional[str] = None
+    function_call: Optional[dict] = None
+    tool_calls: Optional[List] = None
+    reasoning_content: Optional[str] = None
+
+@dataclass
+class Choice:
+    finish_reason: Optional[str]
+    index: int
+    message: ChatCompletionMessage
+    logprobs: Optional[dict] = None
+
+@dataclass
+class CompletionTokensDetails:
+    accepted_prediction_tokens: Optional[int] = None
+    audio_tokens: Optional[int] = None
+    reasoning_tokens: Optional[int] = None
+    rejected_prediction_tokens: Optional[int] = None
+
+@dataclass
+class PromptTokensDetails:
+    audio_tokens: Optional[int] = None
+    cached_tokens: int = 0
+
+@dataclass
+class CompletionUsage:
+    completion_tokens: int = 0
+    prompt_tokens: int = 0
+    total_tokens: int = 0
+    completion_tokens_details: Optional[CompletionTokensDetails] = None
+    prompt_tokens_details: Optional[PromptTokensDetails] = None
+    prompt_cache_hit_tokens: int = 0
+    prompt_cache_miss_tokens: int = 0
+
+@dataclass
+class ChatCompletion:
+    id: str
+    choices: List[Choice]
+    created: int
+    model: str
+    object: str = "chat.completion"
+    system_fingerprint: Optional[str] = None
+    service_tier: Optional[str] = None
+    usage: Optional[CompletionUsage] = None
+
+def process_stream_chunks(chunks):
+    """Process streaming chunks into combined response"""
+    if not chunks:
+        return None
+
+    try:
+        first_chunk = chunks[0]
+        last_chunk = chunks[-1]
+
+        # Basic metadata
+        id = getattr(first_chunk, "id", None)
+        created = getattr(first_chunk, "created", None)
+        model = getattr(first_chunk, "model", None)
+        system_fingerprint = getattr(first_chunk, "system_fingerprint", None)
+
+        # Track usage
+        completion_tokens = 0
+        prompt_tokens = 0
+
+        content_list = []
+        reasoning_list = []
+
+        for chunk in chunks:
+            if not hasattr(chunk, "choices") or not chunk.choices:
+                continue
+
+            # Track usage from each chunk
+            if hasattr(chunk, "usage"):
+                completion_tokens += getattr(chunk.usage, "completion_tokens", 0)
+                prompt_tokens += getattr(chunk.usage, "prompt_tokens", 0)
+
+            delta = getattr(chunk.choices[0], "delta", None)
+            if not delta:
+                continue
+
+            if hasattr(delta, "content") and delta.content:
+                content_list.append(delta.content)
+            if hasattr(delta, "reasoning_content") and delta.reasoning_content:
+                reasoning_list.append(delta.reasoning_content)
+
+        combined_content = "".join(content_list) if content_list else ""
+        combined_reasoning = "".join(reasoning_list) if reasoning_list else None
+        finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
+
+        message = ChatCompletionMessage(
+            content=combined_content,
+            reasoning_content=combined_reasoning
+        )
+
+        choice = Choice(
+            finish_reason=finish_reason,
+            index=0,
+            message=message
+        )
+
+        usage = CompletionUsage(
+            completion_tokens=completion_tokens,
+            prompt_tokens=prompt_tokens,
+            total_tokens=completion_tokens + prompt_tokens,
+            completion_tokens_details=CompletionTokensDetails(),
+            prompt_tokens_details=PromptTokensDetails()
+        )
+
+        return ChatCompletion(
+            id=id,
+            choices=[choice],
+            created=created,
+            model=model,
+            system_fingerprint=system_fingerprint,
+            usage=usage
+        )
+
+    except Exception as e:
+        print(f"Error processing chunks: {e}")
+        return None
+
 class Agent:
     def _generate_tool_definition(self, function_name):
         """
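Reviewer note: the new process_stream_chunks helper folds a list of streamed completion chunks into a single ChatCompletion-style object built from the dataclasses above. A minimal sketch of the intended call pattern, using hand-rolled stand-in chunk objects (the SimpleNamespace stubs and model name are illustrative, not part of the package):

    from types import SimpleNamespace

    def fake_chunk(text):
        # Mimics the shape of an OpenAI streaming chunk: .choices[0].delta.content
        delta = SimpleNamespace(content=text, reasoning_content=None)
        choice = SimpleNamespace(delta=delta, finish_reason=None)
        return SimpleNamespace(id="chatcmpl-demo", created=0, model="gpt-4o",
                               system_fingerprint=None, choices=[choice])

    chunks = [fake_chunk("Hello, "), fake_chunk("world")]
    completion = process_stream_chunks(chunks)      # -> ChatCompletion dataclass
    print(completion.choices[0].message.content)    # "Hello, world"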
@@ -69,10 +198,15 @@ class Agent:
 
         import inspect
         # Langchain tools
-        if inspect.isclass(func) and hasattr(func, 'run'):
+        if inspect.isclass(func) and hasattr(func, 'run') and not hasattr(func, '_run'):
             original_func = func
             func = func.run
             function_name = original_func.__name__
+        # CrewAI tools
+        elif inspect.isclass(func) and hasattr(func, '_run'):
+            original_func = func
+            func = func._run
+            function_name = original_func.__name__
 
         sig = inspect.signature(func)
         logging.debug(f"Function signature: {sig}")
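The dispatch above now separates two tool shapes: Langchain-style classes expose run() without _run(), while CrewAI-style tools define _run() (and typically a run() wrapper as well, which is why the first branch now excludes classes that define _run). A small illustrative sketch of the two shapes being distinguished; both classes are hypothetical:

    class LangchainStyleTool:
        # Matched by the first branch: has run(), no _run()
        def run(self, query: str) -> str:
            return f"langchain result for {query}"

    class CrewAIStyleTool:
        # Matched by the new elif branch: defines _run()
        def _run(self, query: str) -> str:
            return f"crewai result for {query}"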
@@ -175,18 +309,37 @@ class Agent:
         respect_context_window: bool = True,
         code_execution_mode: Literal["safe", "unsafe"] = "safe",
         embedder_config: Optional[Dict[str, Any]] = None,
-
+        knowledge: Optional[List[str]] = None,
+        knowledge_config: Optional[Dict[str, Any]] = None,
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
         self_reflect: bool = False,
         max_reflect: int = 3,
         min_reflect: int = 1,
-        reflect_llm: Optional[str] = None
+        reflect_llm: Optional[str] = None,
+        user_id: Optional[str] = None,
+        reasoning_steps: bool = False
     ):
+        # Add check at start if memory is requested
+        if memory is not None:
+            try:
+                from ..memory.memory import Memory
+                MEMORY_AVAILABLE = True
+            except ImportError:
+                raise ImportError(
+                    "Memory features requested in Agent but memory dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[memory]\""
+                )
+
         # Handle backward compatibility for required fields
         if all(x is None for x in [name, role, goal, backstory, instructions]):
             raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")
 
+        # Configure logging to suppress unwanted outputs
+        logging.getLogger("litellm").setLevel(logging.WARNING)
+        logging.getLogger("httpx").setLevel(logging.WARNING)
+        logging.getLogger("httpcore").setLevel(logging.WARNING)
+
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
             self.name = name or "Agent"
@@ -206,7 +359,34 @@ class Agent:
 
         self.instructions = instructions
         # Check for model name in environment variable if not provided
-        self.
+        self._using_custom_llm = False
+
+        # If the user passes a dictionary (for advanced configuration)
+        if isinstance(llm, dict) and "model" in llm:
+            try:
+                from ..llm.llm import LLM
+                self.llm_instance = LLM(**llm)  # Pass all dict items as kwargs
+                self._using_custom_llm = True
+            except ImportError as e:
+                raise ImportError(
+                    "LLM features requested but dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[llm]\""
+                ) from e
+        # If the user passes a string with a slash (provider/model)
+        elif isinstance(llm, str) and "/" in llm:
+            try:
+                from ..llm.llm import LLM
+                # Pass the entire string so LiteLLM can parse provider/model
+                self.llm_instance = LLM(model=llm)
+                self._using_custom_llm = True
+            except ImportError as e:
+                raise ImportError(
+                    "LLM features requested but dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[llm]\""
+                ) from e
+        # Otherwise, fall back to OpenAI environment/name
+        else:
+            self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
         self.tools = tools if tools else []  # Store original tools
         self.function_calling_llm = function_calling_llm
         self.max_iter = max_iter
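Per the constructor logic above, the model can now be selected three ways; a hedged usage sketch (the model names are placeholders, and the dict form simply forwards its keys to the LLM wrapper as keyword arguments):

    from praisonaiagents import Agent

    # 1. Plain model name (or the OPENAI_MODEL_NAME env var) -> default OpenAI client path
    agent_a = Agent(instructions="You are a helpful assistant", llm="gpt-4o")

    # 2. "provider/model" string -> routed through the new LLM wrapper (LiteLLM-style routing)
    agent_b = Agent(instructions="You are a helpful assistant", llm="ollama/llama3.1")

    # 3. Dict with a "model" key for advanced configuration; requires praisonaiagents[llm]
    agent_c = Agent(instructions="You are a helpful assistant",
                    llm={"model": "groq/llama-3.1-8b-instant"})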
@@ -225,7 +405,7 @@ class Agent:
         self.respect_context_window = respect_context_window
         self.code_execution_mode = code_execution_mode
         self.embedder_config = embedder_config
-        self.
+        self.knowledge = knowledge
         self.use_system_prompt = use_system_prompt
         self.chat_history = []
         self.markdown = markdown
@@ -241,6 +421,41 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """
 
+        # Generate unique IDs
+        self.agent_id = str(uuid.uuid4())
+
+        # Store user_id
+        self.user_id = user_id or "praison"
+        self.reasoning_steps = reasoning_steps
+
+        # Check if knowledge parameter has any values
+        if not knowledge:
+            self.knowledge = None
+        else:
+            # Initialize Knowledge with provided or default config
+            from praisonaiagents.knowledge import Knowledge
+            self.knowledge = Knowledge(knowledge_config or None)
+
+            # Handle knowledge
+            if knowledge:
+                for source in knowledge:
+                    self._process_knowledge(source)
+
+    def _process_knowledge(self, knowledge_item):
+        """Process and store knowledge from a file path, URL, or string."""
+        try:
+            if os.path.exists(knowledge_item):
+                # It's a file path
+                self.knowledge.add(knowledge_item, user_id=self.user_id, agent_id=self.agent_id)
+            elif knowledge_item.startswith("http://") or knowledge_item.startswith("https://"):
+                # It's a URL
+                pass
+            else:
+                # It's a string content
+                self.knowledge.store(knowledge_item, user_id=self.user_id, agent_id=self.agent_id)
+        except Exception as e:
+            logging.error(f"Error processing knowledge item: {knowledge_item}, error: {e}")
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
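A hedged sketch of the knowledge flow introduced above: existing file paths are ingested with Knowledge.add, plain strings with Knowledge.store, and matching entries are searched and appended to the prompt in chat()/achat(). The file name and question below are placeholders, and the knowledge extra is presumably required:

    agent = Agent(
        instructions="Answer questions using the attached material",
        knowledge=["notes.txt", "The project codename is Atlas."],  # file path + raw string
        knowledge_config=None,   # or a dict understood by praisonaiagents.knowledge.Knowledge
        user_id="praison",       # default user id when omitted
    )
    print(agent.start("What is the project codename?"))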
@@ -279,14 +494,22 @@ Your Goal: {self.goal}
 
         if func:
             try:
-                # If it's a class with run
-                if inspect.isclass(func) and hasattr(func, 'run'):
+                # Langchain: If it's a class with run but not _run, instantiate and call run
+                if inspect.isclass(func) and hasattr(func, 'run') and not hasattr(func, '_run'):
                     instance = func()
-                    # Extract only the parameters that run() expects
                     run_params = {k: v for k, v in arguments.items()
-
-
+                                  if k in inspect.signature(instance.run).parameters
+                                  and k != 'self'}
                     return instance.run(**run_params)
+
+                # CrewAI: If it's a class with an _run method, instantiate and call _run
+                elif inspect.isclass(func) and hasattr(func, '_run'):
+                    instance = func()
+                    run_params = {k: v for k, v in arguments.items()
+                                  if k in inspect.signature(instance._run).parameters
+                                  and k != 'self'}
+                    return instance._run(**run_params)
+
                 # Otherwise treat as regular function
                 elif callable(func):
                     return func(**arguments)
@@ -305,7 +528,7 @@ Your Goal: {self.goal}
     def __str__(self):
         return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
 
-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
 
@@ -375,30 +598,35 @@ Your Goal: {self.goal}
                     stream=True
                 )
                 full_response_text = ""
+                reasoning_content = ""
+                chunks = []
 
                 # Create Live display with proper configuration
                 with Live(
                     display_generating("", start_time),
                     console=self.console,
                     refresh_per_second=4,
-                    transient=
+                    transient=True,
                     vertical_overflow="ellipsis",
                     auto_refresh=True
                 ) as live:
                     for chunk in response_stream:
+                        chunks.append(chunk)
                         if chunk.choices[0].delta.content:
                             full_response_text += chunk.choices[0].delta.content
                             live.update(display_generating(full_response_text, start_time))
+
+                        # Update live display with reasoning content if enabled
+                        if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                            rc = chunk.choices[0].delta.reasoning_content
+                            if rc:
+                                reasoning_content += rc
+                                live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
 
                 # Clear the last generating display with a blank line
                 self.console.print()
 
-                final_response =
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False
-                )
+                final_response = process_stream_chunks(chunks)
                 return final_response
             else:
                 if tool_calls:
@@ -416,178 +644,237 @@ Your Goal: {self.goal}
             display_error(f"Error in chat completion: {e}")
             return None
 
-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
-        messages.extend(self.chat_history)
-
-        # Modify prompt if output_json or output_pydantic is specified
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                # For multimodal prompts, append to the text content
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        if isinstance(prompt, list):
-            # If we receive a multimodal prompt list, place it directly in the user message
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
-
-        final_response_text = None
-        reflection_count = 0
-        start_time = time.time()
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+        reasoning_steps = reasoning_steps or self.reasoning_steps
+        # Search for existing knowledge if any knowledge is provided
+        if self.knowledge:
+            search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
+            if search_results:
+                # Check if search_results is a list of dictionaries or strings
+                if isinstance(search_results, dict) and 'results' in search_results:
+                    # Extract memory content from the results
+                    knowledge_content = "\n".join([result['memory'] for result in search_results['results']])
+                else:
+                    # If search_results is a list of strings, join them directly
+                    knowledge_content = "\n".join(search_results)
+
+                # Append found knowledge to the prompt
+                prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"
 
+        if self._using_custom_llm:
             try:
-                if
+                # Pass everything to LLM class
+                response_text = self.llm_instance.get_response(
+                    prompt=prompt,
+                    system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
+                    chat_history=self.chat_history,
+                    temperature=temperature,
+                    tools=tools,
+                    output_json=output_json,
+                    output_pydantic=output_pydantic,
+                    verbose=self.verbose,
+                    markdown=self.markdown,
+                    self_reflect=self.self_reflect,
+                    max_reflect=self.max_reflect,
+                    min_reflect=self.min_reflect,
+                    console=self.console,
+                    agent_name=self.name,
+                    agent_role=self.role,
+                    agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                    execute_tool_fn=self.execute_tool,  # Pass tool execution function
+                    reasoning_steps=reasoning_steps
+                )
 
-                return None
+                self.chat_history.append({"role": "user", "content": prompt})
+                self.chat_history.append({"role": "assistant", "content": response_text})
 
+                return response_text
+            except Exception as e:
+                display_error(f"Error in LLM chat: {e}")
+                return None
+        else:
+            if self.use_system_prompt:
+                system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+"""
+                if output_json:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+                elif output_pydantic:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+            else:
+                system_prompt = None
 
-                        "tool_calls": tool_calls
-                    })
-
-                    for tool_call in tool_calls:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+            messages.extend(self.chat_history)
 
+            # Modify prompt if output_json or output_pydantic is specified
+            original_prompt = prompt
+            if output_json or output_pydantic:
+                if isinstance(prompt, str):
+                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                elif isinstance(prompt, list):
+                    # For multimodal prompts, append to the text content
+                    for item in prompt:
+                        if item["type"] == "text":
+                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                            break
 
+            if isinstance(prompt, list):
+                # If we receive a multimodal prompt list, place it directly in the user message
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
 
-                        "content": "Function returned an empty output"
-                    })
+            final_response_text = None
+            reflection_count = 0
+            start_time = time.time()
+
+            while True:
+                try:
+                    if self.verbose:
+                        # Handle both string and list prompts for instruction display
+                        display_text = prompt
+                        if isinstance(prompt, list):
+                            # Extract text content from multimodal prompt
+                            display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
 
+                        if display_text and str(display_text).strip():
+                            # Pass agent information to display_instruction
+                            agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+                            display_instruction(
+                                f"Agent {self.name} is processing prompt: {display_text}",
+                                console=self.console,
+                                agent_name=self.name,
+                                agent_role=self.role,
+                                agent_tools=agent_tools
+                            )
+
+                    response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps)
                     if not response:
                         return None
+
+                    tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
                     response_text = response.choices[0].message.content.strip()
 
+                    if tool_calls:
+                        messages.append({
+                            "role": "assistant",
+                            "content": response_text,
+                            "tool_calls": tool_calls
+                        })
+
+                        for tool_call in tool_calls:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments)
 
+                            if self.verbose:
+                                display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}", console=self.console)
+
+                            tool_result = self.execute_tool(function_name, arguments)
+
+                            if tool_result:
+                                if self.verbose:
+                                    display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=self.console)
+                                messages.append({
+                                    "role": "tool",
+                                    "tool_call_id": tool_call.id,
+                                    "content": json.dumps(tool_result)
+                                })
+                            else:
+                                messages.append({
+                                    "role": "tool",
+                                    "tool_call_id": tool_call.id,
+                                    "content": "Function returned an empty output"
+                                })
+
+                        response = self._chat_completion(messages, temperature=temperature)
+                        if not response:
+                            return None
+                        response_text = response.choices[0].message.content.strip()
+
+                    # Handle output_json or output_pydantic if specified
+                    if output_json or output_pydantic:
+                        # Add to chat history and return raw response
+                        self.chat_history.append({"role": "user", "content": original_prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        if self.verbose:
+                            display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                                generation_time=time.time() - start_time, console=self.console)
+                        return response_text
+
+                    if not self.self_reflect:
+                        self.chat_history.append({"role": "user", "content": original_prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        if self.verbose:
+                            logging.debug(f"Agent {self.name} final response: {response_text}")
+                        display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                        # Return only reasoning content if reasoning_steps is True
+                        if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
+                            return response.choices[0].message.reasoning_content
+                        return response_text
 
+                    reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
 Identify any flaws, improvements, or actions.
 Provide a "satisfactory" status ('yes' or 'no').
 Output MUST be JSON with 'reflection' and 'satisfactory'.
+"""
+                    logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
+                    messages.append({"role": "user", "content": reflection_prompt})
 
+                    try:
+                        reflection_response = client.beta.chat.completions.parse(
+                            model=self.reflect_llm if self.reflect_llm else self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            response_format=ReflectionOutput
+                        )
 
+                        reflection_output = reflection_response.choices[0].message.parsed
 
+                        if self.verbose:
+                            display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
 
+                        messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
 
+                        # Only consider satisfactory after minimum reflections
+                        if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+                            if self.verbose:
+                                display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+                            self.chat_history.append({"role": "user", "content": prompt})
+                            self.chat_history.append({"role": "assistant", "content": response_text})
+                            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                            return response_text
 
+                        # Check if we've hit max reflections
+                        if reflection_count >= self.max_reflect - 1:
+                            if self.verbose:
+                                display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
+                            self.chat_history.append({"role": "user", "content": prompt})
+                            self.chat_history.append({"role": "assistant", "content": response_text})
+                            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                            return response_text
 
+                        logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
+                        messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
+                        response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
+                        response_text = response.choices[0].message.content.strip()
+                        reflection_count += 1
+                        continue  # Continue the loop for more reflections
 
+                    except Exception as e:
+                        display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
+                        logging.error("Reflection parsing failed.", exc_info=True)
+                        messages.append({"role": "assistant", "content": f"Self Reflection failed."})
+                        reflection_count += 1
+                        continue  # Continue even after error to try again
+
                 except Exception as e:
-                    display_error(f"Error in
-
-                    messages.append({"role": "assistant", "content": f"Self Reflection failed."})
-                    reflection_count += 1
-                    continue  # Continue even after error to try again
-
-                except Exception as e:
-                    display_error(f"Error in chat: {e}", console=self.console)
-                    return None
+                    display_error(f"Error in chat: {e}", console=self.console)
+                    return None
 
     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
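The reworked chat() above adds structured output and optional reasoning content on top of the existing tool-calling and self-reflection loop; a minimal sketch (the Pydantic model and prompts are illustrative):

    from pydantic import BaseModel

    class Answer(BaseModel):
        answer: str
        confidence: float

    agent = Agent(instructions="You are a concise assistant")

    # output_json injects the model's JSON schema into the system prompt and
    # returns the raw JSON text for the caller to parse
    raw_json = agent.chat("What is 2 + 2?", output_json=Answer)

    # reasoning_steps=True returns reasoning_content when the provider exposes it
    reasoning = agent.chat("What is 2 + 2?", reasoning_steps=True)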
@@ -599,85 +886,158 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[len("```"):].strip()
         if cleaned.endswith("```"):
             cleaned = cleaned[:-3].strip()
-        return cleaned
+        return cleaned
 
-    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
-        """Async version of chat method"""
+    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+        """Async version of chat method. TODO: Requires Syncing with chat method."""
+        reasoning_steps = reasoning_steps or self.reasoning_steps
         try:
-            #
+            # Search for existing knowledge if any knowledge is provided
+            if self.knowledge:
+                search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
+                if search_results:
+                    if isinstance(search_results, dict) and 'results' in search_results:
+                        knowledge_content = "\n".join([result['memory'] for result in search_results['results']])
+                    else:
+                        knowledge_content = "\n".join(search_results)
+                    prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"
+
+            if self._using_custom_llm:
+                try:
+                    response_text = await self.llm_instance.get_response_async(
+                        prompt=prompt,
+                        system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
+                        chat_history=self.chat_history,
+                        temperature=temperature,
+                        tools=tools,
+                        output_json=output_json,
+                        output_pydantic=output_pydantic,
+                        verbose=self.verbose,
+                        markdown=self.markdown,
+                        self_reflect=self.self_reflect,
+                        max_reflect=self.max_reflect,
+                        min_reflect=self.min_reflect,
+                        console=self.console,
+                        agent_name=self.name,
+                        agent_role=self.role,
+                        agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                        execute_tool_fn=self.execute_tool_async,
+                        reasoning_steps=reasoning_steps
+                    )
+
+                    self.chat_history.append({"role": "user", "content": prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+
+                    return response_text
+                except Exception as e:
+                    display_error(f"Error in LLM chat: {e}")
+                    return None
+
+            # For OpenAI client
+            if self.use_system_prompt:
+                system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+"""
+                if output_json:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+                elif output_pydantic:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
             else:
+                system_prompt = None
+
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+            messages.extend(self.chat_history)
+
+            # Modify prompt if output_json or output_pydantic is specified
+            original_prompt = prompt
+            if output_json or output_pydantic:
+                if isinstance(prompt, str):
+                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                elif isinstance(prompt, list):
+                    for item in prompt:
                         if item["type"] == "text":
                             item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
                             break
 
-            if tools:
-                for tool in tools:
-                    if isinstance(tool, str):
-                        tool_def = self._generate_tool_definition(tool)
-                        if tool_def:
-                            formatted_tools.append(tool_def)
-                    elif isinstance(tool, dict):
-                        formatted_tools.append(tool)
-                    elif hasattr(tool, "to_openai_tool"):
-                        formatted_tools.append(tool.to_openai_tool())
-                    elif callable(tool):
-                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
-            # Create async OpenAI client
-            async_client = AsyncOpenAI()
-
-            # Make the API call based on the type of request
-            if tools:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    tools=formatted_tools
-                )
-                return await self._achat_completion(response, tools)
-            elif output_json or output_pydantic:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    response_format={"type": "json_object"}
-                )
-                # Return the raw response
-                return response.choices[0].message.content
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
             else:
-
+                messages.append({"role": "user", "content": prompt})
+
+            reflection_count = 0
+            start_time = time.time()
+
+            while True:
+                try:
+                    if self.verbose:
+                        display_text = prompt
+                        if isinstance(prompt, list):
+                            display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+                        if display_text and str(display_text).strip():
+                            agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+                            await adisplay_instruction(
+                                f"Agent {self.name} is processing prompt: {display_text}",
+                                console=self.console,
+                                agent_name=self.name,
+                                agent_role=self.role,
+                                agent_tools=agent_tools
+                            )
+
+                    # Format tools if provided
+                    formatted_tools = []
+                    if tools:
+                        for tool in tools:
+                            if isinstance(tool, str):
+                                tool_def = self._generate_tool_definition(tool)
+                                if tool_def:
+                                    formatted_tools.append(tool_def)
+                            elif isinstance(tool, dict):
+                                formatted_tools.append(tool)
+                            elif hasattr(tool, "to_openai_tool"):
+                                formatted_tools.append(tool.to_openai_tool())
+                            elif callable(tool):
+                                formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+                    # Create async OpenAI client
+                    async_client = AsyncOpenAI()
+
+                    # Make the API call based on the type of request
+                    if tools:
+                        response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            tools=formatted_tools
+                        )
+                        return await self._achat_completion(response, tools)
+                    elif output_json or output_pydantic:
+                        response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            response_format={"type": "json_object"}
+                        )
+                        # Return the raw response
+                        return response.choices[0].message.content
+                    else:
+                        response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature
+                        )
+                        return response.choices[0].message.content
+                except Exception as e:
+                    display_error(f"Error in chat completion: {e}")
+                    return None
         except Exception as e:
-            display_error(f"Error in
+            display_error(f"Error in achat: {e}")
             return None
 
-    async def _achat_completion(self, response, tools):
+    async def _achat_completion(self, response, tools, reasoning_steps=False):
         """Async version of _chat_completion method"""
         try:
             message = response.choices[0].message
@@ -723,9 +1083,42 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 final_response = await async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
-                    temperature=0.2
+                    temperature=0.2,
+                    stream=True
                 )
-
+                full_response_text = ""
+                reasoning_content = ""
+                chunks = []
+                start_time = time.time()
+
+                with Live(
+                    display_generating("", start_time),
+                    console=self.console,
+                    refresh_per_second=4,
+                    transient=True,
+                    vertical_overflow="ellipsis",
+                    auto_refresh=True
+                ) as live:
+                    async for chunk in final_response:
+                        chunks.append(chunk)
+                        if chunk.choices[0].delta.content:
+                            full_response_text += chunk.choices[0].delta.content
+                            live.update(display_generating(full_response_text, start_time))
+
+                        if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                            rc = chunk.choices[0].delta.reasoning_content
+                            if rc:
+                                reasoning_content += rc
+                                live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+                self.console.print()
+
+                final_response = process_stream_chunks(chunks)
+                # Return only reasoning content if reasoning_steps is True
+                if reasoning_steps and hasattr(final_response.choices[0].message, 'reasoning_content'):
+                    return final_response.choices[0].message.reasoning_content
+                return final_response.choices[0].message.content if final_response else full_response_text
+
             except Exception as e:
                 display_error(f"Error in final chat completion: {e}")
                 return formatted_results
@@ -733,8 +1126,59 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 return None
         except Exception as e:
             display_error(f"Error in _achat_completion: {e}")
-            return None
+            return None
+
+    async def astart(self, prompt: str, **kwargs):
+        """Async version of start method"""
+        return await self.achat(prompt, **kwargs)
 
     def run(self):
         """Alias for start() method"""
-        return self.start()
+        return self.start()
+
+    def start(self, prompt: str, **kwargs):
+        """Start the agent with a prompt. This is a convenience method that wraps chat()."""
+        return self.chat(prompt, **kwargs)
+
+    async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
+        """Async version of execute_tool"""
+        try:
+            logging.info(f"Executing async tool: {function_name} with arguments: {arguments}")
+            # Try to find the function in the agent's tools list first
+            func = None
+            for tool in self.tools:
+                if (callable(tool) and getattr(tool, '__name__', '') == function_name):
+                    func = tool
+                    break
+
+            if func is None:
+                logging.error(f"Function {function_name} not found in tools")
+                return {"error": f"Function {function_name} not found in tools"}
+
+            try:
+                if inspect.iscoroutinefunction(func):
+                    logging.debug(f"Executing async function: {function_name}")
+                    result = await func(**arguments)
+                else:
+                    logging.debug(f"Executing sync function in executor: {function_name}")
+                    loop = asyncio.get_event_loop()
+                    result = await loop.run_in_executor(None, lambda: func(**arguments))
+
+                # Ensure result is JSON serializable
+                logging.debug(f"Raw result from tool: {result}")
+                if result is None:
+                    return {"result": None}
+                try:
+                    json.dumps(result)  # Test serialization
+                    return result
+                except TypeError:
+                    logging.warning(f"Result not JSON serializable, converting to string: {result}")
+                    return {"result": str(result)}
+
+            except Exception as e:
+                logging.error(f"Error executing {function_name}: {str(e)}", exc_info=True)
+                return {"error": f"Error executing {function_name}: {str(e)}"}
+
+        except Exception as e:
+            logging.error(f"Error in execute_tool_async: {str(e)}", exc_info=True)
+            return {"error": f"Error in execute_tool_async: {str(e)}"}