praisonaiagents 0.0.45__py3-none-any.whl → 0.0.47__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- praisonaiagents/__init__.py +0 -2
- praisonaiagents/agent/agent.py +413 -237
- praisonaiagents/agents/agents.py +33 -6
- praisonaiagents/llm/__init__.py +20 -0
- praisonaiagents/llm/llm.py +823 -0
- praisonaiagents/main.py +9 -0
- praisonaiagents/memory/memory.py +1 -1
- praisonaiagents/task/task.py +11 -0
- {praisonaiagents-0.0.45.dist-info → praisonaiagents-0.0.47.dist-info}/METADATA +5 -1
- {praisonaiagents-0.0.45.dist-info → praisonaiagents-0.0.47.dist-info}/RECORD +12 -10
- {praisonaiagents-0.0.45.dist-info → praisonaiagents-0.0.47.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.45.dist-info → praisonaiagents-0.0.47.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
```diff
@@ -192,10 +192,26 @@ class Agent:
         reflect_llm: Optional[str] = None,
         user_id: Optional[str] = None
     ):
+        # Add check at start if memory is requested
+        if memory is not None:
+            try:
+                from ..memory.memory import Memory
+                MEMORY_AVAILABLE = True
+            except ImportError:
+                raise ImportError(
+                    "Memory features requested in Agent but memory dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[memory]\""
+                )
+
         # Handle backward compatibility for required fields
         if all(x is None for x in [name, role, goal, backstory, instructions]):
             raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")

+        # Configure logging to suppress unwanted outputs
+        logging.getLogger("litellm").setLevel(logging.WARNING)
+        logging.getLogger("httpx").setLevel(logging.WARNING)
+        logging.getLogger("httpcore").setLevel(logging.WARNING)
+
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
             self.name = name or "Agent"
```
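The new constructor guard makes a memory request fail fast when the optional dependency is missing. A minimal sketch of how that surfaces to a caller, assuming `Agent` is importable from the package root and that `memory=True` is an acceptable value (the guard only checks that the argument is not None):

```python
from praisonaiagents import Agent

try:
    # Any non-None `memory` value triggers the dependency check added in this hunk.
    agent = Agent(instructions="Summarize my notes", memory=True)
except ImportError as exc:
    # Raised when the optional extra is absent:
    #   pip install "praisonaiagents[memory]"
    print(exc)
```
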
```diff
@@ -215,7 +231,34 @@ class Agent:

         self.instructions = instructions
         # Check for model name in environment variable if not provided
-        self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+        self._using_custom_llm = False
+
+        # If the user passes a dictionary (for advanced configuration)
+        if isinstance(llm, dict) and "model" in llm:
+            try:
+                from ..llm.llm import LLM
+                self.llm_instance = LLM(**llm)  # Pass all dict items as kwargs
+                self._using_custom_llm = True
+            except ImportError as e:
+                raise ImportError(
+                    "LLM features requested but dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[llm]\""
+                ) from e
+        # If the user passes a string with a slash (provider/model)
+        elif isinstance(llm, str) and "/" in llm:
+            try:
+                from ..llm.llm import LLM
+                # Pass the entire string so LiteLLM can parse provider/model
+                self.llm_instance = LLM(model=llm)
+                self._using_custom_llm = True
+            except ImportError as e:
+                raise ImportError(
+                    "LLM features requested but dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[llm]\""
+                ) from e
+        # Otherwise, fall back to OpenAI environment/name
+        else:
+            self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
         self.tools = tools if tools else []  # Store original tools
         self.function_calling_llm = function_calling_llm
         self.max_iter = max_iter
```
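With this change the `llm` argument can select a non-OpenAI backend through the new `praisonaiagents.llm.LLM` wrapper. A rough usage sketch, assuming `Agent` is exported from the package root; the dict keys other than `"model"` are illustrative, since the constructor simply forwards the whole dict to `LLM(**llm)`:

```python
from praisonaiagents import Agent

# Provider/model string (LiteLLM-style name) is routed to the new LLM wrapper.
# Requires the optional extra: pip install "praisonaiagents[llm]"
agent = Agent(
    instructions="You are a helpful assistant",
    llm="ollama/llama3.2",
)

# Dict form: every key is passed straight through as a keyword argument to LLM();
# "temperature" here is an assumed example of such a kwarg.
agent_advanced = Agent(
    instructions="You are a helpful assistant",
    llm={"model": "openai/gpt-4o-mini", "temperature": 0.7},
)

# A plain model name (or the OPENAI_MODEL_NAME env var) still falls back to the
# default OpenAI client path.
agent_default = Agent(instructions="You are a helpful assistant", llm="gpt-4o-mini")
```
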
```diff
@@ -483,185 +526,216 @@ Your Goal: {self.goal}
                 # Append found knowledge to the prompt
                 prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"

-        if self.use_system_prompt:
-            system_prompt = f"""{self.backstory}\n
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-            """
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        else:
-            system_prompt = None
-
-        messages = []
-        if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
-        messages.extend(self.chat_history)
-
-        # Modify prompt if output_json or output_pydantic is specified
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                # For multimodal prompts, append to the text content
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        if isinstance(prompt, list):
-            # If we receive a multimodal prompt list, place it directly in the user message
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
-
-        final_response_text = None
-        reflection_count = 0
-        start_time = time.time()
-
-        while True:
+        if self._using_custom_llm:
             try:
+                # Pass everything to LLM class
+                response_text = self.llm_instance.get_response(
+                    prompt=prompt,
+                    system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
+                    chat_history=self.chat_history,
+                    temperature=temperature,
+                    tools=tools,
+                    output_json=output_json,
+                    output_pydantic=output_pydantic,
+                    verbose=self.verbose,
+                    markdown=self.markdown,
+                    self_reflect=self.self_reflect,
+                    max_reflect=self.max_reflect,
+                    min_reflect=self.min_reflect,
+                    console=self.console,
+                    agent_name=self.name,
+                    agent_role=self.role,
+                    agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                    execute_tool_fn=self.execute_tool  # Pass tool execution function
+                )

+                self.chat_history.append({"role": "user", "content": prompt})
+                self.chat_history.append({"role": "assistant", "content": response_text})

+                return response_text
+            except Exception as e:
+                display_error(f"Error in LLM chat: {e}")
+                return None
+        else:
+            if self.use_system_prompt:
+                system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+                """
+                if output_json:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+                elif output_pydantic:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+            else:
+                system_prompt = None
+
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+            messages.extend(self.chat_history)
+
+            # Modify prompt if output_json or output_pydantic is specified
+            original_prompt = prompt
+            if output_json or output_pydantic:
+                if isinstance(prompt, str):
+                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                elif isinstance(prompt, list):
+                    # For multimodal prompts, append to the text content
+                    for item in prompt:
+                        if item["type"] == "text":
+                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                            break

+            if isinstance(prompt, list):
+                # If we receive a multimodal prompt list, place it directly in the user message
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})

+            final_response_text = None
+            reflection_count = 0
+            start_time = time.time()

-                        else:
-                            messages.append({
-                                "role": "tool",
-                                "tool_call_id": tool_call.id,
-                                "content": "Function returned an empty output"
-                            })
+            while True:
+                try:
+                    if self.verbose:
+                        # Handle both string and list prompts for instruction display
+                        display_text = prompt
+                        if isinstance(prompt, list):
+                            # Extract text content from multimodal prompt
+                            display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")

+                        if display_text and str(display_text).strip():
+                            # Pass agent information to display_instruction
+                            agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+                            display_instruction(
+                                f"Agent {self.name} is processing prompt: {display_text}",
+                                console=self.console,
+                                agent_name=self.name,
+                                agent_role=self.role,
+                                agent_tools=agent_tools
+                            )
+
+                    response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
                     if not response:
                         return None
+
+                    tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
                     response_text = response.choices[0].message.content.strip()

+                    if tool_calls:
+                        messages.append({
+                            "role": "assistant",
+                            "content": response_text,
+                            "tool_calls": tool_calls
+                        })
+
+                        for tool_call in tool_calls:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments)

+                            if self.verbose:
+                                display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}", console=self.console)
+
+                            tool_result = self.execute_tool(function_name, arguments)
+
+                            if tool_result:
+                                if self.verbose:
+                                    display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=self.console)
+                                messages.append({
+                                    "role": "tool",
+                                    "tool_call_id": tool_call.id,
+                                    "content": json.dumps(tool_result)
+                                })
+                            else:
+                                messages.append({
+                                    "role": "tool",
+                                    "tool_call_id": tool_call.id,
+                                    "content": "Function returned an empty output"
+                                })
+
+                        response = self._chat_completion(messages, temperature=temperature)
+                        if not response:
+                            return None
+                        response_text = response.choices[0].message.content.strip()
+
+                    # Handle output_json or output_pydantic if specified
+                    if output_json or output_pydantic:
+                        # Add to chat history and return raw response
+                        self.chat_history.append({"role": "user", "content": original_prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        if self.verbose:
+                            display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                                generation_time=time.time() - start_time, console=self.console)
+                        return response_text
+
+                    if not self.self_reflect:
+                        self.chat_history.append({"role": "user", "content": original_prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        if self.verbose:
+                            logging.debug(f"Agent {self.name} final response: {response_text}")
+                        display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                        return response_text

-            reflection_prompt = f"""
+                    reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
 Identify any flaws, improvements, or actions.
 Provide a "satisfactory" status ('yes' or 'no').
 Output MUST be JSON with 'reflection' and 'satisfactory'.
+                    """
+                    logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
+                    messages.append({"role": "user", "content": reflection_prompt})

-            reflection_output = reflection_response.choices[0].message.parsed
-
-            if self.verbose:
-                display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
+                    try:
+                        reflection_response = client.beta.chat.completions.parse(
+                            model=self.reflect_llm if self.reflect_llm else self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            response_format=ReflectionOutput
+                        )

+                        reflection_output = reflection_response.choices[0].message.parsed

-            # Only consider satisfactory after minimum reflections
-            if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
                         if self.verbose:
-                display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
-                self.chat_history.append({"role": "user", "content": prompt})
-                self.chat_history.append({"role": "assistant", "content": response_text})
-                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                return response_text
+                            display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)

-            if reflection_count >= self.max_reflect - 1:
-                if self.verbose:
-                    display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
-                self.chat_history.append({"role": "user", "content": prompt})
-                self.chat_history.append({"role": "assistant", "content": response_text})
-                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                return response_text
+                        messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})

+                        # Only consider satisfactory after minimum reflections
+                        if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+                            if self.verbose:
+                                display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+                            self.chat_history.append({"role": "user", "content": prompt})
+                            self.chat_history.append({"role": "assistant", "content": response_text})
+                            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                            return response_text
+
+                        # Check if we've hit max reflections
+                        if reflection_count >= self.max_reflect - 1:
+                            if self.verbose:
+                                display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
+                            self.chat_history.append({"role": "user", "content": prompt})
+                            self.chat_history.append({"role": "assistant", "content": response_text})
+                            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                            return response_text
+
+                        logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
+                        messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
+                        response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
+                        response_text = response.choices[0].message.content.strip()
+                        reflection_count += 1
+                        continue  # Continue the loop for more reflections

+                    except Exception as e:
+                        display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
+                        logging.error("Reflection parsing failed.", exc_info=True)
+                        messages.append({"role": "assistant", "content": f"Self Reflection failed."})
+                        reflection_count += 1
+                        continue  # Continue even after error to try again
+
                 except Exception as e:
-                    display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
-
-                    messages.append({"role": "assistant", "content": f"Self Reflection failed."})
-                    reflection_count += 1
-                    continue  # Continue even after error to try again
-
-        except Exception as e:
-            display_error(f"Error in chat: {e}", console=self.console)
-            return None
+                    display_error(f"Error in chat: {e}", console=self.console)
+                    return None

     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
```
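The reworked `chat()` keeps the OpenAI tool-calling loop and the self-reflection cycle for the default path, and hands everything over to `LLM.get_response()` when a custom provider is configured. A rough usage sketch of the default path; the tool function is invented for illustration, and the reflection keyword arguments are assumed to map onto the attributes this hunk reads (`self.self_reflect`, `self.min_reflect`, `self.max_reflect`):

```python
from praisonaiagents import Agent

def get_stock_price(symbol: str) -> str:
    """Toy tool; chat() JSON-serializes its return value into a tool message."""
    return f"{symbol}: 123.45 USD"

agent = Agent(
    instructions="You are a financial assistant",
    tools=[get_stock_price],  # callables are matched by __name__ when the model calls them
    self_reflect=True,        # reflection loop bounded by min_reflect/max_reflect
    min_reflect=1,
    max_reflect=3,
    verbose=True,
)

answer = agent.chat("What is AAPL trading at?")
print(answer)
```
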
````diff
@@ -673,98 +747,153 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[len("```"):].strip()
         if cleaned.endswith("```"):
             cleaned = cleaned[:-3].strip()
-        return cleaned
+        return cleaned

-    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
+    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
         """Async version of chat method"""
         try:
-            messages = [
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if (output_json or output_pydantic) else "")}
-            ]
-            else:
-                # For multimodal prompts
-                messages = [
-                    {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": prompt}
-                ]
-                if output_json or output_pydantic:
-                    # Add JSON instruction to text content
-                    for item in messages[-1]["content"]:
-                        if item["type"] == "text":
-                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                            break
+            # Search for existing knowledge if any knowledge is provided
+            if self.knowledge:
+                search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
+                if search_results:
+                    if isinstance(search_results, dict) and 'results' in search_results:
+                        knowledge_content = "\n".join([result['memory'] for result in search_results['results']])
+                    else:
+                        knowledge_content = "\n".join(search_results)
+                    prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"

+            if self._using_custom_llm:
+                try:
+                    response_text = await self.llm_instance.get_response_async(
+                        prompt=prompt,
+                        system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
+                        chat_history=self.chat_history,
+                        temperature=temperature,
+                        tools=tools,
+                        output_json=output_json,
+                        output_pydantic=output_pydantic,
+                        verbose=self.verbose,
+                        markdown=self.markdown,
+                        self_reflect=self.self_reflect,
+                        max_reflect=self.max_reflect,
+                        min_reflect=self.min_reflect,
                         console=self.console,
                         agent_name=self.name,
                         agent_role=self.role,
-                        agent_tools=
+                        agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                        execute_tool_fn=self.execute_tool_async
                     )

-            # Make the API call based on the type of request
-            if tools:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    tools=formatted_tools
-                )
-                return await self._achat_completion(response, tools)
-            elif output_json or output_pydantic:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    response_format={"type": "json_object"}
-                )
-                # Return the raw response
-                return response.choices[0].message.content
+                    self.chat_history.append({"role": "user", "content": prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+
+                    return response_text
+                except Exception as e:
+                    display_error(f"Error in LLM chat: {e}")
+                    return None
+
+            # For OpenAI client
+            if self.use_system_prompt:
+                system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+                """
+                if output_json:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+                elif output_pydantic:
+                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
             else:
+                system_prompt = None
+
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+            messages.extend(self.chat_history)
+
+            # Modify prompt if output_json or output_pydantic is specified
+            original_prompt = prompt
+            if output_json or output_pydantic:
+                if isinstance(prompt, str):
+                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                elif isinstance(prompt, list):
+                    for item in prompt:
+                        if item["type"] == "text":
+                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                            break
+
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            reflection_count = 0
+            start_time = time.time()
+
+            while True:
+                try:
+                    if self.verbose:
+                        display_text = prompt
+                        if isinstance(prompt, list):
+                            display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+                        if display_text and str(display_text).strip():
+                            agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+                            await adisplay_instruction(
+                                f"Agent {self.name} is processing prompt: {display_text}",
+                                console=self.console,
+                                agent_name=self.name,
+                                agent_role=self.role,
+                                agent_tools=agent_tools
+                            )
+
+                    # Format tools if provided
+                    formatted_tools = []
+                    if tools:
+                        for tool in tools:
+                            if isinstance(tool, str):
+                                tool_def = self._generate_tool_definition(tool)
+                                if tool_def:
+                                    formatted_tools.append(tool_def)
+                            elif isinstance(tool, dict):
+                                formatted_tools.append(tool)
+                            elif hasattr(tool, "to_openai_tool"):
+                                formatted_tools.append(tool.to_openai_tool())
+                            elif callable(tool):
+                                formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+                    # Create async OpenAI client
+                    async_client = AsyncOpenAI()
+
+                    # Make the API call based on the type of request
+                    if tools:
+                        response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            tools=formatted_tools
+                        )
+                        return await self._achat_completion(response, tools)
+                    elif output_json or output_pydantic:
+                        response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            response_format={"type": "json_object"}
+                        )
+                        # Return the raw response
+                        return response.choices[0].message.content
+                    else:
+                        response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature
+                        )
+                        return response.choices[0].message.content
+                except Exception as e:
+                    display_error(f"Error in chat completion: {e}")
+                    return None
         except Exception as e:
-            display_error(f"Error in
+            display_error(f"Error in achat: {e}")
             return None

     async def _achat_completion(self, response, tools):
````
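`achat()` now mirrors `chat()` for asyncio callers: it runs the same knowledge search, routes to `get_response_async()` when a custom LLM is configured, and otherwise calls `AsyncOpenAI` directly. A minimal sketch of invoking it, assuming `Agent` is importable from the package root:

```python
import asyncio
from praisonaiagents import Agent

async def main() -> None:
    agent = Agent(instructions="You are a concise research assistant")
    # Async path; falls back to the AsyncOpenAI client unless a custom llm was configured.
    result = await agent.achat("Give me three facts about asyncio")
    print(result)

asyncio.run(main())
```
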
```diff
@@ -825,10 +954,57 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             display_error(f"Error in _achat_completion: {e}")
             return None

+    async def astart(self, prompt: str, **kwargs):
+        """Async version of start method"""
+        return await self.achat(prompt, **kwargs)
+
     def run(self):
         """Alias for start() method"""
         return self.start()

     def start(self, prompt: str, **kwargs):
         """Start the agent with a prompt. This is a convenience method that wraps chat()."""
-        return self.chat(prompt, **kwargs)
+        return self.chat(prompt, **kwargs)
+
+    async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
+        """Async version of execute_tool"""
+        try:
+            logging.info(f"Executing async tool: {function_name} with arguments: {arguments}")
+            # Try to find the function in the agent's tools list first
+            func = None
+            for tool in self.tools:
+                if (callable(tool) and getattr(tool, '__name__', '') == function_name):
+                    func = tool
+                    break
+
+            if func is None:
+                logging.error(f"Function {function_name} not found in tools")
+                return {"error": f"Function {function_name} not found in tools"}
+
+            try:
+                if inspect.iscoroutinefunction(func):
+                    logging.debug(f"Executing async function: {function_name}")
+                    result = await func(**arguments)
+                else:
+                    logging.debug(f"Executing sync function in executor: {function_name}")
+                    loop = asyncio.get_event_loop()
+                    result = await loop.run_in_executor(None, lambda: func(**arguments))
+
+                # Ensure result is JSON serializable
+                logging.debug(f"Raw result from tool: {result}")
+                if result is None:
+                    return {"result": None}
+                try:
+                    json.dumps(result)  # Test serialization
+                    return result
+                except TypeError:
+                    logging.warning(f"Result not JSON serializable, converting to string: {result}")
+                    return {"result": str(result)}
+
+            except Exception as e:
+                logging.error(f"Error executing {function_name}: {str(e)}", exc_info=True)
+                return {"error": f"Error executing {function_name}: {str(e)}"}
+
+        except Exception as e:
+            logging.error(f"Error in execute_tool_async: {str(e)}", exc_info=True)
+            return {"error": f"Error in execute_tool_async: {str(e)}"}
```