praisonaiagents 0.0.46__py3-none-any.whl → 0.0.47__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registries.
@@ -207,6 +207,11 @@ class Agent:
 if all(x is None for x in [name, role, goal, backstory, instructions]):
 raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")

+ # Configure logging to suppress unwanted outputs
+ logging.getLogger("litellm").setLevel(logging.WARNING)
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+ logging.getLogger("httpcore").setLevel(logging.WARNING)
+
 # If instructions are provided, use them to set role, goal, and backstory
 if instructions:
 self.name = name or "Agent"
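The hunk above pins three third-party loggers to WARNING inside Agent.__init__, so litellm/httpx request chatter no longer floods the console by default. A minimal sketch of how a caller could restore the verbose output after constructing an agent, using only the standard library (the logger names are taken from the diff; the chosen levels are illustrative):

    import logging

    # Re-enable the request/debug logs that Agent.__init__ now silences
    logging.getLogger("litellm").setLevel(logging.DEBUG)
    logging.getLogger("httpx").setLevel(logging.INFO)
    logging.getLogger("httpcore").setLevel(logging.INFO)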
@@ -226,7 +231,34 @@ class Agent:

 self.instructions = instructions
 # Check for model name in environment variable if not provided
- self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+ self._using_custom_llm = False
+
+ # If the user passes a dictionary (for advanced configuration)
+ if isinstance(llm, dict) and "model" in llm:
+ try:
+ from ..llm.llm import LLM
+ self.llm_instance = LLM(**llm) # Pass all dict items as kwargs
+ self._using_custom_llm = True
+ except ImportError as e:
+ raise ImportError(
+ "LLM features requested but dependencies not installed. "
+ "Please install with: pip install \"praisonaiagents[llm]\""
+ ) from e
+ # If the user passes a string with a slash (provider/model)
+ elif isinstance(llm, str) and "/" in llm:
+ try:
+ from ..llm.llm import LLM
+ # Pass the entire string so LiteLLM can parse provider/model
+ self.llm_instance = LLM(model=llm)
+ self._using_custom_llm = True
+ except ImportError as e:
+ raise ImportError(
+ "LLM features requested but dependencies not installed. "
+ "Please install with: pip install \"praisonaiagents[llm]\""
+ ) from e
+ # Otherwise, fall back to OpenAI environment/name
+ else:
+ self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
 self.tools = tools if tools else [] # Store original tools
 self.function_calling_llm = function_calling_llm
 self.max_iter = max_iter
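This hunk is the user-visible part of the change: the llm argument of Agent now accepts, besides a plain model name, either a LiteLLM-style "provider/model" string or a dict with a "model" key, both routed through the new LLM wrapper (installed via the praisonaiagents[llm] extra). A hedged usage sketch; the model identifiers and the extra dict key are illustrative, not taken from the diff:

    from praisonaiagents import Agent

    # Plain name (or nothing): falls back to OPENAI_MODEL_NAME / "gpt-4o" as before
    default_agent = Agent(instructions="You are a helpful assistant")

    # "provider/model" string: handled by the LLM wrapper via LiteLLM
    gemini_agent = Agent(
        instructions="You are a helpful assistant",
        llm="gemini/gemini-1.5-flash",  # illustrative model id
    )

    # Dict form: every key is forwarded to LLM(**llm); which extra keys are
    # supported depends on that class, so treat them as assumptions
    local_agent = Agent(
        instructions="You are a helpful assistant",
        llm={"model": "ollama/llama3.1", "base_url": "http://localhost:11434"},
    )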
@@ -494,185 +526,216 @@ Your Goal: {self.goal}
 # Append found knowledge to the prompt
 prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"

- if self.use_system_prompt:
- system_prompt = f"""{self.backstory}\n
- Your Role: {self.role}\n
- Your Goal: {self.goal}
- """
- if output_json:
- system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
- elif output_pydantic:
- system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
- else:
- system_prompt = None
-
- messages = []
- if system_prompt:
- messages.append({"role": "system", "content": system_prompt})
- messages.extend(self.chat_history)
-
- # Modify prompt if output_json or output_pydantic is specified
- original_prompt = prompt
- if output_json or output_pydantic:
- if isinstance(prompt, str):
- prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
- elif isinstance(prompt, list):
- # For multimodal prompts, append to the text content
- for item in prompt:
- if item["type"] == "text":
- item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
- break
-
- if isinstance(prompt, list):
- # If we receive a multimodal prompt list, place it directly in the user message
- messages.append({"role": "user", "content": prompt})
- else:
- messages.append({"role": "user", "content": prompt})
-
- final_response_text = None
- reflection_count = 0
- start_time = time.time()
-
- while True:
+ if self._using_custom_llm:
 try:
- if self.verbose:
- # Handle both string and list prompts for instruction display
- display_text = prompt
- if isinstance(prompt, list):
- # Extract text content from multimodal prompt
- display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
-
- if display_text and str(display_text).strip():
- # Pass agent information to display_instruction
- agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
- display_instruction(
- f"Agent {self.name} is processing prompt: {display_text}",
- console=self.console,
- agent_name=self.name,
- agent_role=self.role,
- agent_tools=agent_tools
- )
-
- response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
- if not response:
- return None
+ # Pass everything to LLM class
+ response_text = self.llm_instance.get_response(
+ prompt=prompt,
+ system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
+ chat_history=self.chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ console=self.console,
+ agent_name=self.name,
+ agent_role=self.role,
+ agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+ execute_tool_fn=self.execute_tool # Pass tool execution function
+ )

- tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
- response_text = response.choices[0].message.content.strip()
+ self.chat_history.append({"role": "user", "content": prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})

- if tool_calls:
- messages.append({
- "role": "assistant",
- "content": response_text,
- "tool_calls": tool_calls
- })
-
- for tool_call in tool_calls:
- function_name = tool_call.function.name
- arguments = json.loads(tool_call.function.arguments)
+ return response_text
+ except Exception as e:
+ display_error(f"Error in LLM chat: {e}")
+ return None
+ else:
+ if self.use_system_prompt:
+ system_prompt = f"""{self.backstory}\n
+ Your Role: {self.role}\n
+ Your Goal: {self.goal}
+ """
+ if output_json:
+ system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+ elif output_pydantic:
+ system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+ else:
+ system_prompt = None
+
+ messages = []
+ if system_prompt:
+ messages.append({"role": "system", "content": system_prompt})
+ messages.extend(self.chat_history)
+
+ # Modify prompt if output_json or output_pydantic is specified
+ original_prompt = prompt
+ if output_json or output_pydantic:
+ if isinstance(prompt, str):
+ prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+ elif isinstance(prompt, list):
+ # For multimodal prompts, append to the text content
+ for item in prompt:
+ if item["type"] == "text":
+ item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+ break

- if self.verbose:
- display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}", console=self.console)
+ if isinstance(prompt, list):
+ # If we receive a multimodal prompt list, place it directly in the user message
+ messages.append({"role": "user", "content": prompt})
+ else:
+ messages.append({"role": "user", "content": prompt})

- tool_result = self.execute_tool(function_name, arguments)
+ final_response_text = None
+ reflection_count = 0
+ start_time = time.time()

- if tool_result:
- if self.verbose:
- display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=self.console)
- messages.append({
- "role": "tool",
- "tool_call_id": tool_call.id,
- "content": json.dumps(tool_result)
- })
- else:
- messages.append({
- "role": "tool",
- "tool_call_id": tool_call.id,
- "content": "Function returned an empty output"
- })
+ while True:
+ try:
+ if self.verbose:
+ # Handle both string and list prompts for instruction display
+ display_text = prompt
+ if isinstance(prompt, list):
+ # Extract text content from multimodal prompt
+ display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")

- response = self._chat_completion(messages, temperature=temperature)
+ if display_text and str(display_text).strip():
+ # Pass agent information to display_instruction
+ agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+ display_instruction(
+ f"Agent {self.name} is processing prompt: {display_text}",
+ console=self.console,
+ agent_name=self.name,
+ agent_role=self.role,
+ agent_tools=agent_tools
+ )
+
+ response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
 if not response:
 return None
+
+ tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
 response_text = response.choices[0].message.content.strip()

- # Handle output_json or output_pydantic if specified
- if output_json or output_pydantic:
- # Add to chat history and return raw response
- self.chat_history.append({"role": "user", "content": original_prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
- if self.verbose:
- display_interaction(original_prompt, response_text, markdown=self.markdown,
- generation_time=time.time() - start_time, console=self.console)
- return response_text
+ if tool_calls:
+ messages.append({
+ "role": "assistant",
+ "content": response_text,
+ "tool_calls": tool_calls
+ })
+
+ for tool_call in tool_calls:
+ function_name = tool_call.function.name
+ arguments = json.loads(tool_call.function.arguments)

- if not self.self_reflect:
- self.chat_history.append({"role": "user", "content": original_prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
- if self.verbose:
- logging.debug(f"Agent {self.name} final response: {response_text}")
- display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
- return response_text
+ if self.verbose:
+ display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}", console=self.console)
+
+ tool_result = self.execute_tool(function_name, arguments)
+
+ if tool_result:
+ if self.verbose:
+ display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=self.console)
+ messages.append({
+ "role": "tool",
+ "tool_call_id": tool_call.id,
+ "content": json.dumps(tool_result)
+ })
+ else:
+ messages.append({
+ "role": "tool",
+ "tool_call_id": tool_call.id,
+ "content": "Function returned an empty output"
+ })
+
+ response = self._chat_completion(messages, temperature=temperature)
+ if not response:
+ return None
+ response_text = response.choices[0].message.content.strip()
+
+ # Handle output_json or output_pydantic if specified
+ if output_json or output_pydantic:
+ # Add to chat history and return raw response
+ self.chat_history.append({"role": "user", "content": original_prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ if self.verbose:
+ display_interaction(original_prompt, response_text, markdown=self.markdown,
+ generation_time=time.time() - start_time, console=self.console)
+ return response_text
+
+ if not self.self_reflect:
+ self.chat_history.append({"role": "user", "content": original_prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ if self.verbose:
+ logging.debug(f"Agent {self.name} final response: {response_text}")
+ display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ return response_text

- reflection_prompt = f"""
+ reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
 Identify any flaws, improvements, or actions.
 Provide a "satisfactory" status ('yes' or 'no').
 Output MUST be JSON with 'reflection' and 'satisfactory'.
- """
- logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
- messages.append({"role": "user", "content": reflection_prompt})
+ """
+ logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
+ messages.append({"role": "user", "content": reflection_prompt})

- try:
- reflection_response = client.beta.chat.completions.parse(
- model=self.reflect_llm if self.reflect_llm else self.llm,
- messages=messages,
- temperature=temperature,
- response_format=ReflectionOutput
- )
-
- reflection_output = reflection_response.choices[0].message.parsed
-
- if self.verbose:
- display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
+ try:
+ reflection_response = client.beta.chat.completions.parse(
+ model=self.reflect_llm if self.reflect_llm else self.llm,
+ messages=messages,
+ temperature=temperature,
+ response_format=ReflectionOutput
+ )

- messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
+ reflection_output = reflection_response.choices[0].message.parsed

- # Only consider satisfactory after minimum reflections
- if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
 if self.verbose:
- display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
- self.chat_history.append({"role": "user", "content": prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
- return response_text
+ display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)

- # Check if we've hit max reflections
- if reflection_count >= self.max_reflect - 1:
- if self.verbose:
- display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
- self.chat_history.append({"role": "user", "content": prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
- return response_text
+ messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})

- logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
- messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
- response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
- response_text = response.choices[0].message.content.strip()
- reflection_count += 1
- continue # Continue the loop for more reflections
+ # Only consider satisfactory after minimum reflections
+ if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+ if self.verbose:
+ display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+ self.chat_history.append({"role": "user", "content": prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ return response_text
+
+ # Check if we've hit max reflections
+ if reflection_count >= self.max_reflect - 1:
+ if self.verbose:
+ display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
+ self.chat_history.append({"role": "user", "content": prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ return response_text
+
+ logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
+ messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
+ response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
+ response_text = response.choices[0].message.content.strip()
+ reflection_count += 1
+ continue # Continue the loop for more reflections

+ except Exception as e:
+ display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
+ logging.error("Reflection parsing failed.", exc_info=True)
+ messages.append({"role": "assistant", "content": f"Self Reflection failed."})
+ reflection_count += 1
+ continue # Continue even after error to try again
+
 except Exception as e:
- display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
- logging.error("Reflection parsing failed.", exc_info=True)
- messages.append({"role": "assistant", "content": f"Self Reflection failed."})
- reflection_count += 1
- continue # Continue even after error to try again
-
- except Exception as e:
- display_error(f"Error in chat: {e}", console=self.console)
- return None
+ display_error(f"Error in chat: {e}", console=self.console)
+ return None

 def clean_json_output(self, output: str) -> str:
 """Clean and extract JSON from response text."""
@@ -684,98 +747,153 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 cleaned = cleaned[len("```"):].strip()
 if cleaned.endswith("```"):
 cleaned = cleaned[:-3].strip()
- return cleaned
+ return cleaned

- async def achat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
+ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
 """Async version of chat method"""
 try:
- # Build system prompt
- system_prompt = self.system_prompt
- if output_json:
- system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
- elif output_pydantic:
- system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-
- # Build messages
- if isinstance(prompt, str):
- messages = [
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if (output_json or output_pydantic) else "")}
- ]
- else:
- # For multimodal prompts
- messages = [
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": prompt}
- ]
- if output_json or output_pydantic:
- # Add JSON instruction to text content
- for item in messages[-1]["content"]:
- if item["type"] == "text":
- item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
- break
+ # Search for existing knowledge if any knowledge is provided
+ if self.knowledge:
+ search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
+ if search_results:
+ if isinstance(search_results, dict) and 'results' in search_results:
+ knowledge_content = "\n".join([result['memory'] for result in search_results['results']])
+ else:
+ knowledge_content = "\n".join(search_results)
+ prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"

- # Display instruction with agent info if verbose
- if self.verbose:
- display_text = prompt
- if isinstance(prompt, list):
- display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
-
- if display_text and str(display_text).strip():
- agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
- await adisplay_instruction(
- f"Agent {self.name} is processing prompt: {display_text}",
+ if self._using_custom_llm:
+ try:
+ response_text = await self.llm_instance.get_response_async(
+ prompt=prompt,
+ system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
+ chat_history=self.chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
 console=self.console,
 agent_name=self.name,
 agent_role=self.role,
- agent_tools=agent_tools
+ agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+ execute_tool_fn=self.execute_tool_async
 )

- # Format tools if provided
- formatted_tools = []
- if tools:
- for tool in tools:
- if isinstance(tool, str):
- tool_def = self._generate_tool_definition(tool)
- if tool_def:
- formatted_tools.append(tool_def)
- elif isinstance(tool, dict):
- formatted_tools.append(tool)
- elif hasattr(tool, "to_openai_tool"):
- formatted_tools.append(tool.to_openai_tool())
- elif callable(tool):
- formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
- # Create async OpenAI client
- async_client = AsyncOpenAI()
-
- # Make the API call based on the type of request
- if tools:
- response = await async_client.chat.completions.create(
- model=self.llm,
- messages=messages,
- temperature=temperature,
- tools=formatted_tools
- )
- return await self._achat_completion(response, tools)
- elif output_json or output_pydantic:
- response = await async_client.chat.completions.create(
- model=self.llm,
- messages=messages,
- temperature=temperature,
- response_format={"type": "json_object"}
- )
- # Return the raw response
- return response.choices[0].message.content
+ self.chat_history.append({"role": "user", "content": prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+
+ return response_text
+ except Exception as e:
+ display_error(f"Error in LLM chat: {e}")
+ return None
+
+ # For OpenAI client
+ if self.use_system_prompt:
+ system_prompt = f"""{self.backstory}\n
+ Your Role: {self.role}\n
+ Your Goal: {self.goal}
+ """
+ if output_json:
+ system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+ elif output_pydantic:
+ system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
 else:
- response = await async_client.chat.completions.create(
- model=self.llm,
- messages=messages,
- temperature=temperature
- )
- return response.choices[0].message.content
+ system_prompt = None
+
+ messages = []
+ if system_prompt:
+ messages.append({"role": "system", "content": system_prompt})
+ messages.extend(self.chat_history)
+
+ # Modify prompt if output_json or output_pydantic is specified
+ original_prompt = prompt
+ if output_json or output_pydantic:
+ if isinstance(prompt, str):
+ prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+ elif isinstance(prompt, list):
+ for item in prompt:
+ if item["type"] == "text":
+ item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+ break
+
+ if isinstance(prompt, list):
+ messages.append({"role": "user", "content": prompt})
+ else:
+ messages.append({"role": "user", "content": prompt})
+
+ reflection_count = 0
+ start_time = time.time()
+
+ while True:
+ try:
+ if self.verbose:
+ display_text = prompt
+ if isinstance(prompt, list):
+ display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+ if display_text and str(display_text).strip():
+ agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+ await adisplay_instruction(
+ f"Agent {self.name} is processing prompt: {display_text}",
+ console=self.console,
+ agent_name=self.name,
+ agent_role=self.role,
+ agent_tools=agent_tools
+ )
+
+ # Format tools if provided
+ formatted_tools = []
+ if tools:
+ for tool in tools:
+ if isinstance(tool, str):
+ tool_def = self._generate_tool_definition(tool)
+ if tool_def:
+ formatted_tools.append(tool_def)
+ elif isinstance(tool, dict):
+ formatted_tools.append(tool)
+ elif hasattr(tool, "to_openai_tool"):
+ formatted_tools.append(tool.to_openai_tool())
+ elif callable(tool):
+ formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+ # Create async OpenAI client
+ async_client = AsyncOpenAI()
+
+ # Make the API call based on the type of request
+ if tools:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature,
+ tools=formatted_tools
+ )
+ return await self._achat_completion(response, tools)
+ elif output_json or output_pydantic:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature,
+ response_format={"type": "json_object"}
+ )
+ # Return the raw response
+ return response.choices[0].message.content
+ else:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature
+ )
+ return response.choices[0].message.content
+ except Exception as e:
+ display_error(f"Error in chat completion: {e}")
+ return None
 except Exception as e:
- display_error(f"Error in chat completion: {e}")
+ display_error(f"Error in achat: {e}")
 return None

 async def _achat_completion(self, response, tools):
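achat() now mirrors chat(): it gains the knowledge search, the same custom-LLM branch (via get_response_async with execute_tool_async), and the OpenAI path is wrapped in a while/try loop with a dedicated "Error in achat" handler. A minimal async usage sketch, assuming credentials for the default OpenAI path are configured:

    import asyncio
    from praisonaiagents import Agent

    agent = Agent(instructions="You are a concise researcher")

    async def main():
        # Same keyword arguments as chat(); awaitable end to end
        answer = await agent.achat("List three changes introduced in 0.0.47", temperature=0.2)
        print(answer)

    asyncio.run(main())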
@@ -836,10 +954,57 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 display_error(f"Error in _achat_completion: {e}")
 return None

+ async def astart(self, prompt: str, **kwargs):
+ """Async version of start method"""
+ return await self.achat(prompt, **kwargs)
+
 def run(self):
 """Alias for start() method"""
 return self.start()

 def start(self, prompt: str, **kwargs):
 """Start the agent with a prompt. This is a convenience method that wraps chat()."""
- return self.chat(prompt, **kwargs)
+ return self.chat(prompt, **kwargs)
+
+ async def execute_tool_async(self, function_name: str, arguments: Dict[str, Any]) -> Any:
+ """Async version of execute_tool"""
+ try:
+ logging.info(f"Executing async tool: {function_name} with arguments: {arguments}")
+ # Try to find the function in the agent's tools list first
+ func = None
+ for tool in self.tools:
+ if (callable(tool) and getattr(tool, '__name__', '') == function_name):
+ func = tool
+ break
+
+ if func is None:
+ logging.error(f"Function {function_name} not found in tools")
+ return {"error": f"Function {function_name} not found in tools"}
+
+ try:
+ if inspect.iscoroutinefunction(func):
+ logging.debug(f"Executing async function: {function_name}")
+ result = await func(**arguments)
+ else:
+ logging.debug(f"Executing sync function in executor: {function_name}")
+ loop = asyncio.get_event_loop()
+ result = await loop.run_in_executor(None, lambda: func(**arguments))
+
+ # Ensure result is JSON serializable
+ logging.debug(f"Raw result from tool: {result}")
+ if result is None:
+ return {"result": None}
+ try:
+ json.dumps(result) # Test serialization
+ return result
+ except TypeError:
+ logging.warning(f"Result not JSON serializable, converting to string: {result}")
+ return {"result": str(result)}
+
+ except Exception as e:
+ logging.error(f"Error executing {function_name}: {str(e)}", exc_info=True)
+ return {"error": f"Error executing {function_name}: {str(e)}"}
+
+ except Exception as e:
+ logging.error(f"Error in execute_tool_async: {str(e)}", exc_info=True)
+ return {"error": f"Error in execute_tool_async: {str(e)}"}