praisonaiagents 0.0.113__tar.gz → 0.0.114__tar.gz

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (70)
  1. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agent/agent.py +187 -124
  3. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/llm/__init__.py +2 -1
  4. praisonaiagents-0.0.114/praisonaiagents/llm/openai_client.py +260 -0
  5. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/PKG-INFO +1 -1
  6. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/SOURCES.txt +1 -0
  7. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/pyproject.toml +1 -1
  8. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/README.md +0 -0
  9. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/__init__.py +0 -0
  10. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agent/__init__.py +0 -0
  11. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agent/handoff.py +0 -0
  12. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agent/image_agent.py +0 -0
  13. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agents/__init__.py +0 -0
  14. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agents/agents.py +0 -0
  15. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agents/autoagents.py +0 -0
  16. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/approval.py +0 -0
  17. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/guardrails/__init__.py +0 -0
  18. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  19. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  20. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/knowledge/__init__.py +0 -0
  21. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/knowledge/chunking.py +0 -0
  22. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/knowledge/knowledge.py +0 -0
  23. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/llm/llm.py +0 -0
  24. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/main.py +0 -0
  25. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/mcp/__init__.py +0 -0
  26. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/mcp/mcp.py +0 -0
  27. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/mcp/mcp_sse.py +0 -0
  28. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/memory/__init__.py +0 -0
  29. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/memory/memory.py +0 -0
  30. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/process/__init__.py +0 -0
  31. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/process/process.py +0 -0
  32. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/session.py +0 -0
  33. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/task/__init__.py +0 -0
  34. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/task/task.py +0 -0
  35. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/telemetry/__init__.py +0 -0
  36. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/telemetry/integration.py +0 -0
  37. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/telemetry/telemetry.py +0 -0
  38. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/README.md +0 -0
  39. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/__init__.py +0 -0
  40. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/arxiv_tools.py +0 -0
  41. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/calculator_tools.py +0 -0
  42. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/csv_tools.py +0 -0
  43. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/duckdb_tools.py +0 -0
  44. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  45. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/excel_tools.py +0 -0
  46. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/file_tools.py +0 -0
  47. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/json_tools.py +0 -0
  48. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/newspaper_tools.py +0 -0
  49. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/pandas_tools.py +0 -0
  50. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/python_tools.py +0 -0
  51. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/searxng_tools.py +0 -0
  52. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/shell_tools.py +0 -0
  53. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/spider_tools.py +0 -0
  54. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/test.py +0 -0
  55. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/tools.py +0 -0
  56. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  57. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  58. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/xml_tools.py +0 -0
  59. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/yaml_tools.py +0 -0
  60. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/tools/yfinance_tools.py +0 -0
  61. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  62. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/requires.txt +0 -0
  63. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/top_level.txt +0 -0
  64. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/setup.cfg +0 -0
  65. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/tests/test-graph-memory.py +0 -0
  66. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/tests/test.py +0 -0
  67. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/tests/test_handoff_compatibility.py +0 -0
  68. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/tests/test_ollama_async_fix.py +0 -0
  69. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/tests/test_ollama_fix.py +0 -0
  70. {praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/tests/test_posthog_fixed.py +0 -0

{praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.113
+Version: 0.0.114
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10

{praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/agent/agent.py
@@ -1,12 +1,13 @@
 import os
 import time
 import json
+import copy
 import logging
 import asyncio
 from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
-from openai import AsyncOpenAI
+from ..llm import get_openai_client
 from ..main import (
     display_error,
     display_tool_call,
@@ -15,7 +16,6 @@ from ..main import (
     display_generating,
     display_self_reflection,
     ReflectionOutput,
-    client,
     adisplay_instruction,
     approval_callback
 )
@@ -85,6 +85,13 @@ class ChatCompletion:
     service_tier: Optional[str] = None
     usage: Optional[CompletionUsage] = None
 
+@dataclass
+class ToolCall:
+    """Tool call representation compatible with OpenAI format"""
+    id: str
+    type: str
+    function: Dict[str, Any]
+
 def process_stream_chunks(chunks):
     """Process streaming chunks into combined response"""
     if not chunks:
@@ -156,9 +163,8 @@ def process_stream_chunks(chunks):
     processed_tool_calls = []
    if tool_calls:
        try:
-            from openai.types.chat import ChatCompletionMessageToolCall
             for tc in tool_calls:
-                tool_call = ChatCompletionMessageToolCall(
+                tool_call = ToolCall(
                     id=tc["id"],
                     type=tc["type"],
                     function={
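
For orientation: streamed tool calls are now rebuilt as the local ToolCall dataclass above instead of the SDK's ChatCompletionMessageToolCall, removing a hard dependency on an OpenAI SDK type in the streaming path. A minimal standalone sketch of the shape involved (the sample values are illustrative, not from the diff):

    from dataclasses import dataclass
    from typing import Any, Dict

    @dataclass
    class ToolCall:
        """Tool call representation compatible with OpenAI format."""
        id: str
        type: str
        function: Dict[str, Any]

    # A tool-call fragment as it might look once stream chunks are merged
    # (hypothetical data, for illustration only).
    tc = {"id": "call_123", "type": "function",
          "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}

    tool_call = ToolCall(id=tc["id"], type=tc["type"], function=tc["function"])
    print(tool_call.function["name"])  # -> get_weather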
@@ -513,6 +519,9 @@
         self.instructions = instructions
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False
+
+        # Initialize OpenAI client for direct API calls
+        self._openai_client = get_openai_client(api_key=api_key, base_url=base_url)
 
         # If base_url is provided, always create a custom LLM instance
         if base_url:
@@ -831,6 +840,132 @@ Your Goal: {self.goal}
 
         return current_response
 
+    def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
+        """Build messages list for chat completion.
+
+        Args:
+            prompt: The user prompt (str or list)
+            temperature: Temperature for the chat
+            output_json: Optional Pydantic model for JSON output
+            output_pydantic: Optional Pydantic model for JSON output (alias)
+
+        Returns:
+            tuple: (messages list, original prompt)
+        """
+        messages = []
+
+        # Build system prompt if enabled
+        system_prompt = None
+        if self.use_system_prompt:
+            system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+"""
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+
+            messages.append({"role": "system", "content": system_prompt})
+
+        # Add chat history
+        messages.extend(self.chat_history)
+
+        # Handle prompt modifications for JSON output
+        original_prompt = prompt
+        if output_json or output_pydantic:
+            if isinstance(prompt, str):
+                prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # Create a deep copy to avoid modifying the original
+                prompt = copy.deepcopy(prompt)
+                for item in prompt:
+                    if item.get("type") == "text":
+                        item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
+        # Add prompt to messages
+        if isinstance(prompt, list):
+            # If we receive a multimodal prompt list, place it directly in the user message
+            messages.append({"role": "user", "content": prompt})
+        else:
+            messages.append({"role": "user", "content": prompt})
+
+        return messages, original_prompt
+
+    def _format_tools_for_completion(self, tools=None):
+        """Format tools for OpenAI completion API.
+
+        Supports:
+        - Pre-formatted OpenAI tools (dicts with type='function')
+        - Lists of pre-formatted tools
+        - Callable functions
+        - String function names
+        - Objects with to_openai_tool() method
+
+        Args:
+            tools: List of tools in various formats or None to use self.tools
+
+        Returns:
+            List of formatted tools or empty list
+        """
+        if tools is None:
+            tools = self.tools
+
+        if not tools:
+            return []
+
+        formatted_tools = []
+        for tool in tools:
+            # Handle pre-formatted OpenAI tools
+            if isinstance(tool, dict) and tool.get('type') == 'function':
+                # Validate nested dictionary structure before accessing
+                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                    formatted_tools.append(tool)
+                else:
+                    logging.warning(f"Skipping malformed OpenAI tool: missing function or name")
+            # Handle lists of tools
+            elif isinstance(tool, list):
+                for subtool in tool:
+                    if isinstance(subtool, dict) and subtool.get('type') == 'function':
+                        # Validate nested dictionary structure before accessing
+                        if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
+                            formatted_tools.append(subtool)
+                        else:
+                            logging.warning(f"Skipping malformed OpenAI tool in list: missing function or name")
+            # Handle string tool names
+            elif isinstance(tool, str):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+                else:
+                    logging.warning(f"Could not generate definition for tool: {tool}")
+            # Handle objects with to_openai_tool method (MCP tools)
+            elif hasattr(tool, "to_openai_tool"):
+                openai_tools = tool.to_openai_tool()
+                # MCP tools can return either a single tool or a list of tools
+                if isinstance(openai_tools, list):
+                    formatted_tools.extend(openai_tools)
+                elif openai_tools is not None:
+                    formatted_tools.append(openai_tools)
+            # Handle callable functions
+            elif callable(tool):
+                tool_def = self._generate_tool_definition(tool.__name__)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            else:
+                logging.warning(f"Tool {tool} not recognized")
+
+        # Validate JSON serialization before returning
+        if formatted_tools:
+            try:
+                json.dumps(formatted_tools)  # Validate serialization
+            except (TypeError, ValueError) as e:
+                logging.error(f"Tools are not JSON serializable: {e}")
+                return []
+
+        return formatted_tools
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
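
To make the supported shapes concrete, here is a sketch of inputs the new _format_tools_for_completion helper normalizes (the tool name, function, and the commented call are hypothetical examples, not from the diff):

    # Already in OpenAI tool format: passed through after validation.
    pre_formatted = {
        "type": "function",
        "function": {"name": "search", "parameters": {"type": "object", "properties": {}}},
    }

    # A plain callable: resolved via _generate_tool_definition(tool.__name__).
    def get_time() -> str:
        """Return the current time as an ISO string."""
        import datetime
        return datetime.datetime.now().isoformat()

    # With an Agent instance, all of these would normalize to one flat list of
    # OpenAI-style tool dicts, validated to be JSON-serializable (assumed call):
    # formatted = agent._format_tools_for_completion([pre_formatted, "internet_search", get_time])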
@@ -998,7 +1133,7 @@ Your Goal: {self.goal}
         """Process streaming response and return final response"""
         try:
             # Create the response stream
-            response_stream = client.chat.completions.create(
+            response_stream = self._openai_client.sync_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
                 temperature=temperature,
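
For context, consuming such a stream through the wrapped client looks like this (a sketch: the model and prompt are illustrative, and OPENAI_API_KEY is assumed to be set):

    from praisonaiagents.llm import get_openai_client

    client = get_openai_client()
    stream = client.sync_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Count to three"}],
        stream=True,
    )
    for chunk in stream:
        # Each chunk carries an incremental delta of the assistant message
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)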
@@ -1045,26 +1180,8 @@ Your Goal: {self.goal}
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
 
-        formatted_tools = []
-        if tools is None:
-            tools = self.tools
-        if tools:
-            for tool in tools:
-                if isinstance(tool, str):
-                    # Generate tool definition for string tool names
-                    tool_def = self._generate_tool_definition(tool)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                    else:
-                        logging.warning(f"Could not generate definition for tool: {tool}")
-                elif isinstance(tool, dict):
-                    formatted_tools.append(tool)
-                elif hasattr(tool, "to_openai_tool"):
-                    formatted_tools.append(tool.to_openai_tool())
-                elif callable(tool):
-                    formatted_tools.append(self._generate_tool_definition(tool.__name__))
-                else:
-                    logging.warning(f"Tool {tool} not recognized")
+        # Use the new _format_tools_for_completion helper method
+        formatted_tools = self._format_tools_for_completion(tools)
 
         try:
             # Use the custom LLM instance if available
@@ -1123,7 +1240,7 @@ Your Goal: {self.goal}
                 )
             else:
                 # Process as regular non-streaming response
-                final_response = client.chat.completions.create(
+                final_response = self._openai_client.sync_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
1134
1251
  tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
1135
1252
 
1136
1253
  if tool_calls:
1254
+ # Convert ToolCall dataclass objects to dict for JSON serialization
1255
+ serializable_tool_calls = []
1256
+ for tc in tool_calls:
1257
+ if isinstance(tc, ToolCall):
1258
+ # Convert dataclass to dict
1259
+ serializable_tool_calls.append({
1260
+ "id": tc.id,
1261
+ "type": tc.type,
1262
+ "function": tc.function
1263
+ })
1264
+ else:
1265
+ # Already an OpenAI object, keep as is
1266
+ serializable_tool_calls.append(tc)
1267
+
1137
1268
  messages.append({
1138
1269
  "role": "assistant",
1139
1270
  "content": final_response.choices[0].message.content,
1140
- "tool_calls": tool_calls
1271
+ "tool_calls": serializable_tool_calls
1141
1272
  })
1142
1273
 
1143
1274
  for tool_call in tool_calls:
1144
- function_name = tool_call.function.name
1145
- arguments = json.loads(tool_call.function.arguments)
1275
+ # Handle both ToolCall dataclass and OpenAI object
1276
+ if isinstance(tool_call, ToolCall):
1277
+ function_name = tool_call.function["name"]
1278
+ arguments = json.loads(tool_call.function["arguments"])
1279
+ else:
1280
+ function_name = tool_call.function.name
1281
+ arguments = json.loads(tool_call.function.arguments)
1146
1282
 
1147
1283
  if self.verbose:
1148
1284
  display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
@@ -1155,15 +1291,20 @@ Your Goal: {self.goal}
 
                     messages.append({
                         "role": "tool",
-                        "tool_call_id": tool_call.id,
+                        "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
                         "content": results_str
                     })
 
                 # Check if we should continue (for tools like sequential thinking)
                 should_continue = False
                 for tool_call in tool_calls:
-                    function_name = tool_call.function.name
-                    arguments = json.loads(tool_call.function.arguments)
+                    # Handle both ToolCall dataclass and OpenAI object
+                    if isinstance(tool_call, ToolCall):
+                        function_name = tool_call.function["name"]
+                        arguments = json.loads(tool_call.function["arguments"])
+                    else:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
 
                     # For sequential thinking tool, check if nextThoughtNeeded is True
                     if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
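
The paired isinstance branches above appear twice and can be read as one normalization rule keyed on the shape of .function. A standalone sketch of that rule (the helper name and the simulated SDK object are hypothetical, for illustration only):

    import json
    from types import SimpleNamespace

    def extract_call(tool_call):
        """Return (name, parsed_arguments) for dataclass- or SDK-style tool calls."""
        fn = tool_call.function
        if isinstance(fn, dict):  # the local ToolCall dataclass stores a plain dict
            return fn["name"], json.loads(fn["arguments"])
        return fn.name, json.loads(fn.arguments)  # SDK object: attribute access

    # Simulated OpenAI-SDK-style object for illustration:
    sdk_like = SimpleNamespace(function=SimpleNamespace(
        name="sequentialthinking", arguments='{"nextThoughtNeeded": true}'))
    print(extract_call(sdk_like))  # ('sequentialthinking', {'nextThoughtNeeded': True})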
@@ -1181,7 +1322,7 @@ Your Goal: {self.goal}
                         reasoning_steps=reasoning_steps
                     )
                 else:
-                    final_response = client.chat.completions.create(
+                    final_response = self._openai_client.sync_client.chat.completions.create(
                         model=self.llm,
                         messages=messages,
                         temperature=temperature,
@@ -1297,40 +1438,8 @@ Your Goal: {self.goal}
                 display_error(f"Error in LLM chat: {e}")
                 return None
         else:
-            if self.use_system_prompt:
-                system_prompt = f"""{self.backstory}\n
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-"""
-                if output_json:
-                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-                elif output_pydantic:
-                    system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            else:
-                system_prompt = None
-
-            messages = []
-            if system_prompt:
-                messages.append({"role": "system", "content": system_prompt})
-            messages.extend(self.chat_history)
-
-            # Modify prompt if output_json or output_pydantic is specified
-            original_prompt = prompt
-            if output_json or output_pydantic:
-                if isinstance(prompt, str):
-                    prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                elif isinstance(prompt, list):
-                    # For multimodal prompts, append to the text content
-                    for item in prompt:
-                        if item["type"] == "text":
-                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                            break
-
-            if isinstance(prompt, list):
-                # If we receive a multimodal prompt list, place it directly in the user message
-                messages.append({"role": "user", "content": prompt})
-            else:
-                messages.append({"role": "user", "content": prompt})
+            # Use the new _build_messages helper method
+            messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)
 
             final_response_text = None
             reflection_count = 0
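
One behavioural change worth noting in this replacement: the removed inline code mutated multimodal (list) prompts in place, while _build_messages deep-copies the prompt before appending the JSON instruction. A runnable illustration of the difference (the image URL is a placeholder):

    import copy

    prompt = [{"type": "text", "text": "Describe this image"},
              {"type": "image_url", "image_url": {"url": "https://example.com/x.png"}}]

    # Mirror of the helper's behaviour: modify a copy, not the caller's list.
    modified = copy.deepcopy(prompt)
    for item in modified:
        if item.get("type") == "text":
            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
            break

    assert prompt[0]["text"] == "Describe this image"  # caller's prompt is untouched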
@@ -1405,7 +1514,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages.append({"role": "user", "content": reflection_prompt})
 
                 try:
-                    reflection_response = client.beta.chat.completions.parse(
+                    reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
                         model=self.reflect_llm if self.reflect_llm else self.llm,
                         messages=messages,
                         temperature=temperature,
@@ -1566,38 +1675,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 return None
 
         # For OpenAI client
-        if self.use_system_prompt:
-            system_prompt = f"""{self.backstory}\n
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-"""
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        else:
-            system_prompt = None
-
-        messages = []
-        if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
-        messages.extend(self.chat_history)
-
-        # Modify prompt if output_json or output_pydantic is specified
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Use the new _build_messages helper method
+        messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)
 
         reflection_count = 0
         start_time = time.time()
@@ -1619,27 +1698,12 @@ Your Goal: {self.goal}
                     agent_tools=agent_tools
                 )
 
-            # Format tools if provided
-            formatted_tools = []
-            if tools:
-                for tool in tools:
-                    if isinstance(tool, str):
-                        tool_def = self._generate_tool_definition(tool)
-                        if tool_def:
-                            formatted_tools.append(tool_def)
-                    elif isinstance(tool, dict):
-                        formatted_tools.append(tool)
-                    elif hasattr(tool, "to_openai_tool"):
-                        formatted_tools.append(tool.to_openai_tool())
-                    elif callable(tool):
-                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
-            # Create async OpenAI client
-            async_client = AsyncOpenAI()
+            # Use the new _format_tools_for_completion helper method
+            formatted_tools = self._format_tools_for_completion(tools)
 
             # Make the API call based on the type of request
             if tools:
-                response = await async_client.chat.completions.create(
+                response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
@@ -1651,7 +1715,7 @@ Your Goal: {self.goal}
                 logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return result
             elif output_json or output_pydantic:
-                response = await async_client.chat.completions.create(
+                response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
@@ -1663,7 +1727,7 @@ Your Goal: {self.goal}
                 logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return response.choices[0].message.content
             else:
-                response = await async_client.chat.completions.create(
+                response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature
@@ -1690,7 +1754,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     ]
 
                     try:
-                        reflection_response = await async_client.beta.chat.completions.parse(
+                        reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
                             model=self.reflect_llm if self.reflect_llm else self.llm,
                             messages=reflection_messages,
                             temperature=temperature,
@@ -1720,7 +1784,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         {"role": "user", "content": "Now regenerate your response using the reflection you made"}
                     ]
 
-                    new_response = await async_client.chat.completions.create(
+                    new_response = await self._openai_client.async_client.chat.completions.create(
                         model=self.llm,
                         messages=regenerate_messages,
                         temperature=temperature
@@ -1796,8 +1860,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 {"role": "user", "content": formatted_results + "\nPlease process these results and provide a final response."}
             ]
             try:
-                async_client = AsyncOpenAI()
-                final_response = await async_client.chat.completions.create(
+                final_response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=0.2,

{praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents/llm/__init__.py
@@ -20,6 +20,7 @@ logging.basicConfig(level=logging.WARNING)
 
 # Import after suppressing warnings
 from .llm import LLM, LLMContextLengthExceededException
+from .openai_client import OpenAIClient, get_openai_client
 
 # Ensure telemetry is disabled after import as well
 try:
@@ -28,4 +29,4 @@ try:
 except ImportError:
     pass
 
-__all__ = ["LLM", "LLMContextLengthExceededException"]
+__all__ = ["LLM", "LLMContextLengthExceededException", "OpenAIClient", "get_openai_client"]
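
With these exports, the client wrapper is importable at package level (assumes praisonaiagents>=0.0.114 is installed):

    from praisonaiagents.llm import LLM, OpenAIClient, get_openai_client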

praisonaiagents-0.0.114/praisonaiagents/llm/openai_client.py (new file)
@@ -0,0 +1,260 @@
+"""
+OpenAI Client Module
+
+This module provides a unified interface for OpenAI API interactions,
+supporting both synchronous and asynchronous operations.
+"""
+
+import os
+import logging
+from typing import Any, Dict, List, Optional, Union, AsyncIterator, Iterator
+from openai import OpenAI, AsyncOpenAI
+from openai.types.chat import ChatCompletionChunk
+import asyncio
+from pydantic import BaseModel
+
+# Constants
+LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
+
+class OpenAIClient:
+    """
+    Unified OpenAI client wrapper for sync/async operations.
+
+    This class encapsulates all OpenAI-specific logic, providing a clean
+    interface for chat completions, streaming, and structured outputs.
+    """
+
+    def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None):
+        """
+        Initialize the OpenAI client with proper API key handling.
+
+        Args:
+            api_key: OpenAI API key (defaults to OPENAI_API_KEY env var)
+            base_url: Custom base URL for API endpoints (defaults to OPENAI_API_BASE env var)
+        """
+        # Use provided values or fall back to environment variables
+        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
+        self.base_url = base_url or os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
+
+        # For local servers like LM Studio, allow minimal API key
+        if self.base_url and not self.api_key:
+            self.api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
+        elif not self.api_key:
+            raise ValueError(
+                "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
+                "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
+                f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
+            )
+
+        # Initialize synchronous client (lazy loading for async)
+        self._sync_client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+        self._async_client = None
+
+        # Set up logging
+        self.logger = logging.getLogger(__name__)
+
+    @property
+    def sync_client(self) -> OpenAI:
+        """Get the synchronous OpenAI client."""
+        return self._sync_client
+
+    @property
+    def async_client(self) -> AsyncOpenAI:
+        """Get the asynchronous OpenAI client (lazy initialization)."""
+        if self._async_client is None:
+            self._async_client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
+        return self._async_client
+
+    def create_completion(
+        self,
+        messages: List[Dict[str, Any]],
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        stream: bool = False,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs
+    ) -> Union[Any, Iterator[ChatCompletionChunk]]:
+        """
+        Create a chat completion using the synchronous client.
+
+        Args:
+            messages: List of message dictionaries
+            model: Model to use for completion
+            temperature: Sampling temperature
+            stream: Whether to stream the response
+            tools: List of tools/functions available
+            tool_choice: Tool selection preference
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            ChatCompletion object or stream iterator
+        """
+        params = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "stream": stream,
+            **kwargs
+        }
+
+        # Add tools if provided
+        if tools:
+            params["tools"] = tools
+            if tool_choice is not None:
+                params["tool_choice"] = tool_choice
+
+        try:
+            return self._sync_client.chat.completions.create(**params)
+        except Exception as e:
+            self.logger.error(f"Error creating completion: {e}")
+            raise
+
+    async def acreate_completion(
+        self,
+        messages: List[Dict[str, Any]],
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        stream: bool = False,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs
+    ) -> Union[Any, AsyncIterator[ChatCompletionChunk]]:
+        """
+        Create a chat completion using the asynchronous client.
+
+        Args:
+            messages: List of message dictionaries
+            model: Model to use for completion
+            temperature: Sampling temperature
+            stream: Whether to stream the response
+            tools: List of tools/functions available
+            tool_choice: Tool selection preference
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            ChatCompletion object or async stream iterator
+        """
+        params = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "stream": stream,
+            **kwargs
+        }
+
+        # Add tools if provided
+        if tools:
+            params["tools"] = tools
+            if tool_choice is not None:
+                params["tool_choice"] = tool_choice
+
+        try:
+            return await self.async_client.chat.completions.create(**params)
+        except Exception as e:
+            self.logger.error(f"Error creating async completion: {e}")
+            raise
+
+    def parse_structured_output(
+        self,
+        messages: List[Dict[str, Any]],
+        response_format: BaseModel,
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        **kwargs
+    ) -> Any:
+        """
+        Parse structured output using the beta.chat.completions.parse API.
+
+        Args:
+            messages: List of message dictionaries
+            response_format: Pydantic model for response validation
+            model: Model to use for completion
+            temperature: Sampling temperature
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Parsed response according to the response_format
+        """
+        try:
+            response = self._sync_client.beta.chat.completions.parse(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                response_format=response_format,
+                **kwargs
+            )
+            return response.choices[0].message.parsed
+        except Exception as e:
+            self.logger.error(f"Error parsing structured output: {e}")
+            raise
+
+    async def aparse_structured_output(
+        self,
+        messages: List[Dict[str, Any]],
+        response_format: BaseModel,
+        model: str = "gpt-4o",
+        temperature: float = 0.7,
+        **kwargs
+    ) -> Any:
+        """
+        Parse structured output using the async beta.chat.completions.parse API.
+
+        Args:
+            messages: List of message dictionaries
+            response_format: Pydantic model for response validation
+            model: Model to use for completion
+            temperature: Sampling temperature
+            **kwargs: Additional parameters to pass to the API
+
+        Returns:
+            Parsed response according to the response_format
+        """
+        try:
+            response = await self.async_client.beta.chat.completions.parse(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                response_format=response_format,
+                **kwargs
+            )
+            return response.choices[0].message.parsed
+        except Exception as e:
+            self.logger.error(f"Error parsing async structured output: {e}")
+            raise
+
+    def close(self):
+        """Close the OpenAI clients."""
+        if hasattr(self._sync_client, 'close'):
+            self._sync_client.close()
+        if self._async_client and hasattr(self._async_client, 'close'):
+            self._async_client.close()
+
+    async def aclose(self):
+        """Asynchronously close the OpenAI clients."""
+        if hasattr(self._sync_client, 'close'):
+            await asyncio.to_thread(self._sync_client.close)
+        if self._async_client and hasattr(self._async_client, 'aclose'):
+            await self._async_client.aclose()
+
+
+# Global client instance (similar to main.py pattern)
+_global_client = None
+
+def get_openai_client(api_key: Optional[str] = None, base_url: Optional[str] = None) -> OpenAIClient:
+    """
+    Get or create a global OpenAI client instance.
+
+    Args:
+        api_key: OpenAI API key (defaults to OPENAI_API_KEY env var)
+        base_url: Custom base URL for API endpoints
+
+    Returns:
+        OpenAIClient instance
+    """
+    global _global_client
+
+    if _global_client is None:
+        _global_client = OpenAIClient(api_key=api_key, base_url=base_url)
+
+    return _global_client
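
Example usage of the new module (a sketch: requires OPENAI_API_KEY to be set, or a local server via OPENAI_API_BASE; the model names are illustrative):

    from pydantic import BaseModel
    from praisonaiagents.llm import get_openai_client

    class Answer(BaseModel):
        reply: str

    client = get_openai_client()

    # Plain completion through the wrapper
    resp = client.create_completion(
        messages=[{"role": "user", "content": "One-line greeting"}],
        model="gpt-4o-mini",
    )
    print(resp.choices[0].message.content)

    # Structured output validated against the Pydantic model
    parsed = client.parse_structured_output(
        messages=[{"role": "user", "content": "Reply with JSON: a short greeting"}],
        response_format=Answer,
        model="gpt-4o-mini",
    )
    print(parsed.reply)

Note the design choice visible in get_openai_client: it caches a module-level singleton, so the api_key/base_url passed by the first caller win and later calls with different arguments return the same instance.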

{praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.113
+Version: 0.0.114
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10

{praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/praisonaiagents.egg-info/SOURCES.txt
@@ -24,6 +24,7 @@ praisonaiagents/knowledge/chunking.py
 praisonaiagents/knowledge/knowledge.py
 praisonaiagents/llm/__init__.py
 praisonaiagents/llm/llm.py
+praisonaiagents/llm/openai_client.py
 praisonaiagents/mcp/__init__.py
 praisonaiagents/mcp/mcp.py
 praisonaiagents/mcp/mcp_sse.py

{praisonaiagents-0.0.113 → praisonaiagents-0.0.114}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "praisonaiagents"
-version = "0.0.113"
+version = "0.0.114"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 requires-python = ">=3.10"
 authors = [