praisonaiagents 0.0.114-py3-none-any.whl → 0.0.116-py3-none-any.whl

This diff shows the content of publicly released versions of the package as published to its public registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in that registry.
@@ -7,7 +7,17 @@ import asyncio
 from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
-from ..llm import get_openai_client
+from ..llm import (
+    get_openai_client,
+    ChatCompletionMessage,
+    Choice,
+    CompletionTokensDetails,
+    PromptTokensDetails,
+    CompletionUsage,
+    ChatCompletion,
+    ToolCall,
+    process_stream_chunks
+)
 from ..main import (
     display_error,
     display_tool_call,
@@ -21,7 +31,6 @@ from ..main import (
 )
 import inspect
 import uuid
-from dataclasses import dataclass
 
 # Global variables for API server
 _server_started = {} # Dict of port -> started boolean
@@ -35,181 +44,6 @@ if TYPE_CHECKING:
     from ..main import TaskOutput
     from ..handoff import Handoff
 
-@dataclass
-class ChatCompletionMessage:
-    content: str
-    role: str = "assistant"
-    refusal: Optional[str] = None
-    audio: Optional[str] = None
-    function_call: Optional[dict] = None
-    tool_calls: Optional[List] = None
-    reasoning_content: Optional[str] = None
-
-@dataclass
-class Choice:
-    finish_reason: Optional[str]
-    index: int
-    message: ChatCompletionMessage
-    logprobs: Optional[dict] = None
-
-@dataclass
-class CompletionTokensDetails:
-    accepted_prediction_tokens: Optional[int] = None
-    audio_tokens: Optional[int] = None
-    reasoning_tokens: Optional[int] = None
-    rejected_prediction_tokens: Optional[int] = None
-
-@dataclass
-class PromptTokensDetails:
-    audio_tokens: Optional[int] = None
-    cached_tokens: int = 0
-
-@dataclass
-class CompletionUsage:
-    completion_tokens: int = 0
-    prompt_tokens: int = 0
-    total_tokens: int = 0
-    completion_tokens_details: Optional[CompletionTokensDetails] = None
-    prompt_tokens_details: Optional[PromptTokensDetails] = None
-    prompt_cache_hit_tokens: int = 0
-    prompt_cache_miss_tokens: int = 0
-
-@dataclass
-class ChatCompletion:
-    id: str
-    choices: List[Choice]
-    created: int
-    model: str
-    object: str = "chat.completion"
-    system_fingerprint: Optional[str] = None
-    service_tier: Optional[str] = None
-    usage: Optional[CompletionUsage] = None
-
-@dataclass
-class ToolCall:
-    """Tool call representation compatible with OpenAI format"""
-    id: str
-    type: str
-    function: Dict[str, Any]
-
-def process_stream_chunks(chunks):
-    """Process streaming chunks into combined response"""
-    if not chunks:
-        return None
-
-    try:
-        first_chunk = chunks[0]
-        last_chunk = chunks[-1]
-
-        # Basic metadata
-        id = getattr(first_chunk, "id", None)
-        created = getattr(first_chunk, "created", None)
-        model = getattr(first_chunk, "model", None)
-        system_fingerprint = getattr(first_chunk, "system_fingerprint", None)
-
-        # Track usage
-        completion_tokens = 0
-        prompt_tokens = 0
-
-        content_list = []
-        reasoning_list = []
-        tool_calls = []
-        current_tool_call = None
-
-        # First pass: Get initial tool call data
-        for chunk in chunks:
-            if not hasattr(chunk, "choices") or not chunk.choices:
-                continue
-
-            delta = getattr(chunk.choices[0], "delta", None)
-            if not delta:
-                continue
-
-            # Handle content and reasoning
-            if hasattr(delta, "content") and delta.content:
-                content_list.append(delta.content)
-            if hasattr(delta, "reasoning_content") and delta.reasoning_content:
-                reasoning_list.append(delta.reasoning_content)
-
-            # Handle tool calls
-            if hasattr(delta, "tool_calls") and delta.tool_calls:
-                for tool_call_delta in delta.tool_calls:
-                    if tool_call_delta.index is not None and tool_call_delta.id:
-                        # Found the initial tool call
-                        current_tool_call = {
-                            "id": tool_call_delta.id,
-                            "type": "function",
-                            "function": {
-                                "name": tool_call_delta.function.name,
-                                "arguments": ""
-                            }
-                        }
-                        while len(tool_calls) <= tool_call_delta.index:
-                            tool_calls.append(None)
-                        tool_calls[tool_call_delta.index] = current_tool_call
-                        current_tool_call = tool_calls[tool_call_delta.index]
-                    elif current_tool_call is not None and hasattr(tool_call_delta.function, "arguments"):
-                        if tool_call_delta.function.arguments:
-                            current_tool_call["function"]["arguments"] += tool_call_delta.function.arguments
-
-        # Remove any None values and empty tool calls
-        tool_calls = [tc for tc in tool_calls if tc and tc["id"] and tc["function"]["name"]]
-
-        combined_content = "".join(content_list) if content_list else ""
-        combined_reasoning = "".join(reasoning_list) if reasoning_list else None
-        finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
-
-        # Create ToolCall objects
-        processed_tool_calls = []
-        if tool_calls:
-            try:
-                for tc in tool_calls:
-                    tool_call = ToolCall(
-                        id=tc["id"],
-                        type=tc["type"],
-                        function={
-                            "name": tc["function"]["name"],
-                            "arguments": tc["function"]["arguments"]
-                        }
-                    )
-                    processed_tool_calls.append(tool_call)
-            except Exception as e:
-                print(f"Error processing tool call: {e}")
-
-        message = ChatCompletionMessage(
-            content=combined_content,
-            role="assistant",
-            reasoning_content=combined_reasoning,
-            tool_calls=processed_tool_calls if processed_tool_calls else None
-        )
-
-        choice = Choice(
-            finish_reason=finish_reason or "tool_calls" if processed_tool_calls else None,
-            index=0,
-            message=message
-        )
-
-        usage = CompletionUsage(
-            completion_tokens=completion_tokens,
-            prompt_tokens=prompt_tokens,
-            total_tokens=completion_tokens + prompt_tokens,
-            completion_tokens_details=CompletionTokensDetails(),
-            prompt_tokens_details=PromptTokensDetails()
-        )
-
-        return ChatCompletion(
-            id=id,
-            choices=[choice],
-            created=created,
-            model=model,
-            system_fingerprint=system_fingerprint,
-            usage=usage
-        )
-
-    except Exception as e:
-        print(f"Error processing chunks: {e}")
-        return None
-
 class Agent:
     def _generate_tool_definition(self, function_name):
         """
@@ -852,8 +686,6 @@ Your Goal: {self.goal}
         Returns:
             tuple: (messages list, original prompt)
         """
-        messages = []
-
         # Build system prompt if enabled
         system_prompt = None
         if self.use_system_prompt:
@@ -861,35 +693,15 @@ Your Goal: {self.goal}
 Your Role: {self.role}\n
 Your Goal: {self.goal}
             """
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-
-            messages.append({"role": "system", "content": system_prompt})
 
-        # Add chat history
-        messages.extend(self.chat_history)
-
-        # Handle prompt modifications for JSON output
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                # Create a deep copy to avoid modifying the original
-                prompt = copy.deepcopy(prompt)
-                for item in prompt:
-                    if item.get("type") == "text":
-                        item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            # If we receive a multimodal prompt list, place it directly in the user message
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Use openai_client's build_messages method
+        messages, original_prompt = self._openai_client.build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=self.chat_history,
+            output_json=output_json,
+            output_pydantic=output_pydantic
+        )
 
         return messages, original_prompt
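For reference, a hedged sketch of the delegated call: the keyword arguments mirror the hunk above, and the expected shape of the returned list is inferred from the inline logic the hunk removes (system prompt first, then chat history, then the user prompt, with a JSON-only instruction appended when a schema is requested). The prompt text and history below are made up:

```python
from praisonaiagents.llm import get_openai_client

client = get_openai_client()
messages, original_prompt = client.build_messages(
    prompt="Summarise the quarterly report",
    system_prompt="You are a helpful assistant.",  # the Agent builds this from role/goal/backstory
    chat_history=[
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello! How can I help?"},
    ],
    output_json=None,
    output_pydantic=None,
)
# Expected result, per the removed inline code:
#   [{"role": "system", ...}, <chat history entries...>, {"role": "user", "content": prompt}]
```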
 
@@ -1131,50 +943,16 @@ Your Goal: {self.goal}
 
     def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
         """Process streaming response and return final response"""
-        try:
-            # Create the response stream
-            response_stream = self._openai_client.sync_client.chat.completions.create(
-                model=self.llm,
-                messages=messages,
-                temperature=temperature,
-                tools=formatted_tools if formatted_tools else None,
-                stream=True
-            )
-
-            full_response_text = ""
-            reasoning_content = ""
-            chunks = []
-
-            # Create Live display with proper configuration
-            with Live(
-                display_generating("", start_time),
-                console=self.console,
-                refresh_per_second=4,
-                transient=True,
-                vertical_overflow="ellipsis",
-                auto_refresh=True
-            ) as live:
-                for chunk in response_stream:
-                    chunks.append(chunk)
-                    if chunk.choices[0].delta.content:
-                        full_response_text += chunk.choices[0].delta.content
-                        live.update(display_generating(full_response_text, start_time))
-
-                    # Update live display with reasoning content if enabled
-                    if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
-                        rc = chunk.choices[0].delta.reasoning_content
-                        if rc:
-                            reasoning_content += rc
-                            live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
-
-            # Clear the last generating display with a blank line
-            self.console.print()
-            final_response = process_stream_chunks(chunks)
-            return final_response
-
-        except Exception as e:
-            display_error(f"Error in stream processing: {e}")
-            return None
+        return self._openai_client.process_stream_response(
+            messages=messages,
+            model=self.llm,
+            temperature=temperature,
+            tools=formatted_tools,
+            start_time=start_time,
+            console=self.console,
+            display_fn=display_generating,
+            reasoning_steps=reasoning_steps
+        )
 
     def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
         start_time = time.time()
@@ -1223,117 +1001,27 @@ Your Goal: {self.goal}
                     reasoning_steps=reasoning_steps
                 )
             else:
-                # Use the standard OpenAI client approach
-                # Continue tool execution loop until no more tool calls are needed
-                max_iterations = 10 # Prevent infinite loops
-                iteration_count = 0
+                # Use the standard OpenAI client approach with tool support
+                def custom_display_fn(text, start_time):
+                    if self.verbose:
+                        return display_generating(text, start_time)
+                    return ""
 
-                while iteration_count < max_iterations:
-                    if stream:
-                        # Process as streaming response with formatted tools
-                        final_response = self._process_stream_response(
-                            messages,
-                            temperature,
-                            start_time,
-                            formatted_tools=formatted_tools if formatted_tools else None,
-                            reasoning_steps=reasoning_steps
-                        )
-                    else:
-                        # Process as regular non-streaming response
-                        final_response = self._openai_client.sync_client.chat.completions.create(
-                            model=self.llm,
-                            messages=messages,
-                            temperature=temperature,
-                            tools=formatted_tools if formatted_tools else None,
-                            stream=False
-                        )
-
-                    tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
-
-                    if tool_calls:
-                        # Convert ToolCall dataclass objects to dict for JSON serialization
-                        serializable_tool_calls = []
-                        for tc in tool_calls:
-                            if isinstance(tc, ToolCall):
-                                # Convert dataclass to dict
-                                serializable_tool_calls.append({
-                                    "id": tc.id,
-                                    "type": tc.type,
-                                    "function": tc.function
-                                })
-                            else:
-                                # Already an OpenAI object, keep as is
-                                serializable_tool_calls.append(tc)
-
-                        messages.append({
-                            "role": "assistant",
-                            "content": final_response.choices[0].message.content,
-                            "tool_calls": serializable_tool_calls
-                        })
-
-                        for tool_call in tool_calls:
-                            # Handle both ToolCall dataclass and OpenAI object
-                            if isinstance(tool_call, ToolCall):
-                                function_name = tool_call.function["name"]
-                                arguments = json.loads(tool_call.function["arguments"])
-                            else:
-                                function_name = tool_call.function.name
-                                arguments = json.loads(tool_call.function.arguments)
-
-                            if self.verbose:
-                                display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
-
-                            tool_result = self.execute_tool(function_name, arguments)
-                            results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
-
-                            if self.verbose:
-                                display_tool_call(f"Function '{function_name}' returned: {results_str}")
-
-                            messages.append({
-                                "role": "tool",
-                                "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
-                                "content": results_str
-                            })
-
-                        # Check if we should continue (for tools like sequential thinking)
-                        should_continue = False
-                        for tool_call in tool_calls:
-                            # Handle both ToolCall dataclass and OpenAI object
-                            if isinstance(tool_call, ToolCall):
-                                function_name = tool_call.function["name"]
-                                arguments = json.loads(tool_call.function["arguments"])
-                            else:
-                                function_name = tool_call.function.name
-                                arguments = json.loads(tool_call.function.arguments)
-
-                            # For sequential thinking tool, check if nextThoughtNeeded is True
-                            if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-                                should_continue = True
-                                break
-
-                        if not should_continue:
-                            # Get final response after tool calls
-                            if stream:
-                                final_response = self._process_stream_response(
-                                    messages,
-                                    temperature,
-                                    start_time,
-                                    formatted_tools=formatted_tools if formatted_tools else None,
-                                    reasoning_steps=reasoning_steps
-                                )
-                            else:
-                                final_response = self._openai_client.sync_client.chat.completions.create(
-                                    model=self.llm,
-                                    messages=messages,
-                                    temperature=temperature,
-                                    stream=False
-                                )
-                            break
-
-                        iteration_count += 1
-                    else:
-                        # No tool calls, we're done
-                        break
+                # Note: openai_client expects tools in various formats and will format them internally
+                # But since we already have formatted_tools, we can pass them directly
+                final_response = self._openai_client.chat_completion_with_tools(
+                    messages=messages,
+                    model=self.llm,
+                    temperature=temperature,
+                    tools=formatted_tools, # Already formatted for OpenAI
+                    execute_tool_fn=self.execute_tool,
+                    stream=stream,
+                    console=self.console if self.verbose else None,
+                    display_fn=display_generating if stream and self.verbose else None,
+                    reasoning_steps=reasoning_steps,
+                    verbose=self.verbose,
+                    max_iterations=10
+                )
 
             return final_response
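A hedged sketch of the contract the new `chat_completion_with_tools` call relies on, inferred from the loop it replaces: tools are passed in OpenAI function-calling format, and `execute_tool_fn` (in the Agent, `self.execute_tool`) receives the tool name plus parsed arguments and returns a JSON-serialisable result. The tool definition and model name below are invented for illustration; the keyword values match those shown in the hunk:

```python
from praisonaiagents.llm import get_openai_client

client = get_openai_client()

formatted_tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool
        "description": "Look up the weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

def execute_tool(function_name, arguments):
    # Stands in for Agent.execute_tool: run the named tool, return a serialisable result.
    if function_name == "get_weather":
        return {"city": arguments["city"], "forecast": "sunny"}
    return {"error": f"unknown tool: {function_name}"}

final_response = client.chat_completion_with_tools(
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    model="gpt-4o-mini",  # illustrative model name
    temperature=0.2,
    tools=formatted_tools,
    execute_tool_fn=execute_tool,
    stream=False,
    console=None,
    display_fn=None,
    reasoning_steps=False,
    verbose=False,
    max_iterations=10,
)
if final_response:
    print(final_response.choices[0].message.content)
```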
 
@@ -20,7 +20,18 @@ logging.basicConfig(level=logging.WARNING)
 
 # Import after suppressing warnings
 from .llm import LLM, LLMContextLengthExceededException
-from .openai_client import OpenAIClient, get_openai_client
+from .openai_client import (
+    OpenAIClient,
+    get_openai_client,
+    ChatCompletionMessage,
+    Choice,
+    CompletionTokensDetails,
+    PromptTokensDetails,
+    CompletionUsage,
+    ChatCompletion,
+    ToolCall,
+    process_stream_chunks
+)
 
 # Ensure telemetry is disabled after import as well
 try:
@@ -29,4 +40,17 @@ try:
 except ImportError:
     pass
 
-__all__ = ["LLM", "LLMContextLengthExceededException", "OpenAIClient", "get_openai_client"]
+__all__ = [
+    "LLM",
+    "LLMContextLengthExceededException",
+    "OpenAIClient",
+    "get_openai_client",
+    "ChatCompletionMessage",
+    "Choice",
+    "CompletionTokensDetails",
+    "PromptTokensDetails",
+    "CompletionUsage",
+    "ChatCompletion",
+    "ToolCall",
+    "process_stream_chunks"
+]
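With the expanded `__all__`, the response dataclasses and the chunk combiner are importable from the package-level `llm` module rather than from `openai_client` directly, for example:

```python
from praisonaiagents.llm import (
    ChatCompletion,
    ChatCompletionMessage,
    ToolCall,
    process_stream_chunks,
)
```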
@@ -364,6 +364,46 @@ class LLM:
 
         return messages, original_prompt
 
+    def _fix_array_schemas(self, schema: Dict) -> Dict:
+        """
+        Recursively fix array schemas by adding missing 'items' attribute.
+
+        This ensures compatibility with OpenAI's function calling format which
+        requires array types to specify the type of items they contain.
+
+        Args:
+            schema: The schema dictionary to fix
+
+        Returns:
+            dict: The fixed schema
+        """
+        if not isinstance(schema, dict):
+            return schema
+
+        # Create a copy to avoid modifying the original
+        fixed_schema = schema.copy()
+
+        # Fix array types at the current level
+        if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
+            # Add a default items schema for arrays without it
+            fixed_schema["items"] = {"type": "string"}
+
+        # Recursively fix nested schemas in properties
+        if "properties" in fixed_schema and isinstance(fixed_schema["properties"], dict):
+            fixed_properties = {}
+            for prop_name, prop_schema in fixed_schema["properties"].items():
+                if isinstance(prop_schema, dict):
+                    fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
+                else:
+                    fixed_properties[prop_name] = prop_schema
+            fixed_schema["properties"] = fixed_properties
+
+        # Fix items schema if it exists
+        if "items" in fixed_schema and isinstance(fixed_schema["items"], dict):
+            fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])
+
+        return fixed_schema
+
     def _format_tools_for_litellm(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
         """Format tools for LiteLLM - handles all tool formats.
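A small before/after illustration of `_fix_array_schemas`, based only on the logic added above; the schema values are made up, and the `LLM(model=...)` constructor argument is an assumption (no API call is made by this method):

```python
from praisonaiagents.llm import LLM

llm = LLM(model="gpt-4o-mini")  # assumed constructor argument, for illustration

schema = {
    "type": "object",
    "properties": {
        "tags": {"type": "array"},                     # no "items" -> gets a default
        "filters": {
            "type": "object",
            "properties": {"ids": {"type": "array"}},  # nested array, also patched
        },
    },
}

fixed = llm._fix_array_schemas(schema)

assert fixed["properties"]["tags"]["items"] == {"type": "string"}
assert fixed["properties"]["filters"]["properties"]["ids"]["items"] == {"type": "string"}
```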
 
@@ -389,7 +429,11 @@ class LLM:
                 # Validate nested dictionary structure before accessing
                 if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
                     logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
-                    formatted_tools.append(tool)
+                    # Fix array schemas in the tool parameters
+                    fixed_tool = tool.copy()
+                    if 'parameters' in fixed_tool['function']:
+                        fixed_tool['function']['parameters'] = self._fix_array_schemas(fixed_tool['function']['parameters'])
+                    formatted_tools.append(fixed_tool)
                 else:
                     logging.debug(f"Skipping malformed OpenAI tool: missing function or name")
             # Handle lists of tools (e.g. from MCP.to_openai_tool())
@@ -399,7 +443,11 @@ class LLM:
                     # Validate nested dictionary structure before accessing
                     if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
                         logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
-                        formatted_tools.append(subtool)
+                        # Fix array schemas in the tool parameters
+                        fixed_tool = subtool.copy()
+                        if 'parameters' in fixed_tool['function']:
+                            fixed_tool['function']['parameters'] = self._fix_array_schemas(fixed_tool['function']['parameters'])
+                        formatted_tools.append(fixed_tool)
                     else:
                         logging.debug(f"Skipping malformed OpenAI tool in list: missing function or name")
             elif callable(tool):
@@ -2153,7 +2201,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             "function": {
                 "name": function_name,
                 "description": docstring.split('\n\n')[0] if docstring else "No description available",
-                "parameters": parameters
+                "parameters": self._fix_array_schemas(parameters)
             }
         }
         logging.debug(f"Generated tool definition: {tool_def}")