praisonaiagents 0.0.106__py3-none-any.whl → 0.0.108__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +8 -4
- praisonaiagents/llm/llm.py +118 -34
- praisonaiagents/mcp/mcp.py +39 -1
- praisonaiagents/mcp/mcp_sse.py +41 -1
- {praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/RECORD +8 -8
- {praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -364,6 +364,7 @@ class Agent:
     knowledge_config: Optional[Dict[str, Any]] = None,
     use_system_prompt: Optional[bool] = True,
     markdown: bool = True,
+    stream: bool = True,
     self_reflect: bool = False,
     max_reflect: int = 3,
     min_reflect: int = 1,
@@ -435,6 +436,8 @@ class Agent:
         conversations to establish agent behavior and context. Defaults to True.
     markdown (bool, optional): Enable markdown formatting in agent responses for better
         readability and structure. Defaults to True.
+    stream (bool, optional): Enable streaming responses from the language model. Set to False
+        for LLM providers that don't support streaming. Defaults to True.
     self_reflect (bool, optional): Enable self-reflection capabilities where the agent
         evaluates and improves its own responses. Defaults to False.
     max_reflect (int, optional): Maximum number of self-reflection iterations to prevent
@@ -554,6 +557,7 @@ class Agent:
     self.use_system_prompt = use_system_prompt
     self.chat_history = []
     self.markdown = markdown
+    self.stream = stream
     self.max_reflect = max_reflect
     self.min_reflect = min_reflect
     self.reflect_prompt = reflect_prompt
@@ -1002,7 +1006,7 @@ Your Goal: {self.goal}
         tools=formatted_tools if formatted_tools else None,
         verbose=self.verbose,
         markdown=self.markdown,
-        stream=
+        stream=stream,
         console=self.console,
         execute_tool_fn=self.execute_tool,
         agent_name=self.name,
@@ -1018,7 +1022,7 @@ Your Goal: {self.goal}
         tools=formatted_tools if formatted_tools else None,
         verbose=self.verbose,
         markdown=self.markdown,
-        stream=
+        stream=stream,
         console=self.console,
         execute_tool_fn=self.execute_tool,
         agent_name=self.name,
@@ -1276,7 +1280,7 @@ Your Goal: {self.goal}
         agent_tools=agent_tools
     )

-    response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=stream)
+    response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
     if not response:
         return None

@@ -1371,7 +1375,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

     logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
     messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
-    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=stream)
+    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
     response_text = response.choices[0].message.content.strip()
     reflection_count += 1
     continue  # Continue the loop for more reflections
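
The net effect of these agent.py changes is a new stream flag on the Agent constructor that is stored on the instance and forwarded to every _chat_completion call. A minimal usage sketch, assuming the package's usual top-level Agent export; the name/role/goal values and the commented call are illustrative, not taken from this diff:

    from praisonaiagents import Agent

    # Disable streaming for an LLM provider that does not support streamed responses.
    agent = Agent(
        name="Summarizer",           # hypothetical example values
        role="Research Assistant",
        goal="Summarize documents",
        stream=False,                # new parameter; defaults to True
    )
    # agent.start("Summarize the attached report")  # hypothetical call, for illustration only

Internally the flag lands on agent.stream and is passed as stream=self.stream to the chat-completion helpers shown above.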
praisonaiagents/llm/llm.py
CHANGED
@@ -296,6 +296,7 @@ class LLM:
     agent_role: Optional[str] = None,
     agent_tools: Optional[List[str]] = None,
     execute_tool_fn: Optional[Callable] = None,
+    stream: bool = True,
     **kwargs
 ) -> str:
     """Enhanced get_response with all OpenAI-like features"""
@@ -487,7 +488,7 @@ class LLM:
                 messages=messages,
                 tools=formatted_tools,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 **kwargs
             )
         ):
@@ -503,7 +504,7 @@ class LLM:
                 messages=messages,
                 tools=formatted_tools,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 **kwargs
             )
         ):
@@ -549,6 +550,7 @@ class LLM:
         })

         should_continue = False
+        tool_results = []  # Store all tool results
         for tool_call in tool_calls:
             # Handle both object and dict access patterns
             if isinstance(tool_call, dict):
@@ -569,6 +571,7 @@ class LLM:
             logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
             tool_result = execute_tool_fn(function_name, arguments)
             logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
+            tool_results.append(tool_result)  # Store the result

             if verbose:
                 display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -601,7 +604,8 @@ class LLM:
     # If we reach here, no more tool calls needed - get final response
     # Make one more call to get the final summary response
     # Special handling for Ollama models that don't automatically process tool results
-
+    ollama_handled = False
+    if self.model and self.model.startswith("ollama/") and tool_results:
         # For Ollama models, we need to explicitly ask the model to process the tool results
         # First, check if the response is just a JSON tool call
         try:
@@ -614,13 +618,30 @@ class LLM:
             # Create a prompt that asks the model to process the tool results based on original context
             # Extract the original user query from messages
             original_query = ""
-            for msg in messages:
+            for msg in reversed(messages):  # Look from the end to find the most recent user message
                 if msg.get("role") == "user":
-
-
+                    content = msg.get("content", "")
+                    # Handle list content (multimodal)
+                    if isinstance(content, list):
+                        for item in content:
+                            if isinstance(item, dict) and item.get("type") == "text":
+                                original_query = item.get("text", "")
+                                break
+                    else:
+                        original_query = content
+                    if original_query:
+                        break
+
+            # Create a shorter follow-up prompt with all tool results
+            # If there's only one result, use it directly; otherwise combine them
+            if len(tool_results) == 1:
+                results_text = json.dumps(tool_results[0], indent=2)
+            else:
+                results_text = json.dumps(tool_results, indent=2)

-
-
+            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

             # Make a follow-up call to process the results
             follow_up_messages = [
@@ -635,7 +656,7 @@ class LLM:
                 **self._build_completion_params(
                     messages=follow_up_messages,
                     temperature=temperature,
-                    stream=
+                    stream=stream
                 )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -648,17 +669,38 @@ class LLM:
                 **self._build_completion_params(
                     messages=follow_up_messages,
                     temperature=temperature,
-                    stream=
+                    stream=stream
                 )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     response_text += chunk.choices[0].delta.content
+
+            # Set flag to indicate Ollama was handled
+            ollama_handled = True
+            final_response_text = response_text.strip()
+            logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+            # Display the response if we got one
+            if final_response_text and verbose:
+                display_interaction(
+                    original_prompt,
+                    final_response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console
+                )
+
+            # Return the final response after processing Ollama's follow-up
+            if final_response_text:
+                return final_response_text
+            else:
+                logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
         except (json.JSONDecodeError, KeyError):
             # Not a JSON response or not a tool call format, continue normally
             pass

-        # If reasoning_steps is True, do a single non-streaming call
-
+        # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
+        if reasoning_steps and not ollama_handled:
             resp = litellm.completion(
                 **self._build_completion_params(
                     messages=messages,
@@ -688,8 +730,8 @@ class LLM:
                     console=console
                 )

-        # Otherwise do the existing streaming approach
-
+        # Otherwise do the existing streaming approach if not already handled
+        elif not ollama_handled:
             # Get response after tool calls with streaming
             if verbose:
                 with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
@@ -714,7 +756,7 @@ class LLM:
                     messages=messages,
                     tools=formatted_tools,
                     temperature=temperature,
-                    stream=
+                    stream=stream,
                     **kwargs
                 )
             ):
@@ -832,7 +874,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             **self._build_completion_params(
                 messages=reflection_messages,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 response_format={"type": "json_object"},
                 **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
@@ -847,7 +889,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             **self._build_completion_params(
                 messages=reflection_messages,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 response_format={"type": "json_object"},
                 **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
@@ -963,6 +1005,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     agent_role: Optional[str] = None,
     agent_tools: Optional[List[str]] = None,
     execute_tool_fn: Optional[Callable] = None,
+    stream: bool = True,
     **kwargs
 ) -> str:
     """Async version of get_response with identical functionality."""
@@ -1163,7 +1206,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             **self._build_completion_params(
                 messages=messages,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 **kwargs
             )
         ):
@@ -1177,7 +1220,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             **self._build_completion_params(
                 messages=messages,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 **kwargs
             )
         ):
@@ -1225,6 +1268,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             "tool_calls": serializable_tool_calls
         })

+        tool_results = []  # Store all tool results
         for tool_call in tool_calls:
             # Handle both object and dict access patterns
             if isinstance(tool_call, dict):
@@ -1243,6 +1287,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             tool_call_id = f"tool_{id(tool_call)}"

             tool_result = await execute_tool_fn(function_name, arguments)
+            tool_results.append(tool_result)  # Store the result

             if verbose:
                 display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1261,7 +1306,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         response_text = ""

         # Special handling for Ollama models that don't automatically process tool results
-
+        ollama_handled = False
+        if self._is_ollama_provider() and tool_results:
             # For Ollama models, we need to explicitly ask the model to process the tool results
             # First, check if the response is just a JSON tool call
             try:
@@ -1274,13 +1320,30 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             # Create a prompt that asks the model to process the tool results based on original context
             # Extract the original user query from messages
             original_query = ""
-            for msg in messages:
+            for msg in reversed(messages):  # Look from the end to find the most recent user message
                 if msg.get("role") == "user":
-
-
+                    content = msg.get("content", "")
+                    # Handle list content (multimodal)
+                    if isinstance(content, list):
+                        for item in content:
+                            if isinstance(item, dict) and item.get("type") == "text":
+                                original_query = item.get("text", "")
+                                break
+                    else:
+                        original_query = content
+                    if original_query:
+                        break
+
+            # Create a shorter follow-up prompt with all tool results
+            # If there's only one result, use it directly; otherwise combine them
+            if len(tool_results) == 1:
+                results_text = json.dumps(tool_results[0], indent=2)
+            else:
+                results_text = json.dumps(tool_results, indent=2)

-
-
+            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

             # Make a follow-up call to process the results
             follow_up_messages = [
@@ -1294,7 +1357,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 **self._build_completion_params(
                     messages=follow_up_messages,
                     temperature=temperature,
-                    stream=
+                    stream=stream
                 )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -1308,17 +1371,38 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 **self._build_completion_params(
                     messages=follow_up_messages,
                     temperature=temperature,
-                    stream=
+                    stream=stream
                 )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     response_text += chunk.choices[0].delta.content
+
+            # Set flag to indicate Ollama was handled
+            ollama_handled = True
+            final_response_text = response_text.strip()
+            logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+            # Display the response if we got one
+            if final_response_text and verbose:
+                display_interaction(
+                    original_prompt,
+                    final_response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console
+                )
+
+            # Return the final response after processing Ollama's follow-up
+            if final_response_text:
+                return final_response_text
+            else:
+                logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
         except (json.JSONDecodeError, KeyError):
             # Not a JSON response or not a tool call format, continue normally
             pass

         # If no special handling was needed or if it's not an Ollama model
-
+        if reasoning_steps and not ollama_handled:
             # Non-streaming call to capture reasoning
             resp = await litellm.acompletion(
                 **self._build_completion_params(
@@ -1348,14 +1432,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         generation_time=time.time() - start_time,
                         console=console
                     )
-
-        # Get response after tool calls with streaming
+        elif not ollama_handled:
+            # Get response after tool calls with streaming if not already handled
             if verbose:
                 async for chunk in await litellm.acompletion(
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=
+                        stream=stream,
                         tools=formatted_tools,
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
@@ -1371,7 +1455,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 **self._build_completion_params(
                     messages=messages,
                     temperature=temperature,
-                    stream=
+                    stream=stream,
                     **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 )
             ):
@@ -1452,7 +1536,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             **self._build_completion_params(
                 messages=reflection_messages,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 response_format={"type": "json_object"},
                 **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
@@ -1467,7 +1551,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             **self._build_completion_params(
                 messages=reflection_messages,
                 temperature=temperature,
-                stream=
+                stream=stream,
                 response_format={"type": "json_object"},
                 **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
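
The llm.py changes collect every tool result into tool_results and, for Ollama models, issue one follow-up completion that feeds those results back together with the original user question. A standalone sketch of that follow-up prompt construction, with a local helper name invented for illustration (the library does this inline, not through such a function):

    import json

    def build_ollama_follow_up(messages, tool_results):
        # Find the most recent user message, handling multimodal (list) content.
        original_query = ""
        for msg in reversed(messages):
            if msg.get("role") == "user":
                content = msg.get("content", "")
                if isinstance(content, list):
                    for item in content:
                        if isinstance(item, dict) and item.get("type") == "text":
                            original_query = item.get("text", "")
                            break
                else:
                    original_query = content
                if original_query:
                    break
        # A single result is passed through directly; multiple results are combined.
        results = tool_results[0] if len(tool_results) == 1 else tool_results
        results_text = json.dumps(results, indent=2)
        return (f"Results:\n{results_text}\n"
                f"Provide Answer to this Original Question based on the above results: '{original_query}'")

    # Example:
    # build_ollama_follow_up([{"role": "user", "content": "What is 2 + 2?"}], [4])

The response to that follow-up call is then displayed and returned directly (ollama_handled short-circuits the normal reasoning/streaming paths), so tool results are no longer dropped for ollama/ models.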
praisonaiagents/mcp/mcp.py
CHANGED
@@ -379,6 +379,43 @@ class MCP:
         """
         return iter(self._tools)

+    def _fix_array_schemas(self, schema):
+        """
+        Fix array schemas by adding missing 'items' attribute required by OpenAI.
+
+        This ensures compatibility with OpenAI's function calling format which
+        requires array types to specify the type of items they contain.
+
+        Args:
+            schema: The schema dictionary to fix
+
+        Returns:
+            dict: The fixed schema
+        """
+        if not isinstance(schema, dict):
+            return schema
+
+        # Create a copy to avoid modifying the original
+        fixed_schema = schema.copy()
+
+        # Fix array types at the current level
+        if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
+            # Add a default items schema for arrays without it
+            fixed_schema["items"] = {"type": "string"}
+
+        # Recursively fix nested schemas
+        if "properties" in fixed_schema:
+            fixed_properties = {}
+            for prop_name, prop_schema in fixed_schema["properties"].items():
+                fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
+            fixed_schema["properties"] = fixed_properties
+
+        # Fix items schema if it exists
+        if "items" in fixed_schema:
+            fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])
+
+        return fixed_schema
+
     def to_openai_tool(self):
         """Convert the MCP tool to an OpenAI-compatible tool definition.

@@ -404,7 +441,8 @@ class MCP:
         # Create OpenAI tool definition
         parameters = {}
         if hasattr(tool, 'inputSchema') and tool.inputSchema:
-
+            # Fix array schemas to include 'items' attribute
+            parameters = self._fix_array_schemas(tool.inputSchema)
         else:
             # Create a minimal schema if none exists
             parameters = {
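
Both MCP tool wrappers now run their input schema through _fix_array_schemas before handing it to OpenAI-style function calling, because OpenAI rejects array parameters that do not declare items. A self-contained sketch of the transformation (written as a module-level function for illustration, not the package's method):

    def fix_array_schemas(schema):
        # Mirror of the patched logic: add a default items type to bare arrays, recursively.
        if not isinstance(schema, dict):
            return schema
        fixed = schema.copy()
        if fixed.get("type") == "array" and "items" not in fixed:
            fixed["items"] = {"type": "string"}  # default chosen by the patch
        if "properties" in fixed:
            fixed["properties"] = {k: fix_array_schemas(v) for k, v in fixed["properties"].items()}
        if "items" in fixed:
            fixed["items"] = fix_array_schemas(fixed["items"])
        return fixed

    broken = {"type": "object", "properties": {"tags": {"type": "array"}}}
    print(fix_array_schemas(broken))
    # {'type': 'object', 'properties': {'tags': {'type': 'array', 'items': {'type': 'string'}}}}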
praisonaiagents/mcp/mcp_sse.py
CHANGED
@@ -88,14 +88,54 @@ class SSEMCPTool:
             logger.error(f"Error in _async_call for {self.name}: {e}")
             raise

+    def _fix_array_schemas(self, schema):
+        """
+        Fix array schemas by adding missing 'items' attribute required by OpenAI.
+
+        This ensures compatibility with OpenAI's function calling format which
+        requires array types to specify the type of items they contain.
+
+        Args:
+            schema: The schema dictionary to fix
+
+        Returns:
+            dict: The fixed schema
+        """
+        if not isinstance(schema, dict):
+            return schema
+
+        # Create a copy to avoid modifying the original
+        fixed_schema = schema.copy()
+
+        # Fix array types at the current level
+        if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
+            # Add a default items schema for arrays without it
+            fixed_schema["items"] = {"type": "string"}
+
+        # Recursively fix nested schemas
+        if "properties" in fixed_schema:
+            fixed_properties = {}
+            for prop_name, prop_schema in fixed_schema["properties"].items():
+                fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
+            fixed_schema["properties"] = fixed_properties
+
+        # Fix items schema if it exists
+        if "items" in fixed_schema:
+            fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])
+
+        return fixed_schema
+
     def to_openai_tool(self):
         """Convert the tool to OpenAI format."""
+        # Fix array schemas to include 'items' attribute
+        fixed_schema = self._fix_array_schemas(self.input_schema)
+
         return {
             "type": "function",
             "function": {
                 "name": self.name,
                 "description": self.description,
-                "parameters":
+                "parameters": fixed_schema
             }
         }

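
With the schema repaired, to_openai_tool in mcp_sse.py returns the standard OpenAI function-calling wrapper shown in the diff. A hedged example of the resulting structure, using made-up tool name, description, and parameters:

    tool_definition = {
        "type": "function",
        "function": {
            "name": "search_documents",          # example values, not from the package
            "description": "Search indexed documents",
            "parameters": {
                "type": "object",
                "properties": {
                    "keywords": {"type": "array", "items": {"type": "string"}},  # 'items' added by the fix
                },
                "required": ["keywords"],
            },
        },
    }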
{praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/RECORD
CHANGED
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=iTt9pQigjexlQA-65xfeBHsxdRzQq3yvVxZW65Usz_c,110112
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=C_yDdJB4XUuwKA9DrysAtAj3zSYT0IKtfCT4Pxo0oyI,63309
@@ -15,10 +15,10 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=bSywIHBHH0YUf4hSx-FmFXkRv2g1Rlhuk-gjoImE8j8,925
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=NfsJNSScR_kS2sLeU1Ah41IXYN804cOQEMuxpt59zuM,104505
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
-praisonaiagents/mcp/mcp.py,sha256=
-praisonaiagents/mcp/mcp_sse.py,sha256=
+praisonaiagents/mcp/mcp.py,sha256=qr9xbTfM3V6ZQgs3o9mGv6pDiJPnfbI24nUK_vUnGOI,18771
+praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYSY,8357
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
 praisonaiagents/memory/memory.py,sha256=eYXVvuXrvt4LaEJ-AAbAiwpFUCuS5LH5F7Z0cBW5_gQ,42186
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
@@ -51,7 +51,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.108.dist-info/METADATA,sha256=kDa4kCNt9b_sAFX9JECSpWfxXhlT7YW0hn8kjezwrKs,1669
+praisonaiagents-0.0.108.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.108.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.108.dist-info/RECORD,,
{praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/WHEEL
File without changes
{praisonaiagents-0.0.106.dist-info → praisonaiagents-0.0.108.dist-info}/top_level.txt
File without changes