praisonaiagents 0.0.83__py3-none-any.whl → 0.0.85__py3-none-any.whl
This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
- praisonaiagents/llm/llm.py +232 -161
- praisonaiagents/mcp/mcp.py +8 -7
- praisonaiagents/mcp/mcp_sse.py +9 -5
- {praisonaiagents-0.0.83.dist-info → praisonaiagents-0.0.85.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.83.dist-info → praisonaiagents-0.0.85.dist-info}/RECORD +7 -7
- {praisonaiagents-0.0.83.dist-info → praisonaiagents-0.0.85.dist-info}/WHEEL +1 -1
- {praisonaiagents-0.0.83.dist-info → praisonaiagents-0.0.85.dist-info}/top_level.txt +0 -0
praisonaiagents/llm/llm.py
CHANGED
@@ -367,12 +367,13 @@ class LLM:
     # If reasoning_steps is True, do a single non-streaming call
     if reasoning_steps:
         resp = litellm.completion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False, # force non-streaming
+                tools=formatted_tools,
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
         response_text = resp["choices"][0]["message"]["content"]
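The pattern in this hunk repeats throughout llm.py: each direct litellm.completion()/acompletion() call now spreads the dictionary returned by the new self._build_completion_params() helper (added later in this diff) instead of listing model and provider arguments inline, and the package-internal reasoning_steps flag is filtered out of **kwargs before they are forwarded. A minimal, self-contained sketch of that filtering idiom (fake_completion below is a stand-in, not part of the package):

# Stand-in for litellm.completion: it just echoes the keyword arguments it receives.
def fake_completion(**params):
    return params

kwargs = {"reasoning_steps": True, "top_k": 40, "user": "alice"}

# Drop the internal flag so it never reaches the API call; forward everything else.
filtered = {k: v for k, v in kwargs.items() if k != "reasoning_steps"}
print(fake_completion(stream=False, **filtered))
# -> {'stream': False, 'top_k': 40, 'user': 'alice'}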
@@ -401,12 +402,13 @@ class LLM:
     with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
         response_text = ""
         for chunk in litellm.completion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                tools=formatted_tools,
+                temperature=temperature,
+                stream=True,
+                **kwargs
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
@@ -416,12 +418,13 @@ class LLM:
     # Non-verbose mode, just collect the response
     response_text = ""
     for chunk in litellm.completion(
-        … (6 removed lines not rendered in this view)
+        **self._build_completion_params(
+            messages=messages,
+            tools=formatted_tools,
+            temperature=temperature,
+            stream=True,
+            **kwargs
+        )
     ):
         if chunk and chunk.choices and chunk.choices[0].delta.content:
             response_text += chunk.choices[0].delta.content
@@ -430,12 +433,13 @@ class LLM:
 
     # Get final completion to check for tool calls
     final_response = litellm.completion(
-        … (6 removed lines not rendered in this view)
+        **self._build_completion_params(
+            messages=messages,
+            tools=formatted_tools,
+            temperature=temperature,
+            stream=False, # No streaming for tool call check
+            **kwargs
+        )
     )
 
     tool_calls = final_response["choices"][0]["message"].get("tool_calls")
@@ -547,10 +551,11 @@ class LLM:
     with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
         response_text = ""
         for chunk in litellm.completion(
-            … (4 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=follow_up_messages,
+                temperature=temperature,
+                stream=True
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
@@ -559,10 +564,11 @@ class LLM:
     else:
         response_text = ""
         for chunk in litellm.completion(
-            … (4 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=follow_up_messages,
+                temperature=temperature,
+                stream=True
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 response_text += chunk.choices[0].delta.content
@@ -573,11 +579,12 @@ class LLM:
     # If reasoning_steps is True, do a single non-streaming call
     elif reasoning_steps:
         resp = litellm.completion(
-            … (5 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False, # force non-streaming
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
         response_text = resp["choices"][0]["message"]["content"]
@@ -607,10 +614,11 @@ class LLM:
     with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
         response_text = ""
         for chunk in litellm.completion(
-            … (4 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=True
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
@@ -619,10 +627,11 @@ class LLM:
     else:
         response_text = ""
         for chunk in litellm.completion(
-            … (4 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=True
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 response_text += chunk.choices[0].delta.content
@@ -663,12 +672,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     # If reasoning_steps is True, do a single non-streaming call to capture reasoning
     if reasoning_steps:
         reflection_resp = litellm.completion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=reflection_messages,
+                temperature=temperature,
+                stream=False, # Force non-streaming
+                response_format={"type": "json_object"},
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         # Grab reflection text and optional reasoning
         reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -697,12 +707,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
         reflection_text = ""
         for chunk in litellm.completion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=reflection_messages,
+                temperature=temperature,
+                stream=True,
+                response_format={"type": "json_object"},
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
@@ -711,12 +722,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     else:
         reflection_text = ""
         for chunk in litellm.completion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=reflection_messages,
+                temperature=temperature,
+                stream=True,
+                response_format={"type": "json_object"},
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 reflection_text += chunk.choices[0].delta.content
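The three reflection hunks above also request JSON mode via response_format={"type": "json_object"}, matching the prompt quoted in the hunk headers that requires 'reflection' and 'satisfactory' keys. A hedged sketch of consuming such a reply (the reply string and the yes/no convention are fabricated for illustration):

import json

reply = '{"reflection": "The answer lacks a source for its statistic.", "satisfactory": "no"}'
data = json.loads(reply)
if str(data["satisfactory"]).lower() != "yes":
    print("Revise using feedback:", data["reflection"])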
@@ -953,11 +965,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if reasoning_steps:
         # Non-streaming call to capture reasoning
         resp = await litellm.acompletion(
-            … (5 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False, # force non-streaming
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
         response_text = resp["choices"][0]["message"]["content"]
@@ -984,11 +997,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     # 1) Make the streaming call WITHOUT tools
     # ----------------------------------------------------
     async for chunk in await litellm.acompletion(
-        … (5 removed lines not rendered in this view)
+        **self._build_completion_params(
+            messages=messages,
+            temperature=temperature,
+            stream=True,
+            **kwargs
+        )
     ):
         if chunk and chunk.choices and chunk.choices[0].delta.content:
             response_text += chunk.choices[0].delta.content
@@ -997,11 +1011,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     else:
         # Non-verbose streaming call, still no tools
        async for chunk in await litellm.acompletion(
-            … (5 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=True,
+                **kwargs
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 response_text += chunk.choices[0].delta.content
@@ -1014,12 +1029,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if tools and execute_tool_fn:
         # Next call with tools if needed
         tool_response = await litellm.acompletion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False,
+                tools=formatted_tools, # We safely pass tools here
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         # handle tool_calls from tool_response as usual...
         tool_calls = tool_response.choices[0].message.get("tool_calls")
@@ -1125,10 +1141,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if verbose:
         response_text = ""
         async for chunk in await litellm.acompletion(
-            … (4 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=follow_up_messages,
+                temperature=temperature,
+                stream=True
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
@@ -1138,10 +1155,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     else:
         response_text = ""
         async for chunk in await litellm.acompletion(
-            … (4 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=follow_up_messages,
+                temperature=temperature,
+                stream=True
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 response_text += chunk.choices[0].delta.content
@@ -1153,12 +1171,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     elif reasoning_steps:
         # Non-streaming call to capture reasoning
         resp = await litellm.acompletion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False, # force non-streaming
+                tools=formatted_tools, # Include tools
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
         response_text = resp["choices"][0]["message"]["content"]
@@ -1183,12 +1202,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     # Get response after tool calls with streaming
     if verbose:
         async for chunk in await litellm.acompletion(
-            … (6 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=True,
+                tools=formatted_tools,
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 content = chunk.choices[0].delta.content
@@ -1197,12 +1217,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
 else:
     response_text = ""
-    for chunk in litellm.…
-    … (5 more removed lines not rendered in this view)
+    async for chunk in await litellm.acompletion(
+        **self._build_completion_params(
+            messages=messages,
+            temperature=temperature,
+            stream=True,
+            **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+        )
     ):
         if chunk and chunk.choices and chunk.choices[0].delta.content:
             response_text += chunk.choices[0].delta.content
@@ -1242,13 +1263,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
     # If reasoning_steps is True, do a single non-streaming call to capture reasoning
     if reasoning_steps:
-        reflection_resp = litellm.…
-        … (6 more removed lines not rendered in this view)
+        reflection_resp = await litellm.acompletion(
+            **self._build_completion_params(
+                messages=reflection_messages,
+                temperature=temperature,
+                stream=False, # Force non-streaming
+                response_format={"type": "json_object"},
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         )
         # Grab reflection text and optional reasoning
         reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -1276,13 +1298,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if verbose:
         with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
             reflection_text = ""
-            for chunk in litellm.…
-            … (6 more removed lines not rendered in this view)
+            async for chunk in await litellm.acompletion(
+                **self._build_completion_params(
+                    messages=reflection_messages,
+                    temperature=temperature,
+                    stream=True,
+                    response_format={"type": "json_object"},
+                    **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+                )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     content = chunk.choices[0].delta.content
@@ -1290,13 +1313,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 live.update(display_generating(reflection_text, start_time))
     else:
         reflection_text = ""
-        for chunk in litellm.…
-        … (6 more removed lines not rendered in this view)
+        async for chunk in await litellm.acompletion(
+            **self._build_completion_params(
+                messages=reflection_messages,
+                temperature=temperature,
+                stream=True,
+                response_format={"type": "json_object"},
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+            )
         ):
             if chunk and chunk.choices and chunk.choices[0].delta.content:
                 reflection_text += chunk.choices[0].delta.content
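Besides the parameter refactor, the three hunks above fix the async reflection path: loops that had been built on synchronous litellm.completion( calls inside an async method now use async for chunk in await litellm.acompletion(...), so streaming no longer blocks the event loop. A self-contained sketch of that shape with a dummy streamer (litellm's real streaming objects are richer than plain strings):

import asyncio

# Dummy stand-in for an acompletion(stream=True) call: returns an async generator.
async def fake_acompletion(**params):
    async def stream():
        for piece in ("Hel", "lo", "!"):
            await asyncio.sleep(0)  # yield control, as a network read would
            yield piece
    return stream()

async def main():
    text = ""
    # The corrected shape: await the coroutine, then iterate its stream cooperatively.
    async for chunk in await fake_acompletion(stream=True):
        text += chunk
    print(text)  # Hello!

asyncio.run(main())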
@@ -1408,6 +1432,47 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
         litellm.callbacks = events
 
+    def _build_completion_params(self, **override_params) -> Dict[str, Any]:
+        """Build parameters for litellm completion calls with all necessary config"""
+        params = {
+            "model": self.model,
+        }
+
+        # Add optional parameters if they exist
+        if self.base_url:
+            params["base_url"] = self.base_url
+        if self.api_key:
+            params["api_key"] = self.api_key
+        if self.api_version:
+            params["api_version"] = self.api_version
+        if self.timeout:
+            params["timeout"] = self.timeout
+        if self.max_tokens:
+            params["max_tokens"] = self.max_tokens
+        if self.top_p:
+            params["top_p"] = self.top_p
+        if self.presence_penalty:
+            params["presence_penalty"] = self.presence_penalty
+        if self.frequency_penalty:
+            params["frequency_penalty"] = self.frequency_penalty
+        if self.logit_bias:
+            params["logit_bias"] = self.logit_bias
+        if self.response_format:
+            params["response_format"] = self.response_format
+        if self.seed:
+            params["seed"] = self.seed
+        if self.logprobs:
+            params["logprobs"] = self.logprobs
+        if self.top_logprobs:
+            params["top_logprobs"] = self.top_logprobs
+        if self.stop_phrases:
+            params["stop"] = self.stop_phrases
+
+        # Override with any provided parameters
+        params.update(override_params)
+
+        return params
+
     # Response without tool calls
     def response(
         self,
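The new helper gives one place where instance-level configuration (model, credentials, sampling settings) is assembled, and params.update(override_params) lets per-call arguments win over that configuration. A self-contained sketch of the precedence (ParamsDemo is a stand-in mirroring only a few of the attributes checked above):

from typing import Any, Dict, Optional

class ParamsDemo:
    """Stand-in with a handful of the optional attributes the real class checks."""
    def __init__(self, model: str, api_key: Optional[str] = None,
                 max_tokens: Optional[int] = None, top_p: Optional[float] = None):
        self.model = model
        self.api_key = api_key
        self.max_tokens = max_tokens
        self.top_p = top_p

    def _build_completion_params(self, **override_params) -> Dict[str, Any]:
        params: Dict[str, Any] = {"model": self.model}
        if self.api_key:
            params["api_key"] = self.api_key
        if self.max_tokens:
            params["max_tokens"] = self.max_tokens
        if self.top_p:
            params["top_p"] = self.top_p
        params.update(override_params)  # per-call values override instance config
        return params

demo = ParamsDemo(model="gpt-4o-mini", max_tokens=512)
print(demo._build_completion_params(temperature=0.2, stream=True))
# -> {'model': 'gpt-4o-mini', 'max_tokens': 512, 'temperature': 0.2, 'stream': True}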
@@ -1466,11 +1531,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if verbose:
         with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
             for chunk in litellm.completion(
-                … (5 removed lines not rendered in this view)
+                **self._build_completion_params(
+                    messages=messages,
+                    temperature=temperature,
+                    stream=True,
+                    **kwargs
+                )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     content = chunk.choices[0].delta.content
@@ -1478,21 +1544,23 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     live.update(display_generating(response_text, start_time))
         else:
             for chunk in litellm.completion(
-                … (5 removed lines not rendered in this view)
+                **self._build_completion_params(
+                    messages=messages,
+                    temperature=temperature,
+                    stream=True,
+                    **kwargs
+                )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     response_text += chunk.choices[0].delta.content
     else:
         response = litellm.completion(
-            … (5 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False,
+                **kwargs
+            )
         )
         response_text = response.choices[0].message.content.strip()
 
@@ -1569,11 +1637,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     if verbose:
         with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
             async for chunk in await litellm.acompletion(
-                … (5 removed lines not rendered in this view)
+                **self._build_completion_params(
+                    messages=messages,
+                    temperature=temperature,
+                    stream=True,
+                    **kwargs
+                )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     content = chunk.choices[0].delta.content
@@ -1581,21 +1650,23 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     live.update(display_generating(response_text, start_time))
         else:
             async for chunk in await litellm.acompletion(
-                … (5 removed lines not rendered in this view)
+                **self._build_completion_params(
+                    messages=messages,
+                    temperature=temperature,
+                    stream=True,
+                    **kwargs
+                )
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     response_text += chunk.choices[0].delta.content
     else:
         response = await litellm.acompletion(
-            … (5 removed lines not rendered in this view)
+            **self._build_completion_params(
+                messages=messages,
+                temperature=temperature,
+                stream=False,
+                **kwargs
+            )
         )
         response_text = response.choices[0].message.content.strip()
 
praisonaiagents/mcp/mcp.py
CHANGED
@@ -16,13 +16,14 @@ from mcp.client.stdio import stdio_client
 class MCPToolRunner(threading.Thread):
     """A dedicated thread for running MCP operations."""
 
-    def __init__(self, server_params):
+    def __init__(self, server_params, timeout=60):
         super().__init__(daemon=True)
         self.server_params = server_params
         self.queue = queue.Queue()
         self.result_queue = queue.Queue()
         self.initialized = threading.Event()
         self.tools = []
+        self.timeout = timeout
         self.start()
 
     def run(self):
@@ -74,9 +75,9 @@ class MCPToolRunner(threading.Thread):
     def call_tool(self, tool_name, arguments):
         """Call an MCP tool and wait for the result."""
         if not self.initialized.is_set():
-            self.initialized.wait(timeout=…
+            self.initialized.wait(timeout=self.timeout)
             if not self.initialized.is_set():
-                return "Error: MCP initialization timed out"
+                return f"Error: MCP initialization timed out after {self.timeout} seconds"
 
         # Put request in queue
         self.queue.put((tool_name, arguments))
@@ -189,7 +190,7 @@ class MCP:
         if isinstance(command_or_string, str) and re.match(r'^https?://', command_or_string):
             # Import the SSE client implementation
             from .mcp_sse import SSEMCPClient
-            self.sse_client = SSEMCPClient(command_or_string, debug=debug)
+            self.sse_client = SSEMCPClient(command_or_string, debug=debug, timeout=timeout)
             self._tools = list(self.sse_client.tools)
             self.is_sse = True
             self.is_npx = False
@@ -216,11 +217,11 @@ class MCP:
             args=arguments,
             **kwargs
         )
-        self.runner = MCPToolRunner(self.server_params)
+        self.runner = MCPToolRunner(self.server_params, timeout)
 
         # Wait for initialization
-        if not self.runner.initialized.wait(timeout=…
-            print("Warning: MCP initialization timed out")
+        if not self.runner.initialized.wait(timeout=self.timeout):
+            print(f"Warning: MCP initialization timed out after {self.timeout} seconds")
 
         # Automatically detect if this is an NPX command
         self.is_npx = cmd == 'npx' or (isinstance(cmd, str) and os.path.basename(cmd) == 'npx')
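Taken together, the mcp.py hunks thread a caller-supplied timeout from the MCP constructor down to the stdio runner thread and to the SSE client, replacing the previously hard-coded waits. A hedged usage sketch; the import path, the exact constructor shape, and the server commands below are assumptions based only on what this diff shows:

from praisonaiagents.mcp.mcp import MCP  # assumed import path

# Stdio transport: the timeout now bounds both the init wait and each tool call.
mcp_stdio = MCP("npx some-mcp-server", timeout=120)  # hypothetical server command

# SSE transport: an http(s):// string routes to SSEMCPClient, which receives the same timeout.
mcp_sse = MCP("http://localhost:8080/sse", timeout=30)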
praisonaiagents/mcp/mcp_sse.py
CHANGED
@@ -31,7 +31,7 @@ def get_event_loop():
 class SSEMCPTool:
     """A wrapper for an MCP tool that can be used with praisonaiagents."""
 
-    def __init__(self, name: str, description: str, session: ClientSession, input_schema: Optional[Dict[str, Any]] = None):
+    def __init__(self, name: str, description: str, session: ClientSession, input_schema: Optional[Dict[str, Any]] = None, timeout: int = 60):
         self.name = name
         self.__name__ = name # Required for Agent to recognize it as a tool
         self.__qualname__ = name # Required for Agent to recognize it as a tool
@@ -39,6 +39,7 @@ class SSEMCPTool:
         self.description = description
         self.session = session
         self.input_schema = input_schema or {}
+        self.timeout = timeout
 
         # Create a signature based on input schema
         params = []
@@ -66,7 +67,7 @@ class SSEMCPTool:
         future = asyncio.run_coroutine_threadsafe(self._async_call(**kwargs), loop)
         try:
             # Wait for the result with a timeout
-            return future.result(timeout=…
+            return future.result(timeout=self.timeout)
         except Exception as e:
             logger.error(f"Error calling tool {self.name}: {e}")
             return f"Error: {str(e)}"
@@ -102,16 +103,18 @@ class SSEMCPTool:
 class SSEMCPClient:
     """A client for connecting to an MCP server over SSE."""
 
-    def __init__(self, server_url: str, debug: bool = False):
+    def __init__(self, server_url: str, debug: bool = False, timeout: int = 60):
         """
         Initialize an SSE MCP client.
 
         Args:
             server_url: The URL of the SSE MCP server
             debug: Whether to enable debug logging
+            timeout: Timeout in seconds for operations (default: 60)
         """
         self.server_url = server_url
         self.debug = debug
+        self.timeout = timeout
         self.session = None
         self.tools = []
 
@@ -139,7 +142,7 @@ class SSEMCPClient:
 
         # Run the initialization in the event loop
         future = asyncio.run_coroutine_threadsafe(self._async_initialize(), loop)
-        self.tools = future.result(timeout=…
+        self.tools = future.result(timeout=self.timeout)
 
     async def _async_initialize(self):
         """Asynchronously initialize the connection and tools."""
@@ -169,7 +172,8 @@ class SSEMCPClient:
                 name=tool.name,
                 description=tool.description if hasattr(tool, 'description') else f"Call the {tool.name} tool",
                 session=self.session,
-                input_schema=input_schema
+                input_schema=input_schema,
+                timeout=self.timeout
             )
             tools.append(wrapper)
 
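The mcp_sse.py changes mirror the stdio ones: the client-level timeout is stored and handed to every SSEMCPTool wrapper, which then uses it when waiting on future.result(). A short sketch against the constructor shown above (the URL is a placeholder and assumes a reachable SSE MCP server):

from praisonaiagents.mcp.mcp_sse import SSEMCPClient

client = SSEMCPClient("http://localhost:8080/sse", debug=True, timeout=30)
for tool in client.tools:
    # Each wrapper inherits the client's timeout for its blocking result wait.
    print(tool.name, "- call timeout:", tool.timeout, "seconds")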
{praisonaiagents-0.0.83.dist-info → praisonaiagents-0.0.85.dist-info}/RECORD
CHANGED
@@ -10,10 +10,10 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=Po0JZsgjYJrXdNSggmUGOWidZEF0f8xo4nhsZZfh8tY,13217
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=…
+praisonaiagents/llm/llm.py,sha256=Y8z7mfzL_OMhoPSIr7k7Demk8HvHmJZv80EXFY6SUEU,91863
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
-praisonaiagents/mcp/mcp.py,sha256=…
-praisonaiagents/mcp/mcp_sse.py,sha256=…
+praisonaiagents/mcp/mcp.py,sha256=foarT5IoCZ6V8P9AbnqnWQHKmshJoD24gf3OP4sD_IM,16419
+praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
@@ -40,7 +40,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.…
-praisonaiagents-0.0.…
-praisonaiagents-0.0.…
-praisonaiagents-0.0.…
+praisonaiagents-0.0.85.dist-info/METADATA,sha256=p4gpH5C_8N2UL7IKRZlaJXBd_qjL36bpdoRly8NMzoE,1244
+praisonaiagents-0.0.85.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+praisonaiagents-0.0.85.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.85.dist-info/RECORD,,
{praisonaiagents-0.0.83.dist-info → praisonaiagents-0.0.85.dist-info}/top_level.txt
File without changes