praisonaiagents 0.0.130__py3-none-any.whl → 0.0.132__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +95 -18
- praisonaiagents/agent/image_agent.py +12 -3
- praisonaiagents/agents/agents.py +32 -7
- praisonaiagents/guardrails/llm_guardrail.py +44 -2
- praisonaiagents/llm/llm.py +273 -44
- praisonaiagents/main.py +91 -45
- praisonaiagents/memory/memory.py +16 -6
- praisonaiagents/process/process.py +88 -4
- praisonaiagents/task/task.py +62 -6
- {praisonaiagents-0.0.130.dist-info → praisonaiagents-0.0.132.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.130.dist-info → praisonaiagents-0.0.132.dist-info}/RECORD +13 -13
- {praisonaiagents-0.0.130.dist-info → praisonaiagents-0.0.132.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.130.dist-info → praisonaiagents-0.0.132.dist-info}/top_level.txt +0 -0
praisonaiagents/llm/llm.py
CHANGED
@@ -14,6 +14,7 @@ from ..main import (
     display_generating,
     display_self_reflection,
     ReflectionOutput,
+    execute_sync_callback,
 )
 from rich.console import Console
 from rich.live import Live
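The new `execute_sync_callback` import backs the headline fix in this release: interaction callbacks now fire even when `verbose=False`, instead of only as a side effect of `display_interaction`. A minimal consumer sketch, assuming the `register_display_callback` helper that praisonaiagents documents for this registry; the keyword names mirror the `execute_sync_callback('interaction', ...)` calls added throughout this file:

from praisonaiagents import register_display_callback

def log_interaction(message=None, response=None, markdown=None,
                    generation_time=None, **kwargs):
    # Keywords mirror the execute_sync_callback('interaction', ...) payload;
    # **kwargs absorbs any extra fields the verbose display path may pass.
    print(f"[{(generation_time or 0.0):.2f}s] {message!r} -> {str(response)[:80]!r}")

register_display_callback('interaction', log_interaction)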
@@ -130,8 +131,10 @@ class LLM:
             if 'tools' in safe_config:
                 tools = safe_config['tools']
                 # Check if tools is iterable before processing
-                if tools and
+                if tools and isinstance(tools, (list, tuple)):
                     safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+                elif tools and callable(tools):
+                    safe_config['tools'] = tools.__name__ if hasattr(tools, "__name__") else str(tools)
                 else:
                     safe_config['tools'] = None
             if 'output_json' in safe_config:
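Previously this sanitizer assumed `tools` was always a list, so a configuration that passed a single callable could break the logging path. The logic of the new branches, extracted into a standalone sketch (`_sanitize_tools` is a hypothetical name for illustration):

def _sanitize_tools(tools):
    # Mirrors the new branches: a list/tuple of tools becomes a list of
    # names, a single callable becomes one name, anything else maps to None.
    if tools and isinstance(tools, (list, tuple)):
        return [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
    if tools and callable(tools):
        return tools.__name__ if hasattr(tools, "__name__") else str(tools)
    return None

assert _sanitize_tools([len, "search"]) == ["len", "search"]
assert _sanitize_tools(len) == "len"
assert _sanitize_tools(None) is None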
@@ -617,6 +620,9 @@
         agent_name: Optional[str] = None,
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         execute_tool_fn: Optional[Callable] = None,
         stream: bool = True,
         **kwargs
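This method (and the async variant changed identically further down) now accepts task metadata next to the existing agent metadata, and forwards all six fields to every `display_interaction` call so callbacks and telemetry can attribute output to a task. A hedged usage sketch; the `LLM(model=...)` construction and the `prompt` keyword are assumptions based on this signature, not confirmed by the diff:

from praisonaiagents.llm import LLM

llm = LLM(model="gpt-4o-mini")  # assumed constructor
answer = llm.get_response(
    prompt="Summarise today's findings",
    agent_name="Researcher",
    agent_role="Research Analyst",
    agent_tools=["internet_search"],
    task_name="daily-brief",                 # new in 0.0.132
    task_description="One-paragraph summary of the research run",
    task_id="task-42",                       # new in 0.0.132
)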
@@ -692,6 +698,7 @@

         start_time = time.time()
         reflection_count = 0
+        callback_executed = False  # Track if callback has been executed for this interaction
         interaction_displayed = False  # Track if interaction has been displayed

         # Display initial instruction once
@@ -737,25 +744,53 @@
                     response_text = resp["choices"][0]["message"]["content"]
                     final_response = resp

+                    # Execute callbacks and display based on verbose setting
+                    generation_time_val = time.time() - current_time
+                    response_content = f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}" if reasoning_content else response_text
+
                     # Optionally display reasoning if present
                     if verbose and reasoning_content and not interaction_displayed:
                         display_interaction(
                             original_prompt,
                             f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                             markdown=markdown,
-                            generation_time=
-                            console=console
+                            generation_time=generation_time_val,
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                         interaction_displayed = True
+                        callback_executed = True
                     elif verbose and not interaction_displayed:
                         display_interaction(
                             original_prompt,
                             response_text,
                             markdown=markdown,
-                            generation_time=
-                            console=console
+                            generation_time=generation_time_val,
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                         interaction_displayed = True
+                        callback_executed = True
+                    elif not callback_executed:
+                        # Only execute callback if display_interaction hasn't been called (which would trigger callbacks internally)
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_content,
+                            markdown=markdown,
+                            generation_time=generation_time_val
+                        )
+                        callback_executed = True

                 # Otherwise do the existing streaming approach
                 else:
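The `callback_executed` flag exists because `display_interaction` already invokes registered callbacks internally: before this change, quiet (`verbose=False`) runs fired no interaction callback at all, and a naive fix would have fired it twice on verbose runs. The dispatch reduces to the following self-contained sketch, with stub functions standing in for the real helpers:

import time

def display_interaction(prompt, response, **meta):
    print(f"[panel] {prompt} -> {response}")  # stub: the real one also fires callbacks

def execute_sync_callback(event, **payload):
    print(f"[callback] {event}: {payload}")   # stub: the real one walks the registry

def finish_interaction(prompt, response, verbose):
    # Simplified model of the guard threaded through this method.
    start = time.time()
    callback_executed = False
    interaction_displayed = False
    if verbose and not interaction_displayed:
        display_interaction(prompt, response)  # triggers callbacks internally
        interaction_displayed = True
        callback_executed = True
    elif not callback_executed:
        execute_sync_callback('interaction', message=prompt, response=response,
                              generation_time=time.time() - start)
        callback_executed = True

finish_interaction("hi", "hello", verbose=False)  # exactly one callback fires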
@@ -815,6 +850,18 @@

                     response_text = response_text.strip() if response_text else ""

+                    # Execute callbacks after streaming completes (only if not verbose, since verbose will call display_interaction later)
+                    if not verbose and not callback_executed:
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - current_time
+                        )
+                        callback_executed = True
+
+
                     # Create a mock final_response with the captured data
                     final_response = {
                         "choices": [{
@@ -839,16 +886,34 @@
                 )
                 response_text = final_response["choices"][0]["message"]["content"]

+                # Execute callbacks and display based on verbose setting
                 if verbose and not interaction_displayed:
-                    # Display the complete response at once
+                    # Display the complete response at once (this will trigger callbacks internally)
                     display_interaction(
                         original_prompt,
                         response_text,
                         markdown=markdown,
                         generation_time=time.time() - current_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                     interaction_displayed = True
+                    callback_executed = True
+                elif not callback_executed:
+                    # Only execute callback if display_interaction hasn't been called
+                    execute_sync_callback(
+                        'interaction',
+                        message=original_prompt,
+                        response=response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - current_time
+                    )
+                    callback_executed = True

                 tool_calls = final_response["choices"][0]["message"].get("tool_calls")

@@ -926,16 +991,14 @@
                         iteration_count += 1
                         continue

-                        #
-
-
-
-
-
-                        })
+                    # Check if the LLM provided a final answer alongside the tool calls
+                    # If response_text contains substantive content, treat it as the final answer
+                    if response_text and response_text.strip() and len(response_text.strip()) > 10:
+                        # LLM provided a final answer after tool execution, don't continue
+                        final_response_text = response_text.strip()
+                        break

-                    #
-                    # instead of immediately trying to get a final response
+                    # Otherwise, continue the loop to check if more tools are needed
                     iteration_count += 1
                     continue
                 else:
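This hunk also changes loop behavior: when the model returns substantive text after tool execution, that text is accepted as the final answer instead of paying for another completion round-trip. The heuristic in isolation (`_is_final_answer` is a hypothetical name for illustration):

def _is_final_answer(response_text):
    # Any post-tool-call text longer than 10 characters counts as a final
    # answer and breaks the tool-calling loop.
    return bool(response_text and response_text.strip()
                and len(response_text.strip()) > 10)

assert _is_final_answer("The capital of France is Paris.")
assert not _is_final_answer("   OK   ")  # too short to be substantive
assert not _is_final_answer(None)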
@@ -954,6 +1017,9 @@
                 return final_response_text

             # No tool calls were made in this iteration, return the response
+            generation_time_val = time.time() - start_time
+            response_content = f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}" if stored_reasoning_content else response_text
+
             if verbose and not interaction_displayed:
                 # If we have stored reasoning content from tool execution, display it
                 if stored_reasoning_content:
@@ -961,18 +1027,41 @@
                         original_prompt,
                         f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
                         markdown=markdown,
-                        generation_time=
-                        console=console
+                        generation_time=generation_time_val,
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 else:
                     display_interaction(
                         original_prompt,
                         response_text,
                         markdown=markdown,
-                        generation_time=
-                        console=console
+                        generation_time=generation_time_val,
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 interaction_displayed = True
+                callback_executed = True
+            elif not callback_executed:
+                # Only execute callback if display_interaction hasn't been called
+                execute_sync_callback(
+                    'interaction',
+                    message=original_prompt,
+                    response=response_content,
+                    markdown=markdown,
+                    generation_time=generation_time_val
+                )
+                callback_executed = True

             response_text = response_text.strip() if response_text else ""

@@ -984,17 +1073,45 @@
             if output_json or output_pydantic:
                 self.chat_history.append({"role": "user", "content": original_prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
+
                 if verbose and not interaction_displayed:
                     display_interaction(original_prompt, response_text, markdown=markdown,
-                                    generation_time=time.time() - start_time, console=console
+                                    generation_time=time.time() - start_time, console=console,
+                                    agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                    task_name=task_name, task_description=task_description, task_id=task_id)
                     interaction_displayed = True
+                    callback_executed = True
+                elif not callback_executed:
+                    # Only execute callback if display_interaction hasn't been called
+                    execute_sync_callback(
+                        'interaction',
+                        message=original_prompt,
+                        response=response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time
+                    )
+                    callback_executed = True
                 return response_text

             if not self_reflect:
                 if verbose and not interaction_displayed:
                     display_interaction(original_prompt, response_text, markdown=markdown,
-                        generation_time=time.time() - start_time, console=console
+                        generation_time=time.time() - start_time, console=console,
+                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                        task_name=task_name, task_description=task_description, task_id=task_id)
                     interaction_displayed = True
+                    callback_executed = True
+                elif not callback_executed:
+                    # Only execute callback if display_interaction hasn't been called
+                    execute_sync_callback(
+                        'interaction',
+                        message=original_prompt,
+                        response=response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time
+                    )
+                    callback_executed = True
+
                 # Return reasoning content if reasoning_steps is True
                 if reasoning_steps and stored_reasoning_content:
                     return stored_reasoning_content
@@ -1039,7 +1156,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 elif verbose:
                     display_interaction(
@@ -1047,7 +1170,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         reflection_text,
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 else:
                     # Existing streaming approach
@@ -1098,14 +1227,18 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if satisfactory and reflection_count >= min_reflect - 1:
                         if verbose and not interaction_displayed:
                             display_interaction(prompt, response_text, markdown=markdown,
-                                              generation_time=time.time() - start_time, console=console
+                                              generation_time=time.time() - start_time, console=console,
+                                              agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                              task_name=task_name, task_description=task_description, task_id=task_id)
                             interaction_displayed = True
                         return response_text

                     if reflection_count >= max_reflect - 1:
                         if verbose and not interaction_displayed:
                             display_interaction(prompt, response_text, markdown=markdown,
-                                              generation_time=time.time() - start_time, console=console
+                                              generation_time=time.time() - start_time, console=console,
+                                              agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                              task_name=task_name, task_description=task_description, task_id=task_id)
                             interaction_displayed = True
                         return response_text

@@ -1126,6 +1259,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 messages=messages,
                                 temperature=temperature,
                                 stream=True,
+                                tools=formatted_tools,
                                 output_json=output_json,
                                 output_pydantic=output_pydantic,
                                 **kwargs
@@ -1142,6 +1276,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 messages=messages,
                                 temperature=temperature,
                                 stream=True,
+                                tools=formatted_tools,
                                 output_json=output_json,
                                 output_pydantic=output_pydantic,
                                 **kwargs
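Both self-reflection regeneration calls (this hunk and the one above it) previously dropped the tool schema, so an unsatisfactory answer could not be improved with tool use. A hedged standalone sketch of the fixed call shape; the model name and tool definition are illustrative, and `formatted_tools` is assumed to hold OpenAI-style function specs as elsewhere in this file:

import litellm

messages = [{"role": "user", "content": "What is 6 * 7? Use the calculator."}]
formatted_tools = [{
    "type": "function",
    "function": {
        "name": "calculator",
        "description": "Evaluate an arithmetic expression",
        "parameters": {
            "type": "object",
            "properties": {"expression": {"type": "string"}},
            "required": ["expression"],
        },
    },
}]

response = litellm.completion(
    model="gpt-4o-mini",
    messages=messages,
    stream=True,
    tools=formatted_tools,  # the argument these two hunks add
)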
@@ -1158,7 +1293,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if reflection_count >= max_reflect:
                         if verbose and not interaction_displayed:
                             display_interaction(prompt, response_text, markdown=markdown,
-                                              generation_time=time.time() - start_time, console=console
+                                              generation_time=time.time() - start_time, console=console,
+                                              agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                              task_name=task_name, task_description=task_description, task_id=task_id)
                             interaction_displayed = True
                         return response_text
                     continue
@@ -1206,6 +1343,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         agent_name: Optional[str] = None,
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         execute_tool_fn: Optional[Callable] = None,
         stream: bool = True,
         **kwargs
@@ -1276,6 +1416,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         start_time = time.time()
         reflection_count = 0
+        callback_executed = False  # Track if callback has been executed for this interaction
         interaction_displayed = False  # Track if interaction has been displayed

         # Format tools for LiteLLM using the shared helper
@@ -1313,7 +1454,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                     interaction_displayed = True
                 elif verbose and not interaction_displayed:
@@ -1322,7 +1469,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         response_text,
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                     interaction_displayed = True
                 else:
@@ -1406,7 +1559,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         response_text,
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                     interaction_displayed = True

@@ -1500,7 +1659,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                     interaction_displayed = True
                 elif verbose and not interaction_displayed:
|
@@ -1509,7 +1674,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
|
|
1509
1674
|
response_text,
|
1510
1675
|
markdown=markdown,
|
1511
1676
|
generation_time=time.time() - start_time,
|
1512
|
-
console=console
|
1677
|
+
console=console,
|
1678
|
+
agent_name=agent_name,
|
1679
|
+
agent_role=agent_role,
|
1680
|
+
agent_tools=agent_tools,
|
1681
|
+
task_name=task_name,
|
1682
|
+
task_description=task_description,
|
1683
|
+
task_id=task_id
|
1513
1684
|
)
|
1514
1685
|
interaction_displayed = True
|
1515
1686
|
else:
|
@@ -1559,6 +1730,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if reasoning_steps and reasoning_content:
                         stored_reasoning_content = reasoning_content

+                    # Check if the LLM provided a final answer alongside the tool calls
+                    # If response_text contains substantive content, treat it as the final answer
+                    if response_text and response_text.strip() and len(response_text.strip()) > 10:
+                        # LLM provided a final answer after tool execution, don't continue
+                        final_response_text = response_text.strip()
+                        break
+
                     # Continue the loop to check if more tools are needed
                     iteration_count += 1
                     continue
@@ -1575,7 +1753,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 if verbose and not interaction_displayed:
                     display_interaction(original_prompt, response_text, markdown=markdown,
-                                      generation_time=time.time() - start_time, console=console
+                                      generation_time=time.time() - start_time, console=console,
+                                      agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                      task_name=task_name, task_description=task_description, task_id=task_id)
                     interaction_displayed = True
                 return response_text

@@ -1591,11 +1771,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{display_text}",
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 else:
                     display_interaction(original_prompt, display_text, markdown=markdown,
-                                      generation_time=time.time() - start_time, console=console
+                                      generation_time=time.time() - start_time, console=console,
+                                      agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                      task_name=task_name, task_description=task_description, task_id=task_id)
                 interaction_displayed = True

             # Return reasoning content if reasoning_steps is True and we have it
@@ -1640,7 +1828,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 elif verbose:
                     display_interaction(
|
@@ -1648,7 +1842,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
|
|
1648
1842
|
reflection_text,
|
1649
1843
|
markdown=markdown,
|
1650
1844
|
generation_time=time.time() - start_time,
|
1651
|
-
console=console
|
1845
|
+
console=console,
|
1846
|
+
agent_name=agent_name,
|
1847
|
+
agent_role=agent_role,
|
1848
|
+
agent_tools=agent_tools,
|
1849
|
+
task_name=task_name,
|
1850
|
+
task_description=task_description,
|
1851
|
+
task_id=task_id
|
1652
1852
|
)
|
1653
1853
|
else:
|
1654
1854
|
# Existing streaming approach
|
@@ -1700,14 +1900,18 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
|
|
1700
1900
|
if satisfactory and reflection_count >= min_reflect - 1:
|
1701
1901
|
if verbose and not interaction_displayed:
|
1702
1902
|
display_interaction(prompt, response_text, markdown=markdown,
|
1703
|
-
generation_time=time.time() - start_time, console=console
|
1903
|
+
generation_time=time.time() - start_time, console=console,
|
1904
|
+
agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
|
1905
|
+
task_name=task_name, task_description=task_description, task_id=task_id)
|
1704
1906
|
interaction_displayed = True
|
1705
1907
|
return response_text
|
1706
1908
|
|
1707
1909
|
if reflection_count >= max_reflect - 1:
|
1708
1910
|
if verbose and not interaction_displayed:
|
1709
1911
|
display_interaction(prompt, response_text, markdown=markdown,
|
1710
|
-
generation_time=time.time() - start_time, console=console
|
1912
|
+
generation_time=time.time() - start_time, console=console,
|
1913
|
+
agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
|
1914
|
+
task_name=task_name, task_description=task_description, task_id=task_id)
|
1711
1915
|
interaction_displayed = True
|
1712
1916
|
return response_text
|
1713
1917
|
|
@@ -1843,10 +2047,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         output_json = override_params.get('output_json')
         output_pydantic = override_params.get('output_pydantic')

+        # Always remove these from params as they're not native litellm parameters
+        params.pop('output_json', None)
+        params.pop('output_pydantic', None)
+
         if output_json or output_pydantic:
-            # Always remove these from params as they're not native litellm parameters
-            params.pop('output_json', None)
-            params.pop('output_pydantic', None)

             # Check if this is a Gemini model that supports native structured outputs
             if self._is_gemini_model():
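Moving the two `pop` calls out of the conditional means the wrapper-only keys are stripped even when neither option is set, so a literal `output_json=None` in the overrides can no longer leak into the litellm call. The reordering in miniature (hypothetical helper name):

def build_params(override_params):
    # Pop the wrapper-only keys unconditionally, as the new code does.
    params = dict(override_params)
    output_json = params.pop('output_json', None)
    output_pydantic = params.pop('output_pydantic', None)
    if output_json or output_pydantic:
        pass  # structured-output handling (e.g. Gemini native schema) goes here
    return params

assert 'output_json' not in build_params({'output_json': None, 'temperature': 0.2})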
@@ -1972,6 +2177,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         verbose: bool = True,
         markdown: bool = True,
         console: Optional[Console] = None,
+        agent_name: Optional[str] = None,
+        agent_role: Optional[str] = None,
+        agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         **kwargs
     ) -> str:
         """Simple function to get model response without tool calls or complex features"""
@@ -2040,7 +2251,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text,
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console or self.console
+                    console=console or self.console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )

             return response_text.strip() if response_text else ""
@@ -2059,6 +2276,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         verbose: bool = True,
         markdown: bool = True,
         console: Optional[Console] = None,
+        agent_name: Optional[str] = None,
+        agent_role: Optional[str] = None,
+        agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         **kwargs
     ) -> str:
         """Async version of response function"""
@@ -2128,7 +2351,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text,
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console or self.console
+                    console=console or self.console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )

             return response_text.strip() if response_text else ""