praisonaiagents 0.0.58__py3-none-any.whl → 0.0.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/llm/llm.py +171 -1
- praisonaiagents/process/process.py +389 -124
- praisonaiagents/task/task.py +18 -16
- praisonaiagents/tools/train/data/generatecot.py +47 -14
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.60.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.60.dist-info}/RECORD +8 -8
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.60.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.58.dist-info → praisonaiagents-0.0.60.dist-info}/top_level.txt +0 -0
praisonaiagents/llm/llm.py
CHANGED
@@ -1040,4 +1040,174 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             if type(event) in event_types:
                 litellm._async_success_callback.remove(event)

-        litellm.callbacks = events
+        litellm.callbacks = events
+
+    # Response without tool calls
+    def response(
+        self,
+        prompt: Union[str, List[Dict]],
+        system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
+        stream: bool = True,
+        verbose: bool = True,
+        markdown: bool = True,
+        console: Optional[Console] = None,
+        **kwargs
+    ) -> str:
+        """Simple function to get model response without tool calls or complex features"""
+        try:
+            import litellm
+            import logging
+            logger = logging.getLogger(__name__)
+
+            litellm.set_verbose = False
+            start_time = time.time()
+
+            logger.debug("Using synchronous response function")
+
+            # Build messages list
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add prompt to messages
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            # Get response from LiteLLM
+            if stream:
+                response_text = ""
+                if verbose:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
+                        for chunk in litellm.completion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    for chunk in litellm.completion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+            else:
+                response = litellm.completion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False,
+                    **kwargs
+                )
+                response_text = response.choices[0].message.content.strip()
+
+            if verbose:
+                display_interaction(
+                    prompt if isinstance(prompt, str) else prompt[0].get("text", ""),
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console or self.console
+                )
+
+            return response_text.strip()
+
+        except Exception as error:
+            display_error(f"Error in response: {str(error)}")
+            raise
+
+    # Async version of response function. Response without tool calls
+    async def response_async(
+        self,
+        prompt: Union[str, List[Dict]],
+        system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
+        stream: bool = True,
+        verbose: bool = True,
+        markdown: bool = True,
+        console: Optional[Console] = None,
+        **kwargs
+    ) -> str:
+        """Async version of response function"""
+        try:
+            import litellm
+            import logging
+            logger = logging.getLogger(__name__)
+
+            litellm.set_verbose = False
+            start_time = time.time()
+
+            logger.debug("Using asynchronous response function")
+
+            # Build messages list
+            messages = []
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add prompt to messages
+            if isinstance(prompt, list):
+                messages.append({"role": "user", "content": prompt})
+            else:
+                messages.append({"role": "user", "content": prompt})
+
+            # Get response from LiteLLM
+            if stream:
+                response_text = ""
+                if verbose:
+                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
+                        async for chunk in await litellm.acompletion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    async for chunk in await litellm.acompletion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+            else:
+                response = await litellm.acompletion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False,
+                    **kwargs
+                )
+                response_text = response.choices[0].message.content.strip()
+
+            if verbose:
+                display_interaction(
+                    prompt if isinstance(prompt, str) else prompt[0].get("text", ""),
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console or self.console
+                )
+
+            return response_text.strip()
+
+        except Exception as error:
+            display_error(f"Error in response_async: {str(error)}")
+            raise
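The hunk above adds two direct, tool-free completion methods to the LLM class: a synchronous response() and an asynchronous response_async(), both delegating to litellm. A minimal usage sketch follows; the import path, the LLM(model=...) constructor argument, and the model name are assumptions for illustration and may differ from the actual 0.0.60 API surface.

# Minimal usage sketch (assumptions: LLM(model=...) constructor, import path,
# and the method signatures shown in the diff above; model name is illustrative).
import asyncio
from praisonaiagents.llm import LLM

llm = LLM(model="gpt-4o-mini")  # assumed constructor argument

# Synchronous, non-streaming call without tool calls
text = llm.response(
    prompt="Summarize the benefits of streaming responses.",
    system_prompt="You are a concise assistant.",
    stream=False,
    verbose=False,
)
print(text)

# Async variant using the new response_async method
async def main() -> None:
    result = await llm.response_async(
        prompt="Same question, but asynchronously.",
        stream=False,
        verbose=False,
    )
    print(result)

asyncio.run(main())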