PraisonAI 0.0.69__cp312-cp312-manylinux_2_35_x86_64.whl → 0.0.71__cp312-cp312-manylinux_2_35_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PraisonAI might be problematic.
- praisonai/deploy.py +1 -1
- praisonai/ui/chat.py +164 -16
- praisonai/ui/code.py +165 -20
- {praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/METADATA +2 -1
- {praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/RECORD +8 -8
- {praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/LICENSE +0 -0
- {praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/WHEEL +0 -0
- {praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/entry_points.txt +0 -0
praisonai/deploy.py
CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
         file.write("FROM python:3.11-slim\n")
         file.write("WORKDIR /app\n")
         file.write("COPY . .\n")
-        file.write("RUN pip install flask praisonai==0.0.
+        file.write("RUN pip install flask praisonai==0.0.71 gunicorn markdown\n")
         file.write("EXPOSE 8080\n")
         file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
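The only change in deploy.py is the version pinned inside the generated Dockerfile. To see the full Dockerfile those write() calls produce, one can replay them against an in-memory buffer; this is an illustrative sketch, not code from the package.

import io

# Replay the write() calls from the hunk above into a StringIO buffer so the
# generated Dockerfile can be inspected without touching the filesystem.
file = io.StringIO()
file.write("FROM python:3.11-slim\n")
file.write("WORKDIR /app\n")
file.write("COPY . .\n")
file.write("RUN pip install flask praisonai==0.0.71 gunicorn markdown\n")
file.write("EXPOSE 8080\n")
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
print(file.getvalue())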
praisonai/ui/chat.py
CHANGED
@@ -1,6 +1,6 @@
 import chainlit as cl
 from chainlit.input_widget import TextInput
-from chainlit.types import ThreadDict
+from chainlit.types import ThreadDict  # Change this import
 from litellm import acompletion
 import os
 import sqlite3
@@ -14,6 +14,7 @@ from literalai.helper import utc_now
 import logging
 import json
 from sql_alchemy import SQLAlchemyDataLayer
+from tavily import TavilyClient
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -171,6 +172,41 @@ deleted_thread_ids = []  # type: List[str]
 
 cl_data._data_layer = SQLAlchemyDataLayer(conninfo=f"sqlite+aiosqlite:///{DB_PATH}")
 
+# Set Tavily API key
+tavily_api_key = os.getenv("TAVILY_API_KEY")
+tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+# Function to call Tavily Search API
+def tavily_web_search(query):
+    if not tavily_client:
+        return json.dumps({
+            "query": query,
+            "error": "Tavily API key is not set. Web search is unavailable."
+        })
+    response = tavily_client.search(query)
+    print(response)  # Print the full response
+    return json.dumps({
+        "query": query,
+        "answer": response.get('answer'),
+        "top_result": response['results'][0]['content'] if response['results'] else 'No results found'
+    })
+
+# Define the tool for function calling
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "tavily_web_search",
+        "description": "Search the web using Tavily API",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {"type": "string", "description": "Search query"}
+            },
+            "required": ["query"]
+        }
+    }
+}] if tavily_api_key else []
+
 @cl.on_chat_start
 async def start():
     initialize_db()
@@ -224,31 +260,130 @@ async def setup_agent(settings):
 async def main(message: cl.Message):
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     message_history = cl.user_session.get("message_history", [])
-
+    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    # Add the current date and time to the user's message
+    user_message = f"""
+    Answer the question and use tools if needed:\n{message.content}.\n\n
+    Current Date and Time: {now}
+    """
+    message_history.append({"role": "user", "content": user_message})
 
     msg = cl.Message(content="")
     await msg.send()
 
-
-
-
-
-
-
-
-
+    # Prepare the completion parameters
+    completion_params = {
+        "model": model_name,
+        "messages": message_history,
+        "stream": True,
+    }
+
+    # Only add tools and tool_choice if Tavily API key is available
+    if tavily_api_key:
+        completion_params["tools"] = tools
+        completion_params["tool_choice"] = "auto"
+
+    response = await acompletion(**completion_params)
 
     full_response = ""
+    tool_calls = []
+    current_tool_call = None
+
     async for part in response:
-        if
-
-
+        if 'choices' in part and len(part['choices']) > 0:
+            delta = part['choices'][0].get('delta', {})
+
+            if 'content' in delta and delta['content'] is not None:
+                token = delta['content']
+                await msg.stream_token(token)
+                full_response += token
+
+            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                for tool_call in delta['tool_calls']:
+                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                        if current_tool_call:
+                            tool_calls.append(current_tool_call)
+                        current_tool_call = {
+                            'id': tool_call.id,
+                            'type': tool_call.type,
+                            'index': tool_call.index,
+                            'function': {
+                                'name': tool_call.function.name if tool_call.function else None,
+                                'arguments': ''
+                            }
+                        }
+                    if tool_call.function:
+                        if tool_call.function.name:
+                            current_tool_call['function']['name'] = tool_call.function.name
+                        if tool_call.function.arguments:
+                            current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+    if current_tool_call:
+        tool_calls.append(current_tool_call)
+
     logger.debug(f"Full response: {full_response}")
+    logger.debug(f"Tool calls: {tool_calls}")
     message_history.append({"role": "assistant", "content": full_response})
     logger.debug(f"Message history: {message_history}")
     cl.user_session.set("message_history", message_history)
     await msg.update()
 
+    if tavily_api_key and tool_calls:
+        available_functions = {
+            "tavily_web_search": tavily_web_search,
+        }
+        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
+            "name": tool_calls[0]['function']['name'],
+            "arguments": tool_calls[0]['function']['arguments']
+        }}]
+
+        for tool_call in tool_calls:
+            function_name = tool_call['function']['name']
+            if function_name in available_functions:
+                function_to_call = available_functions[function_name]
+                function_args = tool_call['function']['arguments']
+                if function_args:
+                    try:
+                        function_args = json.loads(function_args)
+                        function_response = function_to_call(
+                            query=function_args.get("query"),
+                        )
+                        messages.append(
+                            {
+                                "role": "function",
+                                "name": function_name,
+                                "content": function_response,
+                            }
+                        )
+                    except json.JSONDecodeError:
+                        logger.error(f"Failed to parse function arguments: {function_args}")
+
+        second_response = await acompletion(
+            model=model_name,
+            stream=True,
+            messages=messages,
+        )
+        logger.debug(f"Second LLM response: {second_response}")
+
+        # Handle the streaming response
+        full_response = ""
+        async for part in second_response:
+            if 'choices' in part and len(part['choices']) > 0:
+                delta = part['choices'][0].get('delta', {})
+                if 'content' in delta and delta['content'] is not None:
+                    token = delta['content']
+                    await msg.stream_token(token)
+                    full_response += token
+
+        # Update the message content
+        msg.content = full_response
+        await msg.update()
+    else:
+        # If no tool calls or Tavily API key is not set, the full_response is already set
+        msg.content = full_response
+        await msg.update()
+
     username = os.getenv("CHAINLIT_USERNAME", "admin")  # Default to "admin" if not found
     password = os.getenv("CHAINLIT_PASSWORD", "admin")  # Default to "admin" if not found
 
@@ -267,7 +402,7 @@ async def send_count():
     ).send()
 
 @cl.on_chat_resume
-async def on_chat_resume(thread: cl_data.ThreadDict):
+async def on_chat_resume(thread: ThreadDict):  # Change the type hint here
     logger.info(f"Resuming chat: {thread['id']}")
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     logger.debug(f"Model name: {model_name}")
@@ -285,8 +420,14 @@ async def on_chat_resume(thread: cl_data.ThreadDict):
     thread_id = thread["id"]
     cl.user_session.set("thread_id", thread["id"])
 
-    #
+    # Ensure metadata is a dictionary
     metadata = thread.get("metadata", {})
+    if isinstance(metadata, str):
+        try:
+            metadata = json.loads(metadata)
+        except json.JSONDecodeError:
+            metadata = {}
+
     cl.user_session.set("metadata", metadata)
 
     message_history = cl.user_session.get("message_history", [])
@@ -298,7 +439,14 @@ async def on_chat_resume(thread: cl_data.ThreadDict):
             message_history.append({"role": "user", "content": message.get("output", "")})
         elif msg_type == "assistant_message":
            message_history.append({"role": "assistant", "content": message.get("output", "")})
+        elif msg_type == "run":
+            # Handle 'run' type messages
+            if message.get("isError"):
+                message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
+            else:
+                # You might want to handle non-error 'run' messages differently
+                pass
         else:
-            logger.warning(f"Message without type: {message}")
+            logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
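The core of the chat.py change is a two-step tool-calling flow: tool-call fragments are accumulated from the streamed deltas, the matching local function is executed, and its JSON output is appended as a role="function" message before a second completion is requested. Below is a minimal, self-contained sketch of that data flow; the Tavily call and the LiteLLM stream are stubbed out, and the sample query, ID, and messages are invented for illustration.

import json

def tavily_web_search(query):
    # Stand-in for the real TavilyClient.search call; returns the same JSON
    # shape the module builds ("query", "answer", "top_result").
    return json.dumps({"query": query, "answer": "stub answer", "top_result": "stub result"})

# 1. During streaming, tool-call fragments arrive in the deltas and are merged
#    into complete entries, as the async loop in the hunk above does.
tool_calls = [{
    "id": "call_0",
    "type": "function",
    "index": 0,
    "function": {"name": "tavily_web_search", "arguments": '{"query": "latest PraisonAI release"}'},
}]

# 2. Each completed call is executed and its output appended as a
#    role="function" message.
messages = [{"role": "user", "content": "What is the latest PraisonAI release?"}]
for tool_call in tool_calls:
    args = json.loads(tool_call["function"]["arguments"])
    messages.append({
        "role": "function",
        "name": tool_call["function"]["name"],
        "content": tavily_web_search(query=args.get("query")),
    })

# 3. The enriched message list is then sent back to the model for the final,
#    streamed answer (acompletion(model=..., messages=messages, stream=True)
#    in the module itself).
print(json.dumps(messages, indent=2))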
praisonai/ui/code.py
CHANGED
@@ -1,7 +1,7 @@
 import chainlit as cl
 from chainlit.input_widget import TextInput
 from chainlit.types import ThreadDict
-from litellm import acompletion
+from litellm import acompletion, completion
 import os
 import sqlite3
 from datetime import datetime
@@ -15,7 +15,8 @@ import logging
 import json
 from sql_alchemy import SQLAlchemyDataLayer
 from context import ContextGatherer
-
+from tavily import TavilyClient
+from datetime import datetime
 # Set up logging
 logger = logging.getLogger(__name__)
 log_level = os.getenv("LOGLEVEL", "INFO").upper()
@@ -177,7 +178,7 @@ async def start():
     initialize_db()
     model_name = load_setting("model_name")
 
-    if model_name:
+    if (model_name):
         cl.user_session.set("model_name", model_name)
     else:
         # If no setting found, use default or environment variable
@@ -216,17 +217,58 @@ async def setup_agent(settings):
     # Save in thread metadata
     thread_id = cl.user_session.get("thread_id")
     if thread_id:
-        thread = await cl_data.get_thread(thread_id)
+        thread = await cl_data._data_layer.get_thread(thread_id)
         if thread:
             metadata = thread.get("metadata", {})
+            if isinstance(metadata, str):
+                try:
+                    metadata = json.loads(metadata)
+                except json.JSONDecodeError:
+                    metadata = {}
+
             metadata["model_name"] = model_name
 
-            # Always store metadata as a
-            await cl_data.update_thread(thread_id, metadata=
+            # Always store metadata as a dictionary
+            await cl_data._data_layer.update_thread(thread_id, metadata=metadata)
 
             # Update the user session with the new metadata
             cl.user_session.set("metadata", metadata)
 
+# Set Tavily API key
+tavily_api_key = os.getenv("TAVILY_API_KEY")
+tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+# Function to call Tavily Search API
+def tavily_web_search(query):
+    if not tavily_client:
+        return json.dumps({
+            "query": query,
+            "error": "Tavily API key is not set. Web search is unavailable."
+        })
+    response = tavily_client.search(query)
+    print(response)  # Print the full response
+    return json.dumps({
+        "query": query,
+        "answer": response.get('answer'),
+        "top_result": response['results'][0]['content'] if response['results'] else 'No results found'
+    })
+
+# Define the tool for function calling
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "tavily_web_search",
+        "description": "Search the web using Tavily API",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {"type": "string", "description": "Search query"}
+            },
+            "required": ["query"]
+        }
+    }
+}] if tavily_api_key else []
+
 @cl.on_message
 async def main(message: cl.Message):
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
@@ -234,35 +276,131 @@ async def main(message: cl.Message):
     message_history.append({"role": "user", "content": message.content})
     gatherer = ContextGatherer()
     context, token_count, context_tree = gatherer.run()
+    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     prompt_history = message_history
     prompt_history.append({"role": "user", "content": """
-    Answer the question:\n{question}.\n\n
+    Answer the question and use tools if needed:\n{question}.\n\n
+    Current Date and Time: {now}
     Below is the Context:\n{context}\n\n"""
-    .format(context=context, question=message.content)})
+    .format(context=context, question=message.content, now=now)})
 
     msg = cl.Message(content="")
     await msg.send()
 
-
-
-
-
-
-
-
-
+    # Prepare the completion parameters
+    completion_params = {
+        "model": model_name,
+        "messages": prompt_history,
+        "stream": True,
+    }
+
+    # Only add tools and tool_choice if Tavily API key is available
+    if tavily_api_key:
+        completion_params["tools"] = tools
+        completion_params["tool_choice"] = "auto"
+
+    response = await acompletion(**completion_params)
+    print(response)
 
     full_response = ""
+    tool_calls = []
+    current_tool_call = None
+
     async for part in response:
-
-
-
+        print(part)
+        if 'choices' in part and len(part['choices']) > 0:
+            delta = part['choices'][0].get('delta', {})
+
+            if 'content' in delta and delta['content'] is not None:
+                token = delta['content']
+                await msg.stream_token(token)
+                full_response += token
+
+            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                for tool_call in delta['tool_calls']:
+                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                        if current_tool_call:
+                            tool_calls.append(current_tool_call)
+                        current_tool_call = {
+                            'id': tool_call.id,
+                            'type': tool_call.type,
+                            'index': tool_call.index,
+                            'function': {
+                                'name': tool_call.function.name if tool_call.function else None,
+                                'arguments': ''
+                            }
+                        }
+                    if tool_call.function:
+                        if tool_call.function.name:
+                            current_tool_call['function']['name'] = tool_call.function.name
+                        if tool_call.function.arguments:
+                            current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+    if current_tool_call:
+        tool_calls.append(current_tool_call)
+
     logger.debug(f"Full response: {full_response}")
+    logger.debug(f"Tool calls: {tool_calls}")
     message_history.append({"role": "assistant", "content": full_response})
     logger.debug(f"Message history: {message_history}")
     cl.user_session.set("message_history", message_history)
     await msg.update()
 
+    if tavily_api_key and tool_calls:
+        available_functions = {
+            "tavily_web_search": tavily_web_search,
+        }
+        messages = prompt_history + [{"role": "assistant", "content": None, "function_call": {
+            "name": tool_calls[0]['function']['name'],
+            "arguments": tool_calls[0]['function']['arguments']
+        }}]
+
+        for tool_call in tool_calls:
+            function_name = tool_call['function']['name']
+            if function_name in available_functions:
+                function_to_call = available_functions[function_name]
+                function_args = tool_call['function']['arguments']
+                if function_args:
+                    try:
+                        function_args = json.loads(function_args)
+                        function_response = function_to_call(
+                            query=function_args.get("query"),
+                        )
+                        messages.append(
+                            {
+                                "role": "function",
+                                "name": function_name,
+                                "content": function_response,
+                            }
+                        )
+                    except json.JSONDecodeError:
+                        logger.error(f"Failed to parse function arguments: {function_args}")
+
+        second_response = await acompletion(
+            model=model_name,
+            stream=True,
+            messages=messages,
+        )
+        logger.debug(f"Second LLM response: {second_response}")
+
+        # Handle the streaming response
+        full_response = ""
+        async for part in second_response:
+            if 'choices' in part and len(part['choices']) > 0:
+                delta = part['choices'][0].get('delta', {})
+                if 'content' in delta and delta['content'] is not None:
+                    token = delta['content']
+                    await msg.stream_token(token)
+                    full_response += token
+
+        # Update the message content
+        msg.content = full_response
+        await msg.update()
+    else:
+        # If no tool calls or Tavily API key is not set, the full_response is already set
+        msg.content = full_response
+        await msg.update()
+
     username = os.getenv("CHAINLIT_USERNAME", "admin")  # Default to "admin" if not found
     password = os.getenv("CHAINLIT_PASSWORD", "admin")  # Default to "admin" if not found
 
@@ -318,7 +456,14 @@ async def on_chat_resume(thread: ThreadDict):
             message_history.append({"role": "user", "content": message.get("output", "")})
         elif msg_type == "assistant_message":
             message_history.append({"role": "assistant", "content": message.get("output", "")})
+        elif msg_type == "run":
+            # Handle 'run' type messages
+            if message.get("isError"):
+                message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
+            else:
+                # You might want to handle non-error 'run' messages differently
+                pass
         else:
-            logger.warning(f"Message without type: {message}")
+            logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
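Both UI modules also gain the same defensive handling of thread metadata: the SQLite-backed data layer may return metadata as a JSON string, so it is parsed into a dict (falling back to an empty dict on bad JSON) before being read or updated. A standalone sketch of that pattern follows; the helper name normalize_metadata is ours for illustration and does not appear in the package.

import json

def normalize_metadata(metadata):
    # Mirrors the inline logic added in chat.py and code.py above.
    if isinstance(metadata, str):
        try:
            metadata = json.loads(metadata)
        except json.JSONDecodeError:
            metadata = {}
    return metadata

print(normalize_metadata('{"model_name": "gpt-4o-mini"}'))  # {'model_name': 'gpt-4o-mini'}
print(normalize_metadata("not json"))                        # {}
print(normalize_metadata({"already": "a dict"}))             # {'already': 'a dict'}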
{praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 0.0.69
+Version: 0.0.71
 Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -36,6 +36,7 @@ Requires-Dist: praisonai-tools (>=0.0.7)
 Requires-Dist: pyautogen (>=0.2.19)
 Requires-Dist: pyparsing (>=3.0.0)
 Requires-Dist: rich (>=13.7)
+Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code"
 Project-URL: Homepage, https://docs.praison.ai
 Project-URL: Repository, https://github.com/mervinpraison/PraisonAI
 Description-Content-Type: text/markdown
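Besides the version bump, the only metadata change is the new tavily-python pin, which applies only when PraisonAI is installed with the chat or code extra. A quick way to read that requirement line is with the third-party packaging library (used here purely for illustration; it is not a PraisonAI dependency):

from packaging.requirements import Requirement

# Parse the new Requires-Dist line in PEP 508 form.
req = Requirement('tavily-python==0.5.0; extra == "chat" or extra == "code"')
print(req.name)                                # tavily-python
print(req.specifier.contains("0.5.0"))         # True
print(req.marker.evaluate({"extra": "chat"}))  # True
print(req.marker.evaluate({"extra": "dev"}))   # False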
{praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/RECORD
CHANGED
@@ -4,7 +4,7 @@ praisonai/agents_generator.py,sha256=8d1WRbubvEkBrW1HZ7_xnGyqgJi0yxmXa3MgTIqef1c
 praisonai/auto.py,sha256=9spTXqj47Hmmqv5QHRYE_RzSVHH_KoPbaZjskUj2UcE,7895
 praisonai/chainlit_ui.py,sha256=bNR7s509lp0I9JlJNvwCZRUZosC64qdvlFCt8NmFamQ,12216
 praisonai/cli.py,sha256=M23MbUUzNS7z9ZHz3cGawUrGWzXM-FzhMyiRWQPZSEk,18485
-praisonai/deploy.py,sha256=
+praisonai/deploy.py,sha256=mqUqKDDG1wYv6KMkjFtFvVztF1PTxkg1tCqxxlQbL1w,6028
 praisonai/inbuilt_tools/__init__.py,sha256=mUKnbL6Gram9c9f2m8wJwEzURBLmPEOcHzwySBH89YA,74
 praisonai/inbuilt_tools/autogen_tools.py,sha256=svYkM2N7DVFvbiwgoAS7U_MqTOD8rHf8VD3BaFUV5_Y,14907
 praisonai/inc/__init__.py,sha256=sPDlYBBwdk0VlWzaaM_lG0_LD07lS2HRGvPdxXJFiYg,62
@@ -30,8 +30,8 @@ praisonai/setup/setup_conda_env.py,sha256=4QiWrqgEObivzOMwfJgWaCPpUEpB68cQ6lFwVw
 praisonai/setup/setup_conda_env.sh,sha256=te7s0KHsTi7XM-vkNvE0dKC1HeU2tXxqE-sPUScV6fY,2718
 praisonai/test.py,sha256=OL-wesjA5JTohr8rtr6kWoaS4ImkJg2l0GXJ-dUUfRU,4090
 praisonai/train.py,sha256=DvORlrwKOD-2v4r_z84eV3LsfzpNs-WnPKb5cQB3_t4,11071
-praisonai/ui/chat.py,sha256=
-praisonai/ui/code.py,sha256=
+praisonai/ui/chat.py,sha256=1isfD8j47rfqNrTfcUQ8rWH3MqX2OsfRSzZRu7uC-h8,15305
+praisonai/ui/code.py,sha256=wtlziGIJBDHrpH_0VZhtejN5K5_Yr0tcKaoajvcv7RQ,16231
 praisonai/ui/context.py,sha256=oWO2I_WBZb7kZnuXItf18EJX0ZQv-1nAd8rxhwhuuDU,11871
 praisonai/ui/public/fantasy.svg,sha256=4Gs3kIOux-pjGtw6ogI_rv5_viVJxnE5gRwGilsSg0o,1553
 praisonai/ui/public/game.svg,sha256=y2QMaA01m8XzuDjTOBWzupOC3-TpnUl9ah89mIhviUw,2406
@@ -41,8 +41,8 @@ praisonai/ui/public/movie.svg,sha256=aJ2EQ8vXZusVsF2SeuAVxP4RFJzQ14T26ejrGYdBgzk
 praisonai/ui/public/thriller.svg,sha256=2dYY72EcgbEyTxS4QzjAm37Y4srtPWEW4vCMFki98ZI,3163
 praisonai/ui/sql_alchemy.py,sha256=kf025P_37C505YDDJZ-dPSmN_d62J2DCrkxbDAzXyrM,29884
 praisonai/version.py,sha256=ugyuFliEqtAwQmH4sTlc16YXKYbFWDmfyk87fErB8-8,21
-praisonai-0.0.69.dist-info/LICENSE,sha256=
-praisonai-0.0.69.dist-info/METADATA,sha256=
-praisonai-0.0.69.dist-info/WHEEL,sha256=
-praisonai-0.0.69.dist-info/entry_points.txt,sha256=
-praisonai-0.0.69.dist-info/RECORD,,
+praisonai-0.0.71.dist-info/LICENSE,sha256=kqvFysVlnFxYOu0HxCe2HlmZmJtdmNGOxWRRkT9TsWc,1035
+praisonai-0.0.71.dist-info/METADATA,sha256=gdrxsufd4HKygMtgjdDRx3Olg7ktaENlfVgf9hvN8PQ,11473
+praisonai-0.0.71.dist-info/WHEEL,sha256=HBsDV7Hj4OTiS1GX6ua7iQXUQTB9UHftbBxr7Q8Xm9c,110
+praisonai-0.0.71.dist-info/entry_points.txt,sha256=jB078LEGLY3Ky_indhclomRIVVpXrPSksHjJ-tcBZ-o,133
+praisonai-0.0.71.dist-info/RECORD,,

{praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/LICENSE
File without changes

{praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/WHEEL
File without changes

{praisonai-0.0.69.dist-info → praisonai-0.0.71.dist-info}/entry_points.txt
File without changes