PraisonAI 0.0.70__tar.gz → 0.0.71__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of PraisonAI might be problematic.

Files changed (47)
  1. {praisonai-0.0.70 → praisonai-0.0.71}/PKG-INFO +2 -1
  2. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/deploy.py +1 -1
  3. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/chat.py +164 -16
  4. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/code.py +150 -18
  5. {praisonai-0.0.70 → praisonai-0.0.71}/pyproject.toml +4 -3
  6. {praisonai-0.0.70 → praisonai-0.0.71}/LICENSE +0 -0
  7. {praisonai-0.0.70 → praisonai-0.0.71}/README.md +0 -0
  8. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/__init__.py +0 -0
  9. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/__main__.py +0 -0
  10. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/agents_generator.py +0 -0
  11. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/auto.py +0 -0
  12. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/chainlit_ui.py +0 -0
  13. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/cli.py +0 -0
  14. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/inbuilt_tools/__init__.py +0 -0
  15. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  16. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/inc/__init__.py +0 -0
  17. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/inc/config.py +0 -0
  18. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/inc/models.py +0 -0
  19. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/android-chrome-192x192.png +0 -0
  20. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/android-chrome-512x512.png +0 -0
  21. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/apple-touch-icon.png +0 -0
  22. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/fantasy.svg +0 -0
  23. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/favicon-16x16.png +0 -0
  24. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/favicon-32x32.png +0 -0
  25. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/favicon.ico +0 -0
  26. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/game.svg +0 -0
  27. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/logo_dark.png +0 -0
  28. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/logo_light.png +0 -0
  29. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/movie.svg +0 -0
  30. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/public/thriller.svg +0 -0
  31. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/setup/__init__.py +0 -0
  32. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/setup/build.py +0 -0
  33. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/setup/config.yaml +0 -0
  34. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/setup/post_install.py +0 -0
  35. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/setup/setup_conda_env.py +0 -0
  36. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/setup/setup_conda_env.sh +0 -0
  37. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/test.py +0 -0
  38. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/train.py +0 -0
  39. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/context.py +0 -0
  40. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/public/fantasy.svg +0 -0
  41. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/public/game.svg +0 -0
  42. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/public/logo_dark.png +0 -0
  43. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/public/logo_light.png +0 -0
  44. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/public/movie.svg +0 -0
  45. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/public/thriller.svg +0 -0
  46. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/ui/sql_alchemy.py +0 -0
  47. {praisonai-0.0.70 → praisonai-0.0.71}/praisonai/version.py +0 -0
--- praisonai-0.0.70/PKG-INFO
+++ praisonai-0.0.71/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 0.0.70
+Version: 0.0.71
 Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -36,6 +36,7 @@ Requires-Dist: praisonai-tools (>=0.0.7)
 Requires-Dist: pyautogen (>=0.2.19)
 Requires-Dist: pyparsing (>=3.0.0)
 Requires-Dist: rich (>=13.7)
+Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code"
 Project-URL: Homepage, https://docs.praison.ai
 Project-URL: Repository, https://github.com/mervinpraison/PraisonAI
 Description-Content-Type: text/markdown
--- praisonai-0.0.70/praisonai/deploy.py
+++ praisonai-0.0.71/praisonai/deploy.py
@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==0.0.70 gunicorn markdown\n")
+            file.write("RUN pip install flask praisonai==0.0.71 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
             file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
--- praisonai-0.0.70/praisonai/ui/chat.py
+++ praisonai-0.0.71/praisonai/ui/chat.py
@@ -1,6 +1,6 @@
 import chainlit as cl
 from chainlit.input_widget import TextInput
-from chainlit.types import ThreadDict
+from chainlit.types import ThreadDict # Change this import
 from litellm import acompletion
 import os
 import sqlite3
@@ -14,6 +14,7 @@ from literalai.helper import utc_now
 import logging
 import json
 from sql_alchemy import SQLAlchemyDataLayer
+from tavily import TavilyClient
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -171,6 +172,41 @@ deleted_thread_ids = [] # type: List[str]
 
 cl_data._data_layer = SQLAlchemyDataLayer(conninfo=f"sqlite+aiosqlite:///{DB_PATH}")
 
+# Set Tavily API key
+tavily_api_key = os.getenv("TAVILY_API_KEY")
+tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+# Function to call Tavily Search API
+def tavily_web_search(query):
+    if not tavily_client:
+        return json.dumps({
+            "query": query,
+            "error": "Tavily API key is not set. Web search is unavailable."
+        })
+    response = tavily_client.search(query)
+    print(response) # Print the full response
+    return json.dumps({
+        "query": query,
+        "answer": response.get('answer'),
+        "top_result": response['results'][0]['content'] if response['results'] else 'No results found'
+    })
+
+# Define the tool for function calling
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "tavily_web_search",
+        "description": "Search the web using Tavily API",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {"type": "string", "description": "Search query"}
+            },
+            "required": ["query"]
+        }
+    }
+}] if tavily_api_key else []
+
 @cl.on_chat_start
 async def start():
     initialize_db()
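
Note: the block above defines the new web-search tool at module level. A minimal standalone sketch of the same flow, assuming TAVILY_API_KEY is exported; the query string is illustrative only:

# Standalone sketch of the tavily_web_search flow added in this release.
import json
import os

from tavily import TavilyClient

client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])  # assumes the key is set
response = client.search("PraisonAI multi-agent framework")  # illustrative query

# The same fields the new helper extracts before handing results to the LLM.
print(json.dumps({
    "answer": response.get("answer"),
    "top_result": response["results"][0]["content"] if response["results"] else "No results found",
}, indent=2))
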
@@ -224,31 +260,130 @@ async def setup_agent(settings)
 async def main(message: cl.Message):
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     message_history = cl.user_session.get("message_history", [])
-    message_history.append({"role": "user", "content": message.content})
+    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    # Add the current date and time to the user's message
+    user_message = f"""
+    Answer the question and use tools if needed:\n{message.content}.\n\n
+    Current Date and Time: {now}
+    """
+    message_history.append({"role": "user", "content": user_message})
 
     msg = cl.Message(content="")
     await msg.send()
 
-    response = await acompletion(
-        model=model_name,
-        messages=message_history,
-        stream=True,
-        # temperature=0.7,
-        # max_tokens=500,
-        # top_p=1
-    )
+    # Prepare the completion parameters
+    completion_params = {
+        "model": model_name,
+        "messages": message_history,
+        "stream": True,
+    }
+
+    # Only add tools and tool_choice if Tavily API key is available
+    if tavily_api_key:
+        completion_params["tools"] = tools
+        completion_params["tool_choice"] = "auto"
+
+    response = await acompletion(**completion_params)
 
     full_response = ""
+    tool_calls = []
+    current_tool_call = None
+
     async for part in response:
-        if token := part['choices'][0]['delta']['content']:
-            await msg.stream_token(token)
-            full_response += token
+        if 'choices' in part and len(part['choices']) > 0:
+            delta = part['choices'][0].get('delta', {})
+
+            if 'content' in delta and delta['content'] is not None:
+                token = delta['content']
+                await msg.stream_token(token)
+                full_response += token
+
+            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                for tool_call in delta['tool_calls']:
+                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                        if current_tool_call:
+                            tool_calls.append(current_tool_call)
+                        current_tool_call = {
+                            'id': tool_call.id,
+                            'type': tool_call.type,
+                            'index': tool_call.index,
+                            'function': {
+                                'name': tool_call.function.name if tool_call.function else None,
+                                'arguments': ''
+                            }
+                        }
+                    if tool_call.function:
+                        if tool_call.function.name:
+                            current_tool_call['function']['name'] = tool_call.function.name
+                        if tool_call.function.arguments:
+                            current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+    if current_tool_call:
+        tool_calls.append(current_tool_call)
+
     logger.debug(f"Full response: {full_response}")
+    logger.debug(f"Tool calls: {tool_calls}")
     message_history.append({"role": "assistant", "content": full_response})
     logger.debug(f"Message history: {message_history}")
     cl.user_session.set("message_history", message_history)
     await msg.update()
 
+    if tavily_api_key and tool_calls:
+        available_functions = {
+            "tavily_web_search": tavily_web_search,
+        }
+        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
+            "name": tool_calls[0]['function']['name'],
+            "arguments": tool_calls[0]['function']['arguments']
+        }}]
+
+        for tool_call in tool_calls:
+            function_name = tool_call['function']['name']
+            if function_name in available_functions:
+                function_to_call = available_functions[function_name]
+                function_args = tool_call['function']['arguments']
+                if function_args:
+                    try:
+                        function_args = json.loads(function_args)
+                        function_response = function_to_call(
+                            query=function_args.get("query"),
+                        )
+                        messages.append(
+                            {
+                                "role": "function",
+                                "name": function_name,
+                                "content": function_response,
+                            }
+                        )
+                    except json.JSONDecodeError:
+                        logger.error(f"Failed to parse function arguments: {function_args}")
+
+        second_response = await acompletion(
+            model=model_name,
+            stream=True,
+            messages=messages,
+        )
+        logger.debug(f"Second LLM response: {second_response}")
+
+        # Handle the streaming response
+        full_response = ""
+        async for part in second_response:
+            if 'choices' in part and len(part['choices']) > 0:
+                delta = part['choices'][0].get('delta', {})
+                if 'content' in delta and delta['content'] is not None:
+                    token = delta['content']
+                    await msg.stream_token(token)
+                    full_response += token
+
+        # Update the message content
+        msg.content = full_response
+        await msg.update()
+    else:
+        # If no tool calls or Tavily API key is not set, the full_response is already set
+        msg.content = full_response
+        await msg.update()
+
 username = os.getenv("CHAINLIT_USERNAME", "admin") # Default to "admin" if not found
 password = os.getenv("CHAINLIT_PASSWORD", "admin") # Default to "admin" if not found
 
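
Note: the streaming loop above has to reassemble each tool call, because the function name is emitted once while the JSON arguments arrive in fragments keyed by index. A toy illustration of the concatenation step (fragment values invented):

import json

# Argument JSON arrives split across deltas; the handler concatenates the
# pieces per tool-call index and parses them only after the stream ends.
fragments = ['{"que', 'ry": "PraisonAI', ' docs"}']
current_tool_call = {"index": 0, "function": {"name": "tavily_web_search", "arguments": ""}}
for piece in fragments:
    current_tool_call["function"]["arguments"] += piece

args = json.loads(current_tool_call["function"]["arguments"])
assert args == {"query": "PraisonAI docs"}
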
@@ -267,7 +402,7 @@ async def send_count():
     ).send()
 
 @cl.on_chat_resume
-async def on_chat_resume(thread: cl_data.ThreadDict):
+async def on_chat_resume(thread: ThreadDict): # Change the type hint here
     logger.info(f"Resuming chat: {thread['id']}")
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     logger.debug(f"Model name: {model_name}")
@@ -285,8 +420,14 @@ async def on_chat_resume(thread: cl_data.ThreadDict):
     thread_id = thread["id"]
     cl.user_session.set("thread_id", thread["id"])
 
-    # The metadata should now already be a dictionary
+    # Ensure metadata is a dictionary
     metadata = thread.get("metadata", {})
+    if isinstance(metadata, str):
+        try:
+            metadata = json.loads(metadata)
+        except json.JSONDecodeError:
+            metadata = {}
+
     cl.user_session.set("metadata", metadata)
 
     message_history = cl.user_session.get("message_history", [])
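
Note: this guard appears to exist because setup_agent in code.py (see its diff below) now stores metadata as a dict, while threads saved by earlier releases may still hold a JSON string. The same normalization in isolation, using a hypothetical helper name:

import json

def normalize_metadata(raw):
    """Accept either the old JSON-string form or the new dict form."""
    if isinstance(raw, str):
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            return {}
    return raw or {}

assert normalize_metadata('{"model_name": "gpt-4o-mini"}') == {"model_name": "gpt-4o-mini"}
assert normalize_metadata({"model_name": "gpt-4o-mini"}) == {"model_name": "gpt-4o-mini"}
assert normalize_metadata(None) == {}
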
@@ -298,7 +439,14 @@ async def on_chat_resume(thread: cl_data.ThreadDict):
             message_history.append({"role": "user", "content": message.get("output", "")})
         elif msg_type == "assistant_message":
             message_history.append({"role": "assistant", "content": message.get("output", "")})
+        elif msg_type == "run":
+            # Handle 'run' type messages
+            if message.get("isError"):
+                message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
+            else:
+                # You might want to handle non-error 'run' messages differently
+                pass
         else:
-            logger.warning(f"Message without type: {message}")
+            logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
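
Note: with the new 'run' branch, resuming a thread replays stored steps roughly as follows (sample step data invented):

# Toy replay of thread steps into message_history, mirroring the branches above.
steps = [
    {"type": "user_message", "output": "hi"},
    {"type": "assistant_message", "output": "hello"},
    {"type": "run", "isError": True, "output": "tool crashed"},
]
message_history = []
for step in steps:
    step_type = step.get("type")
    if step_type == "user_message":
        message_history.append({"role": "user", "content": step.get("output", "")})
    elif step_type == "assistant_message":
        message_history.append({"role": "assistant", "content": step.get("output", "")})
    elif step_type == "run" and step.get("isError"):
        message_history.append({"role": "system", "content": f"Error: {step.get('output', '')}"})

assert message_history[-1] == {"role": "system", "content": "Error: tool crashed"}
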
--- praisonai-0.0.70/praisonai/ui/code.py
+++ praisonai-0.0.71/praisonai/ui/code.py
@@ -1,7 +1,7 @@
 import chainlit as cl
 from chainlit.input_widget import TextInput
 from chainlit.types import ThreadDict
-from litellm import acompletion
+from litellm import acompletion, completion
 import os
 import sqlite3
 from datetime import datetime
@@ -15,7 +15,8 @@ import logging
 import json
 from sql_alchemy import SQLAlchemyDataLayer
 from context import ContextGatherer
-
+from tavily import TavilyClient
+from datetime import datetime
 # Set up logging
 logger = logging.getLogger(__name__)
 log_level = os.getenv("LOGLEVEL", "INFO").upper()
@@ -177,7 +178,7 @@ async def start():
     initialize_db()
     model_name = load_setting("model_name")
 
-    if model_name:
+    if (model_name):
         cl.user_session.set("model_name", model_name)
     else:
         # If no setting found, use default or environment variable
@@ -227,12 +228,47 @@ async def setup_agent(settings):
 
     metadata["model_name"] = model_name
 
-    # Always store metadata as a JSON string
-    await cl_data._data_layer.update_thread(thread_id, metadata=json.dumps(metadata))
+    # Always store metadata as a dictionary
+    await cl_data._data_layer.update_thread(thread_id, metadata=metadata)
 
     # Update the user session with the new metadata
     cl.user_session.set("metadata", metadata)
 
+# Set Tavily API key
+tavily_api_key = os.getenv("TAVILY_API_KEY")
+tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+# Function to call Tavily Search API
+def tavily_web_search(query):
+    if not tavily_client:
+        return json.dumps({
+            "query": query,
+            "error": "Tavily API key is not set. Web search is unavailable."
+        })
+    response = tavily_client.search(query)
+    print(response) # Print the full response
+    return json.dumps({
+        "query": query,
+        "answer": response.get('answer'),
+        "top_result": response['results'][0]['content'] if response['results'] else 'No results found'
+    })
+
+# Define the tool for function calling
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "tavily_web_search",
+        "description": "Search the web using Tavily API",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {"type": "string", "description": "Search query"}
+            },
+            "required": ["query"]
+        }
+    }
+}] if tavily_api_key else []
+
 @cl.on_message
 async def main(message: cl.Message):
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
@@ -240,35 +276,131 @@ async def main(message: cl.Message):
     message_history.append({"role": "user", "content": message.content})
     gatherer = ContextGatherer()
     context, token_count, context_tree = gatherer.run()
+    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     prompt_history = message_history
     prompt_history.append({"role": "user", "content": """
-        Answer the question:\n{question}.\n\n
+        Answer the question and use tools if needed:\n{question}.\n\n
+        Current Date and Time: {now}
         Below is the Context:\n{context}\n\n"""
-        .format(context=context, question=message.content)})
+        .format(context=context, question=message.content, now=now)})
 
     msg = cl.Message(content="")
     await msg.send()
 
-    response = await acompletion(
-        model=model_name,
-        messages=prompt_history,
-        stream=True,
-        # temperature=0.7,
-        # max_tokens=500,
-        # top_p=1
-    )
+    # Prepare the completion parameters
+    completion_params = {
+        "model": model_name,
+        "messages": prompt_history,
+        "stream": True,
+    }
+
+    # Only add tools and tool_choice if Tavily API key is available
+    if tavily_api_key:
+        completion_params["tools"] = tools
+        completion_params["tool_choice"] = "auto"
+
+    response = await acompletion(**completion_params)
+    print(response)
 
     full_response = ""
+    tool_calls = []
+    current_tool_call = None
+
     async for part in response:
-        if token := part['choices'][0]['delta']['content']:
-            await msg.stream_token(token)
-            full_response += token
+        print(part)
+        if 'choices' in part and len(part['choices']) > 0:
+            delta = part['choices'][0].get('delta', {})
+
+            if 'content' in delta and delta['content'] is not None:
+                token = delta['content']
+                await msg.stream_token(token)
+                full_response += token
+
+            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                for tool_call in delta['tool_calls']:
+                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                        if current_tool_call:
+                            tool_calls.append(current_tool_call)
+                        current_tool_call = {
+                            'id': tool_call.id,
+                            'type': tool_call.type,
+                            'index': tool_call.index,
+                            'function': {
+                                'name': tool_call.function.name if tool_call.function else None,
+                                'arguments': ''
+                            }
+                        }
+                    if tool_call.function:
+                        if tool_call.function.name:
+                            current_tool_call['function']['name'] = tool_call.function.name
+                        if tool_call.function.arguments:
+                            current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+    if current_tool_call:
+        tool_calls.append(current_tool_call)
+
     logger.debug(f"Full response: {full_response}")
+    logger.debug(f"Tool calls: {tool_calls}")
     message_history.append({"role": "assistant", "content": full_response})
     logger.debug(f"Message history: {message_history}")
     cl.user_session.set("message_history", message_history)
     await msg.update()
 
+    if tavily_api_key and tool_calls:
+        available_functions = {
+            "tavily_web_search": tavily_web_search,
+        }
+        messages = prompt_history + [{"role": "assistant", "content": None, "function_call": {
+            "name": tool_calls[0]['function']['name'],
+            "arguments": tool_calls[0]['function']['arguments']
+        }}]
+
+        for tool_call in tool_calls:
+            function_name = tool_call['function']['name']
+            if function_name in available_functions:
+                function_to_call = available_functions[function_name]
+                function_args = tool_call['function']['arguments']
+                if function_args:
+                    try:
+                        function_args = json.loads(function_args)
+                        function_response = function_to_call(
+                            query=function_args.get("query"),
+                        )
+                        messages.append(
+                            {
+                                "role": "function",
+                                "name": function_name,
+                                "content": function_response,
+                            }
+                        )
+                    except json.JSONDecodeError:
+                        logger.error(f"Failed to parse function arguments: {function_args}")
+
+        second_response = await acompletion(
+            model=model_name,
+            stream=True,
+            messages=messages,
+        )
+        logger.debug(f"Second LLM response: {second_response}")
+
+        # Handle the streaming response
+        full_response = ""
+        async for part in second_response:
+            if 'choices' in part and len(part['choices']) > 0:
+                delta = part['choices'][0].get('delta', {})
+                if 'content' in delta and delta['content'] is not None:
+                    token = delta['content']
+                    await msg.stream_token(token)
+                    full_response += token
+
+        # Update the message content
+        msg.content = full_response
+        await msg.update()
+    else:
+        # If no tool calls or Tavily API key is not set, the full_response is already set
+        msg.content = full_response
+        await msg.update()
+
 username = os.getenv("CHAINLIT_USERNAME", "admin") # Default to "admin" if not found
 password = os.getenv("CHAINLIT_PASSWORD", "admin") # Default to "admin" if not found
 
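
Note: after a tool call, both UIs issue a second completion whose history carries the assistant's function_call plus a role="function" result. A hand-built example of that message shape (all values invented):

# Shape of the follow-up request sent by the second acompletion call.
messages = [
    {"role": "user", "content": "What's new in PraisonAI?"},
    {"role": "assistant", "content": None,
     "function_call": {"name": "tavily_web_search",
                       "arguments": '{"query": "PraisonAI release notes"}'}},
    {"role": "function", "name": "tavily_web_search",
     "content": '{"query": "PraisonAI release notes", "answer": null, "top_result": "..."}'},
]
# second_response = await acompletion(model=model_name, stream=True, messages=messages)
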
--- praisonai-0.0.70/pyproject.toml
+++ praisonai-0.0.71/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "PraisonAI"
-version = "0.0.70"
+version = "0.0.71"
 description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
 authors = ["Mervin Praison"]
 license = ""
@@ -32,6 +32,7 @@ langchain-cohere = {version = ">=0.1.4", optional = true}
 litellm = {version = ">=1.41.8", optional = true}
 aiosqlite= {version = ">=0.20.0", optional = true}
 greenlet = {version = ">=3.0.3", optional = true}
+tavily-python = {version = "==0.5.0", optional=true}
 
 [tool.poetry.group.docs.dependencies]
 mkdocs = "*"
@@ -101,8 +102,8 @@ google = ["langchain-google-genai"]
 openai = ["langchain-openai"]
 anthropic = ["langchain-anthropic"]
 cohere = ["langchain-cohere"]
-chat = ["chainlit", "litellm", "aiosqlite", "greenlet"]
-code = ["chainlit", "litellm", "aiosqlite", "greenlet"]
+chat = ["chainlit", "litellm", "aiosqlite", "greenlet", "tavily-python"]
+code = ["chainlit", "litellm", "aiosqlite", "greenlet", "tavily-python"]
 train = ["setup-conda-env"]
 
 [tool.poetry-dynamic-versioning]
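
Note: with tavily-python added to both extras, installing either UI pulls it in automatically, e.g. using standard pip extras syntax:

pip install "praisonai[chat]==0.0.71"
pip install "praisonai[code]==0.0.71"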