PraisonAI 0.0.71__tar.gz → 0.0.73__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PraisonAI might be problematic; see the registry's release advisory for more details.

Files changed (47)
  1. {praisonai-0.0.71 → praisonai-0.0.73}/PKG-INFO +3 -2
  2. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/deploy.py +1 -1
  3. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/chat.py +86 -10
  4. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/code.py +94 -17
  5. {praisonai-0.0.71 → praisonai-0.0.73}/pyproject.toml +6 -5
  6. {praisonai-0.0.71 → praisonai-0.0.73}/LICENSE +0 -0
  7. {praisonai-0.0.71 → praisonai-0.0.73}/README.md +0 -0
  8. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/__init__.py +0 -0
  9. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/__main__.py +0 -0
  10. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/agents_generator.py +0 -0
  11. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/auto.py +0 -0
  12. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/chainlit_ui.py +0 -0
  13. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/cli.py +0 -0
  14. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/inbuilt_tools/__init__.py +0 -0
  15. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  16. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/inc/__init__.py +0 -0
  17. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/inc/config.py +0 -0
  18. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/inc/models.py +0 -0
  19. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/android-chrome-192x192.png +0 -0
  20. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/android-chrome-512x512.png +0 -0
  21. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/apple-touch-icon.png +0 -0
  22. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/fantasy.svg +0 -0
  23. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/favicon-16x16.png +0 -0
  24. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/favicon-32x32.png +0 -0
  25. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/favicon.ico +0 -0
  26. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/game.svg +0 -0
  27. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/logo_dark.png +0 -0
  28. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/logo_light.png +0 -0
  29. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/movie.svg +0 -0
  30. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/public/thriller.svg +0 -0
  31. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/setup/__init__.py +0 -0
  32. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/setup/build.py +0 -0
  33. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/setup/config.yaml +0 -0
  34. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/setup/post_install.py +0 -0
  35. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/setup/setup_conda_env.py +0 -0
  36. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/setup/setup_conda_env.sh +0 -0
  37. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/test.py +0 -0
  38. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/train.py +0 -0
  39. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/context.py +0 -0
  40. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/public/fantasy.svg +0 -0
  41. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/public/game.svg +0 -0
  42. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/public/logo_dark.png +0 -0
  43. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/public/logo_light.png +0 -0
  44. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/public/movie.svg +0 -0
  45. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/public/thriller.svg +0 -0
  46. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/ui/sql_alchemy.py +0 -0
  47. {praisonai-0.0.71 → praisonai-0.0.73}/praisonai/version.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: PraisonAI
3
- Version: 0.0.71
3
+ Version: 0.0.73
4
4
  Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10,<3.13
@@ -19,9 +19,10 @@ Provides-Extra: gradio
19
19
  Provides-Extra: openai
20
20
  Provides-Extra: train
21
21
  Provides-Extra: ui
22
- Requires-Dist: agentops (>=0.2.6) ; extra == "agentops"
22
+ Requires-Dist: agentops (>=0.3.12) ; extra == "agentops"
23
23
  Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code"
24
24
  Requires-Dist: chainlit (==1.2.0) ; extra == "ui" or extra == "chat" or extra == "code"
25
+ Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat" or extra == "code"
25
26
  Requires-Dist: crewai (>=0.32.0)
26
27
  Requires-Dist: flask (>=3.0.0) ; extra == "api"
27
28
  Requires-Dist: gradio (>=4.26.0) ; extra == "gradio"
@@ -56,7 +56,7 @@ class CloudDeployer:
56
56
  file.write("FROM python:3.11-slim\n")
57
57
  file.write("WORKDIR /app\n")
58
58
  file.write("COPY . .\n")
59
- file.write("RUN pip install flask praisonai==0.0.71 gunicorn markdown\n")
59
+ file.write("RUN pip install flask praisonai==0.0.73 gunicorn markdown\n")
60
60
  file.write("EXPOSE 8080\n")
61
61
  file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
62
62
 
@@ -15,6 +15,11 @@ import logging
15
15
  import json
16
16
  from sql_alchemy import SQLAlchemyDataLayer
17
17
  from tavily import TavilyClient
18
+ from crawl4ai import WebCrawler
19
+ import asyncio
20
+ from PIL import Image
21
+ import io
22
+ import base64
18
23
 
19
24
  # Set up logging
20
25
  logger = logging.getLogger(__name__)
@@ -176,27 +181,55 @@ cl_data._data_layer = SQLAlchemyDataLayer(conninfo=f"sqlite+aiosqlite:///{DB_PAT
176
181
  tavily_api_key = os.getenv("TAVILY_API_KEY")
177
182
  tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
178
183
 
179
- # Function to call Tavily Search API
184
+ # Modify the tavily_web_search function to be synchronous
180
185
  def tavily_web_search(query):
181
186
  if not tavily_client:
182
187
  return json.dumps({
183
188
  "query": query,
184
189
  "error": "Tavily API key is not set. Web search is unavailable."
185
190
  })
191
+
186
192
  response = tavily_client.search(query)
187
- print(response) # Print the full response
193
+ logger.debug(f"Tavily search response: {response}")
194
+
195
+ # Create an instance of WebCrawler
196
+ crawler = WebCrawler()
197
+
198
+ # Warm up the crawler (load necessary models)
199
+ crawler.warmup()
200
+
201
+ # Prepare the results
202
+ results = []
203
+ for result in response.get('results', []):
204
+ url = result.get('url')
205
+ if url:
206
+ try:
207
+ # Run the crawler on each URL
208
+ crawl_result = crawler.run(url=url)
209
+ results.append({
210
+ "content": result.get('content'),
211
+ "url": url,
212
+ "full_content": crawl_result.markdown
213
+ })
214
+ except Exception as e:
215
+ logger.error(f"Error crawling {url}: {str(e)}")
216
+ results.append({
217
+ "content": result.get('content'),
218
+ "url": url,
219
+ "full_content": "Error: Unable to crawl this URL"
220
+ })
221
+
188
222
  return json.dumps({
189
223
  "query": query,
190
- "answer": response.get('answer'),
191
- "top_result": response['results'][0]['content'] if response['results'] else 'No results found'
224
+ "results": results
192
225
  })
193
226
 
194
- # Define the tool for function calling
227
+ # Update the tools definition
195
228
  tools = [{
196
229
  "type": "function",
197
230
  "function": {
198
231
  "name": "tavily_web_search",
199
- "description": "Search the web using Tavily API",
232
+ "description": "Search the web using Tavily API and crawl the resulting URLs",
200
233
  "parameters": {
201
234
  "type": "object",
202
235
  "properties": {
@@ -262,11 +295,32 @@ async def main(message: cl.Message):
262
295
  message_history = cl.user_session.get("message_history", [])
263
296
  now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
264
297
 
265
- # Add the current date and time to the user's message
298
+ # Check if an image was uploaded with this message
299
+ image = None
300
+ if message.elements and isinstance(message.elements[0], cl.Image):
301
+ image_element = message.elements[0]
302
+ try:
303
+ # Open the image and keep it in memory
304
+ image = Image.open(image_element.path)
305
+ image.load() # This ensures the file is fully loaded into memory
306
+ cl.user_session.set("image", image)
307
+ except Exception as e:
308
+ logger.error(f"Error processing image: {str(e)}")
309
+ await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
310
+ return
311
+
312
+ # Prepare user message
266
313
  user_message = f"""
267
- Answer the question and use tools if needed:\n{message.content}.\n\n
314
+ Answer the question and use tools if needed:\n
315
+
268
316
  Current Date and Time: {now}
317
+
318
+ User Question: {message.content}
269
319
  """
320
+
321
+ if image:
322
+ user_message = f"Image uploaded. {user_message}"
323
+
270
324
  message_history.append({"role": "user", "content": user_message})
271
325
 
272
326
  msg = cl.Message(content="")
@@ -279,6 +333,19 @@ Current Date and Time: {now}
279
333
  "stream": True,
280
334
  }
281
335
 
336
+ # If an image is uploaded, include it in the message
337
+ if image:
338
+ buffered = io.BytesIO()
339
+ image.save(buffered, format="PNG")
340
+ img_str = base64.b64encode(buffered.getvalue()).decode()
341
+
342
+ completion_params["messages"][-1] = {
343
+ "role": "user",
344
+ "content": [
345
+ {"type": "text", "text": user_message},
346
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
347
+ ]
348
+ }
282
349
  # Only add tools and tool_choice if Tavily API key is available
283
350
  if tavily_api_key:
284
351
  completion_params["tools"] = tools
@@ -329,6 +396,7 @@ Current Date and Time: {now}
329
396
  cl.user_session.set("message_history", message_history)
330
397
  await msg.update()
331
398
 
399
+ # Handle tool calls if any
332
400
  if tavily_api_key and tool_calls:
333
401
  available_functions = {
334
402
  "tavily_web_search": tavily_web_search,
@@ -346,6 +414,7 @@ Current Date and Time: {now}
346
414
  if function_args:
347
415
  try:
348
416
  function_args = json.loads(function_args)
417
+ # Call the function synchronously
349
418
  function_response = function_to_call(
350
419
  query=function_args.get("query"),
351
420
  )
@@ -380,7 +449,7 @@ Current Date and Time: {now}
380
449
  msg.content = full_response
381
450
  await msg.update()
382
451
  else:
383
- # If no tool calls or Tavily API key is not set, the full_response is already set
452
+ # If no tool calls, the full_response is already set
384
453
  msg.content = full_response
385
454
  await msg.update()
386
455
 
@@ -402,7 +471,7 @@ async def send_count():
402
471
  ).send()
403
472
 
404
473
  @cl.on_chat_resume
405
- async def on_chat_resume(thread: ThreadDict): # Change the type hint here
474
+ async def on_chat_resume(thread: ThreadDict):
406
475
  logger.info(f"Resuming chat: {thread['id']}")
407
476
  model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
408
477
  logger.debug(f"Model name: {model_name}")
@@ -450,3 +519,10 @@ async def on_chat_resume(thread: ThreadDict): # Change the type hint here
450
519
  logger.warning(f"Message without recognized type: {message}")
451
520
 
452
521
  cl.user_session.set("message_history", message_history)
522
+
523
+ # Check if there's an image in the thread metadata
524
+ image_data = metadata.get("image")
525
+ if image_data:
526
+ image = Image.open(io.BytesIO(base64.b64decode(image_data)))
527
+ cl.user_session.set("image", image)
528
+ await cl.Message(content="Previous image loaded. You can continue asking questions about it or upload a new image.").send()
@@ -17,6 +17,11 @@ from sql_alchemy import SQLAlchemyDataLayer
17
17
  from context import ContextGatherer
18
18
  from tavily import TavilyClient
19
19
  from datetime import datetime
20
+ from crawl4ai import WebCrawler
21
+ from PIL import Image
22
+ import io
23
+ import base64
24
+
20
25
  # Set up logging
21
26
  logger = logging.getLogger(__name__)
22
27
  log_level = os.getenv("LOGLEVEL", "INFO").upper()
@@ -238,19 +243,47 @@ async def setup_agent(settings):
238
243
  tavily_api_key = os.getenv("TAVILY_API_KEY")
239
244
  tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
240
245
 
241
- # Function to call Tavily Search API
246
+ # Function to call Tavily Search API and crawl the results
242
247
  def tavily_web_search(query):
243
248
  if not tavily_client:
244
249
  return json.dumps({
245
250
  "query": query,
246
251
  "error": "Tavily API key is not set. Web search is unavailable."
247
252
  })
253
+
248
254
  response = tavily_client.search(query)
249
- print(response) # Print the full response
255
+ logger.debug(f"Tavily search response: {response}")
256
+
257
+ # Create an instance of WebCrawler
258
+ crawler = WebCrawler()
259
+
260
+ # Warm up the crawler (load necessary models)
261
+ crawler.warmup()
262
+
263
+ # Prepare the results
264
+ results = []
265
+ for result in response.get('results', []):
266
+ url = result.get('url')
267
+ if url:
268
+ try:
269
+ # Run the crawler on each URL
270
+ crawl_result = crawler.run(url=url)
271
+ results.append({
272
+ "content": result.get('content'),
273
+ "url": url,
274
+ "full_content": crawl_result.markdown
275
+ })
276
+ except Exception as e:
277
+ logger.error(f"Error crawling {url}: {str(e)}")
278
+ results.append({
279
+ "content": result.get('content'),
280
+ "url": url,
281
+ "full_content": "Error: Unable to crawl this URL"
282
+ })
283
+
250
284
  return json.dumps({
251
285
  "query": query,
252
- "answer": response.get('answer'),
253
- "top_result": response['results'][0]['content'] if response['results'] else 'No results found'
286
+ "results": results
254
287
  })
255
288
 
256
289
  # Define the tool for function calling
@@ -258,7 +291,7 @@ tools = [{
258
291
  "type": "function",
259
292
  "function": {
260
293
  "name": "tavily_web_search",
261
- "description": "Search the web using Tavily API",
294
+ "description": "Search the web using Tavily API and crawl the resulting URLs",
262
295
  "parameters": {
263
296
  "type": "object",
264
297
  "properties": {
@@ -273,16 +306,37 @@ tools = [{
273
306
  async def main(message: cl.Message):
274
307
  model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
275
308
  message_history = cl.user_session.get("message_history", [])
276
- message_history.append({"role": "user", "content": message.content})
277
309
  gatherer = ContextGatherer()
278
310
  context, token_count, context_tree = gatherer.run()
279
311
  now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
280
- prompt_history = message_history
281
- prompt_history.append({"role": "user", "content": """
282
- Answer the question and use tools if needed:\n{question}.\n\n
283
- Current Date and Time: {now}
284
- Below is the Context:\n{context}\n\n"""
285
- .format(context=context, question=message.content, now=now)})
312
+
313
+ # Check if an image was uploaded with this message
314
+ image = None
315
+ if message.elements and isinstance(message.elements[0], cl.Image):
316
+ image_element = message.elements[0]
317
+ try:
318
+ # Open the image and keep it in memory
319
+ image = Image.open(image_element.path)
320
+ image.load() # This ensures the file is fully loaded into memory
321
+ cl.user_session.set("image", image)
322
+ except Exception as e:
323
+ logger.error(f"Error processing image: {str(e)}")
324
+ await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
325
+ return
326
+
327
+ # Prepare user message
328
+ user_message = f"""
329
+ Answer the question and use tools if needed:\n{message.content}.\n\n
330
+ Current Date and Time: {now}
331
+
332
+ Context:
333
+ {context}
334
+ """
335
+
336
+ if image:
337
+ user_message = f"Image uploaded. {user_message}"
338
+
339
+ message_history.append({"role": "user", "content": user_message})
286
340
 
287
341
  msg = cl.Message(content="")
288
342
  await msg.send()
@@ -290,24 +344,40 @@ async def main(message: cl.Message):
290
344
  # Prepare the completion parameters
291
345
  completion_params = {
292
346
  "model": model_name,
293
- "messages": prompt_history,
347
+ "messages": message_history,
294
348
  "stream": True,
295
349
  }
296
350
 
297
- # Only add tools and tool_choice if Tavily API key is available
351
+ # If an image is uploaded, include it in the message
352
+ if image:
353
+ buffered = io.BytesIO()
354
+ image.save(buffered, format="PNG")
355
+ img_str = base64.b64encode(buffered.getvalue()).decode()
356
+
357
+ completion_params["messages"][-1] = {
358
+ "role": "user",
359
+ "content": [
360
+ {"type": "text", "text": user_message},
361
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
362
+ ]
363
+ }
364
+ # Use a vision-capable model when an image is present
365
+ completion_params["model"] = "gpt-4-vision-preview" # Adjust this to your actual vision-capable model
366
+
367
+ # Only add tools and tool_choice if Tavily API key is available and no image is uploaded
298
368
  if tavily_api_key:
299
369
  completion_params["tools"] = tools
300
370
  completion_params["tool_choice"] = "auto"
301
371
 
302
372
  response = await acompletion(**completion_params)
303
- print(response)
373
+ logger.debug(f"LLM response: {response}")
304
374
 
305
375
  full_response = ""
306
376
  tool_calls = []
307
377
  current_tool_call = None
308
378
 
309
379
  async for part in response:
310
- print(part)
380
+ logger.debug(f"LLM part: {part}")
311
381
  if 'choices' in part and len(part['choices']) > 0:
312
382
  delta = part['choices'][0].get('delta', {})
313
383
 
@@ -350,7 +420,7 @@ async def main(message: cl.Message):
350
420
  available_functions = {
351
421
  "tavily_web_search": tavily_web_search,
352
422
  }
353
- messages = prompt_history + [{"role": "assistant", "content": None, "function_call": {
423
+ messages = message_history + [{"role": "assistant", "content": None, "function_call": {
354
424
  "name": tool_calls[0]['function']['name'],
355
425
  "arguments": tool_calls[0]['function']['arguments']
356
426
  }}]
@@ -467,3 +537,10 @@ async def on_chat_resume(thread: ThreadDict):
467
537
  logger.warning(f"Message without recognized type: {message}")
468
538
 
469
539
  cl.user_session.set("message_history", message_history)
540
+
541
+ # Check if there's an image in the thread metadata
542
+ image_data = metadata.get("image")
543
+ if image_data:
544
+ image = Image.open(io.BytesIO(base64.b64decode(image_data)))
545
+ cl.user_session.set("image", image)
546
+ await cl.Message(content="Previous image loaded. You can continue asking questions about it, upload a new image, or just chat.").send()
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "PraisonAI"
3
- version = "0.0.71"
3
+ version = "0.0.73"
4
4
  description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
5
5
  authors = ["Mervin Praison"]
6
6
  license = ""
@@ -24,7 +24,7 @@ pyparsing = ">=3.0.0"
24
24
  chainlit = {version = "==1.2.0", optional = true}
25
25
  gradio = {version = ">=4.26.0", optional = true}
26
26
  flask = {version = ">=3.0.0", optional = true}
27
- agentops = {version = ">=0.2.6", optional = true}
27
+ agentops = {version = ">=0.3.12", optional = true}
28
28
  langchain-google-genai = {version = ">=1.0.4", optional = true}
29
29
  langchain-anthropic = {version = ">=0.1.13", optional = true}
30
30
  langchain-openai = {version = ">=0.1.7", optional = true}
@@ -33,6 +33,7 @@ litellm = {version = ">=1.41.8", optional = true}
33
33
  aiosqlite= {version = ">=0.20.0", optional = true}
34
34
  greenlet = {version = ">=3.0.3", optional = true}
35
35
  tavily-python = {version = "==0.5.0", optional=true}
36
+ crawl4ai = {version = "==0.3.4", optional = true}
36
37
 
37
38
  [tool.poetry.group.docs.dependencies]
38
39
  mkdocs = "*"
@@ -74,7 +75,7 @@ unittest2 = "*"
74
75
  chainlit = "==1.2.0"
75
76
  gradio = ">=4.26.0"
76
77
  flask = ">=3.0.0"
77
- agentops = ">=0.2.6"
78
+ agentops = ">=0.3.12"
78
79
  langchain-google-genai = ">=1.0.4"
79
80
  langchain-anthropic = ">=0.1.13"
80
81
  langchain-openai = ">=0.1.7"
@@ -102,8 +103,8 @@ google = ["langchain-google-genai"]
102
103
  openai = ["langchain-openai"]
103
104
  anthropic = ["langchain-anthropic"]
104
105
  cohere = ["langchain-cohere"]
105
- chat = ["chainlit", "litellm", "aiosqlite", "greenlet", "tavily-python"]
106
- code = ["chainlit", "litellm", "aiosqlite", "greenlet", "tavily-python"]
106
+ chat = ["chainlit", "litellm", "aiosqlite", "greenlet", "tavily-python", "crawl4ai"]
107
+ code = ["chainlit", "litellm", "aiosqlite", "greenlet", "tavily-python", "crawl4ai"]
107
108
  train = ["setup-conda-env"]
108
109
 
109
110
  [tool.poetry-dynamic-versioning]
File without changes
File without changes
File without changes
File without changes
File without changes