PraisonAI 2.0.22.tar.gz → 2.0.24.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of PraisonAI might be problematic.

Files changed (84)
  1. {praisonai-2.0.22 → praisonai-2.0.24}/PKG-INFO +2 -2
  2. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/deploy.py +1 -1
  3. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/agents.py +232 -85
  4. {praisonai-2.0.22 → praisonai-2.0.24}/pyproject.toml +4 -4
  5. {praisonai-2.0.22 → praisonai-2.0.24}/LICENSE +0 -0
  6. {praisonai-2.0.22 → praisonai-2.0.24}/README.md +0 -0
  7. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/__init__.py +0 -0
  8. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/__main__.py +0 -0
  9. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/agents_generator.py +0 -0
  10. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/api/call.py +0 -0
  11. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/auto.py +0 -0
  12. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/chainlit_ui.py +0 -0
  13. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/cli.py +0 -0
  14. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/inbuilt_tools/__init__.py +0 -0
  15. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
  16. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/inc/__init__.py +0 -0
  17. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/inc/config.py +0 -0
  18. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/inc/models.py +0 -0
  19. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/android-chrome-192x192.png +0 -0
  20. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/android-chrome-512x512.png +0 -0
  21. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/apple-touch-icon.png +0 -0
  22. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/fantasy.svg +0 -0
  23. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/favicon-16x16.png +0 -0
  24. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/favicon-32x32.png +0 -0
  25. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/favicon.ico +0 -0
  26. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/game.svg +0 -0
  27. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/logo_dark.png +0 -0
  28. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/logo_light.png +0 -0
  29. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/movie.svg +0 -0
  30. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
  31. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/praison-ai-agents-architecture.png +0 -0
  32. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/public/thriller.svg +0 -0
  33. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup/__init__.py +0 -0
  34. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup/build.py +0 -0
  35. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup/config.yaml +0 -0
  36. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup/post_install.py +0 -0
  37. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup/setup_conda_env.py +0 -0
  38. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup/setup_conda_env.sh +0 -0
  39. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/setup.py +0 -0
  40. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/test.py +0 -0
  41. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/train.py +0 -0
  42. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/README.md +0 -0
  43. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/chat.py +0 -0
  44. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/code.py +0 -0
  45. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/components/aicoder.py +0 -0
  46. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/config.toml +0 -0
  47. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/bn.json +0 -0
  48. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/en-US.json +0 -0
  49. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/gu.json +0 -0
  50. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/he-IL.json +0 -0
  51. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/hi.json +0 -0
  52. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/kn.json +0 -0
  53. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/ml.json +0 -0
  54. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/mr.json +0 -0
  55. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/ta.json +0 -0
  56. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/te.json +0 -0
  57. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/.chainlit/translations/zh-CN.json +0 -0
  58. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/chainlit.md +0 -0
  59. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/bn.json +0 -0
  60. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/en-US.json +0 -0
  61. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/gu.json +0 -0
  62. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/he-IL.json +0 -0
  63. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/hi.json +0 -0
  64. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/kn.json +0 -0
  65. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/ml.json +0 -0
  66. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/mr.json +0 -0
  67. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/ta.json +0 -0
  68. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/te.json +0 -0
  69. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/config/translations/zh-CN.json +0 -0
  70. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/context.py +0 -0
  71. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/db.py +0 -0
  72. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/fantasy.svg +0 -0
  73. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/game.svg +0 -0
  74. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/logo_dark.png +0 -0
  75. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/logo_light.png +0 -0
  76. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/movie.svg +0 -0
  77. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/praison.css +0 -0
  78. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/public/thriller.svg +0 -0
  79. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/realtime.py +0 -0
  80. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/realtimeclient/__init__.py +0 -0
  81. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/realtimeclient/realtimedocs.txt +0 -0
  82. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/realtimeclient/tools.py +0 -0
  83. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/sql_alchemy.py +0 -0
  84. {praisonai-2.0.22 → praisonai-2.0.24}/praisonai/version.py +0 -0
{praisonai-2.0.22 → praisonai-2.0.24}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: PraisonAI
- Version: 2.0.22
+ Version: 2.0.24
  Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration.
  Author: Mervin Praison
  Requires-Python: >=3.10,<3.13
@@ -46,7 +46,7 @@ Requires-Dist: openai (>=1.54.0) ; extra == "call"
  Requires-Dist: playwright (>=1.47.0) ; extra == "chat" or extra == "code"
  Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
  Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "crewai" or extra == "autogen"
- Requires-Dist: praisonaiagents (>=0.0.16)
+ Requires-Dist: praisonaiagents (>=0.0.18)
  Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
  Requires-Dist: pydantic (<=2.10.1) ; extra == "ui" or extra == "chat" or extra == "code"
  Requires-Dist: pyngrok (>=1.4.0) ; extra == "call"
{praisonai-2.0.22 → praisonai-2.0.24}/praisonai/deploy.py

@@ -56,7 +56,7 @@ class CloudDeployer:
              file.write("FROM python:3.11-slim\n")
              file.write("WORKDIR /app\n")
              file.write("COPY . .\n")
-             file.write("RUN pip install flask praisonai==2.0.22 gunicorn markdown\n")
+             file.write("RUN pip install flask praisonai==2.0.24 gunicorn markdown\n")
              file.write("EXPOSE 8080\n")
              file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
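For reference, the Dockerfile emitted by the write calls above, reconstructed verbatim from this hunk (the only change in this release is the pinned praisonai version):

FROM python:3.11-slim
WORKDIR /app
COPY . .
RUN pip install flask praisonai==2.0.24 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]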
 
{praisonai-2.0.22 → praisonai-2.0.24}/praisonai/ui/agents.py

@@ -5,7 +5,7 @@ import chainlit as cl
  import os
  from chainlit.types import ThreadDict
  from chainlit.input_widget import Select, TextInput
- from typing import Optional, Dict, Any
+ from typing import Optional, Dict, Any, AsyncGenerator, List, Callable
  from dotenv import load_dotenv
  from datetime import datetime
  import json
@@ -18,6 +18,9 @@ from contextlib import redirect_stdout, asynccontextmanager
  from db import DatabaseManager
  import time
  import sqlite3
+ from openai import AsyncOpenAI
+ from functools import partial
+ import yaml

  # Load environment variables
  load_dotenv()
@@ -354,37 +357,193 @@ async def on_chat_resume(thread: ThreadDict):
  # async def tool(data: Optional[str] = None, language: Optional[str] = None):
  #     return cl.Message(content=data, language=language)

+ # Add callback handler class
+ class ChainlitCallbackHandler:
+     """Callback handler for streaming agent execution to Chainlit"""
+
+     def __init__(self, parent_step: Optional[cl.Step] = None):
+         self.parent_step = parent_step
+         self.current_step = None
+         self._steps = {}
+
+     async def on_agent_start(self, agent_name: str):
+         """Called when an agent starts execution"""
+         self.current_step = cl.Step(
+             name=f"Agent: {agent_name}",
+             type="agent",
+             show_input=True,
+             parent_id=self.parent_step.id if self.parent_step else None
+         )
+         await self.current_step.start()
+         await self.current_step.stream_token(f"🤖 Agent {agent_name} started\n")
+         self._steps[agent_name] = self.current_step
+
+     async def on_agent_action(self, agent_name: str, action: str):
+         """Called when an agent performs an action"""
+         step = self._steps.get(agent_name, self.current_step)
+         if step:
+             await step.stream_token(f"⚡ {action}\n")
+
+     async def on_agent_finish(self, agent_name: str, output: Any):
+         """Called when an agent finishes execution"""
+         step = self._steps.get(agent_name)
+         if step:
+             await step.stream_token(f"\n✅ Agent {agent_name} finished\n")
+             step.output = str(output)
+             await step.end()
+             self._steps.pop(agent_name, None)
+
+     async def on_task_start(self, task_id: str, task_name: str):
+         """Called when a task starts execution"""
+         self.current_step = cl.Step(
+             name=f"Task: {task_name}",
+             type="task",
+             show_input=True,
+             parent_id=self.parent_step.id if self.parent_step else None
+         )
+         await self.current_step.start()
+         await self.current_step.stream_token(f"📋 Starting task: {task_name}\n")
+         self._steps[task_id] = self.current_step
+
+     async def on_task_finish(self, task_id: str, output: Any):
+         """Called when a task finishes execution"""
+         step = self._steps.get(task_id, self.current_step)
+         if step:
+             await step.stream_token(f"\n✅ Task completed\n")
+             step.output = str(output)
+             await step.end()
+             self._steps.pop(task_id, None)
+
+     async def on_error(self, error: str):
+         """Called when an error occurs"""
+         if self.current_step:
+             await self.current_step.stream_token(f"\n❌ Error: {error}\n")
+             await self.current_step.end()
+
  @cl.step(type="tool", show_input=False)
  async def run_agents(agent_file: str, framework: str):
      """Runs the agents and returns the result."""
      try:
          logger.debug(f"Running agents with file: {agent_file}, framework: {framework}")
-         agents_generator = AgentsGenerator(agent_file, framework, config_list)
-         current_step = cl.context.current_step
-         logger.debug(f"Current Step: {current_step}")
-
-         stdout_buffer = StringIO()
-         with redirect_stdout(stdout_buffer):
-             result = agents_generator.generate_crew_and_kickoff()
+
+         # Create main execution step
+         async with cl.Step(name="Agents Execution", type="agents") as agents_step:
+             agents_step.input = f"Running agents from {agent_file}"

-         complete_output = stdout_buffer.getvalue()
-         logger.debug(f"Agent execution output: {complete_output}")
-
-         async with cl.Step(name="gpt4", type="llm", show_input=True) as step:
-             step.input = ""
+             # Initialize callback handler
+             callback_handler = ChainlitCallbackHandler(parent_step=agents_step)

-             for line in stdout_buffer.getvalue().splitlines():
-                 logger.debug(f"Agent output line: {line}")
-                 await step.stream_token(line)
+             try:
+                 # Load YAML config first
+                 with open(agent_file, 'r') as f:
+                     config = yaml.safe_load(f)
+
+                 # Get topic from message content
+                 topic = cl.user_session.get("message_history", [{}])[-1].get("content", "")
+
+                 # Create agents generator with loaded config
+                 agents_generator = AgentsGenerator(
+                     agent_file=agent_file,
+                     framework=framework,
+                     config_list=config_list,
+                     agent_yaml=yaml.dump(config)  # Pass the loaded config as YAML string
+                 )
+
+                 # Execute based on framework
+                 if framework == "crewai":
+                     result = agents_generator._run_crewai(config, topic, [])
+                 elif framework == "autogen":
+                     result = agents_generator._run_autogen(config, topic, [])
+                 elif framework == "praisonai":
+                     result = agents_generator._run_praisonai(config, topic, [])
+                 else:
+                     raise ValueError(f"Unsupported framework: {framework}")
+
+                 # Process the result if it has tasks
+                 if hasattr(result, 'tasks') and result.tasks:
+                     for task in result.tasks:
+                         task_id = getattr(task, 'id', str(id(task)))
+                         task_desc = getattr(task, 'description', 'Executing task...')
+
+                         # Signal task start
+                         await callback_handler.on_task_start(
+                             task_id,
+                             task_desc[:50] + "..." if len(task_desc) > 50 else task_desc
+                         )
+
+                         try:
+                             # Handle agent actions if present
+                             agent = getattr(task, 'agent', None)
+                             if agent:
+                                 agent_name = getattr(agent, 'name', 'Unknown Agent')
+                                 await callback_handler.on_agent_start(agent_name)
+                                 await callback_handler.on_agent_action(
+                                     agent_name,
+                                     f"Working on task: {task_desc[:50]}..."
+                                 )
+
+                             # Get task output
+                             task_output = getattr(task, 'output', str(task))
+
+                             # Signal agent completion if exists
+                             if agent:
+                                 await callback_handler.on_agent_finish(agent_name, task_output)
+
+                             # Signal task completion
+                             await callback_handler.on_task_finish(task_id, task_output)
+
+                         except Exception as e:
+                             await callback_handler.on_error(f"Error in task {task_id}: {str(e)}")
+                             raise

-         tool_res = await output(complete_output)
+                 # Return the final result
+                 agents_step.output = "Agents execution completed"
+                 return result if isinstance(result, str) else str(result)
+
+             except Exception as e:
+                 await callback_handler.on_error(str(e))
+                 raise

-         return result
      except Exception as e:
          error_msg = f"Error running agents: {str(e)}"
          logger.error(error_msg)
          raise Exception(error_msg)

+ async def stream_agents_execution(agents) -> AsyncGenerator[tuple[str, str, str], None]:
+     """
+     Generator to stream agents execution status and messages.
+     Yields tuples of (task_id, status, message)
+     """
+     try:
+         for task_id in agents.tasks:
+             task = agents.tasks[task_id]
+
+             # Signal task start
+             yield task_id, "start", ""
+
+             if task.async_execution:
+                 # Execute async task
+                 result = await agents.aexecute_task(task_id)
+             else:
+                 # Execute sync task in thread pool
+                 result = await cl.make_async(agents.execute_task)(task_id)
+
+             if result:
+                 # Stream agent messages
+                 if isinstance(result, str):
+                     yield task_id, "agent_message", result
+                 else:
+                     yield task_id, "agent_message", result.raw
+
+                 # Signal completion
+                 yield task_id, "complete", ""
+             else:
+                 yield task_id, "error", "Task execution failed"
+
+     except Exception as e:
+         logger.error(f"Error in stream_agents_execution: {e}")
+         yield task_id, "error", str(e)
+
  @cl.step(type="tool", show_input=False, language="yaml")
  async def output(output):
      return output
@@ -405,17 +564,38 @@ def task(output):
      {output}
      """)

+ # Add retry decorator for database operations
+ def with_retries(max_retries=3, delay=1):
+     def decorator(func):
+         async def wrapper(*args, **kwargs):
+             for attempt in range(max_retries):
+                 try:
+                     return await func(*args, **kwargs)
+                 except sqlite3.OperationalError as e:
+                     if "database is locked" in str(e) and attempt < max_retries - 1:
+                         await asyncio.sleep(delay)
+                         continue
+                     raise
+             return await func(*args, **kwargs)
+         return wrapper
+     return decorator
+
+ @with_retries(max_retries=3, delay=1)
+ async def update_thread_metadata(thread_id: str, metadata: dict):
+     """Update thread metadata with retry logic"""
+     await cl_data.update_thread(thread_id, metadata=metadata)
+
  @cl.on_message
  async def main(message: cl.Message):
-     """Run PraisonAI with the provided message as the topic."""
      try:
-         # Get or initialize message history
-         message_history = cl.user_session.get("message_history")
-         if message_history is None:
-             message_history = []
-             cl.user_session.set("message_history", message_history)
+         # Get settings and chat profile
+         settings = cl.user_session.get("settings")
+         chat_profile = cl.user_session.get("chat_profile")

-         # Add current message to history
+         # Get message history or initialize if not exists
+         message_history = cl.user_session.get("message_history", [])
+
+         # Format user message with context
          now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
          user_message = f"""
          Answer the question and use tools if needed:
@@ -424,54 +604,27 @@ async def main(message: cl.Message):

          User Question: {message.content}
          """
+
+         # Add to message history
          message_history.append({"role": "user", "content": user_message})

-         # Get chat profile and process accordingly
+         # Get configuration
+         framework = settings["Framework"]
          topic = message.content
-         chat_profile = cl.user_session.get("chat_profile")
-         logger.debug(f"Processing message with chat profile: {chat_profile}")

          if chat_profile == "Auto":
              agent_file = "agents.yaml"
              logger.info(f"Generating agents for topic: {topic}")
              generator = AutoGenerator(topic=topic, agent_file=agent_file, framework=framework, config_list=config_list)
+
              await cl.sleep(2)
              agent_file = generator.generate()

-             logger.debug("Starting agents execution")
-             agents_generator = AgentsGenerator(
-                 agent_file,
-                 framework,
-                 config_list
-             )
+             # Run agents with streaming while preserving context
+             result = await run_agents(agent_file, framework)
+             await cl.Message(content=result).send()

-             # Capture stdout
-             stdout_buffer = StringIO()
-             with redirect_stdout(stdout_buffer):
-                 result = agents_generator.generate_crew_and_kickoff()
-
-             complete_output = stdout_buffer.getvalue()
-             logger.debug(f"Agents execution output: {complete_output}")
-
-             tool_res = await output(complete_output)
-             msg = cl.Message(content=result)
-             await msg.send()
-
-             # Save to message history
-             message_history.append({"role": "assistant", "content": result})
-             cl.user_session.set("message_history", message_history)
-
-             # Update thread metadata if exists
-             thread_id = cl.user_session.get("thread_id")
-             if thread_id:
-                 metadata = {
-                     "last_response": result,
-                     "timestamp": now,
-                     "mode": "auto"
-                 }
-                 await cl_data.update_thread(thread_id, metadata=metadata)
-
-         else: # chat_profile == "Manual"
+         else: # Manual mode
              agent_file = "agents.yaml"
              full_agent_file_path = os.path.abspath(agent_file)
              full_tools_file_path = os.path.abspath("tools.py")
@@ -487,31 +640,25 @@ async def main(message: cl.Message):
                      tools_content = f.read()
                  msg_tools = cl.Message(content=tools_content, language="python")
                  await msg_tools.send()
-             else:
-                 logger.info("Generating agents for manual mode")
-                 generator = AutoGenerator(topic=topic, agent_file=agent_file, framework=framework, config_list=config_list)
-                 agent_file = generator.generate()
-
-             logger.debug("Starting agents execution for manual mode")
-             agents_generator = AgentsGenerator(agent_file, framework, config_list)
-             result = agents_generator.generate_crew_and_kickoff()
-             msg = cl.Message(content=result, actions=actions)
-             await msg.send()
-
-             # Save to message history
-             message_history.append({"role": "assistant", "content": result})
-             cl.user_session.set("message_history", message_history)

-             # Update thread metadata if exists
-             thread_id = cl.user_session.get("thread_id")
-             if thread_id:
-                 metadata = {
-                     "last_response": result,
-                     "timestamp": now,
-                     "mode": "manual"
-                 }
-                 await cl_data.update_thread(thread_id, metadata=metadata)
-
+             # Run agents with streaming while preserving context
+             result = await run_agents(agent_file, framework)
+             await cl.Message(content=result, actions=actions).send()
+
+         # Update message history
+         message_history.append({"role": "assistant", "content": result})
+         cl.user_session.set("message_history", message_history)
+
+         # Update thread metadata with retry logic
+         thread_id = cl.user_session.get("thread_id")
+         if thread_id:
+             metadata = {
+                 "last_response": result,
+                 "timestamp": now,
+                 "mode": chat_profile.lower()
+             }
+             await update_thread_metadata(thread_id, metadata)
+
      except Exception as e:
          error_msg = f"Error processing message: {str(e)}"
          logger.error(error_msg)
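Note on the agents.py change above: the stdout-capture flow is replaced by a ChainlitCallbackHandler whose async hooks open nested cl.Step blocks per task and per agent. A minimal sketch of the call order that the new run_agents drives, assuming a live Chainlit session and that ChainlitCallbackHandler from the module above is in scope (the demo coroutine and the literal task/agent names are hypothetical, not part of the package):

async def demo_callback_flow():
    # Hypothetical driver: exercises the handler hooks in the same order as run_agents.
    handler = ChainlitCallbackHandler(parent_step=None)
    await handler.on_task_start("task-1", "Summarise the topic")   # opens a "Task: ..." step
    await handler.on_agent_start("Researcher")                     # opens an "Agent: ..." step
    await handler.on_agent_action("Researcher", "Working on task: Summarise the topic...")
    await handler.on_agent_finish("Researcher", "Draft summary")   # closes the agent step
    await handler.on_task_finish("task-1", "Draft summary")        # closes the task step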
{praisonai-2.0.22 → praisonai-2.0.24}/pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "PraisonAI"
- version = "2.0.22"
+ version = "2.0.24"
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
  readme = "README.md"
  license = ""
@@ -12,7 +12,7 @@ dependencies = [
      "rich>=13.7",
      "markdown>=3.5",
      "pyparsing>=3.0.0",
-     "praisonaiagents>=0.0.16",
+     "praisonaiagents>=0.0.18",
      "python-dotenv>=0.19.0",
      "instructor>=1.3.3",
      "PyYAML>=6.0",
@@ -84,7 +84,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]

  [tool.poetry]
  name = "PraisonAI"
- version = "2.0.22"
+ version = "2.0.24"
  description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
  authors = ["Mervin Praison"]
  license = ""
@@ -102,7 +102,7 @@ python = ">=3.10,<3.13"
  rich = ">=13.7"
  markdown = ">=3.5"
  pyparsing = ">=3.0.0"
- praisonaiagents = ">=0.0.16"
+ praisonaiagents = ">=0.0.18"
  python-dotenv = ">=0.19.0"
  instructor = ">=1.3.3"
  PyYAML = ">=6.0"
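Apart from the version strings, the only dependency change in this release is the praisonaiagents floor moving from 0.0.16 to 0.0.18, mirrored in PKG-INFO, the [project] dependencies, and the [tool.poetry] section. A quick, hedged way to confirm an upgraded environment satisfies the new floors (assumes the packaging library is available; it is not a dependency declared here):

from importlib.metadata import version
from packaging.version import Version  # assumption: packaging is installed in the environment

# Compare installed versions against the floors introduced in 2.0.24.
assert Version(version("praisonai")) >= Version("2.0.24")
assert Version(version("praisonaiagents")) >= Version("0.0.18")
print("praisonai:", version("praisonai"), "| praisonaiagents:", version("praisonaiagents"))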