PraisonAI 2.0.20.tar.gz → 2.0.22.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of PraisonAI might be problematic.
- {praisonai-2.0.20 → praisonai-2.0.22}/PKG-INFO +20 -7
- {praisonai-2.0.20 → praisonai-2.0.22}/README.md +15 -2
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/cli.py +1 -1
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/deploy.py +1 -1
- praisonai-2.0.22/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
- praisonai-2.0.22/praisonai/public/praison-ai-agents-architecture.png +0 -0
- praisonai-2.0.22/praisonai/ui/agents.py +535 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/pyproject.toml +19 -7
- {praisonai-2.0.20 → praisonai-2.0.22}/LICENSE +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/__init__.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/__main__.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/agents_generator.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/api/call.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/auto.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/chainlit_ui.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/inbuilt_tools/__init__.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/inc/__init__.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/inc/config.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/inc/models.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/android-chrome-192x192.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/android-chrome-512x512.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/apple-touch-icon.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/fantasy.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/favicon-16x16.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/favicon-32x32.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/favicon.ico +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/game.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/logo_dark.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/logo_light.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/movie.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/public/thriller.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup/__init__.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup/build.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup/config.yaml +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup/post_install.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup/setup_conda_env.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup/setup_conda_env.sh +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/setup.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/test.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/train.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/README.md +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/chat.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/code.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/components/aicoder.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/config.toml +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/bn.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/en-US.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/gu.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/he-IL.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/hi.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/kn.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/ml.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/mr.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/ta.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/te.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/.chainlit/translations/zh-CN.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/chainlit.md +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/bn.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/en-US.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/gu.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/he-IL.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/hi.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/kn.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/ml.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/mr.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/ta.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/te.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/config/translations/zh-CN.json +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/context.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/db.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/fantasy.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/game.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/logo_dark.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/logo_light.png +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/movie.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/praison.css +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/public/thriller.svg +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/realtime.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/realtimeclient/__init__.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/realtimeclient/realtimedocs.txt +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/realtimeclient/tools.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/ui/sql_alchemy.py +0 -0
- {praisonai-2.0.20 → praisonai-2.0.22}/praisonai/version.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 2.0.20
+Version: 2.0.22
 Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -25,7 +25,7 @@ Provides-Extra: train
 Provides-Extra: ui
 Requires-Dist: PyYAML (>=6.0)
 Requires-Dist: agentops (>=0.3.12) ; extra == "agentops"
-Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: aiosqlite (>=0.20.0) ; extra == "ui" or extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: chainlit (==2.0rc1) ; extra == "ui" or extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: crewai (>=0.32.0) ; extra == "crewai" or extra == "autogen"
@@ -34,7 +34,7 @@ Requires-Dist: fastapi (>=0.95.0) ; extra == "call"
 Requires-Dist: flaml[automl] (>=2.3.1) ; extra == "call"
 Requires-Dist: flask (>=3.0.0) ; extra == "api"
 Requires-Dist: gradio (>=4.26.0) ; extra == "gradio"
-Requires-Dist: greenlet (>=3.0.3) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: greenlet (>=3.0.3) ; extra == "ui" or extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: instructor (>=1.3.3)
 Requires-Dist: langchain-anthropic (>=0.1.13) ; extra == "anthropic"
 Requires-Dist: langchain-cohere (>=0.1.4) ; extra == "cohere"
@@ -48,12 +48,12 @@ Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
 Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "crewai" or extra == "autogen"
 Requires-Dist: praisonaiagents (>=0.0.16)
 Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
-Requires-Dist: pydantic (<=2.10.1) ; extra == "chat" or extra == "code"
+Requires-Dist: pydantic (<=2.10.1) ; extra == "ui" or extra == "chat" or extra == "code"
 Requires-Dist: pyngrok (>=1.4.0) ; extra == "call"
 Requires-Dist: pyparsing (>=3.0.0)
 Requires-Dist: python-dotenv (>=0.19.0)
 Requires-Dist: rich (>=13.7) ; extra == "chat" or extra == "call"
-Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "ui" or extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: twilio (>=7.0.0) ; extra == "call"
 Requires-Dist: uvicorn (>=0.20.0) ; extra == "call"
@@ -87,17 +87,30 @@ Description-Content-Type: text/markdown
 
 PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration.
 
+<div align="center">
+<a href="https://docs.praison.ai">
+<p align="center">
+<img src="https://img.shields.io/badge/📚_Documentation-Visit_docs.praison.ai-blue?style=for-the-badge&logo=bookstack&logoColor=white" alt="Documentation" />
+</p>
+</a>
+</div>
+
 ## Key Features
 
 - 🤖 Automated AI Agents Creation
+- 🔄 Self Reflection AI Agents
+- 🧠 Reasoning AI Agents
+- 👁️ Multi Modal AI Agents
+- 🤝 Multi Agent Collaboration
+- ⚡ AI Agent Workflow
 - 🔄 Use CrewAI or AutoGen Framework
 - 💯 100+ LLM Support
 - 💻 Chat with ENTIRE Codebase
--
+- 🎨 Interactive UIs
 - 📄 YAML-based Configuration
 - 🛠️ Custom Tool Integration
 - 🔍 Internet Search Capability (using Crawl4AI and Tavily)
--
+- 🖼️ Vision Language Model (VLM) Support
 - 🎙️ Real-time Voice Interaction
 
 ## Using No Code
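The net effect of the Requires-Dist changes above is that the aiosqlite, greenlet, pydantic, and sqlalchemy requirements are now also activated by the "ui" extra, not only by "chat", "code", and "realtime". A minimal, hedged way to confirm this against an installed copy (assuming praisonai 2.0.22 is installed; importlib.metadata is standard library):

```python
# List which requirements of the installed PraisonAI distribution are gated
# on the "ui" extra. Output depends on the installed version's metadata.
from importlib.metadata import requires

ui_requirements = [
    r for r in requires("PraisonAI") or []
    if 'extra == "ui"' in r
]
print("\n".join(ui_requirements))
```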
README.md
@@ -22,17 +22,30 @@
 
 PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration.
 
+<div align="center">
+<a href="https://docs.praison.ai">
+<p align="center">
+<img src="https://img.shields.io/badge/📚_Documentation-Visit_docs.praison.ai-blue?style=for-the-badge&logo=bookstack&logoColor=white" alt="Documentation" />
+</p>
+</a>
+</div>
+
 ## Key Features
 
 - 🤖 Automated AI Agents Creation
+- 🔄 Self Reflection AI Agents
+- 🧠 Reasoning AI Agents
+- 👁️ Multi Modal AI Agents
+- 🤝 Multi Agent Collaboration
+- ⚡ AI Agent Workflow
 - 🔄 Use CrewAI or AutoGen Framework
 - 💯 100+ LLM Support
 - 💻 Chat with ENTIRE Codebase
--
+- 🎨 Interactive UIs
 - 📄 YAML-based Configuration
 - 🛠️ Custom Tool Integration
 - 🔍 Internet Search Capability (using Crawl4AI and Tavily)
--
+- 🖼️ Vision Language Model (VLM) Support
 - 🎙️ Real-time Voice Interaction
 
 ## Using No Code
praisonai/cli.py
@@ -478,7 +478,7 @@ class PraisonAI:
                 logging.info("Public folder not found in the package.")
             else:
                 logging.info("Public folder already exists.")
-            chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), '
+            chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'agents.py')
             chainlit_run([chainlit_ui_path])
         else:
             print("ERROR: Chainlit is not installed. Please install it with 'pip install \"praisonai[ui]\"' to use the UI.")
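In other words, the UI launcher now hands Chainlit the new praisonai/ui/agents.py module added in this release (the old target path is truncated in the extracted diff above). A purely illustrative check of where that path resolves in a given environment, assuming praisonai is installed:

```python
# Illustrative only: reproduce the path expression from cli.py to see which
# file the UI command would launch in this environment.
import os
import praisonai

chainlit_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'agents.py')
print(chainlit_ui_path, os.path.exists(chainlit_ui_path))
```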
praisonai/deploy.py
@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==2.0.20 gunicorn markdown\n")
+            file.write("RUN pip install flask praisonai==2.0.22 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
             file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
praisonai/public/praison-ai-agents-architecture-dark.png, praisonai/public/praison-ai-agents-architecture.png
Binary files added (no text diff shown).
praisonai/ui/agents.py (new file)
@@ -0,0 +1,535 @@
+# praisonai/chainlit_ui.py
+from praisonai.agents_generator import AgentsGenerator
+from praisonai.auto import AutoGenerator
+import chainlit as cl
+import os
+from chainlit.types import ThreadDict
+from chainlit.input_widget import Select, TextInput
+from typing import Optional, Dict, Any
+from dotenv import load_dotenv
+from datetime import datetime
+import json
+import asyncio
+import logging
+import chainlit.data as cl_data
+from literalai.helper import utc_now
+from io import StringIO
+from contextlib import redirect_stdout, asynccontextmanager
+from db import DatabaseManager
+import time
+import sqlite3
+
+# Load environment variables
+load_dotenv()
+
+# Initialize database with retry logic
+MAX_RETRIES = 3
+RETRY_DELAY = 1 # seconds
+
+async def init_database_with_retry():
+    for attempt in range(MAX_RETRIES):
+        try:
+            db_manager = DatabaseManager()
+            db_manager.initialize()
+            return db_manager
+        except sqlite3.OperationalError as e:
+            if "database is locked" in str(e):
+                if attempt < MAX_RETRIES - 1:
+                    await asyncio.sleep(RETRY_DELAY)
+                    continue
+            raise
+        except Exception as e:
+            raise
+
+# Initialize database
+db_manager = asyncio.run(init_database_with_retry())
+
+# Logging configuration
+logger = logging.getLogger(__name__)
+log_level = os.getenv("LOGLEVEL", "INFO").upper()
+logger.handlers = []
+console_handler = logging.StreamHandler()
+console_handler.setLevel(log_level)
+console_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+console_handler.setFormatter(console_formatter)
+logger.addHandler(console_handler)
+logger.setLevel(log_level)
+
+# Authentication secret setup
+CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
+if not CHAINLIT_AUTH_SECRET:
+    os.environ["CHAINLIT_AUTH_SECRET"] = "p8BPhQChpg@J>jBz$wGxqLX2V>yTVgP*7Ky9H$aV:axW~ANNX-7_T:o@lnyCBu^U"
+    CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
+
+async def save_setting_with_retry(key: str, value: str):
+    """Save a setting to the database with retry logic"""
+    for attempt in range(MAX_RETRIES):
+        try:
+            await db_manager.save_setting(key, value)
+            return
+        except sqlite3.OperationalError as e:
+            if "database is locked" in str(e):
+                if attempt < MAX_RETRIES - 1:
+                    await asyncio.sleep(RETRY_DELAY)
+                    continue
+            raise
+        except Exception as e:
+            raise
+
+async def load_setting_with_retry(key: str) -> str:
+    """Load a setting from the database with retry logic"""
+    for attempt in range(MAX_RETRIES):
+        try:
+            return await db_manager.load_setting(key)
+        except sqlite3.OperationalError as e:
+            if "database is locked" in str(e):
+                if attempt < MAX_RETRIES - 1:
+                    await asyncio.sleep(RETRY_DELAY)
+                    continue
+            raise
+        except Exception as e:
+            raise
+
+def save_setting(key: str, value: str):
+    """Save a setting to the database"""
+    asyncio.run(save_setting_with_retry(key, value))
+
+def load_setting(key: str) -> str:
+    """Load a setting from the database"""
+    return asyncio.run(load_setting_with_retry(key))
+
+cl_data._data_layer = db_manager
+
+# Authentication configuration
+AUTH_PASSWORD_ENABLED = os.getenv("AUTH_PASSWORD_ENABLED", "true").lower() == "true"
+AUTH_OAUTH_ENABLED = os.getenv("AUTH_OAUTH_ENABLED", "false").lower() == "true"
+
+username = os.getenv("CHAINLIT_USERNAME", "admin")
+password = os.getenv("CHAINLIT_PASSWORD", "admin")
+
+def auth_callback(u: str, p: str):
+    if (u, p) == (username, password):
+        return cl.User(identifier=username, metadata={"role": "ADMIN", "provider": "credentials"})
+    return None
+
+def oauth_callback(
+    provider_id: str,
+    token: str,
+    raw_user_data: Dict[str, str],
+    default_user: cl.User,
+) -> Optional[cl.User]:
+    return default_user
+
+if AUTH_PASSWORD_ENABLED:
+    auth_callback = cl.password_auth_callback(auth_callback)
+
+if AUTH_OAUTH_ENABLED:
+    oauth_callback = cl.oauth_callback(oauth_callback)
+
+framework = "praisonai"
+config_list = [
+    {
+        'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
+        'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
+        'api_key': os.environ.get("OPENAI_API_KEY", "")
+    }
+]
+agent_file = "test.yaml"
+
+actions=[
+    cl.Action(name="run", value="run", label="✅ Run"),
+    cl.Action(name="modify", value="modify", label="🔧 Modify"),
+]
+
+@cl.action_callback("run")
+async def on_run(action):
+    await main(cl.Message(content=""))
+
+@cl.action_callback("modify")
+async def on_modify(action):
+    await cl.Message(content="Modify the agents and tools from below settings").send()
+
+
+@cl.set_chat_profiles
+async def set_profiles(current_user: cl.User):
+    return [
+        cl.ChatProfile(
+            name="Auto",
+            markdown_description="Automatically generate agents and tasks based on your input.",
+            starters=[
+                cl.Starter(
+                    label="Create a movie script",
+                    message="Create a movie script about a futuristic society where AI and humans coexist, focusing on the conflict and resolution between them. Start with an intriguing opening scene.",
+                    icon="/public/movie.svg",
+                ),
+                cl.Starter(
+                    label="Design a fantasy world",
+                    message="Design a detailed fantasy world with unique geography, cultures, and magical systems. Start by describing the main continent and its inhabitants.",
+                    icon="/public/fantasy.svg",
+                ),
+                cl.Starter(
+                    label="Write a futuristic political thriller",
+                    message="Write a futuristic political thriller involving a conspiracy within a global government. Start with a high-stakes meeting that sets the plot in motion.",
+                    icon="/public/thriller.svg",
+                ),
+                cl.Starter(
+                    label="Develop a new board game",
+                    message="Develop a new, innovative board game. Describe the game's objective, rules, and unique mechanics. Create a scenario to illustrate gameplay.",
+                    icon="/public/game.svg",
+                ),
+            ]
+        ),
+        cl.ChatProfile(
+            name="Manual",
+            markdown_description="Manually define your agents and tasks using a YAML file.",
+        ),
+    ]
+
+
+@cl.on_chat_start
+async def start_chat():
+    try:
+        # Load model name from database
+        model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+        cl.user_session.set("model_name", model_name)
+        logger.debug(f"Model name: {model_name}")
+
+        cl.user_session.set(
+            "message_history",
+            [{"role": "system", "content": "You are a helpful assistant."}],
+        )
+
+        # Create tools.py if it doesn't exist
+        if not os.path.exists("tools.py"):
+            with open("tools.py", "w") as f:
+                f.write("# Add your custom tools here\n")
+
+        settings = await cl.ChatSettings(
+            [
+                TextInput(id="Model", label="OpenAI - Model", initial=model_name),
+                TextInput(id="BaseUrl", label="OpenAI - Base URL", initial=config_list[0]['base_url']),
+                TextInput(id="ApiKey", label="OpenAI - API Key", initial=config_list[0]['api_key']),
+                Select(
+                    id="Framework",
+                    label="Framework",
+                    values=["praisonai", "crewai", "autogen"],
+                    initial_index=0,
+                ),
+            ]
+        ).send()
+        cl.user_session.set("settings", settings)
+        chat_profile = cl.user_session.get("chat_profile")
+
+        if chat_profile=="Manual":
+            agent_file = "agents.yaml"
+            full_agent_file_path = os.path.abspath(agent_file)
+            if os.path.exists(full_agent_file_path):
+                with open(full_agent_file_path, 'r') as f:
+                    yaml_content = f.read()
+                msg = cl.Message(content=yaml_content, language="yaml")
+                await msg.send()
+
+            full_tools_file_path = os.path.abspath("tools.py")
+            if os.path.exists(full_tools_file_path):
+                with open(full_tools_file_path, 'r') as f:
+                    tools_content = f.read()
+                msg = cl.Message(content=tools_content, language="python")
+                await msg.send()
+
+            settings = await cl.ChatSettings(
+                [
+                    TextInput(id="Model", label="OpenAI - Model", initial=model_name),
+                    TextInput(id="BaseUrl", label="OpenAI - Base URL", initial=config_list[0]['base_url']),
+                    TextInput(id="ApiKey", label="OpenAI - API Key", initial=config_list[0]['api_key']),
+                    Select(
+                        id="Framework",
+                        label="Framework",
+                        values=["praisonai", "crewai", "autogen"],
+                        initial_index=0,
+                    ),
+                    TextInput(id="agents", label="agents.yaml", initial=yaml_content, multiline=True),
+                    TextInput(id="tools", label="tools.py", initial=tools_content, multiline=True),
+                ]
+            ).send()
+            cl.user_session.set("settings", settings)
+
+            res = await cl.AskActionMessage(
+                content="Pick an action!",
+                actions=actions,
+            ).send()
+            if res and res.get("value") == "modify":
+                await cl.Message(content="Modify the agents and tools from below settings", actions=actions).send()
+            elif res and res.get("value") == "run":
+                await main(cl.Message(content="", actions=actions))
+
+        await on_settings_update(settings)
+    except Exception as e:
+        logger.error(f"Error in start_chat: {str(e)}")
+        await cl.Message(content=f"An error occurred while starting the chat: {str(e)}").send()
+
+@cl.on_settings_update
+async def on_settings_update(settings):
+    """Handle updates to the ChatSettings form."""
+    try:
+        global config_list, framework
+        config_list[0]['model'] = settings["Model"]
+        config_list[0]['base_url'] = settings["BaseUrl"]
+        config_list[0]['api_key'] = settings["ApiKey"]
+
+        # Save settings to database with retry
+        for attempt in range(MAX_RETRIES):
+            try:
+                await save_setting_with_retry("model_name", config_list[0]['model'])
+                await save_setting_with_retry("base_url", config_list[0]['base_url'])
+                await save_setting_with_retry("api_key", config_list[0]['api_key'])
+                break
+            except sqlite3.OperationalError as e:
+                if "database is locked" in str(e) and attempt < MAX_RETRIES - 1:
+                    await asyncio.sleep(RETRY_DELAY)
+                    continue
+                raise
+
+        # Save to environment variables for compatibility
+        os.environ["OPENAI_API_KEY"] = config_list[0]['api_key']
+        os.environ["OPENAI_MODEL_NAME"] = config_list[0]['model']
+        os.environ["OPENAI_API_BASE"] = config_list[0]['base_url']
+        os.environ["MODEL_NAME"] = config_list[0]['model']
+        framework = settings["Framework"]
+        os.environ["FRAMEWORK"] = framework
+
+        if "agents" in settings:
+            with open("agents.yaml", "w") as f:
+                f.write(settings["agents"])
+        if "tools" in settings:
+            with open("tools.py", "w") as f:
+                f.write(settings["tools"])
+
+        # Update thread metadata if exists with retry
+        thread_id = cl.user_session.get("thread_id")
+        if thread_id:
+            for attempt in range(MAX_RETRIES):
+                try:
+                    thread = await cl_data.get_thread(thread_id)
+                    if thread:
+                        metadata = thread.get("metadata", {})
+                        if isinstance(metadata, str):
+                            try:
+                                metadata = json.loads(metadata)
+                            except json.JSONDecodeError:
+                                metadata = {}
+                        metadata["model_name"] = config_list[0]['model']
+                        await cl_data.update_thread(thread_id, metadata=metadata)
+                        cl.user_session.set("metadata", metadata)
+                    break
+                except sqlite3.OperationalError as e:
+                    if "database is locked" in str(e) and attempt < MAX_RETRIES - 1:
+                        await asyncio.sleep(RETRY_DELAY)
+                        continue
+                    raise
+
+        logger.info("Settings updated successfully")
+    except Exception as e:
+        logger.error(f"Error updating settings: {str(e)}")
+        await cl.Message(content=f"An error occurred while updating settings: {str(e)}. Retrying...").send()
+        # One final retry after a longer delay
+        try:
+            await asyncio.sleep(RETRY_DELAY * 2)
+            await on_settings_update(settings)
+        except Exception as e:
+            logger.error(f"Final retry failed: {str(e)}")
+            await cl.Message(content=f"Failed to update settings after retries: {str(e)}").send()
+
+@cl.on_chat_resume
+async def on_chat_resume(thread: ThreadDict):
+    message_history = cl.user_session.get("message_history", [])
+    root_messages = [m for m in thread["steps"] if m["parentId"] is None]
+    for message in root_messages:
+        if message["type"] == "user_message":
+            message_history.append({"role": "user", "content": message["output"]})
+        elif message["type"] == "ai_message":
+            message_history.append({"role": "assistant", "content": message["content"]})
+    cl.user_session.set("message_history", message_history)
+
+# @cl.step(type="tool")
+# async def tool(data: Optional[str] = None, language: Optional[str] = None):
+#     return cl.Message(content=data, language=language)
+
+@cl.step(type="tool", show_input=False)
+async def run_agents(agent_file: str, framework: str):
+    """Runs the agents and returns the result."""
+    try:
+        logger.debug(f"Running agents with file: {agent_file}, framework: {framework}")
+        agents_generator = AgentsGenerator(agent_file, framework, config_list)
+        current_step = cl.context.current_step
+        logger.debug(f"Current Step: {current_step}")
+
+        stdout_buffer = StringIO()
+        with redirect_stdout(stdout_buffer):
+            result = agents_generator.generate_crew_and_kickoff()
+
+        complete_output = stdout_buffer.getvalue()
+        logger.debug(f"Agent execution output: {complete_output}")
+
+        async with cl.Step(name="gpt4", type="llm", show_input=True) as step:
+            step.input = ""
+
+            for line in stdout_buffer.getvalue().splitlines():
+                logger.debug(f"Agent output line: {line}")
+                await step.stream_token(line)
+
+        tool_res = await output(complete_output)
+
+        return result
+    except Exception as e:
+        error_msg = f"Error running agents: {str(e)}"
+        logger.error(error_msg)
+        raise Exception(error_msg)
+
+@cl.step(type="tool", show_input=False, language="yaml")
+async def output(output):
+    return output
+
+@cl.step(type="tool", show_input=False, language="yaml")
+def agent(output):
+    return(f"""
+Agent Step Completed!
+Output: {output}
+""")
+
+@cl.step(type="tool", show_input=False, language="yaml")
+def task(output):
+    return(f"""
+Task Completed!
+Task: {output.description}
+Output: {output.raw_output}
+{output}
+""")
+
+@cl.on_message
+async def main(message: cl.Message):
+    """Run PraisonAI with the provided message as the topic."""
+    try:
+        # Get or initialize message history
+        message_history = cl.user_session.get("message_history")
+        if message_history is None:
+            message_history = []
+            cl.user_session.set("message_history", message_history)
+
+        # Add current message to history
+        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        user_message = f"""
+Answer the question and use tools if needed:
+
+Current Date and Time: {now}
+
+User Question: {message.content}
+"""
+        message_history.append({"role": "user", "content": user_message})
+
+        # Get chat profile and process accordingly
+        topic = message.content
+        chat_profile = cl.user_session.get("chat_profile")
+        logger.debug(f"Processing message with chat profile: {chat_profile}")
+
+        if chat_profile == "Auto":
+            agent_file = "agents.yaml"
+            logger.info(f"Generating agents for topic: {topic}")
+            generator = AutoGenerator(topic=topic, agent_file=agent_file, framework=framework, config_list=config_list)
+            await cl.sleep(2)
+            agent_file = generator.generate()
+
+            logger.debug("Starting agents execution")
+            agents_generator = AgentsGenerator(
+                agent_file,
+                framework,
+                config_list
+            )
+
+            # Capture stdout
+            stdout_buffer = StringIO()
+            with redirect_stdout(stdout_buffer):
+                result = agents_generator.generate_crew_and_kickoff()
+
+            complete_output = stdout_buffer.getvalue()
+            logger.debug(f"Agents execution output: {complete_output}")
+
+            tool_res = await output(complete_output)
+            msg = cl.Message(content=result)
+            await msg.send()
+
+            # Save to message history
+            message_history.append({"role": "assistant", "content": result})
+            cl.user_session.set("message_history", message_history)
+
+            # Update thread metadata if exists
+            thread_id = cl.user_session.get("thread_id")
+            if thread_id:
+                metadata = {
+                    "last_response": result,
+                    "timestamp": now,
+                    "mode": "auto"
+                }
+                await cl_data.update_thread(thread_id, metadata=metadata)
+
+        else: # chat_profile == "Manual"
+            agent_file = "agents.yaml"
+            full_agent_file_path = os.path.abspath(agent_file)
+            full_tools_file_path = os.path.abspath("tools.py")
+
+            if os.path.exists(full_agent_file_path):
+                with open(full_agent_file_path, 'r') as f:
+                    yaml_content = f.read()
+                msg_agents = cl.Message(content=yaml_content, language="yaml")
+                await msg_agents.send()
+
+            if os.path.exists(full_tools_file_path):
+                with open(full_tools_file_path, 'r') as f:
+                    tools_content = f.read()
+                msg_tools = cl.Message(content=tools_content, language="python")
+                await msg_tools.send()
+            else:
+                logger.info("Generating agents for manual mode")
+                generator = AutoGenerator(topic=topic, agent_file=agent_file, framework=framework, config_list=config_list)
+                agent_file = generator.generate()
+
+            logger.debug("Starting agents execution for manual mode")
+            agents_generator = AgentsGenerator(agent_file, framework, config_list)
+            result = agents_generator.generate_crew_and_kickoff()
+            msg = cl.Message(content=result, actions=actions)
+            await msg.send()
+
+            # Save to message history
+            message_history.append({"role": "assistant", "content": result})
+            cl.user_session.set("message_history", message_history)
+
+            # Update thread metadata if exists
+            thread_id = cl.user_session.get("thread_id")
+            if thread_id:
+                metadata = {
+                    "last_response": result,
+                    "timestamp": now,
+                    "mode": "manual"
+                }
+                await cl_data.update_thread(thread_id, metadata=metadata)
+
+    except Exception as e:
+        error_msg = f"Error processing message: {str(e)}"
+        logger.error(error_msg)
+        await cl.Message(content=error_msg).send()
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Get username and password from environment variables
+username = os.getenv("CHAINLIT_USERNAME", "admin") # Default to "admin" if not found
+password = os.getenv("CHAINLIT_PASSWORD", "admin") # Default to "admin" if not found
+
+@cl.password_auth_callback
+def auth_callback(username: str, password: str):
+    # Compare the username and password with environment variables
+    if (username, password) == (username, password):
+        return cl.User(
+            identifier=username, metadata={"role": "ADMIN", "provider": "credentials"}
+        )
+    else:
+        return None
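The new module repeats the same retry-on-lock pattern (MAX_RETRIES attempts, sleeping RETRY_DELAY seconds whenever SQLite reports "database is locked") in several places. A minimal sketch of how that pattern could be factored into a single helper; this is illustrative only and not part of the package:

```python
# Illustrative helper (not in the package): retry an awaitable factory when
# SQLite raises "database is locked", mirroring the inline loops above.
import asyncio
import sqlite3

MAX_RETRIES = 3
RETRY_DELAY = 1  # seconds

async def with_sqlite_retry(make_coro):
    for attempt in range(MAX_RETRIES):
        try:
            return await make_coro()
        except sqlite3.OperationalError as e:
            if "database is locked" in str(e) and attempt < MAX_RETRIES - 1:
                await asyncio.sleep(RETRY_DELAY)
                continue
            raise

# Usage sketch: await with_sqlite_retry(lambda: db_manager.save_setting(key, value))
```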
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "PraisonAI"
-version = "2.0.20"
+version = "2.0.22"
 description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
 readme = "README.md"
 license = ""
@@ -19,7 +19,13 @@ dependencies = [
 ]
 
 [project.optional-dependencies]
-ui = [
+ui = [
+    "chainlit==1.3.2",
+    "sqlalchemy>=2.0.36",
+    "aiosqlite>=0.20.0",
+    "greenlet>=3.0.3",
+    "pydantic<=2.10.1"
+]
 gradio = ["gradio>=4.26.0"]
 api = ["flask>=3.0.0"]
 agentops = ["agentops>=0.3.12"]
@@ -28,7 +34,7 @@ openai = ["langchain-openai>=0.1.7"]
 anthropic = ["langchain-anthropic>=0.1.13"]
 cohere = ["langchain-cohere>=0.1.4"]
 chat = [
-    "chainlit==2
+    "chainlit==1.3.2",
     "litellm>=1.41.8",
     "aiosqlite>=0.20.0",
     "greenlet>=3.0.3",
@@ -40,7 +46,7 @@ chat = [
     "pydantic<=2.10.1"
 ]
 code = [
-    "chainlit==2
+    "chainlit==1.3.2",
     "litellm>=1.41.8",
     "aiosqlite>=0.20.0",
     "greenlet>=3.0.3",
@@ -51,7 +57,7 @@ code = [
     "pydantic<=2.10.1"
 ]
 realtime = [
-    "chainlit==2
+    "chainlit==1.3.2",
     "litellm>=1.41.8",
     "aiosqlite>=0.20.0",
     "greenlet>=3.0.3",
@@ -78,7 +84,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]
 
 [tool.poetry]
 name = "PraisonAI"
-version = "2.0.20"
+version = "2.0.22"
 description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
 authors = ["Mervin Praison"]
 license = ""
@@ -193,7 +199,13 @@ setup-conda-env = "setup.setup_conda_env:main"
 praisonai-call = "praisonai.api.call:main"
 
 [tool.poetry.extras]
-ui = [
+ui = [
+    "chainlit",
+    "aiosqlite",
+    "greenlet",
+    "sqlalchemy",
+    "pydantic"
+]
 gradio = ["gradio"]
 api = ["flask"]
 agentops = ["agentops"]
All other files listed above are unchanged between praisonai-2.0.20 and praisonai-2.0.22.