PraisonAI 0.0.72__tar.gz → 0.0.74__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of PraisonAI might be problematic (see the registry listing for more details).
- {praisonai-0.0.72 → praisonai-0.0.74}/PKG-INFO +27 -7
- {praisonai-0.0.72 → praisonai-0.0.74}/README.md +17 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/cli.py +30 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/deploy.py +1 -1
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/chat.py +49 -4
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/code.py +57 -10
- praisonai-0.0.74/praisonai/ui/realtime.py +368 -0
- praisonai-0.0.74/praisonai/ui/realtimeclient/__init__.py +650 -0
- praisonai-0.0.74/praisonai/ui/realtimeclient/tools.py +192 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/pyproject.toml +6 -3
- {praisonai-0.0.72 → praisonai-0.0.74}/LICENSE +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/__init__.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/__main__.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/agents_generator.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/auto.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/chainlit_ui.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/inbuilt_tools/__init__.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/inc/__init__.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/inc/config.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/inc/models.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/android-chrome-192x192.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/android-chrome-512x512.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/apple-touch-icon.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/fantasy.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/favicon-16x16.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/favicon-32x32.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/favicon.ico +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/game.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/logo_dark.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/logo_light.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/movie.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/public/thriller.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/setup/__init__.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/setup/build.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/setup/config.yaml +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/setup/post_install.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/setup/setup_conda_env.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/setup/setup_conda_env.sh +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/test.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/train.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/context.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/public/fantasy.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/public/game.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/public/logo_dark.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/public/logo_light.png +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/public/movie.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/public/thriller.svg +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/sql_alchemy.py +0 -0
- {praisonai-0.0.72 → praisonai-0.0.74}/praisonai/version.py +0 -0
{praisonai-0.0.72 → praisonai-0.0.74}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: PraisonAI
-Version: 0.0.72
+Version: 0.0.74
 Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
@@ -17,27 +17,30 @@ Provides-Extra: cohere
 Provides-Extra: google
 Provides-Extra: gradio
 Provides-Extra: openai
+Provides-Extra: realtime
 Provides-Extra: train
 Provides-Extra: ui
 Requires-Dist: agentops (>=0.3.12) ; extra == "agentops"
-Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code"
-Requires-Dist: chainlit (==1.
-Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat" or extra == "code"
+Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: chainlit (==1.3.0rc1) ; extra == "ui" or extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: crewai (>=0.32.0)
 Requires-Dist: flask (>=3.0.0) ; extra == "api"
 Requires-Dist: gradio (>=4.26.0) ; extra == "gradio"
-Requires-Dist: greenlet (>=3.0.3) ; extra == "chat" or extra == "code"
+Requires-Dist: greenlet (>=3.0.3) ; extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: langchain-anthropic (>=0.1.13) ; extra == "anthropic"
 Requires-Dist: langchain-cohere (>=0.1.4) ; extra == "cohere"
 Requires-Dist: langchain-google-genai (>=1.0.4) ; extra == "google"
 Requires-Dist: langchain-openai (>=0.1.7) ; extra == "openai"
-Requires-Dist: litellm (>=1.41.8) ; extra == "chat" or extra == "code"
+Requires-Dist: litellm (>=1.41.8) ; extra == "chat" or extra == "code" or extra == "realtime"
 Requires-Dist: markdown (>=3.5)
+Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
 Requires-Dist: praisonai-tools (>=0.0.7)
 Requires-Dist: pyautogen (>=0.2.19)
 Requires-Dist: pyparsing (>=3.0.0)
 Requires-Dist: rich (>=13.7)
-Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code"
+Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: websockets (>=12.0) ; extra == "realtime"
 Project-URL: Homepage, https://docs.praison.ai
 Project-URL: Repository, https://github.com/mervinpraison/PraisonAI
 Description-Content-Type: text/markdown
@@ -79,6 +82,7 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 | **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
 | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
 | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
+| **Realtime** | Real-time voice interaction with AI | [https://docs.praison.ai/ui/realtime](https://docs.praison.ai/ui/realtime) |
 
 | Other Features | Description | Docs |
 |---|---|---|
@@ -100,6 +104,7 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 | **PraisonAI Code** | `pip install "praisonai[code]"` |
 | **PraisonAI Chat** | `pip install "praisonai[chat]"` |
 | **PraisonAI Train** | `pip install "praisonai[train]"` |
+| **PraisonAI Realtime** | `pip install "praisonai[realtime]"` |
 
 ## Key Features
 
@@ -110,6 +115,9 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 - Interactive UIs
 - YAML-based Configuration
 - Custom Tool Integration
+- Internet Search Capability (using Crawl4AI and Tavily)
+- Vision Language Model (VLM) Support
+- Real-time Voice Interaction
 
 ## TL;DR Multi Agents
 
@@ -222,6 +230,14 @@ export OPENAI_API_KEY="Enter your API key"
 praisonai chat
 ```
 
+### Internet Search
+
+Praison AI Chat and Praison AI Code now includes internet search capabilities using Crawl4AI and Tavily, allowing you to retrieve up-to-date information during your conversations.
+
+### Vision Language Model Support
+
+You can now upload images and ask questions based on them using Vision Language Models. This feature enables visual understanding and analysis within your chat sessions.
+
 ## Praison AI Code
 
 ```bash
@@ -230,6 +246,10 @@ export OPENAI_API_KEY="Enter your API key"
 praisonai code
 ```
 
+### Internet Search
+
+Praison AI Code also includes internet search functionality, enabling you to find relevant code snippets and programming information online.
+
 ## Create Custom Tools
 
 - https://docs.praison.ai/tools/custom/
````
{praisonai-0.0.72 → praisonai-0.0.74}/README.md

````diff
@@ -35,6 +35,7 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 | **UI** | Multi Agents such as CrewAI or AutoGen | [https://docs.praison.ai/ui/ui](https://docs.praison.ai/ui/ui) |
 | **Chat** | Chat with 100+ LLMs, single AI Agent | [https://docs.praison.ai/ui/chat](https://docs.praison.ai/ui/chat) |
 | **Code** | Chat with entire Codebase, single AI Agent | [https://docs.praison.ai/ui/code](https://docs.praison.ai/ui/code) |
+| **Realtime** | Real-time voice interaction with AI | [https://docs.praison.ai/ui/realtime](https://docs.praison.ai/ui/realtime) |
 
 | Other Features | Description | Docs |
 |---|---|---|
@@ -56,6 +57,7 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 | **PraisonAI Code** | `pip install "praisonai[code]"` |
 | **PraisonAI Chat** | `pip install "praisonai[chat]"` |
 | **PraisonAI Train** | `pip install "praisonai[train]"` |
+| **PraisonAI Realtime** | `pip install "praisonai[realtime]"` |
 
 ## Key Features
 
@@ -66,6 +68,9 @@ Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, rep
 - Interactive UIs
 - YAML-based Configuration
 - Custom Tool Integration
+- Internet Search Capability (using Crawl4AI and Tavily)
+- Vision Language Model (VLM) Support
+- Real-time Voice Interaction
 
 ## TL;DR Multi Agents
 
@@ -178,6 +183,14 @@ export OPENAI_API_KEY="Enter your API key"
 praisonai chat
 ```
 
+### Internet Search
+
+Praison AI Chat and Praison AI Code now includes internet search capabilities using Crawl4AI and Tavily, allowing you to retrieve up-to-date information during your conversations.
+
+### Vision Language Model Support
+
+You can now upload images and ask questions based on them using Vision Language Models. This feature enables visual understanding and analysis within your chat sessions.
+
 ## Praison AI Code
 
 ```bash
@@ -186,6 +199,10 @@ export OPENAI_API_KEY="Enter your API key"
 praisonai code
 ```
 
+### Internet Search
+
+Praison AI Code also includes internet search functionality, enabling you to find relevant code snippets and programming information online.
+
 ## Create Custom Tools
 
 - https://docs.praison.ai/tools/custom/
````
{praisonai-0.0.72 → praisonai-0.0.74}/praisonai/cli.py

````diff
@@ -130,6 +130,10 @@ class PraisonAI:
             self.create_code_interface()
             return
 
+        if getattr(args, 'realtime', False):
+            self.create_realtime_interface()
+            return
+
         if args.agent_file == 'train':
             package_root = os.path.dirname(os.path.abspath(__file__))
             config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
@@ -256,6 +260,7 @@ class PraisonAI:
         parser.add_argument("--hf", type=str, help="Hugging Face model name")
         parser.add_argument("--ollama", type=str, help="Ollama model name")
        parser.add_argument("--dataset", type=str, help="Dataset name for training", default="yahma/alpaca-cleaned")
+        parser.add_argument("--realtime", action="store_true", help="Start the realtime voice interaction interface")
         args, unknown_args = parser.parse_known_args()
 
         if unknown_args and unknown_args[0] == '-b' and unknown_args[1] == 'api:app':
@@ -270,6 +275,8 @@ class PraisonAI:
         if args.agent_file == 'code':
             args.ui = 'chainlit'
             args.code = True
+        if args.agent_file == 'realtime':
+            args.realtime = True
 
         return args
 
@@ -416,6 +423,29 @@ class PraisonAI:
         else:
             print("ERROR: Chainlit is not installed. Please install it with 'pip install \"praisonai\[ui]\"' to use the UI.")
 
+    def create_realtime_interface(self):
+        """
+        Create a Chainlit interface for the realtime voice interaction application.
+        """
+        if CHAINLIT_AVAILABLE:
+            import praisonai
+            os.environ["CHAINLIT_PORT"] = "8088"  # Ensure this port is not in use by another service
+            root_path = os.path.join(os.path.expanduser("~"), ".praison")
+            os.environ["CHAINLIT_APP_ROOT"] = root_path
+            public_folder = os.path.join(os.path.dirname(praisonai.__file__), 'public')
+            if not os.path.exists(os.path.join(root_path, "public")):
+                if os.path.exists(public_folder):
+                    shutil.copytree(public_folder, os.path.join(root_path, "public"), dirs_exist_ok=True)
+                    logging.info("Public folder copied successfully!")
+                else:
+                    logging.info("Public folder not found in the package.")
+            else:
+                logging.info("Public folder already exists.")
+            realtime_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'realtime.py')
+            chainlit_run([realtime_ui_path])
+        else:
+            print("ERROR: Realtime UI is not installed. Please install it with 'pip install \"praisonai[realtime]\"' to use the realtime UI.")
+
 if __name__ == "__main__":
     praison_ai = PraisonAI()
     praison_ai.main()
````
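For reference, the cli.py hunks above wire the new mode in twice: `--realtime` as an explicit flag, and the bare `realtime` positional as a shorthand that sets the same flag before dispatching to `create_realtime_interface()`. Below is a minimal, self-contained sketch of that dispatch pattern, illustrative only and not the actual PraisonAI CLI; the argument default is a placeholder.

```python
import argparse

def create_realtime_interface():
    # Stand-in for the Chainlit-backed launcher shown in the hunk above.
    print("Launching realtime voice interface on port 8088...")

def parse_args(argv=None):
    parser = argparse.ArgumentParser(prog="praisonai")
    parser.add_argument("agent_file", nargs="?", default="agents.yaml",
                        help="agent file, or a shortcut such as 'chat', 'code' or 'realtime'")
    parser.add_argument("--realtime", action="store_true",
                        help="Start the realtime voice interaction interface")
    args = parser.parse_args(argv)
    if args.agent_file == "realtime":  # `praisonai realtime` is shorthand for --realtime
        args.realtime = True
    return args

if __name__ == "__main__":
    args = parse_args()
    if args.realtime:
        create_realtime_interface()
```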
{praisonai-0.0.72 → praisonai-0.0.74}/praisonai/deploy.py

````diff
@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==0.0.72 gunicorn markdown\n")
+            file.write("RUN pip install flask praisonai==0.0.74 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
             file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
````
{praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/chat.py

````diff
@@ -17,6 +17,9 @@ from sql_alchemy import SQLAlchemyDataLayer
 from tavily import TavilyClient
 from crawl4ai import WebCrawler
 import asyncio
+from PIL import Image
+import io
+import base64
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -292,11 +295,32 @@ async def main(message: cl.Message):
     message_history = cl.user_session.get("message_history", [])
     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
-    #
+    # Check if an image was uploaded with this message
+    image = None
+    if message.elements and isinstance(message.elements[0], cl.Image):
+        image_element = message.elements[0]
+        try:
+            # Open the image and keep it in memory
+            image = Image.open(image_element.path)
+            image.load()  # This ensures the file is fully loaded into memory
+            cl.user_session.set("image", image)
+        except Exception as e:
+            logger.error(f"Error processing image: {str(e)}")
+            await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
+            return
+
+    # Prepare user message
     user_message = f"""
-Answer the question and use tools if needed:\n
+    Answer the question and use tools if needed:\n
+
     Current Date and Time: {now}
+
+    User Question: {message.content}
     """
+
+    if image:
+        user_message = f"Image uploaded. {user_message}"
+
     message_history.append({"role": "user", "content": user_message})
 
     msg = cl.Message(content="")
@@ -309,6 +333,19 @@ Current Date and Time: {now}
         "stream": True,
     }
 
+    # If an image is uploaded, include it in the message
+    if image:
+        buffered = io.BytesIO()
+        image.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+
+        completion_params["messages"][-1] = {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_message},
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
+            ]
+        }
     # Only add tools and tool_choice if Tavily API key is available
     if tavily_api_key:
         completion_params["tools"] = tools
@@ -359,6 +396,7 @@ Current Date and Time: {now}
     cl.user_session.set("message_history", message_history)
     await msg.update()
 
+    # Handle tool calls if any
     if tavily_api_key and tool_calls:
         available_functions = {
             "tavily_web_search": tavily_web_search,
@@ -411,7 +449,7 @@ Current Date and Time: {now}
         msg.content = full_response
         await msg.update()
     else:
-        # If no tool calls
+        # If no tool calls, the full_response is already set
         msg.content = full_response
         await msg.update()
 
@@ -433,7 +471,7 @@ async def send_count():
     ).send()
 
 @cl.on_chat_resume
-async def on_chat_resume(thread: ThreadDict):  # Change the type hint here
+async def on_chat_resume(thread: ThreadDict):
     logger.info(f"Resuming chat: {thread['id']}")
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     logger.debug(f"Model name: {model_name}")
@@ -481,3 +519,10 @@ async def on_chat_resume(thread: ThreadDict): # Change the type hint here
             logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
+
+    # Check if there's an image in the thread metadata
+    image_data = metadata.get("image")
+    if image_data:
+        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
+        cl.user_session.set("image", image)
+        await cl.Message(content="Previous image loaded. You can continue asking questions about it or upload a new image.").send()
````
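The chat.py additions follow the OpenAI-style multimodal message shape used through litellm: the uploaded file is read with PIL, re-encoded as a base64 PNG data URL, and swapped in for the last plain-text user message before the completion call. A minimal sketch of that shape outside Chainlit, assuming litellm and a vision-capable model; the file name and model here are placeholders.

```python
import base64
import io

from PIL import Image
from litellm import completion  # litellm is the completion client the UIs depend on

def image_to_data_url(path: str) -> str:
    """Load an image with PIL and return it as a base64-encoded PNG data URL."""
    image = Image.open(path)
    image.load()  # make sure the file is fully read into memory
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buffered.getvalue()).decode()

# Placeholder inputs: any local image and any vision-capable model name.
user_message = "Describe this image."
data_url = image_to_data_url("example.png")

response = completion(
    model="gpt-4o-mini",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": user_message},
            {"type": "image_url", "image_url": {"url": data_url}},
        ],
    }],
)
print(response.choices[0].message.content)
```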
{praisonai-0.0.72 → praisonai-0.0.74}/praisonai/ui/code.py

````diff
@@ -18,6 +18,9 @@ from context import ContextGatherer
 from tavily import TavilyClient
 from datetime import datetime
 from crawl4ai import WebCrawler
+from PIL import Image
+import io
+import base64
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -303,16 +306,37 @@ tools = [{
 async def main(message: cl.Message):
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     message_history = cl.user_session.get("message_history", [])
-    message_history.append({"role": "user", "content": message.content})
     gatherer = ContextGatherer()
     context, token_count, context_tree = gatherer.run()
     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-
-
-
-
-
+
+    # Check if an image was uploaded with this message
+    image = None
+    if message.elements and isinstance(message.elements[0], cl.Image):
+        image_element = message.elements[0]
+        try:
+            # Open the image and keep it in memory
+            image = Image.open(image_element.path)
+            image.load()  # This ensures the file is fully loaded into memory
+            cl.user_session.set("image", image)
+        except Exception as e:
+            logger.error(f"Error processing image: {str(e)}")
+            await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
+            return
+
+    # Prepare user message
+    user_message = f"""
+    Answer the question and use tools if needed:\n{message.content}.\n\n
+    Current Date and Time: {now}
+
+    Context:
+    {context}
+    """
+
+    if image:
+        user_message = f"Image uploaded. {user_message}"
+
+    message_history.append({"role": "user", "content": user_message})
 
     msg = cl.Message(content="")
     await msg.send()
@@ -320,11 +344,27 @@ async def main(message: cl.Message):
     # Prepare the completion parameters
     completion_params = {
         "model": model_name,
-        "messages":
+        "messages": message_history,
         "stream": True,
     }
 
-    #
+    # If an image is uploaded, include it in the message
+    if image:
+        buffered = io.BytesIO()
+        image.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+
+        completion_params["messages"][-1] = {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_message},
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
+            ]
+        }
+        # Use a vision-capable model when an image is present
+        completion_params["model"] = "gpt-4-vision-preview"  # Adjust this to your actual vision-capable model
+
+    # Only add tools and tool_choice if Tavily API key is available and no image is uploaded
     if tavily_api_key:
         completion_params["tools"] = tools
         completion_params["tool_choice"] = "auto"
@@ -380,7 +420,7 @@ async def main(message: cl.Message):
         available_functions = {
             "tavily_web_search": tavily_web_search,
         }
-        messages =
+        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
             "name": tool_calls[0]['function']['name'],
             "arguments": tool_calls[0]['function']['arguments']
         }}]
@@ -497,3 +537,10 @@ async def on_chat_resume(thread: ThreadDict):
             logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
+
+    # Check if there's an image in the thread metadata
+    image_data = metadata.get("image")
+    if image_data:
+        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
+        cl.user_session.set("image", image)
+        await cl.Message(content="Previous image loaded. You can continue asking questions about it, upload a new image, or just chat.").send()
````