PraisonAI 0.0.72__cp312-cp312-manylinux_2_35_x86_64.whl → 0.0.74__cp312-cp312-manylinux_2_35_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of PraisonAI might be problematic.
- praisonai/cli.py +30 -0
- praisonai/deploy.py +1 -1
- praisonai/ui/chat.py +49 -4
- praisonai/ui/code.py +57 -10
- praisonai/ui/realtime.py +368 -0
- praisonai/ui/realtimeclient/__init__.py +650 -0
- praisonai/ui/realtimeclient/tools.py +192 -0
- {praisonai-0.0.72.dist-info → praisonai-0.0.74.dist-info}/METADATA +27 -7
- {praisonai-0.0.72.dist-info → praisonai-0.0.74.dist-info}/RECORD +12 -9
- {praisonai-0.0.72.dist-info → praisonai-0.0.74.dist-info}/LICENSE +0 -0
- {praisonai-0.0.72.dist-info → praisonai-0.0.74.dist-info}/WHEEL +0 -0
- {praisonai-0.0.72.dist-info → praisonai-0.0.74.dist-info}/entry_points.txt +0 -0
praisonai/cli.py
CHANGED
@@ -130,6 +130,10 @@ class PraisonAI:
             self.create_code_interface()
             return
 
+        if getattr(args, 'realtime', False):
+            self.create_realtime_interface()
+            return
+
         if args.agent_file == 'train':
             package_root = os.path.dirname(os.path.abspath(__file__))
             config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
@@ -256,6 +260,7 @@ class PraisonAI:
         parser.add_argument("--hf", type=str, help="Hugging Face model name")
         parser.add_argument("--ollama", type=str, help="Ollama model name")
        parser.add_argument("--dataset", type=str, help="Dataset name for training", default="yahma/alpaca-cleaned")
+        parser.add_argument("--realtime", action="store_true", help="Start the realtime voice interaction interface")
         args, unknown_args = parser.parse_known_args()
 
         if unknown_args and unknown_args[0] == '-b' and unknown_args[1] == 'api:app':
@@ -270,6 +275,8 @@ class PraisonAI:
         if args.agent_file == 'code':
             args.ui = 'chainlit'
             args.code = True
+        if args.agent_file == 'realtime':
+            args.realtime = True
 
         return args
 
@@ -416,6 +423,29 @@ class PraisonAI:
         else:
             print("ERROR: Chainlit is not installed. Please install it with 'pip install \"praisonai\[ui]\"' to use the UI.")
 
+    def create_realtime_interface(self):
+        """
+        Create a Chainlit interface for the realtime voice interaction application.
+        """
+        if CHAINLIT_AVAILABLE:
+            import praisonai
+            os.environ["CHAINLIT_PORT"] = "8088"  # Ensure this port is not in use by another service
+            root_path = os.path.join(os.path.expanduser("~"), ".praison")
+            os.environ["CHAINLIT_APP_ROOT"] = root_path
+            public_folder = os.path.join(os.path.dirname(praisonai.__file__), 'public')
+            if not os.path.exists(os.path.join(root_path, "public")):
+                if os.path.exists(public_folder):
+                    shutil.copytree(public_folder, os.path.join(root_path, "public"), dirs_exist_ok=True)
+                    logging.info("Public folder copied successfully!")
+                else:
+                    logging.info("Public folder not found in the package.")
+            else:
+                logging.info("Public folder already exists.")
+            realtime_ui_path = os.path.join(os.path.dirname(praisonai.__file__), 'ui', 'realtime.py')
+            chainlit_run([realtime_ui_path])
+        else:
+            print("ERROR: Realtime UI is not installed. Please install it with 'pip install \"praisonai[realtime]\"' to use the realtime UI.")
+
 if __name__ == "__main__":
     praison_ai = PraisonAI()
     praison_ai.main()
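
Taken together, the cli.py changes wire up a new entry point: `--realtime` is a plain boolean flag, the bare `realtime` positional is folded into it, and `main()` dispatches to `create_realtime_interface()` before the training path, which copies the bundled `public` assets into `~/.praison` and launches Chainlit on port 8088. The standalone sketch below is illustrative only, not PraisonAI's actual parser (the helper name `parse` is hypothetical); it shows how the flag and the positional shorthand converge:

import argparse

def parse(argv):
    # Hypothetical mini-parser mirroring the --realtime flag and the
    # 'realtime' positional shorthand added to cli.py above.
    parser = argparse.ArgumentParser()
    parser.add_argument("agent_file", nargs="?", default=None)
    parser.add_argument("--realtime", action="store_true",
                        help="Start the realtime voice interaction interface")
    args, _unknown = parser.parse_known_args(argv)
    if args.agent_file == "realtime":  # `praisonai realtime` shorthand
        args.realtime = True
    return args

for argv in (["realtime"], ["--realtime"], ["agents.yaml"]):
    print(argv, "->", parse(argv).realtime)
# ['realtime'] -> True, ['--realtime'] -> True, ['agents.yaml'] -> False
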
praisonai/deploy.py
CHANGED
@@ -56,7 +56,7 @@ class CloudDeployer:
             file.write("FROM python:3.11-slim\n")
             file.write("WORKDIR /app\n")
             file.write("COPY . .\n")
-            file.write("RUN pip install flask praisonai==0.0.72 gunicorn markdown\n")
+            file.write("RUN pip install flask praisonai==0.0.74 gunicorn markdown\n")
             file.write("EXPOSE 8080\n")
             file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
 
praisonai/ui/chat.py
CHANGED
@@ -17,6 +17,9 @@ from sql_alchemy import SQLAlchemyDataLayer
 from tavily import TavilyClient
 from crawl4ai import WebCrawler
 import asyncio
+from PIL import Image
+import io
+import base64
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -292,11 +295,32 @@ async def main(message: cl.Message):
     message_history = cl.user_session.get("message_history", [])
     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
-    #
+    # Check if an image was uploaded with this message
+    image = None
+    if message.elements and isinstance(message.elements[0], cl.Image):
+        image_element = message.elements[0]
+        try:
+            # Open the image and keep it in memory
+            image = Image.open(image_element.path)
+            image.load()  # This ensures the file is fully loaded into memory
+            cl.user_session.set("image", image)
+        except Exception as e:
+            logger.error(f"Error processing image: {str(e)}")
+            await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
+            return
+
+    # Prepare user message
     user_message = f"""
-Answer the question and use tools if needed:\n
+    Answer the question and use tools if needed:\n
+
 Current Date and Time: {now}
+
+User Question: {message.content}
     """
+
+    if image:
+        user_message = f"Image uploaded. {user_message}"
+
     message_history.append({"role": "user", "content": user_message})
 
     msg = cl.Message(content="")
@@ -309,6 +333,19 @@ Current Date and Time: {now}
         "stream": True,
     }
 
+    # If an image is uploaded, include it in the message
+    if image:
+        buffered = io.BytesIO()
+        image.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+
+        completion_params["messages"][-1] = {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_message},
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
+            ]
+        }
     # Only add tools and tool_choice if Tavily API key is available
     if tavily_api_key:
         completion_params["tools"] = tools
@@ -359,6 +396,7 @@ Current Date and Time: {now}
     cl.user_session.set("message_history", message_history)
     await msg.update()
 
+    # Handle tool calls if any
     if tavily_api_key and tool_calls:
         available_functions = {
             "tavily_web_search": tavily_web_search,
@@ -411,7 +449,7 @@ Current Date and Time: {now}
         msg.content = full_response
         await msg.update()
     else:
-        # If no tool calls
+        # If no tool calls, the full_response is already set
         msg.content = full_response
         await msg.update()
 
@@ -433,7 +471,7 @@ async def send_count():
     ).send()
 
 @cl.on_chat_resume
-async def on_chat_resume(thread: ThreadDict): # Change the type hint here
+async def on_chat_resume(thread: ThreadDict):
     logger.info(f"Resuming chat: {thread['id']}")
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     logger.debug(f"Model name: {model_name}")
@@ -481,3 +519,10 @@ async def on_chat_resume(thread: ThreadDict): # Change the type hint here
             logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
+
+    # Check if there's an image in the thread metadata
+    image_data = metadata.get("image")
+    if image_data:
+        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
+        cl.user_session.set("image", image)
+        await cl.Message(content="Previous image loaded. You can continue asking questions about it or upload a new image.").send()
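
The image handling added here follows the usual pattern for vision-capable chat-completion APIs: the uploaded PIL image is re-encoded as PNG, base64-encoded into a data URL, and the last user message is replaced with a mixed text-and-image content list. The streaming, tool-call, and history handling around it are unchanged. Below is a self-contained sketch of that transformation using a synthetic image instead of a Chainlit upload (the helper name `to_multimodal_message` is illustrative, not part of chat.py):

import base64
import io

from PIL import Image

def to_multimodal_message(image, user_message):
    # Re-encode the in-memory image as PNG and wrap it in a data URL,
    # matching the payload shape chat.py builds for completion_params.
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    return {
        "role": "user",
        "content": [
            {"type": "text", "text": user_message},
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}},
        ],
    }

# Stand-in for an uploaded image: a 2x2 red square.
demo = Image.new("RGB", (2, 2), color="red")
message = to_multimodal_message(demo, "What colour is this image?")
print(message["content"][1]["image_url"]["url"][:40], "...")
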
praisonai/ui/code.py
CHANGED
@@ -18,6 +18,9 @@ from context import ContextGatherer
 from tavily import TavilyClient
 from datetime import datetime
 from crawl4ai import WebCrawler
+from PIL import Image
+import io
+import base64
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -303,16 +306,37 @@ tools = [{
 async def main(message: cl.Message):
     model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
     message_history = cl.user_session.get("message_history", [])
-    message_history.append({"role": "user", "content": message.content})
     gatherer = ContextGatherer()
     context, token_count, context_tree = gatherer.run()
     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-
-
-
-
-
+
+    # Check if an image was uploaded with this message
+    image = None
+    if message.elements and isinstance(message.elements[0], cl.Image):
+        image_element = message.elements[0]
+        try:
+            # Open the image and keep it in memory
+            image = Image.open(image_element.path)
+            image.load()  # This ensures the file is fully loaded into memory
+            cl.user_session.set("image", image)
+        except Exception as e:
+            logger.error(f"Error processing image: {str(e)}")
+            await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
+            return
+
+    # Prepare user message
+    user_message = f"""
+    Answer the question and use tools if needed:\n{message.content}.\n\n
+    Current Date and Time: {now}
+
+    Context:
+    {context}
+    """
+
+    if image:
+        user_message = f"Image uploaded. {user_message}"
+
+    message_history.append({"role": "user", "content": user_message})
 
     msg = cl.Message(content="")
     await msg.send()
@@ -320,11 +344,27 @@ async def main(message: cl.Message):
     # Prepare the completion parameters
     completion_params = {
         "model": model_name,
-        "messages":
+        "messages": message_history,
         "stream": True,
     }
 
-    #
+    # If an image is uploaded, include it in the message
+    if image:
+        buffered = io.BytesIO()
+        image.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+
+        completion_params["messages"][-1] = {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_message},
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
+            ]
+        }
+        # Use a vision-capable model when an image is present
+        completion_params["model"] = "gpt-4-vision-preview"  # Adjust this to your actual vision-capable model
+
+    # Only add tools and tool_choice if Tavily API key is available and no image is uploaded
     if tavily_api_key:
         completion_params["tools"] = tools
         completion_params["tool_choice"] = "auto"
@@ -380,7 +420,7 @@ async def main(message: cl.Message):
         available_functions = {
             "tavily_web_search": tavily_web_search,
         }
-        messages =
+        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
             "name": tool_calls[0]['function']['name'],
             "arguments": tool_calls[0]['function']['arguments']
         }}]
@@ -497,3 +537,10 @@ async def on_chat_resume(thread: ThreadDict):
             logger.warning(f"Message without recognized type: {message}")
 
     cl.user_session.set("message_history", message_history)
+
+    # Check if there's an image in the thread metadata
+    image_data = metadata.get("image")
+    if image_data:
+        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
+        cl.user_session.set("image", image)
+        await cl.Message(content="Previous image loaded. You can continue asking questions about it, upload a new image, or just chat.").send()
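
Unlike chat.py, code.py also overrides the model with a vision-capable one when an image is attached. The `on_chat_resume` additions in both UIs run the image encoding in reverse: a base64 string found under the thread metadata's `image` key is decoded back into an in-memory PIL image and stored in the session. A minimal round-trip sketch, independent of any Chainlit session (variable names are illustrative):

import base64
import io

from PIL import Image

# Encode side (illustration only): produce the base64 string the resume
# handler expects to find in thread metadata under the "image" key.
original = Image.new("RGB", (4, 4), color="blue")
buffered = io.BytesIO()
original.save(buffered, format="PNG")
image_data = base64.b64encode(buffered.getvalue()).decode()

# Decode side: what on_chat_resume does before cl.user_session.set("image", ...).
restored = Image.open(io.BytesIO(base64.b64decode(image_data)))
restored.load()  # force the pixel data to be read into memory
assert restored.size == original.size
print("restored", restored.size, restored.mode)
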
praisonai/ui/realtime.py
ADDED
@@ -0,0 +1,368 @@
+import os
+import asyncio
+import sqlite3
+from datetime import datetime
+from uuid import uuid4
+
+from openai import AsyncOpenAI
+import chainlit as cl
+from chainlit.logger import logger
+from chainlit.input_widget import TextInput
+from chainlit.types import ThreadDict
+
+from realtimeclient import RealtimeClient
+from realtimeclient.tools import tools
+from sql_alchemy import SQLAlchemyDataLayer
+import chainlit.data as cl_data
+from literalai.helper import utc_now
+import json
+
+# Set up CHAINLIT_AUTH_SECRET
+CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
+
+if not CHAINLIT_AUTH_SECRET:
+    os.environ["CHAINLIT_AUTH_SECRET"] = "p8BPhQChpg@J>jBz$wGxqLX2V>yTVgP*7Ky9H$aV:axW~ANNX-7_T:o@lnyCBu^U"
+    CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
+
+# Database path
+DB_PATH = os.path.expanduser("~/.praison/database.sqlite")
+
+def initialize_db():
+    os.makedirs(os.path.dirname(DB_PATH), exist_ok=True)
+    conn = sqlite3.connect(DB_PATH)
+    cursor = conn.cursor()
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS users (
+            id UUID PRIMARY KEY,
+            identifier TEXT NOT NULL UNIQUE,
+            metadata JSONB NOT NULL,
+            createdAt TEXT
+        )
+    ''')
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS threads (
+            id UUID PRIMARY KEY,
+            createdAt TEXT,
+            name TEXT,
+            userId UUID,
+            userIdentifier TEXT,
+            tags TEXT[],
+            metadata JSONB NOT NULL DEFAULT '{}',
+            FOREIGN KEY (userId) REFERENCES users(id) ON DELETE CASCADE
+        )
+    ''')
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS steps (
+            id UUID PRIMARY KEY,
+            name TEXT NOT NULL,
+            type TEXT NOT NULL,
+            threadId UUID NOT NULL,
+            parentId UUID,
+            disableFeedback BOOLEAN NOT NULL DEFAULT 0,
+            streaming BOOLEAN NOT NULL DEFAULT 0,
+            waitForAnswer BOOLEAN DEFAULT 0,
+            isError BOOLEAN NOT NULL DEFAULT 0,
+            metadata JSONB DEFAULT '{}',
+            tags TEXT[],
+            input TEXT,
+            output TEXT,
+            createdAt TEXT,
+            start TEXT,
+            end TEXT,
+            generation JSONB,
+            showInput TEXT,
+            language TEXT,
+            indent INT,
+            FOREIGN KEY (threadId) REFERENCES threads (id) ON DELETE CASCADE
+        )
+    ''')
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS elements (
+            id UUID PRIMARY KEY,
+            threadId UUID,
+            type TEXT,
+            url TEXT,
+            chainlitKey TEXT,
+            name TEXT NOT NULL,
+            display TEXT,
+            objectKey TEXT,
+            size TEXT,
+            page INT,
+            language TEXT,
+            forId UUID,
+            mime TEXT,
+            FOREIGN KEY (threadId) REFERENCES threads (id) ON DELETE CASCADE
+        )
+    ''')
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS feedbacks (
+            id UUID PRIMARY KEY,
+            forId UUID NOT NULL,
+            value INT NOT NULL,
+            threadId UUID,
+            comment TEXT
+        )
+    ''')
+    cursor.execute('''
+        CREATE TABLE IF NOT EXISTS settings (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            key TEXT UNIQUE,
+            value TEXT
+        )
+    ''')
+    conn.commit()
+    conn.close()
+
+def save_setting(key: str, value: str):
+    """Saves a setting to the database."""
+    conn = sqlite3.connect(DB_PATH)
+    cursor = conn.cursor()
+    cursor.execute(
+        """
+        INSERT OR REPLACE INTO settings (id, key, value)
+        VALUES ((SELECT id FROM settings WHERE key = ?), ?, ?)
+        """,
+        (key, key, value),
+    )
+    conn.commit()
+    conn.close()
+
+def load_setting(key: str) -> str:
+    """Loads a setting from the database."""
+    conn = sqlite3.connect(DB_PATH)
+    cursor = conn.cursor()
+    cursor.execute('SELECT value FROM settings WHERE key = ?', (key,))
+    result = cursor.fetchone()
+    conn.close()
+    return result[0] if result else None
+
+# Initialize the database
+initialize_db()
+
+# Set up SQLAlchemy data layer
+cl_data._data_layer = SQLAlchemyDataLayer(conninfo=f"sqlite+aiosqlite:///{DB_PATH}")
+
+client = AsyncOpenAI()
+
+@cl.on_chat_start
+async def start():
+    initialize_db()
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+    cl.user_session.set("model_name", model_name)
+    cl.user_session.set("message_history", [])  # Initialize message history
+    logger.debug(f"Model name: {model_name}")
+    # settings = cl.ChatSettings(
+    #     [
+    #         TextInput(
+    #             id="model_name",
+    #             label="Enter the Model Name",
+    #             placeholder="e.g., gpt-4o-mini",
+    #             initial=model_name
+    #         )
+    #     ]
+    # )
+    # cl.user_session.set("settings", settings)
+    # await settings.send()
+    await cl.Message(
+        content="Welcome to the PraisonAI realtime. Press `P` to talk!"
+    ).send()
+    await setup_openai_realtime()
+
+@cl.on_message
+async def on_message(message: cl.Message):
+    openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
+    message_history = cl.user_session.get("message_history", [])
+
+    if openai_realtime and openai_realtime.is_connected():
+        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        prompt = f"Current time Just for reference: {current_date}\n\n{message.content}"
+
+        # Add user message to history
+        message_history.append({"role": "user", "content": prompt})
+        cl.user_session.set("message_history", message_history)
+
+        await openai_realtime.send_user_message_content([{ "type": 'input_text', "text": message.content }])
+    else:
+        await cl.Message(content="Please activate voice mode before sending messages!").send()
+
+async def setup_openai_realtime():
+    """Instantiate and configure the OpenAI Realtime Client"""
+    openai_realtime = RealtimeClient(api_key=os.getenv("OPENAI_API_KEY"))
+    cl.user_session.set("track_id", str(uuid4()))
+
+    async def handle_conversation_updated(event):
+        item = event.get("item")
+        delta = event.get("delta")
+        """Currently used to stream audio back to the client."""
+        if delta:
+            if 'audio' in delta:
+                audio = delta['audio']  # Int16Array, audio added
+                await cl.context.emitter.send_audio_chunk(cl.OutputAudioChunk(mimeType="pcm16", data=audio, track=cl.user_session.get("track_id")))
+            if 'transcript' in delta:
+                transcript = delta['transcript']  # string, transcript added
+                logger.debug(f"Transcript delta: {transcript}")
+            if 'text' in delta:
+                text = delta['text']  # string, text added
+                logger.debug(f"Text delta: {text}")
+            if 'arguments' in delta:
+                arguments = delta['arguments']  # string, function arguments added
+                logger.debug(f"Function arguments delta: {arguments}")
+
+    async def handle_item_completed(event):
+        """Used to populate the chat context with transcription once an item is completed."""
+        try:
+            item = event.get("item")
+            logger.debug(f"Item completed: {json.dumps(item, indent=2, default=str)}")
+            await openai_realtime._send_chainlit_message(item)
+
+            # Add assistant message to history
+            message_history = cl.user_session.get("message_history", [])
+            content = item.get("formatted", {}).get("text", "") or item.get("formatted", {}).get("transcript", "")
+            if content:
+                message_history.append({"role": "assistant", "content": content})
+                cl.user_session.set("message_history", message_history)
+        except Exception as e:
+            error_message = f"Error in handle_item_completed: {str(e)}"
+            logger.error(error_message)
+            debug_item = json.dumps(item, indent=2, default=str)
+            logger.error(f"Item causing error: {debug_item}")
+
+    async def handle_conversation_interrupt(event):
+        """Used to cancel the client previous audio playback."""
+        cl.user_session.set("track_id", str(uuid4()))
+        await cl.context.emitter.send_audio_interrupt()
+
+    async def handle_error(event):
+        logger.error(event)
+        await cl.Message(content=f"Error: {event}", author="System").send()
+
+    openai_realtime.on('conversation.updated', handle_conversation_updated)
+    openai_realtime.on('conversation.item.completed', handle_item_completed)
+    openai_realtime.on('conversation.interrupted', handle_conversation_interrupt)
+    openai_realtime.on('error', handle_error)
+
+    cl.user_session.set("openai_realtime", openai_realtime)
+    coros = [openai_realtime.add_tool(tool_def, tool_handler) for tool_def, tool_handler in tools]
+    await asyncio.gather(*coros)
+
+@cl.on_settings_update
+async def setup_agent(settings):
+    logger.debug(settings)
+    cl.user_session.set("settings", settings)
+    model_name = settings["model_name"]
+    cl.user_session.set("model_name", model_name)
+
+    # Save in settings table
+    save_setting("model_name", model_name)
+
+    # Save in thread metadata
+    thread_id = cl.user_session.get("thread_id")
+    if thread_id:
+        thread = await cl_data._data_layer.get_thread(thread_id)
+        if thread:
+            metadata = thread.get("metadata", {})
+            if isinstance(metadata, str):
+                try:
+                    metadata = json.loads(metadata)
+                except json.JSONDecodeError:
+                    metadata = {}
+
+            metadata["model_name"] = model_name
+
+            # Always store metadata as a dictionary
+            await cl_data._data_layer.update_thread(thread_id, metadata=metadata)
+
+            # Update the user session with the new metadata
+            cl.user_session.set("metadata", metadata)
+
+@cl.on_audio_start
+async def on_audio_start():
+    try:
+        openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
+        await openai_realtime.connect()
+        logger.info("Connected to OpenAI realtime")
+        return True
+    except Exception as e:
+        await cl.ErrorMessage(content=f"Failed to connect to OpenAI realtime: {e}").send()
+        return False
+
+@cl.on_audio_chunk
+async def on_audio_chunk(chunk: cl.InputAudioChunk):
+    openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
+    if openai_realtime.is_connected():
+        await openai_realtime.append_input_audio(chunk.data)
+    else:
+        logger.info("RealtimeClient is not connected")
+
+@cl.on_audio_end
+@cl.on_chat_end
+@cl.on_stop
+async def on_end():
+    openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
+    if openai_realtime and openai_realtime.is_connected():
+        await openai_realtime.disconnect()
+
+@cl.password_auth_callback
+def auth_callback(username: str, password: str):
+    # You can customize this function to use your own authentication logic
+    expected_username = os.getenv("CHAINLIT_USERNAME", "admin")
+    expected_password = os.getenv("CHAINLIT_PASSWORD", "admin")
+    if (username, password) == (expected_username, expected_password):
+        return cl.User(
+            identifier=username, metadata={"role": "ADMIN", "provider": "credentials"}
+        )
+    else:
+        return None
+
+@cl.on_chat_resume
+async def on_chat_resume(thread: ThreadDict):
+    logger.info(f"Resuming chat: {thread['id']}")
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
+    logger.debug(f"Model name: {model_name}")
+    settings = cl.ChatSettings(
+        [
+            TextInput(
+                id="model_name",
+                label="Enter the Model Name",
+                placeholder="e.g., gpt-4o-mini",
+                initial=model_name
+            )
+        ]
+    )
+    await settings.send()
+    thread_id = thread["id"]
+    cl.user_session.set("thread_id", thread["id"])
+
+    # Ensure metadata is a dictionary
+    metadata = thread.get("metadata", {})
+    if isinstance(metadata, str):
+        try:
+            metadata = json.loads(metadata)
+        except json.JSONDecodeError:
+            metadata = {}
+
+    cl.user_session.set("metadata", metadata)
+
+    message_history = []
+    steps = thread["steps"]
+
+    for message in steps:
+        msg_type = message.get("type")
+        if msg_type == "user_message":
+            message_history.append({"role": "user", "content": message.get("output", "")})
+        elif msg_type == "assistant_message":
+            message_history.append({"role": "assistant", "content": message.get("output", "")})
+        elif msg_type == "run":
+            # Handle 'run' type messages
+            if message.get("isError"):
+                message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
+            else:
+                # You might want to handle non-error 'run' messages differently
+                pass
+        else:
+            logger.warning(f"Message without recognized type: {message}")
+
+    cl.user_session.set("message_history", message_history)
+
+    # Reconnect to OpenAI realtime
+    await setup_openai_realtime()
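
Note that `setup_openai_realtime()` expects `realtimeclient.tools.tools` to be an iterable of `(tool_definition, tool_handler)` pairs, each registered via `add_tool` and awaited in bulk. The pair below is a hypothetical example in that shape, purely to illustrate the contract; the actual definitions live in praisonai/ui/realtimeclient/tools.py, which is not shown in this excerpt:

from datetime import datetime

# Hypothetical tool definition: a JSON-schema-style description the
# realtime model can call by name.
get_time_def = {
    "name": "get_current_time",
    "description": "Return the current local date and time.",
    "parameters": {"type": "object", "properties": {}, "required": []},
}

# Hypothetical handler: awaited by the realtime client when the model
# invokes the tool; the returned dict is sent back as the tool result.
async def get_time_handler(**kwargs):
    return {"current_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

# The shape consumed by: [client.add_tool(d, h) for d, h in tools]
tools = [(get_time_def, get_time_handler)]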