PraisonAI 2.0.61 (cp313-cp313-manylinux_2_39_x86_64.whl)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
Potentially problematic release.
This version of PraisonAI might be problematic.
- praisonai/__init__.py +6 -0
- praisonai/__main__.py +10 -0
- praisonai/agents_generator.py +648 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +238 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/cli.py +518 -0
- praisonai/deploy.py +138 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +128 -0
- praisonai/public/android-chrome-192x192.png +0 -0
- praisonai/public/android-chrome-512x512.png +0 -0
- praisonai/public/apple-touch-icon.png +0 -0
- praisonai/public/fantasy.svg +3 -0
- praisonai/public/favicon-16x16.png +0 -0
- praisonai/public/favicon-32x32.png +0 -0
- praisonai/public/favicon.ico +0 -0
- praisonai/public/game.svg +3 -0
- praisonai/public/logo_dark.png +0 -0
- praisonai/public/logo_light.png +0 -0
- praisonai/public/movie.svg +3 -0
- praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
- praisonai/public/praison-ai-agents-architecture.png +0 -0
- praisonai/public/thriller.svg +3 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/config.yaml +60 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup/setup_conda_env.sh +72 -0
- praisonai/setup.py +16 -0
- praisonai/test.py +105 -0
- praisonai/train.py +276 -0
- praisonai/ui/README.md +21 -0
- praisonai/ui/agents.py +822 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chat.py +387 -0
- praisonai/ui/code.py +440 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +269 -0
- praisonai/ui/config/.chainlit/config.toml +120 -0
- praisonai/ui/config/.chainlit/translations/bn.json +231 -0
- praisonai/ui/config/.chainlit/translations/en-US.json +229 -0
- praisonai/ui/config/.chainlit/translations/gu.json +231 -0
- praisonai/ui/config/.chainlit/translations/he-IL.json +231 -0
- praisonai/ui/config/.chainlit/translations/hi.json +231 -0
- praisonai/ui/config/.chainlit/translations/kn.json +231 -0
- praisonai/ui/config/.chainlit/translations/ml.json +231 -0
- praisonai/ui/config/.chainlit/translations/mr.json +231 -0
- praisonai/ui/config/.chainlit/translations/ta.json +231 -0
- praisonai/ui/config/.chainlit/translations/te.json +231 -0
- praisonai/ui/config/.chainlit/translations/zh-CN.json +229 -0
- praisonai/ui/config/chainlit.md +1 -0
- praisonai/ui/config/translations/bn.json +231 -0
- praisonai/ui/config/translations/en-US.json +229 -0
- praisonai/ui/config/translations/gu.json +231 -0
- praisonai/ui/config/translations/he-IL.json +231 -0
- praisonai/ui/config/translations/hi.json +231 -0
- praisonai/ui/config/translations/kn.json +231 -0
- praisonai/ui/config/translations/ml.json +231 -0
- praisonai/ui/config/translations/mr.json +231 -0
- praisonai/ui/config/translations/ta.json +231 -0
- praisonai/ui/config/translations/te.json +231 -0
- praisonai/ui/config/translations/zh-CN.json +229 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/db.py +291 -0
- praisonai/ui/public/fantasy.svg +3 -0
- praisonai/ui/public/game.svg +3 -0
- praisonai/ui/public/logo_dark.png +0 -0
- praisonai/ui/public/logo_light.png +0 -0
- praisonai/ui/public/movie.svg +3 -0
- praisonai/ui/public/praison.css +3 -0
- praisonai/ui/public/thriller.svg +3 -0
- praisonai/ui/realtime.py +476 -0
- praisonai/ui/realtimeclient/__init__.py +653 -0
- praisonai/ui/realtimeclient/realtimedocs.txt +1484 -0
- praisonai/ui/realtimeclient/tools.py +236 -0
- praisonai/ui/sql_alchemy.py +707 -0
- praisonai/ui/tools.md +133 -0
- praisonai/version.py +1 -0
- praisonai-2.0.61.dist-info/LICENSE +20 -0
- praisonai-2.0.61.dist-info/METADATA +679 -0
- praisonai-2.0.61.dist-info/RECORD +89 -0
- praisonai-2.0.61.dist-info/WHEEL +4 -0
- praisonai-2.0.61.dist-info/entry_points.txt +5 -0
@@ -0,0 +1,57 @@
+import logging
+from typing import Dict, Any, Callable, Optional, Union
+import asyncio
+
+logger = logging.getLogger(__name__)
+
+class CallbackManager:
+    """Manages callbacks for the PraisonAI UI"""
+
+    def __init__(self):
+        self._callbacks: Dict[str, Dict[str, Union[Callable, bool]]] = {}
+
+    def register(self, name: str, callback: Callable, is_async: bool = False) -> None:
+        """Register a callback function"""
+        self._callbacks[name] = {
+            'func': callback,
+            'is_async': is_async
+        }
+
+    async def call(self, name: str, **kwargs) -> None:
+        """Call a registered callback"""
+        if name not in self._callbacks:
+            logger.warning(f"No callback registered for {name}")
+            return
+
+        callback_info = self._callbacks[name]
+        func = callback_info['func']
+        is_async = callback_info['is_async']
+
+        try:
+            if is_async:
+                await func(**kwargs)
+            else:
+                if asyncio.iscoroutinefunction(func):
+                    await func(**kwargs)
+                else:
+                    await asyncio.get_event_loop().run_in_executor(None, lambda: func(**kwargs))
+        except Exception as e:
+            logger.error(f"Error in callback {name}: {str(e)}")
+
+# Global callback manager instance
+callback_manager = CallbackManager()
+
+def register_callback(name: str, callback: Callable, is_async: bool = False) -> None:
+    """Register a callback with the global callback manager"""
+    callback_manager.register(name, callback, is_async)
+
+async def trigger_callback(name: str, **kwargs) -> None:
+    """Trigger a callback from the global callback manager"""
+    await callback_manager.call(name, **kwargs)
+
+# Decorator for registering callbacks
+def callback(name: str, is_async: bool = False):
+    def decorator(func):
+        register_callback(name, func, is_async)
+        return func
+    return decorator
praisonai/ui/chat.py
ADDED
@@ -0,0 +1,387 @@
+# Standard library imports
+import os
+from datetime import datetime
+from typing import Dict, Optional
+import logging
+import json
+import asyncio
+import io
+import base64
+
+# Third-party imports
+from dotenv import load_dotenv
+from PIL import Image
+from tavily import TavilyClient
+from crawl4ai import AsyncWebCrawler
+
+# Local application/library imports
+import chainlit as cl
+from chainlit.input_widget import TextInput
+from chainlit.types import ThreadDict
+import chainlit.data as cl_data
+from litellm import acompletion
+from literalai.helper import utc_now
+from db import DatabaseManager
+
+# Load environment variables
+load_dotenv()
+
+# Logging configuration
+logger = logging.getLogger(__name__)
+log_level = os.getenv("LOGLEVEL", "INFO").upper()
+logger.handlers = []
+console_handler = logging.StreamHandler()
+console_handler.setLevel(log_level)
+console_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+console_handler.setFormatter(console_formatter)
+logger.addHandler(console_handler)
+logger.setLevel(log_level)
+
+CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
+if not CHAINLIT_AUTH_SECRET:
+    os.environ["CHAINLIT_AUTH_SECRET"] = "p8BPhQChpg@J>jBz$wGxqLX2V>yTVgP*7Ky9H$aV:axW~ANNX-7_T:o@lnyCBu^U"
+    CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
+
+now = utc_now()
+create_step_counter = 0
+
+# Initialize database
+db_manager = DatabaseManager()
+db_manager.initialize()
+
+def save_setting(key: str, value: str):
+    """Save a setting to the database"""
+    asyncio.run(db_manager.save_setting(key, value))
+
+def load_setting(key: str) -> str:
+    """Load a setting from the database"""
+    return asyncio.run(db_manager.load_setting(key))
+
+cl_data._data_layer = db_manager
+
+tavily_api_key = os.getenv("TAVILY_API_KEY")
+tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None
+
+async def tavily_web_search(query):
+    if not tavily_client:
+        return json.dumps({
+            "query": query,
+            "error": "Tavily API key is not set. Web search is unavailable."
+        })
+
+    response = tavily_client.search(query)
+    logger.debug(f"Tavily search response: {response}")
+
+    async with AsyncWebCrawler() as crawler:
+        results = []
+        for result in response.get('results', []):
+            url = result.get('url')
+            if url:
+                try:
+                    crawl_result = await crawler.arun(url=url)
+                    results.append({
+                        "content": result.get('content'),
+                        "url": url,
+                        "full_content": crawl_result.markdown
+                    })
+                except Exception as e:
+                    logger.error(f"Error crawling {url}: {str(e)}")
+                    results.append({
+                        "content": result.get('content'),
+                        "url": url,
+                        "full_content": "Error: Unable to crawl this URL"
+                    })
+
+    return json.dumps({
+        "query": query,
+        "results": results
+    })
+
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "tavily_web_search",
+        "description": "Search the web using Tavily API and crawl the resulting URLs",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {"type": "string", "description": "Search query"}
+            },
+            "required": ["query"]
+        }
+    }
+}] if tavily_api_key else []
+
+# Authentication configuration
+AUTH_PASSWORD_ENABLED = os.getenv("AUTH_PASSWORD_ENABLED", "true").lower() == "true" # Password authentication enabled by default
+AUTH_OAUTH_ENABLED = os.getenv("AUTH_OAUTH_ENABLED", "false").lower() == "true" # OAuth authentication disabled by default
+
+username = os.getenv("CHAINLIT_USERNAME", "admin")
+password = os.getenv("CHAINLIT_PASSWORD", "admin")
+
+def auth_callback(u: str, p: str):
+    if (u, p) == (username, password):
+        return cl.User(identifier=username, metadata={"role": "ADMIN", "provider": "credentials"})
+    return None
+
+def oauth_callback(
+    provider_id: str,
+    token: str,
+    raw_user_data: Dict[str, str],
+    default_user: cl.User,
+) -> Optional[cl.User]:
+    return default_user
+
+if AUTH_PASSWORD_ENABLED:
+    auth_callback = cl.password_auth_callback(auth_callback)
+
+if AUTH_OAUTH_ENABLED:
+    oauth_callback = cl.oauth_callback(oauth_callback)
+
+async def send_count():
+    await cl.Message(
+        f"Create step counter: {create_step_counter}", disable_feedback=True
+    ).send()
+
+@cl.on_chat_start
+async def start():
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+    cl.user_session.set("model_name", model_name)
+    logger.debug(f"Model name: {model_name}")
+    settings = cl.ChatSettings(
+        [
+            TextInput(
+                id="model_name",
+                label="Enter the Model Name",
+                placeholder="e.g., gpt-4o-mini",
+                initial=model_name
+            )
+        ]
+    )
+    cl.user_session.set("settings", settings)
+    await settings.send()
+
+@cl.on_settings_update
+async def setup_agent(settings):
+    logger.debug(settings)
+    cl.user_session.set("settings", settings)
+    model_name = settings["model_name"]
+    cl.user_session.set("model_name", model_name)
+
+    save_setting("model_name", model_name)
+
+    thread_id = cl.user_session.get("thread_id")
+    if thread_id:
+        thread = await cl_data.get_thread(thread_id)
+        if thread:
+            metadata = thread.get("metadata", {})
+            if isinstance(metadata, str):
+                try:
+                    metadata = json.loads(metadata)
+                except json.JSONDecodeError:
+                    metadata = {}
+            metadata["model_name"] = model_name
+            await cl_data.update_thread(thread_id, metadata=metadata)
+            cl.user_session.set("metadata", metadata)
+
+@cl.on_message
+async def main(message: cl.Message):
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+    message_history = cl.user_session.get("message_history", [])
+    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    image = None
+    if message.elements and isinstance(message.elements[0], cl.Image):
+        image_element = message.elements[0]
+        try:
+            image = Image.open(image_element.path)
+            image.load()
+            cl.user_session.set("image", image)
+        except Exception as e:
+            logger.error(f"Error processing image: {str(e)}")
+            await cl.Message(content="Error processing the image. Please try again.").send()
+            return
+
+    user_message = f"""
+    Answer the question and use tools if needed:
+
+    Current Date and Time: {now}
+
+    User Question: {message.content}
+    """
+
+    if image:
+        user_message = f"Image uploaded. {user_message}"
+
+    message_history.append({"role": "user", "content": user_message})
+    msg = cl.Message(content="")
+    await msg.send()
+
+    completion_params = {
+        "model": model_name,
+        "messages": message_history,
+        "stream": True,
+    }
+
+    if image:
+        buffered = io.BytesIO()
+        image.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+        completion_params["messages"][-1] = {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_message},
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
+            ]
+        }
+
+    if tavily_api_key:
+        completion_params["tools"] = tools
+        completion_params["tool_choice"] = "auto"
+
+    response = await acompletion(**completion_params)
+
+    full_response = ""
+    tool_calls = []
+    current_tool_call = None
+
+    async for part in response:
+        if 'choices' in part and len(part['choices']) > 0:
+            delta = part['choices'][0].get('delta', {})
+
+            if 'content' in delta and delta['content'] is not None:
+                token = delta['content']
+                await msg.stream_token(token)
+                full_response += token
+
+            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
+                for tool_call in delta['tool_calls']:
+                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
+                        if current_tool_call:
+                            tool_calls.append(current_tool_call)
+                        current_tool_call = {
+                            'id': tool_call.id,
+                            'type': tool_call.type,
+                            'index': tool_call.index,
+                            'function': {
+                                'name': tool_call.function.name if tool_call.function else None,
+                                'arguments': ''
+                            }
+                        }
+                    if tool_call.function:
+                        if tool_call.function.name:
+                            current_tool_call['function']['name'] = tool_call.function.name
+                        if tool_call.function.arguments:
+                            current_tool_call['function']['arguments'] += tool_call.function.arguments
+
+    if current_tool_call:
+        tool_calls.append(current_tool_call)
+
+    logger.debug(f"Full response: {full_response}")
+    logger.debug(f"Tool calls: {tool_calls}")
+    message_history.append({"role": "assistant", "content": full_response})
+    logger.debug(f"Message history: {message_history}")
+    cl.user_session.set("message_history", message_history)
+    await msg.update()
+
+    if tavily_api_key and tool_calls:
+        available_functions = {
+            "tavily_web_search": tavily_web_search,
+        }
+        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
+            "name": tool_calls[0]['function']['name'],
+            "arguments": tool_calls[0]['function']['arguments']
+        }}]
+
+        for tool_call in tool_calls:
+            function_name = tool_call['function']['name']
+            if function_name in available_functions:
+                function_to_call = available_functions[function_name]
+                function_args = tool_call['function']['arguments']
+                if function_args:
+                    try:
+                        function_args = json.loads(function_args)
+                        function_response = await function_to_call(
+                            query=function_args.get("query"),
+                        )
+                        messages.append(
+                            {
+                                "role": "function",
+                                "name": function_name,
+                                "content": function_response,
+                            }
+                        )
+                    except json.JSONDecodeError:
+                        logger.error(f"Failed to parse function arguments: {function_args}")
+
+        second_response = await acompletion(
+            model=model_name,
+            stream=True,
+            messages=messages,
+        )
+        logger.debug(f"Second LLM response: {second_response}")
+
+        full_response = ""
+        async for part in second_response:
+            if 'choices' in part and len(part['choices']) > 0:
+                delta = part['choices'][0].get('delta', {})
+                if 'content' in delta and delta['content'] is not None:
+                    token = delta['content']
+                    await msg.stream_token(token)
+                    full_response += token
+
+        msg.content = full_response
+        await msg.update()
+    else:
+        msg.content = full_response
+        await msg.update()
+
+@cl.on_chat_resume
+async def on_chat_resume(thread: ThreadDict):
+    logger.info(f"Resuming chat: {thread['id']}")
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+    logger.debug(f"Model name: {model_name}")
+    settings = cl.ChatSettings(
+        [
+            TextInput(
+                id="model_name",
+                label="Enter the Model Name",
+                placeholder="e.g., gpt-4o-mini",
+                initial=model_name
+            )
+        ]
+    )
+    await settings.send()
+    thread_id = thread["id"]
+    cl.user_session.set("thread_id", thread_id)
+
+    metadata = thread.get("metadata", {})
+    if isinstance(metadata, str):
+        try:
+            metadata = json.loads(metadata)
+        except json.JSONDecodeError:
+            metadata = {}
+    cl.user_session.set("metadata", metadata)
+
+    message_history = cl.user_session.get("message_history", [])
+    steps = thread["steps"]
+
+    for m in steps:
+        msg_type = m.get("type")
+        if msg_type == "user_message":
+            message_history.append({"role": "user", "content": m.get("output", "")})
+        elif msg_type == "assistant_message":
+            message_history.append({"role": "assistant", "content": m.get("output", "")})
+        elif msg_type == "run":
+            if m.get("isError"):
+                message_history.append({"role": "system", "content": f"Error: {m.get('output', '')}"})
+        else:
+            logger.warning(f"Message without recognized type: {m}")
+
+    cl.user_session.set("message_history", message_history)
+
+    image_data = metadata.get("image")
+    if image_data:
+        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
+        cl.user_session.set("image", image)
+        await cl.Message(content="Previous image loaded.").send()