PraisonAI 2.0.61 (cp313-cp313-manylinux_2_39_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of PraisonAI might be problematic.

Files changed (89)
  1. praisonai/__init__.py +6 -0
  2. praisonai/__main__.py +10 -0
  3. praisonai/agents_generator.py +648 -0
  4. praisonai/api/call.py +292 -0
  5. praisonai/auto.py +238 -0
  6. praisonai/chainlit_ui.py +304 -0
  7. praisonai/cli.py +518 -0
  8. praisonai/deploy.py +138 -0
  9. praisonai/inbuilt_tools/__init__.py +24 -0
  10. praisonai/inbuilt_tools/autogen_tools.py +117 -0
  11. praisonai/inc/__init__.py +2 -0
  12. praisonai/inc/config.py +96 -0
  13. praisonai/inc/models.py +128 -0
  14. praisonai/public/android-chrome-192x192.png +0 -0
  15. praisonai/public/android-chrome-512x512.png +0 -0
  16. praisonai/public/apple-touch-icon.png +0 -0
  17. praisonai/public/fantasy.svg +3 -0
  18. praisonai/public/favicon-16x16.png +0 -0
  19. praisonai/public/favicon-32x32.png +0 -0
  20. praisonai/public/favicon.ico +0 -0
  21. praisonai/public/game.svg +3 -0
  22. praisonai/public/logo_dark.png +0 -0
  23. praisonai/public/logo_light.png +0 -0
  24. praisonai/public/movie.svg +3 -0
  25. praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
  26. praisonai/public/praison-ai-agents-architecture.png +0 -0
  27. praisonai/public/thriller.svg +3 -0
  28. praisonai/setup/__init__.py +1 -0
  29. praisonai/setup/build.py +21 -0
  30. praisonai/setup/config.yaml +60 -0
  31. praisonai/setup/post_install.py +23 -0
  32. praisonai/setup/setup_conda_env.py +25 -0
  33. praisonai/setup/setup_conda_env.sh +72 -0
  34. praisonai/setup.py +16 -0
  35. praisonai/test.py +105 -0
  36. praisonai/train.py +276 -0
  37. praisonai/ui/README.md +21 -0
  38. praisonai/ui/agents.py +822 -0
  39. praisonai/ui/callbacks.py +57 -0
  40. praisonai/ui/chat.py +387 -0
  41. praisonai/ui/code.py +440 -0
  42. praisonai/ui/colab.py +474 -0
  43. praisonai/ui/colab_chainlit.py +81 -0
  44. praisonai/ui/components/aicoder.py +269 -0
  45. praisonai/ui/config/.chainlit/config.toml +120 -0
  46. praisonai/ui/config/.chainlit/translations/bn.json +231 -0
  47. praisonai/ui/config/.chainlit/translations/en-US.json +229 -0
  48. praisonai/ui/config/.chainlit/translations/gu.json +231 -0
  49. praisonai/ui/config/.chainlit/translations/he-IL.json +231 -0
  50. praisonai/ui/config/.chainlit/translations/hi.json +231 -0
  51. praisonai/ui/config/.chainlit/translations/kn.json +231 -0
  52. praisonai/ui/config/.chainlit/translations/ml.json +231 -0
  53. praisonai/ui/config/.chainlit/translations/mr.json +231 -0
  54. praisonai/ui/config/.chainlit/translations/ta.json +231 -0
  55. praisonai/ui/config/.chainlit/translations/te.json +231 -0
  56. praisonai/ui/config/.chainlit/translations/zh-CN.json +229 -0
  57. praisonai/ui/config/chainlit.md +1 -0
  58. praisonai/ui/config/translations/bn.json +231 -0
  59. praisonai/ui/config/translations/en-US.json +229 -0
  60. praisonai/ui/config/translations/gu.json +231 -0
  61. praisonai/ui/config/translations/he-IL.json +231 -0
  62. praisonai/ui/config/translations/hi.json +231 -0
  63. praisonai/ui/config/translations/kn.json +231 -0
  64. praisonai/ui/config/translations/ml.json +231 -0
  65. praisonai/ui/config/translations/mr.json +231 -0
  66. praisonai/ui/config/translations/ta.json +231 -0
  67. praisonai/ui/config/translations/te.json +231 -0
  68. praisonai/ui/config/translations/zh-CN.json +229 -0
  69. praisonai/ui/context.py +283 -0
  70. praisonai/ui/db.py +291 -0
  71. praisonai/ui/public/fantasy.svg +3 -0
  72. praisonai/ui/public/game.svg +3 -0
  73. praisonai/ui/public/logo_dark.png +0 -0
  74. praisonai/ui/public/logo_light.png +0 -0
  75. praisonai/ui/public/movie.svg +3 -0
  76. praisonai/ui/public/praison.css +3 -0
  77. praisonai/ui/public/thriller.svg +3 -0
  78. praisonai/ui/realtime.py +476 -0
  79. praisonai/ui/realtimeclient/__init__.py +653 -0
  80. praisonai/ui/realtimeclient/realtimedocs.txt +1484 -0
  81. praisonai/ui/realtimeclient/tools.py +236 -0
  82. praisonai/ui/sql_alchemy.py +707 -0
  83. praisonai/ui/tools.md +133 -0
  84. praisonai/version.py +1 -0
  85. praisonai-2.0.61.dist-info/LICENSE +20 -0
  86. praisonai-2.0.61.dist-info/METADATA +679 -0
  87. praisonai-2.0.61.dist-info/RECORD +89 -0
  88. praisonai-2.0.61.dist-info/WHEEL +4 -0
  89. praisonai-2.0.61.dist-info/entry_points.txt +5 -0
praisonai/ui/code.py ADDED
@@ -0,0 +1,440 @@
# Standard library imports
import os
from datetime import datetime
import logging
import json
import io
import base64
import asyncio

# Third-party imports
from dotenv import load_dotenv
from PIL import Image
from context import ContextGatherer
from tavily import TavilyClient
from crawl4ai import AsyncWebCrawler

# Local application/library imports
import chainlit as cl
from chainlit.input_widget import TextInput
from chainlit.types import ThreadDict
import chainlit.data as cl_data
from litellm import acompletion
from db import DatabaseManager

# Load environment variables
load_dotenv()

# Set up logging
logger = logging.getLogger(__name__)
log_level = os.getenv("LOGLEVEL", "INFO").upper()
logger.handlers = []

# Set up logging to console
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
console_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)

# Set the logging level for the logger
logger.setLevel(log_level)

CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")

if not CHAINLIT_AUTH_SECRET:
    os.environ["CHAINLIT_AUTH_SECRET"] = "p8BPhQChpg@J>jBz$wGxqLX2V>yTVgP*7Ky9H$aV:axW~ANNX-7_T:o@lnyCBu^U"
    CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")

now = datetime.now()
create_step_counter = 0

# Initialize database
db_manager = DatabaseManager()
db_manager.initialize()

deleted_thread_ids = []  # type: List[str]

def save_setting(key: str, value: str):
    """Saves a setting to the database.

    Args:
        key: The setting key.
        value: The setting value.
    """
    asyncio.run(db_manager.save_setting(key, value))

def load_setting(key: str) -> str:
    """Loads a setting from the database.

    Args:
        key: The setting key.

    Returns:
        The setting value, or None if the key is not found.
    """
    return asyncio.run(db_manager.load_setting(key))

cl_data._data_layer = db_manager

@cl.on_chat_start
async def start():
    model_name = load_setting("model_name")

    if model_name:
        cl.user_session.set("model_name", model_name)
    else:
        # If no setting found, use default or environment variable
        model_name = os.getenv("MODEL_NAME", "gpt-4o-mini")
        cl.user_session.set("model_name", model_name)
    logger.debug(f"Model name: {model_name}")
    settings = cl.ChatSettings(
        [
            TextInput(
                id="model_name",
                label="Enter the Model Name",
                placeholder="e.g., gpt-4o-mini",
                initial=model_name
            )
        ]
    )
    cl.user_session.set("settings", settings)
    await settings.send()
    gatherer = ContextGatherer()
    context, token_count, context_tree = gatherer.run()
    msg = cl.Message(content="""Token Count: {token_count},
Files include: \n```bash\n{context_tree}\n```"""
                     .format(token_count=token_count, context_tree=context_tree))
    await msg.send()

@cl.on_settings_update
async def setup_agent(settings):
    logger.debug(settings)
    cl.user_session.set("settings", settings)
    model_name = settings["model_name"]
    cl.user_session.set("model_name", model_name)

    # Save in settings table
    save_setting("model_name", model_name)

    # Save in thread metadata
    thread_id = cl.user_session.get("thread_id")
    if thread_id:
        thread = await cl_data._data_layer.get_thread(thread_id)
        if thread:
            metadata = thread.get("metadata", {})
            if isinstance(metadata, str):
                try:
                    metadata = json.loads(metadata)
                except json.JSONDecodeError:
                    metadata = {}

            metadata["model_name"] = model_name

            # Always store metadata as a dictionary
            await cl_data._data_layer.update_thread(thread_id, metadata=metadata)

            # Update the user session with the new metadata
            cl.user_session.set("metadata", metadata)

# Set Tavily API key
tavily_api_key = os.getenv("TAVILY_API_KEY")
tavily_client = TavilyClient(api_key=tavily_api_key) if tavily_api_key else None

# Function to call Tavily Search API and crawl the results
async def tavily_web_search(query):
    if not tavily_client:
        return json.dumps({
            "query": query,
            "error": "Tavily API key is not set. Web search is unavailable."
        })

    response = tavily_client.search(query)
    logger.debug(f"Tavily search response: {response}")

    # Create an instance of AsyncWebCrawler
    async with AsyncWebCrawler() as crawler:
        # Prepare the results
        results = []
        for result in response.get('results', []):
            url = result.get('url')
            if url:
                try:
                    # Run the crawler asynchronously on each URL
                    crawl_result = await crawler.arun(url=url)
                    results.append({
                        "content": result.get('content'),
                        "url": url,
                        "full_content": crawl_result.markdown
                    })
                except Exception as e:
                    logger.error(f"Error crawling {url}: {str(e)}")
                    results.append({
                        "content": result.get('content'),
                        "url": url,
                        "full_content": "Error: Unable to crawl this URL"
                    })

    return json.dumps({
        "query": query,
        "results": results
    })

# Define the tool for function calling
tools = [{
    "type": "function",
    "function": {
        "name": "tavily_web_search",
        "description": "Search the web using Tavily API and crawl the resulting URLs",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"}
            },
            "required": ["query"]
        }
    }
}] if tavily_api_key else []
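
The streaming handler defined further below drives the full tool-calling round trip; as a reading aid, the same flow in a minimal non-streaming form looks roughly like the sketch that follows. It is illustrative only and not part of code.py: it assumes the tools list and tavily_web_search defined above, and litellm's OpenAI-compatible response shape.

import json
from litellm import acompletion

async def search_round_trip(question: str) -> str:
    # First call: let the model decide whether to invoke tavily_web_search
    messages = [{"role": "user", "content": question}]
    first = await acompletion(model="gpt-4o-mini", messages=messages,
                              tools=tools, tool_choice="auto")
    call = first.choices[0].message.tool_calls[0]  # assumes the model chose the tool
    args = json.loads(call.function.arguments)

    # Mirror the message layout used in the handler below, then ask again
    messages.append({"role": "assistant", "content": None,
                     "function_call": {"name": call.function.name,
                                       "arguments": call.function.arguments}})
    messages.append({"role": "function", "name": call.function.name,
                     "content": await tavily_web_search(query=args.get("query"))})
    second = await acompletion(model="gpt-4o-mini", messages=messages)
    return second.choices[0].message.content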

@cl.on_message
async def main(message: cl.Message):
    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
    message_history = cl.user_session.get("message_history", [])
    gatherer = ContextGatherer()
    context, token_count, context_tree = gatherer.run()
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Check if an image was uploaded with this message
    image = None
    if message.elements and isinstance(message.elements[0], cl.Image):
        image_element = message.elements[0]
        try:
            # Open the image and keep it in memory
            image = Image.open(image_element.path)
            image.load()  # This ensures the file is fully loaded into memory
            cl.user_session.set("image", image)
        except Exception as e:
            logger.error(f"Error processing image: {str(e)}")
            await cl.Message(content="There was an error processing the uploaded image. Please try again.").send()
            return

    # Prepare user message
    user_message = f"""
    Answer the question and use tools if needed:\n{message.content}.\n\n
    Current Date and Time: {now}

    Context:
    {context}
    """

    if image:
        user_message = f"Image uploaded. {user_message}"

    message_history.append({"role": "user", "content": user_message})

    msg = cl.Message(content="")
    await msg.send()

    # Prepare the completion parameters
    completion_params = {
        "model": model_name,
        "messages": message_history,
        "stream": True,
    }

    # If an image is uploaded, include it in the message
    if image:
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        completion_params["messages"][-1] = {
            "role": "user",
            "content": [
                {"type": "text", "text": user_message},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
            ]
        }
        # Use a vision-capable model when an image is present
        completion_params["model"] = "gpt-4-vision-preview"  # Adjust this to your actual vision-capable model

    # Only add tools and tool_choice if the Tavily API key is available and no image is uploaded
    if tavily_api_key and not image:
        completion_params["tools"] = tools
        completion_params["tool_choice"] = "auto"

    response = await acompletion(**completion_params)
    logger.debug(f"LLM response: {response}")

    full_response = ""
    tool_calls = []
    current_tool_call = None

    async for part in response:
        logger.debug(f"LLM part: {part}")
        if 'choices' in part and len(part['choices']) > 0:
            delta = part['choices'][0].get('delta', {})

            if 'content' in delta and delta['content'] is not None:
                token = delta['content']
                await msg.stream_token(token)
                full_response += token

            # Accumulate streamed tool-call fragments: a new index starts a new
            # entry, and argument chunks are concatenated into a JSON string.
            if tavily_api_key and 'tool_calls' in delta and delta['tool_calls'] is not None:
                for tool_call in delta['tool_calls']:
                    if current_tool_call is None or tool_call.index != current_tool_call['index']:
                        if current_tool_call:
                            tool_calls.append(current_tool_call)
                        current_tool_call = {
                            'id': tool_call.id,
                            'type': tool_call.type,
                            'index': tool_call.index,
                            'function': {
                                'name': tool_call.function.name if tool_call.function else None,
                                'arguments': ''
                            }
                        }
                    if tool_call.function:
                        if tool_call.function.name:
                            current_tool_call['function']['name'] = tool_call.function.name
                        if tool_call.function.arguments:
                            current_tool_call['function']['arguments'] += tool_call.function.arguments

    if current_tool_call:
        tool_calls.append(current_tool_call)

    logger.debug(f"Full response: {full_response}")
    logger.debug(f"Tool calls: {tool_calls}")
    message_history.append({"role": "assistant", "content": full_response})
    logger.debug(f"Message history: {message_history}")
    cl.user_session.set("message_history", message_history)
    await msg.update()

    if tavily_api_key and tool_calls:
        available_functions = {
            "tavily_web_search": tavily_web_search,
        }
        messages = message_history + [{"role": "assistant", "content": None, "function_call": {
            "name": tool_calls[0]['function']['name'],
            "arguments": tool_calls[0]['function']['arguments']
        }}]

        for tool_call in tool_calls:
            function_name = tool_call['function']['name']
            if function_name in available_functions:
                function_to_call = available_functions[function_name]
                function_args = tool_call['function']['arguments']
                if function_args:
                    try:
                        function_args = json.loads(function_args)
                        # Call the function asynchronously
                        function_response = await function_to_call(
                            query=function_args.get("query"),
                        )
                        messages.append(
                            {
                                "role": "function",
                                "name": function_name,
                                "content": function_response,
                            }
                        )
                    except json.JSONDecodeError:
                        logger.error(f"Failed to parse function arguments: {function_args}")

        second_response = await acompletion(
            model=model_name,
            stream=True,
            messages=messages,
        )
        logger.debug(f"Second LLM response: {second_response}")

        # Handle the streaming response
        full_response = ""
        async for part in second_response:
            if 'choices' in part and len(part['choices']) > 0:
                delta = part['choices'][0].get('delta', {})
                if 'content' in delta and delta['content'] is not None:
                    token = delta['content']
                    await msg.stream_token(token)
                    full_response += token

        # Update the message content
        msg.content = full_response
        await msg.update()
    else:
        # If no tool calls or Tavily API key is not set, the full_response is already set
        msg.content = full_response
        await msg.update()
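
As a reading aid for the accumulation logic above: once the first streaming loop finishes, each entry in tool_calls is a plain dict assembled from the streamed fragments. An illustrative example of one such entry follows; the values are invented for this sketch, not produced by the package.

example_tool_call = {
    'id': 'call_abc123',                                    # hypothetical id from the model
    'type': 'function',
    'index': 0,
    'function': {
        'name': 'tavily_web_search',
        'arguments': '{"query": "example search terms"}',  # concatenated argument fragments
    },
}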

expected_username = os.getenv("CHAINLIT_USERNAME", "admin")  # Default to "admin" if not found
expected_password = os.getenv("CHAINLIT_PASSWORD", "admin")  # Default to "admin" if not found

@cl.password_auth_callback
def auth_callback(username: str, password: str):
    # Accept only the credentials configured via the environment (default admin/admin)
    if (username, password) == (expected_username, expected_password):
        return cl.User(
            identifier=username, metadata={"role": "ADMIN", "provider": "credentials"}
        )
    else:
        return None

async def send_count():
    await cl.Message(
        f"Create step counter: {create_step_counter}", disable_feedback=True
    ).send()

@cl.on_chat_resume
async def on_chat_resume(thread: ThreadDict):
    logger.info(f"Resuming chat: {thread['id']}")
    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
    logger.debug(f"Model name: {model_name}")
    settings = cl.ChatSettings(
        [
            TextInput(
                id="model_name",
                label="Enter the Model Name",
                placeholder="e.g., gpt-4o-mini",
                initial=model_name
            )
        ]
    )
    await settings.send()
    cl.user_session.set("thread_id", thread["id"])

    # Ensure metadata is a dictionary
    metadata = thread.get("metadata", {})
    if isinstance(metadata, str):
        try:
            metadata = json.loads(metadata)
        except json.JSONDecodeError:
            metadata = {}

    cl.user_session.set("metadata", metadata)

    message_history = cl.user_session.get("message_history", [])
    steps = thread["steps"]

    for message in steps:
        msg_type = message.get("type")
        if msg_type == "user_message":
            message_history.append({"role": "user", "content": message.get("output", "")})
        elif msg_type == "assistant_message":
            message_history.append({"role": "assistant", "content": message.get("output", "")})
        elif msg_type == "run":
            # Handle 'run' type messages
            if message.get("isError"):
                message_history.append({"role": "system", "content": f"Error: {message.get('output', '')}"})
            else:
                # You might want to handle non-error 'run' messages differently
                pass
        else:
            logger.warning(f"Message without recognized type: {message}")

    cl.user_session.set("message_history", message_history)

    # Check if there's an image in the thread metadata
    image_data = metadata.get("image")
    if image_data:
        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
        cl.user_session.set("image", image)
        await cl.Message(content="Previous image loaded. You can continue asking questions about it, upload a new image, or just chat.").send()
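
For completeness, the module's runtime behaviour is configured entirely through environment variables read in the code above. A minimal local setup might look like the sketch below; the variable names come from the code, while the values are placeholders except where they match the defaults the code falls back to.

import os

os.environ.setdefault("MODEL_NAME", "gpt-4o-mini")      # default model used by the handlers
os.environ.setdefault("LOGLEVEL", "INFO")                # console logging level
os.environ.setdefault("TAVILY_API_KEY", "<your-key>")    # optional: enables the tavily_web_search tool
os.environ.setdefault("CHAINLIT_USERNAME", "admin")      # password-auth credentials (defaults shown)
os.environ.setdefault("CHAINLIT_PASSWORD", "admin")

The UI itself would typically be started with Chainlit's CLI, for example "chainlit run code.py", though the exact entry point PraisonAI wires up may differ.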