PraisonAI 2.0.12__cp311-cp311-macosx_15_0_arm64.whl → 2.0.53__cp311-cp311-macosx_15_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of PraisonAI might be problematic.

praisonai/ui/realtime.py CHANGED
@@ -186,11 +186,42 @@ try:
     if custom_tools_module:
         # Update the tools list with custom tools
         if hasattr(custom_tools_module, 'tools') and isinstance(custom_tools_module.tools, list):
-            tools.extend(custom_tools_module.tools)
+            # Only add tools that have proper function definitions
+            for tool in custom_tools_module.tools:
+                if isinstance(tool, tuple) and len(tool) == 2:
+                    tool_def, handler = tool
+                    if isinstance(tool_def, dict) and "type" in tool_def and tool_def["type"] == "function":
+                        # Convert class/function to proper tool definition
+                        if "function" in tool_def:
+                            func = tool_def["function"]
+                            if hasattr(func, "__name__"):
+                                tool_def = {
+                                    "name": func.__name__,
+                                    "description": func.__doc__ or f"Execute {func.__name__}",
+                                    "parameters": {
+                                        "type": "object",
+                                        "properties": {},
+                                        "required": []
+                                    }
+                                }
+                                tools.append((tool_def, handler))
+                    else:
+                        # Tool definition is already properly formatted
+                        tools.append(tool)
         else:
+            # Process individual functions/classes
             for name, obj in custom_tools_module.__dict__.items():
                 if callable(obj) and not name.startswith("__"):
-                    tools.append(({"type": "function", "function": obj}, obj))
+                    tool_def = {
+                        "name": name,
+                        "description": obj.__doc__ or f"Execute {name}",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {},
+                            "required": []
+                        }
+                    }
+                    tools.append((tool_def, obj))
 
 except Exception as e:
     logger.warning(f"Error importing custom tools: {str(e)}. Continuing without custom tools.")
@@ -198,7 +229,7 @@ except Exception as e:
 @cl.on_chat_start
 async def start():
     initialize_db()
-    model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini")
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini-realtime-preview")
     cl.user_session.set("model_name", model_name)
     cl.user_session.set("message_history", [])  # Initialize message history
     logger.debug(f"Model name: {model_name}")
@@ -207,7 +238,7 @@ async def start():
     # TextInput(
     #     id="model_name",
     #     label="Enter the Model Name",
-    #     placeholder="e.g., gpt-4o-mini",
+    #     placeholder="e.g., gpt-4o-mini-realtime-preview",
     #     initial=model_name
     # )
     # ]
@@ -287,14 +318,30 @@ async def setup_openai_realtime():
         logger.error(event)
         await cl.Message(content=f"Error: {event}", author="System").send()
 
+    # Register event handlers
     openai_realtime.on('conversation.updated', handle_conversation_updated)
     openai_realtime.on('conversation.item.completed', handle_item_completed)
     openai_realtime.on('conversation.interrupted', handle_conversation_interrupt)
     openai_realtime.on('error', handle_error)
 
     cl.user_session.set("openai_realtime", openai_realtime)
-    coros = [openai_realtime.add_tool(tool_def, tool_handler) for tool_def, tool_handler in tools]
-    await asyncio.gather(*coros)
+
+    # Filter out invalid tools and add valid ones
+    valid_tools = []
+    for tool_def, tool_handler in tools:
+        try:
+            if isinstance(tool_def, dict) and "name" in tool_def:
+                valid_tools.append((tool_def, tool_handler))
+            else:
+                logger.warning(f"Skipping invalid tool definition: {tool_def}")
+        except Exception as e:
+            logger.warning(f"Error processing tool: {e}")
+
+    if valid_tools:
+        coros = [openai_realtime.add_tool(tool_def, tool_handler) for tool_def, tool_handler in valid_tools]
+        await asyncio.gather(*coros)
+    else:
+        logger.warning("No valid tools found to add")
 
 @cl.on_settings_update
 async def setup_agent(settings):
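Registration in `setup_openai_realtime` now filters the list first and only gathers `add_tool` coroutines for entries that look like real definitions (a dict with a `name` key). A minimal, self-contained sketch of that filter-then-gather pattern, using a stub client in place of `RealtimeClient` (the stub and the sample tools are invented for illustration):

```python
import asyncio

class StubRealtimeClient:
    """Stand-in for the realtime client; only mimics add_tool."""
    async def add_tool(self, tool_def: dict, handler) -> None:
        print(f"registered {tool_def['name']}")

async def register_tools(client, tools) -> None:
    # Keep only entries that already look like {"name": ...} definitions.
    valid = [(d, h) for d, h in tools if isinstance(d, dict) and "name" in d]
    if not valid:
        print("No valid tools found to add")
        return
    await asyncio.gather(*(client.add_tool(d, h) for d, h in valid))

sample_tools = [({"name": "get_weather"}, lambda city: "sunny"), ("not-a-tool", None)]
asyncio.run(register_tools(StubRealtimeClient(), sample_tools))  # registers get_weather only
```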
@@ -330,11 +377,19 @@ async def setup_agent(settings):
 async def on_audio_start():
     try:
         openai_realtime: RealtimeClient = cl.user_session.get("openai_realtime")
-        await openai_realtime.connect()
+        if not openai_realtime:
+            await setup_openai_realtime()
+            openai_realtime = cl.user_session.get("openai_realtime")
+
+        if not openai_realtime.is_connected():
+            await openai_realtime.connect()
+
         logger.info("Connected to OpenAI realtime")
         return True
     except Exception as e:
-        await cl.ErrorMessage(content=f"Failed to connect to OpenAI realtime: {e}").send()
+        error_msg = f"Failed to connect to OpenAI realtime: {str(e)}"
+        logger.error(error_msg)
+        await cl.ErrorMessage(content=error_msg).send()
         return False
 
 @cl.on_audio_chunk
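The audio-start handler now lazily creates the client when the session has none and connects only if not already connected, so repeated audio starts become idempotent. A compact sketch of that guard pattern, with a toy client and a plain dict standing in for the Chainlit session:

```python
import asyncio

class ToyClient:
    """Toy connection object used only to illustrate the guard pattern."""
    def __init__(self) -> None:
        self._connected = False
    def is_connected(self) -> bool:
        return self._connected
    async def connect(self) -> None:
        self._connected = True

async def on_audio_start(session: dict) -> bool:
    client = session.get("client")
    if client is None:              # create lazily, once per session
        client = ToyClient()
        session["client"] = client
    if not client.is_connected():   # connect only when needed
        await client.connect()
    return True

session = {}
print(asyncio.run(on_audio_start(session)))  # True
print(asyncio.run(on_audio_start(session)))  # True again; reuses the connected client
```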
@@ -368,14 +423,14 @@ def auth_callback(username: str, password: str):
 @cl.on_chat_resume
 async def on_chat_resume(thread: ThreadDict):
     logger.info(f"Resuming chat: {thread['id']}")
-    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini"
+    model_name = load_setting("model_name") or os.getenv("MODEL_NAME") or "gpt-4o-mini-realtime-preview"
     logger.debug(f"Model name: {model_name}")
     settings = cl.ChatSettings(
         [
             TextInput(
                 id="model_name",
                 label="Enter the Model Name",
-                placeholder="e.g., gpt-4o-mini",
+                placeholder="e.g., gpt-4o-mini-realtime-preview",
                 initial=model_name
             )
         ]
@@ -9,7 +9,8 @@ import os
 import aiofiles
 import aiohttp
 
-from chainlit.data.base import BaseDataLayer, BaseStorageClient
+from chainlit.data.base import BaseDataLayer
+from chainlit.data.storage_clients.base import EXPIRY_TIME, BaseStorageClient
 from chainlit.data.utils import queue_until_user_message
 from chainlit.element import ElementDict
 from chainlit.logger import logger
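This last hunk tracks a Chainlit refactor in which `BaseStorageClient` (together with `EXPIRY_TIME`) moved to `chainlit.data.storage_clients.base`; the diff simply switches to the new path. If code had to tolerate both layouts, a guarded import is one common approach (a sketch of that option, not something the package ships):

```python
# Sketch: support both old and new Chainlit module layouts (assumption, not from the diff).
try:
    from chainlit.data.storage_clients.base import BaseStorageClient  # newer Chainlit
except ImportError:
    from chainlit.data.base import BaseStorageClient  # older Chainlit
```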
praisonai/ui/tools.md ADDED
@@ -0,0 +1,133 @@
+# Understanding Tool Integration in AI Agents - A Beginner's Guide
+
+## Overview
+This guide explains how to properly integrate tools (functions) that an AI agent can use, making them both understandable to the OpenAI API and executable by your code.
+
+## Key Components
+
+### 1. Tool Definition Structure
+```python
+# Example tool definition in tools.py
+def search_tool(query: str) -> list:
+    """
+    Perform a web search using DuckDuckGo.
+
+    Args:
+        query (str): The search query string.
+
+    Returns:
+        list: Search results with title, url, and snippet.
+    """
+    # Function implementation...
+```
+
+### 2. Tool Dictionary Format
+```python
+tools_dict = {
+    'search_tool': {
+        'type': 'function',
+        'function': {
+            'name': 'search_tool',
+            'description': '...',
+            'parameters': {
+                'type': 'object',
+                'properties': {
+                    'query': {'type': 'string'}
+                }
+            }
+        },
+        'callable': search_tool  # The actual Python function
+    }
+}
+```
+
+## The Two-Part System
+
+### Part 1: OpenAI API Communication
+```python
+# task_tools: What OpenAI understands
+task_tools = []
+tool_def = tools_dict[tool_name].copy()
+callable_func = tool_def.pop("callable")  # Remove the Python function
+task_tools.append(tool_def)  # Add clean JSON-serializable definition
+```
+
+### Part 2: Function Execution
+```python
+# role_tools: What your code executes
+role_tools = []
+role_tools.append(callable_func)  # Store the actual function
+agent.tools = role_tools  # Give agent access to executable functions
+```
+
+## Putting It All Together
+
+```python
+# Initialize empty lists
+role_tools = []  # For executable functions
+task_tools = []  # For OpenAI API definitions
+
+# Process each tool
+for tool_name in tools_list:
+    if tool_name in tools_dict:
+        # 1. Get the tool definition
+        tool_def = tools_dict[tool_name].copy()
+
+        # 2. Separate the callable function
+        callable_func = tool_def.pop("callable")
+
+        # 3. Store the function for execution
+        role_tools.append(callable_func)
+
+        # 4. Store the API definition
+        task_tools.append(tool_def)
+
+# 5. Give agent access to functions
+agent.tools = role_tools
+
+# Create task with API definitions
+task = Task(
+    description="...",
+    tools=task_tools,  # OpenAI API will use these
+    agent=agent,  # Agent has access to callable functions
+    # ... other parameters ...
+)
+```
+
+## Why This Works
+
+1. **API Communication**
+   - OpenAI API receives clean JSON tool definitions
+   - No Python functions that would cause serialization errors
+
+2. **Function Execution**
+   - Agent has access to actual Python functions
+   - Can execute tools when OpenAI decides to use them
+
+3. **Separation of Concerns**
+   - `task_tools`: Describes what tools can do (for OpenAI)
+   - `role_tools`: Actually does the work (for Python)
+
+## Common Errors and Solutions
+
+1. **"Invalid type for 'tools[0]'"**
+   - Cause: Sending null or invalid tool definition to OpenAI
+   - Solution: Use proper tool definition format in `task_tools`
+
+2. **"Object of type function is not JSON serializable"**
+   - Cause: Trying to send Python function to OpenAI API
+   - Solution: Remove callable function from API definition
+
+3. **"Tool is not callable"**
+   - Cause: Agent doesn't have access to executable functions
+   - Solution: Set `agent.tools = role_tools`
+
+## Best Practices
+
+1. Always initialize both `task_tools` and `role_tools` lists
+2. Make clean copies of tool definitions to avoid modifying originals
+3. Keep tool definitions JSON-serializable for API communication
+4. Ensure agents have access to callable functions
+5. Document tool parameters and return values clearly
+
+This structure maintains clean separation between API communication and actual function execution, making your AI agent system both reliable and maintainable.
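The second error listed in the new guide ("Object of type function is not JSON serializable") can be caught early by checking that the API-side definition survives `json.dumps` after the callable has been popped off. A short sketch in the spirit of the guide (`split_tools` and the sample data are illustrative, not part of the package):

```python
import json

def search_tool(query: str) -> list:
    """Toy stand-in for the guide's search_tool."""
    return [{"title": "example", "url": "https://example.com", "snippet": query}]

tools_dict = {
    "search_tool": {
        "type": "function",
        "function": {
            "name": "search_tool",
            "description": "Perform a web search.",
            "parameters": {"type": "object", "properties": {"query": {"type": "string"}}},
        },
        "callable": search_tool,
    }
}

def split_tools(tools_dict: dict, tools_list: list):
    """Separate JSON-safe API definitions (task_tools) from executable callables (role_tools)."""
    task_tools, role_tools = [], []
    for name in tools_list:
        if name not in tools_dict:
            continue
        tool_def = tools_dict[name].copy()           # keep the original entry intact
        role_tools.append(tool_def.pop("callable"))  # executable side
        json.dumps(tool_def)                         # raises TypeError if anything non-serializable remains
        task_tools.append(tool_def)                  # API side
    return task_tools, role_tools

task_tools, role_tools = split_tools(tools_dict, ["search_tool"])
print(task_tools[0]["function"]["name"], role_tools[0]("python"))
```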
@@ -1,7 +1,7 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
 Name: PraisonAI
-Version: 2.0.12
-Summary: PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration.
+Version: 2.0.53
+Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
 Author: Mervin Praison
 Requires-Python: >=3.10,<3.13
 Classifier: Programming Language :: Python :: 3
@@ -21,43 +21,68 @@ Provides-Extra: google
 Provides-Extra: gradio
 Provides-Extra: openai
 Provides-Extra: realtime
-Provides-Extra: train
 Provides-Extra: ui
 Requires-Dist: PyYAML (>=6.0)
 Requires-Dist: agentops (>=0.3.12) ; extra == "agentops"
-Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat" or extra == "code" or extra == "realtime"
-Requires-Dist: chainlit (==2.0rc1) ; extra == "ui" or extra == "chat" or extra == "code" or extra == "realtime"
-Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat" or extra == "code" or extra == "realtime"
-Requires-Dist: crewai (>=0.32.0) ; extra == "crewai" or extra == "autogen"
+Requires-Dist: aiosqlite (>=0.20.0) ; extra == "chat"
+Requires-Dist: aiosqlite (>=0.20.0) ; extra == "code"
+Requires-Dist: aiosqlite (>=0.20.0) ; extra == "realtime"
+Requires-Dist: aiosqlite (>=0.20.0) ; extra == "ui"
+Requires-Dist: chainlit (==2.0.3) ; extra == "chat"
+Requires-Dist: chainlit (==2.0.3) ; extra == "code"
+Requires-Dist: chainlit (==2.0.3) ; extra == "realtime"
+Requires-Dist: chainlit (==2.0.3) ; extra == "ui"
+Requires-Dist: crawl4ai (==0.3.4) ; extra == "chat"
+Requires-Dist: crawl4ai (==0.3.4) ; extra == "code"
+Requires-Dist: crawl4ai (==0.3.4) ; extra == "realtime"
+Requires-Dist: crewai (>=0.32.0) ; extra == "crewai"
+Requires-Dist: crewai ; extra == "autogen"
 Requires-Dist: duckduckgo_search (>=6.3.0) ; extra == "realtime"
 Requires-Dist: fastapi (>=0.95.0) ; extra == "call"
 Requires-Dist: flaml[automl] (>=2.3.1) ; extra == "call"
 Requires-Dist: flask (>=3.0.0) ; extra == "api"
 Requires-Dist: gradio (>=4.26.0) ; extra == "gradio"
-Requires-Dist: greenlet (>=3.0.3) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: greenlet (>=3.0.3) ; extra == "chat"
+Requires-Dist: greenlet (>=3.0.3) ; extra == "code"
+Requires-Dist: greenlet (>=3.0.3) ; extra == "realtime"
+Requires-Dist: greenlet (>=3.0.3) ; extra == "ui"
 Requires-Dist: instructor (>=1.3.3)
 Requires-Dist: langchain-anthropic (>=0.1.13) ; extra == "anthropic"
 Requires-Dist: langchain-cohere (>=0.1.4) ; extra == "cohere"
 Requires-Dist: langchain-google-genai (>=1.0.4) ; extra == "google"
 Requires-Dist: langchain-openai (>=0.1.7) ; extra == "openai"
-Requires-Dist: litellm (>=1.41.8) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: litellm (>=1.41.8) ; extra == "chat"
+Requires-Dist: litellm (>=1.41.8) ; extra == "code"
+Requires-Dist: litellm (>=1.41.8) ; extra == "realtime"
 Requires-Dist: markdown (>=3.5)
 Requires-Dist: openai (>=1.54.0) ; extra == "call"
-Requires-Dist: playwright (>=1.47.0) ; extra == "chat" or extra == "code"
+Requires-Dist: playwright (>=1.47.0) ; extra == "chat"
+Requires-Dist: playwright (>=1.47.0) ; extra == "code"
 Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
-Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "crewai" or extra == "autogen"
-Requires-Dist: praisonaiagents (>=0.0.4)
+Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "autogen"
+Requires-Dist: praisonai-tools (>=0.0.7) ; extra == "crewai"
+Requires-Dist: praisonaiagents (>=0.0.44)
 Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
-Requires-Dist: pydantic (<=2.10.1) ; extra == "chat" or extra == "code"
+Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
+Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
+Requires-Dist: pydantic (<=2.10.1) ; extra == "ui"
 Requires-Dist: pyngrok (>=1.4.0) ; extra == "call"
 Requires-Dist: pyparsing (>=3.0.0)
 Requires-Dist: python-dotenv (>=0.19.0)
-Requires-Dist: rich (>=13.7) ; extra == "chat" or extra == "call"
-Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "chat" or extra == "code" or extra == "realtime"
-Requires-Dist: tavily-python (==0.5.0) ; extra == "chat" or extra == "code" or extra == "realtime"
+Requires-Dist: rich (>=13.7)
+Requires-Dist: rich ; extra == "call"
+Requires-Dist: rich ; extra == "chat"
+Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "chat"
+Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "code"
+Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "realtime"
+Requires-Dist: sqlalchemy (>=2.0.36) ; extra == "ui"
+Requires-Dist: tavily-python (==0.5.0) ; extra == "chat"
+Requires-Dist: tavily-python (==0.5.0) ; extra == "code"
+Requires-Dist: tavily-python (==0.5.0) ; extra == "realtime"
 Requires-Dist: twilio (>=7.0.0) ; extra == "call"
 Requires-Dist: uvicorn (>=0.20.0) ; extra == "call"
-Requires-Dist: websockets (>=12.0) ; extra == "realtime" or extra == "call"
+Requires-Dist: websockets (>=12.0) ; extra == "call"
+Requires-Dist: websockets (>=12.0) ; extra == "realtime"
 Requires-Dist: yfinance (>=0.2.44) ; extra == "realtime"
 Project-URL: Homepage, https://docs.praison.ai
 Project-URL: Repository, https://github.com/mervinpraison/PraisonAI
@@ -85,7 +110,142 @@ Description-Content-Type: text/markdown
 
 </div>
 
-Praison AI, leveraging both AutoGen and CrewAI or any other agent framework, represents a low-code, centralised framework designed to simplify the creation and orchestration of multi-agent systems for various LLM applications, emphasizing ease of use, customization, and human-agent interaction.
+PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
+
+<div align="center">
+  <a href="https://docs.praison.ai">
+    <p align="center">
+      <img src="https://img.shields.io/badge/📚_Documentation-Visit_docs.praison.ai-blue?style=for-the-badge&logo=bookstack&logoColor=white" alt="Documentation" />
+    </p>
+  </a>
+</div>
+
+## Key Features
+
+- 🤖 Automated AI Agents Creation
+- 🔄 Self Reflection AI Agents
+- 🧠 Reasoning AI Agents
+- 👁️ Multi Modal AI Agents
+- 🤝 Multi Agent Collaboration
+- ⚡ AI Agent Workflow
+- 🔄 Use CrewAI or AutoGen Framework
+- 💯 100+ LLM Support
+- 💻 Chat with ENTIRE Codebase
+- 🎨 Interactive UIs
+- 📄 YAML-based Configuration
+- 🛠️ Custom Tool Integration
+- 🔍 Internet Search Capability (using Crawl4AI and Tavily)
+- 🖼️ Vision Language Model (VLM) Support
+- 🎙️ Real-time Voice Interaction
+
+## Using No Code
+
+### Auto Mode:
+```bash
+pip install praisonai
+export OPENAI_API_KEY=xxxxxxxxxxxxxxxxxxxxxx
+praisonai --auto create a movie script about Robots in Mars
+```
+
+### Initialise Mode:
+```bash
+pip install praisonai
+export OPENAI_API_KEY=xxxxxxxxxxxxxxxxxxxxxx
+praisonai --init create a movie script about Robots in Mars
+praisonai
+```
+
+## Using Coding
+
+Light weight package dedicated for coding:
+```bash
+pip install praisonaiagents
+```
+
+```bash
+export OPENAI_API_KEY=xxxxxxxxxxxxxxxxxxxxxx
+```
+
+Create app.py file and add the code below:
+```python
+from praisonaiagents import Agent, Task, PraisonAIAgents
+
+# 1. Create agents
+researcher = Agent(
+    name="Researcher",
+    role="Senior Research Analyst",
+    goal="Uncover cutting-edge developments in AI and data science",
+    backstory="""You are an expert at a technology research group,
+    skilled in identifying trends and analyzing complex data.""",
+    verbose=True,
+    llm="gpt-4o",
+    markdown=True
+)
+writer = Agent(
+    name="Writer",
+    role="Tech Content Strategist",
+    goal="Craft compelling content on tech advancements",
+    backstory="""You are a content strategist known for
+    making complex tech topics interesting and easy to understand.""",
+    llm="gpt-4o",
+    markdown=True
+)
+
+# 2. Define Tasks
+task1 = Task(
+    name="research_task",
+    description="""Analyze 2024's AI advancements.
+    Find major trends, new technologies, and their effects.""",
+    expected_output="""A detailed report on 2024 AI advancements""",
+    agent=researcher
+)
+
+task2 = Task(
+    name="writing_task",
+    description="""Create a blog post about major AI advancements using the insights you have.
+    Make it interesting, clear, and suited for tech enthusiasts.
+    It should be at least 4 paragraphs long.""",
+    expected_output="A blog post of at least 4 paragraphs",
+    agent=writer,
+)
+
+agents = PraisonAIAgents(
+    agents=[researcher, writer],
+    tasks=[task1, task2],
+    verbose=False,
+    process="hierarchical",
+    manager_llm="gpt-4o"
+)
+
+result = agents.start()
+```
+
+Run:
+```bash
+python app.py
+```
+
+## Ollama Integration
+```bash
+export OPENAI_BASE_URL=http://localhost:11434/v1
+```
+
+## Groq Integration
+Replace xxxx with Groq API KEY:
+```bash
+export OPENAI_API_KEY=xxxxxxxxxxx
+export OPENAI_BASE_URL=https://api.groq.com/openai/v1
+```
+
+## Logging
+```bash
+export LOGLEVEL=info
+```
+
+Advanced logging:
+```bash
+export LOGLEVEL=debug
+```
 
 <div align="center">
   <picture>
@@ -195,18 +355,7 @@ When installing with `pip install "praisonai[autogen]"`, you get:
 - Multi-agent conversation capabilities
 - Code execution environment
 
-## Key Features
 
-- 🤖 Automated AI Agents Creation
-- 🔄 Use CrewAI or AutoGen Framework
-- 💯 100+ LLM Support
-- 💻 Chat with ENTIRE Codebase
-- 🖥️ Interactive UIs
-- 📄 YAML-based Configuration
-- 🛠️ Custom Tool Integration
-- 🔍 Internet Search Capability (using Crawl4AI and Tavily)
-- 👁️ Vision Language Model (VLM) Support
-- 🎙️ Real-time Voice Interaction
 
 ## TL;DR Multi Agents
 
@@ -418,33 +567,7 @@ if __name__ == "__main__":
 
 ## Commands to Install Dependencies:
 
-1. **Install all dependencies, including dev dependencies:**
-
-   ```sh
-   poetry install
-   ```
-
-2. **Install only documentation dependencies:**
-
-   ```sh
-   poetry install --with docs
-   ```
-
-3. **Install only test dependencies:**
-
-   ```sh
-   poetry install --with test
-   ```
-
-4. **Install only dev dependencies:**
-
-   ```sh
-   poetry install --with dev
-   ```
-
-This configuration ensures that your development dependencies are correctly categorized and installed as needed.
-
-### Using uv (Fast Python Package Installer)
+### Using uv
 ```bash
 # Install uv if you haven't already
 pip install uv
@@ -496,3 +619,55 @@ Praison AI is an open-sourced software licensed under the **[MIT license](https:
 
 Praison AI is an open-sourced software licensed under the **[MIT license](https://opensource.org/licenses/MIT)**.
 
+## Local Docker Development with Live Reload
+
+To facilitate local development with live reload, you can use Docker. Follow the steps below:
+
+1. **Create a `Dockerfile.dev`**:
+   ```dockerfile
+   FROM python:3.11-slim
+
+   WORKDIR /app
+
+   COPY . .
+
+   RUN pip install flask praisonai==2.0.18 watchdog
+
+   EXPOSE 5555
+
+   ENV FLASK_ENV=development
+
+   CMD ["flask", "run", "--host=0.0.0.0"]
+   ```
+
+2. **Create a `docker-compose.yml`**:
+   ```yaml
+   version: '3.8'
+
+   services:
+     app:
+       build:
+         context: .
+         dockerfile: Dockerfile.dev
+       volumes:
+         - .:/app
+       ports:
+         - "5555:5555"
+       environment:
+         FLASK_ENV: development
+       command: flask run --host=0.0.0.0
+
+     watch:
+       image: alpine:latest
+       volumes:
+         - .:/app
+       command: sh -c "apk add --no-cache inotify-tools && while inotifywait -r -e modify,create,delete /app; do kill -HUP 1; done"
+   ```
+
+3. **Run Docker Compose**:
+   ```bash
+   docker-compose up
+   ```
+
+This setup will allow you to develop locally with live reload, making it easier to test and iterate on your code.
+