PraisonAI 2.2.22__tar.gz → 2.2.24__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PraisonAI might be problematic. Click here for more details.
- {praisonai-2.2.22 → praisonai-2.2.24}/PKG-INFO +3 -2
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/auto.py +94 -21
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/cli.py +115 -5
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/deploy.py +1 -1
- praisonai-2.2.24/praisonai/scheduler.py +215 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/code.py +32 -9
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/realtimeclient/tools.py +46 -46
- {praisonai-2.2.22 → praisonai-2.2.24}/pyproject.toml +7 -5
- {praisonai-2.2.22 → praisonai-2.2.24}/README.md +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/README.md +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/__init__.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/__main__.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/agents_generator.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/api/call.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/chainlit_ui.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/inbuilt_tools/__init__.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/inbuilt_tools/autogen_tools.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/inc/__init__.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/inc/config.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/inc/models.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/android-chrome-192x192.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/android-chrome-512x512.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/apple-touch-icon.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/fantasy.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/favicon-16x16.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/favicon-32x32.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/favicon.ico +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/game.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/logo_dark.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/logo_light.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/movie.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/praison-ai-agents-architecture.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/thriller.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup/__init__.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup/build.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup/config.yaml +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup/post_install.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup/setup_conda_env.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup/setup_conda_env.sh +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/setup.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/test.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/train.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/train_vision.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/README.md +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/agents.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/callbacks.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/chat.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/colab.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/colab_chainlit.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/components/aicoder.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/chainlit.md +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/bn.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/en-US.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/gu.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/he-IL.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/hi.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/kn.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/ml.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/mr.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/ta.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/te.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/config/translations/zh-CN.json +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/context.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/database_config.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/db.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/fantasy.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/game.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/logo_dark.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/logo_light.png +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/movie.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/praison.css +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/public/thriller.svg +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/realtime.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/realtimeclient/__init__.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/realtimeclient/realtimedocs.txt +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/sql_alchemy.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/ui/tools.md +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/upload_vision.py +0 -0
- {praisonai-2.2.22 → praisonai-2.2.24}/praisonai/version.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.3
|
|
2
2
|
Name: PraisonAI
|
|
3
|
-
Version: 2.2.
|
|
3
|
+
Version: 2.2.24
|
|
4
4
|
Summary: PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration.
|
|
5
5
|
Author: Mervin Praison
|
|
6
6
|
Requires-Python: >=3.10
|
|
@@ -21,6 +21,7 @@ Provides-Extra: google
|
|
|
21
21
|
Provides-Extra: gradio
|
|
22
22
|
Provides-Extra: openai
|
|
23
23
|
Provides-Extra: realtime
|
|
24
|
+
Provides-Extra: train
|
|
24
25
|
Provides-Extra: ui
|
|
25
26
|
Requires-Dist: PyYAML (>=6.0)
|
|
26
27
|
Requires-Dist: agentops (>=0.3.12) ; extra == "agentops"
|
|
@@ -63,7 +64,7 @@ Requires-Dist: playwright (>=1.47.0) ; extra == "code"
|
|
|
63
64
|
Requires-Dist: plotly (>=5.24.0) ; extra == "realtime"
|
|
64
65
|
Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "autogen"
|
|
65
66
|
Requires-Dist: praisonai-tools (>=0.0.15) ; extra == "crewai"
|
|
66
|
-
Requires-Dist: praisonaiagents (>=0.0.
|
|
67
|
+
Requires-Dist: praisonaiagents (>=0.0.95)
|
|
67
68
|
Requires-Dist: pyautogen (>=0.2.19) ; extra == "autogen"
|
|
68
69
|
Requires-Dist: pydantic (<=2.10.1) ; extra == "chat"
|
|
69
70
|
Requires-Dist: pydantic (<=2.10.1) ; extra == "code"
|
|
@@ -121,12 +121,12 @@ Tools are not available for {framework}. To use tools, install:
|
|
|
121
121
|
mode=instructor.Mode.JSON,
|
|
122
122
|
)
|
|
123
123
|
|
|
124
|
-
def generate(self):
|
|
124
|
+
def generate(self, merge=False):
|
|
125
125
|
"""
|
|
126
126
|
Generates a team structure for the specified topic.
|
|
127
127
|
|
|
128
128
|
Args:
|
|
129
|
-
|
|
129
|
+
merge (bool): Whether to merge with existing agents.yaml file instead of overwriting.
|
|
130
130
|
|
|
131
131
|
Returns:
|
|
132
132
|
str: The full path of the YAML file containing the generated team structure.
|
|
@@ -149,45 +149,118 @@ Tools are not available for {framework}. To use tools, install:
|
|
|
149
149
|
]
|
|
150
150
|
)
|
|
151
151
|
json_data = json.loads(response.model_dump_json())
|
|
152
|
-
self.convert_and_save(json_data)
|
|
152
|
+
self.convert_and_save(json_data, merge=merge)
|
|
153
153
|
full_path = os.path.abspath(self.agent_file)
|
|
154
154
|
return full_path
|
|
155
155
|
|
|
156
|
-
def convert_and_save(self, json_data):
|
|
156
|
+
def convert_and_save(self, json_data, merge=False):
|
|
157
157
|
"""Converts the provided JSON data into the desired YAML format and saves it to a file.
|
|
158
158
|
|
|
159
159
|
Args:
|
|
160
160
|
json_data (dict): The JSON data representing the team structure.
|
|
161
|
-
|
|
162
|
-
agent_file (str, optional): The name of the YAML file to save. Defaults to "test.yaml".
|
|
161
|
+
merge (bool): Whether to merge with existing agents.yaml file instead of overwriting.
|
|
163
162
|
"""
|
|
164
163
|
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
164
|
+
# Handle merge functionality
|
|
165
|
+
if merge and os.path.exists(self.agent_file):
|
|
166
|
+
yaml_data = self.merge_with_existing_agents(json_data)
|
|
167
|
+
else:
|
|
168
|
+
# Original behavior: create new yaml_data structure
|
|
169
|
+
yaml_data = {
|
|
170
|
+
"framework": self.framework,
|
|
171
|
+
"topic": self.topic,
|
|
172
|
+
"roles": {},
|
|
173
|
+
"dependencies": []
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
for role_id, role_details in json_data['roles'].items():
|
|
177
|
+
yaml_data['roles'][role_id] = {
|
|
178
|
+
"backstory": "" + role_details['backstory'],
|
|
179
|
+
"goal": role_details['goal'],
|
|
180
|
+
"role": role_details['role'],
|
|
181
|
+
"tasks": {},
|
|
182
|
+
# "tools": role_details.get('tools', []),
|
|
183
|
+
"tools": ['']
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
for task_id, task_details in role_details['tasks'].items():
|
|
187
|
+
yaml_data['roles'][role_id]['tasks'][task_id] = {
|
|
188
|
+
"description": "" + task_details['description'],
|
|
189
|
+
"expected_output": "" + task_details['expected_output']
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
# Save to YAML file, maintaining the order
|
|
193
|
+
with open(self.agent_file, 'w') as f:
|
|
194
|
+
yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)
|
|
171
195
|
|
|
172
|
-
|
|
173
|
-
|
|
196
|
+
def merge_with_existing_agents(self, new_json_data):
|
|
197
|
+
"""
|
|
198
|
+
Merge existing agents.yaml with new auto-generated agents.
|
|
199
|
+
|
|
200
|
+
Args:
|
|
201
|
+
new_json_data (dict): The JSON data representing the new team structure.
|
|
202
|
+
|
|
203
|
+
Returns:
|
|
204
|
+
dict: The merged YAML data structure.
|
|
205
|
+
"""
|
|
206
|
+
try:
|
|
207
|
+
# Load existing agents.yaml
|
|
208
|
+
with open(self.agent_file, 'r') as f:
|
|
209
|
+
existing_data = yaml.safe_load(f)
|
|
210
|
+
|
|
211
|
+
if not existing_data:
|
|
212
|
+
# If existing file is empty, treat as new file
|
|
213
|
+
existing_data = {"roles": {}, "dependencies": []}
|
|
214
|
+
except (yaml.YAMLError, FileNotFoundError) as e:
|
|
215
|
+
logging.warning(f"Could not load existing agents file {self.agent_file}: {e}")
|
|
216
|
+
logging.warning("Creating new file instead of merging")
|
|
217
|
+
existing_data = {"roles": {}, "dependencies": []}
|
|
218
|
+
|
|
219
|
+
# Start with existing data structure
|
|
220
|
+
merged_data = existing_data.copy()
|
|
221
|
+
|
|
222
|
+
# Ensure required fields exist
|
|
223
|
+
if 'roles' not in merged_data:
|
|
224
|
+
merged_data['roles'] = {}
|
|
225
|
+
if 'dependencies' not in merged_data:
|
|
226
|
+
merged_data['dependencies'] = []
|
|
227
|
+
if 'framework' not in merged_data:
|
|
228
|
+
merged_data['framework'] = self.framework
|
|
229
|
+
|
|
230
|
+
# Handle topic merging
|
|
231
|
+
existing_topic = merged_data.get('topic', '')
|
|
232
|
+
new_topic = self.topic
|
|
233
|
+
if existing_topic and existing_topic != new_topic:
|
|
234
|
+
merged_data['topic'] = f"{existing_topic} + {new_topic}"
|
|
235
|
+
else:
|
|
236
|
+
merged_data['topic'] = new_topic
|
|
237
|
+
|
|
238
|
+
# Merge new roles with existing ones
|
|
239
|
+
for role_id, role_details in new_json_data['roles'].items():
|
|
240
|
+
# Check for conflicts and rename if necessary
|
|
241
|
+
final_role_id = role_id
|
|
242
|
+
counter = 1
|
|
243
|
+
while final_role_id in merged_data['roles']:
|
|
244
|
+
final_role_id = f"{role_id}_auto_{counter}"
|
|
245
|
+
counter += 1
|
|
246
|
+
|
|
247
|
+
# Add the new role
|
|
248
|
+
merged_data['roles'][final_role_id] = {
|
|
174
249
|
"backstory": "" + role_details['backstory'],
|
|
175
250
|
"goal": role_details['goal'],
|
|
176
251
|
"role": role_details['role'],
|
|
177
252
|
"tasks": {},
|
|
178
|
-
# "tools": role_details.get('tools', []),
|
|
179
253
|
"tools": ['']
|
|
180
254
|
}
|
|
181
|
-
|
|
255
|
+
|
|
256
|
+
# Add tasks for this role
|
|
182
257
|
for task_id, task_details in role_details['tasks'].items():
|
|
183
|
-
|
|
258
|
+
merged_data['roles'][final_role_id]['tasks'][task_id] = {
|
|
184
259
|
"description": "" + task_details['description'],
|
|
185
260
|
"expected_output": "" + task_details['expected_output']
|
|
186
261
|
}
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
with open(self.agent_file, 'w') as f:
|
|
190
|
-
yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)
|
|
262
|
+
|
|
263
|
+
return merged_data
|
|
191
264
|
|
|
192
265
|
def get_user_content(self):
|
|
193
266
|
"""
|
|
@@ -5,6 +5,7 @@ import argparse
|
|
|
5
5
|
from .version import __version__
|
|
6
6
|
import yaml
|
|
7
7
|
import os
|
|
8
|
+
import time
|
|
8
9
|
from rich import print
|
|
9
10
|
from dotenv import load_dotenv
|
|
10
11
|
load_dotenv()
|
|
@@ -25,6 +26,7 @@ CALL_MODULE_AVAILABLE = False
|
|
|
25
26
|
CREWAI_AVAILABLE = False
|
|
26
27
|
AUTOGEN_AVAILABLE = False
|
|
27
28
|
PRAISONAI_AVAILABLE = False
|
|
29
|
+
TRAIN_AVAILABLE = False
|
|
28
30
|
try:
|
|
29
31
|
# Create necessary directories and set CHAINLIT_APP_ROOT
|
|
30
32
|
if "CHAINLIT_APP_ROOT" not in os.environ:
|
|
@@ -71,6 +73,12 @@ try:
|
|
|
71
73
|
except ImportError:
|
|
72
74
|
pass
|
|
73
75
|
|
|
76
|
+
try:
|
|
77
|
+
from unsloth import FastLanguageModel
|
|
78
|
+
TRAIN_AVAILABLE = True
|
|
79
|
+
except ImportError:
|
|
80
|
+
pass
|
|
81
|
+
|
|
74
82
|
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'), format='%(asctime)s - %(levelname)s - %(message)s')
|
|
75
83
|
logging.getLogger('alembic').setLevel(logging.ERROR)
|
|
76
84
|
logging.getLogger('gradio').setLevel(logging.ERROR)
|
|
@@ -217,9 +225,66 @@ class PraisonAI:
|
|
|
217
225
|
# If no command or direct_prompt, preserve agent_file from constructor (don't overwrite)
|
|
218
226
|
|
|
219
227
|
if args.deploy:
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
228
|
+
if args.schedule or args.schedule_config:
|
|
229
|
+
# Scheduled deployment
|
|
230
|
+
from .scheduler import create_scheduler
|
|
231
|
+
|
|
232
|
+
# Load configuration from file if provided
|
|
233
|
+
config = {"max_retries": args.max_retries}
|
|
234
|
+
schedule_expr = args.schedule
|
|
235
|
+
provider = args.provider
|
|
236
|
+
|
|
237
|
+
if args.schedule_config:
|
|
238
|
+
try:
|
|
239
|
+
with open(args.schedule_config, 'r') as f:
|
|
240
|
+
file_config = yaml.safe_load(f)
|
|
241
|
+
|
|
242
|
+
# Extract deployment config
|
|
243
|
+
deploy_config = file_config.get('deployment', {})
|
|
244
|
+
schedule_expr = schedule_expr or deploy_config.get('schedule')
|
|
245
|
+
provider = deploy_config.get('provider', provider)
|
|
246
|
+
config['max_retries'] = deploy_config.get('max_retries', config['max_retries'])
|
|
247
|
+
|
|
248
|
+
# Apply environment variables if specified
|
|
249
|
+
env_vars = file_config.get('environment', {})
|
|
250
|
+
for key, value in env_vars.items():
|
|
251
|
+
os.environ[key] = str(value)
|
|
252
|
+
|
|
253
|
+
except FileNotFoundError:
|
|
254
|
+
print(f"Configuration file not found: {args.schedule_config}")
|
|
255
|
+
sys.exit(1)
|
|
256
|
+
except yaml.YAMLError as e:
|
|
257
|
+
print(f"Error parsing configuration file: {e}")
|
|
258
|
+
sys.exit(1)
|
|
259
|
+
|
|
260
|
+
if not schedule_expr:
|
|
261
|
+
print("Error: Schedule expression required. Use --schedule or specify in config file.")
|
|
262
|
+
sys.exit(1)
|
|
263
|
+
|
|
264
|
+
scheduler = create_scheduler(provider=provider, config=config)
|
|
265
|
+
|
|
266
|
+
print(f"Starting scheduled deployment with schedule: {schedule_expr}")
|
|
267
|
+
print(f"Provider: {provider}")
|
|
268
|
+
print(f"Max retries: {config['max_retries']}")
|
|
269
|
+
print("Press Ctrl+C to stop the scheduler")
|
|
270
|
+
|
|
271
|
+
if scheduler.start(schedule_expr, config['max_retries']):
|
|
272
|
+
try:
|
|
273
|
+
# Keep the main thread alive
|
|
274
|
+
while scheduler.is_running:
|
|
275
|
+
time.sleep(1)
|
|
276
|
+
except KeyboardInterrupt:
|
|
277
|
+
print("\nStopping scheduler...")
|
|
278
|
+
scheduler.stop()
|
|
279
|
+
print("Scheduler stopped successfully")
|
|
280
|
+
else:
|
|
281
|
+
print("Failed to start scheduler")
|
|
282
|
+
sys.exit(1)
|
|
283
|
+
else:
|
|
284
|
+
# One-time deployment (backward compatible)
|
|
285
|
+
from .deploy import CloudDeployer
|
|
286
|
+
deployer = CloudDeployer()
|
|
287
|
+
deployer.run_commands()
|
|
223
288
|
return
|
|
224
289
|
|
|
225
290
|
if getattr(args, 'chat', False):
|
|
@@ -242,6 +307,11 @@ class PraisonAI:
|
|
|
242
307
|
return
|
|
243
308
|
|
|
244
309
|
if args.command == 'train':
|
|
310
|
+
if not TRAIN_AVAILABLE:
|
|
311
|
+
print("[red]ERROR: Training dependencies not installed. Install with:[/red]")
|
|
312
|
+
print("\npip install \"praisonai[train]\"")
|
|
313
|
+
print("Or run: praisonai train init\n")
|
|
314
|
+
sys.exit(1)
|
|
245
315
|
package_root = os.path.dirname(os.path.abspath(__file__))
|
|
246
316
|
config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
|
|
247
317
|
|
|
@@ -347,7 +417,7 @@ class PraisonAI:
|
|
|
347
417
|
|
|
348
418
|
self.agent_file = "test.yaml"
|
|
349
419
|
generator = AutoGenerator(topic=self.topic, framework=self.framework, agent_file=self.agent_file)
|
|
350
|
-
self.agent_file = generator.generate()
|
|
420
|
+
self.agent_file = generator.generate(merge=getattr(args, 'merge', False))
|
|
351
421
|
agents_generator = AgentsGenerator(self.agent_file, self.framework, self.config_list)
|
|
352
422
|
result = agents_generator.generate_crew_and_kickoff()
|
|
353
423
|
print(result)
|
|
@@ -360,7 +430,7 @@ class PraisonAI:
|
|
|
360
430
|
|
|
361
431
|
self.agent_file = "agents.yaml"
|
|
362
432
|
generator = AutoGenerator(topic=self.topic, framework=self.framework, agent_file=self.agent_file)
|
|
363
|
-
self.agent_file = generator.generate()
|
|
433
|
+
self.agent_file = generator.generate(merge=getattr(args, 'merge', False))
|
|
364
434
|
print(f"File {self.agent_file} created successfully")
|
|
365
435
|
return f"File {self.agent_file} created successfully"
|
|
366
436
|
|
|
@@ -406,6 +476,36 @@ class PraisonAI:
|
|
|
406
476
|
'unittest' in sys.modules
|
|
407
477
|
)
|
|
408
478
|
|
|
479
|
+
# Check if we're being used as a library (not from praisonai CLI)
|
|
480
|
+
# Skip CLI parsing to avoid conflicts with applications like Fabric
|
|
481
|
+
is_library_usage = (
|
|
482
|
+
'praisonai' not in sys.argv[0] and
|
|
483
|
+
not in_test_env
|
|
484
|
+
)
|
|
485
|
+
|
|
486
|
+
if is_library_usage:
|
|
487
|
+
# Return default args when used as library to prevent CLI conflicts
|
|
488
|
+
default_args = argparse.Namespace()
|
|
489
|
+
default_args.framework = None
|
|
490
|
+
default_args.ui = None
|
|
491
|
+
default_args.auto = None
|
|
492
|
+
default_args.init = None
|
|
493
|
+
default_args.command = None
|
|
494
|
+
default_args.deploy = False
|
|
495
|
+
default_args.schedule = None
|
|
496
|
+
default_args.schedule_config = None
|
|
497
|
+
default_args.provider = "gcp"
|
|
498
|
+
default_args.max_retries = 3
|
|
499
|
+
default_args.model = None
|
|
500
|
+
default_args.llm = None
|
|
501
|
+
default_args.hf = None
|
|
502
|
+
default_args.ollama = None
|
|
503
|
+
default_args.dataset = "yahma/alpaca-cleaned"
|
|
504
|
+
default_args.realtime = False
|
|
505
|
+
default_args.call = False
|
|
506
|
+
default_args.public = False
|
|
507
|
+
return default_args
|
|
508
|
+
|
|
409
509
|
# Define special commands
|
|
410
510
|
special_commands = ['chat', 'code', 'call', 'realtime', 'train', 'ui']
|
|
411
511
|
|
|
@@ -416,6 +516,10 @@ class PraisonAI:
|
|
|
416
516
|
parser.add_argument("--init", nargs=argparse.REMAINDER, help="Initialize agents with optional topic")
|
|
417
517
|
parser.add_argument("command", nargs="?", help="Command to run or direct prompt")
|
|
418
518
|
parser.add_argument("--deploy", action="store_true", help="Deploy the application")
|
|
519
|
+
parser.add_argument("--schedule", type=str, help="Schedule deployment (e.g., 'daily', 'hourly', '*/6h', '3600')")
|
|
520
|
+
parser.add_argument("--schedule-config", type=str, help="Path to scheduling configuration file")
|
|
521
|
+
parser.add_argument("--provider", type=str, default="gcp", help="Deployment provider (gcp, aws, azure)")
|
|
522
|
+
parser.add_argument("--max-retries", type=int, default=3, help="Maximum retry attempts for scheduled deployments")
|
|
419
523
|
parser.add_argument("--model", type=str, help="Model name")
|
|
420
524
|
parser.add_argument("--llm", type=str, help="LLM model to use for direct prompts")
|
|
421
525
|
parser.add_argument("--hf", type=str, help="Hugging Face model name")
|
|
@@ -424,6 +528,7 @@ class PraisonAI:
|
|
|
424
528
|
parser.add_argument("--realtime", action="store_true", help="Start the realtime voice interaction interface")
|
|
425
529
|
parser.add_argument("--call", action="store_true", help="Start the PraisonAI Call server")
|
|
426
530
|
parser.add_argument("--public", action="store_true", help="Use ngrok to expose the server publicly (only with --call)")
|
|
531
|
+
parser.add_argument("--merge", action="store_true", help="Merge existing agents.yaml with auto-generated agents instead of overwriting")
|
|
427
532
|
|
|
428
533
|
# If we're in a test environment, parse with empty args to avoid pytest interference
|
|
429
534
|
if in_test_env:
|
|
@@ -509,6 +614,11 @@ class PraisonAI:
|
|
|
509
614
|
sys.exit(0)
|
|
510
615
|
|
|
511
616
|
elif args.command == 'train':
|
|
617
|
+
if not TRAIN_AVAILABLE:
|
|
618
|
+
print("[red]ERROR: Training dependencies not installed. Install with:[/red]")
|
|
619
|
+
print("\npip install \"praisonai[train]\"")
|
|
620
|
+
print("Or run: praisonai train init\n")
|
|
621
|
+
sys.exit(1)
|
|
512
622
|
package_root = os.path.dirname(os.path.abspath(__file__))
|
|
513
623
|
config_yaml_destination = os.path.join(os.getcwd(), 'config.yaml')
|
|
514
624
|
|
|
@@ -56,7 +56,7 @@ class CloudDeployer:
|
|
|
56
56
|
file.write("FROM python:3.11-slim\n")
|
|
57
57
|
file.write("WORKDIR /app\n")
|
|
58
58
|
file.write("COPY . .\n")
|
|
59
|
-
file.write("RUN pip install flask praisonai==2.2.
|
|
59
|
+
file.write("RUN pip install flask praisonai==2.2.24 gunicorn markdown\n")
|
|
60
60
|
file.write("EXPOSE 8080\n")
|
|
61
61
|
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
|
|
62
62
|
|
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import threading
|
|
3
|
+
import time
|
|
4
|
+
import logging
|
|
5
|
+
from datetime import datetime, timedelta
|
|
6
|
+
from typing import Union, Optional, Callable, Dict, Any
|
|
7
|
+
from abc import ABC, abstractmethod
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
class DeployerInterface(ABC):
|
|
12
|
+
"""Abstract interface for deployers to ensure provider compatibility."""
|
|
13
|
+
|
|
14
|
+
@abstractmethod
|
|
15
|
+
def deploy(self) -> bool:
|
|
16
|
+
"""Execute deployment. Returns True on success, False on failure."""
|
|
17
|
+
pass
|
|
18
|
+
|
|
19
|
+
class CloudDeployerAdapter(DeployerInterface):
|
|
20
|
+
"""Adapter for existing CloudDeployer to match interface."""
|
|
21
|
+
|
|
22
|
+
def __init__(self):
|
|
23
|
+
from .deploy import CloudDeployer
|
|
24
|
+
self._deployer = CloudDeployer()
|
|
25
|
+
|
|
26
|
+
def deploy(self) -> bool:
|
|
27
|
+
"""Execute deployment using CloudDeployer."""
|
|
28
|
+
try:
|
|
29
|
+
self._deployer.run_commands()
|
|
30
|
+
return True
|
|
31
|
+
except Exception as e:
|
|
32
|
+
logger.error(f"Deployment failed: {e}")
|
|
33
|
+
return False
|
|
34
|
+
|
|
35
|
+
class ScheduleParser:
|
|
36
|
+
"""Parse schedule expressions into intervals."""
|
|
37
|
+
|
|
38
|
+
@staticmethod
|
|
39
|
+
def parse(schedule_expr: str) -> int:
|
|
40
|
+
"""
|
|
41
|
+
Parse schedule expression and return interval in seconds.
|
|
42
|
+
|
|
43
|
+
Supported formats:
|
|
44
|
+
- "daily" -> 86400 seconds
|
|
45
|
+
- "hourly" -> 3600 seconds
|
|
46
|
+
- "*/30m" -> 1800 seconds (every 30 minutes)
|
|
47
|
+
- "*/6h" -> 21600 seconds (every 6 hours)
|
|
48
|
+
- "60" -> 60 seconds (plain number)
|
|
49
|
+
"""
|
|
50
|
+
schedule_expr = schedule_expr.strip().lower()
|
|
51
|
+
|
|
52
|
+
if schedule_expr == "daily":
|
|
53
|
+
return 86400
|
|
54
|
+
elif schedule_expr == "hourly":
|
|
55
|
+
return 3600
|
|
56
|
+
elif schedule_expr.isdigit():
|
|
57
|
+
return int(schedule_expr)
|
|
58
|
+
elif schedule_expr.startswith("*/"):
|
|
59
|
+
# Handle */30m, */6h patterns
|
|
60
|
+
interval_part = schedule_expr[2:]
|
|
61
|
+
if interval_part.endswith("m"):
|
|
62
|
+
minutes = int(interval_part[:-1])
|
|
63
|
+
return minutes * 60
|
|
64
|
+
elif interval_part.endswith("h"):
|
|
65
|
+
hours = int(interval_part[:-1])
|
|
66
|
+
return hours * 3600
|
|
67
|
+
elif interval_part.endswith("s"):
|
|
68
|
+
return int(interval_part[:-1])
|
|
69
|
+
else:
|
|
70
|
+
return int(interval_part)
|
|
71
|
+
else:
|
|
72
|
+
raise ValueError(f"Unsupported schedule format: {schedule_expr}")
|
|
73
|
+
|
|
74
|
+
class DeploymentScheduler:
|
|
75
|
+
"""
|
|
76
|
+
Minimal deployment scheduler with provider-agnostic design.
|
|
77
|
+
|
|
78
|
+
Features:
|
|
79
|
+
- Simple interval-based scheduling
|
|
80
|
+
- Thread-safe operation
|
|
81
|
+
- Extensible deployer factory pattern
|
|
82
|
+
- Minimal dependencies
|
|
83
|
+
"""
|
|
84
|
+
|
|
85
|
+
def __init__(self, schedule_config: Optional[Dict[str, Any]] = None):
|
|
86
|
+
self.config = schedule_config or {}
|
|
87
|
+
self.is_running = False
|
|
88
|
+
self._stop_event = threading.Event()
|
|
89
|
+
self._thread = None
|
|
90
|
+
self._deployer = None
|
|
91
|
+
|
|
92
|
+
def set_deployer(self, deployer: DeployerInterface):
|
|
93
|
+
"""Set custom deployer implementation."""
|
|
94
|
+
self._deployer = deployer
|
|
95
|
+
|
|
96
|
+
def _get_deployer(self) -> DeployerInterface:
|
|
97
|
+
"""Get deployer instance using factory pattern."""
|
|
98
|
+
if self._deployer:
|
|
99
|
+
return self._deployer
|
|
100
|
+
|
|
101
|
+
# Default to CloudDeployer for backward compatibility
|
|
102
|
+
return CloudDeployerAdapter()
|
|
103
|
+
|
|
104
|
+
def start(self, schedule_expr: str, max_retries: int = 3) -> bool:
|
|
105
|
+
"""
|
|
106
|
+
Start scheduled deployment.
|
|
107
|
+
|
|
108
|
+
Args:
|
|
109
|
+
schedule_expr: Schedule expression (e.g., "daily", "*/6h", "3600")
|
|
110
|
+
max_retries: Maximum retry attempts on failure
|
|
111
|
+
|
|
112
|
+
Returns:
|
|
113
|
+
True if scheduler started successfully
|
|
114
|
+
"""
|
|
115
|
+
if self.is_running:
|
|
116
|
+
logger.warning("Scheduler is already running")
|
|
117
|
+
return False
|
|
118
|
+
|
|
119
|
+
try:
|
|
120
|
+
interval = ScheduleParser.parse(schedule_expr)
|
|
121
|
+
self.is_running = True
|
|
122
|
+
self._stop_event.clear()
|
|
123
|
+
|
|
124
|
+
self._thread = threading.Thread(
|
|
125
|
+
target=self._run_schedule,
|
|
126
|
+
args=(interval, max_retries),
|
|
127
|
+
daemon=True
|
|
128
|
+
)
|
|
129
|
+
self._thread.start()
|
|
130
|
+
|
|
131
|
+
logger.info(f"Deployment scheduler started with {interval}s interval")
|
|
132
|
+
return True
|
|
133
|
+
|
|
134
|
+
except Exception as e:
|
|
135
|
+
logger.error(f"Failed to start scheduler: {e}")
|
|
136
|
+
self.is_running = False
|
|
137
|
+
return False
|
|
138
|
+
|
|
139
|
+
def stop(self) -> bool:
|
|
140
|
+
"""Stop the scheduler."""
|
|
141
|
+
if not self.is_running:
|
|
142
|
+
return True
|
|
143
|
+
|
|
144
|
+
self._stop_event.set()
|
|
145
|
+
if self._thread and self._thread.is_alive():
|
|
146
|
+
self._thread.join(timeout=5)
|
|
147
|
+
|
|
148
|
+
self.is_running = False
|
|
149
|
+
logger.info("Deployment scheduler stopped")
|
|
150
|
+
return True
|
|
151
|
+
|
|
152
|
+
def _run_schedule(self, interval: int, max_retries: int):
|
|
153
|
+
"""Internal method to run scheduled deployments."""
|
|
154
|
+
deployer = self._get_deployer()
|
|
155
|
+
|
|
156
|
+
while not self._stop_event.is_set():
|
|
157
|
+
logger.info("Starting scheduled deployment")
|
|
158
|
+
|
|
159
|
+
success = False
|
|
160
|
+
for attempt in range(max_retries):
|
|
161
|
+
try:
|
|
162
|
+
if deployer.deploy():
|
|
163
|
+
logger.info(f"Deployment successful on attempt {attempt + 1}")
|
|
164
|
+
success = True
|
|
165
|
+
break
|
|
166
|
+
else:
|
|
167
|
+
logger.warning(f"Deployment failed on attempt {attempt + 1}")
|
|
168
|
+
except Exception as e:
|
|
169
|
+
logger.error(f"Deployment error on attempt {attempt + 1}: {e}")
|
|
170
|
+
|
|
171
|
+
if attempt < max_retries - 1:
|
|
172
|
+
time.sleep(30) # Wait before retry
|
|
173
|
+
|
|
174
|
+
if not success:
|
|
175
|
+
logger.error(f"Deployment failed after {max_retries} attempts")
|
|
176
|
+
|
|
177
|
+
# Wait for next scheduled time
|
|
178
|
+
self._stop_event.wait(interval)
|
|
179
|
+
|
|
180
|
+
def deploy_once(self) -> bool:
|
|
181
|
+
"""Execute a single deployment immediately."""
|
|
182
|
+
deployer = self._get_deployer()
|
|
183
|
+
try:
|
|
184
|
+
return deployer.deploy()
|
|
185
|
+
except Exception as e:
|
|
186
|
+
logger.error(f"One-time deployment failed: {e}")
|
|
187
|
+
return False
|
|
188
|
+
|
|
189
|
+
def create_scheduler(provider: str = "gcp", config: Optional[Dict[str, Any]] = None) -> DeploymentScheduler:
|
|
190
|
+
"""
|
|
191
|
+
Factory function to create scheduler for different providers.
|
|
192
|
+
|
|
193
|
+
Args:
|
|
194
|
+
provider: Deployment provider ("gcp", "aws", "azure", etc.)
|
|
195
|
+
config: Optional configuration dict
|
|
196
|
+
|
|
197
|
+
Returns:
|
|
198
|
+
Configured DeploymentScheduler instance
|
|
199
|
+
"""
|
|
200
|
+
scheduler = DeploymentScheduler(config)
|
|
201
|
+
|
|
202
|
+
# Provider-specific deployer setup can be added here
|
|
203
|
+
if provider == "gcp":
|
|
204
|
+
# Default CloudDeployer for GCP
|
|
205
|
+
pass
|
|
206
|
+
elif provider == "aws":
|
|
207
|
+
# Future: AWS deployer implementation
|
|
208
|
+
logger.warning("AWS provider not yet implemented, using default")
|
|
209
|
+
elif provider == "azure":
|
|
210
|
+
# Future: Azure deployer implementation
|
|
211
|
+
logger.warning("Azure provider not yet implemented, using default")
|
|
212
|
+
else:
|
|
213
|
+
logger.warning(f"Unknown provider {provider}, using default")
|
|
214
|
+
|
|
215
|
+
return scheduler
|
|
@@ -20,6 +20,7 @@ from chainlit.input_widget import TextInput
|
|
|
20
20
|
from chainlit.types import ThreadDict
|
|
21
21
|
import chainlit.data as cl_data
|
|
22
22
|
from litellm import acompletion
|
|
23
|
+
import litellm
|
|
23
24
|
from db import DatabaseManager
|
|
24
25
|
|
|
25
26
|
# Load environment variables
|
|
@@ -40,6 +41,15 @@ logger.addHandler(console_handler)
|
|
|
40
41
|
# Set the logging level for the logger
|
|
41
42
|
logger.setLevel(log_level)
|
|
42
43
|
|
|
44
|
+
# Configure litellm same as in llm.py
# Turn off verbose output and empty every globally-registered callback list
# so this UI process does not run hooks installed elsewhere in the package.
litellm.set_verbose = False
litellm.success_callback = []
# NOTE(review): _async_success_callback is a private litellm attribute —
# confirm it still exists when upgrading litellm.
litellm._async_success_callback = []
litellm.callbacks = []
# Let litellm drop or adapt parameters a given provider does not support
# instead of raising — presumably mirrors llm.py; verify against that file.
litellm.drop_params = True
litellm.modify_params = True
litellm.suppress_debug_messages = True
|
|
52
|
+
|
|
43
53
|
CHAINLIT_AUTH_SECRET = os.getenv("CHAINLIT_AUTH_SECRET")
|
|
44
54
|
|
|
45
55
|
if not CHAINLIT_AUTH_SECRET:
|
|
@@ -55,6 +65,17 @@ db_manager.initialize()
|
|
|
55
65
|
|
|
56
66
|
deleted_thread_ids = [] # type: List[str]
|
|
57
67
|
|
|
68
|
+
def _build_completion_params(model_name, **override_params):
|
|
69
|
+
"""Build parameters for litellm completion calls with proper model handling"""
|
|
70
|
+
params = {
|
|
71
|
+
"model": model_name,
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
# Override with any provided parameters
|
|
75
|
+
params.update(override_params)
|
|
76
|
+
|
|
77
|
+
return params
|
|
78
|
+
|
|
58
79
|
def save_setting(key: str, value: str):
|
|
59
80
|
"""Saves a setting to the database.
|
|
60
81
|
|
|
@@ -237,12 +258,12 @@ Context:
|
|
|
237
258
|
msg = cl.Message(content="")
|
|
238
259
|
await msg.send()
|
|
239
260
|
|
|
240
|
-
# Prepare the completion parameters
|
|
241
|
-
completion_params =
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
261
|
+
# Prepare the completion parameters using the helper function
|
|
262
|
+
completion_params = _build_completion_params(
|
|
263
|
+
model_name,
|
|
264
|
+
messages=message_history,
|
|
265
|
+
stream=True,
|
|
266
|
+
)
|
|
246
267
|
|
|
247
268
|
# If an image is uploaded, include it in the message
|
|
248
269
|
if image:
|
|
@@ -344,9 +365,11 @@ Context:
|
|
|
344
365
|
logger.error(f"Failed to parse function arguments: {function_args}")
|
|
345
366
|
|
|
346
367
|
second_response = await acompletion(
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
368
|
+
**_build_completion_params(
|
|
369
|
+
model_name,
|
|
370
|
+
stream=True,
|
|
371
|
+
messages=messages,
|
|
372
|
+
)
|
|
350
373
|
)
|
|
351
374
|
logger.debug(f"Second LLM response: {second_response}")
|
|
352
375
|
|
|
@@ -108,7 +108,7 @@ async def tavily_web_search_handler(query):
|
|
|
108
108
|
try:
|
|
109
109
|
response = tavily_client.search(query_with_date)
|
|
110
110
|
logger.debug(f"Tavily search response: {response}")
|
|
111
|
-
results = process_tavily_results(response)
|
|
111
|
+
results = await process_tavily_results(response)
|
|
112
112
|
except Exception as e:
|
|
113
113
|
logger.error(f"Error in Tavily search: {str(e)}")
|
|
114
114
|
results = await fallback_to_duckduckgo(query_with_date)
|
|
@@ -121,63 +121,63 @@ async def tavily_web_search_handler(query):
|
|
|
121
121
|
"results": results
|
|
122
122
|
})
|
|
123
123
|
|
|
124
|
-
def process_tavily_results(response):
|
|
125
|
-
|
|
126
|
-
crawler.warmup()
|
|
127
|
-
results = []
|
|
128
|
-
for result in response.get('results', []):
|
|
129
|
-
url = result.get('url')
|
|
130
|
-
if url:
|
|
131
|
-
try:
|
|
132
|
-
crawl_result = crawler.run(url=url)
|
|
133
|
-
results.append({
|
|
134
|
-
"content": result.get('content'),
|
|
135
|
-
"url": url,
|
|
136
|
-
"full_content": crawl_result.markdown
|
|
137
|
-
})
|
|
138
|
-
except Exception as e:
|
|
139
|
-
logger.error(f"Error crawling {url}: {str(e)}")
|
|
140
|
-
results.append({
|
|
141
|
-
"content": result.get('content'),
|
|
142
|
-
"url": url,
|
|
143
|
-
"full_content": "Error: Unable to crawl this URL"
|
|
144
|
-
})
|
|
145
|
-
return results
|
|
146
|
-
|
|
147
|
-
async def fallback_to_duckduckgo(query):
|
|
148
|
-
try:
|
|
149
|
-
with DDGS() as ddgs:
|
|
150
|
-
ddg_results = list(ddgs.text(query, max_results=5))
|
|
151
|
-
|
|
152
|
-
logger.debug(f"DuckDuckGo search results: {ddg_results}")
|
|
153
|
-
|
|
154
|
-
crawler = AsyncWebCrawler()
|
|
155
|
-
crawler.warmup()
|
|
124
|
+
async def process_tavily_results(response):
    """Enrich Tavily search results by crawling each result URL.

    Args:
        response: Tavily search response dict; entries of its "results"
            list are expected to carry "url" and "content" keys.

    Returns:
        list[dict]: One entry per search result with keys "content",
        "url" and "full_content" (crawled markdown, or a placeholder on
        crawl failure / missing URL).
    """
    async with AsyncWebCrawler() as crawler:
        results = []
        for result in response.get('results', []):
            url = result.get('url')
            content = result.get('content')
            if url:
                try:
                    crawl_result = await crawler.arun(url=url)
                    # Guard against the crawler returning None or an object
                    # without usable markdown.
                    full_content = crawl_result.markdown if crawl_result and hasattr(crawl_result, 'markdown') and crawl_result.markdown else "No content available"
                    results.append({
                        "content": content,
                        "url": url,
                        "full_content": full_content
                    })
                except Exception as e:
                    logger.error(f"Error crawling {url}: {str(e)}")
                    results.append({
                        "content": content,
                        "url": url,
                        "full_content": "Error: Unable to crawl this URL"
                    })
            else:
                # Fix: keep URL-less results instead of silently dropping
                # them, matching fallback_to_duckduckgo's placeholder entry.
                results.append({
                    "content": content,
                    "url": "N/A",
                    "full_content": "No URL provided for crawling"
                })
        return results
|
|
146
|
+
|
|
147
|
+
async def fallback_to_duckduckgo(query):
|
|
148
|
+
try:
|
|
149
|
+
with DDGS() as ddgs:
|
|
150
|
+
ddg_results = list(ddgs.text(query, max_results=5))
|
|
151
|
+
|
|
152
|
+
logger.debug(f"DuckDuckGo search results: {ddg_results}")
|
|
153
|
+
|
|
154
|
+
async with AsyncWebCrawler() as crawler:
|
|
155
|
+
results = []
|
|
156
|
+
|
|
157
|
+
for result in ddg_results:
|
|
158
|
+
url = result.get('href')
|
|
159
|
+
if url:
|
|
160
|
+
try:
|
|
161
|
+
crawl_result = await crawler.arun(url=url)
|
|
162
|
+
full_content = crawl_result.markdown if crawl_result and hasattr(crawl_result, 'markdown') and crawl_result.markdown else "No content available"
|
|
163
|
+
results.append({
|
|
164
|
+
"content": result.get('body'),
|
|
165
|
+
"url": url,
|
|
166
|
+
"full_content": full_content
|
|
167
|
+
})
|
|
168
|
+
except Exception as e:
|
|
169
|
+
logger.error(f"Error crawling {url}: {str(e)}")
|
|
170
|
+
results.append({
|
|
171
|
+
"content": result.get('body'),
|
|
172
|
+
"url": url,
|
|
173
|
+
"full_content": "Error: Unable to crawl this URL"
|
|
174
|
+
})
|
|
175
|
+
else:
|
|
176
|
+
results.append({
|
|
177
|
+
"content": result.get('body'),
|
|
178
|
+
"url": "N/A",
|
|
179
|
+
"full_content": "No URL provided for crawling"
|
|
180
|
+
})
|
|
181
181
|
|
|
182
182
|
return results
|
|
183
183
|
except Exception as e:
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "PraisonAI"
|
|
3
|
-
version = "2.2.
|
|
3
|
+
version = "2.2.24"
|
|
4
4
|
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
license = ""
|
|
@@ -12,7 +12,7 @@ dependencies = [
|
|
|
12
12
|
"rich>=13.7",
|
|
13
13
|
"markdown>=3.5",
|
|
14
14
|
"pyparsing>=3.0.0",
|
|
15
|
-
"praisonaiagents>=0.0.
|
|
15
|
+
"praisonaiagents>=0.0.95",
|
|
16
16
|
"python-dotenv>=0.19.0",
|
|
17
17
|
"instructor>=1.3.3",
|
|
18
18
|
"PyYAML>=6.0",
|
|
@@ -89,12 +89,13 @@ call = [
|
|
|
89
89
|
"rich",
|
|
90
90
|
"openai>=1.54.0",
|
|
91
91
|
]
|
|
92
|
+
train = []
|
|
92
93
|
crewai = ["crewai>=0.32.0", "praisonai-tools>=0.0.15"]
|
|
93
94
|
autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.15", "crewai"]
|
|
94
95
|
|
|
95
96
|
[tool.poetry]
|
|
96
97
|
name = "PraisonAI"
|
|
97
|
-
version = "2.2.
|
|
98
|
+
version = "2.2.24"
|
|
98
99
|
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
|
|
99
100
|
authors = ["Mervin Praison"]
|
|
100
101
|
license = ""
|
|
@@ -112,7 +113,7 @@ python = ">=3.10,<3.13"
|
|
|
112
113
|
rich = ">=13.7"
|
|
113
114
|
markdown = ">=3.5"
|
|
114
115
|
pyparsing = ">=3.0.0"
|
|
115
|
-
praisonaiagents = ">=0.0.
|
|
116
|
+
praisonaiagents = ">=0.0.95"
|
|
116
117
|
python-dotenv = ">=0.19.0"
|
|
117
118
|
instructor = ">=1.3.3"
|
|
118
119
|
PyYAML = ">=6.0"
|
|
@@ -146,6 +147,7 @@ sqlalchemy = {version = ">=2.0.36", optional = true}
|
|
|
146
147
|
playwright = {version = ">=1.47.0", optional = true}
|
|
147
148
|
openai = {version = ">=1.54.0", optional = true}
|
|
148
149
|
pydantic = {version = "<=2.10.1", optional = true}
|
|
150
|
+
# unsloth = {version = ">=2024.11.7", extras = ["colab-new"], optional = true}
|
|
149
151
|
|
|
150
152
|
[tool.poetry.group.docs.dependencies]
|
|
151
153
|
mkdocs = "*"
|
|
@@ -249,7 +251,7 @@ code = [
|
|
|
249
251
|
"playwright",
|
|
250
252
|
"pydantic"
|
|
251
253
|
]
|
|
252
|
-
train = [
|
|
254
|
+
train = []
|
|
253
255
|
realtime = [
|
|
254
256
|
"chainlit",
|
|
255
257
|
"litellm",
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{praisonai-2.2.22 → praisonai-2.2.24}/praisonai/public/praison-ai-agents-architecture-dark.png
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|