PraisonAI 0.0.59 (cp312-cp312-manylinux_2_35_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of PraisonAI might be problematic.
- praisonai/__init__.py +6 -0
- praisonai/__main__.py +10 -0
- praisonai/agents_generator.py +381 -0
- praisonai/auto.py +190 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/cli.py +416 -0
- praisonai/deploy.py +138 -0
- praisonai/inbuilt_tools/__init__.py +2 -0
- praisonai/inbuilt_tools/autogen_tools.py +209 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +128 -0
- praisonai/public/android-chrome-192x192.png +0 -0
- praisonai/public/android-chrome-512x512.png +0 -0
- praisonai/public/apple-touch-icon.png +0 -0
- praisonai/public/fantasy.svg +3 -0
- praisonai/public/favicon-16x16.png +0 -0
- praisonai/public/favicon-32x32.png +0 -0
- praisonai/public/favicon.ico +0 -0
- praisonai/public/game.svg +3 -0
- praisonai/public/logo_dark.png +0 -0
- praisonai/public/logo_light.png +0 -0
- praisonai/public/movie.svg +3 -0
- praisonai/public/thriller.svg +3 -0
- praisonai/setup/__init__.py +0 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/config.yaml +60 -0
- praisonai/setup/post_install.py +20 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup/setup_conda_env.sh +72 -0
- praisonai/test.py +105 -0
- praisonai/train.py +276 -0
- praisonai/ui/chat.py +304 -0
- praisonai/ui/code.py +318 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/public/fantasy.svg +3 -0
- praisonai/ui/public/game.svg +3 -0
- praisonai/ui/public/logo_dark.png +0 -0
- praisonai/ui/public/logo_light.png +0 -0
- praisonai/ui/public/movie.svg +3 -0
- praisonai/ui/public/thriller.svg +3 -0
- praisonai/ui/sql_alchemy.py +638 -0
- praisonai/version.py +1 -0
- praisonai-0.0.59.dist-info/LICENSE +20 -0
- praisonai-0.0.59.dist-info/METADATA +344 -0
- praisonai-0.0.59.dist-info/RECORD +48 -0
- praisonai-0.0.59.dist-info/WHEEL +4 -0
- praisonai-0.0.59.dist-info/entry_points.txt +5 -0
praisonai/__init__.py
ADDED
praisonai/__main__.py
ADDED
praisonai/agents_generator.py
ADDED
@@ -0,0 +1,381 @@
# praisonai/agents_generator.py

import sys
from .version import __version__
import yaml, os
from rich import print
from dotenv import load_dotenv
from crewai import Agent, Task, Crew
from crewai.telemetry import Telemetry
load_dotenv()
import autogen
import argparse
from .auto import AutoGenerator
from praisonai_tools import (
    CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, DirectoryReadTool,
    FileReadTool, TXTSearchTool, JSONSearchTool, MDXSearchTool, PDFSearchTool, RagTool,
    ScrapeElementFromWebsiteTool, ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool,
    YoutubeVideoSearchTool
)
from .inbuilt_tools import *
from .inc import PraisonAIModel
import inspect
from pathlib import Path
import importlib
import importlib.util
from praisonai_tools import BaseTool
import os
import logging

agentops_exists = False
try:
    import agentops
    agentops_exists = True
except ImportError:
    agentops_exists = False

os.environ["OTEL_SDK_DISABLED"] = "true"

def noop(*args, **kwargs):
    pass

def disable_crewai_telemetry():
    for attr in dir(Telemetry):
        if callable(getattr(Telemetry, attr)) and not attr.startswith("__"):
            setattr(Telemetry, attr, noop)

disable_crewai_telemetry()

class AgentsGenerator:
    def __init__(self, agent_file, framework, config_list, log_level=None, agent_callback=None, task_callback=None, agent_yaml=None):
        """
        Initialize the AgentsGenerator object.

        Parameters:
            agent_file (str): The path to the agent file.
            framework (str): The framework to be used for the agents.
            config_list (list): A list of configurations for the agents.
            log_level (int, optional): The logging level to use. Defaults to logging.INFO.
            agent_callback (callable, optional): A callback function to be executed after each agent step.
            task_callback (callable, optional): A callback function to be executed after each tool run.
            agent_yaml (str, optional): The content of the YAML file. Defaults to None.

        Attributes:
            agent_file (str): The path to the agent file.
            framework (str): The framework to be used for the agents.
            config_list (list): A list of configurations for the agents.
            log_level (int): The logging level to use.
            agent_callback (callable, optional): A callback function to be executed after each agent step.
            task_callback (callable, optional): A callback function to be executed after each tool run.
        """
        self.agent_file = agent_file
        self.framework = framework
        self.config_list = config_list
        self.log_level = log_level
        self.agent_callback = agent_callback
        self.task_callback = task_callback
        self.agent_yaml = agent_yaml
        self.log_level = log_level or logging.getLogger().getEffectiveLevel()
        if self.log_level == logging.NOTSET:
            self.log_level = os.environ.get('LOGLEVEL', 'INFO').upper()

        logging.basicConfig(level=self.log_level, format='%(asctime)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self.log_level)

    def is_function_or_decorated(self, obj):
        """
        Checks if the given object is a function or has a __call__ method.

        Parameters:
            obj (object): The object to be checked.

        Returns:
            bool: True if the object is a function or has a __call__ method, False otherwise.
        """
        return inspect.isfunction(obj) or hasattr(obj, '__call__')

    def load_tools_from_module(self, module_path):
        """
        Loads tools from a specified module path.

        Parameters:
            module_path (str): The path to the module containing the tools.

        Returns:
            dict: A dictionary containing the names of the tools as keys and the corresponding functions or objects as values.

        Raises:
            FileNotFoundError: If the specified module path does not exist.
        """
        spec = importlib.util.spec_from_file_location("tools_module", module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return {name: obj for name, obj in inspect.getmembers(module, self.is_function_or_decorated)}

    def load_tools_from_module_class(self, module_path):
        """
        Loads tools from a specified module path containing classes that inherit from BaseTool or are part of the langchain_community.tools package.

        Parameters:
            module_path (str): The path to the module containing the tools.

        Returns:
            dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

        Raises:
            FileNotFoundError: If the specified module path does not exist.
        """
        spec = importlib.util.spec_from_file_location("tools_module", module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return {name: obj() for name, obj in inspect.getmembers(module, lambda x: inspect.isclass(x) and (x.__module__.startswith('langchain_community.tools') or issubclass(x, BaseTool)) and x is not BaseTool)}

    def load_tools_from_package(self, package_path):
        """
        Loads tools from a specified package path containing modules with functions or classes.

        Parameters:
            package_path (str): The path to the package containing the tools.

        Returns:
            dict: A dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.

        Raises:
            FileNotFoundError: If the specified package path does not exist.

        This function iterates through all the .py files in the specified package path, excluding those that start with "__". For each file, it imports the corresponding module and checks if it contains any functions or classes that can be loaded as tools. The function then returns a dictionary containing the names of the tools as keys and the corresponding initialized instances of the classes as values.
        """
        tools_dict = {}
        for module_file in os.listdir(package_path):
            if module_file.endswith('.py') and not module_file.startswith('__'):
                module_name = f"{package_path.name}.{module_file[:-3]}"  # Remove .py for import
                module = importlib.import_module(module_name)
                for name, obj in inspect.getmembers(module, self.is_function_or_decorated):
                    tools_dict[name] = obj
        return tools_dict

    def generate_crew_and_kickoff(self):
        """
        Generates a crew of agents and initiates tasks based on the provided configuration.

        Parameters:
            agent_file (str): The path to the agent file.
            framework (str): The framework to be used for the agents.
            config_list (list): A list of configurations for the agents.

        Returns:
            str: The output of the tasks performed by the crew of agents.

        Raises:
            FileNotFoundError: If the specified agent file does not exist.

        This function first loads the agent configuration from the specified file. It then initializes the tools required for the agents based on the specified framework. If the specified framework is "autogen", it loads the LLM configuration dynamically and creates an AssistantAgent for each role in the configuration. It then adds tools to the agents if specified in the configuration. Finally, it prepares tasks for the agents based on the configuration and initiates the tasks using the crew of agents. If the specified framework is not "autogen", it creates a crew of agents and initiates tasks based on the configuration.
        """
        if self.agent_yaml:
            config = yaml.safe_load(self.agent_yaml)
        else:
            if self.agent_file == '/app/api:app' or self.agent_file == 'api:app':
                self.agent_file = 'agents.yaml'
            try:
                with open(self.agent_file, 'r') as f:
                    config = yaml.safe_load(f)
            except FileNotFoundError:
                print(f"File not found: {self.agent_file}")
                return

        topic = config['topic']
        tools_dict = {
            'CodeDocsSearchTool': CodeDocsSearchTool(),
            'CSVSearchTool': CSVSearchTool(),
            'DirectorySearchTool': DirectorySearchTool(),
            'DOCXSearchTool': DOCXSearchTool(),
            'DirectoryReadTool': DirectoryReadTool(),
            'FileReadTool': FileReadTool(),
            # 'GithubSearchTool': GithubSearchTool(),
            # 'SeperDevTool': SeperDevTool(),
            'TXTSearchTool': TXTSearchTool(),
            'JSONSearchTool': JSONSearchTool(),
            'MDXSearchTool': MDXSearchTool(),
            'PDFSearchTool': PDFSearchTool(),
            # 'PGSearchTool': PGSearchTool(),
            'RagTool': RagTool(),
            'ScrapeElementFromWebsiteTool': ScrapeElementFromWebsiteTool(),
            'ScrapeWebsiteTool': ScrapeWebsiteTool(),
            'WebsiteSearchTool': WebsiteSearchTool(),
            'XMLSearchTool': XMLSearchTool(),
            'YoutubeChannelSearchTool': YoutubeChannelSearchTool(),
            'YoutubeVideoSearchTool': YoutubeVideoSearchTool(),
        }
        root_directory = os.getcwd()
        tools_py_path = os.path.join(root_directory, 'tools.py')
        tools_dir_path = Path(root_directory) / 'tools'

        if os.path.isfile(tools_py_path):
            tools_dict.update(self.load_tools_from_module_class(tools_py_path))
            self.logger.debug("tools.py exists in the root directory. Loading tools.py and skipping tools folder.")
        elif tools_dir_path.is_dir():
            tools_dict.update(self.load_tools_from_module_class(tools_dir_path))
            self.logger.debug("tools folder exists in the root directory")

        framework = self.framework or config.get('framework')

        agents = {}
        tasks = []
        if framework == "autogen":
            # Load the LLM configuration dynamically
            # print(self.config_list)
            llm_config = {"config_list": self.config_list}

            if agentops_exists:
                agentops.init(os.environ.get("AGENTOPS_API_KEY"), tags=["autogen"])
            # Assuming the user proxy agent is set up as per your requirements
            user_proxy = autogen.UserProxyAgent(
                name="User",
                human_input_mode="NEVER",
                is_termination_msg=lambda x: (x.get("content") or "").rstrip().rstrip(".").lower().endswith("terminate") or "TERMINATE" in (x.get("content") or ""),
                code_execution_config={
                    "work_dir": "coding",
                    "use_docker": False,
                },
                # additional setup for the user proxy agent
            )

            for role, details in config['roles'].items():
                agent_name = details['role'].format(topic=topic).replace("{topic}", topic)
                agent_goal = details['goal'].format(topic=topic)
                # Creating an AssistantAgent for each role dynamically
                agents[role] = autogen.AssistantAgent(
                    name=agent_name,
                    llm_config=llm_config,
                    system_message=details['backstory'].format(topic=topic)+". Must Reply \"TERMINATE\" in the end when everything is done.",
                )
                for tool in details.get('tools', []):
                    if tool in tools_dict:
                        try:
                            tool_class = globals()[f'autogen_{type(tools_dict[tool]).__name__}']
                            print(f"Found {tool_class.__name__} for {tool}")
                        except KeyError:
                            print(f"Warning: autogen_{type(tools_dict[tool]).__name__} function not found. Skipping this tool.")
                            continue
                        tool_class(agents[role], user_proxy)

                # Preparing tasks for initiate_chats
                for task_name, task_details in details.get('tasks', {}).items():
                    description_filled = task_details['description'].format(topic=topic)
                    expected_output_filled = task_details['expected_output'].format(topic=topic)

                    chat_task = {
                        "recipient": agents[role],
                        "message": description_filled,
                        "summary_method": "last_msg",
                        # Additional fields like carryover can be added based on dependencies
                    }
                    tasks.append(chat_task)
            response = user_proxy.initiate_chats(tasks)
            result = "### Output ###\n"+response[-1].summary if hasattr(response[-1], 'summary') else ""
            if agentops_exists:
                agentops.end_session("Success")
        else:  # framework=crewai
            if agentops_exists:
                agentops.init(os.environ.get("AGENTOPS_API_KEY"), tags=["crewai"])

            tasks_dict = {}

            for role, details in config['roles'].items():
                role_filled = details['role'].format(topic=topic)
                goal_filled = details['goal'].format(topic=topic)
                backstory_filled = details['backstory'].format(topic=topic)

                # Adding tools to the agent if exists
                agent_tools = [tools_dict[tool] for tool in details.get('tools', []) if tool in tools_dict]

                llm_model = details.get('llm')  # Get the llm configuration
                if llm_model:
                    llm = PraisonAIModel(
                        model=llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
                    ).get_model()
                else:
                    llm = PraisonAIModel().get_model()

                function_calling_llm_model = details.get('function_calling_llm')
                if function_calling_llm_model:
                    function_calling_llm = PraisonAIModel(
                        model=function_calling_llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o")),
                    ).get_model()
                else:
                    function_calling_llm = PraisonAIModel().get_model()

                agent = Agent(
                    role=role_filled,
                    goal=goal_filled,
                    backstory=backstory_filled,
                    tools=agent_tools,
                    allow_delegation=details.get('allow_delegation', False),
                    llm=llm,
                    function_calling_llm=function_calling_llm,
                    max_iter=details.get('max_iter', 15),
                    max_rpm=details.get('max_rpm'),
                    max_execution_time=details.get('max_execution_time'),
                    verbose=details.get('verbose', True),
                    cache=details.get('cache', True),
                    system_template=details.get('system_template'),
                    prompt_template=details.get('prompt_template'),
                    response_template=details.get('response_template'),
                )

                # Set agent callback if provided
                if self.agent_callback:
                    agent.step_callback = self.agent_callback

                agents[role] = agent

                for task_name, task_details in details.get('tasks', {}).items():
                    description_filled = task_details['description'].format(topic=topic)
                    expected_output_filled = task_details['expected_output'].format(topic=topic)

                    task = Task(
                        description=description_filled,  # Clear, concise statement of what the task entails
                        expected_output=expected_output_filled,  # Detailed description of what task's completion looks like
                        agent=agent,  # The agent responsible for the task
                        tools=task_details.get('tools', []),  # Functions or capabilities the agent can utilize
                        async_execution=task_details.get('async_execution') if task_details.get('async_execution') is not None else False,  # Execute asynchronously if set
                        context=[],  ## TODO:
                        config=task_details.get('config') if task_details.get('config') is not None else {},  # Additional configuration details
                        output_json=task_details.get('output_json') if task_details.get('output_json') is not None else None,  # Outputs a JSON object
                        output_pydantic=task_details.get('output_pydantic') if task_details.get('output_pydantic') is not None else None,  # Outputs a Pydantic model object
                        output_file=task_details.get('output_file') if task_details.get('output_file') is not None else "",  # Saves the task output to a file
                        callback=task_details.get('callback') if task_details.get('callback') is not None else None,  # Python callable executed with the task's output
                        human_input=task_details.get('human_input') if task_details.get('human_input') is not None else False,  # Indicates if the task requires human feedback
                        create_directory=task_details.get('create_directory') if task_details.get('create_directory') is not None else False  # Indicates if a directory needs to be created
                    )

                    # Set tool callback if provided
                    if self.task_callback:
                        task.callback = self.task_callback

                    tasks.append(task)
                    tasks_dict[task_name] = task

            for role, details in config['roles'].items():
                for task_name, task_details in details.get('tasks', {}).items():
                    task = tasks_dict[task_name]
                    context_tasks = [tasks_dict[ctx] for ctx in task_details.get('context', []) if ctx in tasks_dict]
                    task.context = context_tasks

            crew = Crew(
                agents=list(agents.values()),
                tasks=tasks,
                verbose=2
            )

            self.logger.debug("Final Crew Configuration:")
            self.logger.debug(f"Agents: {crew.agents}")
            self.logger.debug(f"Tasks: {crew.tasks}")

            response = crew.kickoff()
            result = f"### Task Output ###\n{response}"
            if agentops_exists:
                agentops.end_session("Success")
        return result
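For orientation only (not part of the package contents above): a minimal usage sketch of AgentsGenerator, assuming an agents.yaml in the working directory, an OPENAI_API_KEY in the environment, and a config_list shaped like the default one built in auto.py.

# Hypothetical usage sketch, not shipped in the wheel.
import os
from praisonai.agents_generator import AgentsGenerator

config_list = [
    {
        "model": os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
        "base_url": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
        "api_key": os.environ.get("OPENAI_API_KEY"),
    }
]

generator = AgentsGenerator(agent_file="agents.yaml", framework="crewai", config_list=config_list)
result = generator.generate_crew_and_kickoff()
print(result)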
praisonai/auto.py
ADDED
@@ -0,0 +1,190 @@
from openai import OpenAI
from pydantic import BaseModel
from typing import Dict, List, Optional
import instructor
import os
import json
import yaml
from rich import print
import logging
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper(), format='%(asctime)s - %(levelname)s - %(message)s')

# Define Pydantic models outside of the generate method
class TaskDetails(BaseModel):
    description: str
    expected_output: str

class RoleDetails(BaseModel):
    role: str
    goal: str
    backstory: str
    tasks: Dict[str, TaskDetails]
    tools: List[str]

class TeamStructure(BaseModel):
    roles: Dict[str, RoleDetails]

class AutoGenerator:
    def __init__(self, topic="Movie Story writing about AI", agent_file="test.yaml", framework="crewai", config_list: Optional[List[Dict]] = None):
        """
        Initialize the AutoGenerator class with the specified topic, agent file, and framework.
        Note: autogen framework is different from this AutoGenerator class.

        Args:
            topic (str, optional): The topic for the generated team structure. Defaults to "Movie Story writing about AI".
            agent_file (str, optional): The name of the YAML file to save the generated team structure. Defaults to "test.yaml".
            framework (str, optional): The framework for the generated team structure. Defaults to "crewai".
            config_list (Optional[List[Dict]], optional): A list containing the configuration details for the OpenAI API.
                If None, it defaults to using environment variables or hardcoded values.
        Attributes:
            config_list (list): A list containing the configuration details for the OpenAI API.
            topic (str): The specified topic for the generated team structure.
            agent_file (str): The specified name of the YAML file to save the generated team structure.
            framework (str): The specified framework for the generated team structure.
            client (instructor.Client): An instance of the instructor.Client class initialized with the specified OpenAI API configuration.
        """
        self.config_list = config_list or [
            {
                'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
                'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
                'api_key': os.environ.get("OPENAI_API_KEY")
            }
        ]
        self.topic = topic
        self.agent_file = agent_file
        self.framework = framework or "crewai"
        self.client = instructor.patch(
            OpenAI(
                base_url=self.config_list[0]['base_url'],
                api_key=os.getenv("OPENAI_API_KEY"),
            ),
            mode=instructor.Mode.JSON,
        )

    def generate(self):
        """
        Generates a team structure for the specified topic.

        Args:
            None

        Returns:
            str: The full path of the YAML file containing the generated team structure.

        Raises:
            Exception: If the generation process fails.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            path = generator.generate()
            print(path)
        """
        response = self.client.chat.completions.create(
            model=self.config_list[0]['model'],
            response_model=TeamStructure,
            max_retries=10,
            messages=[
                {"role": "system", "content": "You are a helpful assistant designed to output complex team structures."},
                {"role": "user", "content": self.get_user_content()}
            ]
        )
        json_data = json.loads(response.model_dump_json())
        self.convert_and_save(json_data)
        full_path = os.path.abspath(self.agent_file)
        return full_path

    def convert_and_save(self, json_data):
        """Converts the provided JSON data into the desired YAML format and saves it to a file.

        Args:
            json_data (dict): The JSON data representing the team structure.
            topic (str, optional): The topic to be inserted into the YAML. Defaults to "Artificial Intelligence".
            agent_file (str, optional): The name of the YAML file to save. Defaults to "test.yaml".
        """

        yaml_data = {
            "framework": self.framework,
            "topic": self.topic,
            "roles": {},
            "dependencies": []
        }

        for role_id, role_details in json_data['roles'].items():
            yaml_data['roles'][role_id] = {
                "backstory": "" + role_details['backstory'],
                "goal": role_details['goal'],
                "role": role_details['role'],
                "tasks": {},
                # "tools": role_details.get('tools', []),
                "tools": ['']
            }

            for task_id, task_details in role_details['tasks'].items():
                yaml_data['roles'][role_id]['tasks'][task_id] = {
                    "description": "" + task_details['description'],
                    "expected_output": "" + task_details['expected_output']
                }

        # Save to YAML file, maintaining the order
        with open(self.agent_file, 'w') as f:
            yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)

    def get_user_content(self):
        """
        Generates a prompt for the OpenAI API to generate a team structure.

        Args:
            None

        Returns:
            str: The prompt for the OpenAI API.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            prompt = generator.get_user_content()
            print(prompt)
        """
        user_content = """Generate a team structure for \"""" + self.topic + """\" task.
No Input data will be provided to the team.
The team will work in sequence. First role will pass the output to the next role, and so on.
The last role will generate the final output.
Think step by step.
With maximum 3 roles, each with 1 task. Include role goals, backstories, task descriptions, and expected outputs.
List of Available Tools: CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, DirectoryReadTool, FileReadTool, TXTSearchTool, JSONSearchTool, MDXSearchTool, PDFSearchTool, RagTool, ScrapeElementFromWebsiteTool, ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool.
Only use Available Tools. Do Not use any other tools.
Example Below:
Use below example to understand the structure of the output.
The final role you create should satisfy the provided task: """ + self.topic + """.
{
    "roles": {
        "narrative_designer": {
            "role": "Narrative Designer",
            "goal": "Create AI storylines",
            "backstory": "Skilled in narrative development for AI, with a focus on story resonance.",
            "tools": ["ScrapeWebsiteTool"],
            "tasks": {
                "story_concept_development": {
                    "description": "Craft a unique AI story concept with depth and engagement using concept from this page the content https://www.asthebirdfliesblog.com/posts/how-to-write-book-story-development .",
                    "expected_output": "Document with narrative arcs, character bios, and settings."
                }
            }
        },
        "scriptwriter": {
            "role": "Scriptwriter",
            "goal": "Write scripts from AI concepts",
            "backstory": "Expert in dialogue and script structure, translating concepts into scripts.",
            "tasks": {
                "scriptwriting_task": {
                    "description": "Turn narrative concepts into scripts, including dialogue and scenes.",
                    "expected_output": "Production-ready script with dialogue and scene details."
                }
            }
        }
    }
}
"""
        return user_content


# generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
# print(generator.generate())
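Likewise a hedged end-to-end sketch (not part of the wheel): AutoGenerator writes a YAML team structure for a topic, and AgentsGenerator can then run it. This mirrors the commented example at the bottom of auto.py and assumes OPENAI_API_KEY is set in the environment.

# Hypothetical end-to-end sketch, not shipped in the wheel.
from praisonai.auto import AutoGenerator
from praisonai.agents_generator import AgentsGenerator

generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
agent_file = generator.generate()  # writes test.yaml and returns its absolute path

agents = AgentsGenerator(agent_file=agent_file, framework="crewai", config_list=generator.config_list)
print(agents.generate_crew_and_kickoff())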