PraisonAI 2.0.61__cp313-cp313-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PraisonAI might be problematic. Click here for more details.
- praisonai/__init__.py +6 -0
- praisonai/__main__.py +10 -0
- praisonai/agents_generator.py +648 -0
- praisonai/api/call.py +292 -0
- praisonai/auto.py +238 -0
- praisonai/chainlit_ui.py +304 -0
- praisonai/cli.py +518 -0
- praisonai/deploy.py +138 -0
- praisonai/inbuilt_tools/__init__.py +24 -0
- praisonai/inbuilt_tools/autogen_tools.py +117 -0
- praisonai/inc/__init__.py +2 -0
- praisonai/inc/config.py +96 -0
- praisonai/inc/models.py +128 -0
- praisonai/public/android-chrome-192x192.png +0 -0
- praisonai/public/android-chrome-512x512.png +0 -0
- praisonai/public/apple-touch-icon.png +0 -0
- praisonai/public/fantasy.svg +3 -0
- praisonai/public/favicon-16x16.png +0 -0
- praisonai/public/favicon-32x32.png +0 -0
- praisonai/public/favicon.ico +0 -0
- praisonai/public/game.svg +3 -0
- praisonai/public/logo_dark.png +0 -0
- praisonai/public/logo_light.png +0 -0
- praisonai/public/movie.svg +3 -0
- praisonai/public/praison-ai-agents-architecture-dark.png +0 -0
- praisonai/public/praison-ai-agents-architecture.png +0 -0
- praisonai/public/thriller.svg +3 -0
- praisonai/setup/__init__.py +1 -0
- praisonai/setup/build.py +21 -0
- praisonai/setup/config.yaml +60 -0
- praisonai/setup/post_install.py +23 -0
- praisonai/setup/setup_conda_env.py +25 -0
- praisonai/setup/setup_conda_env.sh +72 -0
- praisonai/setup.py +16 -0
- praisonai/test.py +105 -0
- praisonai/train.py +276 -0
- praisonai/ui/README.md +21 -0
- praisonai/ui/agents.py +822 -0
- praisonai/ui/callbacks.py +57 -0
- praisonai/ui/chat.py +387 -0
- praisonai/ui/code.py +440 -0
- praisonai/ui/colab.py +474 -0
- praisonai/ui/colab_chainlit.py +81 -0
- praisonai/ui/components/aicoder.py +269 -0
- praisonai/ui/config/.chainlit/config.toml +120 -0
- praisonai/ui/config/.chainlit/translations/bn.json +231 -0
- praisonai/ui/config/.chainlit/translations/en-US.json +229 -0
- praisonai/ui/config/.chainlit/translations/gu.json +231 -0
- praisonai/ui/config/.chainlit/translations/he-IL.json +231 -0
- praisonai/ui/config/.chainlit/translations/hi.json +231 -0
- praisonai/ui/config/.chainlit/translations/kn.json +231 -0
- praisonai/ui/config/.chainlit/translations/ml.json +231 -0
- praisonai/ui/config/.chainlit/translations/mr.json +231 -0
- praisonai/ui/config/.chainlit/translations/ta.json +231 -0
- praisonai/ui/config/.chainlit/translations/te.json +231 -0
- praisonai/ui/config/.chainlit/translations/zh-CN.json +229 -0
- praisonai/ui/config/chainlit.md +1 -0
- praisonai/ui/config/translations/bn.json +231 -0
- praisonai/ui/config/translations/en-US.json +229 -0
- praisonai/ui/config/translations/gu.json +231 -0
- praisonai/ui/config/translations/he-IL.json +231 -0
- praisonai/ui/config/translations/hi.json +231 -0
- praisonai/ui/config/translations/kn.json +231 -0
- praisonai/ui/config/translations/ml.json +231 -0
- praisonai/ui/config/translations/mr.json +231 -0
- praisonai/ui/config/translations/ta.json +231 -0
- praisonai/ui/config/translations/te.json +231 -0
- praisonai/ui/config/translations/zh-CN.json +229 -0
- praisonai/ui/context.py +283 -0
- praisonai/ui/db.py +291 -0
- praisonai/ui/public/fantasy.svg +3 -0
- praisonai/ui/public/game.svg +3 -0
- praisonai/ui/public/logo_dark.png +0 -0
- praisonai/ui/public/logo_light.png +0 -0
- praisonai/ui/public/movie.svg +3 -0
- praisonai/ui/public/praison.css +3 -0
- praisonai/ui/public/thriller.svg +3 -0
- praisonai/ui/realtime.py +476 -0
- praisonai/ui/realtimeclient/__init__.py +653 -0
- praisonai/ui/realtimeclient/realtimedocs.txt +1484 -0
- praisonai/ui/realtimeclient/tools.py +236 -0
- praisonai/ui/sql_alchemy.py +707 -0
- praisonai/ui/tools.md +133 -0
- praisonai/version.py +1 -0
- praisonai-2.0.61.dist-info/LICENSE +20 -0
- praisonai-2.0.61.dist-info/METADATA +679 -0
- praisonai-2.0.61.dist-info/RECORD +89 -0
- praisonai-2.0.61.dist-info/WHEEL +4 -0
- praisonai-2.0.61.dist-info/entry_points.txt +5 -0
praisonai/api/call.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import json
|
|
3
|
+
import base64
|
|
4
|
+
import asyncio
|
|
5
|
+
import websockets
|
|
6
|
+
from fastapi import FastAPI, WebSocket, Request
|
|
7
|
+
from fastapi.responses import HTMLResponse
|
|
8
|
+
from fastapi.websockets import WebSocketDisconnect
|
|
9
|
+
from twilio.twiml.voice_response import VoiceResponse, Connect
|
|
10
|
+
from dotenv import load_dotenv
|
|
11
|
+
import uvicorn
|
|
12
|
+
from pyngrok import ngrok, conf
|
|
13
|
+
from rich import print
|
|
14
|
+
import argparse
|
|
15
|
+
import logging
|
|
16
|
+
import importlib.util
|
|
17
|
+
|
|
18
|
+
load_dotenv()  # pull environment variables from a local .env file, if present

# Configuration
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # requires OpenAI Realtime API Access
PORT = int(os.getenv('PORT', 8090))  # HTTP/WebSocket listen port
NGROK_AUTH_TOKEN = os.getenv('NGROK_AUTH_TOKEN')  # optional: enables public ngrok tunnel
PUBLIC = os.getenv('PUBLIC', 'false').lower() == 'true'  # default for --public behaviour
# Instructions sent to the OpenAI Realtime session (see send_session_update).
SYSTEM_MESSAGE = (
    "You are a helpful and bubbly AI assistant who loves to chat about "
    "anything the user is interested in and is prepared to offer them facts. "
    "Keep your responses short and to the point. "
    "You have a penchant for dad jokes, owl jokes, and rickrolling – subtly. "
    "Always stay positive, but work in a joke when appropriate."
    "Start your conversation by saying 'Hi! I'm Praison AI. How can I help you today?'"
)
VOICE = 'alloy'  # OpenAI Realtime voice name
# Realtime API event types that are echoed to stdout for debugging.
LOG_EVENT_TYPES = [
    'response.content.done', 'rate_limits.updated', 'response.done',
    'input_audio_buffer.committed', 'input_audio_buffer.speech_stopped',
    'input_audio_buffer.speech_started', 'session.created'
]

app = FastAPI()

# Fail fast: the media relay cannot work without Realtime API access.
if not OPENAI_API_KEY:
    raise ValueError('Missing the OpenAI API key. Please set it in the .env file.')

# Set up logging
logger = logging.getLogger(__name__)
log_level = os.getenv("LOGLEVEL", "INFO").upper()  # NOTE(review): computed but never applied to the logger — confirm intent
logger.handlers = []  # drop any handlers attached by imported libraries

# Try to import tools from the root directory
tools = []  # expected entries: (definition_dict, callable) tuples; see call_tool()
tools_path = os.path.join(os.getcwd(), 'tools.py')
logger.debug(f"Tools path: {tools_path}")
|
|
54
|
+
|
|
55
|
+
def import_tools_from_file(file_path):
    """Load a Python module from an arbitrary file path and return it.

    Uses importlib machinery directly so the file does not need to live on
    ``sys.path``; the module is registered under the name ``custom_tools``.
    """
    module_spec = importlib.util.spec_from_file_location("custom_tools", file_path)
    loaded_module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded_module)
    logger.debug(f"Imported tools from {file_path}")
    return loaded_module
|
|
61
|
+
|
|
62
|
+
# Best-effort load of user-supplied tools from ./tools.py; any failure is
# logged and the server continues without custom tools.
try:
    if os.path.exists(tools_path):
        # tools.py exists in the root directory, import from file
        custom_tools_module = import_tools_from_file(tools_path)
        logger.debug("Successfully imported custom tools from root tools.py")
    else:
        logger.debug("No custom tools.py file found in the root directory")
        custom_tools_module = None

    if custom_tools_module:
        # Update the tools list with custom tools
        if hasattr(custom_tools_module, 'tools') and isinstance(custom_tools_module.tools, list):
            # Preferred layout: the module exposes a ready-made `tools` list.
            tools.extend(custom_tools_module.tools)
        else:
            # Fallback: collect every public callable that carries a
            # `definition` attribute (the tool schema).
            for name, obj in custom_tools_module.__dict__.items():
                if callable(obj) and not name.startswith("__"):
                    tool_definition = getattr(obj, 'definition', None)
                    if tool_definition:
                        tools.append(tool_definition)

except Exception as e:
    logger.warning(f"Error importing custom tools: {str(e)}. Continuing without custom tools.")
|
|
84
|
+
|
|
85
|
+
@app.get("/status", response_class=HTMLResponse)
async def index_page():
    """Serve a minimal HTML status page confirming the server is running."""
    status_html = """
    <html>
        <head>
            <title>Praison AI Call Server</title>
        </head>
        <body>
            <h1>Praison AI Call Server is running!</h1>
        </body>
    </html>
    """
    return status_html
|
|
97
|
+
|
|
98
|
+
@app.api_route("/", methods=["GET", "POST"])
async def handle_incoming_call(request: Request):
    """Handle incoming call and return TwiML response to connect to Media Stream."""
    twiml = VoiceResponse()
    # Empty <Say> plus a short pause gives the media stream time to attach.
    twiml.say("")
    twiml.pause(length=1)
    stream_host = request.url.hostname
    connection = Connect()
    connection.stream(url=f'wss://{stream_host}/media-stream')
    twiml.append(connection)
    # Twilio expects the TwiML document as XML in the HTTP response body.
    return HTMLResponse(content=str(twiml), media_type="application/xml")
|
|
110
|
+
|
|
111
|
+
@app.websocket("/media-stream")
async def handle_media_stream(websocket: WebSocket):
    """Handle WebSocket connections between Twilio and OpenAI.

    Bridges two sockets concurrently: Twilio Media Stream frames are relayed
    to the OpenAI Realtime API, and Realtime audio deltas are relayed back to
    the caller. Runs until either side disconnects.
    """
    print("Client connected")
    await websocket.accept()

    async with websockets.connect(
        'wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01',
        extra_headers={
            "Authorization": f"Bearer {OPENAI_API_KEY}",
            "OpenAI-Beta": "realtime=v1"
        }
    ) as openai_ws:
        # Configure voice, audio format, tools etc. before any audio flows.
        await send_session_update(openai_ws)
        # Twilio stream id; set on the 'start' event and needed to address
        # outbound media frames. Shared by both relay coroutines below.
        stream_sid = None

        async def receive_from_twilio():
            """Receive audio data from Twilio and send it to the OpenAI Realtime API."""
            nonlocal stream_sid
            try:
                async for message in websocket.iter_text():
                    data = json.loads(message)
                    if data['event'] == 'media' and openai_ws.open:
                        # Twilio payload is already base64 G.711 u-law; forward as-is.
                        audio_append = {
                            "type": "input_audio_buffer.append",
                            "audio": data['media']['payload']
                        }
                        await openai_ws.send(json.dumps(audio_append))
                    elif data['event'] == 'start':
                        stream_sid = data['start']['streamSid']
                        print(f"Incoming stream has started {stream_sid}")
            except WebSocketDisconnect:
                # Caller hung up: also close the upstream OpenAI socket so
                # send_to_twilio's iterator terminates.
                print("Client disconnected.")
                if openai_ws.open:
                    await openai_ws.close()

        async def send_to_twilio():
            """Receive events from the OpenAI Realtime API, send audio back to Twilio."""
            nonlocal stream_sid
            try:
                async for openai_message in openai_ws:
                    response = json.loads(openai_message)
                    if response['type'] in LOG_EVENT_TYPES:
                        print(f"Received event: {response['type']}", response)
                    if response['type'] == 'session.updated':
                        print("Session updated successfully:", response)

                    # response.done may carry tool (function) calls to execute.
                    if response['type'] == 'response.done':
                        await handle_response_done(response, openai_ws)

                    if response['type'] == 'response.audio.delta' and response.get('delta'):
                        # Audio from OpenAI
                        try:
                            # NOTE(review): decode + re-encode round-trips the
                            # base64 payload unchanged — possibly only input
                            # validation; confirm before simplifying.
                            audio_payload = base64.b64encode(base64.b64decode(response['delta'])).decode('utf-8')
                            audio_delta = {
                                "event": "media",
                                "streamSid": stream_sid,
                                "media": {
                                    "payload": audio_payload
                                }
                            }
                            await websocket.send_json(audio_delta)
                        except Exception as e:
                            print(f"Error processing audio data: {e}")
            except Exception as e:
                print(f"Error in Sending to Phone: {e}")

        # Run both relay directions concurrently until they finish.
        await asyncio.gather(receive_from_twilio(), send_to_twilio())
|
|
179
|
+
|
|
180
|
+
async def handle_response_done(response, openai_ws):
    """Handle the response.done event and process any function calls.

    Scans the response's output items and dispatches each function-call item
    to process_function_call; all other item types are ignored.
    """
    print("Handling response.done:", response)
    for output_item in response.get('response', {}).get('output', []):
        if output_item.get('type') == 'function_call':
            await process_function_call(output_item, openai_ws)
|
|
187
|
+
|
|
188
|
+
async def process_function_call(item, openai_ws):
    """Process a function call item and send the result back to OpenAI.

    Executes the named tool via call_tool, reports the output as a
    function_call_output conversation item, then requests a new response so
    the model can speak the result.
    """
    function_name = item.get('name')
    arguments = json.loads(item.get('arguments', '{}'))
    call_id = item.get('call_id')

    print(f"Processing function call: {function_name}")
    print(f"Arguments: {arguments}")

    result = await call_tool(function_name, arguments)

    # Send the function call result back to OpenAI
    output_event = {
        "type": "conversation.item.create",
        "item": {
            "type": "function_call_output",
            "call_id": call_id,
            "output": json.dumps(result)
        }
    }
    await openai_ws.send(json.dumps(output_event))

    # Create a new response after sending the function call result
    await openai_ws.send(json.dumps({"type": "response.create"}))
|
|
213
|
+
|
|
214
|
+
async def call_tool(function_name, arguments):
    """Call the appropriate tool function and return the result.

    Args:
        function_name: Tool name as declared in the tool's definition dict.
        arguments: Keyword arguments decoded from the model's JSON call.

    Returns:
        The tool's result, or a ``{"error": ...}`` dict when the tool is
        missing or raises.
    """
    # The tools list may also contain bare definition dicts (see the module
    # loader); only (definition, callable) tuples are invocable. Apply the
    # same shape guard send_session_update uses, so a malformed entry cannot
    # crash the lookup with a KeyError/TypeError.
    tool = next(
        (t for t in tools
         if isinstance(t, tuple) and len(t) > 1
         and isinstance(t[0], dict) and t[0].get('name') == function_name),
        None
    )
    if not tool:
        return {"error": f"Function {function_name} not found"}

    try:
        # The callable is the second element of the (definition, func) tuple.
        result = await tool[1](**arguments)
        return result
    except Exception as e:
        # Surface the failure to the model rather than breaking the stream.
        return {"error": str(e)}
|
|
226
|
+
|
|
227
|
+
async def send_session_update(openai_ws):
    """Send session update to OpenAI WebSocket.

    Configures turn detection, G.711 u-law audio in/out (Twilio's codec),
    the assistant voice and instructions, and the available tools.
    """
    global tools
    print(f"Formatted tools: {tools}")

    # Only well-formed (definition_dict, ...) tuples become Realtime tools.
    use_tools = []
    for candidate in tools:
        if isinstance(candidate, tuple) and len(candidate) > 0 and isinstance(candidate[0], dict):
            use_tools.append({**candidate[0], "type": "function"})

    session_update = {
        "type": "session.update",
        "session": {
            "turn_detection": {
                "type": "server_vad",
                "threshold": 0.5,
                "prefix_padding_ms": 300,
                "silence_duration_ms": 200
            },
            "input_audio_format": "g711_ulaw",
            "output_audio_format": "g711_ulaw",
            "voice": VOICE,
            "tools": use_tools,
            "tool_choice": "auto",
            "instructions": SYSTEM_MESSAGE,
            "modalities": ["text", "audio"],
            "temperature": 0.8
        }
    }
    print('Sending session update:', json.dumps(session_update))
    await openai_ws.send(json.dumps(session_update))
|
|
259
|
+
|
|
260
|
+
def setup_public_url(port):
    """Open an ngrok tunnel to *port* and return its public URL.

    The auth token is applied only when NGROK_AUTH_TOKEN is set; ngrok's
    anonymous tier is used otherwise.
    """
    if NGROK_AUTH_TOKEN:
        conf.get_default().auth_token = NGROK_AUTH_TOKEN
    tunnel_url = ngrok.connect(addr=str(port)).public_url
    print(f"Praison AI Voice URL: {tunnel_url}")
    return tunnel_url
|
|
266
|
+
|
|
267
|
+
def run_server(port: int, use_public: bool = False):
    """Run the FastAPI server using uvicorn.

    Args:
        port: TCP port to bind.
        use_public: When True, open an ngrok tunnel first (its URL is
            printed by setup_public_url); otherwise print the local URL.
    """
    if use_public:
        setup_public_url(port)
    else:
        print(f"Starting Praison AI Call Server on http://localhost:{port}")
    # Bind all interfaces so Twilio webhooks / the tunnel can reach us.
    uvicorn.run(app, host="0.0.0.0", port=port, log_level="warning")
|
|
274
|
+
|
|
275
|
+
def main(args=None):
    """Run the Praison AI Call Server.

    Args:
        args: Optional argv-style list for testing; when None, sys.argv is
            parsed as usual.
    """
    parser = argparse.ArgumentParser(description="Run the Praison AI Call Server.")
    parser.add_argument('--public', action='store_true', help="Use ngrok to expose the server publicly")
    parser.add_argument('--port', type=int, default=PORT, help="Port to run the server on")

    parsed = parser.parse_args() if args is None else parser.parse_args(args)

    # The PUBLIC env var can force tunnelling even without --public.
    run_server(port=parsed.port, use_public=parsed.public or PUBLIC)
|
|
290
|
+
|
|
291
|
+
# Allow running this module directly: `python call.py [--public] [--port N]`.
if __name__ == "__main__":
    main()
|
praisonai/auto.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
from openai import OpenAI
|
|
2
|
+
from pydantic import BaseModel
|
|
3
|
+
from typing import Dict, List, Optional
|
|
4
|
+
import instructor
|
|
5
|
+
import os
|
|
6
|
+
import json
|
|
7
|
+
import yaml
|
|
8
|
+
from rich import print
|
|
9
|
+
import logging
|
|
10
|
+
|
|
11
|
+
# Framework-specific imports with availability checks.
# Each optional backend is probed at import time; these flags gate feature
# use and drive the install-hint errors raised in AutoGenerator.__init__.
CREWAI_AVAILABLE = False
AUTOGEN_AVAILABLE = False
PRAISONAI_TOOLS_AVAILABLE = False
PRAISONAI_AVAILABLE = False

try:
    from praisonaiagents import Agent as PraisonAgent, Task as PraisonTask, PraisonAIAgents
    PRAISONAI_AVAILABLE = True
except ImportError:
    pass

try:
    from crewai import Agent, Task, Crew
    CREWAI_AVAILABLE = True
except ImportError:
    pass

try:
    import autogen
    AUTOGEN_AVAILABLE = True
except ImportError:
    pass

try:
    # Tool catalogue shared by the crewai/autogen backends.
    from praisonai_tools import (
        CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool,
        DirectoryReadTool, FileReadTool, TXTSearchTool, JSONSearchTool,
        MDXSearchTool, PDFSearchTool, RagTool, ScrapeElementFromWebsiteTool,
        ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool,
        YoutubeChannelSearchTool, YoutubeVideoSearchTool
    )
    PRAISONAI_TOOLS_AVAILABLE = True
except ImportError:
    PRAISONAI_TOOLS_AVAILABLE = False

# LOGLEVEL env var controls verbosity for the whole module.
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper(), format='%(asctime)s - %(levelname)s - %(message)s')
|
|
48
|
+
|
|
49
|
+
# Define Pydantic models outside of the generate method
|
|
50
|
+
class TaskDetails(BaseModel):
    """Schema for a single task assigned to a role (LLM structured output)."""
    description: str  # what the task entails
    expected_output: str  # the deliverable the task should produce
|
|
53
|
+
|
|
54
|
+
class RoleDetails(BaseModel):
    """Schema for one agent role in the generated team."""
    role: str  # human-readable role title
    goal: str  # what this role is trying to achieve
    backstory: str  # persona/context given to the agent
    tasks: Dict[str, TaskDetails]  # task_id -> task details
    tools: List[str]  # tool names drawn from the praisonai_tools catalogue
|
|
60
|
+
|
|
61
|
+
class TeamStructure(BaseModel):
    """Top-level schema the LLM must emit: a mapping of role_id -> role."""
    roles: Dict[str, RoleDetails]
|
|
63
|
+
|
|
64
|
+
class AutoGenerator:
    """Generate a multi-agent team definition for a topic and save it as YAML.

    Uses an OpenAI-compatible endpoint (patched with ``instructor``) to
    produce a ``TeamStructure`` and serialises it in the format the agent
    runners expect.
    """

    def __init__(self, topic="Movie Story writing about AI", agent_file="test.yaml", framework="crewai", config_list: Optional[List[Dict]] = None):
        """
        Initialize the AutoGenerator class with the specified topic, agent file, and framework.
        Note: autogen framework is different from this AutoGenerator class.

        Args:
            topic: Task description the generated team should accomplish.
            agent_file: Path of the YAML file to write.
            framework: One of "crewai", "autogen" or "praisonai".
            config_list: Optional OpenAI-compatible endpoint configs; the
                first entry's model/base_url/api_key are used.

        Raises:
            ImportError: If the requested framework is not installed.
        """
        # Validate framework availability and show framework-specific messages
        if framework == "crewai" and not CREWAI_AVAILABLE:
            raise ImportError("""
CrewAI is not installed. Please install with:
pip install "praisonai[crewai]"
""")
        elif framework == "autogen" and not AUTOGEN_AVAILABLE:
            raise ImportError("""
AutoGen is not installed. Please install with:
pip install "praisonai[autogen]"
""")
        elif framework == "praisonai" and not PRAISONAI_AVAILABLE:
            raise ImportError("""
Praisonai is not installed. Please install with:
pip install praisonaiagents
""")

        # Only show tools message if using a framework and tools are needed
        if (framework in ["crewai", "autogen"]) and not PRAISONAI_TOOLS_AVAILABLE:
            logging.warning(f"""
Tools are not available for {framework}. To use tools, install:
pip install "praisonai[{framework}]"
""")

        self.config_list = config_list or [
            {
                'model': os.environ.get("OPENAI_MODEL_NAME", "gpt-4o"),
                'base_url': os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
                'api_key': os.environ.get("OPENAI_API_KEY")
            }
        ]
        self.topic = topic
        self.agent_file = agent_file
        self.framework = framework or "praisonai"
        # BUGFIX: honour the api_key supplied in config_list. Previously the
        # client always read OPENAI_API_KEY from the environment, silently
        # ignoring a caller-provided key. The env var remains the fallback,
        # so behaviour with the default config_list is unchanged.
        self.client = instructor.patch(
            OpenAI(
                base_url=self.config_list[0]['base_url'],
                api_key=self.config_list[0].get('api_key') or os.getenv("OPENAI_API_KEY"),
            ),
            mode=instructor.Mode.JSON,
        )

    def generate(self):
        """
        Generates a team structure for the specified topic.

        Args:
            None

        Returns:
            str: The full path of the YAML file containing the generated team structure.

        Raises:
            Exception: If the generation process fails.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            path = generator.generate()
            print(path)
        """
        # instructor retries until the output validates against TeamStructure.
        response = self.client.chat.completions.create(
            model=self.config_list[0]['model'],
            response_model=TeamStructure,
            max_retries=10,
            messages=[
                {"role": "system", "content": "You are a helpful assistant designed to output complex team structures."},
                {"role": "user", "content": self.get_user_content()}
            ]
        )
        json_data = json.loads(response.model_dump_json())
        self.convert_and_save(json_data)
        full_path = os.path.abspath(self.agent_file)
        return full_path

    def convert_and_save(self, json_data):
        """Converts the provided JSON data into the desired YAML format and saves it to a file.

        Args:
            json_data (dict): The JSON data representing the team structure
                (the ``TeamStructure`` schema as a plain dict).
        """
        yaml_data = {
            "framework": self.framework,
            "topic": self.topic,
            "roles": {},
            "dependencies": []
        }

        for role_id, role_details in json_data['roles'].items():
            yaml_data['roles'][role_id] = {
                "backstory": role_details['backstory'],
                "goal": role_details['goal'],
                "role": role_details['role'],
                "tasks": {},
                # Tools are intentionally blanked out for now; the model's
                # suggestions are not trusted verbatim.
                # "tools": role_details.get('tools', []),
                "tools": ['']
            }

            for task_id, task_details in role_details['tasks'].items():
                yaml_data['roles'][role_id]['tasks'][task_id] = {
                    "description": task_details['description'],
                    "expected_output": task_details['expected_output']
                }

        # Save to YAML file, maintaining the order
        with open(self.agent_file, 'w') as f:
            yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)

    def get_user_content(self):
        """
        Generates a prompt for the OpenAI API to generate a team structure.

        Args:
            None

        Returns:
            str: The prompt for the OpenAI API.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            prompt = generator.get_user_content()
            print(prompt)
        """
        user_content = """Generate a team structure for \"""" + self.topic + """\" task.
No Input data will be provided to the team.
The team will work in sequence. First role will pass the output to the next role, and so on.
The last role will generate the final output.
Think step by step.
With maximum 3 roles, each with 1 task. Include role goals, backstories, task descriptions, and expected outputs.
List of Available Tools: CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, DirectoryReadTool, FileReadTool, TXTSearchTool, JSONSearchTool, MDXSearchTool, PDFSearchTool, RagTool, ScrapeElementFromWebsiteTool, ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool.
Only use Available Tools. Do Not use any other tools.
Example Below:
Use below example to understand the structure of the output.
The final role you create should satisfy the provided task: """ + self.topic + """.
{
"roles": {
"narrative_designer": {
"role": "Narrative Designer",
"goal": "Create AI storylines",
"backstory": "Skilled in narrative development for AI, with a focus on story resonance.",
"tools": ["ScrapeWebsiteTool"],
"tasks": {
"story_concept_development": {
"description": "Craft a unique AI story concept with depth and engagement using concept from this page the content https://www.asthebirdfliesblog.com/posts/how-to-write-book-story-development .",
"expected_output": "Document with narrative arcs, character bios, and settings."
}
}
},
"scriptwriter": {
"role": "Scriptwriter",
"goal": "Write scripts from AI concepts",
"backstory": "Expert in dialogue and script structure, translating concepts into scripts.",
"tasks": {
"scriptwriting_task": {
"description": "Turn narrative concepts into scripts, including dialogue and scenes.",
"expected_output": "Production-ready script with dialogue and scene details."
}
}
}
}
}
"""
        return user_content
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
# generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
|
|
238
|
+
# print(generator.generate())
|