praisonaiagents 0.0.12.tar.gz → 0.0.14.tar.gz
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/PKG-INFO +1 -1
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/agent/agent.py +91 -23
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/agents/agents.py +237 -4
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/main.py +64 -15
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/task/task.py +20 -4
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/pyproject.toml +1 -1
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/setup.cfg +0 -0
{praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/agent/agent.py

@@ -140,7 +140,7 @@ class Agent:
        max_rpm: Optional[int] = None,
        max_execution_time: Optional[int] = None,
        memory: bool = True,
-        verbose: bool = False,
+        verbose: bool = True,
        allow_delegation: bool = False,
        step_callback: Optional[Any] = None,
        cache: bool = True,
@@ -191,6 +191,8 @@ class Agent:
        self.max_reflect = max_reflect
        self.min_reflect = min_reflect
        self.reflect_llm = reflect_llm
+        self.console = Console() # Create a single console instance for the agent
+
    def execute_tool(self, function_name, arguments):
        """
        Execute a tool dynamically based on the function name and arguments.
@@ -235,7 +237,6 @@ class Agent:
        return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"

    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
-        console = Console()
        start_time = time.time()
        logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -305,12 +306,24 @@ class Agent:
                stream=True
            )
            full_response_text = ""
-
+
+            # Create Live display with proper configuration
+            with Live(
+                display_generating("", start_time),
+                console=self.console,
+                refresh_per_second=4,
+                transient=False, # Changed to False to preserve output
+                vertical_overflow="ellipsis",
+                auto_refresh=True
+            ) as live:
                for chunk in response_stream:
                    if chunk.choices[0].delta.content:
                        full_response_text += chunk.choices[0].delta.content
                        live.update(display_generating(full_response_text, start_time))
-
+
+            # Clear the last generating display with a blank line
+            self.console.print()
+
            final_response = client.chat.completions.create(
                model=self.llm,
                messages=messages,
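This hunk swaps the bare streaming loop for a rich Live region driven by the agent's shared console. A minimal standalone sketch of the same pattern, with a fake token list standing in for the OpenAI chunk stream:

import time
from rich.console import Console
from rich.live import Live
from rich.panel import Panel

console = Console()
start = time.time()
text = ""
with Live(Panel("", title="Generating... 0.0s"), console=console,
          refresh_per_second=4, transient=False,
          vertical_overflow="ellipsis", auto_refresh=True) as live:
    for token in ["Hello", ", ", "world", "!"]:  # stand-in for response_stream
        text += token
        time.sleep(0.25)
        live.update(Panel(text, title=f"Generating... {time.time() - start:.1f}s"))
console.print()  # blank line so later output does not run into the Live region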
@@ -340,14 +353,33 @@ Your Goal: {self.goal}
Your Role: {self.role}\n
Your Goal: {self.goal}
            """
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
        else:
            system_prompt = None
-
+
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.extend(self.chat_history)
-
+
+        # Modify prompt if output_json is specified
+        original_prompt = prompt
+        if output_json:
+            if isinstance(prompt, str):
+                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # For multimodal prompts, append to the text content
+                for item in prompt:
+                    if item["type"] == "text":
+                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
+        if isinstance(prompt, list):
+            # If we receive a multimodal prompt list, place it directly in the user message
+            messages.append({"role": "user", "content": prompt})
+        else:
+            messages.append({"role": "user", "content": prompt})

        final_response_text = None
        reflection_count = 0
@@ -356,18 +388,26 @@ Your Goal: {self.goal}
        while True:
            try:
                if self.verbose:
-
+                    # Handle both string and list prompts for instruction display
+                    display_text = prompt
+                    if isinstance(prompt, list):
+                        # Extract text content from multimodal prompt
+                        display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+                    if display_text and str(display_text).strip():
+                        display_instruction(f"Agent {self.name} is processing prompt: {display_text}", console=self.console)

                response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
                if not response:
                    return None
-
+
                tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
+                response_text = response.choices[0].message.content.strip()

                if tool_calls:
                    messages.append({
                        "role": "assistant",
-                        "content":
+                        "content": response_text,
                        "tool_calls": tool_calls
                    })

@@ -376,13 +416,13 @@ Your Goal: {self.goal}
                        arguments = json.loads(tool_call.function.arguments)

                        if self.verbose:
-                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
+                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}", console=self.console)

                        tool_result = self.execute_tool(function_name, arguments)

                        if tool_result:
                            if self.verbose:
-                                display_tool_call(f"Function '{function_name}' returned: {tool_result}")
+                                display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=self.console)
                            messages.append({
                                "role": "tool",
                                "tool_call_id": tool_call.id,
@@ -399,15 +439,31 @@ Your Goal: {self.goal}
                    if not response:
                        return None
                    response_text = response.choices[0].message.content.strip()
-
-
+
+                # Handle output_json if specified
+                if output_json:
+                    try:
+                        # Clean the response text to get only JSON
+                        cleaned_json = self.clean_json_output(response_text)
+                        # Parse into Pydantic model
+                        parsed_model = output_json.model_validate_json(cleaned_json)
+                        # Add to chat history and return
+                        self.chat_history.append({"role": "user", "content": original_prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        if self.verbose:
+                            display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                                generation_time=time.time() - start_time, console=self.console)
+                        return parsed_model
+                    except Exception as e:
+                        display_error(f"Failed to parse response as {output_json.__name__}: {e}")
+                        return None

                if not self.self_reflect:
-                    self.chat_history.append({"role": "user", "content": prompt})
+                    self.chat_history.append({"role": "user", "content": original_prompt})
                    self.chat_history.append({"role": "assistant", "content": response_text})
                    if self.verbose:
                        logging.info(f"Agent {self.name} final response: {response_text}")
-                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                    display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                    return response_text

                reflection_prompt = f"""
@@ -430,26 +486,26 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                    reflection_output = reflection_response.choices[0].message.parsed

                    if self.verbose:
-                        display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
+                        display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)

                    messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})

                    # Only consider satisfactory after minimum reflections
                    if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
                        if self.verbose:
-                            display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections")
+                            display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
                        self.chat_history.append({"role": "user", "content": prompt})
                        self.chat_history.append({"role": "assistant", "content": response_text})
-                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                        return response_text

                    # Check if we've hit max reflections
                    if reflection_count >= self.max_reflect - 1:
                        if self.verbose:
-                            display_self_reflection("Maximum reflection count reached, returning current response")
+                            display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
                        self.chat_history.append({"role": "user", "content": prompt})
                        self.chat_history.append({"role": "assistant", "content": response_text})
-                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                        return response_text

                    logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
@@ -460,12 +516,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                    continue # Continue the loop for more reflections

                except Exception as e:
-                    display_error(f"Error in parsing self-reflection json {e}. Retrying")
+                    display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
                    logging.error("Reflection parsing failed.", exc_info=True)
                    messages.append({"role": "assistant", "content": f"Self Reflection failed."})
                    reflection_count += 1
                    continue # Continue even after error to try again

            except Exception as e:
-                display_error(f"Error in chat: {e}")
-                return None
+                display_error(f"Error in chat: {e}", console=self.console)
+                return None
+
+    def clean_json_output(self, output: str) -> str:
+        """Clean and extract JSON from response text."""
+        cleaned = output.strip()
+        # Remove markdown code blocks if present
+        if cleaned.startswith("```json"):
+            cleaned = cleaned[len("```json"):].strip()
+        if cleaned.startswith("```"):
+            cleaned = cleaned[len("```"):].strip()
+        if cleaned.endswith("```"):
+            cleaned = cleaned[:-3].strip()
+        return cleaned
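Taken together, the agent.py hunks give Agent.chat a structured-output path: when output_json is passed, the system and user prompts are amended to demand raw JSON, the reply is stripped of code fences by clean_json_output, and the parsed Pydantic instance is returned (None if validation fails). A hedged usage sketch; the model name and the Answer fields are illustrative, not taken from the diff:

from pydantic import BaseModel
from praisonaiagents.agent.agent import Agent

class Answer(BaseModel):
    items: list[str]

agent = Agent(
    name="Lister",
    role="Assistant",
    goal="Produce short structured lists",
    backstory="Returns concise JSON answers",
    llm="gpt-4o-mini",  # illustrative model name
)
parsed = agent.chat("List three prime numbers", output_json=Answer)
if parsed is not None:  # None means the JSON could not be validated
    print(parsed.items)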
{praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/agents/agents.py

@@ -2,7 +2,7 @@ import os
 import time
 import json
 import logging
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, List
 from pydantic import BaseModel
 from rich.text import Text
 from rich.panel import Panel
@@ -11,6 +11,36 @@ from ..main import display_error, TaskOutput, error_logs, client
 from ..agent.agent import Agent
 from ..task.task import Task

+class LoopItems(BaseModel):
+    items: List[Any]
+
+def encode_file_to_base64(file_path: str) -> str:
+    """Base64-encode a file."""
+    import base64
+    with open(file_path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8")
+
+def process_video(video_path: str, seconds_per_frame=2):
+    """Split video into frames (base64-encoded)."""
+    import cv2
+    import base64
+    base64_frames = []
+    video = cv2.VideoCapture(video_path)
+    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+    fps = video.get(cv2.CAP_PROP_FPS)
+    frames_to_skip = int(fps * seconds_per_frame)
+    curr_frame = 0
+    while curr_frame < total_frames:
+        video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
+        success, frame = video.read()
+        if not success:
+            break
+        _, buffer = cv2.imencode(".jpg", frame)
+        base64_frames.append(base64.b64encode(buffer).decode("utf-8"))
+        curr_frame += frames_to_skip
+    video.release()
+    return base64_frames
+
 class PraisonAIAgents:
     def __init__(self, agents, tasks, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None):
         self.agents = agents
@@ -28,6 +58,7 @@ class PraisonAIAgents:
        for task in tasks:
            self.add_task(task)
            task.status = "not started"
+        self._state = {} # Add state storage at PraisonAIAgents level

    def add_task(self, task):
        task_id = self.task_id_counter
@@ -58,6 +89,19 @@ class PraisonAIAgents:
            display_error(f"Error: Task with ID {task_id} does not exist")
            return
        task = self.tasks[task_id]
+
+        # Only import multimodal dependencies if task has images
+        if task.images and task.status == "not started":
+            try:
+                import cv2
+                import base64
+                from moviepy import VideoFileClip
+            except ImportError as e:
+                display_error(f"Error: Missing required dependencies for image/video processing: {e}")
+                display_error("Please install with: pip install opencv-python moviepy")
+                task.status = "failed"
+                return None
+
        if task.status == "not started":
            task.status = "in progress"

@@ -83,7 +127,47 @@ Expected Output: {task.expected_output}.
        if self.verbose >= 2:
            logging.info(f"Executing task {task_id}: {task.description} using {executor_agent.name}")
        logging.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")
-
+
+        if task.images:
+            def _get_multimodal_message(text_prompt, images):
+                content = [{"type": "text", "text": text_prompt}]
+
+                for img in images:
+                    # If local file path for a valid image
+                    if os.path.exists(img):
+                        ext = os.path.splitext(img)[1].lower()
+                        # If it's a .mp4, convert to frames
+                        if ext == ".mp4":
+                            frames = process_video(img, seconds_per_frame=1)
+                            content.append({"type": "text", "text": "These are frames from the video."})
+                            for f in frames:
+                                content.append({
+                                    "type": "image_url",
+                                    "image_url": {"url": f"data:image/jpg;base64,{f}"}
+                                })
+                        else:
+                            encoded = encode_file_to_base64(img)
+                            content.append({
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": f"data:image/{ext.lstrip('.')};base64,{encoded}"
+                                }
+                            })
+                    else:
+                        # Treat as a remote URL
+                        content.append({
+                            "type": "image_url",
+                            "image_url": {"url": img}
+                        })
+                return content
+
+            agent_output = executor_agent.chat(
+                _get_multimodal_message(task_prompt, task.images),
+                tools=task.tools
+            )
+        else:
+            agent_output = executor_agent.chat(task_prompt, tools=task.tools)
+
        if agent_output:
            task_output = TaskOutput(
                description=task.description,
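With this hunk, run_task builds an OpenAI-style multimodal content list whenever a task carries images: local .mp4 files are sampled into base64 frames, other local files are base64-encoded, and anything else is passed through as a URL. A hedged sketch of the calling side (file names and the model are placeholders; opencv-python and moviepy must be installed once images are set):

from praisonaiagents.agent.agent import Agent
from praisonaiagents.task.task import Task
from praisonaiagents.agents.agents import PraisonAIAgents

vision_agent = Agent(
    name="Vision",
    role="Media analyst",
    goal="Describe attached media",
    backstory="Gives careful, literal descriptions",
    llm="gpt-4o-mini",  # any vision-capable model
)
describe = Task(
    description="Describe what is shown in the attached media.",
    expected_output="A short description of each item.",
    agent=vision_agent,
    images=["diagram.png", "demo.mp4", "https://example.com/photo.jpg"],
)
PraisonAIAgents(agents=[vision_agent], tasks=[describe]).run_all_tasks()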
@@ -171,11 +255,144 @@ Expected Output: {task.expected_output}.
        logging.info(f"Task {task_id} failed after {self.max_retries} retries.")

    def run_all_tasks(self):
-
+        """Execute tasks based on execution mode"""
+        if self.process == "workflow":
+            # Build workflow relationships first
+            for task in self.tasks.values():
+                if task.next_tasks:
+                    for next_task_name in task.next_tasks:
+                        next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+                        if next_task:
+                            next_task.previous_tasks.append(task.name)
+
+            # Find start task
+            start_task = None
+            for task_id, task in self.tasks.items():
+                if task.is_start:
+                    start_task = task
+                    break
+
+            if not start_task:
+                start_task = list(self.tasks.values())[0]
+                logging.info("No start task marked, using first task")
+
+            current_task = start_task
+            visited_tasks = set()
+            loop_data = {} # Store loop-specific data
+
+            while current_task and current_task.id not in visited_tasks:
+                task_id = current_task.id
+                logging.info(f"Executing workflow task: {current_task.name if current_task.name else task_id}")
+
+                # Add context from previous tasks to description
+                if current_task.previous_tasks or current_task.context:
+                    context = "\nInput data from previous tasks:"
+
+                    # Add data from previous tasks in workflow
+                    for prev_name in current_task.previous_tasks:
+                        prev_task = next((t for t in self.tasks.values() if t.name == prev_name), None)
+                        if prev_task and prev_task.result:
+                            # Handle loop data
+                            if current_task.task_type == "loop":
+                                # create a loop manager Agent
+                                loop_manager = Agent(
+                                    name="Loop Manager",
+                                    role="Loop data processor",
+                                    goal="Process loop data and convert it to list format",
+                                    backstory="Expert at handling loop data and converting it to proper format",
+                                    llm=self.manager_llm,
+                                    verbose=self.verbose,
+                                    markdown=True
+                                )
+
+                                # get the loop data convert it to list using calling Agent class chat
+                                loop_prompt = f"""
+Process this data into a list format:
+{prev_task.result.raw}
+
+Return a JSON object with an 'items' array containing the items to process.
+"""
+                                loop_data_str = loop_manager.chat(
+                                    prompt=loop_prompt,
+                                    output_json=LoopItems
+                                )
+
+                                try:
+                                    # The response will already be parsed into LoopItems model
+                                    loop_data[f"loop_{current_task.name}"] = {
+                                        "items": loop_data_str.items,
+                                        "index": 0,
+                                        "remaining": len(loop_data_str.items)
+                                    }
+                                    context += f"\nCurrent loop item: {loop_data_str.items[0]}"
+                                except Exception as e:
+                                    display_error(f"Failed to process loop data: {e}")
+                                    context += f"\n{prev_name}: {prev_task.result.raw}"
+                            else:
+                                context += f"\n{prev_name}: {prev_task.result.raw}"
+
+                    # Add data from context tasks
+                    if current_task.context:
+                        for ctx_task in current_task.context:
+                            if ctx_task.result and ctx_task.name != current_task.name:
+                                context += f"\n{ctx_task.name}: {ctx_task.result.raw}"
+
+                    # Update task description with context
+                    current_task.description = current_task.description + context
+
+                # Execute task using existing run_task method
+                self.run_task(task_id)
+                visited_tasks.add(task_id)
+
+                # Handle loop progression
+                if current_task.task_type == "loop":
+                    loop_key = f"loop_{current_task.name}"
+                    if loop_key in loop_data:
+                        loop_info = loop_data[loop_key]
+                        loop_info["index"] += 1
+                        has_more = loop_info["remaining"] > 0
+
+                        # Update result to trigger correct condition
+                        if current_task.result:
+                            result = current_task.result.raw
+                            if has_more:
+                                result += "\nmore"
+                            else:
+                                result += "\ndone"
+                            current_task.result.raw = result
+
+                # Determine next task based on result
+                next_task = None
+                if current_task.result:
+                    if current_task.task_type in ["decision", "loop"]:
+                        result = current_task.result.raw.lower()
+                        # Check conditions
+                        for condition, tasks in current_task.condition.items():
+                            if condition.lower() in result and tasks:
+                                next_task_name = tasks[0]
+                                next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+                                # For loops, allow revisiting the same task
+                                if next_task and next_task.id == current_task.id:
+                                    visited_tasks.discard(current_task.id)
+                                break
+
+                if not next_task and current_task.next_tasks:
+                    next_task_name = current_task.next_tasks[0]
+                    next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+
+                current_task = next_task
+                if not current_task:
+                    logging.info("Workflow execution completed")
+                    break
+
+        elif self.process == "sequential":
+            # Keep original sequential execution
            for task_id in self.tasks:
                if self.tasks[task_id].status != "completed":
                    self.run_task(task_id)
+
        elif self.process == "hierarchical":
+            # Keep original hierarchical execution
            logging.debug(f"Starting hierarchical task execution with {len(self.tasks)} tasks")
            manager_agent = Agent(
                name="Manager",
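The new "workflow" process walks the task graph via is_start/next_tasks, folds previous results into each task's description, and for task_type "loop" has a manager agent coerce the previous result into a LoopItems list, appending "more"/"done" to the result so the task's condition picks the next step. A hedged sketch of wiring such a workflow (task wording and the model name are placeholders):

from praisonaiagents.agent.agent import Agent
from praisonaiagents.task.task import Task
from praisonaiagents.agents.agents import PraisonAIAgents

worker = Agent(name="Worker", role="Processor", goal="Process items one at a time",
               backstory="Handles one item per pass", llm="gpt-4o-mini")

gather = Task(name="gather", description="Produce the raw list of items to process",
              expected_output="A raw list", agent=worker,
              is_start=True, next_tasks=["process"])
process = Task(name="process", description="Process the current loop item",
               expected_output="The processed item", agent=worker,
               task_type="loop",
               condition={"more": ["process"], "done": ["report"]})
report = Task(name="report", description="Summarize everything that was processed",
              expected_output="A final summary", agent=worker)

PraisonAIAgents(agents=[worker], tasks=[gather, process, report],
                process="workflow", manager_llm="gpt-4o-mini").run_all_tasks()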
@@ -317,4 +534,20 @@ Provide a JSON with the structure:
        return {
            "task_status": self.get_all_tasks_status(),
            "task_results": {task_id: self.get_task_result(task_id) for task_id in self.tasks}
-        }
+        }
+
+    def set_state(self, key: str, value: Any) -> None:
+        """Set a state value"""
+        self._state[key] = value
+
+    def get_state(self, key: str, default: Any = None) -> Any:
+        """Get a state value"""
+        return self._state.get(key, default)
+
+    def update_state(self, updates: Dict) -> None:
+        """Update multiple state values"""
+        self._state.update(updates)
+
+    def clear_state(self) -> None:
+        """Clear all state values"""
+        self._state.clear()
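Alongside workflows, the run object now carries a small key/value store. A short sketch, continuing the workflow example above:

crew = PraisonAIAgents(agents=[worker], tasks=[gather, process, report],
                       process="workflow", manager_llm="gpt-4o-mini")
crew.set_state("run_id", "2024-12-19-a")
crew.update_state({"stage": "draft", "retries": 0})
crew.get_state("stage")          # -> "draft"
crew.get_state("missing", None)  # the default is returned when the key is absent
crew.clear_state()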
{praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/main.py

@@ -25,43 +25,92 @@ logging.basicConfig(
 # Global list to store error logs
 error_logs = []

-def
-
-if
+def _clean_display_content(content: str, max_length: int = 20000) -> str:
+    """Helper function to clean and truncate content for display."""
+    if not content or not str(content).strip():
+        return ""
+
+    content = str(content)
+    # Handle base64 content
+    if "base64" in content:
+        content_parts = []
+        for line in content.split('\n'):
+            if "base64" not in line:
+                content_parts.append(line)
+        content = '\n'.join(content_parts)
+
+    # Truncate if too long
+    if len(content) > max_length:
+        content = content[:max_length] + "..."
+
+    return content.strip()
+
+def display_interaction(message, response, markdown=True, generation_time=None, console=None):
+    """Display the interaction between user and assistant."""
+    if console is None:
+        console = Console()
+    if generation_time:
        console.print(Text(f"Response generated in {generation_time:.1f}s", style="dim"))
-
-
+
+    # Handle multimodal content (list)
+    if isinstance(message, list):
+        # Extract just the text content from the multimodal message
+        text_content = next((item["text"] for item in message if item["type"] == "text"), "")
+        message = text_content
+
+    message = _clean_display_content(str(message))
+    response = _clean_display_content(str(response))

    if markdown:
        console.print(Panel.fit(Markdown(message), title="Message", border_style="cyan"))
        console.print(Panel.fit(Markdown(response), title="Response", border_style="cyan"))
    else:
        console.print(Panel.fit(Text(message, style="bold green"), title="Message", border_style="cyan"))
-        console.print(Panel.fit(Text(response, style="bold
-
-def display_self_reflection(message: str):
-
+        console.print(Panel.fit(Text(response, style="bold blue"), title="Response", border_style="cyan"))
+
+def display_self_reflection(message: str, console=None):
+    if not message or not message.strip():
+        return
+    if console is None:
+        console = Console()
+    message = _clean_display_content(str(message))
    console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))

-def display_instruction(message: str):
-
+def display_instruction(message: str, console=None):
+    if not message or not message.strip():
+        return
+    if console is None:
+        console = Console()
+    message = _clean_display_content(str(message))
    console.print(Panel.fit(Text(message, style="bold blue"), title="Instruction", border_style="cyan"))

-def display_tool_call(message: str):
-
+def display_tool_call(message: str, console=None):
+    if not message or not message.strip():
+        return
+    if console is None:
+        console = Console()
+    message = _clean_display_content(str(message))
    console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))

-def display_error(message: str):
-
+def display_error(message: str, console=None):
+    if not message or not message.strip():
+        return
+    if console is None:
+        console = Console()
+    message = _clean_display_content(str(message))
    console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
    # Store errors
    error_logs.append(message)

def display_generating(content: str = "", start_time: Optional[float] = None):
+    if not content or not str(content).strip():
+        return Panel("", title="", border_style="green") # Return empty panel when no content
    elapsed_str = ""
    if start_time is not None:
        elapsed = time.time() - start_time
        elapsed_str = f" {elapsed:.1f}s"
+
+    content = _clean_display_content(str(content))
    return Panel(Markdown(content), title=f"Generating...{elapsed_str}", border_style="green")

def clean_triple_backticks(text: str) -> str:
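All display helpers now accept an optional console, skip empty messages, and route text through _clean_display_content, which drops base64-bearing lines and truncates anything over 20,000 characters. A quick sketch of calling them directly (the messages are made up):

from rich.console import Console
from praisonaiagents.main import display_tool_call, display_error, display_generating

console = Console()
display_tool_call("Calling 'search' with {'q': 'news'}", console=console)  # reuses the caller's console
display_error("")  # empty messages are skipped entirely
panel = display_generating("partial answer...", start_time=None)
console.print(panel)  # display_generating returns a Panel for the caller to render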
{praisonaiagents-0.0.12 → praisonaiagents-0.0.14}/praisonaiagents/task/task.py

@@ -1,8 +1,9 @@
 import logging
-from typing import List, Optional, Dict, Any, Type
+from typing import List, Optional, Dict, Any, Type, Callable, Union
 from pydantic import BaseModel
 from ..main import TaskOutput
 from ..agent.agent import Agent
+import uuid

 class Task:
     def __init__(
@@ -22,11 +23,18 @@ class Task:
        status: str = "not started",
        result: Optional[TaskOutput] = None,
        create_directory: Optional[bool] = False,
-        id: Optional[int] = None
+        id: Optional[int] = None,
+        images: Optional[List[str]] = None,
+        next_tasks: Optional[List[str]] = None,
+        task_type: str = "task",
+        condition: Optional[Dict[str, List[str]]] = None,
+        is_start: bool = False,
+        loop_state: Optional[Dict[str, Union[str, int]]] = None
    ):
+        self.id = str(uuid.uuid4()) if id is None else str(id)
+        self.name = name
        self.description = description
        self.expected_output = expected_output
-        self.name = name
        self.agent = agent
        self.tools = tools if tools else []
        self.context = context if context else []
@@ -39,10 +47,18 @@ class Task:
        self.status = status
        self.result = result
        self.create_directory = create_directory
-        self.
+        self.images = images if images else []
+        self.next_tasks = next_tasks if next_tasks else []
+        self.task_type = task_type
+        self.condition = condition if condition else {}
+        self.is_start = is_start
+        self.loop_state = loop_state if loop_state else {}

        if self.output_json and self.output_pydantic:
            raise ValueError("Only one output type can be defined")

+        # Track previous tasks based on next_tasks relationships
+        self.previous_tasks = []
+
    def __str__(self):
        return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
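Task ids are now uuid4 strings by default (an explicit id is coerced to str as well), and previous_tasks starts empty until workflow wiring fills it from next_tasks. A tiny check, assuming description and expected_output are the only required arguments:

from praisonaiagents.task.task import Task

t = Task(description="demo task", expected_output="anything")
print(type(t.id).__name__)  # 'str' -- a uuid4 string unless an explicit id was given
print(t.previous_tasks)     # [] -- populated later by PraisonAIAgents workflow wiring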