praisonaiagents 0.0.15.tar.gz → 0.0.17.tar.gz

Files changed (27)
  1. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/agent/agent.py +151 -3
  3. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/agents/agents.py +206 -8
  4. praisonaiagents-0.0.17/praisonaiagents/process/process.py +534 -0
  5. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/task/task.py +13 -3
  6. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents.egg-info/PKG-INFO +1 -1
  7. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/pyproject.toml +1 -1
  8. praisonaiagents-0.0.15/praisonaiagents/process/process.py +0 -262
  9. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/__init__.py +0 -0
  10. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/agent/__init__.py +0 -0
  11. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/agents/__init__.py +0 -0
  12. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -0
  13. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -0
  14. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -0
  15. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -0
  16. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -0
  17. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/main.py +0 -0
  18. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/task/__init__.py +0 -0
  19. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -0
  20. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/main.py +0 -0
  21. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/process/__init__.py +0 -0
  22. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents/task/__init__.py +0 -0
  23. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  24. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  25. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents.egg-info/requires.txt +0 -0
  26. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/praisonaiagents.egg-info/top_level.txt +0 -0
  27. {praisonaiagents-0.0.15 → praisonaiagents-0.0.17}/setup.cfg +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.15
+Version: 0.0.17
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents/agent/agent.py

@@ -1,9 +1,12 @@
-import logging
-import json
+import os
 import time
+import json
+import logging
+import asyncio
 from typing import List, Optional, Any, Dict, Union, Literal
 from rich.console import Console
 from rich.live import Live
+from openai import AsyncOpenAI
 from ..main import (
     display_error,
     display_tool_call,
@@ -192,6 +195,12 @@ class Agent:
         self.min_reflect = min_reflect
         self.reflect_llm = reflect_llm
         self.console = Console()  # Create a single console instance for the agent
+
+        # Initialize system prompt
+        self.system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}
+        """
 
     def execute_tool(self, function_name, arguments):
         """
@@ -536,4 +545,143 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[len("```"):].strip()
         if cleaned.endswith("```"):
             cleaned = cleaned[:-3].strip()
-        return cleaned
+        return cleaned
+
+    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None):
+        """Async version of chat method"""
+        try:
+            # Build system prompt
+            system_prompt = self.system_prompt
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
+
+            # Build messages
+            if isinstance(prompt, str):
+                messages = [
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if output_json else "")}
+                ]
+            else:
+                # For multimodal prompts
+                messages = [
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": prompt}
+                ]
+                if output_json:
+                    # Add JSON instruction to text content
+                    for item in messages[-1]["content"]:
+                        if item["type"] == "text":
+                            item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                            break
+
+            # Format tools if provided
+            formatted_tools = []
+            if tools:
+                for tool in tools:
+                    if isinstance(tool, str):
+                        tool_def = self._generate_tool_definition(tool)
+                        if tool_def:
+                            formatted_tools.append(tool_def)
+                    elif isinstance(tool, dict):
+                        formatted_tools.append(tool)
+                    elif hasattr(tool, "to_openai_tool"):
+                        formatted_tools.append(tool.to_openai_tool())
+                    elif callable(tool):
+                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+            # Create async OpenAI client
+            async_client = AsyncOpenAI()
+
+            # Make the API call based on the type of request
+            if tools:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools
+                )
+                return await self._achat_completion(response, tools)
+            elif output_json:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    response_format={"type": "json_object"}
+                )
+                result = response.choices[0].message.content
+                # Clean and parse the JSON response
+                cleaned_json = self.clean_json_output(result)
+                try:
+                    parsed = json.loads(cleaned_json)
+                    return output_json(**parsed)
+                except Exception as e:
+                    display_error(f"Error parsing JSON response: {e}")
+                    return None
+            else:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature
+                )
+                return response.choices[0].message.content
+        except Exception as e:
+            display_error(f"Error in chat completion: {e}")
+            return None
+
+    async def _achat_completion(self, response, tools):
+        """Async version of _chat_completion method"""
+        try:
+            message = response.choices[0].message
+            if not hasattr(message, 'tool_calls') or not message.tool_calls:
+                return message.content
+
+            results = []
+            for tool_call in message.tool_calls:
+                try:
+                    function_name = tool_call.function.name
+                    arguments = json.loads(tool_call.function.arguments)
+
+                    # Find the matching tool
+                    tool = next((t for t in tools if t.__name__ == function_name), None)
+                    if not tool:
+                        display_error(f"Tool {function_name} not found")
+                        continue
+
+                    # Check if the tool is async
+                    if asyncio.iscoroutinefunction(tool):
+                        result = await tool(**arguments)
+                    else:
+                        # Run sync function in executor to avoid blocking
+                        loop = asyncio.get_event_loop()
+                        result = await loop.run_in_executor(None, lambda: tool(**arguments))
+
+                    results.append(result)
+                except Exception as e:
+                    display_error(f"Error executing tool {function_name}: {e}")
+                    results.append(None)
+
+            # If we have results, format them into a response
+            if results:
+                formatted_results = "\n".join([str(r) for r in results if r is not None])
+                if formatted_results:
+                    messages = [
+                        {"role": "system", "content": self.system_prompt},
+                        {"role": "assistant", "content": "Here are the tool results:"},
+                        {"role": "user", "content": formatted_results + "\nPlease process these results and provide a final response."}
+                    ]
+                    try:
+                        async_client = AsyncOpenAI()
+                        final_response = await async_client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=0.2
+                        )
+                        return final_response.choices[0].message.content
+                    except Exception as e:
+                        display_error(f"Error in final chat completion: {e}")
+                        return formatted_results
+                return formatted_results
+            return None
+        except Exception as e:
+            display_error(f"Error in _achat_completion: {e}")
+            return None
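
A short usage sketch of the new async chat path (assumptions: Agent is exported from the package root with the same role/goal/backstory keywords as above, OPENAI_API_KEY is set for the AsyncOpenAI client, and the agent's default self.llm model is available; none of this is shown in the diff):

import asyncio
from pydantic import BaseModel
from praisonaiagents import Agent

class CityInfo(BaseModel):  # hypothetical output model for illustration
    city: str
    country: str

async def main():
    agent = Agent(role="Assistant", goal="Answer questions", backstory="You answer concisely.")
    # Plain async completion
    text = await agent.achat("Name one European capital.")
    # Structured output: achat appends the model's JSON schema to the system prompt,
    # requests a JSON response, and parses it into CityInfo (returning None on failure)
    info = await agent.achat("Give a city and its country.", output_json=CityInfo)
    print(text, info)

asyncio.run(main())
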
praisonaiagents/agents/agents.py

@@ -10,10 +10,8 @@ from rich.console import Console
 from ..main import display_error, TaskOutput, error_logs, client
 from ..agent.agent import Agent
 from ..task.task import Task
-from ..process.process import Process
-
-class LoopItems(BaseModel):
-    items: List[Any]
+from ..process.process import Process, LoopItems
+import asyncio
 
 def encode_file_to_base64(file_path: str) -> str:
     """Base64-encode a file."""
@@ -85,7 +83,8 @@ class PraisonAIAgents:
             return True
         return len(agent_output.strip()) > 0
 
-    def execute_task(self, task_id):
+    async def aexecute_task(self, task_id):
+        """Async version of execute_task method"""
         if task_id not in self.tasks:
             display_error(f"Error: Task with ID {task_id} does not exist")
             return
@@ -162,12 +161,12 @@ Expected Output: {task.expected_output}.
                         })
                 return content
 
-            agent_output = executor_agent.chat(
+            agent_output = await executor_agent.achat(
                 _get_multimodal_message(task_prompt, task.images),
                 tools=task.tools
             )
         else:
-            agent_output = executor_agent.chat(task_prompt, tools=task.tools)
+            agent_output = await executor_agent.achat(task_prompt, tools=task.tools)
 
         if agent_output:
             task_output = TaskOutput(
@@ -205,6 +204,83 @@ Expected Output: {task.expected_output}.
             task.status = "failed"
             return None
 
+    async def arun_task(self, task_id):
+        """Async version of run_task method"""
+        if task_id not in self.tasks:
+            display_error(f"Error: Task with ID {task_id} does not exist")
+            return
+        task = self.tasks[task_id]
+        if task.status == "completed":
+            logging.info(f"Task with ID {task_id} is already completed")
+            return
+
+        retries = 0
+        while task.status != "completed" and retries < self.max_retries:
+            logging.debug(f"Attempt {retries+1} for task {task_id}")
+            if task.status in ["not started", "in progress"]:
+                task_output = await self.aexecute_task(task_id)
+                if task_output and self.completion_checker(task, task_output.raw):
+                    task.status = "completed"
+                    if task.callback:
+                        await task.execute_callback(task_output)
+                    self.save_output_to_file(task, task_output)
+                    if self.verbose >= 1:
+                        logging.info(f"Task {task_id} completed successfully.")
+                else:
+                    task.status = "in progress"
+                    if self.verbose >= 1:
+                        logging.info(f"Task {task_id} not completed, retrying")
+                    await asyncio.sleep(1)
+                    retries += 1
+            else:
+                if task.status == "failed":
+                    logging.info("Task is failed, resetting to in-progress for another try...")
+                    task.status = "in progress"
+                else:
+                    logging.info("Invalid Task status")
+                    break
+
+        if retries == self.max_retries and task.status != "completed":
+            logging.info(f"Task {task_id} failed after {self.max_retries} retries.")
+
+    async def arun_all_tasks(self):
+        """Async version of run_all_tasks method"""
+        process = Process(
+            tasks=self.tasks,
+            agents=self.agents,
+            manager_llm=self.manager_llm,
+            verbose=self.verbose
+        )
+
+        if self.process == "workflow":
+            async for task_id in process.aworkflow():
+                if self.tasks[task_id].async_execution:
+                    await self.arun_task(task_id)
+                else:
+                    self.run_task(task_id)
+        elif self.process == "sequential":
+            async for task_id in process.asequential():
+                if self.tasks[task_id].async_execution:
+                    await self.arun_task(task_id)
+                else:
+                    self.run_task(task_id)
+        elif self.process == "hierarchical":
+            async for task_id in process.ahierarchical():
+                if isinstance(task_id, Task):
+                    task_id = self.add_task(task_id)
+                if self.tasks[task_id].async_execution:
+                    await self.arun_task(task_id)
+                else:
+                    self.run_task(task_id)
+
+    async def astart(self):
+        """Async version of start method"""
+        await self.arun_all_tasks()
+        return {
+            "task_status": self.get_all_tasks_status(),
+            "task_results": {task_id: self.get_task_result(task_id) for task_id in self.tasks}
+        }
+
     def save_output_to_file(self, task, task_output):
         if task.output_file:
             try:
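
An end-to-end sketch of the new async entry point (assumptions: Agent, Task and PraisonAIAgents are exported from the package root; Task accepts description, expected_output, agent and async_execution arguments as the attribute accesses above imply; PraisonAIAgents takes agents, tasks and process keywords; only astart/arun_all_tasks/arun_task themselves are confirmed by this diff):

import asyncio
from praisonaiagents import Agent, Task, PraisonAIAgents

writer = Agent(role="Writer", goal="Write a haiku", backstory="You are a poet.")
task = Task(
    description="Write a haiku about the sea.",
    expected_output="A three-line haiku.",
    agent=writer,
    async_execution=True,  # routed to arun_task(); tasks without it fall back to run_task()
)

agents = PraisonAIAgents(agents=[writer], tasks=[task], process="sequential")
result = asyncio.run(agents.astart())
# astart() returns {"task_status": ..., "task_results": ...}
print(result["task_results"])
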
@@ -217,7 +293,129 @@ Expected Output: {task.expected_output}.
             except Exception as e:
                 display_error(f"Error saving task output to file: {e}")
 
+    def execute_task(self, task_id):
+        """Synchronous version of execute_task method"""
+        if task_id not in self.tasks:
+            display_error(f"Error: Task with ID {task_id} does not exist")
+            return
+        task = self.tasks[task_id]
+
+        # Only import multimodal dependencies if task has images
+        if task.images and task.status == "not started":
+            try:
+                import cv2
+                import base64
+                from moviepy import VideoFileClip
+            except ImportError as e:
+                display_error(f"Error: Missing required dependencies for image/video processing: {e}")
+                display_error("Please install with: pip install opencv-python moviepy")
+                task.status = "failed"
+                return None
+
+        if task.status == "not started":
+            task.status = "in progress"
+
+        executor_agent = task.agent
+
+        task_prompt = f"""
+You need to do the following task: {task.description}.
+Expected Output: {task.expected_output}.
+"""
+        if task.context:
+            context_results = ""
+            for context_task in task.context:
+                if context_task.result:
+                    context_results += f"Result of previous task {context_task.name if context_task.name else context_task.description}: {context_task.result.raw}\n"
+                else:
+                    context_results += f"Previous task {context_task.name if context_task.name else context_task.description} had no result.\n"
+            task_prompt += f"""
+Here are the results of previous tasks that might be useful:\n
+{context_results}
+"""
+        task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."
+
+        if self.verbose >= 2:
+            logging.info(f"Executing task {task_id}: {task.description} using {executor_agent.name}")
+            logging.debug(f"Starting execution of task {task_id} with prompt:\n{task_prompt}")
+
+        if task.images:
+            def _get_multimodal_message(text_prompt, images):
+                content = [{"type": "text", "text": text_prompt}]
+
+                for img in images:
+                    # If local file path for a valid image
+                    if os.path.exists(img):
+                        ext = os.path.splitext(img)[1].lower()
+                        # If it's a .mp4, convert to frames
+                        if ext == ".mp4":
+                            frames = process_video(img, seconds_per_frame=1)
+                            content.append({"type": "text", "text": "These are frames from the video."})
+                            for f in frames:
+                                content.append({
+                                    "type": "image_url",
+                                    "image_url": {"url": f"data:image/jpg;base64,{f}"}
+                                })
+                        else:
+                            encoded = encode_file_to_base64(img)
+                            content.append({
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": f"data:image/{ext.lstrip('.')};base64,{encoded}"
+                                }
+                            })
+                    else:
+                        # Treat as a remote URL
+                        content.append({
+                            "type": "image_url",
+                            "image_url": {"url": img}
+                        })
+                return content
+
+            agent_output = executor_agent.chat(
+                _get_multimodal_message(task_prompt, task.images),
+                tools=task.tools
+            )
+        else:
+            agent_output = executor_agent.chat(task_prompt, tools=task.tools)
+
+        if agent_output:
+            task_output = TaskOutput(
+                description=task.description,
+                summary=task.description[:10],
+                raw=agent_output,
+                agent=executor_agent.name,
+                output_format="RAW"
+            )
+
+            if task.output_json:
+                cleaned = self.clean_json_output(agent_output)
+                try:
+                    parsed = json.loads(cleaned)
+                    task_output.json_dict = parsed
+                    task_output.output_format = "JSON"
+                except:
+                    logging.warning(f"Warning: Could not parse output of task {task_id} as JSON")
+                    logging.debug(f"Output that failed JSON parsing: {agent_output}")
+
+            if task.output_pydantic:
+                cleaned = self.clean_json_output(agent_output)
+                try:
+                    parsed = json.loads(cleaned)
+                    pyd_obj = task.output_pydantic(**parsed)
+                    task_output.pydantic = pyd_obj
+                    task_output.output_format = "Pydantic"
+                except:
+                    logging.warning(f"Warning: Could not parse output of task {task_id} as Pydantic Model")
+                    logging.debug(f"Output that failed Pydantic parsing: {agent_output}")
+
+            task.result = task_output
+            return task_output
+        else:
+            task.status = "failed"
+            return None
+
     def run_task(self, task_id):
+        """Synchronous version of run_task method"""
         if task_id not in self.tasks:
             display_error(f"Error: Task with ID {task_id} does not exist")
             return
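
The output_pydantic branch above can be exercised with a sketch like the following (assumptions: Task accepts an output_pydantic argument and PraisonAIAgents exposes a synchronous start() that drives run_all_tasks(); the Haiku model is hypothetical):

from typing import List
from pydantic import BaseModel
from praisonaiagents import Agent, Task, PraisonAIAgents

class Haiku(BaseModel):  # hypothetical model for illustration
    lines: List[str]

poet = Agent(role="Poet", goal="Write haikus", backstory="You write structured haikus.")
task = Task(
    description="Write a haiku about rain as JSON with a 'lines' field.",
    expected_output="A JSON object with a 'lines' list of three strings.",
    agent=poet,
    output_pydantic=Haiku,
)

PraisonAIAgents(agents=[poet], tasks=[task]).start()
# On success, task.result.pydantic is a Haiku instance and output_format == "Pydantic";
# if parsing fails, the raw text is still available on task.result.raw.
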
@@ -256,7 +454,7 @@ Expected Output: {task.expected_output}.
             logging.info(f"Task {task_id} failed after {self.max_retries} retries.")
 
     def run_all_tasks(self):
-        """Execute tasks based on execution mode"""
+        """Synchronous version of run_all_tasks method"""
         process = Process(
             tasks=self.tasks,
             agents=self.agents,