praisonaiagents 0.0.13__py3-none-any.whl → 0.0.14__py3-none-any.whl

praisonaiagents/agent/agent.py
@@ -353,13 +353,28 @@ class Agent:
 Your Role: {self.role}\n
 Your Goal: {self.goal}
             """
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
         else:
             system_prompt = None
-
+
         messages = []
         if system_prompt:
             messages.append({"role": "system", "content": system_prompt})
         messages.extend(self.chat_history)
+
+        # Modify prompt if output_json is specified
+        original_prompt = prompt
+        if output_json:
+            if isinstance(prompt, str):
+                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # For multimodal prompts, append to the text content
+                for item in prompt:
+                    if item["type"] == "text":
+                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
         if isinstance(prompt, list):
             # If we receive a multimodal prompt list, place it directly in the user message
             messages.append({"role": "user", "content": prompt})
@@ -385,13 +400,14 @@ Your Goal: {self.goal}
             response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
             if not response:
                 return None
-
+
             tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
+            response_text = response.choices[0].message.content.strip()
 
             if tool_calls:
                 messages.append({
                     "role": "assistant",
-                    "content": response.choices[0].message.content,
+                    "content": response_text,
                     "tool_calls": tool_calls
                 })
 
@@ -423,15 +439,31 @@ Your Goal: {self.goal}
                 if not response:
                     return None
                 response_text = response.choices[0].message.content.strip()
-            else:
-                response_text = response.choices[0].message.content.strip()
+
+            # Handle output_json if specified
+            if output_json:
+                try:
+                    # Clean the response text to get only JSON
+                    cleaned_json = self.clean_json_output(response_text)
+                    # Parse into Pydantic model
+                    parsed_model = output_json.model_validate_json(cleaned_json)
+                    # Add to chat history and return
+                    self.chat_history.append({"role": "user", "content": original_prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    if self.verbose:
+                        display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                            generation_time=time.time() - start_time, console=self.console)
+                    return parsed_model
+                except Exception as e:
+                    display_error(f"Failed to parse response as {output_json.__name__}: {e}")
+                    return None
 
             if not self.self_reflect:
-                self.chat_history.append({"role": "user", "content": prompt})
+                self.chat_history.append({"role": "user", "content": original_prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 if self.verbose:
                     logging.info(f"Agent {self.name} final response: {response_text}")
-                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                    display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 return response_text
 
             reflection_prompt = f"""
@@ -492,4 +524,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
         except Exception as e:
             display_error(f"Error in chat: {e}", console=self.console)
-            return None
+            return None
+
+    def clean_json_output(self, output: str) -> str:
+        """Clean and extract JSON from response text."""
+        cleaned = output.strip()
+        # Remove markdown code blocks if present
+        if cleaned.startswith("```json"):
+            cleaned = cleaned[len("```json"):].strip()
+        if cleaned.startswith("```"):
+            cleaned = cleaned[len("```"):].strip()
+        if cleaned.endswith("```"):
+            cleaned = cleaned[:-3].strip()
+        return cleaned
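Taken together, these agent.py changes let Agent.chat() return a parsed Pydantic object instead of raw text when output_json is passed. A minimal usage sketch under that assumption (the Analysis model and the agent configuration below are illustrative, not part of the package; an OpenAI-compatible API key must be configured for the underlying client):

    from pydantic import BaseModel
    from praisonaiagents import Agent

    class Analysis(BaseModel):
        # Illustrative schema; chat() injects its JSON schema into the system prompt.
        summary: str
        score: int

    agent = Agent(
        name="Analyst",
        role="Data analyst",
        goal="Summarise input text",
        backstory="Expert summariser",
    )

    # With output_json set, chat() strips markdown fences via clean_json_output()
    # and validates the reply; it returns an Analysis instance, or None on failure.
    result = agent.chat("Summarise: PraisonAI 0.0.14 adds workflow support.", output_json=Analysis)
    if result is not None:
        print(result.summary, result.score)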
praisonaiagents/agents/agents.py
@@ -2,7 +2,7 @@ import os
 import time
 import json
 import logging
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, List
 from pydantic import BaseModel
 from rich.text import Text
 from rich.panel import Panel
@@ -11,6 +11,9 @@ from ..main import display_error, TaskOutput, error_logs, client
 from ..agent.agent import Agent
 from ..task.task import Task
 
+class LoopItems(BaseModel):
+    items: List[Any]
+
 def encode_file_to_base64(file_path: str) -> str:
     """Base64-encode a file."""
     import base64
@@ -55,6 +58,7 @@ class PraisonAIAgents:
         for task in tasks:
             self.add_task(task)
             task.status = "not started"
+        self._state = {} # Add state storage at PraisonAIAgents level
 
     def add_task(self, task):
         task_id = self.task_id_counter
@@ -251,11 +255,144 @@ Expected Output: {task.expected_output}.
             logging.info(f"Task {task_id} failed after {self.max_retries} retries.")
 
     def run_all_tasks(self):
-        if self.process == "sequential":
+        """Execute tasks based on execution mode"""
+        if self.process == "workflow":
+            # Build workflow relationships first
+            for task in self.tasks.values():
+                if task.next_tasks:
+                    for next_task_name in task.next_tasks:
+                        next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+                        if next_task:
+                            next_task.previous_tasks.append(task.name)
+
+            # Find start task
+            start_task = None
+            for task_id, task in self.tasks.items():
+                if task.is_start:
+                    start_task = task
+                    break
+
+            if not start_task:
+                start_task = list(self.tasks.values())[0]
+                logging.info("No start task marked, using first task")
+
+            current_task = start_task
+            visited_tasks = set()
+            loop_data = {} # Store loop-specific data
+
+            while current_task and current_task.id not in visited_tasks:
+                task_id = current_task.id
+                logging.info(f"Executing workflow task: {current_task.name if current_task.name else task_id}")
+
+                # Add context from previous tasks to description
+                if current_task.previous_tasks or current_task.context:
+                    context = "\nInput data from previous tasks:"
+
+                    # Add data from previous tasks in workflow
+                    for prev_name in current_task.previous_tasks:
+                        prev_task = next((t for t in self.tasks.values() if t.name == prev_name), None)
+                        if prev_task and prev_task.result:
+                            # Handle loop data
+                            if current_task.task_type == "loop":
+                                # create a loop manager Agent
+                                loop_manager = Agent(
+                                    name="Loop Manager",
+                                    role="Loop data processor",
+                                    goal="Process loop data and convert it to list format",
+                                    backstory="Expert at handling loop data and converting it to proper format",
+                                    llm=self.manager_llm,
+                                    verbose=self.verbose,
+                                    markdown=True
+                                )
+
+                                # get the loop data convert it to list using calling Agent class chat
+                                loop_prompt = f"""
+                                Process this data into a list format:
+                                {prev_task.result.raw}
+
+                                Return a JSON object with an 'items' array containing the items to process.
+                                """
+                                loop_data_str = loop_manager.chat(
+                                    prompt=loop_prompt,
+                                    output_json=LoopItems
+                                )
+
+                                try:
+                                    # The response will already be parsed into LoopItems model
+                                    loop_data[f"loop_{current_task.name}"] = {
+                                        "items": loop_data_str.items,
+                                        "index": 0,
+                                        "remaining": len(loop_data_str.items)
+                                    }
+                                    context += f"\nCurrent loop item: {loop_data_str.items[0]}"
+                                except Exception as e:
+                                    display_error(f"Failed to process loop data: {e}")
+                                    context += f"\n{prev_name}: {prev_task.result.raw}"
+                            else:
+                                context += f"\n{prev_name}: {prev_task.result.raw}"
+
+                    # Add data from context tasks
+                    if current_task.context:
+                        for ctx_task in current_task.context:
+                            if ctx_task.result and ctx_task.name != current_task.name:
+                                context += f"\n{ctx_task.name}: {ctx_task.result.raw}"
+
+                    # Update task description with context
+                    current_task.description = current_task.description + context
+
+                # Execute task using existing run_task method
+                self.run_task(task_id)
+                visited_tasks.add(task_id)
+
+                # Handle loop progression
+                if current_task.task_type == "loop":
+                    loop_key = f"loop_{current_task.name}"
+                    if loop_key in loop_data:
+                        loop_info = loop_data[loop_key]
+                        loop_info["index"] += 1
+                        has_more = loop_info["remaining"] > 0
+
+                        # Update result to trigger correct condition
+                        if current_task.result:
+                            result = current_task.result.raw
+                            if has_more:
+                                result += "\nmore"
+                            else:
+                                result += "\ndone"
+                            current_task.result.raw = result
+
+                # Determine next task based on result
+                next_task = None
+                if current_task.result:
+                    if current_task.task_type in ["decision", "loop"]:
+                        result = current_task.result.raw.lower()
+                        # Check conditions
+                        for condition, tasks in current_task.condition.items():
+                            if condition.lower() in result and tasks:
+                                next_task_name = tasks[0]
+                                next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+                                # For loops, allow revisiting the same task
+                                if next_task and next_task.id == current_task.id:
+                                    visited_tasks.discard(current_task.id)
+                                break
+
+                if not next_task and current_task.next_tasks:
+                    next_task_name = current_task.next_tasks[0]
+                    next_task = next((t for t in self.tasks.values() if t.name == next_task_name), None)
+
+                current_task = next_task
+                if not current_task:
+                    logging.info("Workflow execution completed")
+                    break
+
+        elif self.process == "sequential":
+            # Keep original sequential execution
             for task_id in self.tasks:
                 if self.tasks[task_id].status != "completed":
                     self.run_task(task_id)
+
         elif self.process == "hierarchical":
+            # Keep original hierarchical execution
             logging.debug(f"Starting hierarchical task execution with {len(self.tasks)} tasks")
             manager_agent = Agent(
                 name="Manager",
@@ -397,4 +534,20 @@ Provide a JSON with the structure:
         return {
             "task_status": self.get_all_tasks_status(),
             "task_results": {task_id: self.get_task_result(task_id) for task_id in self.tasks}
-        }
+        }
+
+    def set_state(self, key: str, value: Any) -> None:
+        """Set a state value"""
+        self._state[key] = value
+
+    def get_state(self, key: str, default: Any = None) -> Any:
+        """Get a state value"""
+        return self._state.get(key, default)
+
+    def update_state(self, updates: Dict) -> None:
+        """Update multiple state values"""
+        self._state.update(updates)
+
+    def clear_state(self) -> None:
+        """Clear all state values"""
+        self._state.clear()
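The _state dictionary initialised in the constructor backs these four helpers, giving a PraisonAIAgents instance a simple shared key-value store for a run. A brief usage sketch (assumes `agents` is an already constructed PraisonAIAgents instance; the keys and values are made up for illustration):

    # Stash run-scoped values that calling code can read back later.
    agents.set_state("run_id", "demo-001")
    agents.update_state({"stage": "draft", "retries": 0})

    print(agents.get_state("stage"))            # -> "draft"
    print(agents.get_state("missing", "n/a"))   # -> "n/a" (default when key is absent)

    agents.clear_state()                        # wipe everything between runs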
praisonaiagents/task/task.py
@@ -1,8 +1,9 @@
 import logging
-from typing import List, Optional, Dict, Any, Type
+from typing import List, Optional, Dict, Any, Type, Callable, Union
 from pydantic import BaseModel
 from ..main import TaskOutput
 from ..agent.agent import Agent
+import uuid
 
 class Task:
     def __init__(
8
9
  def __init__(
@@ -23,11 +24,17 @@ class Task:
23
24
  result: Optional[TaskOutput] = None,
24
25
  create_directory: Optional[bool] = False,
25
26
  id: Optional[int] = None,
26
- images: Optional[List[str]] = None
27
+ images: Optional[List[str]] = None,
28
+ next_tasks: Optional[List[str]] = None,
29
+ task_type: str = "task",
30
+ condition: Optional[Dict[str, List[str]]] = None,
31
+ is_start: bool = False,
32
+ loop_state: Optional[Dict[str, Union[str, int]]] = None
27
33
  ):
34
+ self.id = str(uuid.uuid4()) if id is None else str(id)
35
+ self.name = name
28
36
  self.description = description
29
37
  self.expected_output = expected_output
30
- self.name = name
31
38
  self.agent = agent
32
39
  self.tools = tools if tools else []
33
40
  self.context = context if context else []
@@ -40,11 +47,18 @@ class Task:
         self.status = status
         self.result = result
         self.create_directory = create_directory
-        self.id = id
         self.images = images if images else []
+        self.next_tasks = next_tasks if next_tasks else []
+        self.task_type = task_type
+        self.condition = condition if condition else {}
+        self.is_start = is_start
+        self.loop_state = loop_state if loop_state else {}
 
         if self.output_json and self.output_pydantic:
             raise ValueError("Only one output type can be defined")
 
+        # Track previous tasks based on next_tasks relationships
+        self.previous_tasks = []
+
     def __str__(self):
         return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.13
+Version: 0.0.14
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
@@ -1,9 +1,9 @@
 praisonaiagents/__init__.py,sha256=gI8vEabBTRPsE_E8GA5sBMi4sTtJI-YokPrH2Nor-k0,741
 praisonaiagents/main.py,sha256=K2OxVKPmo4dNJbSWIsXDi_hm9CRx5O4km_74UGcszhk,5744
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=QjzduZ97-gqFiU6O89EIL0fyTAzTa7RwCoLl29S6-7w,21773
+praisonaiagents/agent/agent.py,sha256=zTYcDpJ5DzzBnefwLvhrtBlGQoRI4ZZAioDu5nKTPSs,24042
 praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
-praisonaiagents/agents/agents.py,sha256=X02q695IeScdZKQQI9xNpaGAPeaLxSC26iIFlI5_E0g,16941
+praisonaiagents/agents/agents.py,sha256=ITvH8Yq_OzhyMC_Aid4qlqQbEM9cCfp7SayXg0ASJ5k,24526
 praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 praisonaiagents/build/lib/praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
 praisonaiagents/build/lib/praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
@@ -13,8 +13,8 @@ praisonaiagents/build/lib/praisonaiagents/agents/agents.py,sha256=P2FAtlfD3kPib5
 praisonaiagents/build/lib/praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/build/lib/praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=-0GZ8FDo9Sq2Lkwz25Utliuq50FcexwhnZNuQtA3NLw,1922
-praisonaiagents-0.0.13.dist-info/METADATA,sha256=ErtmhU_c6J52-LFUOvMPsBwQztVGkF529xla0L2YWz4,233
-praisonaiagents-0.0.13.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-praisonaiagents-0.0.13.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.13.dist-info/RECORD,,
+praisonaiagents/task/task.py,sha256=oMC5Zz1dMj0Ceice69aBS1KQQXMLqphc8wNOQ9zcu0Q,2570
+praisonaiagents-0.0.14.dist-info/METADATA,sha256=-pdlX7m7Sr2IovIrRt9QyBfkiwgK81rEd3_VklcmHNs,233
+praisonaiagents-0.0.14.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+praisonaiagents-0.0.14.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.14.dist-info/RECORD,,