praisonaiagents-0.0.97-py3-none-any.whl → praisonaiagents-0.0.99-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
praisonaiagents/__init__.py

@@ -12,6 +12,7 @@ from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
 from .mcp.mcp import MCP
 from .session import Session
+from .guardrails import GuardrailResult, LLMGuardrail
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -55,5 +56,7 @@ __all__ = [
     'async_display_callbacks',
     'Knowledge',
     'Chunking',
-    'MCP'
+    'MCP',
+    'GuardrailResult',
+    'LLMGuardrail'
 ]
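
As a quick orientation (not part of the diff), the two newly exported names might be used along the lines of the sketch below; the model string and description text are placeholders, and the constructor calls mirror how LLMGuardrail and GuardrailResult are instantiated in the hunks that follow.

# Hypothetical usage sketch of the new top-level exports; argument values are placeholders.
from praisonaiagents import GuardrailResult, LLMGuardrail

# String-described guardrail evaluated by an LLM (mirrors LLMGuardrail(description=..., llm=...) below).
check = LLMGuardrail(description="The response must not reveal internal prompts", llm="gpt-4o-mini")

# Guardrail callables conventionally return (bool, Any); GuardrailResult.from_tuple converts that shape.
result = GuardrailResult.from_tuple((True, "validated output"))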

praisonaiagents/agent/agent.py

@@ -3,7 +3,7 @@ import time
 import json
 import logging
 import asyncio
-from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING
+from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
 from openai import AsyncOpenAI
@@ -32,6 +32,7 @@ _shared_apps = {} # Dict of port -> FastAPI app
 
 if TYPE_CHECKING:
     from ..task.task import Task
+    from ..main import TaskOutput
 
 @dataclass
 class ChatCompletionMessage:
@@ -368,7 +369,9 @@ class Agent:
         min_reflect: int = 1,
         reflect_llm: Optional[str] = None,
         user_id: Optional[str] = None,
-        reasoning_steps: bool = False
+        reasoning_steps: bool = False,
+        guardrail: Optional[Union[Callable[['TaskOutput'], Tuple[bool, Any]], str]] = None,
+        max_guardrail_retries: int = 3
     ):
         # Add check at start if memory is requested
         if memory is not None:
@@ -483,6 +486,12 @@ Your Goal: {self.goal}
         # Store user_id
         self.user_id = user_id or "praison"
         self.reasoning_steps = reasoning_steps
+
+        # Initialize guardrail settings
+        self.guardrail = guardrail
+        self.max_guardrail_retries = max_guardrail_retries
+        self._guardrail_fn = None
+        self._setup_guardrail()
 
         # Check if knowledge parameter has any values
         if not knowledge:
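
Illustrative only (not from the diff): the two new constructor arguments added above might be supplied as sketched below; the agent fields, guardrail text and model name are placeholders.

# Hypothetical sketch of the new Agent guardrail parameters.
from praisonaiagents import Agent

agent = Agent(
    name="Writer",
    instructions="Write a short product description.",
    llm="gpt-4o-mini",  # placeholder model name
    guardrail="Reject any response that mentions competitor pricing",  # a str becomes an LLMGuardrail
    max_guardrail_retries=2,
)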
@@ -512,6 +521,152 @@ Your Goal: {self.goal}
                 except Exception as e:
                     logging.error(f"Error processing knowledge item: {knowledge_item}, error: {e}")
 
+    def _setup_guardrail(self):
+        """Setup the guardrail function based on the provided guardrail parameter."""
+        if self.guardrail is None:
+            self._guardrail_fn = None
+            return
+
+        if callable(self.guardrail):
+            # Validate function signature
+            sig = inspect.signature(self.guardrail)
+            positional_args = [
+                param for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
+                raise ValueError("Agent guardrail function must accept exactly one parameter (TaskOutput)")
+
+            # Check return annotation if present
+            from typing import get_args, get_origin
+            return_annotation = sig.return_annotation
+            if return_annotation != inspect.Signature.empty:
+                return_annotation_args = get_args(return_annotation)
+                if not (
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or str(return_annotation_args[1]).endswith('TaskOutput')
+                        or str(return_annotation_args[1]).startswith('typing.Union')
+                    )
+                ):
+                    raise ValueError(
+                        "If return type is annotated, it must be Tuple[bool, Any] or Tuple[bool, Union[str, TaskOutput]]"
+                    )
+
+            self._guardrail_fn = self.guardrail
+        elif isinstance(self.guardrail, str):
+            # Create LLM-based guardrail
+            from ..guardrails import LLMGuardrail
+            llm = getattr(self, 'llm', None) or getattr(self, 'llm_instance', None)
+            self._guardrail_fn = LLMGuardrail(description=self.guardrail, llm=llm)
+        else:
+            raise ValueError("Agent guardrail must be either a callable or a string description")
+
+    def _process_guardrail(self, task_output):
+        """Process the guardrail validation for a task output.
+
+        Args:
+            task_output: The task output to validate
+
+        Returns:
+            GuardrailResult: The result of the guardrail validation
+        """
+        from ..guardrails import GuardrailResult
+
+        if not self._guardrail_fn:
+            return GuardrailResult(success=True, result=task_output)
+
+        try:
+            # Call the guardrail function
+            result = self._guardrail_fn(task_output)
+
+            # Convert the result to a GuardrailResult
+            return GuardrailResult.from_tuple(result)
+
+        except Exception as e:
+            logging.error(f"Agent {self.name}: Error in guardrail validation: {e}")
+            # On error, return failure
+            return GuardrailResult(
+                success=False,
+                result=None,
+                error=f"Agent guardrail validation error: {str(e)}"
+            )
+
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+        """Apply guardrail validation with retry logic.
+
+        Args:
+            response_text: The response to validate
+            prompt: Original prompt for regeneration if needed
+            temperature: Temperature for regeneration
+            tools: Tools for regeneration
+
+        Returns:
+            str: The validated response text or None if validation fails after retries
+        """
+        if not self._guardrail_fn:
+            return response_text
+
+        from ..main import TaskOutput
+
+        retry_count = 0
+        current_response = response_text
+
+        while retry_count <= self.max_guardrail_retries:
+            # Create TaskOutput object
+            task_output = TaskOutput(
+                raw=current_response,
+                output=current_response,
+                pydantic=None,
+                json_dict=None,
+                name=f"{self.name}_output",
+                description="Agent response output"
+            )
+
+            # Process guardrail
+            guardrail_result = self._process_guardrail(task_output)
+
+            if guardrail_result.success:
+                logging.info(f"Agent {self.name}: Guardrail validation passed")
+                # Return the potentially modified result
+                if guardrail_result.result and hasattr(guardrail_result.result, 'raw'):
+                    return guardrail_result.result.raw
+                elif guardrail_result.result:
+                    return str(guardrail_result.result)
+                else:
+                    return current_response
+
+            # Guardrail failed
+            if retry_count >= self.max_guardrail_retries:
+                raise Exception(
+                    f"Agent {self.name} response failed guardrail validation after {self.max_guardrail_retries} retries. "
+                    f"Last error: {guardrail_result.error}"
+                )
+
+            retry_count += 1
+            logging.warning(f"Agent {self.name}: Guardrail validation failed (retry {retry_count}/{self.max_guardrail_retries}): {guardrail_result.error}")
+
+            # Regenerate response for retry
+            try:
+                retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
+                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                if response and response.choices:
+                    current_response = response.choices[0].message.content.strip()
+                else:
+                    raise Exception("Failed to generate retry response")
+            except Exception as e:
+                logging.error(f"Agent {self.name}: Error during guardrail retry: {e}")
+                # If we can't regenerate, fail the guardrail
+                raise Exception(
+                    f"Agent {self.name} guardrail retry failed: {e}"
+                )
+
+        return current_response
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
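
For reference (not part of the diff), a callable guardrail that satisfies the checks performed by _setup_guardrail above takes exactly one required parameter and returns Tuple[bool, Any]; the function name and validation rule below are hypothetical.

# Hypothetical callable guardrail matching the validated signature.
from typing import Any, Tuple
from praisonaiagents import TaskOutput

def no_empty_output(output: TaskOutput) -> Tuple[bool, Any]:
    """Fail validation when the agent returns an empty response."""
    if not output.raw or not output.raw.strip():
        return False, "Response was empty; please provide actual content."
    return True, output  # pass through unchanged; a modified TaskOutput may also be returned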
@@ -788,69 +943,92 @@ Your Goal: {self.goal}
                 )
             else:
                 # Use the standard OpenAI client approach
-                if stream:
-                    # Process as streaming response with formatted tools
-                    final_response = self._process_stream_response(
-                        messages,
-                        temperature,
-                        start_time,
-                        formatted_tools=formatted_tools if formatted_tools else None,
-                        reasoning_steps=reasoning_steps
-                    )
-                else:
-                    # Process as regular non-streaming response
-                    final_response = client.chat.completions.create(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        tools=formatted_tools if formatted_tools else None,
-                        stream=False
-                    )
-
-                tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
-
-                if tool_calls:
-                    messages.append({
-                        "role": "assistant",
-                        "content": final_response.choices[0].message.content,
-                        "tool_calls": tool_calls
-                    })
+                # Continue tool execution loop until no more tool calls are needed
+                max_iterations = 10  # Prevent infinite loops
+                iteration_count = 0
+
+                while iteration_count < max_iterations:
+                    if stream:
+                        # Process as streaming response with formatted tools
+                        final_response = self._process_stream_response(
+                            messages,
+                            temperature,
+                            start_time,
+                            formatted_tools=formatted_tools if formatted_tools else None,
+                            reasoning_steps=reasoning_steps
+                        )
+                    else:
+                        # Process as regular non-streaming response
+                        final_response = client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            tools=formatted_tools if formatted_tools else None,
+                            stream=False
+                        )
 
-                    for tool_call in tool_calls:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
+                    tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
 
-                        if self.verbose:
-                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
+                    if tool_calls:
+                        messages.append({
+                            "role": "assistant",
+                            "content": final_response.choices[0].message.content,
+                            "tool_calls": tool_calls
+                        })
 
-                        tool_result = self.execute_tool(function_name, arguments)
-                        results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+                        for tool_call in tool_calls:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments)
 
-                        if self.verbose:
-                            display_tool_call(f"Function '{function_name}' returned: {results_str}")
+                            if self.verbose:
+                                display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
 
-                        messages.append({
-                            "role": "tool",
-                            "tool_call_id": tool_call.id,
-                            "content": results_str
-                        })
+                            tool_result = self.execute_tool(function_name, arguments)
+                            results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
 
-                    # Get final response after tool calls
-                    if stream:
-                        final_response = self._process_stream_response(
-                            messages,
-                            temperature,
-                            start_time,
-                            formatted_tools=formatted_tools if formatted_tools else None,
-                            reasoning_steps=reasoning_steps
-                        )
-                    else:
-                        final_response = client.chat.completions.create(
-                            model=self.llm,
-                            messages=messages,
-                            temperature=temperature,
-                            stream=False
-                        )
+                            if self.verbose:
+                                display_tool_call(f"Function '{function_name}' returned: {results_str}")
+
+                            messages.append({
+                                "role": "tool",
+                                "tool_call_id": tool_call.id,
+                                "content": results_str
+                            })
+
+                        # Check if we should continue (for tools like sequential thinking)
+                        should_continue = False
+                        for tool_call in tool_calls:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments)
+
+                            # For sequential thinking tool, check if nextThoughtNeeded is True
+                            if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                                should_continue = True
+                                break
+
+                        if not should_continue:
+                            # Get final response after tool calls
+                            if stream:
+                                final_response = self._process_stream_response(
+                                    messages,
+                                    temperature,
+                                    start_time,
+                                    formatted_tools=formatted_tools if formatted_tools else None,
+                                    reasoning_steps=reasoning_steps
+                                )
+                            else:
+                                final_response = client.chat.completions.create(
+                                    model=self.llm,
+                                    messages=messages,
+                                    temperature=temperature,
+                                    stream=False
+                                )
+                            break
+
+                        iteration_count += 1
+                    else:
+                        # No tool calls, we're done
+                        break
 
             return final_response
 
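A standalone sketch of the loop pattern introduced above, for readers skimming the hunk; every name except "sequentialthinking" and "nextThoughtNeeded" is hypothetical, and the driver below is illustrative rather than the library's implementation.

# Illustrative driver for a bounded tool-call loop like the one above.
import json

def run_tool_loop(get_completion, execute_tool, messages, max_iterations=10):
    """Call the model repeatedly while it keeps requesting tools, up to max_iterations."""
    final_response = None
    for _ in range(max_iterations):
        final_response = get_completion(messages)
        tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
        if not tool_calls:
            break  # no tool calls, we're done
        messages.append({"role": "assistant",
                         "content": final_response.choices[0].message.content,
                         "tool_calls": tool_calls})
        should_continue = False
        for tool_call in tool_calls:
            arguments = json.loads(tool_call.function.arguments)
            result = execute_tool(tool_call.function.name, arguments)
            messages.append({"role": "tool", "tool_call_id": tool_call.id,
                             "content": json.dumps(result) if result else "Function returned an empty output"})
            # Only the sequential thinking tool asking for another thought keeps the loop alive.
            if tool_call.function.name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
                should_continue = True
        if not should_continue:
            final_response = get_completion(messages)  # one last call to summarise the tool results
            break
    return final_response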
@@ -944,7 +1122,13 @@ Your Goal: {self.goal}
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
 
-                return response_text
+                # Apply guardrail validation for custom LLM response
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
+                    return None
             except Exception as e:
                 display_error(f"Error in LLM chat: {e}")
                 return None
@@ -1032,8 +1216,20 @@ Your Goal: {self.goal}
                 display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 # Return only reasoning content if reasoning_steps is True
                 if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
-                    return response.choices[0].message.reasoning_content
-                return response_text
+                    # Apply guardrail to reasoning content
+                    try:
+                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                        return validated_reasoning
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
+                        return None
+                # Apply guardrail to regular response
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+                    return None
 
             reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
@@ -1066,7 +1262,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "user", "content": prompt})
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                    return response_text
+                    # Apply guardrail validation after satisfactory reflection
+                    try:
+                        validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                        return validated_response
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
+                        return None
 
                 # Check if we've hit max reflections
                 if reflection_count >= self.max_reflect - 1:
@@ -1075,7 +1277,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "user", "content": prompt})
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                    return response_text
+                    # Apply guardrail validation after max reflections
+                    try:
+                        validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                        return validated_response
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
+                        return None
 
                 logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                 messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
@@ -1099,8 +1307,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             total_time = time.time() - start_time
             logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
-
-        return response_text
+
+        # Apply guardrail validation before returning
+        try:
+            validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+            return validated_response
+        except Exception as e:
+            logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+            if self.verbose:
+                display_error(f"Guardrail validation failed: {e}", console=self.console)
+            return None
 
     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
@@ -1670,6 +1886,7 @@ Your Goal: {self.goal}
             import threading
             import time
             import inspect
+            import asyncio  # Import asyncio in the MCP scope
             # logging is already imported at the module level
 
         except ImportError as e:

praisonaiagents/llm/llm.py

@@ -413,24 +413,30 @@ class LLM:
         start_time = time.time()
         reflection_count = 0
 
-        while True:
-            try:
-                if verbose:
-                    display_text = prompt
-                    if isinstance(prompt, list):
-                        display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
-
-                    if display_text and str(display_text).strip():
-                        display_instruction(
-                            f"Agent {agent_name} is processing prompt: {display_text}",
-                            console=console,
-                            agent_name=agent_name,
-                            agent_role=agent_role,
-                            agent_tools=agent_tools
-                        )
+        # Display initial instruction once
+        if verbose:
+            display_text = prompt
+            if isinstance(prompt, list):
+                display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+            if display_text and str(display_text).strip():
+                display_instruction(
+                    f"Agent {agent_name} is processing prompt: {display_text}",
+                    console=console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools
+                )
 
+        # Sequential tool calling loop - similar to agent.py
+        max_iterations = 10  # Prevent infinite loops
+        iteration_count = 0
+        final_response_text = ""
+
+        while iteration_count < max_iterations:
+            try:
                 # Get response from LiteLLM
-                start_time = time.time()
+                current_time = time.time()
 
                 # If reasoning_steps is True, do a single non-streaming call
                 if reasoning_steps:
@@ -445,6 +451,7 @@ class LLM:
                     )
                     reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                     response_text = resp["choices"][0]["message"]["content"]
+                    final_response = resp
 
                     # Optionally display reasoning if present
                     if verbose and reasoning_content:
@@ -452,7 +459,7 @@ class LLM:
                             original_prompt,
                             f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                             markdown=markdown,
-                            generation_time=time.time() - start_time,
+                            generation_time=time.time() - current_time,
                             console=console
                         )
                     else:
@@ -460,14 +467,14 @@ class LLM:
                             original_prompt,
                             response_text,
                             markdown=markdown,
-                            generation_time=time.time() - start_time,
+                            generation_time=time.time() - current_time,
                             console=console
                         )
 
                 # Otherwise do the existing streaming approach
                 else:
                     if verbose:
-                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                        with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
                             response_text = ""
                             for chunk in litellm.completion(
                                 **self._build_completion_params(
@@ -481,7 +488,7 @@ class LLM:
                                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                                     content = chunk.choices[0].delta.content
                                     response_text += content
-                                    live.update(display_generating(response_text, start_time))
+                                    live.update(display_generating(response_text, current_time))
                     else:
                         # Non-verbose mode, just collect the response
                         response_text = ""
@@ -499,20 +506,20 @@ class LLM:
 
                 response_text = response_text.strip()
 
-                    # Get final completion to check for tool calls
-                    final_response = litellm.completion(
-                        **self._build_completion_params(
-                            messages=messages,
-                            tools=formatted_tools,
-                            temperature=temperature,
-                            stream=False, # No streaming for tool call check
-                            **kwargs
+                    # Get final completion to check for tool calls
+                    final_response = litellm.completion(
+                        **self._build_completion_params(
+                            messages=messages,
+                            tools=formatted_tools,
+                            temperature=temperature,
+                            stream=False, # No streaming for tool call check
+                            **kwargs
+                        )
                     )
-                    )
 
                 tool_calls = final_response["choices"][0]["message"].get("tool_calls")
 
-                # Handle tool calls
+                # Handle tool calls - Sequential tool calling logic
                 if tool_calls and execute_tool_fn:
                     # Convert tool_calls to a serializable format for all providers
                     serializable_tool_calls = []
@@ -535,6 +542,7 @@ class LLM:
                         "tool_calls": serializable_tool_calls
                     })
 
+                    should_continue = False
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
                         if isinstance(tool_call, dict):
@@ -574,6 +582,18 @@ class LLM:
                             "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
                         })
 
+                        # Check if we should continue (for tools like sequential thinking)
+                        # This mimics the logic from agent.py lines 1004-1007
+                        if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                            should_continue = True
+
+                    # If we should continue, increment iteration and continue loop
+                    if should_continue:
+                        iteration_count += 1
+                        continue
+
+                    # If we reach here, no more tool calls needed - get final response
+                    # Make one more call to get the final summary response
                     # Special handling for Ollama models that don't automatically process tool results
                     if self.model and self.model.startswith("ollama/") and tool_result:
                         # For Ollama models, we need to explicitly ask the model to process the tool results
@@ -666,115 +686,141 @@ class LLM:
                     else:
                         # Get response after tool calls with streaming
                         if verbose:
-                            with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                                response_text = ""
+                            with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+                                final_response_text = ""
                                 for chunk in litellm.completion(
                                     **self._build_completion_params(
                                         messages=messages,
+                                        tools=formatted_tools,
                                         temperature=temperature,
-                                        stream=True
+                                        stream=True,
+                                        **kwargs
                                     )
                                 ):
                                     if chunk and chunk.choices and chunk.choices[0].delta.content:
                                         content = chunk.choices[0].delta.content
-                                        response_text += content
-                                        live.update(display_generating(response_text, start_time))
+                                        final_response_text += content
+                                        live.update(display_generating(final_response_text, current_time))
                         else:
-                            response_text = ""
+                            final_response_text = ""
                             for chunk in litellm.completion(
                                 **self._build_completion_params(
                                     messages=messages,
+                                    tools=formatted_tools,
                                     temperature=temperature,
-                                    stream=True
+                                    stream=True,
+                                    **kwargs
                                 )
                             ):
                                 if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    response_text += chunk.choices[0].delta.content
-
-                        response_text = response_text.strip()
-
-                        # Handle output formatting
-                        if output_json or output_pydantic:
-                            self.chat_history.append({"role": "user", "content": original_prompt})
-                            self.chat_history.append({"role": "assistant", "content": response_text})
+                                    final_response_text += chunk.choices[0].delta.content
+
+                        final_response_text = final_response_text.strip()
+
+                        # Display final response
                         if verbose:
-                            display_interaction(original_prompt, response_text, markdown=markdown,
-                                generation_time=time.time() - start_time, console=console)
-                        return response_text
+                            display_interaction(
+                                original_prompt,
+                                final_response_text,
+                                markdown=markdown,
+                                generation_time=time.time() - start_time,
+                                console=console
+                            )
+
+                        return final_response_text
+                else:
+                    # No tool calls, we're done with this iteration
+                    break
+
+            except Exception as e:
+                logging.error(f"Error in LLM iteration {iteration_count}: {e}")
+                break
+
+        # End of while loop - return final response
+        if final_response_text:
+            return final_response_text
+
+        # No tool calls were made in this iteration, return the response
+        if verbose:
+            display_interaction(
+                original_prompt,
+                response_text,
+                markdown=markdown,
+                generation_time=time.time() - start_time,
+                console=console
+            )
+
+        response_text = response_text.strip()
+
+        # Handle output formatting
+        if output_json or output_pydantic:
+            self.chat_history.append({"role": "user", "content": original_prompt})
+            self.chat_history.append({"role": "assistant", "content": response_text})
+            if verbose:
+                display_interaction(original_prompt, response_text, markdown=markdown,
+                    generation_time=time.time() - start_time, console=console)
+            return response_text
 
-            if not self_reflect:
-                if verbose:
-                    display_interaction(original_prompt, response_text, markdown=markdown,
-                        generation_time=time.time() - start_time, console=console)
-                # Return reasoning content if reasoning_steps is True
-                if reasoning_steps and reasoning_content:
-                    return reasoning_content
-                return response_text
+        if not self_reflect:
+            if verbose:
+                display_interaction(original_prompt, response_text, markdown=markdown,
+                    generation_time=time.time() - start_time, console=console)
+            # Return reasoning content if reasoning_steps is True
+            if reasoning_steps and reasoning_content:
+                return reasoning_content
+            return response_text
 
-            # Handle self-reflection
-            reflection_prompt = f"""
+        # Handle self-reflection loop
+        while reflection_count < max_reflect:
+            # Handle self-reflection
+            reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
 Identify any flaws, improvements, or actions.
 Provide a "satisfactory" status ('yes' or 'no').
 Output MUST be JSON with 'reflection' and 'satisfactory'.
-"""
-
-            reflection_messages = messages + [
-                {"role": "assistant", "content": response_text},
-                {"role": "user", "content": reflection_prompt}
-            ]
-
-            # If reasoning_steps is True, do a single non-streaming call to capture reasoning
-            if reasoning_steps:
-                reflection_resp = litellm.completion(
-                    **self._build_completion_params(
-                        messages=reflection_messages,
-                        temperature=temperature,
-                        stream=False, # Force non-streaming
-                        response_format={"type": "json_object"},
-                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                    )
+"""
+
+            reflection_messages = messages + [
+                {"role": "assistant", "content": response_text},
+                {"role": "user", "content": reflection_prompt}
+            ]
+
+            # If reasoning_steps is True, do a single non-streaming call to capture reasoning
+            if reasoning_steps:
+                reflection_resp = litellm.completion(
+                    **self._build_completion_params(
+                        messages=reflection_messages,
+                        temperature=temperature,
+                        stream=False, # Force non-streaming
+                        response_format={"type": "json_object"},
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
-                # Grab reflection text and optional reasoning
-                reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
-                reflection_text = reflection_resp["choices"][0]["message"]["content"]
-
-                # Optionally display reasoning if present
-                if verbose and reasoning_content:
-                    display_interaction(
-                        "Reflection reasoning:",
-                        f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
-                        markdown=markdown,
-                        generation_time=time.time() - start_time,
-                        console=console
-                    )
-                elif verbose:
-                    display_interaction(
-                        "Self-reflection (non-streaming):",
-                        reflection_text,
-                        markdown=markdown,
-                        generation_time=time.time() - start_time,
-                        console=console
-                    )
-            else:
-                # Existing streaming approach
-                if verbose:
-                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                        reflection_text = ""
-                        for chunk in litellm.completion(
-                            **self._build_completion_params(
-                                messages=reflection_messages,
-                                temperature=temperature,
-                                stream=True,
-                                response_format={"type": "json_object"},
-                                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                            )
-                        ):
-                            if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                content = chunk.choices[0].delta.content
-                                reflection_text += content
-                                live.update(display_generating(reflection_text, start_time))
-                else:
+                )
+                # Grab reflection text and optional reasoning
+                reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                reflection_text = reflection_resp["choices"][0]["message"]["content"]
+
+                # Optionally display reasoning if present
+                if verbose and reasoning_content:
+                    display_interaction(
+                        "Reflection reasoning:",
+                        f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                elif verbose:
+                    display_interaction(
+                        "Self-reflection (non-streaming):",
+                        reflection_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+            else:
+                # Existing streaming approach
+                if verbose:
+                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
                         reflection_text = ""
                         for chunk in litellm.completion(
                             **self._build_completion_params(
@@ -786,48 +832,102 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 )
                             ):
                                 if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    reflection_text += chunk.choices[0].delta.content
+                                    content = chunk.choices[0].delta.content
+                                    reflection_text += content
+                                    live.update(display_generating(reflection_text, start_time))
+                else:
+                    reflection_text = ""
+                    for chunk in litellm.completion(
+                        **self._build_completion_params(
+                            messages=reflection_messages,
+                            temperature=temperature,
+                            stream=True,
+                            response_format={"type": "json_object"},
+                            **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+                        )
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            reflection_text += chunk.choices[0].delta.content
 
-            try:
-                reflection_data = json.loads(reflection_text)
-                satisfactory = reflection_data.get("satisfactory", "no").lower() == "yes"
+            try:
+                reflection_data = json.loads(reflection_text)
+                satisfactory = reflection_data.get("satisfactory", "no").lower() == "yes"
+
+                if verbose:
+                    display_self_reflection(
+                        f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
+                        console=console
+                    )
 
+                if satisfactory and reflection_count >= min_reflect - 1:
                     if verbose:
-                        display_self_reflection(
-                            f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
-                            console=console
-                        )
+                        display_interaction(prompt, response_text, markdown=markdown,
+                            generation_time=time.time() - start_time, console=console)
+                    return response_text
 
-                if satisfactory and reflection_count >= min_reflect - 1:
-                    if verbose:
-                        display_interaction(prompt, response_text, markdown=markdown,
-                            generation_time=time.time() - start_time, console=console)
-                    return response_text
+                if reflection_count >= max_reflect - 1:
+                    if verbose:
+                        display_interaction(prompt, response_text, markdown=markdown,
+                            generation_time=time.time() - start_time, console=console)
+                    return response_text
 
-                if reflection_count >= max_reflect - 1:
-                    if verbose:
-                        display_interaction(prompt, response_text, markdown=markdown,
-                            generation_time=time.time() - start_time, console=console)
-                    return response_text
-
-                reflection_count += 1
-                messages.extend([
-                    {"role": "assistant", "content": response_text},
-                    {"role": "user", "content": reflection_prompt},
-                    {"role": "assistant", "content": reflection_text},
-                    {"role": "user", "content": "Now regenerate your response using the reflection you made"}
-                ])
-                continue
-
-            except json.JSONDecodeError:
-                reflection_count += 1
-                if reflection_count >= max_reflect:
-                    return response_text
-                continue
+                reflection_count += 1
+                messages.extend([
+                    {"role": "assistant", "content": response_text},
+                    {"role": "user", "content": reflection_prompt},
+                    {"role": "assistant", "content": reflection_text},
+                    {"role": "user", "content": "Now regenerate your response using the reflection you made"}
+                ])
+
+                # Get new response after reflection
+                if verbose:
+                    with Live(display_generating("", time.time()), console=console, refresh_per_second=4) as live:
+                        response_text = ""
+                        for chunk in litellm.completion(
+                            **self._build_completion_params(
+                                messages=messages,
+                                temperature=temperature,
+                                stream=True,
+                                **kwargs
+                            )
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, time.time()))
+                else:
+                    response_text = ""
+                    for chunk in litellm.completion(
+                        **self._build_completion_params(
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        )
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+
+                response_text = response_text.strip()
+                continue
 
+            except json.JSONDecodeError:
+                reflection_count += 1
+                if reflection_count >= max_reflect:
+                    if verbose:
+                        display_interaction(prompt, response_text, markdown=markdown,
+                            generation_time=time.time() - start_time, console=console)
+                    return response_text
+                continue
             except Exception as e:
                 display_error(f"Error in LLM response: {str(e)}")
                 return None
+
+        # If we've exhausted reflection attempts
+        if verbose:
+            display_interaction(prompt, response_text, markdown=markdown,
+                generation_time=time.time() - start_time, console=console)
+        return response_text
 
         except Exception as error:
             display_error(f"Error in get_response: {str(error)}")
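
The reflection prompt in the hunks above requires the model to answer with JSON containing 'reflection' and 'satisfactory'. A minimal illustration of that contract, using the same parsing expression as the diff (the sample reply text is invented):

# Example of the JSON shape the self-reflection prompt asks the model to return.
import json

reflection_reply = '{"reflection": "The answer skips the edge case of empty input.", "satisfactory": "no"}'
reflection_data = json.loads(reflection_reply)
satisfactory = reflection_data.get("satisfactory", "no").lower() == "yes"  # mirrors the check in the hunk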

praisonaiagents/task/task.py

@@ -1,6 +1,7 @@
 import logging
 import asyncio
-from typing import List, Optional, Dict, Any, Type, Callable, Union, Coroutine, Literal
+import inspect
+from typing import List, Optional, Dict, Any, Type, Callable, Union, Coroutine, Literal, Tuple, get_args, get_origin
 from pydantic import BaseModel
 from ..main import TaskOutput
 from ..agent.agent import Agent
@@ -40,7 +41,10 @@ class Task:
         quality_check=True,
         input_file: Optional[str] = None,
         rerun: bool = False, # Renamed from can_rerun and logic inverted, default True for backward compatibility
-        retain_full_context: bool = False # By default, only use previous task output, not all previous tasks
+        retain_full_context: bool = False, # By default, only use previous task output, not all previous tasks
+        guardrail: Optional[Union[Callable[[TaskOutput], Tuple[bool, Any]], str]] = None,
+        max_retries: int = 3,
+        retry_count: int = 0
     ):
         # Add check if memory config is provided
         if memory is not None or (config and config.get('memory_config')):
@@ -80,6 +84,10 @@ class Task:
         self.quality_check = quality_check
         self.rerun = rerun # Assigning the rerun parameter
         self.retain_full_context = retain_full_context
+        self.guardrail = guardrail
+        self.max_retries = max_retries
+        self.retry_count = retry_count
+        self._guardrail_fn = None
 
         # Set logger level based on config verbose level
         verbose = self.config.get("verbose", 0)
@@ -141,6 +149,55 @@ class Task:
 
             self.output_pydantic = LoopModel
 
+        # Initialize guardrail
+        self._setup_guardrail()
+
+    def _setup_guardrail(self):
+        """Setup the guardrail function based on the provided guardrail parameter."""
+        if self.guardrail is None:
+            self._guardrail_fn = None
+            return
+
+        if callable(self.guardrail):
+            # Validate function signature
+            sig = inspect.signature(self.guardrail)
+            positional_args = [
+                param for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
+                raise ValueError("Guardrail function must accept exactly one parameter (TaskOutput)")
+
+            # Check return annotation if present
+            return_annotation = sig.return_annotation
+            if return_annotation != inspect.Signature.empty:
+                return_annotation_args = get_args(return_annotation)
+                if not (
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or return_annotation_args[1] is TaskOutput
+                        or return_annotation_args[1] == Union[str, TaskOutput]
+                    )
+                ):
+                    raise ValueError(
+                        "If return type is annotated, it must be Tuple[bool, Any]"
+                    )
+
+            self._guardrail_fn = self.guardrail
+        elif isinstance(self.guardrail, str):
+            # Create LLM-based guardrail
+            from ..guardrails import LLMGuardrail
+            if not self.agent:
+                raise ValueError("Agent is required for string-based guardrails")
+            llm = getattr(self.agent, 'llm', None) or getattr(self.agent, 'llm_instance', None)
+            self._guardrail_fn = LLMGuardrail(description=self.guardrail, llm=llm)
+        else:
+            raise ValueError("Guardrail must be either a callable or a string description")
+
     def __str__(self):
         return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"
 
@@ -187,6 +244,37 @@ class Task:
         logger.info(f"Task {self.id}: execute_callback called")
         logger.info(f"Quality check enabled: {self.quality_check}")
 
+        # Process guardrail if configured
+        if self._guardrail_fn:
+            try:
+                guardrail_result = self._process_guardrail(task_output)
+                if not guardrail_result.success:
+                    if self.retry_count >= self.max_retries:
+                        raise Exception(
+                            f"Task failed guardrail validation after {self.max_retries} retries. "
+                            f"Last error: {guardrail_result.error}"
+                        )
+
+                    self.retry_count += 1
+                    logger.warning(f"Task {self.id}: Guardrail validation failed (retry {self.retry_count}/{self.max_retries}): {guardrail_result.error}")
+                    # Note: In a real execution, this would trigger a retry, but since this is a callback
+                    # the retry logic would need to be handled at the agent/execution level
+                    return
+
+                # If guardrail passed and returned a modified result
+                if guardrail_result.result is not None:
+                    if isinstance(guardrail_result.result, str):
+                        # Update the task output with the modified result
+                        task_output.raw = guardrail_result.result
+                    elif isinstance(guardrail_result.result, TaskOutput):
+                        # Replace with the new task output
+                        task_output = guardrail_result.result
+
+                logger.info(f"Task {self.id}: Guardrail validation passed")
+            except Exception as e:
+                logger.error(f"Task {self.id}: Error in guardrail processing: {e}")
+                # Continue execution even if guardrail fails to avoid breaking the task
+
         # Initialize memory if not already initialized
         if not self.memory:
             self.memory = self.initialize_memory()
@@ -334,4 +422,34 @@ Context:
                 loop.run_until_complete(self.execute_callback(task_output))
             except RuntimeError:
                 # If no loop is running in this context
-                asyncio.run(self.execute_callback(task_output))
+                asyncio.run(self.execute_callback(task_output))
+
+    def _process_guardrail(self, task_output: TaskOutput):
+        """Process the guardrail validation for a task output.
+
+        Args:
+            task_output: The task output to validate
+
+        Returns:
+            GuardrailResult: The result of the guardrail validation
+        """
+        from ..guardrails import GuardrailResult
+
+        if not self._guardrail_fn:
+            return GuardrailResult(success=True, result=task_output)
+
+        try:
+            # Call the guardrail function
+            result = self._guardrail_fn(task_output)
+
+            # Convert the result to a GuardrailResult
+            return GuardrailResult.from_tuple(result)
+
+        except Exception as e:
+            logger.error(f"Task {self.id}: Error in guardrail validation: {e}")
+            # On error, return failure
+            return GuardrailResult(
+                success=False,
+                result=None,
+                error=f"Guardrail validation error: {str(e)}"
+            )
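
Taken together with the Agent changes earlier in the diff, a task-level guardrail might be wired up as sketched below. This is illustrative only; the field values are placeholders, and the callable follows the one-parameter, Tuple[bool, Any] convention that _setup_guardrail validates.

# Hypothetical sketch combining the new Task guardrail parameters shown above.
import json
from typing import Any, Tuple
from praisonaiagents import Agent, Task, TaskOutput

def require_json(output: TaskOutput) -> Tuple[bool, Any]:
    """Reject outputs that are not valid JSON."""
    try:
        json.loads(output.raw)
        return True, output
    except json.JSONDecodeError:
        return False, "Output must be valid JSON."

agent = Agent(name="Analyst", instructions="Summarise the data as JSON.", llm="gpt-4o-mini")
task = Task(
    description="Summarise quarterly numbers as JSON.",
    expected_output="A JSON summary",
    agent=agent,
    guardrail=require_json,   # a string description would create an LLMGuardrail instead
    max_retries=2,
)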

praisonaiagents-0.0.99.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.97
+Version: 0.0.99
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10

praisonaiagents-0.0.99.dist-info/RECORD

@@ -1,9 +1,9 @@
-praisonaiagents/__init__.py,sha256=jd2-rTrVQRH7fY8gO70UZBvV0Sq6FyijUMokQHJa-q0,1368
+praisonaiagents/__init__.py,sha256=GmTiMNta4iwmfarh_6cTUpry50hpqFE8YqolrYfZ_7U,1465
 praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
 praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
 praisonaiagents/session.py,sha256=CI-ffCiOfmgB-1zFFik9daKCB5Sm41Q9ZOaq1-oSLW8,9250
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=irU4M5n23LD57hHtB5K6FHXrWgE_p0HeCX6UuIvhMlQ,86753
+praisonaiagents/agent/agent.py,sha256=gWTkPOhNHLosZbeZSWXp9sk4H9AN531EgclHmWeZVXk,97881
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=C_yDdJB4XUuwKA9DrysAtAj3zSYT0IKtfCT4Pxo0oyI,63309
@@ -12,7 +12,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=SzD_qoUqQnC9FpY-d1HHqKQGkIGPR5wEmE1OcqVEPFY,93577
+praisonaiagents/llm/llm.py,sha256=hoIxHzo9aNygeOiw9RtoPhpuSCVTUrKPe3OPvsT5qLc,98212
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=_gfp8hrSVT9aPqEDDfU8MiCdg0-3dVQpEQUE6AbrJlo,17243
 praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
@@ -20,7 +20,7 @@ praisonaiagents/memory/memory.py,sha256=6tvsLWkpvyNU-t-8d6XpW7vOFUm1pNReW5B7rA8c
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=gxhMXG3s4CzaREyuwE5zxCMx2Wp_b_Wd53tDfkj8Qk8,66567
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=03Vcz3TaKIYnryFnKAuuQ7Ly5nTaxysFpem6sgn4gJA,15112
+praisonaiagents/task/task.py,sha256=60JE07JPpRowbEO420f4Ol49e_1AK856wSJDi_ricbg,20531
 praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
@@ -42,7 +42,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.97.dist-info/METADATA,sha256=8vBs0ezuT_KyidSMGofmM_YJTr4p9z5dUT-5o4LpohA,1452
-praisonaiagents-0.0.97.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.97.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.97.dist-info/RECORD,,
+praisonaiagents-0.0.99.dist-info/METADATA,sha256=nwlbhRlDcuelH3q-qUQS1jaEPb_CeGL849Epd6NonJM,1452
+praisonaiagents-0.0.99.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.99.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.99.dist-info/RECORD,,