praisonaiagents 0.0.98__py3-none-any.whl → 0.0.100__py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- praisonaiagents/agent/agent.py +199 -9
- praisonaiagents/guardrails/__init__.py +11 -0
- praisonaiagents/guardrails/guardrail_result.py +43 -0
- praisonaiagents/guardrails/llm_guardrail.py +88 -0
- praisonaiagents/llm/llm.py +248 -148
- praisonaiagents/memory/__init__.py +15 -0
- praisonaiagents/memory/memory.py +7 -4
- praisonaiagents/task/task.py +5 -1
- praisonaiagents/tools/searxng_tools.py +94 -0
- {praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/RECORD +13 -8
- {praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -3,7 +3,7 @@ import time
 import json
 import logging
 import asyncio
-from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING
+from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
 from openai import AsyncOpenAI
@@ -32,6 +32,7 @@ _shared_apps = {}  # Dict of port -> FastAPI app
 
 if TYPE_CHECKING:
     from ..task.task import Task
+    from ..main import TaskOutput
 
 @dataclass
 class ChatCompletionMessage:
@@ -368,7 +369,9 @@ class Agent:
         min_reflect: int = 1,
         reflect_llm: Optional[str] = None,
         user_id: Optional[str] = None,
-        reasoning_steps: bool = False
+        reasoning_steps: bool = False,
+        guardrail: Optional[Union[Callable[['TaskOutput'], Tuple[bool, Any]], str]] = None,
+        max_guardrail_retries: int = 3
     ):
         # Add check at start if memory is requested
         if memory is not None:
@@ -483,6 +486,12 @@ Your Goal: {self.goal}
         # Store user_id
         self.user_id = user_id or "praison"
         self.reasoning_steps = reasoning_steps
+
+        # Initialize guardrail settings
+        self.guardrail = guardrail
+        self.max_guardrail_retries = max_guardrail_retries
+        self._guardrail_fn = None
+        self._setup_guardrail()
 
         # Check if knowledge parameter has any values
         if not knowledge:
@@ -512,6 +521,149 @@ Your Goal: {self.goal}
             except Exception as e:
                 logging.error(f"Error processing knowledge item: {knowledge_item}, error: {e}")
 
+    def _setup_guardrail(self):
+        """Setup the guardrail function based on the provided guardrail parameter."""
+        if self.guardrail is None:
+            self._guardrail_fn = None
+            return
+
+        if callable(self.guardrail):
+            # Validate function signature
+            sig = inspect.signature(self.guardrail)
+            positional_args = [
+                param for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
+                raise ValueError("Agent guardrail function must accept exactly one parameter (TaskOutput)")
+
+            # Check return annotation if present
+            from typing import get_args, get_origin
+            return_annotation = sig.return_annotation
+            if return_annotation != inspect.Signature.empty:
+                return_annotation_args = get_args(return_annotation)
+                if not (
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or str(return_annotation_args[1]).endswith('TaskOutput')
+                        or str(return_annotation_args[1]).startswith('typing.Union')
+                    )
+                ):
+                    raise ValueError(
+                        "If return type is annotated, it must be Tuple[bool, Any] or Tuple[bool, Union[str, TaskOutput]]"
+                    )
+
+            self._guardrail_fn = self.guardrail
+        elif isinstance(self.guardrail, str):
+            # Create LLM-based guardrail
+            from ..guardrails import LLMGuardrail
+            llm = getattr(self, 'llm', None) or getattr(self, 'llm_instance', None)
+            self._guardrail_fn = LLMGuardrail(description=self.guardrail, llm=llm)
+        else:
+            raise ValueError("Agent guardrail must be either a callable or a string description")
+
+    def _process_guardrail(self, task_output):
+        """Process the guardrail validation for a task output.
+
+        Args:
+            task_output: The task output to validate
+
+        Returns:
+            GuardrailResult: The result of the guardrail validation
+        """
+        from ..guardrails import GuardrailResult
+
+        if not self._guardrail_fn:
+            return GuardrailResult(success=True, result=task_output)
+
+        try:
+            # Call the guardrail function
+            result = self._guardrail_fn(task_output)
+
+            # Convert the result to a GuardrailResult
+            return GuardrailResult.from_tuple(result)
+
+        except Exception as e:
+            logging.error(f"Agent {self.name}: Error in guardrail validation: {e}")
+            # On error, return failure
+            return GuardrailResult(
+                success=False,
+                result=None,
+                error=f"Agent guardrail validation error: {str(e)}"
+            )
+
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+        """Apply guardrail validation with retry logic.
+
+        Args:
+            response_text: The response to validate
+            prompt: Original prompt for regeneration if needed
+            temperature: Temperature for regeneration
+            tools: Tools for regeneration
+
+        Returns:
+            str: The validated response text or None if validation fails after retries
+        """
+        if not self._guardrail_fn:
+            return response_text
+
+        from ..main import TaskOutput
+
+        retry_count = 0
+        current_response = response_text
+
+        while retry_count <= self.max_guardrail_retries:
+            # Create TaskOutput object
+            task_output = TaskOutput(
+                description="Agent response output",
+                raw=current_response,
+                agent=self.name
+            )
+
+            # Process guardrail
+            guardrail_result = self._process_guardrail(task_output)
+
+            if guardrail_result.success:
+                logging.info(f"Agent {self.name}: Guardrail validation passed")
+                # Return the potentially modified result
+                if guardrail_result.result and hasattr(guardrail_result.result, 'raw'):
+                    return guardrail_result.result.raw
+                elif guardrail_result.result:
+                    return str(guardrail_result.result)
+                else:
+                    return current_response
+
+            # Guardrail failed
+            if retry_count >= self.max_guardrail_retries:
+                raise Exception(
+                    f"Agent {self.name} response failed guardrail validation after {self.max_guardrail_retries} retries. "
+                    f"Last error: {guardrail_result.error}"
+                )
+
+            retry_count += 1
+            logging.warning(f"Agent {self.name}: Guardrail validation failed (retry {retry_count}/{self.max_guardrail_retries}): {guardrail_result.error}")
+
+            # Regenerate response for retry
+            try:
+                retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
+                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                if response and response.choices:
+                    current_response = response.choices[0].message.content.strip()
+                else:
+                    raise Exception("Failed to generate retry response")
+            except Exception as e:
+                logging.error(f"Agent {self.name}: Error during guardrail retry: {e}")
+                # If we can't regenerate, fail the guardrail
+                raise Exception(
+                    f"Agent {self.name} guardrail retry failed: {e}"
+                )
+
+        return current_response
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
@@ -967,7 +1119,13 @@ Your Goal: {self.goal}
                 total_time = time.time() - start_time
                 logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
 
-
+            # Apply guardrail validation for custom LLM response
+            try:
+                validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                return validated_response
+            except Exception as e:
+                logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
+                return None
         except Exception as e:
             display_error(f"Error in LLM chat: {e}")
             return None
@@ -1055,8 +1213,20 @@ Your Goal: {self.goal}
             display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
             # Return only reasoning content if reasoning_steps is True
             if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
-
-
+                # Apply guardrail to reasoning content
+                try:
+                    validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                    return validated_reasoning
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
+                    return None
+            # Apply guardrail to regular response
+            try:
+                validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                return validated_response
+            except Exception as e:
+                logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+                return None
 
             reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
@@ -1089,7 +1259,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-
+                # Apply guardrail validation after satisfactory reflection
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
+                    return None
 
             # Check if we've hit max reflections
             if reflection_count >= self.max_reflect - 1:
@@ -1098,7 +1274,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-
+                # Apply guardrail validation after max reflections
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
+                    return None
 
             logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
             messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
@@ -1122,8 +1304,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             total_time = time.time() - start_time
             logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
-
-
+
+        # Apply guardrail validation before returning
+        try:
+            validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+            return validated_response
+        except Exception as e:
+            logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+            if self.verbose:
+                display_error(f"Guardrail validation failed: {e}", console=self.console)
+            return None
 
     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
praisonaiagents/guardrails/__init__.py
ADDED
@@ -0,0 +1,11 @@
+"""
+Guardrails module for PraisonAI Agents.
+
+This module provides validation and safety mechanisms for task outputs,
+including both function-based and LLM-based guardrails.
+"""
+
+from .guardrail_result import GuardrailResult
+from .llm_guardrail import LLMGuardrail
+
+__all__ = ["GuardrailResult", "LLMGuardrail"]
praisonaiagents/guardrails/guardrail_result.py
ADDED
@@ -0,0 +1,43 @@
+"""
+Guardrail result classes for PraisonAI Agents.
+
+This module provides the result types for guardrail validation,
+following the same pattern as CrewAI for consistency.
+"""
+
+from typing import Any, Tuple, Union
+from pydantic import BaseModel, Field
+from ..main import TaskOutput
+
+
+class GuardrailResult(BaseModel):
+    """Result of a guardrail validation."""
+
+    success: bool = Field(description="Whether the guardrail check passed")
+    result: Union[str, TaskOutput, None] = Field(description="The result if modified, or None if unchanged")
+    error: str = Field(default="", description="Error message if validation failed")
+
+    @classmethod
+    def from_tuple(cls, result: Tuple[bool, Any]) -> "GuardrailResult":
+        """Create a GuardrailResult from a tuple returned by a guardrail function.
+
+        Args:
+            result: Tuple of (success, result_or_error)
+
+        Returns:
+            GuardrailResult: The structured result
+        """
+        success, data = result
+
+        if success:
+            return cls(
+                success=True,
+                result=data,
+                error=""
+            )
+        else:
+            return cls(
+                success=False,
+                result=None,
+                error=str(data) if data else "Guardrail validation failed"
+            )
praisonaiagents/guardrails/llm_guardrail.py
ADDED
@@ -0,0 +1,88 @@
+"""
+LLM-based guardrail implementation for PraisonAI Agents.
+
+This module provides LLM-powered guardrails that can validate task outputs
+using natural language descriptions, similar to CrewAI's implementation.
+"""
+
+import logging
+from typing import Any, Tuple, Union, Optional
+from pydantic import BaseModel
+from ..main import TaskOutput
+
+
+class LLMGuardrail:
+    """An LLM-powered guardrail that validates task outputs using natural language."""
+
+    def __init__(self, description: str, llm: Any = None):
+        """Initialize the LLM guardrail.
+
+        Args:
+            description: Natural language description of what to validate
+            llm: The LLM instance to use for validation
+        """
+        self.description = description
+        self.llm = llm
+        self.logger = logging.getLogger(__name__)
+
+    def __call__(self, task_output: TaskOutput) -> Tuple[bool, Union[str, TaskOutput]]:
+        """Validate the task output using the LLM.
+
+        Args:
+            task_output: The task output to validate
+
+        Returns:
+            Tuple of (success, result) where result is the output or error message
+        """
+        try:
+            if not self.llm:
+                self.logger.warning("No LLM provided for guardrail validation")
+                return True, task_output
+
+            # Create validation prompt
+            validation_prompt = f"""
+You are a quality assurance validator. Your task is to evaluate the following output against specific criteria.
+
+Validation Criteria: {self.description}
+
+Output to Validate:
+{task_output.raw}
+
+Please evaluate if this output meets the criteria. Respond with:
+1. "PASS" if the output meets all criteria
+2. "FAIL: [specific reason]" if the output does not meet criteria
+
+Your response:"""
+
+            # Get LLM response
+            if hasattr(self.llm, 'chat'):
+                # For Agent's LLM interface
+                response = self.llm.chat(validation_prompt, temperature=0.1)
+            elif hasattr(self.llm, 'get_response'):
+                # For custom LLM instances
+                response = self.llm.get_response(validation_prompt, temperature=0.1)
+            elif callable(self.llm):
+                # For simple callable LLMs
+                response = self.llm(validation_prompt)
+            else:
+                self.logger.error(f"Unsupported LLM type: {type(self.llm)}")
+                return True, task_output
+
+            # Parse response
+            response = str(response).strip()
+
+            if response.upper().startswith("PASS"):
+                return True, task_output
+            elif response.upper().startswith("FAIL"):
+                # Extract the reason
+                reason = response[5:].strip(": ")
+                return False, f"Guardrail validation failed: {reason}"
+            else:
+                # Unclear response, log and pass through
+                self.logger.warning(f"Unclear guardrail response: {response}")
+                return True, task_output
+
+        except Exception as e:
+            self.logger.error(f"Error in LLM guardrail validation: {str(e)}")
+            # On error, pass through the original output
+            return True, task_output
praisonaiagents/llm/llm.py
CHANGED
@@ -413,24 +413,30 @@ class LLM:
             start_time = time.time()
             reflection_count = 0
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
+            # Display initial instruction once
+            if verbose:
+                display_text = prompt
+                if isinstance(prompt, list):
+                    display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+                if display_text and str(display_text).strip():
+                    display_instruction(
+                        f"Agent {agent_name} is processing prompt: {display_text}",
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools
+                    )
 
+            # Sequential tool calling loop - similar to agent.py
+            max_iterations = 10  # Prevent infinite loops
+            iteration_count = 0
+            final_response_text = ""
+
+            while iteration_count < max_iterations:
+                try:
                     # Get response from LiteLLM
-
+                    current_time = time.time()
 
                     # If reasoning_steps is True, do a single non-streaming call
                     if reasoning_steps:
@@ -445,6 +451,7 @@ class LLM:
                        )
                        reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                        response_text = resp["choices"][0]["message"]["content"]
+                        final_response = resp
 
                        # Optionally display reasoning if present
                        if verbose and reasoning_content:
@@ -452,7 +459,7 @@ class LLM:
                                original_prompt,
                                f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                                markdown=markdown,
-                                generation_time=time.time() -
+                                generation_time=time.time() - current_time,
                                console=console
                            )
                        else:
@@ -460,14 +467,14 @@ class LLM:
                                original_prompt,
                                response_text,
                                markdown=markdown,
-                                generation_time=time.time() -
+                                generation_time=time.time() - current_time,
                                console=console
                            )
 
                    # Otherwise do the existing streaming approach
                    else:
                        if verbose:
-                            with Live(display_generating("",
+                            with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
                                response_text = ""
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
@@ -481,7 +488,7 @@ class LLM:
                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
                                        content = chunk.choices[0].delta.content
                                        response_text += content
-                                        live.update(display_generating(response_text,
+                                        live.update(display_generating(response_text, current_time))
                        else:
                            # Non-verbose mode, just collect the response
                            response_text = ""
@@ -499,20 +506,20 @@ class LLM:
 
                    response_text = response_text.strip()
 
-
-
-
-
-
-
-
-
+                    # Get final completion to check for tool calls
+                    final_response = litellm.completion(
+                        **self._build_completion_params(
+                            messages=messages,
+                            tools=formatted_tools,
+                            temperature=temperature,
+                            stream=False,  # No streaming for tool call check
+                            **kwargs
+                        )
                    )
-                    )
 
                    tool_calls = final_response["choices"][0]["message"].get("tool_calls")
 
-                    # Handle tool calls
+                    # Handle tool calls - Sequential tool calling logic
                    if tool_calls and execute_tool_fn:
                        # Convert tool_calls to a serializable format for all providers
                        serializable_tool_calls = []
@@ -535,6 +542,7 @@ class LLM:
                            "tool_calls": serializable_tool_calls
                        })
 
+                        should_continue = False
                        for tool_call in tool_calls:
                            # Handle both object and dict access patterns
                            if isinstance(tool_call, dict):
@@ -574,6 +582,18 @@ class LLM:
                                "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
                            })
 
+                            # Check if we should continue (for tools like sequential thinking)
+                            # This mimics the logic from agent.py lines 1004-1007
+                            if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                                should_continue = True
+
+                        # If we should continue, increment iteration and continue loop
+                        if should_continue:
+                            iteration_count += 1
+                            continue
+
+                        # If we reach here, no more tool calls needed - get final response
+                        # Make one more call to get the final summary response
                        # Special handling for Ollama models that don't automatically process tool results
                        if self.model and self.model.startswith("ollama/") and tool_result:
                            # For Ollama models, we need to explicitly ask the model to process the tool results
@@ -666,115 +686,141 @@ class LLM:
                        else:
                            # Get response after tool calls with streaming
                            if verbose:
-                                with Live(display_generating("",
-
+                                with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+                                    final_response_text = ""
                                    for chunk in litellm.completion(
                                        **self._build_completion_params(
                                            messages=messages,
+                                            tools=formatted_tools,
                                            temperature=temperature,
-                                            stream=True
+                                            stream=True,
+                                            **kwargs
                                        )
                                    ):
                                        if chunk and chunk.choices and chunk.choices[0].delta.content:
                                            content = chunk.choices[0].delta.content
-
-                                            live.update(display_generating(
+                                            final_response_text += content
+                                            live.update(display_generating(final_response_text, current_time))
                            else:
-
+                                final_response_text = ""
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
                                        messages=messages,
+                                        tools=formatted_tools,
                                        temperature=temperature,
-                                        stream=True
+                                        stream=True,
+                                        **kwargs
                                    )
                                ):
                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-
-
-
-
-
-                if output_json or output_pydantic:
-                    self.chat_history.append({"role": "user", "content": original_prompt})
-                    self.chat_history.append({"role": "assistant", "content": response_text})
+                                        final_response_text += chunk.choices[0].delta.content
+
+                            final_response_text = final_response_text.strip()
+
+                            # Display final response
                            if verbose:
-                                display_interaction(
-
-
+                                display_interaction(
+                                    original_prompt,
+                                    final_response_text,
+                                    markdown=markdown,
+                                    generation_time=time.time() - start_time,
+                                    console=console
+                                )
+
+                            return final_response_text
+                    else:
+                        # No tool calls, we're done with this iteration
+                        break
+
+                except Exception as e:
+                    logging.error(f"Error in LLM iteration {iteration_count}: {e}")
+                    break
+
+            # End of while loop - return final response
+            if final_response_text:
+                return final_response_text
+
+            # No tool calls were made in this iteration, return the response
+            if verbose:
+                display_interaction(
+                    original_prompt,
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console
+                )
+
+            response_text = response_text.strip()
+
+            # Handle output formatting
+            if output_json or output_pydantic:
+                self.chat_history.append({"role": "user", "content": original_prompt})
+                self.chat_history.append({"role": "assistant", "content": response_text})
+                if verbose:
+                    display_interaction(original_prompt, response_text, markdown=markdown,
+                                        generation_time=time.time() - start_time, console=console)
+                return response_text
 
-
-
-
-
-
-
-
-
+            if not self_reflect:
+                if verbose:
+                    display_interaction(original_prompt, response_text, markdown=markdown,
+                                        generation_time=time.time() - start_time, console=console)
+                # Return reasoning content if reasoning_steps is True
+                if reasoning_steps and reasoning_content:
+                    return reasoning_content
+                return response_text
 
-
-
+            # Handle self-reflection loop
+            while reflection_count < max_reflect:
+                # Handle self-reflection
+                reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
 Identify any flaws, improvements, or actions.
 Provide a "satisfactory" status ('yes' or 'no').
 Output MUST be JSON with 'reflection' and 'satisfactory'.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                )
+"""
+
+                reflection_messages = messages + [
+                    {"role": "assistant", "content": response_text},
+                    {"role": "user", "content": reflection_prompt}
+                ]
+
+                # If reasoning_steps is True, do a single non-streaming call to capture reasoning
+                if reasoning_steps:
+                    reflection_resp = litellm.completion(
+                        **self._build_completion_params(
+                            messages=reflection_messages,
+                            temperature=temperature,
+                            stream=False,  # Force non-streaming
+                            response_format={"type": "json_object"},
+                            **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                        )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        for chunk in litellm.completion(
-                            **self._build_completion_params(
-                                messages=reflection_messages,
-                                temperature=temperature,
-                                stream=True,
-                                response_format={"type": "json_object"},
-                                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                            )
-                        ):
-                            if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                content = chunk.choices[0].delta.content
-                                reflection_text += content
-                                live.update(display_generating(reflection_text, start_time))
-                else:
+                    )
+                    # Grab reflection text and optional reasoning
+                    reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                    reflection_text = reflection_resp["choices"][0]["message"]["content"]
+
+                    # Optionally display reasoning if present
+                    if verbose and reasoning_content:
+                        display_interaction(
+                            "Reflection reasoning:",
+                            f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+                    elif verbose:
+                        display_interaction(
+                            "Self-reflection (non-streaming):",
+                            reflection_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+                else:
+                    # Existing streaming approach
+                    if verbose:
+                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
                            reflection_text = ""
                            for chunk in litellm.completion(
                                **self._build_completion_params(
@@ -786,48 +832,102 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                )
                            ):
                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-
+                                    content = chunk.choices[0].delta.content
+                                    reflection_text += content
+                                    live.update(display_generating(reflection_text, start_time))
+                    else:
+                        reflection_text = ""
+                        for chunk in litellm.completion(
+                            **self._build_completion_params(
+                                messages=reflection_messages,
+                                temperature=temperature,
+                                stream=True,
+                                response_format={"type": "json_object"},
+                                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
+                            )
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                reflection_text += chunk.choices[0].delta.content
 
-
-
-
+                try:
+                    reflection_data = json.loads(reflection_text)
+                    satisfactory = reflection_data.get("satisfactory", "no").lower() == "yes"
+
+                    if verbose:
+                        display_self_reflection(
+                            f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
+                            console=console
+                        )
 
+                    if satisfactory and reflection_count >= min_reflect - 1:
                        if verbose:
-
-
-
-                    )
+                            display_interaction(prompt, response_text, markdown=markdown,
+                                                generation_time=time.time() - start_time, console=console)
+                        return response_text
 
-
-
-
-
-
+                    if reflection_count >= max_reflect - 1:
+                        if verbose:
+                            display_interaction(prompt, response_text, markdown=markdown,
+                                                generation_time=time.time() - start_time, console=console)
+                        return response_text
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    reflection_count += 1
+                    messages.extend([
+                        {"role": "assistant", "content": response_text},
+                        {"role": "user", "content": reflection_prompt},
+                        {"role": "assistant", "content": reflection_text},
+                        {"role": "user", "content": "Now regenerate your response using the reflection you made"}
+                    ])
+
+                    # Get new response after reflection
+                    if verbose:
+                        with Live(display_generating("", time.time()), console=console, refresh_per_second=4) as live:
+                            response_text = ""
+                            for chunk in litellm.completion(
+                                **self._build_completion_params(
+                                    messages=messages,
+                                    temperature=temperature,
+                                    stream=True,
+                                    **kwargs
+                                )
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    response_text += content
+                                    live.update(display_generating(response_text, time.time()))
+                    else:
+                        response_text = ""
+                        for chunk in litellm.completion(
+                            **self._build_completion_params(
+                                messages=messages,
+                                temperature=temperature,
+                                stream=True,
+                                **kwargs
+                            )
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                response_text += chunk.choices[0].delta.content
+
+                    response_text = response_text.strip()
+                    continue
 
+                except json.JSONDecodeError:
+                    reflection_count += 1
+                    if reflection_count >= max_reflect:
+                        if verbose:
+                            display_interaction(prompt, response_text, markdown=markdown,
+                                                generation_time=time.time() - start_time, console=console)
+                        return response_text
+                    continue
                except Exception as e:
                    display_error(f"Error in LLM response: {str(e)}")
                    return None
+
+            # If we've exhausted reflection attempts
+            if verbose:
+                display_interaction(prompt, response_text, markdown=markdown,
+                                    generation_time=time.time() - start_time, console=console)
+            return response_text
 
         except Exception as error:
             display_error(f"Error in get_response: {str(error)}")
praisonaiagents/memory/__init__.py
ADDED
@@ -0,0 +1,15 @@
+"""
+Memory module for PraisonAI Agents
+
+This module provides memory management capabilities including:
+- Short-term memory (STM) for ephemeral context
+- Long-term memory (LTM) for persistent knowledge
+- Entity memory for structured data
+- User memory for preferences/history
+- Quality-based storage decisions
+- Graph memory support via Mem0
+"""
+
+from .memory import Memory
+
+__all__ = ["Memory"]
praisonaiagents/memory/memory.py
CHANGED
@@ -910,11 +910,14 @@ class Memory:
         """
 
         try:
-            # Use
-
+            # Use LiteLLM for consistency with the rest of the codebase
+            import litellm
 
-
-
+            # Convert model name if it's in litellm format
+            model_name = llm or "gpt-4o-mini"
+
+            response = litellm.completion(
+                model=model_name,
                 messages=[{
                     "role": "user",
                     "content": custom_prompt or default_prompt
praisonaiagents/task/task.py
CHANGED
@@ -308,7 +308,11 @@ class Task:
         if self.agent:
             if getattr(self.agent, '_using_custom_llm', False) and hasattr(self.agent, 'llm_instance'):
                 # For custom LLM instances (like Ollama)
-
+                # Extract the model name from the LLM instance
+                if hasattr(self.agent.llm_instance, 'model'):
+                    llm_model = self.agent.llm_instance.model
+                else:
+                    llm_model = "gpt-4o-mini"  # Default fallback
             elif hasattr(self.agent, 'llm') and self.agent.llm:
                 # For standard model strings
                 llm_model = self.agent.llm
praisonaiagents/tools/searxng_tools.py
ADDED
@@ -0,0 +1,94 @@
+"""SearxNG search functionality.
+
+Usage:
+    from praisonaiagents.tools import searxng_search
+    results = searxng_search("AI news")
+
+    or
+    from praisonaiagents.tools import searxng
+    results = searxng("AI news")
+"""
+
+from typing import List, Dict, Optional
+import logging
+from importlib import util
+
+def searxng_search(
+    query: str,
+    max_results: int = 5,
+    searxng_url: Optional[str] = None
+) -> List[Dict]:
+    """Perform an internet search using SearxNG instance.
+
+    Args:
+        query: Search query string
+        max_results: Maximum number of results to return
+        searxng_url: SearxNG instance URL (defaults to localhost:32768)
+
+    Returns:
+        List[Dict]: Search results with title, url, and snippet keys.
+                    Returns error dict on failure.
+    """
+    # Check if requests is available
+    if util.find_spec("requests") is None:
+        error_msg = "SearxNG search requires requests package. Install with: pip install requests"
+        logging.error(error_msg)
+        return [{"error": error_msg}]
+
+    try:
+        import requests
+
+        # Default URL for local SearxNG instance
+        url = searxng_url or "http://localhost:32768/search"
+
+        params = {
+            'q': query,
+            'format': 'json',
+            'engines': 'google,bing,duckduckgo',  # Multiple engines
+            'safesearch': '1'  # Safe search enabled
+        }
+
+        response = requests.get(url, params=params, timeout=10)
+        response.raise_for_status()
+
+        raw_results = response.json().get('results', [])
+
+        # Standardize to PraisonAI format
+        results = []
+        for i, result in enumerate(raw_results[:max_results]):
+            results.append({
+                "title": result.get("title", ""),
+                "url": result.get("url", ""),
+                "snippet": result.get("content", "")
+            })
+
+        return results
+
+    except requests.exceptions.ConnectionError:
+        error_msg = f"Could not connect to SearxNG at {url}. Ensure SearxNG is running."
+        logging.error(error_msg)
+        return [{"error": error_msg}]
+    except requests.exceptions.Timeout:
+        error_msg = "SearxNG search request timed out"
+        logging.error(error_msg)
+        return [{"error": error_msg}]
+    except requests.exceptions.RequestException as e:
+        error_msg = f"SearxNG search request failed: {e}"
+        logging.error(error_msg)
+        return [{"error": error_msg}]
+    except (ValueError, KeyError) as e:
+        error_msg = f"Error parsing SearxNG response: {e}"
+        logging.error(error_msg)
+        return [{"error": error_msg}]
+
+def searxng(query: str, max_results: int = 5, searxng_url: Optional[str] = None) -> List[Dict]:
+    """Alias for searxng_search function."""
+    return searxng_search(query, max_results, searxng_url)
+
+if __name__ == "__main__":
+    # Example usage
+    results = searxng_search("Python programming")
+    for result in results:
+        print(f"\nTitle: {result.get('title')}")
+        print(f"URL: {result.get('url')}")
+        print(f"Snippet: {result.get('snippet')}")
{praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/RECORD
CHANGED
@@ -3,24 +3,28 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
 praisonaiagents/session.py,sha256=CI-ffCiOfmgB-1zFFik9daKCB5Sm41Q9ZOaq1-oSLW8,9250
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=it38pIYzHQIn2qscIuuvfgWyC9gPLZFj-nN8tDI8x5A,97766
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=C_yDdJB4XUuwKA9DrysAtAj3zSYT0IKtfCT4Pxo0oyI,63309
 praisonaiagents/agents/autoagents.py,sha256=Lc_b9mO2MeefBrsHkHoqFxEr5iRGrYuzDhslyybXwdw,13649
+praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
+praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
+praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=hoIxHzo9aNygeOiw9RtoPhpuSCVTUrKPe3OPvsT5qLc,98212
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=_gfp8hrSVT9aPqEDDfU8MiCdg0-3dVQpEQUE6AbrJlo,17243
 praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
-praisonaiagents/memory/
+praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
+praisonaiagents/memory/memory.py,sha256=x6CEMYhgzvlJH6SGKHPLRDt6kF0DVFFSUQbgr1OK3JM,38729
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=gxhMXG3s4CzaREyuwE5zxCMx2Wp_b_Wd53tDfkj8Qk8,66567
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=
+praisonaiagents/task/task.py,sha256=imqJ8wzZzVyUSym2EyF2tC-vAsV1UdfI_P3YM5mqAiw,20786
 praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
@@ -33,6 +37,7 @@ praisonaiagents/tools/json_tools.py,sha256=ApUYNuQ1qnbmYNCxSlx6Tth_H1yo8mhWtZ7Rr
 praisonaiagents/tools/newspaper_tools.py,sha256=NyhojNPeyULBGcAWGOT1X70qVkh3FgZrpH-S7PEmrwI,12667
 praisonaiagents/tools/pandas_tools.py,sha256=yzCeY4jetKrFIRA15Tr5OQ5d94T8DaSpzglx2UiWfPs,11092
 praisonaiagents/tools/python_tools.py,sha256=puqLANl5YaG1YG8ixkl_MgWayF7uj5iXUEE15UYwIZE,13513
+praisonaiagents/tools/searxng_tools.py,sha256=LzxFenzGlSBxnckEPwtEZYemAkU8FUflbFbHf5IZE7o,3159
 praisonaiagents/tools/shell_tools.py,sha256=6IlnFkNg04tVxQVM_fYgscIWLtcgIikpEi3olB1THuA,9431
 praisonaiagents/tools/spider_tools.py,sha256=lrZnT1V1BC46We-AzBrDB1Ryifr3KKGmYNntMsScU7w,15094
 praisonaiagents/tools/test.py,sha256=UHOTNrnMo0_H6I2g48re1WNZkrR7f6z25UnlWxiOSbM,1600
@@ -42,7 +47,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.100.dist-info/METADATA,sha256=sP3J1zX6-LWPWRdLHRmy-Ca-ADgUxL3GR1-qsaiFO4Y,1453
+praisonaiagents-0.0.100.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.100.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.100.dist-info/RECORD,,
{praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/WHEEL
File without changes
{praisonaiagents-0.0.98.dist-info → praisonaiagents-0.0.100.dist-info}/top_level.txt
File without changes