agentsflowcompiler_lib-0.1.5-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentsflow/__init__.py +53 -0
- agentsflow/_prod.py +72 -0
- agentsflow/agent/__init__.py +9 -0
- agentsflow/agent/_utils.py +122 -0
- agentsflow/agent/agent.py +237 -0
- agentsflow/agent/llm_loop.py +137 -0
- agentsflow/agent/log_dispatcher.py +97 -0
- agentsflow/agent/prompt_builder.py +48 -0
- agentsflow/agent/run_context.py +39 -0
- agentsflow/agent/stats.py +240 -0
- agentsflow/agent/tools.py +126 -0
- agentsflow/builder/__init__.py +11 -0
- agentsflow/builder/agents_builder.py +110 -0
- agentsflow/config.py +46 -0
- agentsflow/constants.py +49 -0
- agentsflow/dev/__init__.py +112 -0
- agentsflow/dev/agent_api.py +414 -0
- agentsflow/dev/helper/__init__.py +4 -0
- agentsflow/dev/helper/agent_helper.py +234 -0
- agentsflow/dev/helper/duplicate_helper.py +82 -0
- agentsflow/dev/helper/monitoring_helper.py +12 -0
- agentsflow/dev/helper/processing_helper.py +66 -0
- agentsflow/dev/helper/prod_sync_helper.py +173 -0
- agentsflow/dev/helper/project_helper.py +119 -0
- agentsflow/dev/helper/tool_helper.py +231 -0
- agentsflow/dev/model_api.py +74 -0
- agentsflow/dev/monitoring_api.py +297 -0
- agentsflow/dev/monitoring_types.py +72 -0
- agentsflow/dev/processing_api.py +275 -0
- agentsflow/dev/project_api.py +123 -0
- agentsflow/dev/tool_api.py +686 -0
- agentsflow/errors.py +72 -0
- agentsflow/llm/__init__.py +13 -0
- agentsflow/llm/_error_utils.py +27 -0
- agentsflow/llm/ai21_client.py +38 -0
- agentsflow/llm/amazon_client.py +204 -0
- agentsflow/llm/anthropic_client.py +179 -0
- agentsflow/llm/base.py +70 -0
- agentsflow/llm/cohere_client.py +118 -0
- agentsflow/llm/factory.py +105 -0
- agentsflow/llm/google_client.py +164 -0
- agentsflow/llm/mistral_client.py +38 -0
- agentsflow/llm/openai_client.py +35 -0
- agentsflow/llm/openai_compatible_client.py +204 -0
- agentsflow/llm/provider_registry.py +228 -0
- agentsflow/logging_utils.py +58 -0
- agentsflow/py.typed +0 -0
- agentsflow/schema/__init__.py +19 -0
- agentsflow/schema/agent_config_schema.py +258 -0
- agentsflow/schema/manifest_schema.py +11 -0
- agentsflow/schema/project_config_schema.py +87 -0
- agentsflow/schema/tool_config_schema.py +116 -0
- agentsflow/schema/tool_schema.py +25 -0
- agentsflow/tools/__init__.py +12 -0
- agentsflow/tools/builtins/base32_base58/tool.py +105 -0
- agentsflow/tools/builtins/base32_base58/tool.yaml +24 -0
- agentsflow/tools/builtins/base64_codec/tool.py +44 -0
- agentsflow/tools/builtins/base64_codec/tool.yaml +24 -0
- agentsflow/tools/builtins/calculator/tool.py +41 -0
- agentsflow/tools/builtins/calculator/tool.yaml +24 -0
- agentsflow/tools/builtins/csv_parser/tool.py +69 -0
- agentsflow/tools/builtins/csv_parser/tool.yaml +28 -0
- agentsflow/tools/builtins/date_formatter/tool.py +37 -0
- agentsflow/tools/builtins/date_formatter/tool.yaml +24 -0
- agentsflow/tools/builtins/diff_checker/tool.py +46 -0
- agentsflow/tools/builtins/diff_checker/tool.yaml +24 -0
- agentsflow/tools/builtins/directory_tree/tool.py +95 -0
- agentsflow/tools/builtins/directory_tree/tool.yaml +28 -0
- agentsflow/tools/builtins/env_reader/tool.py +46 -0
- agentsflow/tools/builtins/env_reader/tool.yaml +24 -0
- agentsflow/tools/builtins/file_size_reader/tool.py +59 -0
- agentsflow/tools/builtins/file_size_reader/tool.yaml +16 -0
- agentsflow/tools/builtins/hash_generator/tool.py +40 -0
- agentsflow/tools/builtins/hash_generator/tool.yaml +20 -0
- agentsflow/tools/builtins/http_headers_analyzer/tool.py +69 -0
- agentsflow/tools/builtins/http_headers_analyzer/tool.yaml +16 -0
- agentsflow/tools/builtins/ip_lookup/tool.py +52 -0
- agentsflow/tools/builtins/ip_lookup/tool.yaml +16 -0
- agentsflow/tools/builtins/json_validator/tool.py +41 -0
- agentsflow/tools/builtins/json_validator/tool.yaml +20 -0
- agentsflow/tools/builtins/jwt_decoder/tool.py +64 -0
- agentsflow/tools/builtins/jwt_decoder/tool.yaml +16 -0
- agentsflow/tools/builtins/markdown_converter/tool.py +121 -0
- agentsflow/tools/builtins/markdown_converter/tool.yaml +20 -0
- agentsflow/tools/builtins/password_strength/tool.py +114 -0
- agentsflow/tools/builtins/password_strength/tool.yaml +16 -0
- agentsflow/tools/builtins/path_normalizer/tool.py +41 -0
- agentsflow/tools/builtins/path_normalizer/tool.yaml +20 -0
- agentsflow/tools/builtins/ping_checker/tool.py +128 -0
- agentsflow/tools/builtins/ping_checker/tool.yaml +24 -0
- agentsflow/tools/builtins/port_scanner/tool.py +98 -0
- agentsflow/tools/builtins/port_scanner/tool.yaml +24 -0
- agentsflow/tools/builtins/random_generator/tool.py +80 -0
- agentsflow/tools/builtins/random_generator/tool.yaml +36 -0
- agentsflow/tools/builtins/regex_tester/tool.py +65 -0
- agentsflow/tools/builtins/regex_tester/tool.yaml +24 -0
- agentsflow/tools/builtins/slug_generator/tool.py +47 -0
- agentsflow/tools/builtins/slug_generator/tool.yaml +24 -0
- agentsflow/tools/builtins/temp_file_generator/tool.py +36 -0
- agentsflow/tools/builtins/temp_file_generator/tool.yaml +24 -0
- agentsflow/tools/builtins/text_summarizer/tool.py +83 -0
- agentsflow/tools/builtins/text_summarizer/tool.yaml +20 -0
- agentsflow/tools/builtins/timestamp_converter/tool.py +82 -0
- agentsflow/tools/builtins/timestamp_converter/tool.yaml +24 -0
- agentsflow/tools/builtins/title_case_converter/tool.py +80 -0
- agentsflow/tools/builtins/title_case_converter/tool.yaml +20 -0
- agentsflow/tools/builtins/url_parser/tool.py +56 -0
- agentsflow/tools/builtins/url_parser/tool.yaml +16 -0
- agentsflow/tools/builtins/uuid_generator/tool.py +32 -0
- agentsflow/tools/builtins/uuid_generator/tool.yaml +20 -0
- agentsflow/tools/builtins/word_counter/tool.py +33 -0
- agentsflow/tools/builtins/word_counter/tool.yaml +16 -0
- agentsflow/tools/registry.py +96 -0
- agentsflow/types.py +83 -0
- agentsflow/utils/__init__.py +25 -0
- agentsflow/utils/audit.py +78 -0
- agentsflow/utils/config_io.py +51 -0
- agentsflow/utils/fs.py +85 -0
- agentsflow/utils/manifest_io.py +111 -0
- agentsflow/utils/paths.py +70 -0
- agentsflow/validate/__init__.py +37 -0
- agentsflow/validate/agent.py +139 -0
- agentsflow/validate/processing.py +60 -0
- agentsflowcompiler_lib-0.1.5.dist-info/LICENSE +21 -0
- agentsflowcompiler_lib-0.1.5.dist-info/METADATA +654 -0
- agentsflowcompiler_lib-0.1.5.dist-info/RECORD +128 -0
- agentsflowcompiler_lib-0.1.5.dist-info/WHEEL +5 -0
- agentsflowcompiler_lib-0.1.5.dist-info/top_level.txt +1 -0
agentsflow/__init__.py
ADDED
@@ -0,0 +1,53 @@
"""
agentsflow — PROD API
=====================
Production entry point. Import ``load_agents`` to load and run agents.

Usage::

    from agentsflow import load_agents

    agents = load_agents("/path/to/agents_dir")
    result = agents["my_agent"].run("Hello")
"""

from agentsflow._prod import load_agents
from agentsflow.config import AgentsFlowConfig
from agentsflow.errors import (
    AgentsFlowError,
    AgentsFlowConfigError,
    AgentsFlowLogError,
    AgentsFlowToolError,
    MaxToolRoundsError,
    LLMProviderError,
    LLMAuthenticationError,
    LLMInvalidRequestError,
    LLMRateLimitError,
    LLMServerError,
)

try:
    from importlib.metadata import version, PackageNotFoundError
except ImportError:  # pragma: no cover
    from importlib_metadata import version, PackageNotFoundError  # type: ignore

try:
    __version__ = version("AgentsFlowCompiler-lib")
except PackageNotFoundError:
    # package is not installed
    __version__ = "0.0.0"
__all__ = [
    "load_agents",
    "AgentsFlowConfig",
    "__version__",
    "AgentsFlowError",
    "AgentsFlowConfigError",
    "AgentsFlowLogError",
    "AgentsFlowToolError",
    "MaxToolRoundsError",
    "LLMProviderError",
    "LLMAuthenticationError",
    "LLMInvalidRequestError",
    "LLMRateLimitError",
    "LLMServerError",
]

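Note: a minimal consumer of the public surface re-exported above could look like the sketch below. It uses only the names exported here and assumes that ``RunResult`` (defined in agentsflow/agent/run_context.py) exposes its constructor fields, such as ``output``, as attributes; the agent name and directory are placeholders.

    from agentsflow import (
        load_agents,
        AgentsFlowError,
        MaxToolRoundsError,
        LLMRateLimitError,
    )

    agents = load_agents("/path/to/agents_dir")
    try:
        result = agents["my_agent"].run("Hello")
        print(result.output)
    except (MaxToolRoundsError, LLMRateLimitError) as exc:
        print(f"run aborted: {exc}")      # too many tool rounds / rate limited
    except AgentsFlowError as exc:
        print(f"agentsflow error: {exc}")  # other SDK errors (assumed to share this base)
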
agentsflow/_prod.py
ADDED
@@ -0,0 +1,72 @@
"""
agentsflow — Production Loader
===============================
Minimal PROD API: load agents from a directory and get ready-to-use Agent objects.
"""

from __future__ import annotations

import logging
from pathlib import Path
from typing import TYPE_CHECKING, Optional

from dotenv import load_dotenv

if TYPE_CHECKING:
    from agentsflow.agent import Agent
    from agentsflow.config import AgentsFlowConfig

logger = logging.getLogger(__name__)


def load_agents(
    agents_dir: str,
    env_path: Optional[str] = None,
    config: Optional["AgentsFlowConfig"] = None,
) -> dict[str, "Agent"]:
    """
    Load all agents from a directory and return a dict of Agent instances.

    This is the **single entry point** for production use.

    Args:
        agents_dir: Path to the agents root directory (contains ``config/agents.yaml``
            and ``agents/`` subdirectory with per-agent configs).
        env_path: Optional path to a ``.env`` file. If provided, environment
            variables are loaded from it before resolving API keys.
        config: Optional SDK-wide config for log level, network logger silencing,
            and log format. If provided, applies log_level and optionally
            silence_network_loggers.

    Returns:
        Dict mapping agent name → ``Agent`` instance, ready to call ``.run()``.

    Example::

        from agentsflow import load_agents

        agents = load_agents("/home/user/my_project/DEV")
        result = agents["analyzer"].run("Analyze this data")

        # With custom .env
        agents = load_agents("/path/to/project", env_path="/path/to/.env")
    """
    # Load .env file if provided
    if env_path:
        _load_env_file(env_path)

    # Apply SDK-wide config if provided
    if config is not None:
        from agentsflow.config import _apply_config
        _apply_config(config)

    from agentsflow.builder import build_agents
    return build_agents(Path(agents_dir))


def _load_env_file(env_path: str) -> None:
    """Load environment variables from a .env file using python-dotenv."""
    path = Path(env_path)
    if not path.exists():
        raise FileNotFoundError(f".env file not found: {path}")
    load_dotenv(env_path)

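Usage sketch for the loader above. The directory layout follows the docstring; the ``AgentsFlowConfig`` keyword names are assumptions inferred from the parameter description (log level and network-logger silencing), not confirmed signatures.

    # Layout per the docstring:
    #   /home/user/my_project/DEV/
    #   ├── config/agents.yaml
    #   └── agents/<agent_name>/...
    from agentsflow import load_agents, AgentsFlowConfig

    cfg = AgentsFlowConfig(log_level="INFO", silence_network_loggers=True)  # field names assumed
    agents = load_agents(
        "/home/user/my_project/DEV",
        env_path="/home/user/my_project/.env",  # loaded before API keys are resolved
        config=cfg,
    )
    print(sorted(agents))  # agent names available to .run()
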
agentsflow/agent/_utils.py
ADDED
@@ -0,0 +1,122 @@
"""
Agent Utilities
===============
Shared helper functions used across the agent package:
file loading, dynamic function import, and token counting.
"""

from __future__ import annotations

import importlib.util
import json
import sys
from pathlib import Path
from typing import Any, Callable, Optional, Union

from tiktoken import encoding_for_model, get_encoding


# ── File Loading ─────────────────────────────────────────────


def load_file(path: Optional[Path]) -> Any:
    """
    Load a file and return its contents.

    Supports:
        .json              → parsed dict / list
        .md / .txt / .text → raw string

    Returns None if path is None.
    Raises FileNotFoundError / ValueError for missing / unsupported files.
    """
    if path is None:
        return None
    if not path.exists():
        raise FileNotFoundError(f"File not found: {path}")

    suffix = path.suffix.lower()
    if suffix == ".json":
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    elif suffix in (".md", ".txt", ".text"):
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    else:
        raise ValueError(f"Unsupported file type: {suffix}")


def load_prompt(base_dir: Path, relative_path: Optional[Path]) -> Any:
    """Load a prompt file relative to a base directory."""
    if not relative_path:
        return None
    full_path = (base_dir / relative_path).resolve()
    if not full_path.is_relative_to(base_dir.resolve()):
        raise ValueError(f"Path traversal detected: '{relative_path}' escapes base directory.")
    return load_file(full_path)


# ── Dynamic Function Loading ────────────────────────────────


def _load_function(
    module_path: Union[str, Path],
    func_name: Optional[str],
) -> Callable[..., Any]:
    """
    Dynamically import a callable from a Python module.

    Args:
        module_path: Absolute path to a .py file, or a dotted module name.
        func_name: Name of the callable inside the module.

    Returns:
        The callable object.
    """
    if not func_name:
        raise ValueError("func_name is required")
    if isinstance(module_path, Path):
        module_path = module_path.resolve()
        spec = importlib.util.spec_from_file_location(module_path.stem, module_path)
        if spec is None or spec.loader is None:
            raise ImportError(f"Cannot load module from file '{module_path}'")

        module = importlib.util.module_from_spec(spec)
        module_key = str(module_path)
        sys.modules[module_key] = module
        spec.loader.exec_module(module)
    else:
        module = importlib.import_module(module_path)

    func = getattr(module, func_name, None)
    if func is None:
        raise AttributeError(
            f"Module '{module.__name__}' has no attribute '{func_name}'"
        )
    if not callable(func):
        raise TypeError(
            f"Attribute '{func_name}' in module '{module.__name__}' is not callable"
        )
    return func


# ── Token Counting ───────────────────────────────────────────


def count_tokens(text: Optional[str], model: str = "o200k_base") -> int:
    """
    Count the number of tokens in *text* for the given *model*.

    NOTE: This uses tiktoken (OpenAI's tokenizer). For non-OpenAI models (Claude, Llama, Gemini),
    this is a pragmatic surrogate and may not be 100% accurate.
    For maximum accuracy, use the count_tokens() method on the respective LLMClient instance.

    Falls back to the ``o200k_base`` encoding when the model is unknown.
    """
    if text is None:
        return 0
    try:
        encoding = encoding_for_model(model)
    except KeyError:
        encoding = get_encoding("o200k_base")
    return len(encoding.encode(text))

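A short sketch of how these helpers compose; the hook path, function name, and prompt text are hypothetical, and ``count_tokens`` falls back to ``o200k_base`` exactly as documented above.

    from pathlib import Path

    from agentsflow.agent._utils import _load_function, count_tokens, load_file

    # Import a callable from an absolute .py path (hypothetical file and name).
    postprocess = _load_function(Path("/agents/analyzer/hooks/post.py"), "postprocess")

    # .json is parsed, .md/.txt/.text returned raw, anything else raises ValueError.
    schema = load_file(Path("/agents/analyzer/output_schema.json"))

    # tiktoken-based estimate; unknown model names fall back to o200k_base.
    print(count_tokens("Summarize the quarterly report.", model="gpt-4o"))
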
agentsflow/agent/agent.py
ADDED
@@ -0,0 +1,237 @@
"""
Agent — Core Runtime
====================
Slim orchestrator that wires together the modular components
(prompts, tools, stats) and runs the LLM interaction loop.
"""

from __future__ import annotations

import json
import logging
from pathlib import Path
from typing import Any, Optional, Callable
from uuid import uuid4

from agentsflow.constants import AGENTS_DIR, DEFAULT_AGENT_TIMEOUT
from agentsflow.schema import AgentConfig
from agentsflow.llm import create_llm_client, LLMClient
from agentsflow.types import ChatMessage, TokensSummary

from .run_context import RunContext, RunResult
from .llm_loop import run_llm_loop
from .prompt_builder import build_system_prompt
from .tools import AgentTools
from .stats import AgentStats
from ._utils import load_prompt, _load_function
from agentsflow.logging_utils import log_structured

logger = logging.getLogger(__name__)


class Agent:
    """
    A single AI agent that can be run with ``agent.run(user_prompt)``.

    Internally delegates to:
    - ``AgentTools`` — tool schemas & execution
    - ``AgentStats`` — logging & token tracking
    """

    def __init__(
        self,
        base_agents_dir: Path,
        config: AgentConfig,
        api_key: str,
        base_url: Optional[str] = None,
        tools_base_dir: Optional[Path] = None,
    ) -> None:
        self._agent_name = config.identity.name
        # ── Paths ────────────────────────────────────────────
        self._agent_dir = base_agents_dir / AGENTS_DIR / self._agent_name
        # ── LLM Client ──────────────────────────────────────
        self.client: LLMClient = create_llm_client(
            model=config.model_settings.model,
            api_key=api_key,
            base_url=base_url,
            provider=config.model_settings.provider,
        )

        # ── Prompts ─────────────────────────────
        self._system_prompt = build_system_prompt(self._agent_dir, config.prompt_config)

        # ── Output Format ────────────────────────────────────────────
        self._json_schema = load_prompt(self._agent_dir, config.output_format_config.json_schema_path if config.output_format_config else None)
        # ── Processing ────────────────────────────────────────────
        self._preprocess_fn: Optional[Callable] = None
        if config.preprocess_config:
            self._preprocess_fn = _load_function(
                (self._agent_dir / config.preprocess_config.path).resolve(),
                config.preprocess_config.function_name,
            )
        self._postprocess_fn: Optional[Callable] = None
        if config.postprocess_config:
            self._postprocess_fn = _load_function(
                self._agent_dir / config.postprocess_config.path,
                config.postprocess_config.function_name,
            )
        # ── Tools ────────────────────────────────────────────

        self._tools = AgentTools(
            agent_dir=self._agent_dir,
            tool_configs=config.tools if config.tools else [],
            tools_base_dir=tools_base_dir,
        )
        # ── Stats / Logging ──────────────────────────────────
        self._stats = AgentStats(
            agent_dir=self._agent_dir,
            client=self.client,
            logs_config=config.logs_config,
        )

        # ── Config Shortcuts ─────────────────────────────────
        self._temperature = config.model_settings.temperature
        self._max_tokens = config.model_settings.max_tokens
        self._timeout = config.model_settings.timeout
        self._retry_config = config.model_settings.retry_config
        self._return_format = config.output_format_config.return_format if config.output_format_config else "text"


        # Log system prompt on init (hash-based dedup)
        self._stats.log_system_prompt(self._system_prompt)


    @property
    def name(self) -> str:
        return self._agent_name

    @property
    def model(self) -> str:
        return self.client.model

    @property
    def provider(self) -> str:
        return self.client.provider_name

    # ── Run ──────────────────────────────────────────────────

    def build_messages(self, user_prompt: str) -> list[ChatMessage]:
        messages: list[ChatMessage] = []
        if self._system_prompt:
            messages.append({"role": "system", "content": self._system_prompt})
        messages.append({"role": "user", "content": user_prompt})
        return messages

    def run(self, user_prompt: str, timeout: Optional[float] = None) -> RunResult:
        ctx = self._create_run_context(user_prompt)
        self._tools.reset_log()

        log_structured(
            logging.INFO,
            "run_start",
            rid=ctx.rid,
            agent_name=getattr(self, "_agent_name", None),
        )

        try:
            self._apply_preprocess(ctx)
            ctx.messages = self.build_messages(ctx.llm_input)
            ctx.llm_raw_output = run_llm_loop(
                client=self.client,
                messages=ctx.messages,
                tools=self._tools,
                temperature=self._temperature,
                max_tokens=self._max_tokens,
                response_format=self._build_response_format(),
                timeout=timeout if timeout is not None else self._timeout,
                retry_config=self._retry_config,
                rid=ctx.rid,
            )
            ctx.llm_output = self._default_output_fn(ctx.llm_raw_output)
            self._apply_postprocess(ctx)
            self._finalize_run_state(ctx)
            self._log_run(ctx)

            log_structured(
                logging.INFO,
                "run_end",
                rid=ctx.rid,
                agent_name=getattr(self, "_agent_name", None),
            )
            return RunResult(
                output=ctx.final_output,
                rid=ctx.rid,
                token_input=self.client.count_tokens(ctx.llm_input),
                token_output=self.client.count_tokens(str(ctx.llm_output) if ctx.llm_output else ""),
                tool_calls=self._tools.call_log,
            )
        except Exception as e:
            log_structured(
                logging.ERROR,
                "run_error",
                rid=ctx.rid,
                agent_name=getattr(self, "_agent_name", None),
                error=str(e),
            )
            raise

    def get_tokens_summary(self, ctx: RunContext) -> TokensSummary:
        return self._stats.get_tokens_summary(
            system_prompt=self._system_prompt,
            user_prompt=ctx.user_prompt,
            rag_context=getattr(ctx, "rag_context", None),
            answer=ctx.final_output,
        )

    # ── Private ──────────────────────────────────────────────

    def _build_response_format(self) -> Optional[dict]:
        if self._json_schema:
            return {
                "type": "json_schema",
                "json_schema": {
                    "name": self._agent_name,
                    "schema": self._json_schema,
                    "strict": True,
                },
            }
        elif self._return_format == "json_object":
            return {"type": "json_object"}
        return None

    def _create_run_context(self, user_prompt: str) -> RunContext:
        return RunContext(rid=str(uuid4()), user_prompt=user_prompt, llm_input=user_prompt)

    def _apply_preprocess(self, ctx: RunContext) -> None:
        if self._preprocess_fn:
            ctx.preprocess_output = self._preprocess_fn(ctx.user_prompt)
            ctx.llm_input = str(ctx.preprocess_output)

    def _apply_postprocess(self, ctx: RunContext) -> None:
        if self._postprocess_fn:
            ctx.postprocess_output = self._postprocess_fn(ctx.llm_output)
            ctx.final_output = ctx.postprocess_output
        else:
            ctx.final_output = ctx.llm_output

    def _finalize_run_state(self, ctx: RunContext) -> None:
        pass

    def _log_run(self, ctx: RunContext) -> None:
        self._stats.log_io(
            rid=ctx.rid,
            input_text=ctx.user_prompt,
            llm_input=ctx.llm_input,
            llm_output=ctx.llm_output,
            system_prompt=self._system_prompt,
            preprocess_output=ctx.preprocess_output,
            postprocess_output=ctx.postprocess_output,
            tool_calls=self._tools.call_log if self._tools.call_log else None,
        )

    def _default_output_fn(self, output: str) -> Any:
        """Try to parse output as JSON; fall back to raw string."""
        try:
            return json.loads(output)
        except json.JSONDecodeError:
            return output

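The preprocess/postprocess hooks that ``Agent.__init__`` loads via ``_load_function`` are plain module-level callables: ``_apply_preprocess`` passes the raw user prompt and feeds ``str(return value)`` to the model, while ``_apply_postprocess`` receives the (possibly JSON-parsed) output and its return value becomes the final output. A hedged sketch of such a hook module follows; the file name, function names, and logic are illustrative, referenced from an agent's preprocess_config / postprocess_config.

    # hooks.py: illustrative agent hooks; an agent config's preprocess_config /
    # postprocess_config would point at this path and these function names.

    def preprocess(user_prompt: str) -> str:
        # Runs before the LLM call; the string form of the return value
        # becomes the LLM input.
        return f"[ticket] {user_prompt.strip()}"

    def postprocess(llm_output):
        # Runs after the LLM call; llm_output may be a dict (parsed JSON)
        # or a raw string, and the return value becomes RunResult.output.
        if isinstance(llm_output, dict):
            return llm_output.get("answer", llm_output)
        return llm_output
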
agentsflow/agent/llm_loop.py
ADDED
@@ -0,0 +1,137 @@
"""
LLM Loop
========
Provider-agnostic multi-turn chat loop with tool execution.
"""

from __future__ import annotations

import json
import logging
from typing import Optional

from agentsflow.errors import LLMProviderError, LLMRateLimitError, LLMServerError, MaxToolRoundsError
from agentsflow.llm import LLMClient
from agentsflow.logging_utils import log_structured
from agentsflow.schema import RetryConfig
from agentsflow.types import ChatMessage, ToolCallSpec

from .tools import AgentTools


def run_llm_loop(
    client: LLMClient,
    messages: list[ChatMessage],
    tools: AgentTools,
    temperature: Optional[float],
    max_tokens: Optional[int],
    response_format: Optional[dict],
    timeout: Optional[float] = None,
    retry_config: Optional[RetryConfig] = None,
    rid: Optional[str] = None,
) -> str:
    """Run the model loop until final text or max tool rounds."""
    tool_schemas = tools.get_schemas(provider=client.provider_name)
    rc = retry_config or RetryConfig()

    import tenacity
    from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type

    def _call_chat() -> str | dict:
        try:
            return client.chat(
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                response_format=response_format if not tool_schemas else None,
                tools=tool_schemas,
                timeout=timeout,
            )
        except LLMProviderError as e:
            log_structured(
                logging.ERROR,
                "llm_error",
                rid=rid,
                error=str(e),
                provider_request_id=e.provider_request_id,
            )
            if rid and e.rid is None:
                raise type(e)(
                    e.args[0],
                    cause=e,
                    rid=rid,
                    provider_request_id=e.provider_request_id,
                ) from e
            raise

    @retry(
        retry=retry_if_exception_type((LLMRateLimitError, LLMServerError)),
        wait=wait_exponential(multiplier=1, min=rc.base_delay, max=rc.max_delay),
        stop=stop_after_attempt(rc.max_attempts),
        reraise=True,
    )
    def _call_chat_with_retry() -> str | dict:
        return _call_chat()

    for _round in range(AgentTools.MAX_TOOL_ROUNDS):
        result = _call_chat_with_retry()

        if isinstance(result, str):
            return result

        if isinstance(result, dict) and "tool_calls" in result:
            append_assistant_tool_calls(messages, result["tool_calls"])
            execute_tool_calls(tools, messages, result["tool_calls"], rid=rid)
            continue

        return str(result)

    raise MaxToolRoundsError(
        f"Agent exceeded {AgentTools.MAX_TOOL_ROUNDS} tool-calling rounds without a final answer."
    )


def append_assistant_tool_calls(
    messages: list[ChatMessage], tool_calls: list[ToolCallSpec]
) -> None:
    """Append the assistant tool-call message in OpenAI-style format."""
    messages.append({
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": tc["id"],
                "type": "function",
                "function": {
                    "name": tc["name"],
                    "arguments": json.dumps(tc["arguments"]),
                },
            }
            for tc in tool_calls
        ],
    })


def execute_tool_calls(
    tools: AgentTools,
    messages: list[ChatMessage],
    tool_calls: list[ToolCallSpec],
    rid: Optional[str] = None,
) -> None:
    """Execute tool calls and append tool results to the transcript."""
    agent_name = tools._agent_dir.name if hasattr(tools, "_agent_dir") else None
    for tc in tool_calls:
        log_structured(
            logging.INFO,
            "tool_call",
            rid=rid,
            agent_name=agent_name,
            tool_name=tc["name"],
            tool_arguments=tc.get("arguments"),
        )
        tool_result_str = tools.execute(tc["name"], tc["arguments"])
        messages.append({
            "role": "tool",
            "tool_call_id": tc["id"],
            "content": tool_result_str,
        })
