penguiflow 2.2.1__tar.gz → 2.2.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of penguiflow might be problematic. Click here for more details.
- {penguiflow-2.2.1 → penguiflow-2.2.3}/PKG-INFO +1 -1
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/__init__.py +1 -1
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/core.py +24 -1
- penguiflow-2.2.3/penguiflow/planner/__init__.py +27 -0
- penguiflow-2.2.3/penguiflow/planner/prompts.py +243 -0
- penguiflow-2.2.3/penguiflow/planner/react.py +1339 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow.egg-info/PKG-INFO +1 -1
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow.egg-info/SOURCES.txt +3 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/pyproject.toml +2 -2
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_errors.py +37 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/LICENSE +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/README.md +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/admin.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/bus.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/catalog.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/debug.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/errors.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/metrics.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/middlewares.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/node.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/patterns.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/policies.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/registry.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/remote.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/state.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/streaming.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/testkit.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/types.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow/viz.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow.egg-info/dependency_links.txt +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow.egg-info/entry_points.txt +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow.egg-info/requires.txt +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow.egg-info/top_level.txt +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow_a2a/__init__.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/penguiflow_a2a/server.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/setup.cfg +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_a2a_server.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_budgets.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_cancel.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_catalog.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_controller.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_core.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_distribution_hooks.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_metadata.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_metrics.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_middlewares.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_node.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_patterns.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_planner_prompts.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_property_based.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_react_planner.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_registry.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_remote.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_routing_policy.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_streaming.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_testkit.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_types.py +0 -0
- {penguiflow-2.2.1 → penguiflow-2.2.3}/tests/test_viz.py +0 -0
|
@@ -14,6 +14,7 @@ from collections import deque
|
|
|
14
14
|
from collections.abc import Awaitable, Callable, Mapping, Sequence
|
|
15
15
|
from contextlib import suppress
|
|
16
16
|
from dataclasses import dataclass
|
|
17
|
+
from types import TracebackType
|
|
17
18
|
from typing import Any, cast
|
|
18
19
|
|
|
19
20
|
from .bus import BusEnvelope, MessageBus
|
|
@@ -27,6 +28,14 @@ from .types import WM, FinalAnswer, Message, StreamChunk
|
|
|
27
28
|
|
|
28
29
|
logger = logging.getLogger("penguiflow.core")
|
|
29
30
|
|
|
31
|
+
ExcInfo = tuple[type[BaseException], BaseException, TracebackType | None]
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _capture_exc_info(exc: BaseException | None) -> ExcInfo | None:
|
|
35
|
+
if exc is None:
|
|
36
|
+
return None
|
|
37
|
+
return (type(exc), exc, exc.__traceback__)
|
|
38
|
+
|
|
30
39
|
BUDGET_EXCEEDED_TEXT = "Hop budget exhausted"
|
|
31
40
|
DEADLINE_EXCEEDED_TEXT = "Deadline exceeded"
|
|
32
41
|
TOKEN_BUDGET_EXCEEDED_TEXT = "Token budget exhausted"
|
|
@@ -750,6 +759,7 @@ class PenguiFlow:
|
|
|
750
759
|
raise
|
|
751
760
|
except TimeoutError as exc:
|
|
752
761
|
latency = (time.perf_counter() - start) * 1000
|
|
762
|
+
exc_info = _capture_exc_info(exc)
|
|
753
763
|
await self._emit_event(
|
|
754
764
|
event="node_timeout",
|
|
755
765
|
node=node,
|
|
@@ -759,6 +769,7 @@ class PenguiFlow:
|
|
|
759
769
|
latency_ms=latency,
|
|
760
770
|
level=logging.WARNING,
|
|
761
771
|
extra={"exception": repr(exc)},
|
|
772
|
+
exc_info=exc_info,
|
|
762
773
|
)
|
|
763
774
|
if attempt >= node.policy.max_retries:
|
|
764
775
|
timeout_message: str | None = None
|
|
@@ -783,6 +794,7 @@ class PenguiFlow:
|
|
|
783
794
|
flow_error=flow_error,
|
|
784
795
|
latency=latency,
|
|
785
796
|
attempt=attempt,
|
|
797
|
+
exc_info=exc_info,
|
|
786
798
|
)
|
|
787
799
|
return
|
|
788
800
|
attempt += 1
|
|
@@ -801,6 +813,7 @@ class PenguiFlow:
|
|
|
801
813
|
continue
|
|
802
814
|
except Exception as exc: # noqa: BLE001
|
|
803
815
|
latency = (time.perf_counter() - start) * 1000
|
|
816
|
+
exc_info = _capture_exc_info(exc)
|
|
804
817
|
await self._emit_event(
|
|
805
818
|
event="node_error",
|
|
806
819
|
node=node,
|
|
@@ -810,6 +823,7 @@ class PenguiFlow:
|
|
|
810
823
|
latency_ms=latency,
|
|
811
824
|
level=logging.ERROR,
|
|
812
825
|
extra={"exception": repr(exc)},
|
|
826
|
+
exc_info=exc_info,
|
|
813
827
|
)
|
|
814
828
|
if attempt >= node.policy.max_retries:
|
|
815
829
|
flow_error = self._create_flow_error(
|
|
@@ -830,6 +844,7 @@ class PenguiFlow:
|
|
|
830
844
|
flow_error=flow_error,
|
|
831
845
|
latency=latency,
|
|
832
846
|
attempt=attempt,
|
|
847
|
+
exc_info=exc_info,
|
|
833
848
|
)
|
|
834
849
|
return
|
|
835
850
|
attempt += 1
|
|
@@ -890,6 +905,7 @@ class PenguiFlow:
|
|
|
890
905
|
flow_error: FlowError,
|
|
891
906
|
latency: float | None,
|
|
892
907
|
attempt: int,
|
|
908
|
+
exc_info: ExcInfo | None,
|
|
893
909
|
) -> None:
|
|
894
910
|
original = flow_error.unwrap()
|
|
895
911
|
exception_repr = repr(original) if original is not None else flow_error.message
|
|
@@ -906,6 +922,7 @@ class PenguiFlow:
|
|
|
906
922
|
latency_ms=latency,
|
|
907
923
|
level=logging.ERROR,
|
|
908
924
|
extra=extra,
|
|
925
|
+
exc_info=exc_info,
|
|
909
926
|
)
|
|
910
927
|
if self._emit_errors_to_rookery and flow_error.trace_id is not None:
|
|
911
928
|
await self._emit_to_rookery(flow_error, source=context.owner)
|
|
@@ -1365,6 +1382,7 @@ class PenguiFlow:
|
|
|
1365
1382
|
latency_ms: float | None,
|
|
1366
1383
|
level: int,
|
|
1367
1384
|
extra: dict[str, Any] | None = None,
|
|
1385
|
+
exc_info: ExcInfo | None = None,
|
|
1368
1386
|
) -> None:
|
|
1369
1387
|
node_name = getattr(node, "name", None)
|
|
1370
1388
|
node_id = getattr(node, "node_id", node_name)
|
|
@@ -1398,7 +1416,12 @@ class PenguiFlow:
|
|
|
1398
1416
|
extra=extra or {},
|
|
1399
1417
|
)
|
|
1400
1418
|
|
|
1401
|
-
|
|
1419
|
+
payload = event_obj.to_payload()
|
|
1420
|
+
log_kwargs: dict[str, Any] = {"extra": payload}
|
|
1421
|
+
if exc_info is not None:
|
|
1422
|
+
log_kwargs["exc_info"] = exc_info
|
|
1423
|
+
|
|
1424
|
+
logger.log(level, event, **log_kwargs)
|
|
1402
1425
|
|
|
1403
1426
|
if self._state_store is not None:
|
|
1404
1427
|
stored_event = StoredEvent.from_flow_event(event_obj)
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Planner entry points."""

from __future__ import annotations

# Re-export the public planner surface from the react implementation module
# so callers can simply ``from penguiflow.planner import ReactPlanner``.
from .react import (
    ParallelCall,
    ParallelJoin,
    PlannerAction,
    PlannerFinish,
    PlannerPause,
    ReactPlanner,
    Trajectory,
    TrajectoryStep,
    TrajectorySummary,
)

# NOTE: keep this list in sync with the import block above; it defines the
# package's public API for ``from penguiflow.planner import *``.
__all__ = [
    "ParallelCall",
    "ParallelJoin",
    "PlannerAction",
    "PlannerFinish",
    "PlannerPause",
    "ReactPlanner",
    "Trajectory",
    "TrajectoryStep",
    "TrajectorySummary",
]
|
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
"""Prompt helpers for the React planner."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from collections.abc import Mapping, Sequence
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def render_summary(summary: Mapping[str, Any]) -> str:
    """Render a trajectory summary mapping as a single prompt line."""
    return f"Trajectory summary: {_compact_json(summary)}"
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def render_resume_user_input(user_input: str) -> str:
    """Prefix resumed user input so the model can tell it from the query."""
    return "Resume input: " + user_input
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def render_planning_hints(hints: Mapping[str, Any]) -> str:
    """Render optional planning hints as newline-separated guidance lines.

    Each recognised key in *hints* that holds a truthy value contributes one
    line, in a fixed presentation order. Returns an empty string when no
    recognised hint is set, so callers can skip emitting the section.
    """
    # (hint key, line template) pairs, in the order the prompt presents them.
    # Data-driven instead of a copy-paste if-chain so new hints are one line.
    templates = (
        ("constraints", "Respect the following constraints: {}"),
        ("preferred_order", "Preferred order (if feasible): {}"),
        ("parallel_groups", "Allowed parallel groups: {}"),
        ("disallow_nodes", "Disallowed tools: {}"),
        ("preferred_nodes", "Preferred tools: {}"),
        ("budget", "Budget hints: {}"),
    )
    lines = [
        template.format(hints.get(key))
        for key, template in templates
        if hints.get(key)
    ]
    return "\n".join(lines)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def render_disallowed_node(node_name: str) -> str:
    """Feedback when the model picked a tool blocked by constraints."""
    message = f"tool '{node_name}' is not permitted by constraints."
    return message + " Choose an allowed tool or revise the plan."
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def render_ordering_hint_violation(expected: Sequence[str], proposed: str) -> str:
    """Remind the model of the preferred tool ordering it just violated."""
    sequence = ", ".join(expected)
    return (
        f"Ordering hint reminder: follow the preferred sequence [{sequence}]. "
        f"Proposed: {proposed}. Revise the plan."
    )
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def render_parallel_limit(max_parallel: int) -> str:
    """Feedback when a parallel action exceeds the configured fan-out cap."""
    return f"Parallel action exceeds max_parallel={max_parallel}. Reduce parallel fan-out."
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def render_sequential_only(node_name: str) -> str:
    """Feedback when a sequential-only tool was placed in a parallel plan."""
    head = f"tool '{node_name}' must run sequentially. "
    return head + "Do not include it in a parallel plan."
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def render_parallel_setup_error(errors: Sequence[str]) -> str:
    """Summarise why a proposed parallel plan could not be started."""
    return f"Parallel plan invalid: {'; '.join(errors)}. Revise the plan and retry."
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def render_empty_parallel_plan() -> str:
    """Feedback when a parallel action carries no branches at all."""
    message = "Parallel plan must include at least one branch in 'plan'."
    return message
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def render_parallel_with_next_node(next_node: str) -> str:
    """Feedback when a parallel plan illegally also sets ``next_node``."""
    return (
        f"Parallel plan cannot set next_node='{next_node}'. "
        + "Use 'join' to continue or finish the run explicitly."
    )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def render_parallel_unknown_failure(node_name: str) -> str:
    """Feedback when a branch of a parallel plan failed for unknown reasons."""
    detail = f"tool '{node_name}' failed during parallel execution. "
    return detail + "Investigate the tool and adjust the plan."
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def build_summarizer_messages(
    query: str,
    history: Sequence[Mapping[str, Any]],
    base_summary: Mapping[str, Any],
) -> list[dict[str, str]]:
    """Build the two-message chat payload for the trajectory summariser.

    The system message pins the output contract (TrajectorySummary JSON);
    the user message carries the query, step history and current summary
    serialised as compact JSON.
    """
    system_message = {
        "role": "system",
        "content": (
            "You are a summariser producing compact JSON state. "
            "Respond with valid JSON matching the TrajectorySummary schema."
        ),
    }
    user_payload = {
        "query": query,
        "history": list(history),
        "current_summary": dict(base_summary),
    }
    user_message = {"role": "user", "content": _compact_json(user_payload)}
    return [system_message, user_message]
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def _compact_json(data: Any) -> str:
|
|
121
|
+
return json.dumps(data, ensure_ascii=False, sort_keys=True)
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def render_tool(record: Mapping[str, Any]) -> str:
    """Format one tool-catalog record as an indented bullet for the prompt.

    Requires ``name``, ``desc``, ``side_effects``, ``args_schema`` and
    ``out_schema``; the remaining keys are emitted only when present.
    """
    lines = [
        f"- name: {record['name']}",
        f"  desc: {record['desc']}",
        f"  side_effects: {record['side_effects']}",
        f"  args_schema: {_compact_json(record['args_schema'])}",
        f"  out_schema: {_compact_json(record['out_schema'])}",
    ]
    joined_tags = ", ".join(record.get("tags", ()))
    if joined_tags:
        lines.append(f"  tags: {joined_tags}")
    joined_scopes = ", ".join(record.get("auth_scopes", ()))
    if joined_scopes:
        lines.append(f"  auth_scopes: {joined_scopes}")
    if record.get("cost_hint"):
        lines.append(f"  cost_hint: {record['cost_hint']}")
    # latency may legitimately be 0, so test against None rather than truthiness
    if record.get("latency_hint_ms") is not None:
        lines.append(f"  latency_hint_ms: {record['latency_hint_ms']}")
    if record.get("safety_notes"):
        lines.append(f"  safety_notes: {record['safety_notes']}")
    if record.get("extra"):
        lines.append(f"  extra: {_compact_json(record['extra'])}")
    return "\n".join(lines)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def build_system_prompt(
    catalog: Sequence[Mapping[str, Any]],
    *,
    extra: str | None = None,
    planning_hints: Mapping[str, Any] | None = None,
) -> str:
    """Assemble the planner system prompt.

    Starts from the fixed JSON-only rule list, appends the rendered tool
    catalog (or ``(none)``), then optional extra guidance and planning
    hints when provided.
    """
    sections: list[str] = [
        "You are PenguiFlow ReactPlanner, a JSON-only planner.",
        "Follow these rules strictly:",
        "1. Respond with valid JSON matching the PlannerAction schema.",
        "2. Use the provided tools when necessary; never invent new tool names.",
        "3. Keep 'thought' concise and factual.",
        (
            "4. When the task is complete, set 'next_node' to null "
            "and include the final payload in 'args'."
        ),
        "5. Do not emit plain text outside JSON.",
        "",
        "Available tools:",
    ]
    rendered = "\n".join(render_tool(item) for item in catalog)
    sections.append(rendered if rendered else "(none)")
    if extra:
        sections += ["", "Additional guidance:", extra]
    if planning_hints:
        hint_text = render_planning_hints(planning_hints)
        if hint_text:
            sections += ["", hint_text]
    return "\n".join(sections)
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def build_user_prompt(query: str, context_meta: Mapping[str, Any] | None = None) -> str:
    """Encode the user query (plus optional context metadata) as compact JSON."""
    body: dict[str, Any] = {"query": query}
    if context_meta:
        body["context"] = dict(context_meta)
    return _compact_json(body)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def render_observation(
    *,
    observation: Any | None,
    error: str | None,
    failure: Mapping[str, Any] | None = None,
) -> str:
    """Serialise a tool step outcome (result, error and/or failure detail).

    Falls back to ``{"observation": null}`` when nothing was captured, so
    the model always receives a well-formed JSON observation.
    """
    payload: dict[str, Any] = {}
    if observation is not None:
        payload["observation"] = observation
    if error:
        payload["error"] = error
    if failure:
        payload["failure"] = dict(failure)
    return _compact_json(payload or {"observation": None})
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def render_hop_budget_violation(limit: int) -> str:
    """Tell the model its tool-call (hop) budget is spent."""
    base = (
        "Hop budget exhausted; you have used all available tool calls. "
        "Finish with the best answer so far or reply with no_path."
    )
    return base + f" (limit={limit})"
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def render_deadline_exhausted() -> str:
    """Prompt issued once the run deadline has passed."""
    return "Deadline reached. Provide the best available conclusion or return no_path."
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def render_validation_error(node_name: str, error: str) -> str:
    """Feedback when tool arguments failed schema validation."""
    prefix = f"args for tool '{node_name}' did not validate: {error}. "
    return prefix + "Return corrected JSON."
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def render_output_validation_error(node_name: str, error: str) -> str:
    """Feedback when a tool's output failed schema validation."""
    prefix = f"tool '{node_name}' returned data that did not validate: {error}. "
    return prefix + "Ensure the tool output matches the declared schema."
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def render_invalid_node(node_name: str, available: Sequence[str]) -> str:
    """Feedback when the model named a tool that is not in the catalog."""
    options = ", ".join(sorted(available))
    return f"tool '{node_name}' is not in the catalog. Choose one of: {options}."
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
def render_repair_message(error: str) -> str:
    """Repair prompt for an unparseable or schema-mismatched LLM reply."""
    return (
        "Previous response was invalid JSON or schema mismatch: "
        + f"{error}. Reply with corrected JSON only."
    )
|