penguiflow 2.2.2__py3-none-any.whl → 2.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of penguiflow might be problematic. Click here for more details.
- examples/__init__.py +0 -0
- examples/controller_multihop/__init__.py +0 -0
- examples/controller_multihop/flow.py +54 -0
- examples/fanout_join/__init__.py +0 -0
- examples/fanout_join/flow.py +54 -0
- examples/map_concurrent/__init__.py +0 -0
- examples/map_concurrent/flow.py +56 -0
- examples/metadata_propagation/flow.py +61 -0
- examples/mlflow_metrics/__init__.py +1 -0
- examples/mlflow_metrics/flow.py +120 -0
- examples/playbook_retrieval/__init__.py +0 -0
- examples/playbook_retrieval/flow.py +61 -0
- examples/quickstart/__init__.py +0 -0
- examples/quickstart/flow.py +74 -0
- examples/react_minimal/main.py +109 -0
- examples/react_parallel/main.py +121 -0
- examples/react_pause_resume/main.py +157 -0
- examples/react_replan/main.py +133 -0
- examples/reliability_middleware/__init__.py +0 -0
- examples/reliability_middleware/flow.py +67 -0
- examples/roadmap_status_updates/__init__.py +0 -0
- examples/roadmap_status_updates/flow.py +640 -0
- examples/roadmap_status_updates_subflows/__init__.py +0 -0
- examples/roadmap_status_updates_subflows/flow.py +814 -0
- examples/routing_policy/__init__.py +0 -0
- examples/routing_policy/flow.py +89 -0
- examples/routing_predicate/__init__.py +0 -0
- examples/routing_predicate/flow.py +51 -0
- examples/routing_union/__init__.py +0 -0
- examples/routing_union/flow.py +56 -0
- examples/status_roadmap_flow/__init__.py +0 -0
- examples/status_roadmap_flow/flow.py +458 -0
- examples/streaming_llm/__init__.py +3 -0
- examples/streaming_llm/flow.py +77 -0
- examples/testkit_demo/flow.py +34 -0
- examples/trace_cancel/flow.py +78 -0
- examples/traceable_errors/flow.py +51 -0
- examples/visualizer/flow.py +49 -0
- penguiflow/__init__.py +1 -1
- penguiflow/core.py +24 -1
- {penguiflow-2.2.2.dist-info → penguiflow-2.2.4.dist-info}/METADATA +4 -1
- penguiflow-2.2.4.dist-info/RECORD +68 -0
- {penguiflow-2.2.2.dist-info → penguiflow-2.2.4.dist-info}/top_level.txt +1 -0
- penguiflow-2.2.2.dist-info/RECORD +0 -30
- {penguiflow-2.2.2.dist-info → penguiflow-2.2.4.dist-info}/WHEEL +0 -0
- {penguiflow-2.2.2.dist-info → penguiflow-2.2.4.dist-info}/entry_points.txt +0 -0
- {penguiflow-2.2.2.dist-info → penguiflow-2.2.4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
"""Parallel fan-out example for the React planner."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import json
|
|
7
|
+
from collections.abc import Mapping
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel
|
|
11
|
+
|
|
12
|
+
from penguiflow.catalog import build_catalog, tool
|
|
13
|
+
from penguiflow.node import Node
|
|
14
|
+
from penguiflow.planner import ReactPlanner
|
|
15
|
+
from penguiflow.registry import ModelRegistry
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ShardRequest(BaseModel):
    # Input for a single shard fetch: the subject plus which shard to hit.
    topic: str
    shard: int


class ShardResult(BaseModel):
    # Output of one shard fetch.
    shard: int
    text: str


class MergeArgs(BaseModel):
    # Join-step input: how many branches were expected, and their results.
    expect: int
    results: list[ShardResult]


class Documents(BaseModel):
    # Final merged payload produced by the join step.
    documents: list[str]
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@tool(desc="Fetch from the primary shard", tags=["parallel"])
async def fetch_primary(args: ShardRequest, ctx: Any) -> ShardResult:
    """Simulate a slow lookup against the primary shard."""
    await asyncio.sleep(0.1)
    return ShardResult(shard=args.shard, text=f"{args.topic}-primary")


@tool(desc="Fetch from the secondary shard", tags=["parallel"])
async def fetch_secondary(args: ShardRequest, ctx: Any) -> ShardResult:
    """Simulate a slow lookup against the secondary shard."""
    await asyncio.sleep(0.1)
    return ShardResult(shard=args.shard, text=f"{args.topic}-secondary")


@tool(desc="Merge shard payloads")
async def merge_results(args: MergeArgs, ctx: Any) -> Documents:
    """Join step: combine the parallel shard results into one document list.

    The planner stores rich branch metadata in ``ctx.meta`` for joins;
    ``parallel_success_count`` reports how many branches succeeded.
    """
    # Validate explicitly instead of `assert`, which is stripped under -O.
    success_count = ctx.meta["parallel_success_count"]
    if success_count != args.expect:
        raise RuntimeError(
            f"expected {args.expect} successful branches, got {success_count}"
        )
    merged = [item.text for item in args.results]
    return Documents(documents=merged)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class SequenceLLM:
|
|
58
|
+
"""Deterministic stub returning scripted planner actions."""
|
|
59
|
+
|
|
60
|
+
def __init__(self, responses: list[Mapping[str, Any]]) -> None:
|
|
61
|
+
self._responses = [json.dumps(item) for item in responses]
|
|
62
|
+
|
|
63
|
+
async def complete(
|
|
64
|
+
self,
|
|
65
|
+
*,
|
|
66
|
+
messages: list[Mapping[str, str]],
|
|
67
|
+
response_format: Mapping[str, Any] | None = None,
|
|
68
|
+
) -> str:
|
|
69
|
+
del messages, response_format
|
|
70
|
+
if not self._responses:
|
|
71
|
+
raise RuntimeError("SequenceLLM has no responses left")
|
|
72
|
+
return self._responses.pop(0)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
async def main() -> None:
    """Drive a scripted fan-out/join plan through the ReactPlanner."""
    registry = ModelRegistry()
    for tool_name, in_model, out_model in (
        ("fetch_primary", ShardRequest, ShardResult),
        ("fetch_secondary", ShardRequest, ShardResult),
        ("merge_results", MergeArgs, Documents),
    ):
        registry.register(tool_name, in_model, out_model)

    nodes = [
        Node(fetch_primary, name="fetch_primary"),
        Node(fetch_secondary, name="fetch_secondary"),
        Node(merge_results, name="merge_results"),
    ]

    # Action 1 fans out to both shards and joins on merge_results;
    # action 2 finishes with the merged documents.
    fan_out_action = {
        "thought": "fan out",
        "plan": [
            {
                "node": "fetch_primary",
                "args": {"topic": "penguins", "shard": 0},
            },
            {
                "node": "fetch_secondary",
                "args": {"topic": "penguins", "shard": 1},
            },
        ],
        "join": {"node": "merge_results"},
    }
    finish_action = {
        "thought": "finish",
        "next_node": None,
        "args": {"documents": ["penguins-primary", "penguins-secondary"]},
    }
    client = SequenceLLM([fan_out_action, finish_action])

    planner = ReactPlanner(
        llm_client=client,
        catalog=build_catalog(nodes, registry),
    )

    result = await planner.run("Compile penguin metrics")
    print(json.dumps(result.model_dump(), indent=2, ensure_ascii=False))


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
"""Phase B demo: summarisation + pause/resume + planning hints."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import json
|
|
7
|
+
from collections.abc import Mapping, Sequence
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel
|
|
11
|
+
|
|
12
|
+
from penguiflow.catalog import build_catalog, tool
|
|
13
|
+
from penguiflow.node import Node
|
|
14
|
+
from penguiflow.planner import PlannerPause, ReactPlanner
|
|
15
|
+
from penguiflow.registry import ModelRegistry
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class Query(BaseModel):
    # Incoming user request.
    topic: str


class Intent(BaseModel):
    # Classified request type produced by triage.
    intent: str


class Documents(BaseModel):
    # Retrieved supporting material.
    documents: list[str]


class Answer(BaseModel):
    # Final textual response.
    answer: str
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@tool(desc="Detect the type of request", tags=["triage"])
async def triage(args: Query, ctx: object) -> Intent:
    """Classify the incoming query; this demo always routes to docs."""
    return Intent(intent="docs")


@tool(desc="Approval checkpoint before side-effects", side_effects="external")
async def approval(args: Intent, ctx: Any) -> Intent:
    """Suspend the planner until a human approves the detected intent."""
    # Pause the planner for human approval. This raises internally and
    # returns control to the caller as a PlannerPause.
    await ctx.pause("approval_required", {"intent": args.intent})
    return args  # unreachable but keeps type checkers happy


@tool(desc="Retrieve supporting documents", side_effects="read")
async def retrieve(args: Intent, ctx: object) -> Documents:
    """Return canned documents for the approved intent."""
    return Documents(documents=[f"weekly metrics summary for {args.intent}"])


@tool(desc="Compose the final response", tags=["summary"])
async def respond(args: Answer, ctx: object) -> Answer:
    """Echo the drafted answer as the final response."""
    return args
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class StubPlannerLLM:
|
|
58
|
+
"""Deterministic LiteLLM-style stub that serves pre-built actions."""
|
|
59
|
+
|
|
60
|
+
def __init__(self, actions: Sequence[Mapping[str, Any]]) -> None:
|
|
61
|
+
self._payloads = [json.dumps(item) for item in actions]
|
|
62
|
+
|
|
63
|
+
async def complete(
|
|
64
|
+
self,
|
|
65
|
+
*,
|
|
66
|
+
messages: Sequence[Mapping[str, str]],
|
|
67
|
+
response_format: Mapping[str, Any] | None = None,
|
|
68
|
+
) -> str:
|
|
69
|
+
return self._payloads.pop(0)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class StubSummariser:
|
|
73
|
+
"""Cheap summariser used when history exceeds the token budget."""
|
|
74
|
+
|
|
75
|
+
async def complete(
|
|
76
|
+
self,
|
|
77
|
+
*,
|
|
78
|
+
messages: Sequence[Mapping[str, str]],
|
|
79
|
+
response_format: Mapping[str, Any] | None = None,
|
|
80
|
+
) -> str:
|
|
81
|
+
return json.dumps(
|
|
82
|
+
{
|
|
83
|
+
"goals": ["Send weekly metrics to stakeholders"],
|
|
84
|
+
"facts": {"status": "awaiting approval"},
|
|
85
|
+
"pending": ["approval"],
|
|
86
|
+
"last_output_digest": "approval pending",
|
|
87
|
+
"note": "stub",
|
|
88
|
+
}
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
async def build_planner() -> ReactPlanner:
    """Assemble the demo planner with pause support and planning hints."""
    registry = ModelRegistry()
    for tool_name, in_model, out_model in (
        ("triage", Query, Intent),
        ("approval", Intent, Intent),
        ("retrieve", Intent, Documents),
        ("respond", Answer, Answer),
    ):
        registry.register(tool_name, in_model, out_model)

    nodes = [
        Node(triage, name="triage"),
        Node(approval, name="approval"),
        Node(retrieve, name="retrieve"),
        Node(respond, name="respond"),
    ]
    catalog = build_catalog(nodes, registry)

    # The scripted actions walk the happy path; the approval tool pauses
    # the run partway through.
    scripted_actions = [
        {"thought": "triage", "next_node": "triage", "args": {"topic": "metrics"}},
        {"thought": "approval", "next_node": "approval", "args": {"intent": "docs"}},
        {"thought": "retrieve", "next_node": "retrieve", "args": {"intent": "docs"}},
        {
            "thought": "respond",
            "next_node": "respond",
            "args": {"answer": "Metrics sent to Slack with highlights."},
        },
        {"thought": "finish", "next_node": None, "args": {"answer": "done"}},
    ]

    planner = ReactPlanner(
        llm_client=StubPlannerLLM(scripted_actions),
        catalog=catalog,
        pause_enabled=True,
        token_budget=160,
        planning_hints={
            "ordering_hints": ["triage", "approval", "retrieve", "respond"],
            "disallow_nodes": ["broken_tool"],
            "budget_hints": {"max_parallel": 1},
        },
    )
    # Swap in the cheap summariser; poking the private attribute is a
    # deliberate shortcut for this demo.
    planner._summarizer_client = StubSummariser()  # type: ignore[attr-defined]
    return planner
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
async def main() -> None:
    """Run the planner, resume after the approval pause, print the result."""
    planner = await build_planner()
    result = await planner.run("Share weekly metrics with approvals")

    if not isinstance(result, PlannerPause):
        print("Planner finished in one pass:")
        print(result.payload)
        return

    print("Planner paused:")
    print(f" reason: {result.reason}")
    print(f" payload: {result.payload}")
    print("Resuming with approval...\n")
    final = await planner.resume(
        result.resume_token,
        user_input="approved by finance",
    )
    print("Final planner payload:")
    print(final.payload)
    print("Summary note:")
    print(final.metadata["steps"][-1])


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
from collections.abc import Mapping
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from pydantic import BaseModel
|
|
9
|
+
|
|
10
|
+
from penguiflow.catalog import build_catalog, tool
|
|
11
|
+
from penguiflow.node import Node
|
|
12
|
+
from penguiflow.planner import ReactPlanner
|
|
13
|
+
from penguiflow.registry import ModelRegistry
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class Question(BaseModel):
    # Raw user question.
    text: str


class Intent(BaseModel):
    # Classified request type produced by triage.
    intent: str


class Documents(BaseModel):
    # Retrieved supporting material.
    documents: list[str]


class Answer(BaseModel):
    # Final textual response.
    answer: str
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class RemoteTimeout(RuntimeError):
    """Exception carrying a suggestion for the planner."""

    def __init__(self, message: str, suggestion: str) -> None:
        super().__init__(message)
        # Hint the planner can act on (e.g. fall back to the cached index).
        self.suggestion = suggestion
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@tool(desc="Detect the caller intent", tags=["planner"])
async def triage(args: Question, ctx: object) -> Intent:
    """Classify the question; this demo always routes to docs."""
    return Intent(intent="docs")


@tool(desc="Call remote retriever", side_effects="external")
async def remote_docs(args: Intent, ctx: object) -> Documents:
    """Always fail with a timeout that suggests the cached fallback."""
    raise RemoteTimeout("remote search timed out", "use_cached_index")


@tool(desc="Fallback to cached index", side_effects="read")
async def cached_docs(args: Intent, ctx: object) -> Documents:
    """Serve a canned snippet from the local cache."""
    return Documents(documents=[f"Cached snippet covering {args.intent}"])


@tool(desc="Compose final answer")
async def summarise(args: Answer, ctx: object) -> Answer:
    """Pass the drafted answer through unchanged."""
    return args
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class SequenceLLM:
|
|
61
|
+
"""Deterministic stub returning authored planner actions."""
|
|
62
|
+
|
|
63
|
+
def __init__(self, responses: list[Mapping[str, Any]]) -> None:
|
|
64
|
+
self._responses = [json.dumps(item) for item in responses]
|
|
65
|
+
|
|
66
|
+
async def complete(
|
|
67
|
+
self,
|
|
68
|
+
*,
|
|
69
|
+
messages: list[Mapping[str, str]],
|
|
70
|
+
response_format: Mapping[str, Any] | None = None,
|
|
71
|
+
) -> str:
|
|
72
|
+
del messages, response_format
|
|
73
|
+
if not self._responses:
|
|
74
|
+
raise RuntimeError("SequenceLLM has no responses left")
|
|
75
|
+
return self._responses.pop(0)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
async def main() -> None:
    """Demonstrate replanning: remote timeout -> cached fallback -> answer."""
    registry = ModelRegistry()
    for tool_name, in_model, out_model in (
        ("triage", Question, Intent),
        ("remote_docs", Intent, Documents),
        ("cached_docs", Intent, Documents),
        ("summarise", Answer, Answer),
    ):
        registry.register(tool_name, in_model, out_model)

    nodes = [
        Node(triage, name="triage"),
        Node(remote_docs, name="remote_docs"),
        Node(cached_docs, name="cached_docs"),
        Node(summarise, name="summarise"),
    ]

    # Scripted plan: remote_docs raises RemoteTimeout, so the planner
    # replans onto the cached index before answering.
    scripted = [
        {
            "thought": "triage",
            "next_node": "triage",
            "args": {"text": "Summarise latest metrics"},
        },
        {
            "thought": "try remote",
            "next_node": "remote_docs",
            "args": {"intent": "docs"},
        },
        {
            "thought": "fallback cache",
            "next_node": "cached_docs",
            "args": {"intent": "docs"},
        },
        {
            "thought": "wrap up",
            "next_node": "summarise",
            "args": {"answer": "Used cached docs after timeout."},
        },
        {
            "thought": "final",
            "next_node": None,
            "args": {"answer": "Cached docs describe the latest metrics."},
        },
    ]
    client = SequenceLLM(scripted)

    planner = ReactPlanner(
        llm_client=client,
        catalog=build_catalog(nodes, registry),
        hop_budget=3,
    )

    result = await planner.run("Summarise metrics with fallback")
    print(json.dumps(result.model_dump(), indent=2, ensure_ascii=False))


if __name__ == "__main__":
    asyncio.run(main())
|
|
File without changes
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
"""Showcase retries, timeouts, and middleware hooks."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
|
|
7
|
+
from penguiflow import (
|
|
8
|
+
FlowEvent,
|
|
9
|
+
Headers,
|
|
10
|
+
Message,
|
|
11
|
+
Node,
|
|
12
|
+
NodePolicy,
|
|
13
|
+
PenguiFlow,
|
|
14
|
+
create,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def build_flow() -> PenguiFlow:
    """Build a one-node flow whose worker fails twice before succeeding."""
    attempt_counter = 0

    async def flaky(msg: Message, ctx) -> Message:
        # Attempt 1 sleeps past the timeout, attempt 2 raises,
        # attempt 3 finally succeeds.
        nonlocal attempt_counter
        attempt_counter += 1
        attempt = attempt_counter

        if attempt == 1:
            await asyncio.sleep(0.2)  # exceeds timeout -> triggers retry
        elif attempt == 2:
            raise RuntimeError("transient failure")

        return msg.model_copy(update={"payload": f"success on attempt {attempt}"})

    flaky_node = Node(
        flaky,
        name="flaky",
        policy=NodePolicy(
            validate="none",
            timeout_s=0.05,
            max_retries=2,
            backoff_base=0.05,
        ),
    )

    flow = create(flaky_node.to())

    async def middleware(event: FlowEvent) -> None:
        # Log every flow event with its retry attempt and latency.
        attempt = event.attempt
        latency = event.latency_ms
        print(f"mw:{event.event_type}:attempt={attempt} latency={latency}")

    flow.add_middleware(middleware)
    return flow
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
async def main() -> None:
    """Run the flow once and show the retry/middleware behaviour."""
    flow = build_flow()
    flow.run()

    # Ensure the flow is stopped even if emit/fetch raises, so the demo
    # never leaks a running flow on failure.
    try:
        message = Message(payload="ping", headers=Headers(tenant="acme"))
        await flow.emit(message)
        result = await flow.fetch()
        print(f"result payload: {result.payload}")
    finally:
        await flow.stop()


if __name__ == "__main__":  # pragma: no cover
    asyncio.run(main())
|
|
File without changes
|