penguiflow 2.2.3__py3-none-any.whl → 2.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of penguiflow might be problematic; see the registry advisory for details.

Files changed (46)
  1. examples/__init__.py +0 -0
  2. examples/controller_multihop/__init__.py +0 -0
  3. examples/controller_multihop/flow.py +54 -0
  4. examples/fanout_join/__init__.py +0 -0
  5. examples/fanout_join/flow.py +54 -0
  6. examples/map_concurrent/__init__.py +0 -0
  7. examples/map_concurrent/flow.py +56 -0
  8. examples/metadata_propagation/flow.py +61 -0
  9. examples/mlflow_metrics/__init__.py +1 -0
  10. examples/mlflow_metrics/flow.py +120 -0
  11. examples/playbook_retrieval/__init__.py +0 -0
  12. examples/playbook_retrieval/flow.py +61 -0
  13. examples/quickstart/__init__.py +0 -0
  14. examples/quickstart/flow.py +71 -0
  15. examples/react_minimal/main.py +109 -0
  16. examples/react_parallel/main.py +121 -0
  17. examples/react_pause_resume/main.py +157 -0
  18. examples/react_replan/main.py +133 -0
  19. examples/reliability_middleware/__init__.py +0 -0
  20. examples/reliability_middleware/flow.py +67 -0
  21. examples/roadmap_status_updates/__init__.py +0 -0
  22. examples/roadmap_status_updates/flow.py +640 -0
  23. examples/roadmap_status_updates_subflows/__init__.py +0 -0
  24. examples/roadmap_status_updates_subflows/flow.py +814 -0
  25. examples/routing_policy/__init__.py +0 -0
  26. examples/routing_policy/flow.py +89 -0
  27. examples/routing_predicate/__init__.py +0 -0
  28. examples/routing_predicate/flow.py +51 -0
  29. examples/routing_union/__init__.py +0 -0
  30. examples/routing_union/flow.py +56 -0
  31. examples/status_roadmap_flow/__init__.py +0 -0
  32. examples/status_roadmap_flow/flow.py +458 -0
  33. examples/streaming_llm/__init__.py +3 -0
  34. examples/streaming_llm/flow.py +77 -0
  35. examples/testkit_demo/flow.py +34 -0
  36. examples/trace_cancel/flow.py +79 -0
  37. examples/traceable_errors/flow.py +51 -0
  38. examples/visualizer/flow.py +49 -0
  39. penguiflow/__init__.py +1 -1
  40. {penguiflow-2.2.3.dist-info → penguiflow-2.2.5.dist-info}/METADATA +4 -1
  41. penguiflow-2.2.5.dist-info/RECORD +68 -0
  42. {penguiflow-2.2.3.dist-info → penguiflow-2.2.5.dist-info}/top_level.txt +1 -0
  43. penguiflow-2.2.3.dist-info/RECORD +0 -30
  44. {penguiflow-2.2.3.dist-info → penguiflow-2.2.5.dist-info}/WHEEL +0 -0
  45. {penguiflow-2.2.3.dist-info → penguiflow-2.2.5.dist-info}/entry_points.txt +0 -0
  46. {penguiflow-2.2.3.dist-info → penguiflow-2.2.5.dist-info}/licenses/LICENSE +0 -0
File without changes
@@ -0,0 +1,89 @@
1
+ """Demonstrates config-driven routing policies."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ from pathlib import Path
7
+
8
+ from penguiflow import (
9
+ DictRoutingPolicy,
10
+ Headers,
11
+ Message,
12
+ Node,
13
+ NodePolicy,
14
+ PenguiFlow,
15
+ RoutingRequest,
16
+ create,
17
+ predicate_router,
18
+ )
19
+
20
# Location of the JSON mapping consumed by DictRoutingPolicy (lives next to this file).
POLICY_PATH = Path(__file__).with_name("policy.json")


def tenant_key(request: RoutingRequest) -> str:
    """Return the tenant header used as the routing-policy lookup key."""
    return request.message.headers.tenant
25
+
26
+
27
+ async def marketing(msg: Message, ctx) -> str:
28
+ await asyncio.sleep(0.01)
29
+ return f"marketing handled {msg.payload}"
30
+
31
+
32
+ async def support(msg: Message, ctx) -> str:
33
+ await asyncio.sleep(0.01)
34
+ return f"support handled {msg.payload}"
35
+
36
+
37
def build_flow() -> tuple[DictRoutingPolicy, PenguiFlow]:
    """Load the routing policy, wire the flow, start it, and return both.

    The policy object is returned alongside the flow so callers can mutate
    the mapping at runtime (see ``main``).
    """
    routing_policy = DictRoutingPolicy.from_json_file(
        str(POLICY_PATH),
        default="support",
        key_getter=tenant_key,
    )

    # The predicate proposes both targets; the policy picks the winner.
    router_node = predicate_router(
        "router",
        lambda msg: ["marketing", "support"],
        policy=routing_policy,
    )
    marketing_worker = Node(
        marketing, name="marketing", policy=NodePolicy(validate="none")
    )
    support_worker = Node(
        support, name="support", policy=NodePolicy(validate="none")
    )

    pipeline = create(
        router_node.to(marketing_worker, support_worker),
        marketing_worker.to(),
        support_worker.to(),
    )
    pipeline.run()
    return routing_policy, pipeline
66
+
67
+
68
async def main() -> None:
    """Route two messages, hot-swap the policy mapping, then route a third."""
    policy, flow = build_flow()

    async def roundtrip(payload: str, tenant: str) -> str:
        # Emit one message and wait for whichever sink produced the reply.
        await flow.emit(Message(payload=payload, headers=Headers(tenant=tenant)))
        return await flow.fetch()

    print(await roundtrip("launch campaign", tenant="marketing"))
    print(await roundtrip("reset password", tenant="support"))

    # Policies can be updated while the flow is running.
    policy.update_mapping(
        {"marketing": "marketing", "support": "marketing", "vip": "support"}
    )

    print(await roundtrip("premium issue", tenant="vip"))

    await flow.stop()


if __name__ == "__main__":  # pragma: no cover - manual execution helper
    asyncio.run(main())
File without changes
@@ -0,0 +1,51 @@
1
+ """Predicate routing example."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+
7
+ from penguiflow import Headers, Message, Node, NodePolicy, create, predicate_router
8
+
9
+
10
+ async def metrics_sink(msg: Message, ctx) -> str:
11
+ return f"[metrics] {msg.payload}"
12
+
13
+
14
+ async def general_sink(msg: Message, ctx) -> str:
15
+ return f"[general] {msg.payload}"
16
+
17
+
18
async def main() -> None:
    """Route one message to each sink based on a payload prefix predicate."""
    router = predicate_router(
        "router",
        lambda msg: ["metrics"] if msg.payload.startswith("metric") else ["general"],
    )
    sink_metrics = Node(
        metrics_sink, name="metrics", policy=NodePolicy(validate="none")
    )
    sink_general = Node(
        general_sink, name="general", policy=NodePolicy(validate="none")
    )

    flow = create(
        router.to(sink_metrics, sink_general),
        sink_metrics.to(),
        sink_general.to(),
    )
    flow.run()

    # First payload matches the predicate, second falls through to general.
    for payload in ("metric-usage", "ad-spend"):
        await flow.emit(Message(payload=payload, headers=Headers(tenant="acme")))
        print(await flow.fetch())  # [metrics] metric-usage / [general] ad-spend

    await flow.stop()


if __name__ == "__main__":  # pragma: no cover
    asyncio.run(main())
File without changes
@@ -0,0 +1,56 @@
1
+ """Discriminated union routing example."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ from typing import Annotated, Literal
7
+
8
+ from pydantic import BaseModel, Field
9
+
10
+ from penguiflow import Node, NodePolicy, create, union_router
11
+
12
+
13
class SearchWeb(BaseModel):
    """Web-search task variant; selected when ``kind == "web"``."""

    kind: Literal["web"]
    query: str


class SearchSql(BaseModel):
    """SQL-search task variant; selected when ``kind == "sql"``."""

    kind: Literal["sql"]
    table: str


# Discriminated union: pydantic dispatches on the ``kind`` field, so the
# router can pick a branch without running validation against every variant.
SearchTask = Annotated[SearchWeb | SearchSql, Field(discriminator="kind")]
24
+
25
+
26
+ async def handle_web(task: SearchWeb, ctx) -> str:
27
+ return f"web::{task.query}"
28
+
29
+
30
+ async def handle_sql(task: SearchSql, ctx) -> str:
31
+ return f"sql::{task.table}"
32
+
33
+
34
async def main() -> None:
    """Emit one task of each union variant and print the routed results."""
    router = union_router("router", SearchTask)
    web_node = Node(handle_web, name="web", policy=NodePolicy(validate="none"))
    sql_node = Node(handle_sql, name="sql", policy=NodePolicy(validate="none"))

    flow = create(
        router.to(web_node, sql_node),
        web_node.to(),
        sql_node.to(),
    )
    flow.run()

    # One payload per variant; the discriminator picks the branch.
    for task in (
        SearchWeb(kind="web", query="penguins"),
        SearchSql(kind="sql", table="metrics"),
    ):
        await flow.emit(task)
        print(await flow.fetch())  # web::penguins then sql::metrics

    await flow.stop()


if __name__ == "__main__":  # pragma: no cover
    asyncio.run(main())
File without changes
@@ -0,0 +1,458 @@
1
+ """Roadmap-driven flow emitting websocket-friendly status updates."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ from typing import Any, Literal
7
+
8
+ from pydantic import BaseModel
9
+
10
+ from penguiflow import (
11
+ Headers,
12
+ Message,
13
+ Node,
14
+ NodePolicy,
15
+ PenguiFlow,
16
+ create,
17
+ flow_to_mermaid,
18
+ map_concurrent,
19
+ predicate_router,
20
+ )
21
+ from penguiflow.types import StreamChunk
22
+
23
+
24
class FlowResponse(BaseModel):
    """Pydantic response contract returned by each subflow."""

    # Human-readable output produced by the branch.
    raw_output: str
    # Optional structured artifacts (insights, metrics, chart data, ...).
    artifacts: dict[str, Any] | None = None
    # Session identifier echoed back to the caller, if known.
    session_info: str | None = None
30
+
31
+
32
class RoadmapStep(BaseModel):
    """Single step of a branch roadmap shown to the user."""

    id: int
    name: str
    description: str


class StatusUpdate(BaseModel):
    """Websocket-friendly progress event emitted while the flow runs."""

    status: Literal["thinking", "ok"]
    message: str | None = None
    # Full roadmap, sent once when a branch finishes planning.
    roadmap_step_list: list[RoadmapStep] | None = None
    # Incremental per-step progress fields.
    roadmap_step_id: int | None = None
    roadmap_step_status: Literal["running", "ok"] | None = None
44
+
45
+
46
class UserQuery(BaseModel):
    """Raw user request entering the flow."""

    text: str
    session_id: str


class CodeAnalysisRequest(BaseModel):
    """Routed payload for the code-analysis branch (``kind == "code"``)."""

    kind: Literal["code"]
    query: str
    files: list[str]
    session_id: str


class DataSummaryRequest(BaseModel):
    """Routed payload for the data-summary branch (``kind == "data"``)."""

    kind: Literal["data"]
    query: str
    tables: list[str]
    session_id: str
63
+
64
+
65
# Shared terminal step: both branches converge on the same final-synthesis id.
FINAL_STEP = RoadmapStep(
    id=4,
    name="Synthesize final reply",
    description="Combine subflow output and compose the user response",
)

# Roadmap announced when the query is routed to the code-analysis branch.
CODE_STEPS: list[RoadmapStep] = [
    RoadmapStep(
        id=1, name="Parse files", description="Load and tokenize the candidate modules"
    ),
    RoadmapStep(
        id=2,
        name="Inspect modules",
        description="Review each module in parallel to collect findings",
    ),
    RoadmapStep(
        id=3,
        name="Draft code report",
        description="Summarize findings and prepare a structured FlowResponse",
    ),
    FINAL_STEP,
]

# Roadmap announced when the query is routed to the data-summary branch.
DATA_STEPS: list[RoadmapStep] = [
    RoadmapStep(id=1, name="Collect metrics", description="Query analytics tables"),
    RoadmapStep(
        id=2,
        name="Shape visualisations",
        description="Derive chart-friendly data series",
    ),
    RoadmapStep(
        id=3,
        name="Draft data report",
        description="Summarize key deltas in a FlowResponse",
    ),
    FINAL_STEP,
]
102
+
103
+
104
async def emit_status(
    ctx,
    *,
    status: Literal["thinking", "ok"] = "thinking",
    message: str | None = None,
    roadmap_step_list: list[RoadmapStep] | None = None,
    roadmap_step_id: int | None = None,
    roadmap_step_status: Literal["running", "ok"] | None = None,
) -> StatusUpdate:
    """Build a StatusUpdate and push it straight to the Rookery sink.

    NOTE(review): this reaches into the private ``_emit_to_rookery`` runtime
    hook — confirm there is no public equivalent before copying the pattern.

    Raises:
        RuntimeError: if the context is not attached to a running flow.
    """
    fields = {
        "status": status,
        "message": message,
        "roadmap_step_list": roadmap_step_list,
        "roadmap_step_id": roadmap_step_id,
        "roadmap_step_status": roadmap_step_status,
    }
    update = StatusUpdate(**fields)
    if (runtime := ctx.runtime) is None:
        raise RuntimeError("Context is not attached to a running flow")
    await runtime._emit_to_rookery(update, source=ctx.owner)
    return update
127
+
128
+
129
async def announce_start(message: Message, ctx) -> Message:
    """First hop: tell the client routing has started and seed message.meta."""
    await emit_status(ctx, message="Determining message path")
    # Guarantee downstream nodes can write into meta["context"].
    message.meta.setdefault("context", {})
    return message
133
+
134
+
135
async def triage(message: Message, ctx) -> Message:
    """Classify the user query and rewrite the payload for one branch.

    Queries mentioning error-ish keywords go to the code-analysis branch;
    everything else becomes a data-summary request.
    """
    query = UserQuery.model_validate(message.payload)
    context = message.meta.setdefault("context", {})
    context["query"] = query.text
    context["session_id"] = query.session_id

    error_tokens = ("error", "bug", "stacktrace", "traceback")
    lowered = query.text.lower()
    if any(token in lowered for token in error_tokens):
        new_payload: CodeAnalysisRequest | DataSummaryRequest = CodeAnalysisRequest(
            kind="code",
            query=query.text,
            files=["app.py", "payments.py"],
            session_id=query.session_id,
        )
    else:
        new_payload = DataSummaryRequest(
            kind="data",
            query=query.text,
            tables=["daily_signups", "conversion_rate"],
            session_id=query.session_id,
        )
    return message.model_copy(update={"payload": new_payload})
158
+
159
+
160
async def code_plan(message: Message, ctx) -> Message:
    """Publish the code-branch roadmap and stash it on the message meta."""
    message.meta["roadmap"] = [item.model_dump() for item in CODE_STEPS]
    await emit_status(ctx, roadmap_step_list=CODE_STEPS)
    return message
164
+
165
+
166
async def code_parse_files(message: Message, ctx) -> Message:
    """Step 1 of the code branch: 'parse' each requested file.

    Emits one running update per file, then a single ok update.
    """
    request = CodeAnalysisRequest.model_validate(message.payload)
    parsed: list[dict[str, Any]] = []
    for file_name in request.files:
        await emit_status(
            ctx,
            roadmap_step_id=1,
            roadmap_step_status="running",
            message=f"Parsing {file_name}",
        )
        await asyncio.sleep(0)  # yield to the loop; stands in for real work
        parsed.append({"file": file_name, "tokens": 128})
    await emit_status(
        ctx,
        roadmap_step_id=1,
        roadmap_step_status="ok",
        message=f"Parsed {len(parsed)} files",
    )
    message.meta["parsed_files"] = parsed
    return message
186
+
187
+
188
async def code_inspect_modules(message: Message, ctx) -> Message:
    """Step 2 of the code branch: review parsed modules concurrently."""
    parsed = message.meta.get("parsed_files", [])

    async def inspect(file_info: dict[str, Any]) -> dict[str, Any]:
        # One running-status event per module under review.
        name = file_info["file"]
        await emit_status(
            ctx,
            roadmap_step_id=2,
            roadmap_step_status="running",
            message=f"Inspecting {name}",
        )
        await asyncio.sleep(0)
        return {"file": name, "issues": ["no obvious bugs"]}

    insights = await map_concurrent(parsed, inspect, max_concurrency=2)
    await emit_status(
        ctx,
        roadmap_step_id=2,
        roadmap_step_status="ok",
        message=f"Reviewed {len(insights)} modules",
    )
    message.meta["code_insights"] = insights
    message.meta.setdefault("context", {})["last_route"] = "code"
    return message
213
+
214
+
215
async def code_finalize(message: Message, ctx) -> Message:
    """Step 3 of the code branch: draft the FlowResponse report."""
    request = CodeAnalysisRequest.model_validate(message.payload)
    await emit_status(
        ctx,
        roadmap_step_id=3,
        roadmap_step_status="running",
        message="Summarizing code findings",
    )
    insights = message.meta.get("code_insights", [])
    lines = [f"- {item['file']}: {item['issues'][0]}" for item in insights]
    message.meta["summary"] = "\n".join(lines) or "- No issues detected"
    report = FlowResponse(
        raw_output=f"Code analysis completed for {request.query}",
        artifacts={
            "insights": insights,
            "parsed_files": message.meta.get("parsed_files", []),
        },
        session_info=request.session_id,
    )
    await emit_status(
        ctx,
        roadmap_step_id=3,
        roadmap_step_status="ok",
        message="Drafted code report",
    )
    return message.model_copy(update={"payload": report})
242
+
243
+
244
async def data_plan(message: Message, ctx) -> Message:
    """Publish the data-branch roadmap and stash it on the message meta."""
    message.meta["roadmap"] = [item.model_dump() for item in DATA_STEPS]
    await emit_status(ctx, roadmap_step_list=DATA_STEPS)
    return message
248
+
249
+
250
async def data_collect_metrics(message: Message, ctx) -> Message:
    """Step 1 of the data branch: gather one metric set per table."""
    request = DataSummaryRequest.model_validate(message.payload)
    metrics: list[dict[str, Any]] = []
    for table in request.tables:
        await emit_status(
            ctx,
            roadmap_step_id=1,
            roadmap_step_status="running",
            message=f"Collecting metrics from {table}",
        )
        await asyncio.sleep(0)  # stand-in for a real analytics query
        metrics.append({"table": table, "value": 42})
    await emit_status(
        ctx,
        roadmap_step_id=1,
        roadmap_step_status="ok",
        message=f"Collected {len(metrics)} metric sets",
    )
    message.meta["metrics"] = metrics
    message.meta.setdefault("context", {})["last_route"] = "data"
    return message
271
+
272
+
273
async def data_prepare_visuals(message: Message, ctx) -> Message:
    """Step 2 of the data branch: derive chart-friendly series and labels."""
    metrics = message.meta.get("metrics", [])
    await emit_status(
        ctx,
        roadmap_step_id=2,
        roadmap_step_status="running",
        message="Transforming metrics into chart data",
    )
    await asyncio.sleep(0)
    message.meta["chart"] = {
        "series": [m["value"] for m in metrics],
        "labels": [m["table"] for m in metrics],
    }
    await emit_status(
        ctx,
        roadmap_step_id=2,
        roadmap_step_status="ok",
        message="Prepared chart inputs",
    )
    return message
294
+
295
+
296
async def data_finalize(message: Message, ctx) -> Message:
    """Step 3 of the data branch: draft the FlowResponse report."""
    request = DataSummaryRequest.model_validate(message.payload)
    await emit_status(
        ctx,
        roadmap_step_id=3,
        roadmap_step_status="running",
        message="Summarizing metric trends",
    )
    metrics = message.meta.get("metrics", [])
    chart = message.meta.get("chart", {})
    lines = [f"- {m['table']}: {m['value']}" for m in metrics]
    message.meta["summary"] = "\n".join(lines) or "- No metrics available"
    report = FlowResponse(
        raw_output=f"Data summary ready for {request.query}",
        artifacts={"metrics": metrics, "chart": chart},
        session_info=request.session_id,
    )
    await emit_status(
        ctx,
        roadmap_step_id=3,
        roadmap_step_status="ok",
        message="Drafted data report",
    )
    return message.model_copy(update={"payload": report})
321
+
322
+
323
async def synthesize_answer(message: Message, ctx) -> Message:
    """Final step: merge branch output, stream chunks, return the reply."""
    await emit_status(
        ctx,
        roadmap_step_id=FINAL_STEP.id,
        roadmap_step_status="running",
        message="Synthesizing final response",
    )
    response = FlowResponse.model_validate(message.payload)
    context = message.meta.get("context", {})
    final_text = (
        "Final reply for session {session}:\nSummary:\n{summary}\n\n{raw_output}"
    ).format(
        session=context.get("session_id", "unknown"),
        summary=message.meta.get("summary", ""),
        raw_output=response.raw_output,
    )
    # Demonstrate token streaming alongside the final message.
    await ctx.emit_chunk(parent=message, text="Synthesizing final reply... ")
    await ctx.emit_chunk(parent=message, text="Done composing.", done=True)
    await emit_status(
        ctx,
        roadmap_step_id=FINAL_STEP.id,
        roadmap_step_status="ok",
        message="Done!",
    )
    meta = {**message.meta, "flow_response": response.model_dump()}
    return message.model_copy(update={"payload": final_text, "meta": meta})
351
+
352
+
353
def build_flow() -> PenguiFlow:
    """Construct the roadmap flow with routing and synthesis."""

    def make_node(fn, name: str) -> Node:
        # Every example node skips validation; payloads are checked in-handler.
        return Node(fn, name=name, policy=NodePolicy(validate="none"))

    announce_node = make_node(announce_start, "announce_start")
    triage_node = make_node(triage, "triage")

    def choose_branch(msg: Message) -> str:
        payload = msg.payload
        if isinstance(payload, CodeAnalysisRequest):
            return "code_plan"
        if isinstance(payload, DataSummaryRequest):
            return "data_plan"
        raise TypeError(f"Unsupported payload type: {type(payload)!r}")

    dispatcher = predicate_router("dispatcher", choose_branch)

    code_plan_node = make_node(code_plan, "code_plan")
    code_parse_node = make_node(code_parse_files, "code_parse")
    code_inspect_node = make_node(code_inspect_modules, "code_inspect")
    code_finalize_node = make_node(code_finalize, "code_finalize")

    data_plan_node = make_node(data_plan, "data_plan")
    data_collect_node = make_node(data_collect_metrics, "data_collect")
    data_prepare_node = make_node(data_prepare_visuals, "data_prepare")
    data_finalize_node = make_node(data_finalize, "data_finalize")

    synthesize_node = make_node(synthesize_answer, "synthesize")

    return create(
        announce_node.to(triage_node),
        triage_node.to(dispatcher),
        dispatcher.to(code_plan_node, data_plan_node),
        code_plan_node.to(code_parse_node),
        code_parse_node.to(code_inspect_node),
        code_inspect_node.to(code_finalize_node),
        data_plan_node.to(data_collect_node),
        data_collect_node.to(data_prepare_node),
        data_prepare_node.to(data_finalize_node),
        code_finalize_node.to(synthesize_node),
        data_finalize_node.to(synthesize_node),
        synthesize_node.to(),
    )
416
+
417
+
418
def mermaid_diagram(direction: str = "TD") -> str:
    """Render the flow as a Mermaid graph."""
    return flow_to_mermaid(build_flow(), direction=direction)
423
+
424
+
425
async def run_demo() -> None:
    """Run the flow end-to-end and print every emitted event."""
    flow = build_flow()
    flow.run()
    try:
        await flow.emit(
            Message(
                payload=UserQuery(
                    text="Investigate the checkout bug", session_id="session-123"
                ),
                headers=Headers(tenant="demo"),
            )
        )

        # Drain events until the final (non-chunk) message arrives.
        while True:
            event = await flow.fetch()
            if isinstance(event, StatusUpdate):
                print(f"[status] {event.model_dump()}")
                continue
            if isinstance(event, Message):
                payload = event.payload
                if isinstance(payload, StreamChunk):
                    print(f"[chunk] {payload.text} (done={payload.done})")
                    continue
                print(f"[final] {payload}")
                break
            print(f"[event] {event}")
    finally:
        await flow.stop()


if __name__ == "__main__":  # pragma: no cover - manual execution helper
    asyncio.run(run_demo())
@@ -0,0 +1,3 @@
1
+ """Streaming LLM example package."""
2
+
3
+ __all__ = []