@llamaindex/workflow-debugger 0.1.7 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,3 +1,111 @@
  # Workflow Debugger

  Frontend to the LlamaIndex Workflows server: https://github.com/run-llama/workflows-py
+
+ ![Workflow Debugger](./ui_sample.png)
+
+ ## Usage
+
+ The `Workflow Debugger` is automatically configured in a `WorkflowServer` and serves its UI at the `/` path.
+
+ To use it, first ensure that `llama-index-workflows` is installed:
+
+ ```bash
+ pip install llama-index-workflows
+ ```
+
+ Then, start a server with a workflow:
+
+ ```python
+ import asyncio
+ from workflows import Workflow, step
+ from workflows.context import Context
+ from workflows.events import (
+     Event,
+     StartEvent,
+     StopEvent,
+ )
+ from workflows.server import WorkflowServer
+
+
+ class ProcessingInput(StartEvent):
+     things: list[str]
+
+
+ class ProcessedOutput(StopEvent):
+     results: list[str]
+
+
+ class ProgressEvent(Event):
+     step: str
+     progress: int
+     message: str
+
+
+ class ProcessingWorkflow(Workflow):
+     """Example workflow that demonstrates event streaming with progress updates."""
+
+     @step
+     async def process(
+         self,
+         ctx: Context,
+         ev: ProcessingInput
+     ) -> ProcessedOutput:
+         things = ev.things
+
+         ctx.write_event_to_stream(
+             ProgressEvent(
+                 step="start",
+                 progress=0,
+                 message=f"Starting processing of {len(things)} things",
+             )
+         )
+
+         results = []
+         for i, item in enumerate(things):
+             # Simulate processing time
+             await asyncio.sleep(1.0)
+
+             # Emit progress event
+             progress = int((i + 1) / len(things) * 100)
+             ctx.write_event_to_stream(
+                 ProgressEvent(
+                     step="processing",
+                     progress=progress,
+                     message=f"Processed {item} ({i + 1}/{len(things)})",
+                 )
+             )
+
+             results.append(f"processed_{item}")
+
+         ctx.write_event_to_stream(
+             ProgressEvent(
+                 step="complete",
+                 progress=100,
+                 message="Processing completed successfully",
+             )
+         )
+
+         return ProcessedOutput(results=results)
+
+
+ async def main() -> None:
+     server = WorkflowServer()
+
+     # Register workflows
+     server.add_workflow("processor", ProcessingWorkflow())
+
+     await server.serve(host="127.0.0.1", port=8000)
+
+
+ if __name__ == "__main__":
+     try:
+         asyncio.run(main())
+     except KeyboardInterrupt:
+         pass
+ ```
+
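+ Assuming you save the example as, say, `server.py` (the filename is arbitrary), you can start the server with `python server.py`.
+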
+ From there, open `http://127.0.0.1:8000` in your browser to see the UI.
+
+ To use the debugger:
+
+ - select your workflow from the dropdown menu in the top middle
+ - enter the payload used to kick off the workflow (e.g. `["item1", "item2", "item3"]`)
+ - click the "Run Workflow" button to start the workflow
+ - the visualizer displays the workflow execution in real time and lets you inspect the streamed events (the same events can also be consumed programmatically, as in the sketch below)
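+
+ The events shown by the visualizer are the ones a workflow writes to its event stream. As a rough sketch only (it assumes the `llama-index-workflows` handler API: that `run()` accepts a custom start event via `start_event=` and returns a handler exposing `stream_events()`, and that the example above was saved as `server.py`), the same workflow could also be exercised programmatically:
+
+ ```python
+ import asyncio
+
+ # Assumes the example above is saved as server.py (hypothetical filename).
+ from server import ProcessingInput, ProcessingWorkflow, ProgressEvent
+
+
+ async def run_directly() -> None:
+     wf = ProcessingWorkflow()
+
+     # Same payload you would enter in the debugger UI (assumed run() signature).
+     handler = wf.run(start_event=ProcessingInput(things=["item1", "item2", "item3"]))
+
+     # Consume the streamed ProgressEvents that the debugger would visualize.
+     async for ev in handler.stream_events():
+         if isinstance(ev, ProgressEvent):
+             print(f"[{ev.progress:>3}%] {ev.step}: {ev.message}")
+
+     # Awaiting the handler yields the workflow result (the ProcessedOutput stop event).
+     result = await handler
+     print(result)
+
+
+ if __name__ == "__main__":
+     asyncio.run(run_directly())
+ ```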