adk-graph-workflow 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. adk_graph_workflow-0.1.0/.claude/settings.json +23 -0
  2. adk_graph_workflow-0.1.0/.gitignore +22 -0
  3. adk_graph_workflow-0.1.0/CLAUDE.md +74 -0
  4. adk_graph_workflow-0.1.0/PKG-INFO +13 -0
  5. adk_graph_workflow-0.1.0/README.md +296 -0
  6. adk_graph_workflow-0.1.0/examples/__init__.py +1 -0
  7. adk_graph_workflow-0.1.0/examples/analysis.py +34 -0
  8. adk_graph_workflow-0.1.0/examples/content_moderation.yaml +396 -0
  9. adk_graph_workflow-0.1.0/examples/custom_model_demo.py +81 -0
  10. adk_graph_workflow-0.1.0/examples/custom_model_demo.yaml +36 -0
  11. adk_graph_workflow-0.1.0/examples/customer_service.yaml +64 -0
  12. adk_graph_workflow-0.1.0/examples/data_analysis_pipeline.yaml +258 -0
  13. adk_graph_workflow-0.1.0/examples/ecommerce.py +45 -0
  14. adk_graph_workflow-0.1.0/examples/ecommerce_order_flow.yaml +407 -0
  15. adk_graph_workflow-0.1.0/examples/helpers.py +3 -0
  16. adk_graph_workflow-0.1.0/examples/langgraph.py +11 -0
  17. adk_graph_workflow-0.1.0/examples/moderation.py +36 -0
  18. adk_graph_workflow-0.1.0/examples/multi_agent_research.yaml +299 -0
  19. adk_graph_workflow-0.1.0/examples/research.py +18 -0
  20. adk_graph_workflow-0.1.0/examples/travel.py +37 -0
  21. adk_graph_workflow-0.1.0/examples/travel_planner.yaml +260 -0
  22. adk_graph_workflow-0.1.0/pyproject.toml +29 -0
  23. adk_graph_workflow-0.1.0/src/graph_workflow/__init__.py +104 -0
  24. adk_graph_workflow-0.1.0/src/graph_workflow/compiler.py +405 -0
  25. adk_graph_workflow-0.1.0/src/graph_workflow/data/schema.json +140 -0
  26. adk_graph_workflow-0.1.0/src/graph_workflow/errors.py +38 -0
  27. adk_graph_workflow-0.1.0/src/graph_workflow/evaluator.py +137 -0
  28. adk_graph_workflow-0.1.0/src/graph_workflow/models.py +112 -0
  29. adk_graph_workflow-0.1.0/src/graph_workflow/resolver.py +87 -0
  30. adk_graph_workflow-0.1.0/src/graph_workflow/runner.py +136 -0
  31. adk_graph_workflow-0.1.0/src/graph_workflow/schema.py +33 -0
  32. adk_graph_workflow-0.1.0/src/graph_workflow/validator.py +141 -0
  33. adk_graph_workflow-0.1.0/tests/conftest.py +2 -0
  34. adk_graph_workflow-0.1.0/tests/test_compiler.py +581 -0
  35. adk_graph_workflow-0.1.0/tests/test_errors.py +71 -0
  36. adk_graph_workflow-0.1.0/tests/test_evaluator.py +108 -0
  37. adk_graph_workflow-0.1.0/tests/test_examples_end_to_end.py +414 -0
  38. adk_graph_workflow-0.1.0/tests/test_extreme.py +357 -0
  39. adk_graph_workflow-0.1.0/tests/test_from_config.py +69 -0
  40. adk_graph_workflow-0.1.0/tests/test_integration.py +90 -0
  41. adk_graph_workflow-0.1.0/tests/test_models.py +169 -0
  42. adk_graph_workflow-0.1.0/tests/test_resolver.py +137 -0
  43. adk_graph_workflow-0.1.0/tests/test_runner.py +560 -0
  44. adk_graph_workflow-0.1.0/tests/test_schema.py +101 -0
  45. adk_graph_workflow-0.1.0/tests/test_validator.py +174 -0
  46. adk_graph_workflow-0.1.0/uv.lock +3422 -0
@@ -0,0 +1,23 @@
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(python -m pytest --version)",
5
+ "Bash(python -m pytest --collect-only -q)",
6
+ "Bash(python -c ' *)",
7
+ "Read(//home/rowan/Projects/million-agents-workflow-builder/**)",
8
+ "mcp__plugin_everything-claude-code_sequential-thinking__sequentialthinking",
9
+ "Read(//home/rowan/Projects/adk-python/**)",
10
+ "Bash(python -m pytest tests/test_models.py tests/test_compiler.py tests/test_validator.py -x -q)",
11
+ "Bash(python -m pytest tests/test_compiler.py -v -x)",
12
+ "Bash(python -m pytest --cov=src --cov-report=term-missing -q)",
13
+ "Bash(python -c \"import google.adk.tools as t; print\\([x for x in dir\\(t\\) if not x.startswith\\('_'\\)]\\)\")",
14
+ "Bash(python -m pytest tests/test_examples_end_to_end.py -v -q)",
15
+ "Bash(python -m pytest tests/test_examples_end_to_end.py -v -x)",
16
+ "Bash(python -c \"import ast; ast.parse\\(open\\('src/graph_workflow/compiler.py'\\).read\\(\\)\\); print\\('Syntax OK'\\)\")",
17
+ "Bash(python -m pytest tests/test_compiler.py tests/test_examples_end_to_end.py -x -q)",
18
+ "Bash(python -m pytest tests/test_compiler.py -x -q)",
19
+ "Bash(git add *)",
20
+ "Bash(git commit -m ' *)"
21
+ ]
22
+ }
23
+ }
@@ -0,0 +1,22 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.egg-info/
5
+ dist/
6
+ build/
7
+ *.egg
8
+
9
+ # Virtual environments
10
+ .venv/
11
+
12
+ # Testing
13
+ .pytest_cache/
14
+ .coverage
15
+ htmlcov/
16
+
17
+ # IDE
18
+ .vscode/
19
+ .idea/
20
+
21
+ # OS
22
+ .DS_Store
@@ -0,0 +1,74 @@
1
+ # CLAUDE.md
2
+
3
+ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4
+
5
+ ## Project Overview
6
+
7
+ YAML-defined graph workflows for Google ADK agents. Users write workflow graphs in YAML, and the library compiles them into executable ADK agent graphs. The core entry point is `from_config("workflow.yaml")` which returns a `GraphRunnerAgent` (an ADK `BaseAgent`).
8
+
9
+ ## Commands
10
+
11
+ ```bash
12
+ # Install (editable, with dev deps)
13
+ pip install -e ".[dev]"
14
+
15
+ # Run all tests (pytest with async_mode=auto)
16
+ pytest
17
+
18
+ # Run with coverage
19
+ pytest --cov=src --cov-report=term-missing
20
+
21
+ # Run a single test file
22
+ pytest tests/test_compiler.py
23
+
24
+ # Run a single test by name
25
+ pytest tests/test_validator.py -k "test_cycle_detection"
26
+ ```
27
+
28
+ ## Architecture
29
+
30
+ ### Validation Pipeline (3 layers, in order)
31
+
32
+ 1. **SchemaValidator** — JSON Schema structural validation against `src/graph_workflow/data/schema.json`
33
+ 2. **GraphWorkflowDef.model_validate()** — Pydantic discriminated unions for type-safe parsing
34
+ 3. **GraphValidator** — Graph structure checks: entry/exit nodes, edge references, container sub-agents, function references, node reachability
35
+
36
+ ### Compilation Pipeline
37
+
38
+ `GraphCompiler` performs a two-pass compilation: first validates all node references and detects cycles, then converts YAML node definitions into ADK agent instances. Function nodes are wrapped into inline agents. Container nodes (`sequential`, `parallel`, `loop`) are built recursively.
39
+
40
+ ### Runtime Execution
41
+
42
+ `GraphRunnerAgent` (extends ADK `BaseAgent`) traverses the compiled graph. It evaluates edge conditions via `ConditionEvaluator` (safe AST-based Python subset) against session state, and propagates state deltas between nodes.
43
+
44
+ ### Key Module Responsibilities
45
+
46
+ | Module | Purpose |
47
+ |--------|---------|
48
+ | `models.py` | Pydantic models with discriminated unions for 6 node types |
49
+ | `schema.py` | JSON Schema validation (structural, IDE-friendly) |
50
+ | `validator.py` | Graph structure validation (edges, cycles, reachability) |
51
+ | `compiler.py` | Two-pass YAML→ADK agent compilation |
52
+ | `runner.py` | Runtime graph executor with state management |
53
+ | `resolver.py` | Dynamic function import, `${ENV_VAR}` interpolation, arg pre-binding |
54
+ | `evaluator.py` | Safe AST-based condition evaluation against session state |
55
+ | `errors.py` | Error hierarchy: `GraphWorkflowError` → 5 subclasses |
56
+
57
+ ### Node Types
58
+
59
+ `function`, `llm_agent`, `sequential_agent`, `parallel_agent`, `loop_agent`, `langgraph_agent`. Container nodes accept both agent and function nodes as `sub_agents`. Nested containers are supported; circular references between containers are detected at compile time.
60
+
61
+ ### Public API
62
+
63
+ Everything is exported from `graph_workflow.__init__`. The convenience function `from_config(path)` runs the full pipeline (load → schema validate → pydantic validate → graph validate → resolve functions → compile → return runner). For fine-grained control, each pipeline step can be called individually.
64
+
65
+ ## Key Patterns
66
+
67
+ - **Discriminated unions** in Pydantic models (`NodeDef` uses `type` field to dispatch)
68
+ - **Function resolution** at compile time via dotted module paths with static arg pre-binding
69
+ - **Condition evaluation** uses safe AST walking (not `eval`) — supports comparisons, boolean ops, attribute access with dict fallback
70
+ - **LLM agents** support custom models via LiteLLM with provider prefixes (e.g., `openai/`, `deepseek/`, `ollama/`)
71
+
72
+ ## Testing
73
+
74
+ 206 tests with 97%+ coverage. Tests are in `tests/` organized by component. `test_examples_end_to_end.py` validates all 6 example YAML workflows compile correctly. pytest runs with `asyncio_mode = "auto"`.
@@ -0,0 +1,13 @@
1
+ Metadata-Version: 2.4
2
+ Name: adk-graph-workflow
3
+ Version: 0.1.0
4
+ Summary: YAML-defined graph workflows for Google ADK agents
5
+ Requires-Python: >=3.10
6
+ Requires-Dist: google-adk>=1.0.0
7
+ Requires-Dist: jsonschema>=4.0
8
+ Requires-Dist: pydantic>=2.0
9
+ Requires-Dist: pyyaml>=6.0
10
+ Provides-Extra: dev
11
+ Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
12
+ Requires-Dist: pytest-cov>=4.0; extra == 'dev'
13
+ Requires-Dist: pytest>=7.0; extra == 'dev'
@@ -0,0 +1,296 @@
1
+ # adk-graph-workflow
2
+
3
+ YAML-defined graph workflows for Google ADK agents. Load a YAML file, compile it into an executable ADK agent graph, and run it.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install -e .
9
+ ```
10
+
11
+ Requires Python 3.10+, `google-adk>=1.0.0`, `pydantic>=2.0`, `pyyaml>=6.0`, `jsonschema>=4.0`.
12
+
13
+ ## Quick Start
14
+
15
+ ```python
16
+ from graph_workflow import from_config
17
+
18
+ agent = from_config("workflow.yaml")
19
+ ```
20
+
21
+ That's it. `from_config()` handles loading, validation, function resolution, and compilation in one call. The returned `GraphRunnerAgent` is an ADK `BaseAgent` ready to use with any ADK `Runner`.
22
+
23
+ ### Running the Agent
24
+
25
+ ```python
26
+ import asyncio
27
+ import uuid
28
+ from google.adk.agents import InvocationContext, RunConfig
29
+ from google.adk.sessions import InMemorySessionService
30
+ from graph_workflow import from_config
31
+
32
+ async def main():
33
+ agent = from_config("workflow.yaml")
34
+
35
+ session_service = InMemorySessionService()
36
+ session = await session_service.create_session(
37
+ app_name="my_app", user_id="user1", session_id=str(uuid.uuid4())
38
+ )
39
+ ctx = InvocationContext(
40
+ invocation_id=str(uuid.uuid4()),
41
+ agent=agent,
42
+ session=session,
43
+ session_service=session_service,
44
+ run_config=RunConfig(),
45
+ )
46
+
47
+ async for event in agent.run_async(ctx):
48
+ print(event)
49
+
50
+ asyncio.run(main())
51
+ ```
52
+
53
+ ### Advanced Usage
54
+
55
+ If you need fine-grained control over individual steps:
56
+
57
+ ```python
58
+ import yaml
59
+ from graph_workflow import (
60
+ SchemaValidator,
61
+ GraphWorkflowDef,
62
+ GraphValidator,
63
+ FunctionResolver,
64
+ GraphCompiler,
65
+ GraphRunnerAgent,
66
+ )
67
+
68
+ with open("workflow.yaml") as f:
69
+ data = yaml.safe_load(f)
70
+
71
+ SchemaValidator().validate(data)
72
+ workflow = GraphWorkflowDef.model_validate(data)
73
+ GraphValidator().validate(workflow)
74
+
75
+ resolver = FunctionResolver(workflow.functions)
76
+ registry = {name: resolver.resolve(name) for name in workflow.functions}
77
+ compiled = GraphCompiler(registry).compile(workflow)
78
+ runner = GraphRunnerAgent(name=workflow.name, graph=compiled)
79
+ ```
80
+
81
+ ## YAML Format
82
+
83
+ ```yaml
84
+ version: "2.0"
85
+ name: my_workflow
86
+ entry: start_node
87
+ exit: end_node
88
+
89
+ nodes:
90
+ start_node:
91
+ type: function
92
+ function: my_func
93
+ output_key: result
94
+
95
+ classifier:
96
+ type: llm_agent
97
+ model: gemini-2.0-flash
98
+ instruction: "Classify the input."
99
+ output_key: category
100
+
101
+ parallel_analysis:
102
+ type: parallel_agent
103
+ sub_agents: [agent_a, agent_b]
104
+
105
+ sequential_steps:
106
+ type: sequential_agent
107
+ sub_agents: [step_1, step_2, step_3]
108
+
109
+ retry_loop:
110
+ type: loop_agent
111
+ sub_agents: [try_action, check_result]
112
+ max_iterations: 5
113
+
114
+ reasoner:
115
+ type: langgraph_agent
116
+ graph: myapp.graphs.reasoning_graph
117
+ instruction: "Perform structured reasoning."
118
+
119
+ end_node:
120
+ type: function
121
+ function: cleanup
122
+
123
+ edges:
124
+ - from: start_node
125
+ to: classifier
126
+ - from: classifier
127
+ to: path_a
128
+ condition: category == "A"
129
+ - from: classifier
130
+ to: path_b
131
+ condition: category == "B"
132
+ - from: classifier
133
+ to: end_node # default (no condition)
134
+
135
+ functions:
136
+ my_func:
137
+ callable: myapp.functions.do_something
138
+ args:
139
+ timeout: 30
140
+ cleanup:
141
+ callable: myapp.functions.cleanup
142
+ ```
143
+
144
+ ## Node Types
145
+
146
+ | Type | YAML `type` | Description |
147
+ |------|-------------|-------------|
148
+ | Function | `function` | Executes a Python callable. Takes session state as positional arg. |
149
+ | LLM Agent | `llm_agent` | Wraps an ADK `LlmAgent`. Supports `model`, `instruction`, `tools`, `output_key`, `output_schema`. |
150
+ | Sequential | `sequential_agent` | Runs sub-agents in order. `sub_agents` lists node keys. |
151
+ | Parallel | `parallel_agent` | Runs sub-agents concurrently. |
152
+ | Loop | `loop_agent` | Repeats sub-agents up to `max_iterations` (default: 10). |
153
+ | LangGraph | `langgraph_agent` | Wraps an ADK `LanggraphAgent`. Requires `langchain-core` and `langgraph` packages. `graph` is a dotted path resolved via `FunctionResolver`. |
154
+
155
+ ## Container Sub-agents
156
+
157
+ Container nodes (`sequential_agent`, `parallel_agent`, `loop_agent`) accept both agent nodes and function nodes as `sub_agents`. Function nodes are automatically wrapped into inline agents at compile time.
158
+
159
+ Nested containers are supported (e.g., a `sequential_agent` containing a `parallel_agent`). Circular references between containers are detected and raise `GraphCompilationError`.
160
+
161
+ ## Condition Expressions
162
+
163
+ Edge conditions use a safe AST-evaluated subset of Python:
164
+
165
+ - **Comparisons**: `==`, `!=`, `<`, `<=`, `>`, `>=`, `in`, `not in`
166
+ - **Boolean ops**: `and`, `or`, `not`
167
+ - **Attribute access**: `obj.attr` (dict fallback: `{a: {b: 1}}` allows `a.b == 1`)
168
+ - **Literals**: strings (`"text"`), numbers (`42`, `3.14`), booleans (`True`, `False`)
169
+
170
+ Conditions are evaluated against session state. Use `True`/`False` (not `true`/`false`).
171
+
172
+ ## Function Resolution
173
+
174
+ Functions defined in the `functions` section are resolved at compile time:
175
+
176
+ - `callable` — dotted module path (e.g., `myapp.utils.process`)
177
+ - `args` — static arguments pre-bound to the callable
178
+ - `${ENV_VAR}` — environment variable interpolation in argument values
179
+
180
+ At runtime, the function receives session state as its first positional argument, followed by pre-bound keyword arguments.
181
+
182
+ ## Validation Pipeline
183
+
184
+ Three-layer validation:
185
+
186
+ 1. **JSON Schema** (`SchemaValidator`) — structural validation, IDE-friendly
187
+ 2. **Pydantic** (`GraphWorkflowDef.model_validate`) — type-safe parsing with discriminated unions
188
+ 3. **GraphValidator** — checks entry/exit nodes, edge references, container sub-agents, function references, node reachability
189
+
190
+ ## Error Hierarchy
191
+
192
+ ```
193
+ GraphWorkflowError (base)
194
+ +-- GraphValidationError # Schema/structural issues
195
+ +-- GraphCompilationError # Compilation failures
196
+ +-- GraphExecutionError # Runtime errors
197
+ +-- ConditionEvalError # Bad condition expressions
198
+ +-- FunctionResolutionError # Import/callable resolution failures
199
+ ```
200
+
201
+ ## Custom Models
202
+
203
+ LLM agent nodes support any OpenAI-compatible API (NewAPI, OneAPI, etc.) through ADK's LiteLLM integration (the `LiteLlm` model wrapper). Set environment variables and use the `openai/` prefix:
204
+
205
+ ```yaml
206
+ nodes:
207
+ my_agent:
208
+ type: llm_agent
209
+ model: openai/your-model-name # Routes through LiteLLM
210
+ instruction: "..."
211
+ ```
212
+
213
+ ```python
214
+ import os
215
+ os.environ["OPENAI_API_KEY"] = "sk-your-key"
216
+ os.environ["OPENAI_API_BASE"] = "https://your-api-domain/v1"
217
+
218
+ agent = from_config("workflow.yaml")
219
+ ```
220
+
221
+ Supported prefixes include: `openai/`, `anthropic/`, `deepseek/`, `groq/`, `ollama/`, and 100+ more via [LiteLLM](https://docs.litellm.ai/docs/providers). Install with `pip install google-adk[extensions]`.
222
+
223
+ ## Examples
224
+
225
+ All examples can be loaded with `from_config()`:
226
+
227
+ ```python
228
+ from graph_workflow import from_config
229
+
230
+ agent = from_config("examples/customer_service.yaml") # 5 nodes
231
+ ```
232
+
233
+ | File | Nodes | Description |
234
+ |------|-------|-------------|
235
+ | `customer_service.yaml` | 5 | Intent classification with conditional routing |
236
+ | `data_analysis_pipeline.yaml` | 21 | Full pipeline: load, validate, parallel analysis, loop optimization, report |
237
+ | `multi_agent_research.yaml` | 24 | Multi-source research with fact-checking, iterative refinement, quality gate |
238
+ | `content_moderation.yaml` | 26 | Parallel multi-dimensional analysis, appeal loop, structured reasoning |
239
+ | `ecommerce_order_flow.yaml` | 30 | Fraud detection, payment retry, warehouse fulfillment, split orders |
240
+ | `custom_model_demo.yaml` | 3 | **Runnable** — translate (deepseek) → summarize (doubao) via NewAPI |
241
+
242
+ ## Testing
243
+
244
+ ```bash
245
+ pip install -e ".[dev]"
246
+ pytest
247
+ ```
248
+
249
+ 206 tests, 97%+ coverage.
250
+
251
+ ## Architecture: Job Executor Engine for million-agents-workflow-builder
252
+
253
+ `adk-graph-workflow` serves as the core execution engine for [million-agents-workflow-builder](https://gitlab.zhejianglab.com/dev426/million-agents-workflow-builder), a multi-agent orchestration platform with FastAPI backend, Kafka dispatch, and Kubernetes Job isolation.
254
+
255
+ ### Design Decision: Standalone Package
256
+
257
+ `adk-graph-workflow` remains an independent pip-installable package rather than being merged into the platform project.
258
+
259
+ **Rationale:**
260
+
261
+ - **Clean separation of concerns** — This library is a pure compile/execute engine (YAML → ADK graph) with zero infrastructure dependencies (no K8s, Kafka, DB). The platform layer handles scheduling, isolation, and monitoring.
262
+ - **Low coupling** — The platform only depends on this package in two places: YAML validation in the workflow builder module, and `from_config()` inside K8s Job Pod entrypoints.
263
+ - **Independent evolution** — Engine can be versioned, tested (206 tests / 97%+ coverage), and released independently of platform changes.
264
+
265
+ ### Integration Architecture
266
+
267
+ ```
268
+ million-agents-workflow-builder (platform layer)
269
+
270
+ ├─ Scheduler (Kafka) ──→ Orchestrator (K8s Job)
271
+ │ │
272
+ │ ▼
273
+ │ Job Pod
274
+ │ ┌─────────────────────┐
275
+ │ │ executor/entrypoint │
276
+ │ │ ↓ │
277
+ │ │ adk-graph-workflow │ ← pip dependency
278
+ │ │ from_config(yaml) │
279
+ │ │ GraphRunnerAgent │
280
+ │ └─────────────────────┘
281
+ │ │
282
+ │ ▼
283
+ │ Kafka (results)
284
+
285
+ └─ Workflow Builder (NL→YAML, validates via SchemaValidator/GraphValidator)
286
+ ```
287
+
288
+ ### Deployment
289
+
290
+ Install as a pip dependency in the platform's Job Pod environment:
291
+
292
+ ```bash
293
+ pip install adk-graph-workflow
294
+ # or from private PyPI / git source
295
+ pip install git+https://gitlab.zhejianglab.com/dev426/adk-graph-workflow.git
296
+ ```
@@ -0,0 +1 @@
1
+ """Stub functions for graph_workflow examples."""
@@ -0,0 +1,34 @@
"""Data analysis pipeline example stubs.

Each stub models one step of the example data-analysis workflow. Per the
graph_workflow function-node contract, a stub receives the session state as
its first positional argument, accepts any pre-bound keyword arguments, and
returns a dict of state keys to merge back into the session.

NOTE(review): these are fixed-value stubs for demos/tests — none of them
inspect ``state`` or ``kwargs``.
"""


def load_data_source(state, **kwargs):
    """Load the raw dataset; report success and row count."""
    return {"loaded": True, "rows": 1000}


def validate_schema(state, **kwargs):
    """Validate the loaded data against the expected schema."""
    return {"valid": True}


def clean_missing_values(state, **kwargs):
    """Impute or drop missing values."""
    return {"cleaned": True}


def encode_categories(state, **kwargs):
    """Encode categorical features."""
    return {"encoded": True}


def normalize_features(state, **kwargs):
    """Scale numeric features to a common range."""
    return {"normalized": True}


def compute_statistics(state, **kwargs):
    """Compute summary statistics for the dataset."""
    return {"mean": 0.5, "std": 0.1}


def detect_anomalies(state, **kwargs):
    """Flag anomalous records (stub reports none)."""
    return {"anomalies": []}


def train_model_step(state, **kwargs):
    """Train the model and report accuracy."""
    return {"accuracy": 0.95}


def evaluate_model(state, **kwargs):
    """Evaluate the trained model and report its F1 score."""
    return {"f1_score": 0.93}


def create_visualizations(state, **kwargs):
    """Render result charts and return the artifact path."""
    return {"chart_path": "/tmp/chart.png"}


def export_report(state, **kwargs):
    """Export the final report and return its path."""
    return {"report_path": "/tmp/report.pdf"}