hyperloop 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hyperloop/compose.py ADDED
@@ -0,0 +1,126 @@
1
+ """Prompt composition — assembles worker prompts from base + overlay + task context.
2
+
3
+ Three layers composed at spawn time:
4
+ 1. Base prompt from base/{role}.yaml
5
+ 2. Process overlay from specs/prompts/{role}-overlay.yaml (if exists)
6
+ 3. Task context: spec content, findings, traceability refs (spec_ref, task_id)
7
+
8
+ For v1, kustomize integration (project overlay) is skipped. The orchestrator
9
+ reads base YAML files directly and injects process overlays + task context.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ from pathlib import Path
15
+ from typing import TYPE_CHECKING, cast
16
+
17
+ import yaml
18
+
19
+ if TYPE_CHECKING:
20
+ from hyperloop.ports.state import StateStore
21
+
22
+
23
class PromptComposer:
    """Composes agent prompts from base definitions + overlays + task context."""

    def __init__(self, base_dir: str | Path, state: StateStore) -> None:
        """Load base agent definitions from base_dir.

        Args:
            base_dir: Path to the directory containing base agent YAML files.
            state: StateStore used to read process overlays and spec files
                from the target repo.
        """
        self._base_dir = Path(base_dir)
        self._state = state
        # Maps role name -> base prompt text; populated once at construction.
        self._base_prompts: dict[str, str] = {}
        self._load_base_definitions()

    def _load_base_definitions(self) -> None:
        """Load all base agent YAML files and extract their prompt fields.

        Documents that are not of kind Agent, lack a 'prompt', or lack a
        'name' are skipped so one malformed file cannot prevent the remaining
        definitions from loading (previously a missing 'name' raised KeyError
        and aborted the whole load).
        """
        # sorted() makes load order deterministic across filesystems, which
        # matters only if two files declare the same agent name.
        for yaml_file in sorted(self._base_dir.glob("*.yaml")):
            doc = yaml.safe_load(yaml_file.read_text(encoding="utf-8"))
            if not doc or doc.get("kind") != "Agent" or "prompt" not in doc:
                continue
            name = doc.get("name")
            if name is None:
                continue  # Agent document without a name — skip, don't crash.
            self._base_prompts[name] = doc["prompt"]

    def compose(
        self,
        role: str,
        task_id: str,
        spec_ref: str,
        findings: str,
    ) -> str:
        """Compose the full prompt for a worker.

        Layers:
            1. Base prompt from base/{role}.yaml
            2. Process overlay from specs/prompts/{role}-overlay.yaml (if exists)
            3. Task context: spec content, findings, traceability refs

        Args:
            role: Agent role name (e.g. "implementer", "verifier").
            task_id: Task identifier (e.g. "task-027").
            spec_ref: Path to the originating spec file (e.g. "specs/persistence.md").
            findings: Findings from prior rounds (empty string if none).

        Returns:
            The composed prompt string ready to pass to a worker.

        Raises:
            ValueError: If the role has no base agent definition.
        """
        # Layer 1: Base prompt
        if role not in self._base_prompts:
            msg = f"Unknown role '{role}': no base agent definition found in {self._base_dir}"
            raise ValueError(msg)

        base_prompt = self._base_prompts[role]

        # Replace template variables. Plain str.replace (not str.format) so
        # stray braces elsewhere in the prompt are left untouched.
        prompt = base_prompt.replace("{spec_ref}", spec_ref).replace("{task_id}", task_id)

        # Layer 2: Process overlay (from target repo specs/prompts/)
        overlay_path = f"specs/prompts/{role}-overlay.yaml"
        overlay_content = self._state.read_file(overlay_path)
        overlay_text = ""
        if overlay_content is not None:
            overlay_text = self._extract_overlay_prompt(overlay_content)

        # Layer 3: Task context — spec content
        spec_content = self._state.read_file(spec_ref)

        # Assemble the final prompt
        sections: list[str] = [prompt.rstrip()]

        if overlay_text:
            sections.append(f"## Process Overlay\n{overlay_text}")

        if spec_content is not None:
            sections.append(f"## Spec\n{spec_content}")
        else:
            # Spec may be missing from the target repo; tell the worker
            # explicitly rather than silently omitting the section.
            sections.append(
                f"## Spec\n[Spec file '{spec_ref}' not found. Proceed with available context.]"
            )

        if findings:
            sections.append(f"## Findings\n{findings}")

        return "\n\n".join(sections) + "\n"

    @staticmethod
    def _extract_overlay_prompt(raw_yaml: str) -> str:
        """Extract prompt or content from overlay YAML.

        Overlay files may contain a 'prompt' field (like agent definitions)
        or raw text content. Handles both.
        """
        try:
            doc = yaml.safe_load(raw_yaml)
            if isinstance(doc, dict) and "prompt" in doc:
                return str(cast("dict[str, object]", doc)["prompt"]).strip()
        except yaml.YAMLError:
            pass
        # Fall back to raw content
        return raw_yaml.strip()
hyperloop/config.py ADDED
@@ -0,0 +1,161 @@
1
+ """Configuration loading — parse .hyperloop.yaml with defaults.
2
+
3
+ Reads the YAML config file, applies defaults for missing fields, and
4
+ returns a frozen Config dataclass. CLI arguments can override file values.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from dataclasses import dataclass
10
+ from typing import TYPE_CHECKING, cast
11
+
12
+ import yaml
13
+
14
+ if TYPE_CHECKING:
15
+ from pathlib import Path
16
+
17
+
18
class ConfigError(Exception):
    """Raised when the config file cannot be parsed or is structurally invalid.

    Covers both YAML syntax errors and a top-level document that is not a
    mapping; raised only by load_config().
    """
20
+
21
+
22
@dataclass(frozen=True)
class Config:
    """Typed, immutable configuration for the orchestrator.

    Built by load_config(): defaults come from _defaults(), file values from
    .hyperloop.yaml override them, and CLI overrides (repo, base_branch,
    max_workers) win over both.
    """

    repo: str | None  # owner/repo or inferred from git remote
    base_branch: str  # default: "main"
    specs_dir: str  # default: "specs"
    overlay: str | None  # path or git URL to kustomization dir
    runtime: str  # "local" (v1 only)
    max_workers: int  # default: 6
    auto_merge: bool  # default: True
    merge_strategy: str  # default: "squash"
    delete_branch: bool  # default: True
    poll_interval: int  # default: 30
    max_rounds: int  # default: 50
    max_rebase_attempts: int  # default: 3
39
+
40
+ def _defaults() -> dict[str, object]:
41
+ """Return the full default config as a flat dict."""
42
+ return {
43
+ "repo": None,
44
+ "base_branch": "main",
45
+ "specs_dir": "specs",
46
+ "overlay": None,
47
+ "runtime": "local",
48
+ "max_workers": 6,
49
+ "auto_merge": True,
50
+ "merge_strategy": "squash",
51
+ "delete_branch": True,
52
+ "poll_interval": 30,
53
+ "max_rounds": 50,
54
+ "max_rebase_attempts": 3,
55
+ }
56
+
57
+
58
+ def _flatten_yaml(raw: dict[str, object]) -> dict[str, object]:
59
+ """Flatten the nested YAML structure into a flat dict matching Config fields.
60
+
61
+ The YAML has nested sections (target, runtime, merge) but Config is flat.
62
+ """
63
+ flat: dict[str, object] = {}
64
+
65
+ # Top-level scalars
66
+ for key in ("overlay", "poll_interval", "max_rounds", "max_rebase_attempts"):
67
+ if key in raw:
68
+ flat[key] = raw[key]
69
+
70
+ # target section
71
+ target = raw.get("target")
72
+ if isinstance(target, dict):
73
+ if "repo" in target:
74
+ flat["repo"] = target["repo"]
75
+ if "base_branch" in target:
76
+ flat["base_branch"] = target["base_branch"]
77
+ if "specs_dir" in target:
78
+ flat["specs_dir"] = target["specs_dir"]
79
+
80
+ # runtime section
81
+ runtime = raw.get("runtime")
82
+ if isinstance(runtime, dict):
83
+ if "default" in runtime:
84
+ flat["runtime"] = runtime["default"]
85
+ if "max_workers" in runtime:
86
+ flat["max_workers"] = runtime["max_workers"]
87
+
88
+ # merge section
89
+ merge = raw.get("merge")
90
+ if isinstance(merge, dict):
91
+ if "auto_merge" in merge:
92
+ flat["auto_merge"] = merge["auto_merge"]
93
+ if "strategy" in merge:
94
+ flat["merge_strategy"] = merge["strategy"]
95
+ if "delete_branch" in merge:
96
+ flat["delete_branch"] = merge["delete_branch"]
97
+
98
+ return flat
99
+
100
+
101
def load_config(
    path: Path | None = None,
    *,
    repo: str | None = None,
    base_branch: str | None = None,
    max_workers: int | None = None,
) -> Config:
    """Load config from a YAML file, with defaults for missing fields.

    Precedence (lowest to highest): built-in defaults, file values, CLI
    overrides. A None CLI override means "not given".

    Args:
        path: Path to .hyperloop.yaml. If None or non-existent, use all defaults.
        repo: CLI override for target.repo.
        base_branch: CLI override for target.base_branch.
        max_workers: CLI override for runtime.max_workers.

    Returns:
        A frozen Config instance.

    Raises:
        ConfigError: If the file exists but contains invalid YAML or is not a mapping.
    """
    values = _defaults()

    # Layer 2: file values override defaults (if the file exists).
    if path is not None and path.exists():
        try:
            # Explicit encoding so the result does not depend on the locale.
            raw = yaml.safe_load(path.read_text(encoding="utf-8"))
        except yaml.YAMLError as exc:
            msg = f"Failed to parse config file {path}: {exc}"
            raise ConfigError(msg) from exc

        # An empty file parses to None — treat it as "no overrides".
        if raw is not None:
            if not isinstance(raw, dict):
                msg = f"Config file {path} must be a YAML mapping, got {type(raw).__name__}"
                raise ConfigError(msg)
            values.update(_flatten_yaml(cast("dict[str, object]", raw)))

    # Layer 3: CLI overrides win over file values and defaults.
    if repo is not None:
        values["repo"] = repo
    if base_branch is not None:
        values["base_branch"] = base_branch
    if max_workers is not None:
        values["max_workers"] = max_workers

    # Coerce into the frozen dataclass; casts narrow the dict's `object`
    # values for the type checker without runtime effect.
    return Config(
        repo=cast("str | None", values["repo"]),
        base_branch=str(values["base_branch"]),
        specs_dir=str(values["specs_dir"]),
        overlay=cast("str | None", values["overlay"]),
        runtime=str(values["runtime"]),
        max_workers=int(cast("int", values["max_workers"])),
        auto_merge=bool(values["auto_merge"]),
        merge_strategy=str(values["merge_strategy"]),
        delete_branch=bool(values["delete_branch"]),
        poll_interval=int(cast("int", values["poll_interval"])),
        max_rounds=int(cast("int", values["max_rounds"])),
        max_rebase_attempts=int(cast("int", values["max_rebase_attempts"])),
    )
File without changes
@@ -0,0 +1,104 @@
1
+ """Decision function — given a World snapshot, return a list of Actions.
2
+
3
+ This is the orchestrator's brain: a pure function with no I/O dependencies.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from typing import TYPE_CHECKING
9
+
10
+ from hyperloop.domain.model import (
11
+ AdvanceTask,
12
+ Halt,
13
+ ReapWorker,
14
+ SpawnWorker,
15
+ TaskStatus,
16
+ )
17
+
18
+ if TYPE_CHECKING:
19
+ from hyperloop.domain.model import Action, Task, World
20
+
21
+
22
def _deps_met(task: Task, tasks: dict[str, Task]) -> bool:
    """Return True when every dependency of `task` is COMPLETE.

    A dependency id missing from `tasks` counts as unmet.
    """
    return all(
        dep_id in tasks and tasks[dep_id].status == TaskStatus.COMPLETE
        for dep_id in task.deps
    )
29
+
30
+
31
+ def _task_has_worker(task_id: str, world: World) -> bool:
32
+ """Check if a task has any worker (running, done, or failed).
33
+
34
+ Tasks with a worker in any state should not be re-spawned: running workers
35
+ are active, done/failed workers are about to be reaped by the loop.
36
+ """
37
+ return any(ws.task_id == task_id for ws in world.workers.values())
38
+
39
+
40
def decide(world: World, max_workers: int, max_rounds: int) -> list[Action]:
    """Decide what actions to take given the current world state.

    Pure function over the World snapshot. Emits an ordered list of actions:
    reaps first, then advances, then spawns, then halt if applicable.
    """
    actions: list[Action] = []

    # Step 1: reap every finished worker (status "done" or "failed").
    actions.extend(
        ReapWorker(task_id=worker.task_id)
        for worker in world.workers.values()
        if worker.status in ("done", "failed")
    )

    # Step 2: a task that exhausted its round budget fails and halts the loop.
    for task in world.tasks.values():
        if task.status == TaskStatus.IN_PROGRESS and task.round >= max_rounds:
            actions.append(
                AdvanceTask(task_id=task.id, to_status=TaskStatus.FAILED, to_phase=None)
            )
            actions.append(Halt(reason=f"task {task.id} exceeded max_rounds ({max_rounds})"))
            return actions

    # Step 3: workers still running occupy spawn slots.
    running_count = sum(1 for worker in world.workers.values() if worker.status == "running")

    # Step 4: bucket spawnable tasks by priority:
    #   1. needs-rebase tasks (rebase-resolver, from merge conflict handling)
    #   2. in-progress without a worker (crash recovery / pipeline resume)
    #   3. not-started with all deps met
    # The role is a hint: "rebase-resolver" for needs-rebase tasks,
    # "implementer" as a default for others. The loop overrides the role
    # for in-progress tasks based on their pipeline position.
    buckets: dict[str, list[Task]] = {"rebase": [], "resume": [], "ready": []}
    for task in world.tasks.values():
        if task.status == TaskStatus.NEEDS_REBASE and not _task_has_worker(task.id, world):
            buckets["rebase"].append(task)
        elif task.status == TaskStatus.IN_PROGRESS and not _task_has_worker(task.id, world):
            buckets["resume"].append(task)
        elif task.status == TaskStatus.NOT_STARTED and _deps_met(task, world.tasks):
            buckets["ready"].append(task)

    # Stable per-bucket sort by task id keeps spawn order deterministic.
    eligible: list[Task] = []
    for bucket_name in ("rebase", "resume", "ready"):
        eligible.extend(sorted(buckets[bucket_name], key=lambda t: t.id))

    free_slots = max_workers - running_count
    for task in eligible[: max(free_slots, 0)]:
        role = "rebase-resolver" if task.status == TaskStatus.NEEDS_REBASE else "implementer"
        actions.append(SpawnWorker(task_id=task.id, role=role))

    # Step 5: converged — every task complete and no workers left → halt.
    if world.tasks and not world.workers and all(
        t.status == TaskStatus.COMPLETE for t in world.tasks.values()
    ):
        actions.append(Halt(reason="all tasks complete"))

    return actions
@@ -0,0 +1,66 @@
1
+ """Dependency cycle detection for the task graph.
2
+
3
+ Pure function, no I/O. Used by intake to reject dependency cycles.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from typing import TYPE_CHECKING
9
+
10
+ if TYPE_CHECKING:
11
+ from hyperloop.domain.model import Task
12
+
13
+
14
def detect_cycles(tasks: dict[str, Task]) -> list[list[str]]:
    """Detect dependency cycles in the task graph.

    Returns a list of cycles, where each cycle is a list of task IDs.
    Empty list means no cycles. Dependencies referencing task IDs not
    present in the dict are ignored (unmet, not cyclic).

    Implemented as an iterative DFS with an explicit stack so that very deep
    dependency chains cannot overflow Python's recursion limit (the previous
    recursive version failed at roughly 1000 levels).
    """
    # Three-color DFS: WHITE = not visited, GRAY = on the current DFS path,
    # BLACK = fully processed.
    WHITE, GRAY, BLACK = 0, 1, 2

    color: dict[str, int] = {tid: WHITE for tid in tasks}
    parent: dict[str, str | None] = {}
    cycles: list[list[str]] = []

    def visit(root: str) -> None:
        # Each stack frame holds (node, iterator over its remaining deps),
        # mirroring the recursive formulation without using the call stack.
        color[root] = GRAY
        stack = [(root, iter(tasks[root].deps))]
        while stack:
            node, deps = stack[-1]
            descended = False
            for dep in deps:
                if dep not in tasks:
                    # Dependency references a task not in the dict — skip.
                    continue
                if color[dep] == GRAY:
                    # Back edge to a node on the current path: found a cycle.
                    cycles.append(_extract_cycle(node, dep, parent))
                elif color[dep] == WHITE:
                    parent[dep] = node
                    color[dep] = GRAY
                    stack.append((dep, iter(tasks[dep].deps)))
                    descended = True
                    break
            if not descended:
                # All deps handled — this node is done.
                color[node] = BLACK
                stack.pop()

    for tid in tasks:
        if color[tid] == WHITE:
            parent[tid] = None
            visit(tid)

    return cycles


def _extract_cycle(current: str, back_edge_target: str, parent: dict[str, str | None]) -> list[str]:
    """Extract the cycle path from the parent map.

    Walks from `current` back through `parent` until reaching `back_edge_target`,
    then reverses so the path reads in dependency order from the target.
    """
    cycle = [back_edge_target]
    node: str | None = current
    while node != back_edge_target:
        if node is None:
            # Defensive: a broken parent chain should be impossible for a back
            # edge found during DFS; stop rather than loop forever. (Replaces
            # an `assert`, which is stripped under `python -O`.)
            break
        cycle.append(node)
        node = parent.get(node)
    cycle.reverse()
    return cycle
+ return cycle
@@ -0,0 +1,221 @@
1
+ """Domain model — value objects, entities, and pipeline primitives.
2
+
3
+ All types are pure data with no I/O dependencies.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from dataclasses import dataclass
9
+ from enum import Enum
10
+ from typing import Literal, NewType
11
+
12
+ # ---------------------------------------------------------------------------
13
+ # Enums
14
+ # ---------------------------------------------------------------------------
15
+
16
+
17
class TaskStatus(Enum):
    """Lifecycle status of a task.

    Values are the serialized string forms. The decide() function treats
    COMPLETE as the only status that satisfies a dependency, and never
    schedules work for FAILED or COMPLETE tasks.
    """

    NOT_STARTED = "not_started"  # eligible to spawn once all deps are COMPLETE
    IN_PROGRESS = "in_progress"  # being worked; resumed if it has no worker
    NEEDS_REBASE = "needs_rebase"  # highest spawn priority (rebase-resolver role)
    COMPLETE = "complete"  # satisfies dependents' dependency checks
    FAILED = "failed"  # e.g. after exceeding max_rounds; never rescheduled
25
+
26
+
27
class Verdict(Enum):
    """Outcome reported by a worker.

    Values are the serialized string forms. PASS/FAIL describe the step
    result; TIMEOUT and ERROR presumably distinguish budget exhaustion from
    crashes — confirm against the worker runtime.
    """

    PASS = "pass"
    FAIL = "fail"
    TIMEOUT = "timeout"
    ERROR = "error"
34
+
35
+
36
+ # ---------------------------------------------------------------------------
37
+ # Simple types
38
+ # ---------------------------------------------------------------------------
39
+
40
# NewType is erased at runtime — Phase values are plain str; the brand exists
# only for static type checkers.
Phase = NewType("Phase", str)
"""Current pipeline step name — a branded string for type safety."""
42
+
43
+ # ---------------------------------------------------------------------------
44
+ # Core entities / value objects
45
+ # ---------------------------------------------------------------------------
46
+
47
+
48
@dataclass(frozen=True)
class Task:
    """A unit of work tracked by the orchestrator.

    Frozen: instances are immutable value objects.
    """

    id: str  # unique task identifier (e.g. "task-027")
    title: str  # human-readable summary
    spec_ref: str  # path to the originating spec file (e.g. "specs/persistence.md")
    status: TaskStatus  # lifecycle status (see TaskStatus)
    phase: Phase | None  # current pipeline step, or None
    deps: tuple[str, ...]  # ids of tasks that must be COMPLETE before this one runs
    round: int  # rounds consumed so far; compared against max_rounds by decide()
    branch: str | None  # working branch name, if one exists yet
    pr: str | None  # associated PR reference, if one exists yet
61
+
62
+
63
@dataclass(frozen=True)
class WorkerResult:
    """Verdict reported by a finished worker."""

    verdict: Verdict  # outcome (pass/fail/timeout/error)
    findings: int  # number of findings the worker reported
    detail: str  # free-form detail accompanying the verdict
70
+
71
+
72
@dataclass(frozen=True)
class WorkerHandle:
    """Opaque handle to a running worker session."""

    task_id: str  # task this worker is bound to
    role: str  # agent role the worker was spawned with
    agent_id: str  # identifier of the spawned agent
    session_id: str | None  # runtime session id; None when no session is associated
80
+
81
+
82
+ # ---------------------------------------------------------------------------
83
+ # Pipeline primitives
84
+ # ---------------------------------------------------------------------------
85
+
86
+
87
@dataclass(frozen=True)
class RoleStep:
    """Spawn an agent with a given role."""

    role: str  # agent role to spawn
    on_pass: str | None  # successor on a pass verdict; interpretation owned by the pipeline runner
    on_fail: str | None  # successor on a fail verdict; interpretation owned by the pipeline runner
94
+
95
+
96
@dataclass(frozen=True)
class GateStep:
    """Block until an external signal is received."""

    gate: str  # name of the external signal to wait for
101
+
102
+
103
@dataclass(frozen=True)
class LoopStep:
    """Wrap steps — on fail retry from top, on pass continue."""

    steps: tuple[PipelineStep, ...]  # nested steps, executed in order
108
+
109
+
110
@dataclass(frozen=True)
class ActionStep:
    """Terminal operation (merge-pr, mark-pr-ready, etc.)."""

    action: str  # action name, interpreted by the executor
115
+
116
+
117
# Closed union — narrow with isinstance() or match/case on the concrete type.
PipelineStep = RoleStep | GateStep | LoopStep | ActionStep
"""Union of all pipeline primitive types."""
119
+
120
+
121
@dataclass(frozen=True)
class PipelinePosition:
    """Path through a nested pipeline structure.

    Each element in `path` is an index into a list of steps at that nesting level.
    Example: path=[0, 1] means "first step in pipeline (a LoopStep), second step within
    that loop."
    """

    path: tuple[int, ...]  # indices from outermost step list to innermost
131
+
132
+
133
@dataclass(frozen=True)
class Process:
    """A named process with intake and per-task pipelines."""

    name: str  # process name
    intake: tuple[PipelineStep, ...]  # steps for the intake phase
    pipeline: tuple[PipelineStep, ...]  # steps run for each task
140
+
141
+
142
+ # ---------------------------------------------------------------------------
143
+ # World snapshot (input to the decide function)
144
+ # ---------------------------------------------------------------------------
145
+
146
+
147
@dataclass(frozen=True)
class WorkerState:
    """Snapshot of a worker's current status."""

    task_id: str  # task this worker is bound to
    role: str  # agent role the worker was spawned with
    status: Literal["running", "done", "failed"]  # "done"/"failed" workers are reaped by decide()
154
+
155
+
156
@dataclass(frozen=True)
class World:
    """Complete snapshot of orchestrator state — input to decide()."""

    tasks: dict[str, Task]  # all known tasks, keyed by task id
    workers: dict[str, WorkerState]  # worker snapshots in any status
    epoch: str  # snapshot epoch marker; opaque here — TODO confirm exact semantics
163
+
164
+
165
+ # ---------------------------------------------------------------------------
166
+ # Actions (output of the decide function)
167
+ # ---------------------------------------------------------------------------
168
+
169
+
170
@dataclass(frozen=True)
class SpawnWorker:
    """Spawn a new worker for a task with a given role."""

    task_id: str  # task the new worker will be bound to
    role: str  # role hint, e.g. "implementer" or "rebase-resolver" (see decide())
176
+
177
+
178
@dataclass(frozen=True)
class ReapWorker:
    """Collect results from a finished (done or failed) worker."""

    task_id: str  # task whose worker should be reaped
183
+
184
+
185
@dataclass(frozen=True)
class AdvanceTask:
    """Transition a task to a new status and/or phase."""

    task_id: str  # task to transition
    to_status: TaskStatus  # new lifecycle status
    to_phase: Phase | None  # new pipeline phase; None presumably clears it — confirm in the loop
192
+
193
+
194
@dataclass(frozen=True)
class RunPM:
    """Run the PM intake process (carries no payload — a pure signal action)."""
197
+
198
+
199
@dataclass(frozen=True)
class RunProcessImprover:
    """Run the process improver with accumulated findings."""

    findings: dict[str, int]  # findings tally; presumably keyed by task or category — confirm
204
+
205
+
206
@dataclass(frozen=True)
class MergePR:
    """Squash-merge a task's PR."""

    task_id: str  # task whose PR should be merged
211
+
212
+
213
@dataclass(frozen=True)
class Halt:
    """Stop the orchestrator loop."""

    reason: str  # human-readable halt reason (e.g. "all tasks complete")
218
+
219
+
220
# NOTE(review): new Action variants must be added to this union to stay
# visible to type checkers and exhaustive match statements.
Action = SpawnWorker | ReapWorker | AdvanceTask | RunPM | RunProcessImprover | MergePR | Halt
"""Union of all action types emitted by the decide function."""