yaml-flow 2.3.0 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -839,6 +839,352 @@ Use `validateGraphConfig()` for structural checks (JSON shape) and `validateGrap
839
839
 
840
840
  ---
841
841
 
842
+ ## Continuous Event Graph
843
+
844
+ A **long-lived, evolving** event-graph where both the graph config and execution state mutate over time. Ideal for dashboards, monitoring systems, and any scenario where the workflow has no fixed endpoint.
845
+
846
+ The core type is `LiveGraph` — it bundles `config` + `state` so they can't get out of sync. Every function is pure: `f(LiveGraph, input) → LiveGraph`.
847
+
848
+ ```typescript
849
+ import {
850
+ createLiveGraph, applyEvent,
851
+ addNode, removeNode,
852
+ addRequires, removeRequires, addProvides, removeProvides,
853
+ injectTokens, drainTokens,
854
+ schedule, inspect,
855
+ resetNode, disableNode, enableNode, getNode,
856
+ snapshot, restore,
857
+ getUnreachableTokens, getUnreachableNodes,
858
+ getUpstream, getDownstream,
859
+ } from 'yaml-flow/continuous-event-graph';
860
+ ```
861
+
862
+ ### Quick Start
863
+
864
+ ```typescript
865
+ import { createLiveGraph, applyEvent, addNode, schedule, inspect } from 'yaml-flow/continuous-event-graph';
866
+
867
+ // 1. Bootstrap
868
+ let live = createLiveGraph({
869
+ settings: { completion: 'manual' },
870
+ tasks: {
871
+ fetch_prices: { provides: ['price-data'] },
872
+ compute: { requires: ['price-data'], provides: ['indicators'] },
873
+ },
874
+ });
875
+
876
+ // 2. Schedule — what's ready?
877
+ schedule(live).eligible; // ['fetch_prices']
878
+
879
+ // 3. Apply events — immutable state transitions
880
+ live = applyEvent(live, { type: 'task-started', taskName: 'fetch_prices', timestamp: new Date().toISOString() });
881
+ live = applyEvent(live, { type: 'task-completed', taskName: 'fetch_prices', timestamp: new Date().toISOString() });
882
+ schedule(live).eligible; // ['compute']
883
+
884
+ // 4. Evolve — add a node at runtime
885
+ live = addNode(live, 'alert', { requires: ['indicators'], provides: ['alert-sent'] });
886
+
887
+ // 5. Health check
888
+ inspect(live); // { totalNodes: 3, running: 0, completed: 1, ... }
889
+ ```
890
+
891
+ ### Graph Mutations
892
+
893
+ | Function | Description |
894
+ |---|---|
895
+ | `addNode(live, name, config)` | Add a task to the graph (config + state) |
896
+ | `removeNode(live, name)` | Remove a task from the graph |
897
+ | `addRequires(live, node, tokens)` | Add requires tokens to a node |
898
+ | `removeRequires(live, node, tokens)` | Remove requires tokens from a node |
899
+ | `addProvides(live, node, tokens)` | Add provides tokens to a node |
900
+ | `removeProvides(live, node, tokens)` | Remove provides tokens from a node |
901
+
902
+ ### Token Management
903
+
904
+ ```typescript
905
+ // Inject external data/signals
906
+ live = injectTokens(live, ['market-open', 'price-data']);
907
+
908
+ // Drain stale/expired tokens
909
+ live = drainTokens(live, ['price-data']); // forces re-fetch before downstream can run
910
+ ```
911
+
912
+ ### Node Lifecycle
913
+
914
+ | Function | Description |
915
+ |---|---|
916
+ | `resetNode(live, name)` | Reset a node to `not-started` (for retry) |
917
+ | `disableNode(live, name)` | Set a node to `inactivated` (scheduler skips it) |
918
+ | `enableNode(live, name)` | Re-enable a disabled node |
919
+ | `getNode(live, name)` | Get config + state for a single node |
920
+
921
+ ### Graph Traversal
922
+
923
+ ```typescript
924
+ // "What feeds into generate_signals?"
925
+ const upstream = getUpstream(live, 'generate_signals');
926
+ upstream.nodes; // [{ nodeName: 'fetch_prices', providesTokens: ['price-data'] }, ...]
927
+ upstream.tokens; // ['price-data', 'indicators', ...]
928
+
929
+ // "What breaks if fetch_prices goes down?"
930
+ const downstream = getDownstream(live, 'fetch_prices');
931
+ downstream.nodes; // [{ nodeName: 'compute', requiresTokens: ['price-data'] }, ...]
932
+ downstream.tokens; // ['price-data', 'indicators', ...]
933
+ ```
934
+
935
+ ### Reachability Analysis
936
+
937
+ ```typescript
938
+ // Tokens that can never be produced given the current state
939
+ const unreachableTokens = getUnreachableTokens(live);
940
+ unreachableTokens.tokens; // [{ token: 'ghost', reason: 'no-producer', producers: [] }]
941
+
942
+ // Nodes that can never become eligible
943
+ const unreachableNodes = getUnreachableNodes(live);
944
+ unreachableNodes.nodes; // [{ nodeName: 'orphan', missingTokens: ['ghost'] }]
945
+ ```
946
+
947
+ ### Persistence
948
+
949
+ ```typescript
950
+ // Save
951
+ const snap = snapshot(live); // JSON-safe object
952
+ localStorage.setItem('graph', JSON.stringify(snap));
953
+
954
+ // Restore
955
+ const data = JSON.parse(localStorage.getItem('graph')!);
956
+ const restored = restore(data); // → LiveGraph (validates shape)
957
+ ```
958
+
959
+ ### Continuous Event Graph API Reference
960
+
961
+ | Function | Description |
962
+ |---|---|
963
+ | `createLiveGraph(config, id?)` | Bootstrap a LiveGraph from a GraphConfig |
964
+ | `applyEvent(live, event)` | Apply an execution event (task-started, task-completed, etc.) |
965
+ | `addNode(live, name, config)` | Add a node (both config + state) |
966
+ | `removeNode(live, name)` | Remove a node |
967
+ | `addRequires / removeRequires` | Wire/unwire requires tokens |
968
+ | `addProvides / removeProvides` | Wire/unwire provides tokens |
969
+ | `injectTokens(live, tokens)` | Add tokens to available outputs |
970
+ | `drainTokens(live, tokens)` | Remove tokens from available outputs |
971
+ | `schedule(live)` | Classify tasks: eligible / pending / unresolved / blocked / conflicts |
972
+ | `inspect(live)` | Health report: statuses, cycles, open deps, conflicts |
973
+ | `resetNode(live, name)` | Reset node to not-started |
974
+ | `disableNode(live, name)` | Disable a node (inactivated) |
975
+ | `enableNode(live, name)` | Re-enable a disabled node |
976
+ | `getNode(live, name)` | Get a node's config + state |
977
+ | `getUpstream(live, name)` | Transitive upstream: what feeds into this node? |
978
+ | `getDownstream(live, name)` | Transitive downstream: what depends on this node? |
979
+ | `getUnreachableTokens(live)` | Tokens that can never be produced |
980
+ | `getUnreachableNodes(live)` | Nodes that can never become eligible |
981
+ | `snapshot(live)` | Serialize to a JSON-safe snapshot |
982
+ | `restore(data)` | Restore a LiveGraph from a snapshot |
983
+
984
+ ---
985
+
986
+ ## LLM Inference
987
+
988
+ Pluggable AI-assisted completion detection. The caller provides the LLM via an `InferenceAdapter` — yaml-flow builds the prompt, parses the response, and applies the results. Core stays pure; inference is opt-in.
989
+
990
+ ```typescript
991
+ import {
992
+ buildInferencePrompt, inferCompletions, applyInferences, inferAndApply,
993
+ } from 'yaml-flow/inference';
994
+ ```
995
+
996
+ ### Inference Hints on Nodes
997
+
998
+ Add optional `inference` metadata to any `TaskConfig`:
999
+
1000
+ ```typescript
1001
+ const config = {
1002
+ settings: { completion: 'all-tasks-done' },
1003
+ tasks: {
1004
+ 'infra-provisioned': {
1005
+ provides: ['infra-ready'],
1006
+ inference: {
1007
+ criteria: 'All Azure resources provisioned successfully',
1008
+ keywords: ['azure', 'deployment', 'provisioning'],
1009
+ suggestedChecks: ['scan logs for "Deployment Succeeded"'],
1010
+ autoDetectable: true, // LLM will analyze this node
1011
+ },
1012
+ },
1013
+ 'app-deployed': {
1014
+ requires: ['infra-ready'],
1015
+ provides: ['app-ready'],
1016
+ inference: {
1017
+ criteria: 'Health check returns HTTP 200',
1018
+ autoDetectable: true,
1019
+ },
1020
+ },
1021
+ 'monitoring': { // no inference → LLM skips it
1022
+ requires: ['app-ready'],
1023
+ provides: ['monitored'],
1024
+ },
1025
+ },
1026
+ };
1027
+ ```
1028
+
1029
+ ### Pluggable Adapter
1030
+
1031
+ Implement one method — `analyze(prompt) → string`:
1032
+
1033
+ ```typescript
1034
+ import type { InferenceAdapter } from 'yaml-flow/inference';
1035
+
1036
+ // OpenAI
1037
+ const openaiAdapter: InferenceAdapter = {
1038
+ analyze: async (prompt) => {
1039
+ const res = await openai.chat.completions.create({
1040
+ model: 'gpt-4o', messages: [{ role: 'user', content: prompt }],
1041
+ });
1042
+ return res.choices[0].message.content ?? '[]';
1043
+ },
1044
+ };
1045
+
1046
+ // Anthropic
1047
+ const claudeAdapter: InferenceAdapter = {
1048
+ analyze: async (prompt) => {
1049
+ const res = await anthropic.messages.create({
1050
+ model: 'claude-sonnet-4-20250514', max_tokens: 1024,
1051
+ messages: [{ role: 'user', content: prompt }],
1052
+ });
1053
+ return res.content[0].type === 'text' ? res.content[0].text : '[]';
1054
+ },
1055
+ };
1056
+
1057
+ // Any HTTP endpoint
1058
+ const customAdapter: InferenceAdapter = {
1059
+ analyze: async (prompt) => {
1060
+ const res = await fetch('https://my-llm/analyze', {
1061
+ method: 'POST', body: JSON.stringify({ prompt }),
1062
+ });
1063
+ return (await res.json()).response;
1064
+ },
1065
+ };
1066
+ ```
1067
+
1068
+ ### Built-in Adapter Factories
1069
+
1070
+ Zero-boilerplate adapters for common patterns:
1071
+
1072
+ ```typescript
1073
+ import { createCliAdapter, createHttpAdapter } from 'yaml-flow/inference';
1074
+
1075
+ // Ollama via HTTP
1076
+ const ollama = createHttpAdapter({
1077
+ url: 'http://localhost:11434/api/generate',
1078
+ buildBody: (prompt) => ({ model: 'llama3', prompt, stream: false }),
1079
+ extractResponse: (json) => json.response,
1080
+ });
1081
+
1082
+ // Ollama via CLI
1083
+ const ollamaCli = createCliAdapter({
1084
+ command: 'ollama',
1085
+ args: (prompt) => ['run', 'llama3', prompt],
1086
+ });
1087
+
1088
+ // Simon Willison's llm CLI (stdin mode for long prompts)
1089
+ const llm = createCliAdapter({
1090
+ command: 'llm',
1091
+ args: () => ['--no-stream'],
1092
+ stdin: true,
1093
+ });
1094
+
1095
+ // Custom Python script
1096
+ const custom = createCliAdapter({
1097
+ command: 'python',
1098
+ args: (prompt) => ['scripts/infer.py', '--json', prompt],
1099
+ cwd: '/path/to/project',
1100
+ env: { MODEL: 'gpt-4o' },
1101
+ timeout: 60_000,
1102
+ });
1103
+ ```
1104
+
1105
+ **`createCliAdapter(options)`** — spawns a child process, captures stdout:
1106
+ | Option | Type | Description |
1107
+ |--------|------|-------------|
1108
+ | `command` | `string` | Executable to run (`ollama`, `llm`, `python`, `gh`, …) |
1109
+ | `args` | `(prompt) => string[]` | Build argument list from the prompt |
1110
+ | `stdin` | `boolean` | Pipe prompt via stdin instead of args (default: `false`) |
1111
+ | `timeout` | `number` | Kill after N ms (default: `30000`) |
1112
+ | `cwd` | `string` | Working directory |
1113
+ | `env` | `Record<string, string>` | Extra environment variables |
1114
+
1115
+ **`createHttpAdapter(options)`** — POSTs to an HTTP endpoint:
1116
+ | Option | Type | Description |
1117
+ |--------|------|-------------|
1118
+ | `url` | `string` | Endpoint URL |
1119
+ | `headers` | `Record<string, string>` | Request headers |
1120
+ | `buildBody` | `(prompt) => object` | Build request body (default: `{ prompt }`) |
1121
+ | `extractResponse` | `(json) => string` | Extract text from response JSON |
1122
+ | `timeout` | `number` | Abort after N ms (default: `30000`) |
1123
+
1124
+
1125
+ ### Three APIs: Build → Suggest → Apply
1126
+
1127
+ ```typescript
1128
+ import { createLiveGraph } from 'yaml-flow/continuous-event-graph';
1129
+ import { buildInferencePrompt, inferCompletions, applyInferences } from 'yaml-flow/inference';
1130
+
1131
+ let live = createLiveGraph(config);
1132
+
1133
+ // 1. BUILD: Generate the prompt (pure, sync)
1134
+ const prompt = buildInferencePrompt(live, {
1135
+ context: 'Deployment log: "Deployment Succeeded", health check: HTTP 200',
1136
+ });
1137
+
1138
+ // 2. SUGGEST: Ask the LLM (async)
1139
+ const result = await inferCompletions(live, adapter, {
1140
+ threshold: 0.8,
1141
+ context: 'Deployment log: ...',
1142
+ });
1143
+ result.suggestions; // [{ taskName, confidence, reasoning, detectionMethod }]
1144
+
1145
+ // 3. APPLY: Accept high-confidence suggestions (pure, sync)
1146
+ live = applyInferences(live, result, 0.8); // only applies >= 80% confidence
1147
+ ```
1148
+
1149
+ ### One-Shot Convenience
1150
+
1151
+ ```typescript
1152
+ import { inferAndApply } from 'yaml-flow/inference';
1153
+
1154
+ const { live: updated, applied, skipped, inference } = await inferAndApply(
1155
+ live, adapter, { threshold: 0.8, context: 'deployment logs...' }
1156
+ );
1157
+
1158
+ console.log('Auto-completed:', applied.map(s => s.taskName));
1159
+ console.log('Skipped (low confidence):', skipped.map(s => `${s.taskName} (${s.confidence})`));
1160
+ ```
1161
+
1162
+ ### Inference API Reference
1163
+
1164
+ | Function | Description |
1165
+ |---|---|
1166
+ | `buildInferencePrompt(live, opts?)` | Build LLM prompt from graph state (pure, sync) |
1167
+ | `inferCompletions(live, adapter, opts?)` | Ask LLM to suggest completions (async) |
1168
+ | `applyInferences(live, result, threshold?)` | Apply suggestions above threshold (pure, sync) |
1169
+ | `inferAndApply(live, adapter, opts?)` | Infer + apply in one step (async, convenience) |
1170
+ | `createCliAdapter(opts)` | Factory: adapter that spawns a CLI command |
1171
+ | `createHttpAdapter(opts)` | Factory: adapter that POSTs to an HTTP endpoint |
1172
+
1173
+ ### Inference Types
1174
+
1175
+ | Type | Description |
1176
+ |---|---|
1177
+ | `InferenceAdapter` | `{ analyze(prompt: string): Promise<string> }` — pluggable LLM bridge |
1178
+ | `InferenceHints` | `criteria`, `keywords`, `suggestedChecks`, `autoDetectable` on a TaskConfig |
1179
+ | `InferenceOptions` | `threshold`, `scope`, `context`, `systemPrompt` |
1180
+ | `InferenceResult` | `suggestions[]`, `promptUsed`, `rawResponse`, `analyzedNodes` |
1181
+ | `InferredCompletion` | `taskName`, `confidence`, `reasoning`, `detectionMethod: 'llm-inferred'` |
1182
+ | `InferAndApplyResult` | `live`, `inference`, `applied[]`, `skipped[]` |
1183
+ | `CliAdapterOptions` | `command`, `args`, `stdin`, `timeout`, `cwd`, `env` |
1184
+ | `HttpAdapterOptions` | `url`, `headers`, `buildBody`, `extractResponse`, `timeout` |
1185
+
1186
+ ---
1187
+
842
1188
  ## Loading & Exporting Graph Configs
843
1189
 
844
1190
  ```typescript
@@ -886,6 +1232,23 @@ import type { BatchOptions, BatchResult, BatchItemResult, BatchProgress } from '
886
1232
  // Config utilities
887
1233
  import { resolveVariables, resolveConfigTemplates } from 'yaml-flow/config';
888
1234
 
1235
+ // Continuous Event Graph (long-lived evolving workflows)
1236
+ import {
1237
+ createLiveGraph, applyEvent, addNode, removeNode,
1238
+ addRequires, removeRequires, addProvides, removeProvides,
1239
+ injectTokens, drainTokens, schedule, inspect,
1240
+ resetNode, disableNode, enableNode, getNode,
1241
+ snapshot, restore,
1242
+ getUnreachableTokens, getUnreachableNodes,
1243
+ getUpstream, getDownstream,
1244
+ } from 'yaml-flow/continuous-event-graph';
1245
+
1246
+ // LLM Inference (AI-assisted completion detection)
1247
+ import {
1248
+ buildInferencePrompt, inferCompletions, applyInferences, inferAndApply,
1249
+ } from 'yaml-flow/inference';
1250
+ import type { InferenceAdapter, InferenceResult, InferenceOptions } from 'yaml-flow/inference';
1251
+
889
1252
  // Backward compatibility (v1 names → v2)
890
1253
  import { FlowEngine, createEngine } from 'yaml-flow'; // aliases for StepMachine, createStepMachine
891
1254
  ```
@@ -957,6 +1320,11 @@ See the [examples/](./examples) directory:
957
1320
  | [Batch Tickets](./examples/batch/batch-step-machine.ts) | Batch | Concurrent processing, progress tracking |
958
1321
  | [URL Pipeline](./examples/graph-of-graphs/url-processing-pipeline.ts) | Graph-of-Graphs | Outer event-graph → batch × inner event-graph per item |
959
1322
  | [Multi-Stage ETL](./examples/graph-of-graphs/multi-stage-etl.ts) | Graph-of-Graphs | Mixed modes: event-graph outer → step-machine + event-graph subs |
1323
+ | [Stock Dashboard](./examples/continuous-event-graph/stock-dashboard.ts) | Continuous Event Graph | Runtime mutations, token drain, upstream/downstream, snapshot |
1324
+ | [Azure Deployment](./examples/inference/azure-deployment.ts) | Inference | LLM analyzes deployment logs, auto-completes checkpoints |
1325
+ | [Data Pipeline](./examples/inference/data-pipeline.ts) | Inference | Iterative inference — evidence arrives in waves |
1326
+ | [Pluggable Adapters](./examples/inference/pluggable-adapters.ts) | Inference | OpenAI, Anthropic, Azure, CLI, HTTP adapter factories |
1327
+ | [Copilot CLI](./examples/inference/copilot-cli.ts) | Inference | GitHub Copilot CLI as inference adapter via `createCliAdapter` |
960
1328
  | [Order Processing](./examples/flows/order-processing.yaml) | Step Machine | YAML flow definition |
961
1329
  | [Browser Demo](./examples/browser/index.html) | Step Machine | In-browser usage |
962
1330
 
@@ -1,191 +1,6 @@
1
+ import { G as GraphConfig, c as ExecutionState, S as SchedulerResult, e as GraphEvent, T as TaskConfig, l as TaskState, g as StuckDetection, C as CompletionStrategy, a as ConflictStrategy, b as ExecutionMode, d as ExecutionStatus, m as TaskStatus } from './types-DAI_a2as.js';
1
2
  import { e as StepFlowConfig } from './types-FZ_eyErS.js';
2
3
 
3
- /**
4
- * Event Graph — Core Types
5
- *
6
- * Type definitions for the stateless event-graph engine.
7
- * Pure: f(state, event) → newState
8
- */
9
- interface GraphConfig {
10
- id?: string;
11
- settings: GraphSettings;
12
- tasks: Record<string, TaskConfig>;
13
- }
14
- interface GraphSettings {
15
- /** Completion strategy */
16
- completion: CompletionStrategy;
17
- /** Conflict resolution strategy */
18
- conflict_strategy?: ConflictStrategy;
19
- /** Execution mode */
20
- execution_mode?: ExecutionMode;
21
- /** Goal outputs — used with 'goal-reached' completion */
22
- goal?: string[];
23
- /** Max total scheduler iterations (safety limit, default: 1000) */
24
- max_iterations?: number;
25
- /** Timeout in ms (declared for drivers, not enforced by pure engine) */
26
- timeout_ms?: number;
27
- }
28
- interface TaskConfig {
29
- /** What this task needs to become eligible */
30
- requires?: string[];
31
- /** What this task produces on successful completion */
32
- provides: string[];
33
- /** Conditional provides based on handler result */
34
- on?: Record<string, string[]>;
35
- /** Tokens to inject into available outputs on failure */
36
- on_failure?: string[];
37
- /** Task execution method (informational — driver concern) */
38
- method?: string;
39
- /** Arbitrary task configuration (driver concern) */
40
- config?: Record<string, unknown>;
41
- /** Task priority (higher = preferred in conflict resolution) */
42
- priority?: number;
43
- /** Estimated duration in ms (used by duration-first strategy) */
44
- estimatedDuration?: number;
45
- /** Estimated cost (used by cost-optimized strategy) */
46
- estimatedCost?: number;
47
- /** Resource requirements (used by resource-aware strategy) */
48
- estimatedResources?: Record<string, number>;
49
- /** Retry configuration */
50
- retry?: TaskRetryConfig;
51
- /** Repeatable task configuration */
52
- repeatable?: boolean | RepeatableConfig;
53
- /** Circuit breaker: max executions before breaking */
54
- circuit_breaker?: TaskCircuitBreakerConfig;
55
- /** Description */
56
- description?: string;
57
- }
58
- interface TaskRetryConfig {
59
- max_attempts: number;
60
- delay_ms?: number;
61
- backoff_multiplier?: number;
62
- }
63
- interface RepeatableConfig {
64
- /** Max times this task can repeat (undefined = unlimited) */
65
- max?: number;
66
- }
67
- interface TaskCircuitBreakerConfig {
68
- /** Max executions before injecting break tokens */
69
- max_executions: number;
70
- /** Tokens to inject when breaker trips */
71
- on_break: string[];
72
- }
73
- interface ExecutionState {
74
- /** Current status of the execution */
75
- status: ExecutionStatus;
76
- /** Task states keyed by task name */
77
- tasks: Record<string, TaskState>;
78
- /** Tokens currently available in the system */
79
- availableOutputs: string[];
80
- /** Stuck detection result */
81
- stuckDetection: StuckDetection;
82
- /** Last update timestamp */
83
- lastUpdated: string;
84
- /** Execution ID for this run */
85
- executionId: string | null;
86
- /** Execution configuration */
87
- executionConfig: ExecutionConfig;
88
- }
89
- interface ExecutionConfig {
90
- executionMode: ExecutionMode;
91
- conflictStrategy: ConflictStrategy;
92
- completionStrategy: CompletionStrategy;
93
- }
94
- interface TaskState {
95
- status: TaskStatus;
96
- executionCount: number;
97
- retryCount: number;
98
- lastEpoch: number;
99
- startedAt?: string;
100
- completedAt?: string;
101
- failedAt?: string;
102
- lastUpdated?: string;
103
- error?: string;
104
- messages?: TaskMessage[];
105
- progress?: number | null;
106
- }
107
- interface TaskMessage {
108
- message: string;
109
- timestamp: string;
110
- status: string;
111
- }
112
- interface StuckDetection {
113
- is_stuck: boolean;
114
- stuck_description: string | null;
115
- outputs_unresolvable: string[];
116
- tasks_blocked: string[];
117
- }
118
- type GraphEvent = TaskStartedEvent | TaskCompletedEvent | TaskFailedEvent | TaskProgressEvent | InjectTokensEvent | AgentActionEvent | TaskCreationEvent;
119
- interface TaskStartedEvent {
120
- type: 'task-started';
121
- taskName: string;
122
- timestamp: string;
123
- executionId?: string;
124
- }
125
- interface TaskCompletedEvent {
126
- type: 'task-completed';
127
- taskName: string;
128
- /** Handler result key — used for conditional routing via `on` */
129
- result?: string;
130
- /** Data payload from task execution */
131
- data?: Record<string, unknown>;
132
- timestamp: string;
133
- executionId?: string;
134
- }
135
- interface TaskFailedEvent {
136
- type: 'task-failed';
137
- taskName: string;
138
- error: string;
139
- timestamp: string;
140
- executionId?: string;
141
- }
142
- interface TaskProgressEvent {
143
- type: 'task-progress';
144
- taskName: string;
145
- message?: string;
146
- progress?: number;
147
- timestamp: string;
148
- executionId?: string;
149
- }
150
- interface InjectTokensEvent {
151
- type: 'inject-tokens';
152
- tokens: string[];
153
- timestamp: string;
154
- }
155
- interface AgentActionEvent {
156
- type: 'agent-action';
157
- action: 'start' | 'stop' | 'pause' | 'resume';
158
- timestamp: string;
159
- config?: Partial<ExecutionConfig>;
160
- }
161
- interface TaskCreationEvent {
162
- type: 'task-creation';
163
- taskName: string;
164
- taskConfig: TaskConfig;
165
- timestamp: string;
166
- }
167
- interface SchedulerResult {
168
- /** Tasks eligible for execution */
169
- eligibleTasks: string[];
170
- /** Whether the graph execution is complete */
171
- isComplete: boolean;
172
- /** Stuck detection result */
173
- stuckDetection: StuckDetection;
174
- /** Whether conflicts were detected */
175
- hasConflicts: boolean;
176
- /** Conflict groups: output → competing task names */
177
- conflicts: Record<string, string[]>;
178
- /** Strategy used for conflict resolution */
179
- strategy: ConflictStrategy;
180
- /** Processing log for diagnostics */
181
- processingLog: string[];
182
- }
183
- type TaskStatus = 'not-started' | 'running' | 'completed' | 'failed' | 'inactivated';
184
- type ExecutionStatus = 'created' | 'running' | 'paused' | 'stopped' | 'completed' | 'failed';
185
- type CompletionStrategy = 'all-tasks-done' | 'all-outputs-done' | 'only-resolved' | 'goal-reached' | 'manual';
186
- type ExecutionMode = 'dependency-mode' | 'eligibility-mode';
187
- type ConflictStrategy = 'alphabetical' | 'priority-first' | 'duration-first' | 'cost-optimized' | 'resource-aware' | 'random-select' | 'user-choice' | 'parallel-all' | 'skip-conflicts' | 'round-robin';
188
-
189
4
  /**
190
5
  * Event Graph — Scheduler
191
6
  *
@@ -504,4 +319,4 @@ declare const DEFAULTS: {
504
319
  readonly MAX_ITERATIONS: 1000;
505
320
  };
506
321
 
507
- export { isTaskCompleted as $, type AgentActionEvent as A, applyAll as B, COMPLETION_STRATEGIES as C, DEFAULTS as D, EXECUTION_MODES as E, computeAvailableOutputs as F, type GraphConfig as G, createDefaultTaskState as H, type InjectTokensEvent as I, createInitialExecutionState as J, detectStuckState as K, exportGraphConfig as L, type MermaidOptions as M, exportGraphConfigToFile as N, flowToMermaid as O, getAllTasks as P, getCandidateTasks as Q, getProvides as R, type SchedulerResult as S, type TaskConfig as T, getRequires as U, getTask as V, graphToMermaid as W, hasTask as X, isExecutionComplete as Y, isNonActiveTask as Z, isRepeatableTask as _, CONFLICT_STRATEGIES as a, isTaskRunning as a0, loadGraphConfig as a1, next as a2, planExecution as a3, validateGraph as a4, validateGraphConfig as a5, type RepeatableConfig as a6, type TaskCircuitBreakerConfig as a7, type TaskMessage as a8, type TaskProgressEvent as a9, type TaskRetryConfig as aa, addKeyToProvides as ab, addKeyToRequires as ac, getRepeatableMax as ad, groupTasksByProvides as ae, hasOutputConflict as af, removeKeyFromProvides as ag, removeKeyFromRequires as ah, type CompletionResult as b, type CompletionStrategy as c, type ConflictStrategy as d, EXECUTION_STATUS as e, type ExecutionConfig as f, type ExecutionMode as g, type ExecutionPlan as h, type ExecutionState as i, type ExecutionStatus as j, type ExportOptions as k, type GraphEvent as l, type GraphIssue as m, type GraphSettings as n, type GraphValidationResult as o, type IssueSeverity as p, type StuckDetection as q, TASK_STATUS as r, type TaskCompletedEvent as s, type TaskCreationEvent as t, type TaskFailedEvent as u, type TaskStartedEvent as v, type TaskState as w, type TaskStatus as x, addDynamicTask as y, apply as z };
322
+ export { isTaskCompleted as A, isTaskRunning as B, COMPLETION_STRATEGIES as C, DEFAULTS as D, EXECUTION_MODES as E, loadGraphConfig as F, type GraphIssue as G, next as H, type IssueSeverity as I, planExecution as J, validateGraph as K, validateGraphConfig as L, type MermaidOptions as M, addKeyToProvides as N, addKeyToRequires as O, getRepeatableMax as P, groupTasksByProvides as Q, hasOutputConflict as R, removeKeyFromProvides as S, TASK_STATUS as T, removeKeyFromRequires as U, CONFLICT_STRATEGIES as a, type CompletionResult as b, EXECUTION_STATUS as c, type ExecutionPlan as d, type ExportOptions as e, type GraphValidationResult as f, addDynamicTask as g, apply as h, applyAll as i, computeAvailableOutputs as j, createDefaultTaskState as k, createInitialExecutionState as l, detectStuckState as m, exportGraphConfig as n, exportGraphConfigToFile as o, flowToMermaid as p, getAllTasks as q, getCandidateTasks as r, getProvides as s, getRequires as t, getTask as u, graphToMermaid as v, hasTask as w, isExecutionComplete as x, isNonActiveTask as y, isRepeatableTask as z };