yaml-flow 2.4.0 → 2.5.0

@@ -0,0 +1,229 @@
+ import { L as LiveGraph } from '../types-C2lOwquM.cjs';
+ import '../types-DAI_a2as.cjs';
+
+ /**
+ * Inference — Types
+ *
+ * Type definitions for the LLM inference layer.
+ * Pluggable adapter pattern: yaml-flow never calls an LLM directly.
+ * The caller provides an InferenceAdapter that talks to their LLM of choice.
+ */
+
+ /**
+ * The caller implements this to connect any LLM provider.
+ * yaml-flow builds the prompt; the adapter sends it and returns the raw response.
+ */
+ interface InferenceAdapter {
+ /** Send a prompt to an LLM and return the raw text response */
+ analyze(prompt: string): Promise<string>;
+ }
+ /**
+ * Optional inference metadata on a TaskConfig.
+ * Tells the LLM what to look for when judging completion.
+ */
+ interface InferenceHints {
+ /** Human-readable completion criteria (e.g., "Azure infrastructure setup completed") */
+ criteria?: string;
+ /** Keywords to help the LLM understand the domain */
+ keywords?: string[];
+ /** Suggested checks for verification (e.g., ["scan logs for 'Deployment Succeeded'"]) */
+ suggestedChecks?: string[];
+ /** Whether the LLM should attempt to auto-detect completion for this node */
+ autoDetectable?: boolean;
+ }
+ interface InferenceOptions {
+ /** Only return suggestions above this confidence threshold (default: 0.5) */
+ threshold?: number;
+ /** Only analyze these specific nodes (default: all non-completed autoDetectable nodes) */
+ scope?: string[];
+ /** Additional context to inject into the prompt (e.g., deployment logs, test output) */
+ context?: string;
+ /** Custom system prompt prefix (optional — uses a sensible default) */
+ systemPrompt?: string;
+ }
+ interface InferenceResult {
+ /** Individual suggestions for node completions */
+ suggestions: InferredCompletion[];
+ /** The prompt that was sent to the LLM (for audit/debug) */
+ promptUsed: string;
+ /** The raw text response from the LLM */
+ rawResponse: string;
+ /** Nodes that were analyzed */
+ analyzedNodes: string[];
+ }
+ interface InferredCompletion {
+ /** The task/node name */
+ taskName: string;
+ /** Confidence score from the LLM (0.0 - 1.0) */
+ confidence: number;
+ /** LLM's reasoning for why it thinks this node is complete */
+ reasoning: string;
+ /** Always 'llm-inferred' — distinguishes from manual/automated completions */
+ detectionMethod: 'llm-inferred';
+ }
+ interface InferAndApplyResult {
+ /** The updated LiveGraph with inferred completions applied */
+ live: LiveGraph;
+ /** The full inference result (including suggestions below threshold) */
+ inference: InferenceResult;
+ /** Only the suggestions that were actually applied (above threshold) */
+ applied: InferredCompletion[];
+ /** Suggestions that were skipped (below threshold) */
+ skipped: InferredCompletion[];
+ }
+
+ /**
+ * Inference — Core
+ *
+ * LLM inference layer for continuous-event-graph.
+ * Pluggable adapter pattern: yaml-flow builds the prompt and parses the
+ * response; the caller provides the LLM via an InferenceAdapter.
+ *
+ * Core pattern:
+ * buildInferencePrompt(live) → prompt string (pure, sync)
+ * inferCompletions(live, adapter, opts) → InferenceResult (async, calls LLM)
+ * applyInferences(live, result, thresh) → LiveGraph (pure, sync)
+ * inferAndApply(live, adapter, opts) → InferAndApplyResult (async, convenience)
+ */
+
+ /**
+ * Build an LLM prompt from the current LiveGraph state.
+ * Includes only nodes that are:
+ * - Not yet completed
+ * - Have `inference.autoDetectable` set to true (or are in scope)
+ *
+ * Pure function — no side effects.
+ */
+ declare function buildInferencePrompt(live: LiveGraph, options?: InferenceOptions): string;
+ /**
+ * Ask an LLM to analyze the current graph state and suggest completions.
+ *
+ * Builds a prompt from the LiveGraph, sends it through the adapter,
+ * parses the structured response, and returns an InferenceResult.
+ */
+ declare function inferCompletions(live: LiveGraph, adapter: InferenceAdapter, options?: InferenceOptions): Promise<InferenceResult>;
+ /**
+ * Apply inferred completions to a LiveGraph.
+ * Only applies suggestions at or above the given confidence threshold.
+ *
+ * Under the hood, this fires `task-started` + `task-completed` events
+ * for each accepted suggestion (if the task isn't already running/completed).
+ *
+ * Pure function — returns a new LiveGraph.
+ */
+ declare function applyInferences(live: LiveGraph, result: InferenceResult, threshold?: number): LiveGraph;
+ /**
+ * Convenience: infer completions and apply them in one step.
+ * Returns the updated LiveGraph + full audit trail of what was inferred vs applied.
+ */
+ declare function inferAndApply(live: LiveGraph, adapter: InferenceAdapter, options?: InferenceOptions): Promise<InferAndApplyResult>;
+
+ /**
+ * Inference — Built-in Adapter Factories
+ *
+ * Ready-made adapter constructors for common LLM interfaces.
+ * Each returns an InferenceAdapter.
+ *
+ * CLI adapters spawn a child process and capture stdout.
+ * HTTP adapters POST to an endpoint and read the response.
+ */
+
+ interface CliAdapterOptions {
+ /** The command to execute (e.g., 'gh', 'ollama', 'llm') */
+ command: string;
+ /**
+ * Arguments builder: receives the prompt and returns the args array.
+ * The prompt is passed as an argument — NOT via stdin — unless you override.
+ *
+ * @example gh copilot: (prompt) => ['copilot', 'suggest', '-t', 'shell', prompt]
+ * @example ollama: (prompt) => ['run', 'llama3', prompt]
+ * @example llm cli: (prompt) => ['--model', 'gpt-4o', prompt]
+ */
+ args: (prompt: string) => string[];
+ /** Max execution time in ms (default: 60000) */
+ timeout?: number;
+ /** Working directory for the child process */
+ cwd?: string;
+ /** Environment variables to pass to the child process */
+ env?: Record<string, string>;
+ /**
+ * If true, pass the prompt via stdin instead of as a CLI argument.
+ * Useful for long prompts that exceed shell argument limits.
+ * Default: false
+ */
+ stdin?: boolean;
+ }
+ /**
+ * Create an InferenceAdapter that executes a local CLI command.
+ * The prompt is passed as a CLI argument (or via stdin if opts.stdin=true).
+ * stdout is captured as the LLM response.
+ *
+ * @example
+ * // GitHub Copilot CLI
+ * const adapter = createCliAdapter({
+ * command: 'gh',
+ * args: (prompt) => ['copilot', 'suggest', '-t', 'shell', prompt],
+ * });
+ *
+ * @example
+ * // Ollama (local LLM)
+ * const adapter = createCliAdapter({
+ * command: 'ollama',
+ * args: (prompt) => ['run', 'llama3', prompt],
+ * });
+ *
+ * @example
+ * // Simon Willison's llm CLI
+ * const adapter = createCliAdapter({
+ * command: 'llm',
+ * args: (prompt) => ['--model', 'gpt-4o', prompt],
+ * });
+ *
+ * @example
+ * // Any script (stdin mode for long prompts)
+ * const adapter = createCliAdapter({
+ * command: 'python',
+ * args: () => ['my_llm_script.py'],
+ * stdin: true,
+ * });
+ */
+ declare function createCliAdapter(opts: CliAdapterOptions): InferenceAdapter;
+ interface HttpAdapterOptions {
+ /** The endpoint URL to POST to */
+ url: string;
+ /** Additional headers (Authorization, etc.) */
+ headers?: Record<string, string>;
+ /**
+ * Build the request body from the prompt.
+ * Default: `{ prompt }`
+ */
+ buildBody?: (prompt: string) => unknown;
+ /**
+ * Extract the response text from the parsed JSON response.
+ * Default: `(json) => json.response ?? json.text ?? json.content ?? JSON.stringify(json)`
+ */
+ extractResponse?: (json: Record<string, unknown>) => string;
+ /** Request timeout in ms (default: 60000) */
+ timeout?: number;
+ }
+ /**
+ * Create an InferenceAdapter that POSTs to an HTTP endpoint.
+ *
+ * @example
+ * // Ollama HTTP API
+ * const adapter = createHttpAdapter({
+ * url: 'http://localhost:11434/api/generate',
+ * buildBody: (prompt) => ({ model: 'llama3', prompt, stream: false }),
+ * extractResponse: (json) => json.response as string,
+ * });
+ *
+ * @example
+ * // Custom API with auth
+ * const adapter = createHttpAdapter({
+ * url: 'https://my-llm.example.com/analyze',
+ * headers: { Authorization: `Bearer ${process.env.API_KEY}` },
+ * });
+ */
+ declare function createHttpAdapter(opts: HttpAdapterOptions): InferenceAdapter;
+
+ export { type CliAdapterOptions, type HttpAdapterOptions, type InferAndApplyResult, type InferenceAdapter, type InferenceHints, type InferenceOptions, type InferenceResult, type InferredCompletion, applyInferences, buildInferencePrompt, createCliAdapter, createHttpAdapter, inferAndApply, inferCompletions };
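For orientation, a minimal usage sketch built only from the signatures declared above: it points the new inference API at a local Ollama HTTP endpoint and applies only high-confidence suggestions. The import paths, model name, endpoint, threshold, and context string are illustrative assumptions, not taken from the package.

    import { createHttpAdapter, inferAndApply } from 'yaml-flow/inference'; // assumed subpath export
    import type { LiveGraph } from 'yaml-flow';                             // assumed root export

    // Adapter that POSTs the prompt to a local Ollama instance (model name is an assumption).
    const adapter = createHttpAdapter({
      url: 'http://localhost:11434/api/generate',
      buildBody: (prompt) => ({ model: 'llama3', prompt, stream: false }),
      extractResponse: (json) => json.response as string,
    });

    // Ask the LLM which autoDetectable tasks look finished and apply only confident suggestions.
    async function syncGraph(live: LiveGraph): Promise<LiveGraph> {
      const { live: updated, applied, skipped } = await inferAndApply(live, adapter, {
        threshold: 0.8, // stricter than the documented 0.5 default
        context: 'CI log excerpt: "Deployment Succeeded"', // extra evidence injected into the prompt
      });
      console.log('applied:', applied.map((s) => `${s.taskName} (${s.confidence})`));
      console.log('skipped:', skipped.map((s) => s.taskName));
      return updated;
    }
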
@@ -0,0 +1,229 @@
+ import { L as LiveGraph } from '../types-mS_pPftm.js';
+ import '../types-DAI_a2as.js';
+
+ /**
+ * Inference — Types
+ *
+ * Type definitions for the LLM inference layer.
+ * Pluggable adapter pattern: yaml-flow never calls an LLM directly.
+ * The caller provides an InferenceAdapter that talks to their LLM of choice.
+ */
+
+ /**
+ * The caller implements this to connect any LLM provider.
+ * yaml-flow builds the prompt; the adapter sends it and returns the raw response.
+ */
+ interface InferenceAdapter {
+ /** Send a prompt to an LLM and return the raw text response */
+ analyze(prompt: string): Promise<string>;
+ }
+ /**
+ * Optional inference metadata on a TaskConfig.
+ * Tells the LLM what to look for when judging completion.
+ */
+ interface InferenceHints {
+ /** Human-readable completion criteria (e.g., "Azure infrastructure setup completed") */
+ criteria?: string;
+ /** Keywords to help the LLM understand the domain */
+ keywords?: string[];
+ /** Suggested checks for verification (e.g., ["scan logs for 'Deployment Succeeded'"]) */
+ suggestedChecks?: string[];
+ /** Whether the LLM should attempt to auto-detect completion for this node */
+ autoDetectable?: boolean;
+ }
+ interface InferenceOptions {
+ /** Only return suggestions above this confidence threshold (default: 0.5) */
+ threshold?: number;
+ /** Only analyze these specific nodes (default: all non-completed autoDetectable nodes) */
+ scope?: string[];
+ /** Additional context to inject into the prompt (e.g., deployment logs, test output) */
+ context?: string;
+ /** Custom system prompt prefix (optional — uses a sensible default) */
+ systemPrompt?: string;
+ }
+ interface InferenceResult {
+ /** Individual suggestions for node completions */
+ suggestions: InferredCompletion[];
+ /** The prompt that was sent to the LLM (for audit/debug) */
+ promptUsed: string;
+ /** The raw text response from the LLM */
+ rawResponse: string;
+ /** Nodes that were analyzed */
+ analyzedNodes: string[];
+ }
+ interface InferredCompletion {
+ /** The task/node name */
+ taskName: string;
+ /** Confidence score from the LLM (0.0 - 1.0) */
+ confidence: number;
+ /** LLM's reasoning for why it thinks this node is complete */
+ reasoning: string;
+ /** Always 'llm-inferred' — distinguishes from manual/automated completions */
+ detectionMethod: 'llm-inferred';
+ }
+ interface InferAndApplyResult {
+ /** The updated LiveGraph with inferred completions applied */
+ live: LiveGraph;
+ /** The full inference result (including suggestions below threshold) */
+ inference: InferenceResult;
+ /** Only the suggestions that were actually applied (above threshold) */
+ applied: InferredCompletion[];
+ /** Suggestions that were skipped (below threshold) */
+ skipped: InferredCompletion[];
+ }
+
+ /**
+ * Inference — Core
+ *
+ * LLM inference layer for continuous-event-graph.
+ * Pluggable adapter pattern: yaml-flow builds the prompt and parses the
+ * response; the caller provides the LLM via an InferenceAdapter.
+ *
+ * Core pattern:
+ * buildInferencePrompt(live) → prompt string (pure, sync)
+ * inferCompletions(live, adapter, opts) → InferenceResult (async, calls LLM)
+ * applyInferences(live, result, thresh) → LiveGraph (pure, sync)
+ * inferAndApply(live, adapter, opts) → InferAndApplyResult (async, convenience)
+ */
+
+ /**
+ * Build an LLM prompt from the current LiveGraph state.
+ * Includes only nodes that are:
+ * - Not yet completed
+ * - Have `inference.autoDetectable` set to true (or are in scope)
+ *
+ * Pure function — no side effects.
+ */
+ declare function buildInferencePrompt(live: LiveGraph, options?: InferenceOptions): string;
+ /**
+ * Ask an LLM to analyze the current graph state and suggest completions.
+ *
+ * Builds a prompt from the LiveGraph, sends it through the adapter,
+ * parses the structured response, and returns an InferenceResult.
+ */
+ declare function inferCompletions(live: LiveGraph, adapter: InferenceAdapter, options?: InferenceOptions): Promise<InferenceResult>;
+ /**
+ * Apply inferred completions to a LiveGraph.
+ * Only applies suggestions at or above the given confidence threshold.
+ *
+ * Under the hood, this fires `task-started` + `task-completed` events
+ * for each accepted suggestion (if the task isn't already running/completed).
+ *
+ * Pure function — returns a new LiveGraph.
+ */
+ declare function applyInferences(live: LiveGraph, result: InferenceResult, threshold?: number): LiveGraph;
+ /**
+ * Convenience: infer completions and apply them in one step.
+ * Returns the updated LiveGraph + full audit trail of what was inferred vs applied.
+ */
+ declare function inferAndApply(live: LiveGraph, adapter: InferenceAdapter, options?: InferenceOptions): Promise<InferAndApplyResult>;
+
+ /**
+ * Inference — Built-in Adapter Factories
+ *
+ * Ready-made adapter constructors for common LLM interfaces.
+ * Each returns an InferenceAdapter.
+ *
+ * CLI adapters spawn a child process and capture stdout.
+ * HTTP adapters POST to an endpoint and read the response.
+ */
+
+ interface CliAdapterOptions {
+ /** The command to execute (e.g., 'gh', 'ollama', 'llm') */
+ command: string;
+ /**
+ * Arguments builder: receives the prompt and returns the args array.
+ * The prompt is passed as an argument — NOT via stdin — unless you override.
+ *
+ * @example gh copilot: (prompt) => ['copilot', 'suggest', '-t', 'shell', prompt]
+ * @example ollama: (prompt) => ['run', 'llama3', prompt]
+ * @example llm cli: (prompt) => ['--model', 'gpt-4o', prompt]
+ */
+ args: (prompt: string) => string[];
+ /** Max execution time in ms (default: 60000) */
+ timeout?: number;
+ /** Working directory for the child process */
+ cwd?: string;
+ /** Environment variables to pass to the child process */
+ env?: Record<string, string>;
+ /**
+ * If true, pass the prompt via stdin instead of as a CLI argument.
+ * Useful for long prompts that exceed shell argument limits.
+ * Default: false
+ */
+ stdin?: boolean;
+ }
+ /**
+ * Create an InferenceAdapter that executes a local CLI command.
+ * The prompt is passed as a CLI argument (or via stdin if opts.stdin=true).
+ * stdout is captured as the LLM response.
+ *
+ * @example
+ * // GitHub Copilot CLI
+ * const adapter = createCliAdapter({
+ * command: 'gh',
+ * args: (prompt) => ['copilot', 'suggest', '-t', 'shell', prompt],
+ * });
+ *
+ * @example
+ * // Ollama (local LLM)
+ * const adapter = createCliAdapter({
+ * command: 'ollama',
+ * args: (prompt) => ['run', 'llama3', prompt],
+ * });
+ *
+ * @example
+ * // Simon Willison's llm CLI
+ * const adapter = createCliAdapter({
+ * command: 'llm',
+ * args: (prompt) => ['--model', 'gpt-4o', prompt],
+ * });
+ *
+ * @example
+ * // Any script (stdin mode for long prompts)
+ * const adapter = createCliAdapter({
+ * command: 'python',
+ * args: () => ['my_llm_script.py'],
+ * stdin: true,
+ * });
+ */
+ declare function createCliAdapter(opts: CliAdapterOptions): InferenceAdapter;
+ interface HttpAdapterOptions {
+ /** The endpoint URL to POST to */
+ url: string;
+ /** Additional headers (Authorization, etc.) */
+ headers?: Record<string, string>;
+ /**
+ * Build the request body from the prompt.
+ * Default: `{ prompt }`
+ */
+ buildBody?: (prompt: string) => unknown;
+ /**
+ * Extract the response text from the parsed JSON response.
+ * Default: `(json) => json.response ?? json.text ?? json.content ?? JSON.stringify(json)`
+ */
+ extractResponse?: (json: Record<string, unknown>) => string;
+ /** Request timeout in ms (default: 60000) */
+ timeout?: number;
+ }
+ /**
+ * Create an InferenceAdapter that POSTs to an HTTP endpoint.
+ *
+ * @example
+ * // Ollama HTTP API
+ * const adapter = createHttpAdapter({
+ * url: 'http://localhost:11434/api/generate',
+ * buildBody: (prompt) => ({ model: 'llama3', prompt, stream: false }),
+ * extractResponse: (json) => json.response as string,
+ * });
+ *
+ * @example
+ * // Custom API with auth
+ * const adapter = createHttpAdapter({
+ * url: 'https://my-llm.example.com/analyze',
+ * headers: { Authorization: `Bearer ${process.env.API_KEY}` },
+ * });
+ */
+ declare function createHttpAdapter(opts: HttpAdapterOptions): InferenceAdapter;
+
+ export { type CliAdapterOptions, type HttpAdapterOptions, type InferAndApplyResult, type InferenceAdapter, type InferenceHints, type InferenceOptions, type InferenceResult, type InferredCompletion, applyInferences, buildInferencePrompt, createCliAdapter, createHttpAdapter, inferAndApply, inferCompletions };
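
And a sketch of the lower-level flow with a hand-rolled adapter, again inferred only from the declarations above: any object whose analyze(prompt) returns a Promise<string> satisfies InferenceAdapter, and inferCompletions/applyInferences can be used separately when suggestions should be reviewed before anything is marked complete. The import paths, endpoint, response shape, node name, and 0.9 threshold are hypothetical.

    import { applyInferences, inferCompletions, type InferenceAdapter } from 'yaml-flow/inference'; // assumed subpath export
    import type { LiveGraph } from 'yaml-flow';                                                     // assumed root export

    // Hand-rolled adapter: one async method that returns the raw LLM text.
    // The URL and JSON shape are placeholders for an in-house endpoint.
    const reviewAdapter: InferenceAdapter = {
      async analyze(prompt) {
        const res = await fetch('https://llm.internal.example/complete', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ prompt }),
        });
        const json = (await res.json()) as { text?: string };
        return json.text ?? '';
      },
    };

    // Inspect suggestions first, then apply only near-certain ones; applyInferences is pure,
    // so the original LiveGraph is left untouched.
    async function reviewThenApply(live: LiveGraph): Promise<LiveGraph> {
      const result = await inferCompletions(live, reviewAdapter, { scope: ['deploy-infra'] }); // node name is hypothetical
      for (const s of result.suggestions) {
        console.log(`${s.taskName}: ${s.confidence.toFixed(2)} - ${s.reasoning}`);
      }
      return applyInferences(live, result, 0.9);
    }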