yaml-flow 3.0.0 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +44 -23
  2. package/dist/{constants-B_ftYTTE.d.ts → constants-B2zqu10b.d.ts} +7 -57
  3. package/dist/{constants-CiyHX8L-.d.cts → constants-DJZU1pwJ.d.cts} +7 -57
  4. package/dist/continuous-event-graph/index.cjs +1161 -182
  5. package/dist/continuous-event-graph/index.cjs.map +1 -1
  6. package/dist/continuous-event-graph/index.d.cts +567 -48
  7. package/dist/continuous-event-graph/index.d.ts +567 -48
  8. package/dist/continuous-event-graph/index.js +1151 -183
  9. package/dist/continuous-event-graph/index.js.map +1 -1
  10. package/dist/event-graph/index.cjs +35 -11
  11. package/dist/event-graph/index.cjs.map +1 -1
  12. package/dist/event-graph/index.d.cts +14 -5
  13. package/dist/event-graph/index.d.ts +14 -5
  14. package/dist/event-graph/index.js +34 -11
  15. package/dist/event-graph/index.js.map +1 -1
  16. package/dist/index.cjs +945 -414
  17. package/dist/index.cjs.map +1 -1
  18. package/dist/index.d.cts +5 -4
  19. package/dist/index.d.ts +5 -4
  20. package/dist/index.js +936 -415
  21. package/dist/index.js.map +1 -1
  22. package/dist/inference/index.cjs +31 -7
  23. package/dist/inference/index.cjs.map +1 -1
  24. package/dist/inference/index.d.cts +2 -2
  25. package/dist/inference/index.d.ts +2 -2
  26. package/dist/inference/index.js +31 -7
  27. package/dist/inference/index.js.map +1 -1
  28. package/dist/{types-CxJg9Jrt.d.cts → types-BwvgvlOO.d.cts} +2 -2
  29. package/dist/{types-BuEo3wVG.d.ts → types-ClRA8hzC.d.ts} +2 -2
  30. package/dist/{types-BpWrH1sf.d.cts → types-DEj7OakX.d.cts} +14 -4
  31. package/dist/{types-BpWrH1sf.d.ts → types-DEj7OakX.d.ts} +14 -4
  32. package/dist/validate-DEZ2Ymdb.d.ts +53 -0
  33. package/dist/validate-DqKTZg_o.d.cts +53 -0
  34. package/examples/batch/batch-step-machine.ts +121 -0
  35. package/examples/browser/index.html +367 -0
  36. package/examples/continuous-event-graph/live-cards-board.ts +215 -0
  37. package/examples/continuous-event-graph/live-portfolio-dashboard.ts +555 -0
  38. package/examples/continuous-event-graph/portfolio-tracker.ts +287 -0
  39. package/examples/continuous-event-graph/reactive-monitoring.ts +265 -0
  40. package/examples/continuous-event-graph/reactive-pipeline.ts +168 -0
  41. package/examples/continuous-event-graph/soc-incident-board.ts +287 -0
  42. package/examples/continuous-event-graph/stock-dashboard.ts +229 -0
  43. package/examples/event-graph/ci-cd-pipeline.ts +243 -0
  44. package/examples/event-graph/executor-diamond.ts +165 -0
  45. package/examples/event-graph/executor-pipeline.ts +161 -0
  46. package/examples/event-graph/research-pipeline.ts +137 -0
  47. package/examples/flows/ai-conversation.yaml +116 -0
  48. package/examples/flows/order-processing.yaml +143 -0
  49. package/examples/flows/simple-greeting.yaml +54 -0
  50. package/examples/graph-of-graphs/multi-stage-etl.ts +307 -0
  51. package/examples/graph-of-graphs/url-processing-pipeline.ts +254 -0
  52. package/examples/inference/azure-deployment.ts +149 -0
  53. package/examples/inference/copilot-cli.ts +138 -0
  54. package/examples/inference/data-pipeline.ts +145 -0
  55. package/examples/inference/pluggable-adapters.ts +254 -0
  56. package/examples/ingest.js +733 -0
  57. package/examples/node/ai-conversation.ts +195 -0
  58. package/examples/node/simple-greeting.ts +101 -0
  59. package/package.json +3 -2
package/examples/inference/pluggable-adapters.ts
@@ -0,0 +1,254 @@
+/**
+ * Inference Example: Pluggable LLM Adapters
+ *
+ * Shows how to create adapters for different LLM providers.
+ * Each adapter implements InferenceAdapter.analyze(prompt) → string.
+ *
+ * Run with: npx tsx examples/inference/pluggable-adapters.ts
+ */
+
+import {
+  createLiveGraph,
+  schedule,
+} from '../../src/continuous-event-graph/index.js';
+import {
+  inferCompletions,
+  applyInferences,
+  buildInferencePrompt,
+  createCliAdapter,
+  createHttpAdapter,
+} from '../../src/inference/index.js';
+import type { InferenceAdapter } from '../../src/inference/index.js';
+import type { GraphConfig } from '../../src/continuous-event-graph/types.js';
+
+// ============================================================================
+// 1. Adapter Examples (uncomment the one for your LLM provider)
+// ============================================================================
+
+/**
+ * OpenAI Adapter
+ * npm install openai
+ */
+function createOpenAIAdapter(apiKey: string, model = 'gpt-4o'): InferenceAdapter {
+  return {
+    analyze: async (prompt: string) => {
+      // import OpenAI from 'openai';
+      // const client = new OpenAI({ apiKey });
+      // const response = await client.chat.completions.create({
+      //   model,
+      //   messages: [{ role: 'user', content: prompt }],
+      //   temperature: 0.1, // low temperature for deterministic analysis
+      // });
+      // return response.choices[0].message.content ?? '[]';
+
+      // Mock for demo
+      return '[]';
+    },
+  };
+}
+
+/**
+ * Azure OpenAI Adapter
+ * npm install openai
+ */
+function createAzureOpenAIAdapter(
+  endpoint: string,
+  apiKey: string,
+  deployment: string,
+): InferenceAdapter {
+  return {
+    analyze: async (prompt: string) => {
+      // import OpenAI from 'openai';
+      // const client = new OpenAI({
+      //   apiKey,
+      //   baseURL: `${endpoint}/openai/deployments/${deployment}`,
+      //   defaultQuery: { 'api-version': '2024-02-15-preview' },
+      //   defaultHeaders: { 'api-key': apiKey },
+      // });
+      // const response = await client.chat.completions.create({
+      //   model: deployment,
+      //   messages: [{ role: 'user', content: prompt }],
+      //   temperature: 0.1,
+      // });
+      // return response.choices[0].message.content ?? '[]';
+
+      return '[]';
+    },
+  };
+}
+
+/**
+ * Anthropic (Claude) Adapter
+ * npm install @anthropic-ai/sdk
+ */
+function createAnthropicAdapter(apiKey: string, model = 'claude-sonnet-4-20250514'): InferenceAdapter {
+  return {
+    analyze: async (prompt: string) => {
+      // import Anthropic from '@anthropic-ai/sdk';
+      // const client = new Anthropic({ apiKey });
+      // const response = await client.messages.create({
+      //   model,
+      //   max_tokens: 1024,
+      //   messages: [{ role: 'user', content: prompt }],
+      // });
+      // return response.content[0].type === 'text' ? response.content[0].text : '[]';
+
+      return '[]';
+    },
+  };
+}
+
+/**
+ * Local/Custom Adapter — call any HTTP endpoint
+ */
+function createCustomAdapter(url: string, headers: Record<string, string> = {}): InferenceAdapter {
+  return {
+    analyze: async (prompt: string) => {
+      const response = await fetch(url, {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json', ...headers },
+        body: JSON.stringify({ prompt }),
+      });
+      const data = await response.json() as { response?: string };
+      return data.response ?? '[]';
+    },
+  };
+}
+
+// ============================================================================
+// 1b. Built-in Adapter Factories (no boilerplate needed)
+// ============================================================================
+
+/**
+ * Ollama via CLI — runs models locally
+ * Install: https://ollama.com
+ */
+const ollamaCliAdapter = createCliAdapter({
+  command: 'ollama',
+  args: (prompt) => ['run', 'llama3', prompt],
+});
+
+/**
+ * Ollama via HTTP — same model, HTTP interface
+ * Start with: ollama serve
+ */
+const ollamaHttpAdapter = createHttpAdapter({
+  url: 'http://localhost:11434/api/generate',
+  buildBody: (prompt) => ({ model: 'llama3', prompt, stream: false }),
+  extractResponse: (json) => json.response as string,
+});
+
+/**
+ * llm CLI — Simon Willison's LLM tool
+ * Install: pip install llm
+ */
+const llmCliAdapter = createCliAdapter({
+  command: 'llm',
+  args: () => ['--no-stream'],
+  stdin: true, // pipe prompt via stdin (better for long prompts)
+});
+
+/**
+ * GitHub Copilot CLI — use gh copilot for inference
+ * Install: gh extension install github/gh-copilot
+ */
+const ghCopilotAdapter = createCliAdapter({
+  command: 'gh',
+  args: (prompt) => ['copilot', 'suggest', '-t', 'shell', prompt],
+  timeout: 30_000,
+});
+
+/**
+ * Custom script adapter — run your own wrapper script
+ * The script receives the prompt as argument (or stdin) and prints JSON to stdout
+ */
+const customScriptAdapter = createCliAdapter({
+  command: 'python',
+  args: (prompt) => ['scripts/infer.py', '--json', prompt],
+  cwd: '/path/to/project',
+  env: { MODEL: 'gpt-4o', TEMPERATURE: '0.1' },
+  timeout: 60_000,
+});
+
+// ============================================================================
+// 2. Demo: Use the prompt builder standalone
+// ============================================================================
+
+const config: GraphConfig = {
+  settings: { completion: 'all-tasks' },
+  tasks: {
+    'code-reviewed': {
+      provides: ['review-done'],
+      description: 'Code review completed by team lead',
+      inference: {
+        criteria: 'PR approved with at least 2 approvals and no open comments',
+        keywords: ['pull-request', 'code-review', 'approval'],
+        suggestedChecks: ['check PR status for "approved"', 'verify no open comments'],
+        autoDetectable: true,
+      },
+    },
+    'tests-passed': {
+      requires: ['review-done'],
+      provides: ['tests-green'],
+      description: 'CI pipeline tests all passing',
+      inference: {
+        criteria: 'All CI checks green, code coverage above 80%',
+        keywords: ['ci', 'tests', 'coverage', 'pipeline'],
+        suggestedChecks: ['check CI status', 'verify coverage report'],
+        autoDetectable: true,
+      },
+    },
+    'deployed-staging': {
+      requires: ['tests-green'],
+      provides: ['staging-live'],
+      description: 'Deployed to staging environment',
+      inference: {
+        criteria: 'Staging URL returns HTTP 200 with correct version',
+        keywords: ['staging', 'deployment', 'version'],
+        autoDetectable: true,
+      },
+    },
+  },
+};
+
+async function main() {
+  const live = createLiveGraph(config);
+
+  // Just build the prompt — useful for debugging or using with your own LLM code
+  const prompt = buildInferencePrompt(live, {
+    context: 'PR #142: 3 approvals, 0 open comments. CI: all 47 tests pass, coverage 84%.',
+  });
+
+  console.log('=== Generated LLM Prompt ===');
+  console.log(prompt);
+  console.log('\n=== End Prompt ===');
+  console.log(`\nPrompt length: ${prompt.length} characters`);
+  console.log('This prompt can be sent to any LLM provider.');
+
+  // Demo: use a mock adapter to show the full flow
+  const mockAdapter: InferenceAdapter = {
+    analyze: async () => JSON.stringify([
+      { taskName: 'code-reviewed', confidence: 0.97, reasoning: '3 approvals, 0 open comments — PR is clearly approved.' },
+      { taskName: 'tests-passed', confidence: 0.92, reasoning: '47/47 tests pass, coverage 84% exceeds 80% threshold.' },
+    ]),
+  };
+
+  const result = await inferCompletions(live, mockAdapter, {
+    context: 'PR #142: 3 approvals, 0 open comments. CI: all 47 tests pass, coverage 84%.',
+  });
+
+  console.log('\n=== Inference Results ===');
+  for (const s of result.suggestions) {
+    console.log(`  ${s.taskName}: ${(s.confidence * 100).toFixed(0)}% — ${s.reasoning}`);
+  }

+  // Apply above threshold
+  const updated = applyInferences(live, result, 0.9);
+  console.log('\n=== After Applying (threshold 90%) ===');
+  for (const [name, state] of Object.entries(updated.state.tasks)) {
+    console.log(`  ${name}: ${state.status}`);
+  }
+  console.log('  Eligible:', schedule(updated).eligible);
+}
+
+main().catch(console.error);