@ryanfw/prompt-orchestration-pipeline 0.0.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +415 -24
  2. package/package.json +45 -8
  3. package/src/api/files.js +48 -0
  4. package/src/api/index.js +149 -53
  5. package/src/api/validators/seed.js +141 -0
  6. package/src/cli/index.js +456 -29
  7. package/src/cli/run-orchestrator.js +39 -0
  8. package/src/cli/update-pipeline-json.js +47 -0
  9. package/src/components/DAGGrid.jsx +649 -0
  10. package/src/components/JobCard.jsx +96 -0
  11. package/src/components/JobDetail.jsx +159 -0
  12. package/src/components/JobTable.jsx +202 -0
  13. package/src/components/Layout.jsx +134 -0
  14. package/src/components/TaskFilePane.jsx +570 -0
  15. package/src/components/UploadSeed.jsx +239 -0
  16. package/src/components/ui/badge.jsx +20 -0
  17. package/src/components/ui/button.jsx +43 -0
  18. package/src/components/ui/card.jsx +20 -0
  19. package/src/components/ui/focus-styles.css +60 -0
  20. package/src/components/ui/progress.jsx +26 -0
  21. package/src/components/ui/select.jsx +27 -0
  22. package/src/components/ui/separator.jsx +6 -0
  23. package/src/config/paths.js +99 -0
  24. package/src/core/config.js +270 -9
  25. package/src/core/file-io.js +202 -0
  26. package/src/core/module-loader.js +157 -0
  27. package/src/core/orchestrator.js +275 -294
  28. package/src/core/pipeline-runner.js +95 -41
  29. package/src/core/progress.js +66 -0
  30. package/src/core/status-writer.js +331 -0
  31. package/src/core/task-runner.js +719 -73
  32. package/src/core/validation.js +120 -1
  33. package/src/lib/utils.js +6 -0
  34. package/src/llm/README.md +139 -30
  35. package/src/llm/index.js +222 -72
  36. package/src/pages/PipelineDetail.jsx +111 -0
  37. package/src/pages/PromptPipelineDashboard.jsx +223 -0
  38. package/src/providers/deepseek.js +3 -15
  39. package/src/ui/client/adapters/job-adapter.js +258 -0
  40. package/src/ui/client/bootstrap.js +120 -0
  41. package/src/ui/client/hooks/useJobDetailWithUpdates.js +619 -0
  42. package/src/ui/client/hooks/useJobList.js +50 -0
  43. package/src/ui/client/hooks/useJobListWithUpdates.js +335 -0
  44. package/src/ui/client/hooks/useTicker.js +26 -0
  45. package/src/ui/client/index.css +31 -0
  46. package/src/ui/client/index.html +18 -0
  47. package/src/ui/client/main.jsx +38 -0
  48. package/src/ui/config-bridge.browser.js +149 -0
  49. package/src/ui/config-bridge.js +149 -0
  50. package/src/ui/config-bridge.node.js +310 -0
  51. package/src/ui/dist/assets/index-BDABnI-4.js +33399 -0
  52. package/src/ui/dist/assets/style-Ks8LY8gB.css +28496 -0
  53. package/src/ui/dist/index.html +19 -0
  54. package/src/ui/endpoints/job-endpoints.js +300 -0
  55. package/src/ui/file-reader.js +216 -0
  56. package/src/ui/job-change-detector.js +83 -0
  57. package/src/ui/job-index.js +231 -0
  58. package/src/ui/job-reader.js +274 -0
  59. package/src/ui/job-scanner.js +188 -0
  60. package/src/ui/public/app.js +3 -1
  61. package/src/ui/server.js +1636 -59
  62. package/src/ui/sse-enhancer.js +149 -0
  63. package/src/ui/sse.js +204 -0
  64. package/src/ui/state-snapshot.js +252 -0
  65. package/src/ui/transformers/list-transformer.js +347 -0
  66. package/src/ui/transformers/status-transformer.js +307 -0
  67. package/src/ui/watcher.js +61 -7
  68. package/src/utils/dag.js +101 -0
  69. package/src/utils/duration.js +126 -0
  70. package/src/utils/id-generator.js +30 -0
  71. package/src/utils/jobs.js +7 -0
  72. package/src/utils/pipelines.js +44 -0
  73. package/src/utils/task-files.js +271 -0
  74. package/src/utils/ui.jsx +76 -0
  75. package/src/ui/public/index.html +0 -53
  76. package/src/ui/public/style.css +0 -341
@@ -8,7 +8,7 @@ function getSeedSchema() {
8
8
  const config = getConfig();
9
9
  return {
10
10
  type: "object",
11
- required: ["name", "data"],
11
+ required: ["name", "data", "pipeline"],
12
12
  properties: {
13
13
  name: {
14
14
  type: "string",
@@ -21,6 +21,11 @@ function getSeedSchema() {
21
21
  type: "object",
22
22
  description: "Job data payload",
23
23
  },
24
+ pipeline: {
25
+ type: "string",
26
+ enum: Object.keys(config.pipelines),
27
+ description: "Pipeline slug from registry",
28
+ },
24
29
  metadata: {
25
30
  type: "object",
26
31
  description: "Optional metadata",
@@ -98,3 +103,117 @@ export function validateSeedOrThrow(seed) {
98
103
  throw new Error(formatValidationErrors(result.errors));
99
104
  }
100
105
  }
106
+
107
/**
 * Validate a pipeline config object against the canonical shape:
 * {
 *   name: string,
 *   tasks: string[],          // at least one task name
 *   taskConfig?: { [taskName: string]: object }
 * }
 *
 * Additional top-level properties are allowed (additionalProperties: true).
 *
 * @param {object} pipeline - pipeline object to validate
 * @returns {{ valid: boolean, errors?: Array<{message: string, path: string, params?: object, keyword?: string}> }}
 */
export function validatePipeline(pipeline) {
  // Guard non-objects up front so ajv receives a sane input. Note: arrays
  // pass this guard but are rejected by the schema's `type: "object"`.
  if (!pipeline || typeof pipeline !== "object") {
    return {
      valid: false,
      errors: [
        {
          message: "Pipeline must be a valid JSON object",
          path: "",
        },
      ],
    };
  }

  const pipelineSchema = {
    type: "object",
    required: ["name", "tasks"],
    properties: {
      name: { type: "string" },
      tasks: {
        type: "array",
        items: { type: "string" },
        minItems: 1,
      },
      taskConfig: {
        type: "object",
        additionalProperties: { type: "object" },
      },
    },
    additionalProperties: true,
  };

  const validatePipelineSchema = ajv.compile(pipelineSchema);

  if (!validatePipelineSchema(pipeline)) {
    return {
      valid: false,
      errors: validatePipelineSchema.errors.map((err) => ({
        message: err.message,
        // instancePath is Ajv v7+; dataPath kept for Ajv v6 compatibility.
        path: err.instancePath || err.dataPath || "",
        params: err.params,
        keyword: err.keyword,
      })),
    };
  }

  // No per-task re-check is needed here: the schema's `items: { type: "string" }`
  // already guarantees every task entry is a string, so the previous manual
  // loop over pipeline.tasks was unreachable dead code and has been removed.
  return { valid: true };
}
184
+
185
/**
 * Render pipeline validation errors as one human-readable, multi-line message.
 *
 * @param {Array<{message: string, path?: string}>} errors - errors from validatePipeline
 * @returns {string} summary string, or a generic message when there are no errors
 */
export function formatPipelineValidationErrors(errors) {
  if (!errors?.length) {
    return "Unknown pipeline validation error";
  }

  const lines = [];
  for (const { message, path } of errors) {
    // Append the instance path only when one is present.
    const suffix = path ? ` at '${path}'` : "";
    lines.push(`- ${message}${suffix}`);
  }

  return ["Pipeline validation failed:", ...lines].join("\n");
}
202
+
203
/**
 * Validate a pipeline object, throwing an Error with a friendly message on failure.
 * Callers holding only a path to the pipeline file should read and parse the
 * file before calling this.
 *
 * @param {object} pipeline - pipeline object to validate
 * @param {string} [pathHint="pipeline.json"] - path shown in the error message
 * @throws {Error} when the pipeline fails validation
 */
export function validatePipelineOrThrow(pipeline, pathHint = "pipeline.json") {
  const { valid, errors } = validatePipeline(pipeline);
  if (valid) {
    return;
  }
  const details = formatPipelineValidationErrors(errors);
  throw new Error(`Invalid pipeline definition (${pathHint}):\n${details}`);
}
@@ -0,0 +1,6 @@
1
import { clsx } from "clsx";
import { twMerge } from "tailwind-merge";

/**
 * Merge conditional class names, letting tailwind-merge resolve
 * conflicting Tailwind utility classes (last one wins).
 *
 * @param {...any} inputs - clsx-compatible class values (strings, arrays, objects)
 * @returns {string} merged class string
 */
export function cn(...inputs) {
  const combined = clsx(inputs);
  return twMerge(combined);
}
package/src/llm/README.md CHANGED
@@ -27,17 +27,145 @@ src/
27
27
  └── anthropic.js ← Anthropic implementation
28
28
  ```
29
29
 
30
- ## Core Functions
30
+ ## Provider-Grouped Functions (Primary API)
31
31
 
32
- ### `chat(options)`
32
+ The LLM layer exposes provider-grouped functions that map to named models in the configuration registry. This is the recommended approach for task development.
33
+
34
+ ### `createLLM(options)`
35
+
36
+ Factory function that creates an interface with only provider-grouped functions.
37
+
38
+ ```javascript
39
+ import { createLLM } from "../llm/index.js";
40
+
41
+ const llm = createLLM();
42
+
43
+ // Provider-grouped functions available:
44
+ await llm.openai.gpt4({ messages: [{ role: "user", content: "Hello!" }] });
45
+ await llm.openai.gpt4Turbo({ messages: [{ role: "user", content: "Hello!" }] });
46
+ await llm.openai.gpt5({ messages: [{ role: "user", content: "Hello!" }] });
47
+
48
+ await llm.deepseek.reasoner({
49
+ messages: [{ role: "user", content: "Hello!" }],
50
+ });
51
+ await llm.deepseek.chat({ messages: [{ role: "user", content: "Hello!" }] });
52
+
53
+ await llm.anthropic.opus({ messages: [{ role: "user", content: "Hello!" }] });
54
+ await llm.anthropic.sonnet({ messages: [{ role: "user", content: "Hello!" }] });
55
+ ```
56
+
57
+ ### Task Context Usage
58
+
59
+ In task modules, `context.llm` provides only provider-grouped functions:
60
+
61
+ ```javascript
62
+ // In a task module:
63
+ export default async function analysis(context) {
64
+ const response = await context.llm.openai.gpt4({
65
+ messages: [{ role: "user", content: "Analyze this data" }],
66
+ temperature: 0.7,
67
+ });
68
+
69
+ return { analysis: response.content };
70
+ }
71
+ ```
72
+
73
+ ### Provider and Model Overrides
74
+
75
+ You can override the default provider and model:
76
+
77
+ ```javascript
78
+ // Use OpenAI provider with custom model
79
+ await llm.deepseek.reasoner({
80
+ messages: [{ role: "user", content: "Hello!" }],
81
+ provider: "openai", // Override provider
82
+ model: "gpt-4-custom", // Override model
83
+ });
84
+
85
+ // All other options are passed through
86
+ await llm.openai.gpt4({
87
+ messages: [{ role: "user", content: "Hello!" }],
88
+ temperature: 0.5,
89
+ maxTokens: 1000,
90
+ metadata: { taskId: "task-123" },
91
+ });
92
+ ```
93
+
94
+ ### Event Metadata
95
+
96
+ Provider-grouped functions automatically include alias metadata in events:
97
+
98
+ ```javascript
99
+ import { getLLMEvents } from "../llm/index.js";
100
+
101
+ const events = getLLMEvents();
102
+ events.on("llm:request:complete", (data) => {
103
+ console.log(data.metadata.alias); // "openai:gpt-4"
104
+ });
105
+ ```
106
+
107
+ ### Model Registry
108
+
109
+ The provider-grouped functions are generated from the model registry in your configuration. The default registry includes:
110
+
111
+ ```json
112
+ {
113
+ "llm": {
114
+ "models": {
115
+ "openai:gpt-4": { "provider": "openai", "model": "gpt-4" },
116
+ "openai:gpt-4-turbo": { "provider": "openai", "model": "gpt-4-turbo" },
117
+ "openai:gpt-5": { "provider": "openai", "model": "gpt-5-chat-latest" },
118
+ "deepseek:reasoner": {
119
+ "provider": "deepseek",
120
+ "model": "deepseek-reasoner"
121
+ },
122
+ "deepseek:chat": { "provider": "deepseek", "model": "deepseek-chat" },
123
+ "anthropic:opus": { "provider": "anthropic", "model": "claude-3-opus" },
124
+ "anthropic:sonnet": {
125
+ "provider": "anthropic",
126
+ "model": "claude-3-sonnet"
127
+ }
128
+ }
129
+ }
130
+ }
131
+ ```
132
+
133
+ **Function Naming:**
134
+
135
+ - Registry aliases are converted to camelCase function names
136
+ - `openai:gpt-4-turbo` → `gpt4Turbo()`
137
+ - `deepseek:reasoner` → `reasoner()`
138
+
139
+ **Custom Registry:**
140
+ You can customize the registry in your `config.json`:
141
+
142
+ ```json
143
+ {
144
+ "llm": {
145
+ "models": {
146
+ "openai:custom": { "provider": "openai", "model": "gpt-4-custom" },
147
+ "deepseek:fast": { "provider": "deepseek", "model": "deepseek-chat" }
148
+ }
149
+ }
150
+ }
151
+ ```
33
152
 
34
- Main function for LLM interactions. Supports multiple providers and models.
153
+ This will generate:
154
+
155
+ - `llm.openai.custom()`
156
+ - `llm.deepseek.fast()`
157
+
158
+ ## Legacy Functions (Deprecated)
159
+
160
+ The following functions are still available but deprecated in favor of provider-grouped functions:
161
+
162
+ ### `chat(options)`
35
163
 
36
164
  ```javascript
37
165
  import { chat } from "../llm/index.js";
38
166
 
39
167
  const response = await chat({
40
- provider: "openai", // or "deepseek", "anthropic"
168
+ provider: "openai",
41
169
  model: "gpt-5-chat-latest",
42
170
  messages: [
43
171
  { role: "system", content: "You are a helpful assistant" },
@@ -45,18 +173,14 @@ const response = await chat({
45
173
  ],
46
174
  temperature: 0.7,
47
175
  maxTokens: 1000,
48
- metadata: { taskId: "task-123" }, // Optional tracking data
176
+ metadata: { taskId: "task-123" },
49
177
  });
50
-
51
- console.log(response.content); // AI response text
52
- console.log(response.usage); // Token usage stats
53
178
  ```
54
179
 
55
- ### `complete(prompt, options)`
56
-
57
- Convenience function for simple single-turn completions.
180
+ ### `complete(prompt, options)` (Deprecated)
58
181
 
59
182
  ```javascript
183
+ // DEPRECATED: Use provider-grouped functions instead
60
184
  import { complete } from "../llm/index.js";
61
185
 
62
186
  const response = await complete("What is 2+2?", {
@@ -64,28 +188,13 @@ const response = await complete("What is 2+2?", {
64
188
  model: "gpt-5-chat-latest",
65
189
  });
66
190
 
67
- console.log(response.content); // "4"
68
- ```
69
-
70
- ### `createLLM(options)`
71
-
72
- Factory function to create a bound LLM interface with default settings.
73
-
74
- ```javascript
191
+ // RECOMMENDED: Use provider-grouped function
75
192
  import { createLLM } from "../llm/index.js";
76
193
 
77
- const llm = createLLM({
78
- defaultProvider: "openai",
79
- defaultModel: "gpt-5-chat-latest",
194
+ const llm = createLLM();
195
+ const response = await llm.openai.gpt5({
196
+ messages: [{ role: "user", content: "What is 2+2?" }],
80
197
  });
81
-
82
- // Use the bound interface
83
- const response = await llm.chat({
84
- messages: [{ role: "user", content: "Hello!" }],
85
- });
86
-
87
- // Or use convenience methods
88
- const result = await llm.complete("What is AI?");
89
198
  ```
90
199
 
91
200
  ### `createChain()`