@ryanfw/prompt-orchestration-pipeline 0.0.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. package/README.md +415 -24
  2. package/package.json +45 -8
  3. package/src/api/files.js +48 -0
  4. package/src/api/index.js +149 -53
  5. package/src/api/validators/seed.js +141 -0
  6. package/src/cli/index.js +456 -29
  7. package/src/cli/run-orchestrator.js +39 -0
  8. package/src/cli/update-pipeline-json.js +47 -0
  9. package/src/components/DAGGrid.jsx +649 -0
  10. package/src/components/JobCard.jsx +96 -0
  11. package/src/components/JobDetail.jsx +159 -0
  12. package/src/components/JobTable.jsx +202 -0
  13. package/src/components/Layout.jsx +134 -0
  14. package/src/components/TaskFilePane.jsx +570 -0
  15. package/src/components/UploadSeed.jsx +239 -0
  16. package/src/components/ui/badge.jsx +20 -0
  17. package/src/components/ui/button.jsx +43 -0
  18. package/src/components/ui/card.jsx +20 -0
  19. package/src/components/ui/focus-styles.css +60 -0
  20. package/src/components/ui/progress.jsx +26 -0
  21. package/src/components/ui/select.jsx +27 -0
  22. package/src/components/ui/separator.jsx +6 -0
  23. package/src/config/paths.js +99 -0
  24. package/src/core/config.js +270 -9
  25. package/src/core/file-io.js +202 -0
  26. package/src/core/module-loader.js +157 -0
  27. package/src/core/orchestrator.js +275 -294
  28. package/src/core/pipeline-runner.js +95 -41
  29. package/src/core/progress.js +66 -0
  30. package/src/core/status-writer.js +331 -0
  31. package/src/core/task-runner.js +719 -73
  32. package/src/core/validation.js +120 -1
  33. package/src/lib/utils.js +6 -0
  34. package/src/llm/README.md +139 -30
  35. package/src/llm/index.js +222 -72
  36. package/src/pages/PipelineDetail.jsx +111 -0
  37. package/src/pages/PromptPipelineDashboard.jsx +223 -0
  38. package/src/providers/deepseek.js +3 -15
  39. package/src/ui/client/adapters/job-adapter.js +258 -0
  40. package/src/ui/client/bootstrap.js +120 -0
  41. package/src/ui/client/hooks/useJobDetailWithUpdates.js +619 -0
  42. package/src/ui/client/hooks/useJobList.js +50 -0
  43. package/src/ui/client/hooks/useJobListWithUpdates.js +335 -0
  44. package/src/ui/client/hooks/useTicker.js +26 -0
  45. package/src/ui/client/index.css +31 -0
  46. package/src/ui/client/index.html +18 -0
  47. package/src/ui/client/main.jsx +38 -0
  48. package/src/ui/config-bridge.browser.js +149 -0
  49. package/src/ui/config-bridge.js +149 -0
  50. package/src/ui/config-bridge.node.js +310 -0
  51. package/src/ui/dist/assets/index-BDABnI-4.js +33399 -0
  52. package/src/ui/dist/assets/style-Ks8LY8gB.css +28496 -0
  53. package/src/ui/dist/index.html +19 -0
  54. package/src/ui/endpoints/job-endpoints.js +300 -0
  55. package/src/ui/file-reader.js +216 -0
  56. package/src/ui/job-change-detector.js +83 -0
  57. package/src/ui/job-index.js +231 -0
  58. package/src/ui/job-reader.js +274 -0
  59. package/src/ui/job-scanner.js +188 -0
  60. package/src/ui/public/app.js +3 -1
  61. package/src/ui/server.js +1636 -59
  62. package/src/ui/sse-enhancer.js +149 -0
  63. package/src/ui/sse.js +204 -0
  64. package/src/ui/state-snapshot.js +252 -0
  65. package/src/ui/transformers/list-transformer.js +347 -0
  66. package/src/ui/transformers/status-transformer.js +307 -0
  67. package/src/ui/watcher.js +61 -7
  68. package/src/utils/dag.js +101 -0
  69. package/src/utils/duration.js +126 -0
  70. package/src/utils/id-generator.js +30 -0
  71. package/src/utils/jobs.js +7 -0
  72. package/src/utils/pipelines.js +44 -0
  73. package/src/utils/task-files.js +271 -0
  74. package/src/utils/ui.jsx +76 -0
  75. package/src/ui/public/index.html +0 -53
  76. package/src/ui/public/style.css +0 -341
package/src/llm/index.js CHANGED
@@ -2,6 +2,7 @@ import { openaiChat } from "../providers/openai.js";
2
2
  import { deepseekChat } from "../providers/deepseek.js";
3
3
  import { EventEmitter } from "node:events";
4
4
  import { getConfig } from "../core/config.js";
5
+ import fs from "node:fs";
5
6
 
6
7
  // Global mock provider instance (for demo/testing)
7
8
  let mockProviderInstance = null;
@@ -15,6 +16,24 @@ export function registerMockProvider(provider) {
15
16
  mockProviderInstance = provider;
16
17
  }
17
18
 
19
// Lazily installs a canned mock provider when the configured default
// provider is "mock" and no provider has been registered yet.
// Used so tests/demos can run without a registerMockProvider() call.
function autoRegisterMockProvider() {
  const { llm } = getConfig();

  // Nothing to do unless mock mode is on and the slot is still empty.
  if (llm.defaultProvider !== "mock" || mockProviderInstance) {
    return;
  }

  mockProviderInstance = {
    chat: async () => ({
      content: "Mock response for testing",
      usage: {
        prompt_tokens: 100,
        completion_tokens: 200,
        total_tokens: 300,
      },
    }),
  };
}
36
+
18
37
  // Check available providers
19
38
  export function getAvailableProviders() {
20
39
  return {
@@ -64,10 +83,10 @@ export function calculateCost(provider, model, usage) {
64
83
  return promptCost + completionCost;
65
84
  }
66
85
 
67
- // Main chat function - no metrics handling needed!
86
+ // Core chat function - no metrics handling needed!
68
87
  export async function chat(options) {
69
88
  const {
70
- provider = "openai",
89
+ provider,
71
90
  model,
72
91
  messages = [],
73
92
  temperature,
@@ -76,6 +95,9 @@ export async function chat(options) {
76
95
  ...rest
77
96
  } = options;
78
97
 
98
+ // Auto-register mock provider if needed
99
+ autoRegisterMockProvider();
100
+
79
101
  const available = getAvailableProviders();
80
102
 
81
103
  if (!available[provider]) {
@@ -90,6 +112,12 @@ export async function chat(options) {
90
112
  const userMessages = messages.filter((m) => m.role === "user");
91
113
  const userMsg = userMessages.map((m) => m.content).join("\n");
92
114
 
115
+ // DEBUG write the messages to /tmp/messages.log for debugging
116
+ fs.writeFileSync(
117
+ "/tmp/messages.log",
118
+ JSON.stringify({ messages, systemMsg, userMsg, provider, model }, null, 2)
119
+ );
120
+
93
121
  // Emit request start event
94
122
  llmEvents.emit("llm:request:start", {
95
123
  id: requestId,
@@ -152,18 +180,24 @@ export async function chat(options) {
152
180
  };
153
181
  } else if (provider === "deepseek") {
154
182
  const result = await deepseekChat(
155
- systemMsg,
156
- userMsg,
157
- model || "deepseek-reasoner"
183
+ {
184
+ messages,
185
+ model: "deepseek-chat",
186
+ }
187
+
188
+ // systemMsg,
189
+ // userMsg,
190
+ // model || "deepseek-reasoner"
158
191
  );
159
192
 
160
193
  response = {
161
- content: typeof result === "string" ? result : JSON.stringify(result),
162
- raw: result,
194
+ content: result.content,
163
195
  };
164
196
 
165
197
  const promptTokens = estimateTokens(systemMsg + userMsg);
166
- const completionTokens = estimateTokens(response.content);
198
+ const completionTokens = estimateTokens(
199
+ typeof result === "string" ? result : JSON.stringify(result)
200
+ );
167
201
  usage = {
168
202
  promptTokens,
169
203
  completionTokens,
@@ -189,10 +223,7 @@ export async function chat(options) {
189
223
  });
190
224
 
191
225
  // Return clean response - no metrics attached!
192
- return {
193
- ...response,
194
- usage,
195
- };
226
+ return response;
196
227
  } catch (error) {
197
228
  const duration = Date.now() - startTime;
198
229
 
@@ -211,110 +242,229 @@ export async function chat(options) {
211
242
  }
212
243
  }
213
244
 
214
- // Convenience function for simple completions
245
// Convert a "provider:model-name" alias into a camelCase function name.
// Only the model part (everything after the first ":") is used; any extra
// ":"-separated parts are re-joined with "-", then each letter/digit that
// follows a hyphen is uppercased and the hyphen dropped.
// e.g. "openai:gpt-4o" -> "gpt4o", "deepseek:deepseek-chat" -> "deepseekChat".
function toCamelCase(alias) {
  // The provider prefix is intentionally discarded (was previously bound to
  // an unused `provider` variable — removed).
  const [, ...modelParts] = alias.split(":");
  const model = modelParts.join("-");

  // Uppercase the character after each hyphen (handles letters and digits).
  return model.replace(/-([a-z0-9])/g, (_match, char) => char.toUpperCase());
}
257
+
258
// Build { providerName: { camelCaseAlias: fn } } from the model registry.
// Each generated fn forwards to chat() with the registry's provider/model,
// lets explicit options.provider / options.model win (options are spread
// last), and tags request metadata with the originating alias.
function buildProviderFunctions(models) {
  // First pass: group registry entries under their provider field.
  const grouped = {};
  for (const [alias, modelConfig] of Object.entries(models)) {
    const key = modelConfig.provider;
    if (!grouped[key]) {
      grouped[key] = {};
    }
    grouped[key][alias] = modelConfig;
  }

  // Second pass: materialize one bound function per alias.
  const functions = {};
  for (const [providerName, aliases] of Object.entries(grouped)) {
    const bound = {};
    for (const [alias, modelConfig] of Object.entries(aliases)) {
      bound[toCamelCase(alias)] = (options = {}) =>
        chat({
          // Caller overrides take precedence (last-write-wins via spread).
          provider: options.provider || providerName,
          model: options.model || modelConfig.model,
          ...options,
          metadata: {
            ...options.metadata,
            alias,
          },
        });
    }
    functions[providerName] = bound;
  }

  return functions;
}
299
+
300
// Single-prompt convenience wrapper around chat(): wraps `prompt` in a
// user message and applies the configured default provider. Any field in
// `options` (including provider/messages) overrides the defaults because
// the spread comes last.
export async function complete(prompt, options = {}) {
  const chosenProvider = options.provider || getConfig().llm.defaultProvider;

  return chat({
    provider: chosenProvider,
    messages: [{ role: "user", content: prompt }],
    ...options,
  });
}
221
311
 
222
- // Create a chain for multi-turn conversations
312
// Multi-turn conversation builder. Accumulates role/content messages and,
// on execute(), sends a snapshot of them through chat() and records the
// assistant's reply back into the history.
export function createChain() {
  let history = [];

  // Shared push helper for the add* methods.
  const append = (role, content) => {
    history.push({ role, content });
  };

  return {
    addSystemMessage(content) {
      append("system", content);
    },
    addUserMessage(content) {
      append("user", content);
    },
    addAssistantMessage(content) {
      append("assistant", content);
    },
    getMessages() {
      // Return copy to prevent external mutation
      return [...history];
    },
    clear() {
      history = [];
    },
    async execute(options = {}) {
      // Snapshot the history so chat() cannot observe later mutations;
      // options spread last so callers may override anything, even messages.
      const result = await chat({
        messages: [...history],
        ...options,
      });
      append("assistant", result.content);
      return result;
    },
  };
}
259
342
 
260
- // Retry wrapper
261
- export async function withRetry(fn, args = [], options = {}) {
262
- const config = getConfig();
263
- const maxRetries = options.maxRetries ?? config.llm.retryMaxAttempts;
264
- const backoffMs = options.backoffMs ?? config.llm.retryBackoffMs;
265
-
343
// Retry wrapper with exponential backoff: fn(...args) is attempted up to
// maxRetries + 1 times. 401 (auth) errors are never retried; the delay
// before attempt k+1 is backoffMs * 2^k.
export async function withRetry(
  fn,
  args = [],
  { maxRetries = 3, backoffMs = 100 } = {}
) {
  let failure;
  let tries = 0;

  while (tries <= maxRetries) {
    try {
      return await fn(...args);
    } catch (err) {
      failure = err;

      // Auth failures are permanent — surface immediately.
      if (err.status === 401) {
        throw err;
      }

      // Out of retries: propagate the final error.
      if (tries === maxRetries) {
        throw err;
      }

      // Exponential backoff before the next attempt.
      await new Promise((done) => setTimeout(done, backoffMs * 2 ** tries));
      tries++;
    }
  }

  throw failure;
}
285
375
 
286
- // Parallel execution with concurrency control
287
- export async function parallel(fn, items, maxConcurrency) {
288
- const config = getConfig();
289
- const concurrency = maxConcurrency ?? config.llm.maxConcurrency;
376
+ // Parallel execution with concurrency limit
377
+ export async function parallel(workerFn, items, concurrency = 5) {
378
+ if (!items || items.length === 0) {
379
+ return [];
380
+ }
381
+
382
+ const results = new Array(items.length);
383
+ const executing = [];
384
+
385
+ for (let i = 0; i < items.length; i++) {
386
+ const promise = workerFn(items[i]).then((result) => {
387
+ results[i] = result;
388
+ });
389
+
390
+ executing.push(promise);
290
391
 
291
- const results = [];
292
- for (let i = 0; i < items.length; i += concurrency) {
293
- const batch = items.slice(i, i + concurrency);
294
- const batchResults = await Promise.all(batch.map((item) => fn(item)));
295
- results.push(...batchResults);
392
+ if (executing.length >= concurrency) {
393
+ await Promise.race(executing);
394
+ // Remove completed promises from executing array
395
+ executing.splice(
396
+ executing.findIndex((p) => p === promise),
397
+ 1
398
+ );
399
+ }
296
400
  }
401
+
402
+ // Wait for all remaining promises
403
+ await Promise.all(executing);
404
+
297
405
  return results;
298
406
  }
299
407
 
300
- // Create a bound LLM interface (no metrics handling needed!)
301
- export function createLLM(options = {}) {
408
// Create a bound LLM interface exposing ONLY the provider-grouped model
// functions built from the configured model registry (no high-level
// chat/complete helpers — see createHighLevelLLM for those).
export function createLLM() {
  return buildProviderFunctions(getConfig().llm.models);
}
417
+
418
// High-level LLM facade: binds a default provider (from options or config)
// onto chat/complete/withRetry/parallel, and merges in the provider-grouped
// model functions from the registry for backward compatibility.
export function createHighLevelLLM(options = {}) {
  const config = getConfig();
  const defaultProvider = options.defaultProvider || config.llm.defaultProvider;
  const providerFunctions = buildProviderFunctions(config.llm.models);

  // Merge the bound default provider under any caller-supplied fields
  // (spread last so explicit opts.provider still wins).
  const withProvider = (opts) => ({ provider: defaultProvider, ...opts });

  return {
    // High-level interface methods
    chat: (opts = {}) => chat(withProvider(opts)),

    complete: (prompt, opts = {}) => complete(prompt, withProvider(opts)),

    createChain,

    withRetry: (opts = {}) => withRetry(() => chat(withProvider(opts))),

    parallel: async (requests, concurrency = 5) =>
      parallel((request) => chat(withProvider(request)), requests, concurrency),

    getAvailableProviders,

    // Provider-grouped functions (e.g. llm.openai.gpt4o) for compatibility.
    ...providerFunctions,
  };
}
@@ -0,0 +1,111 @@
1
+ import React from "react";
2
+ import { useParams } from "react-router-dom";
3
+ import { Box, Flex, Text } from "@radix-ui/themes";
4
+ import JobDetail from "../components/JobDetail.jsx";
5
+ import { useJobDetailWithUpdates } from "../ui/client/hooks/useJobDetailWithUpdates.js";
6
+ import Layout from "../components/Layout.jsx";
7
+ import { statusBadge } from "../utils/ui.jsx";
8
+
9
+ export default function PipelineDetail() {
10
+ const { jobId } = useParams();
11
+
12
+ // Handle missing job ID (undefined/null)
13
+ if (jobId === undefined || jobId === null) {
14
+ return (
15
+ <Layout title="Pipeline Details" showBackButton={true}>
16
+ <Flex align="center" justify="center" className="min-h-64">
17
+ <Box className="text-center">
18
+ <Text size="5" weight="medium" color="red" className="mb-2">
19
+ No job ID provided
20
+ </Text>
21
+ </Box>
22
+ </Flex>
23
+ </Layout>
24
+ );
25
+ }
26
+
27
+ const { data: job, loading, error } = useJobDetailWithUpdates(jobId);
28
+
29
+ if (loading) {
30
+ return (
31
+ <Layout title="Pipeline Details" showBackButton={true}>
32
+ <Flex align="center" justify="center" className="min-h-64">
33
+ <Box className="text-center">
34
+ <Text size="5" weight="medium" className="mb-2">
35
+ Loading job details...
36
+ </Text>
37
+ </Box>
38
+ </Flex>
39
+ </Layout>
40
+ );
41
+ }
42
+
43
+ if (error) {
44
+ return (
45
+ <Layout title="Pipeline Details" showBackButton={true}>
46
+ <Flex align="center" justify="center" className="min-h-64">
47
+ <Box className="text-center">
48
+ <Text size="5" weight="medium" color="red" className="mb-2">
49
+ Failed to load job details
50
+ </Text>
51
+ <Text size="2" color="gray" className="mt-2">
52
+ {error}
53
+ </Text>
54
+ </Box>
55
+ </Flex>
56
+ </Layout>
57
+ );
58
+ }
59
+
60
+ if (!job) {
61
+ return (
62
+ <Layout title="Pipeline Details" showBackButton={true}>
63
+ <Flex align="center" justify="center" className="min-h-64">
64
+ <Box className="text-center">
65
+ <Text size="5" weight="medium" className="mb-2">
66
+ Job not found
67
+ </Text>
68
+ </Box>
69
+ </Flex>
70
+ </Layout>
71
+ );
72
+ }
73
+
74
+ // Derive pipeline if not provided in job data
75
+ const pipeline =
76
+ job?.pipeline ||
77
+ (() => {
78
+ if (!job?.tasks) return { tasks: [] };
79
+
80
+ let pipelineTasks = [];
81
+ if (Array.isArray(job.tasks)) {
82
+ // tasks is an array, extract names
83
+ pipelineTasks = job.tasks.map((task) => task.name);
84
+ } else if (job.tasks && typeof job.tasks === "object") {
85
+ // tasks is an object, extract keys
86
+ pipelineTasks = Object.keys(job.tasks);
87
+ }
88
+
89
+ return { tasks: pipelineTasks };
90
+ })();
91
+
92
+ // Header actions: job ID and status badge
93
+ const headerActions = (
94
+ <Flex align="center" gap="3" className="shrink-0">
95
+ <Text size="2" color="gray">
96
+ ID: {job.id || jobId}
97
+ </Text>
98
+ {statusBadge(job.status)}
99
+ </Flex>
100
+ );
101
+
102
+ return (
103
+ <Layout
104
+ title={job.name || "Pipeline Details"}
105
+ showBackButton={true}
106
+ actions={headerActions}
107
+ >
108
+ <JobDetail job={job} pipeline={pipeline} />
109
+ </Layout>
110
+ );
111
+ }