@ryanfw/prompt-orchestration-pipeline 0.5.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/README.md +1 -2
  2. package/package.json +1 -2
  3. package/src/api/validators/json.js +39 -0
  4. package/src/components/DAGGrid.jsx +392 -303
  5. package/src/components/JobCard.jsx +14 -12
  6. package/src/components/JobDetail.jsx +54 -51
  7. package/src/components/JobTable.jsx +72 -23
  8. package/src/components/Layout.jsx +145 -42
  9. package/src/components/LiveText.jsx +47 -0
  10. package/src/components/PageSubheader.jsx +75 -0
  11. package/src/components/TaskDetailSidebar.jsx +216 -0
  12. package/src/components/TimerText.jsx +82 -0
  13. package/src/components/UploadSeed.jsx +0 -70
  14. package/src/components/ui/Logo.jsx +16 -0
  15. package/src/components/ui/RestartJobModal.jsx +140 -0
  16. package/src/components/ui/toast.jsx +138 -0
  17. package/src/config/models.js +322 -0
  18. package/src/config/statuses.js +119 -0
  19. package/src/core/config.js +4 -34
  20. package/src/core/file-io.js +13 -28
  21. package/src/core/module-loader.js +54 -40
  22. package/src/core/pipeline-runner.js +65 -26
  23. package/src/core/status-writer.js +213 -58
  24. package/src/core/symlink-bridge.js +57 -0
  25. package/src/core/symlink-utils.js +94 -0
  26. package/src/core/task-runner.js +321 -437
  27. package/src/llm/index.js +258 -86
  28. package/src/pages/Code.jsx +351 -0
  29. package/src/pages/PipelineDetail.jsx +124 -15
  30. package/src/pages/PromptPipelineDashboard.jsx +20 -88
  31. package/src/providers/anthropic.js +83 -69
  32. package/src/providers/base.js +52 -0
  33. package/src/providers/deepseek.js +20 -21
  34. package/src/providers/gemini.js +226 -0
  35. package/src/providers/openai.js +36 -106
  36. package/src/providers/zhipu.js +136 -0
  37. package/src/ui/client/adapters/job-adapter.js +42 -28
  38. package/src/ui/client/api.js +134 -0
  39. package/src/ui/client/hooks/useJobDetailWithUpdates.js +65 -179
  40. package/src/ui/client/index.css +15 -0
  41. package/src/ui/client/index.html +2 -1
  42. package/src/ui/client/main.jsx +19 -14
  43. package/src/ui/client/time-store.js +161 -0
  44. package/src/ui/config-bridge.js +15 -24
  45. package/src/ui/config-bridge.node.js +15 -24
  46. package/src/ui/dist/assets/{index-CxcrauYR.js → index-DqkbzXZ1.js} +2132 -1086
  47. package/src/ui/dist/assets/style-DBF9NQGk.css +62 -0
  48. package/src/ui/dist/index.html +4 -3
  49. package/src/ui/job-reader.js +0 -108
  50. package/src/ui/public/favicon.svg +12 -0
  51. package/src/ui/server.js +252 -0
  52. package/src/ui/sse-enhancer.js +0 -1
  53. package/src/ui/transformers/list-transformer.js +32 -12
  54. package/src/ui/transformers/status-transformer.js +29 -42
  55. package/src/utils/dag.js +8 -4
  56. package/src/utils/duration.js +13 -19
  57. package/src/utils/formatters.js +27 -0
  58. package/src/utils/geometry-equality.js +83 -0
  59. package/src/utils/pipelines.js +5 -1
  60. package/src/utils/time-utils.js +40 -0
  61. package/src/utils/token-cost-calculator.js +294 -0
  62. package/src/utils/ui.jsx +18 -20
  63. package/src/components/ui/select.jsx +0 -27
  64. package/src/lib/utils.js +0 -6
  65. package/src/ui/client/hooks/useTicker.js +0 -26
  66. package/src/ui/config-bridge.browser.js +0 -149
  67. package/src/ui/dist/assets/style-D6K_oQ12.css +0 -62
package/src/components/ui/toast.jsx
@@ -0,0 +1,138 @@
+ import React, { createContext, useContext, useState, useCallback } from "react";
+ import { Box, Flex, Text } from "@radix-ui/themes";
+
+ // Toast context for managing toast notifications
+ const ToastContext = createContext();
+
+ /**
+  * Toast Provider component that manages toast state
+  */
+ export function ToastProvider({ children }) {
+   const [toasts, setToasts] = useState([]);
+
+   const addToast = useCallback((message, options = {}) => {
+     const toast = {
+       id: Date.now() + Math.random(),
+       message,
+       type: options.type || "info",
+       duration: options.duration || 5000,
+     };
+
+     setToasts((prev) => [...prev, toast]);
+
+     // Auto-remove toast after duration
+     if (toast.duration > 0) {
+       setTimeout(() => {
+         removeToast(toast.id);
+       }, toast.duration);
+     }
+
+     return toast.id;
+   }, []);
+
+   const removeToast = useCallback((id) => {
+     setToasts((prev) => prev.filter((toast) => toast.id !== id));
+   }, []);
+
+   const value = {
+     addToast,
+     removeToast,
+     success: (message, options) =>
+       addToast(message, { ...options, type: "success" }),
+     error: (message, options) =>
+       addToast(message, { ...options, type: "error" }),
+     warning: (message, options) =>
+       addToast(message, { ...options, type: "warning" }),
+     info: (message, options) => addToast(message, { ...options, type: "info" }),
+   };
+
+   return (
+     <ToastContext.Provider value={value}>
+       {children}
+       <ToastContainer toasts={toasts} onRemove={removeToast} />
+     </ToastContext.Provider>
+   );
+ }
+
+ /**
+  * Hook to use toast functionality
+  */
+ export function useToast() {
+   const context = useContext(ToastContext);
+   if (!context) {
+     throw new Error("useToast must be used within a ToastProvider");
+   }
+   return context;
+ }
+
+ /**
+  * Toast container component that renders all active toasts
+  */
+ function ToastContainer({ toasts, onRemove }) {
+   if (toasts.length === 0) return null;
+
+   return (
+     <div className="fixed top-4 right-4 z-50 space-y-2">
+       {toasts.map((toast) => (
+         <ToastItem key={toast.id} toast={toast} onRemove={onRemove} />
+       ))}
+     </div>
+   );
+ }
+
+ /**
+  * Individual toast item component
+  */
+ function ToastItem({ toast, onRemove }) {
+   const getToastStyles = (type) => {
+     switch (type) {
+       case "success":
+         return "bg-green-50 border-green-200 text-green-800";
+       case "error":
+         return "bg-red-50 border-red-200 text-red-800";
+       case "warning":
+         return "bg-yellow-50 border-yellow-200 text-yellow-800";
+       default:
+         return "bg-blue-50 border-blue-200 text-blue-800";
+     }
+   };
+
+   const getIcon = (type) => {
+     switch (type) {
+       case "success":
+         return "✓";
+       case "error":
+         return "✕";
+       case "warning":
+         return "⚠";
+       default:
+         return "ℹ";
+     }
+   };
+
+   return (
+     <Box
+       className={`relative flex items-start p-4 border rounded-lg shadow-lg max-w-sm ${getToastStyles(
+         toast.type
+       )}`}
+       role="alert"
+       aria-live="polite"
+     >
+       <Flex gap="3" align="start">
+         <Text className="flex-shrink-0 text-lg font-semibold">
+           {getIcon(toast.type)}
+         </Text>
+         <Text className="text-sm font-medium flex-1">{toast.message}</Text>
+         <button
+           onClick={() => onRemove(toast.id)}
+           className="flex-shrink-0 ml-4 text-sm opacity-60 hover:opacity-100 focus:outline-none focus:opacity-100"
+           aria-label="Dismiss notification"
+         >
+
+         </button>
+       </Flex>
+     </Box>
+   );
+ }
+
+ export default ToastProvider;
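
For orientation, here is a minimal usage sketch of the new toast API. The RestartButton and App component names are illustrative, not taken from the package, and the import path is an assumption:

    import { ToastProvider, useToast } from "./components/ui/toast.jsx";

    function RestartButton() {
      const toast = useToast();
      // success/error/warning/info are thin wrappers around addToast with a preset type.
      return (
        <button onClick={() => toast.success("Job restarted", { duration: 3000 })}>
          Restart
        </button>
      );
    }

    // useToast() throws unless a ToastProvider is mounted above the caller.
    export default function App() {
      return (
        <ToastProvider>
          <RestartButton />
        </ToastProvider>
      );
    }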
package/src/config/models.js
@@ -0,0 +1,322 @@
+ /**
+  * Canonical model configuration for prompt orchestration pipeline.
+  * This module serves as single source of truth for all model metadata.
+  */
+
+ // Model alias constants grouped by provider
+ export const ModelAlias = Object.freeze({
+   // DeepSeek
+   DEEPSEEK_CHAT: "deepseek:chat",
+   DEEPSEEK_REASONER: "deepseek:reasoner",
+
+   // OpenAI
+   OPENAI_GPT_5: "openai:gpt-5",
+   OPENAI_GPT_5_CORE: "openai:gpt-5-core",
+   OPENAI_GPT_5_CHAT: "openai:gpt-5-chat",
+   OPENAI_GPT_5_PRO: "openai:gpt-5-pro",
+   OPENAI_GPT_5_MINI: "openai:gpt-5-mini",
+   OPENAI_GPT_5_NANO: "openai:gpt-5-nano",
+
+   // Legacy aliases for backward compatibility (tests)
+   OPENAI_GPT_4: "openai:gpt-4",
+   OPENAI_GPT_4_TURBO: "openai:gpt-4-turbo",
+
+   // Google Gemini
+   GEMINI_2_5_PRO: "gemini:pro-2.5",
+   GEMINI_2_5_FLASH: "gemini:flash-2.5",
+   GEMINI_2_5_FLASH_LITE: "gemini:flash-2.5-lite",
+   GEMINI_2_5_FLASH_IMAGE: "gemini:flash-2.5-image",
+
+   // Z.ai (formerly Zhipu) - standardized to "zhipu" provider
+   ZAI_GLM_4_6: "zhipu:glm-4.6",
+   ZAI_GLM_4_5: "zhipu:glm-4.5",
+   ZAI_GLM_4_5_AIR: "zhipu:glm-4.5-air",
+
+   // Anthropic
+   ANTHROPIC_SONNET_4_5: "anthropic:sonnet-4-5",
+   ANTHROPIC_HAIKU_4_5: "anthropic:haiku-4-5",
+   ANTHROPIC_OPUS_4_1: "anthropic:opus-4-1",
+ });
+
+ // Consolidated model configuration with pricing metadata
+ export const MODEL_CONFIG = Object.freeze({
+   // DeepSeek (2025)
+   [ModelAlias.DEEPSEEK_CHAT]: {
+     provider: "deepseek",
+     model: "deepseek-chat", // V3.2 Exp (non-thinking) under the hood
+     tokenCostInPerMillion: 0.27,
+     tokenCostOutPerMillion: 1.1,
+   },
+   [ModelAlias.DEEPSEEK_REASONER]: {
+     provider: "deepseek",
+     model: "deepseek-reasoner", // R1 family
+     tokenCostInPerMillion: 0.55,
+     tokenCostOutPerMillion: 2.19,
+   },
+
+   // — OpenAI (2025) —
+   [ModelAlias.OPENAI_GPT_5]: {
+     provider: "openai",
+     model: "gpt-5", // stable flagship
+     tokenCostInPerMillion: 1.25,
+     tokenCostOutPerMillion: 10.0,
+   },
+   [ModelAlias.OPENAI_GPT_5_CHAT]: {
+     provider: "openai",
+     model: "gpt-5-chat-latest", // Chat variant
+     tokenCostInPerMillion: 1.25,
+     tokenCostOutPerMillion: 10.0,
+   },
+   [ModelAlias.OPENAI_GPT_5_PRO]: {
+     provider: "openai",
+     model: "gpt-5-pro", // higher-compute tier
+     tokenCostInPerMillion: 15.0,
+     tokenCostOutPerMillion: 120.0,
+   },
+   [ModelAlias.OPENAI_GPT_5_MINI]: {
+     provider: "openai",
+     model: "gpt-5-mini",
+     tokenCostInPerMillion: 0.25,
+     tokenCostOutPerMillion: 2.0,
+   },
+   [ModelAlias.OPENAI_GPT_5_NANO]: {
+     provider: "openai",
+     model: "gpt-5-nano",
+     tokenCostInPerMillion: 0.05,
+     tokenCostOutPerMillion: 0.4,
+   },
+
+   // Legacy models for backward compatibility (tests)
+   [ModelAlias.OPENAI_GPT_4]: {
+     provider: "openai",
+     model: "gpt-4",
+     tokenCostInPerMillion: 0.5,
+     tokenCostOutPerMillion: 2.0,
+   },
+   [ModelAlias.OPENAI_GPT_4_TURBO]: {
+     provider: "openai",
+     model: "gpt-4-turbo",
+     tokenCostInPerMillion: 0.3,
+     tokenCostOutPerMillion: 1.0,
+   },
+
+   // — Google Gemini (2025) —
+   [ModelAlias.GEMINI_2_5_PRO]: {
+     provider: "gemini",
+     model: "gemini-2.5-pro", // ≤200k input tier shown; >200k is higher
+     tokenCostInPerMillion: 1.25,
+     tokenCostOutPerMillion: 10.0,
+   },
+   [ModelAlias.GEMINI_2_5_FLASH]: {
+     provider: "gemini",
+     model: "gemini-2.5-flash",
+     tokenCostInPerMillion: 0.3,
+     tokenCostOutPerMillion: 2.5,
+   },
+   [ModelAlias.GEMINI_2_5_FLASH_LITE]: {
+     provider: "gemini",
+     model: "gemini-2.5-flash-lite",
+     tokenCostInPerMillion: 0.1,
+     tokenCostOutPerMillion: 0.4,
+   },
+   [ModelAlias.GEMINI_2_5_FLASH_IMAGE]: {
+     provider: "gemini",
+     model: "gemini-2.5-flash-image",
+     // Inputs follow 2.5 Flash text pricing; outputs are **image tokens** at $30/M (≈$0.039 per 1024² image)
+     tokenCostInPerMillion: 0.3,
+     tokenCostOutPerMillion: 30.0,
+   },
+
+   // — Z.ai (formerly Zhipu) —
+   [ModelAlias.ZAI_GLM_4_6]: {
+     provider: "zhipu",
+     model: "glm-4.6",
+     tokenCostInPerMillion: 0.6,
+     tokenCostOutPerMillion: 2.2,
+   },
+   [ModelAlias.ZAI_GLM_4_5]: {
+     provider: "zhipu",
+     model: "glm-4.5",
+     tokenCostInPerMillion: 0.6,
+     tokenCostOutPerMillion: 2.2,
+   },
+   [ModelAlias.ZAI_GLM_4_5_AIR]: {
+     provider: "zhipu",
+     model: "glm-4.5-air",
+     tokenCostInPerMillion: 0.2,
+     tokenCostOutPerMillion: 1.1,
+   },
+
+   // — Anthropic —
+   // current (Claude 4.5 / 4.1)
+   [ModelAlias.ANTHROPIC_SONNET_4_5]: {
+     provider: "anthropic",
+     model: "claude-sonnet-4-5-20250929", // Use actual model ID
+     tokenCostInPerMillion: 3.0,
+     tokenCostOutPerMillion: 15.0,
+   },
+   [ModelAlias.ANTHROPIC_HAIKU_4_5]: {
+     provider: "anthropic",
+     model: "claude-haiku-4-5-20250929", // Use actual model ID
+     tokenCostInPerMillion: 0.25, // Correct pricing
+     tokenCostOutPerMillion: 1.25, // Correct pricing
+   },
+   [ModelAlias.ANTHROPIC_OPUS_4_1]: {
+     provider: "anthropic",
+     model: "claude-opus-4-1-20240229", // Use actual model ID
+     tokenCostInPerMillion: 15.0,
+     tokenCostOutPerMillion: 75.0,
+   },
+ });
+
+ // Validation set of all valid model aliases
+ export const VALID_MODEL_ALIASES = new Set(Object.keys(MODEL_CONFIG));
+
+ // Default model alias for each provider (used when no model specified)
+ export const DEFAULT_MODEL_BY_PROVIDER = Object.freeze({
+   deepseek: ModelAlias.DEEPSEEK_CHAT,
+   openai: ModelAlias.OPENAI_GPT_5,
+   gemini: ModelAlias.GEMINI_2_5_FLASH,
+   zhipu: ModelAlias.ZAI_GLM_4_6,
+   anthropic: ModelAlias.ANTHROPIC_SONNET_4_5,
+ });
+
+ /**
+  * Convert model alias to function name.
+  * Removes hyphens and dots, uppercases following alphanumeric character.
+  * @param {string} alias - Model alias (e.g., "gemini:2.5-pro")
+  * @returns {string} Function name (e.g., "25Pro")
+  * @throws {Error} If alias is invalid
+  */
+ export function aliasToFunctionName(alias) {
+   if (typeof alias !== "string" || !alias.includes(":")) {
+     throw new Error(`Invalid model alias: ${alias}`);
+   }
+
+   const model = alias.split(":").slice(1).join(":");
+   return model.replace(/[-.]([a-z0-9])/gi, (_, char) => char.toUpperCase());
+ }
+
+ /**
+  * Derived map of alias to function name for efficient lookup.
+  * Computed at module load time and frozen for immutability.
+  */
+ export const FUNCTION_NAME_BY_ALIAS = Object.freeze(
+   Object.fromEntries(
+     Object.keys(MODEL_CONFIG).map((alias) => [
+       alias,
+       aliasToFunctionName(alias),
+     ])
+   )
+ );
+
+ /**
+  * Build provider functions index with dotted path style.
+  * @returns {Object} Frozen provider functions index
+  */
+ export function buildProviderFunctionsIndex() {
+   const result = {};
+
+   for (const [alias, config] of Object.entries(MODEL_CONFIG)) {
+     const { provider } = config;
+     const functionName = FUNCTION_NAME_BY_ALIAS[alias];
+
+     if (!result[provider]) {
+       result[provider] = [];
+     }
+
+     const fullPath = `llm.${provider}.${functionName}`;
+
+     result[provider].push({
+       alias,
+       provider,
+       model: config.model,
+       functionName,
+       fullPath,
+     });
+   }
+
+   // Freeze inner arrays and outer object
+   for (const provider of Object.keys(result)) {
+     Object.freeze(result[provider]);
+   }
+   return Object.freeze(result);
+ }
+
+ /**
+  * Pre-built provider functions index for convenience.
+  * Uses dotted style: llm.anthropic.sonnet45, llm.openai.gpt5, etc.
+  */
+ export const PROVIDER_FUNCTIONS = buildProviderFunctionsIndex();
+
+ /**
+  * Extract provider name from model alias.
+  * @param {string} alias - Model alias (e.g., "openai:gpt-5")
+  * @returns {string} Provider name (e.g., "openai")
+  */
+ export function getProviderFromAlias(alias) {
+   if (typeof alias !== "string" || !alias.includes(":")) {
+     throw new Error(`Invalid model alias: ${alias}`);
+   }
+   return alias.split(":")[0];
+ }
+
+ /**
+  * Extract model name from model alias.
+  * @param {string} alias - Model alias (e.g., "openai:gpt-5")
+  * @returns {string} Model name (e.g., "gpt-5")
+  */
+ export function getModelFromAlias(alias) {
+   if (typeof alias !== "string" || !alias.includes(":")) {
+     throw new Error(`Invalid model alias: ${alias}`);
+   }
+   return alias.split(":").slice(1).join(":");
+ }
+
+ /**
+  * Get model configuration by alias.
+  * @param {string} alias - Model alias (e.g., "openai:gpt-5")
+  * @returns {Object|null} Model configuration or null if not found
+  */
+ export function getModelConfig(alias) {
+   return MODEL_CONFIG[alias] ?? null;
+ }
+
+ // Invariant checks to ensure data consistency
+ for (const [alias, config] of Object.entries(MODEL_CONFIG)) {
+   const providerFromAlias = getProviderFromAlias(alias);
+   if (providerFromAlias !== config.provider) {
+     throw new Error(
+       `Model config invariant violation: alias "${alias}" has provider "${config.provider}" but alias prefix indicates "${providerFromAlias}"`
+     );
+   }
+
+   if (
+     typeof config.tokenCostInPerMillion !== "number" ||
+     config.tokenCostInPerMillion < 0
+   ) {
+     throw new Error(
+       `Model config invariant violation: alias "${alias}" has invalid tokenCostInPerMillion: ${config.tokenCostInPerMillion}`
+     );
+   }
+
+   if (
+     typeof config.tokenCostOutPerMillion !== "number" ||
+     config.tokenCostOutPerMillion < 0
+   ) {
+     throw new Error(
+       `Model config invariant violation: alias "${alias}" has invalid tokenCostOutPerMillion: ${config.tokenCostOutPerMillion}`
+     );
+   }
+ }
+
+ // Verify VALID_MODEL_ALIASES matches MODEL_CONFIG keys exactly
+ const modelConfigKeys = new Set(Object.keys(MODEL_CONFIG));
+ if (
+   modelConfigKeys.size !== VALID_MODEL_ALIASES.size ||
+   ![...modelConfigKeys].every((key) => VALID_MODEL_ALIASES.has(key))
+ ) {
+   throw new Error(
+     "VALID_MODEL_ALIASES does not exactly match MODEL_CONFIG keys"
+   );
+ }
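
To show how the exported helpers fit together, a short sketch follows. The estimateCost function is illustrative and not part of the package; only ModelAlias, getModelConfig, and aliasToFunctionName are:

    import { ModelAlias, getModelConfig, aliasToFunctionName } from "./config/models.js";

    // Illustrative helper: price a single call from token counts and the pricing metadata.
    function estimateCost(alias, inputTokens, outputTokens) {
      const cfg = getModelConfig(alias);
      if (!cfg) throw new Error(`Unknown model alias: ${alias}`);
      return (
        (inputTokens / 1_000_000) * cfg.tokenCostInPerMillion +
        (outputTokens / 1_000_000) * cfg.tokenCostOutPerMillion
      );
    }

    estimateCost(ModelAlias.OPENAI_GPT_5_MINI, 10_000, 2_000); // 0.0025 + 0.004 = 0.0065 USD
    aliasToFunctionName(ModelAlias.ANTHROPIC_SONNET_4_5);      // "sonnet45", as in llm.anthropic.sonnet45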
package/src/config/statuses.js
@@ -0,0 +1,119 @@
+ /**
+  * Canonical status constants and utilities for the prompt orchestration pipeline.
+  * This module serves as the single source of truth for all status-related values.
+  */
+
+ // Task states (per-task execution status)
+ export const TaskState = Object.freeze({
+   PENDING: "pending",
+   RUNNING: "running",
+   DONE: "done",
+   FAILED: "failed",
+ });
+
+ // Job statuses (computed aggregate from task states)
+ export const JobStatus = Object.freeze({
+   PENDING: "pending",
+   RUNNING: "running",
+   FAILED: "failed",
+   COMPLETE: "complete",
+ });
+
+ // Job locations (filesystem lifecycle buckets)
+ export const JobLocation = Object.freeze({
+   PENDING: "pending",
+   CURRENT: "current",
+   COMPLETE: "complete",
+   REJECTED: "rejected",
+ });
+
+ // Validation sets
+ export const VALID_TASK_STATES = new Set(Object.values(TaskState));
+ export const VALID_JOB_STATUSES = new Set(Object.values(JobStatus));
+ export const VALID_JOB_LOCATIONS = new Set(Object.values(JobLocation));
+
+ /**
+  * Normalizes a task state string to canonical form.
+  * @param {string} state - Raw task state
+  * @returns {string} Canonical task state
+  */
+ export function normalizeTaskState(state) {
+   if (typeof state !== "string") {
+     return TaskState.PENDING;
+   }
+
+   const normalized = state.toLowerCase().trim();
+
+   // Handle common synonyms
+   switch (normalized) {
+     case "error":
+       return TaskState.FAILED;
+     case "succeeded":
+       return TaskState.DONE;
+     case TaskState.PENDING:
+     case TaskState.RUNNING:
+     case TaskState.DONE:
+     case TaskState.FAILED:
+       return normalized;
+     default:
+       return TaskState.PENDING;
+   }
+ }
+
+ /**
+  * Normalizes a job status string to canonical form.
+  * @param {string} status - Raw job status
+  * @returns {string} Canonical job status
+  */
+ export function normalizeJobStatus(status) {
+   if (typeof status !== "string") {
+     return JobStatus.PENDING;
+   }
+
+   const normalized = status.toLowerCase().trim();
+
+   // Handle common synonyms
+   switch (normalized) {
+     case "completed":
+       return JobStatus.COMPLETE;
+     case "error":
+       return JobStatus.FAILED;
+     case JobStatus.PENDING:
+     case JobStatus.RUNNING:
+     case JobStatus.FAILED:
+     case JobStatus.COMPLETE:
+       return normalized;
+     default:
+       return JobStatus.PENDING;
+   }
+ }
+
+ /**
+  * Derives job status from an array of task states.
+  * Priority: failed > running > complete > pending
+  * @param {Array<Object>} tasks - Array of task objects with state property
+  * @returns {string} Canonical job status
+  */
+ export function deriveJobStatusFromTasks(tasks) {
+   if (!Array.isArray(tasks) || tasks.length === 0) {
+     return JobStatus.PENDING;
+   }
+
+   // Normalize all task states first
+   const normalizedStates = tasks.map((task) => normalizeTaskState(task.state));
+
+   // Apply priority rules
+   if (normalizedStates.some((state) => state === TaskState.FAILED)) {
+     return JobStatus.FAILED;
+   }
+
+   if (normalizedStates.some((state) => state === TaskState.RUNNING)) {
+     return JobStatus.RUNNING;
+   }
+
+   if (normalizedStates.every((state) => state === TaskState.DONE)) {
+     return JobStatus.COMPLETE;
+   }
+
+   return JobStatus.PENDING;
+ }
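
Illustrative calls showing how the normalization and priority rules behave, based only on the code above:

    import { deriveJobStatusFromTasks, normalizeTaskState } from "./config/statuses.js";

    normalizeTaskState("Succeeded"); // "done" (synonym mapping)
    normalizeTaskState("error");     // "failed"

    // One failed task outranks everything else.
    deriveJobStatusFromTasks([{ state: "done" }, { state: "error" }]);   // "failed"
    // A running task outranks done/pending.
    deriveJobStatusFromTasks([{ state: "done" }, { state: "running" }]); // "running"
    // All done means complete; anything else falls back to pending.
    deriveJobStatusFromTasks([{ state: "done" }, { state: "done" }]);    // "complete"
    deriveJobStatusFromTasks([]);                                        // "pending"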
package/src/core/config.js
@@ -182,41 +182,11 @@ export const defaultConfig = {
      llmRequestTimeout: 60000,
    },
    llm: {
-     defaultProvider: "openai",
-     defaultModel: "gpt-5-chat-latest",
+     defaultProvider: "deepseek",
+     defaultModel: "chat",
      maxConcurrency: 5,
      retryMaxAttempts: 3,
      retryBackoffMs: 1000,
-     models: {
-       "openai:gpt-4": {
-         provider: "openai",
-         model: "gpt-4",
-       },
-       "openai:gpt-4-turbo": {
-         provider: "openai",
-         model: "gpt-4-turbo",
-       },
-       "openai:gpt-5": {
-         provider: "openai",
-         model: "gpt-5-chat-latest",
-       },
-       "deepseek:reasoner": {
-         provider: "deepseek",
-         model: "deepseek-reasoner",
-       },
-       "deepseek:chat": {
-         provider: "deepseek",
-         model: "deepseek-chat",
-       },
-       "anthropic:opus": {
-         provider: "anthropic",
-         model: "claude-3-opus",
-       },
-       "anthropic:sonnet": {
-         provider: "anthropic",
-         model: "claude-3-sonnet",
-       },
-     },
    },
    ui: {
      port: 3000,
@@ -513,14 +483,14 @@ export async function loadConfig(options = {}) {
      await validateConfig(config);
    }

-   // Cache the loaded config
+   // Cache
    currentConfig = config;

    return config;
  }

  /**
-  * Get the current configuration
+  * Get current configuration
   * Loads default config if not already loaded
   *
   * @returns {Object} Current configuration
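
The hard-coded llm.models map is gone from defaultConfig; model metadata now lives in src/config/models.js. This hunk does not show how the new defaults are resolved, but a plausible reading is that provider and model are joined into a "provider:model" alias and looked up there. A hedged sketch, not the package's actual resolution code:

    import { getModelConfig } from "../config/models.js";

    const llmDefaults = { defaultProvider: "deepseek", defaultModel: "chat" };
    // Assumption: the runner joins these into the alias "deepseek:chat" before lookup.
    const alias = `${llmDefaults.defaultProvider}:${llmDefaults.defaultModel}`;
    const modelCfg = getModelConfig(alias);
    // => { provider: "deepseek", model: "deepseek-chat", tokenCostInPerMillion: 0.27, ... }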
package/src/core/file-io.js
@@ -1,5 +1,6 @@
  import fs from "node:fs/promises";
  import path from "node:path";
+ import { writeJobStatus } from "./status-writer.js";

  /**
   * Creates a task-scoped file I/O interface that manages file operations
@@ -30,41 +31,25 @@ export function createTaskFileIO({ workDir, taskName, getStage, statusPath }) {
   * Updates tasks-status.json with file information, ensuring de-duplication
   */
  async function updateStatusWithFiles(fileType, fileName) {
-   try {
-     const statusContent = await fs.readFile(statusPath, "utf8");
-     const status = JSON.parse(statusContent);
-
-     // Initialize files object if it doesn't exist
-     if (!status.files) {
-       status.files = { artifacts: [], logs: [], tmp: [] };
-     }
-
-     // Initialize task files if they don't exist
-     if (!status.tasks[taskName].files) {
-       status.tasks[taskName].files = { artifacts: [], logs: [], tmp: [] };
-     }
-
-     // Add to job-level files array (de-duped)
-     const jobArray = status.files[fileType];
+   const jobDir = path.dirname(statusPath);
+   await writeJobStatus(jobDir, (snapshot) => {
+     snapshot.files ||= { artifacts: [], logs: [], tmp: [] };
+     snapshot.tasks ||= {};
+     snapshot.tasks[taskName] ||= {};
+     snapshot.tasks[taskName].files ||= { artifacts: [], logs: [], tmp: [] };
+
+     const jobArray = snapshot.files[fileType];
      if (!jobArray.includes(fileName)) {
        jobArray.push(fileName);
      }

-     // Add to task-level files array (de-duped)
-     const taskArray = status.tasks[taskName].files[fileType];
+     const taskArray = snapshot.tasks[taskName].files[fileType];
      if (!taskArray.includes(fileName)) {
        taskArray.push(fileName);
      }

-     // Write back to file atomically
-     await atomicWrite(statusPath, JSON.stringify(status, null, 2));
-   } catch (error) {
-     // If status file doesn't exist or is invalid, we'll log but not fail
-     console.warn(
-       `Failed to update status with file ${fileName}:`,
-       error.message
-     );
-   }
+     return snapshot;
+   });
  }

  /**
@@ -132,7 +117,7 @@ export function createTaskFileIO({ workDir, taskName, getStage, statusPath }) {
        logsDir,
        name,
        content,
-       options.mode || "append"
+       options.mode || "replace"
      );
      await updateStatusWithFiles("logs", name);
      return filePath;
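
updateStatusWithFiles now delegates its read-modify-write to writeJobStatus from src/core/status-writer.js, which is also rewritten in this release but not shown here. A minimal sketch of the contract it appears to satisfy, offered purely as an assumption; the real implementation likely adds locking, queuing, and richer error handling:

    import fs from "node:fs/promises";
    import path from "node:path";

    // Hypothetical stand-in for status-writer.js's writeJobStatus.
    async function writeJobStatusSketch(jobDir, mutate) {
      const statusPath = path.join(jobDir, "tasks-status.json");
      let snapshot = {};
      try {
        snapshot = JSON.parse(await fs.readFile(statusPath, "utf8"));
      } catch {
        // Missing or unparsable status file: start from an empty snapshot.
      }
      const next = mutate(snapshot) ?? snapshot;
      const tmpPath = `${statusPath}.tmp`;
      await fs.writeFile(tmpPath, JSON.stringify(next, null, 2));
      await fs.rename(tmpPath, statusPath); // atomic replace on the same filesystem
      return next;
    }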