@ryanfw/prompt-orchestration-pipeline 0.6.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +1 -2
  2. package/package.json +1 -2
  3. package/src/api/validators/json.js +39 -0
  4. package/src/components/DAGGrid.jsx +392 -303
  5. package/src/components/JobCard.jsx +13 -11
  6. package/src/components/JobDetail.jsx +41 -71
  7. package/src/components/JobTable.jsx +32 -22
  8. package/src/components/Layout.jsx +0 -21
  9. package/src/components/LiveText.jsx +47 -0
  10. package/src/components/TaskDetailSidebar.jsx +216 -0
  11. package/src/components/TimerText.jsx +82 -0
  12. package/src/components/ui/RestartJobModal.jsx +140 -0
  13. package/src/components/ui/toast.jsx +138 -0
  14. package/src/config/models.js +322 -0
  15. package/src/config/statuses.js +119 -0
  16. package/src/core/config.js +2 -164
  17. package/src/core/file-io.js +1 -1
  18. package/src/core/module-loader.js +54 -40
  19. package/src/core/pipeline-runner.js +52 -26
  20. package/src/core/status-writer.js +147 -3
  21. package/src/core/symlink-bridge.js +55 -0
  22. package/src/core/symlink-utils.js +94 -0
  23. package/src/core/task-runner.js +267 -443
  24. package/src/llm/index.js +167 -52
  25. package/src/pages/Code.jsx +57 -3
  26. package/src/pages/PipelineDetail.jsx +92 -22
  27. package/src/pages/PromptPipelineDashboard.jsx +15 -36
  28. package/src/providers/anthropic.js +83 -69
  29. package/src/providers/base.js +52 -0
  30. package/src/providers/deepseek.js +17 -34
  31. package/src/providers/gemini.js +226 -0
  32. package/src/providers/openai.js +36 -106
  33. package/src/providers/zhipu.js +136 -0
  34. package/src/ui/client/adapters/job-adapter.js +16 -26
  35. package/src/ui/client/api.js +134 -0
  36. package/src/ui/client/hooks/useJobDetailWithUpdates.js +65 -178
  37. package/src/ui/client/index.css +9 -0
  38. package/src/ui/client/index.html +1 -0
  39. package/src/ui/client/main.jsx +18 -15
  40. package/src/ui/client/time-store.js +161 -0
  41. package/src/ui/config-bridge.js +15 -24
  42. package/src/ui/config-bridge.node.js +15 -24
  43. package/src/ui/dist/assets/{index-WgJUlSmE.js → index-DqkbzXZ1.js} +1408 -771
  44. package/src/ui/dist/assets/style-DBF9NQGk.css +62 -0
  45. package/src/ui/dist/index.html +3 -2
  46. package/src/ui/public/favicon.svg +12 -0
  47. package/src/ui/server.js +231 -38
  48. package/src/ui/transformers/status-transformer.js +18 -31
  49. package/src/ui/watcher.js +5 -1
  50. package/src/utils/dag.js +8 -4
  51. package/src/utils/duration.js +13 -19
  52. package/src/utils/formatters.js +27 -0
  53. package/src/utils/geometry-equality.js +83 -0
  54. package/src/utils/pipelines.js +5 -1
  55. package/src/utils/time-utils.js +40 -0
  56. package/src/utils/token-cost-calculator.js +4 -7
  57. package/src/utils/ui.jsx +14 -16
  58. package/src/components/ui/select.jsx +0 -27
  59. package/src/lib/utils.js +0 -6
  60. package/src/ui/client/hooks/useTicker.js +0 -26
  61. package/src/ui/config-bridge.browser.js +0 -149
  62. package/src/ui/dist/assets/style-x0V-5m8e.css +0 -62
package/src/providers/anthropic.js

@@ -1,54 +1,38 @@
-import Anthropic from "@anthropic-ai/sdk";
 import {
   extractMessages,
   isRetryableError,
   sleep,
   tryParseJSON,
+  ensureJsonResponseFormat,
+  ProviderJsonParseError,
 } from "./base.js";
 
-let client = null;
-
-function getClient() {
-  if (!client && process.env.ANTHROPIC_API_KEY) {
-    client = new Anthropic({
-      apiKey: process.env.ANTHROPIC_API_KEY,
-      baseURL: process.env.ANTHROPIC_BASE_URL,
-    });
-  }
-  return client;
-}
-
 export async function anthropicChat({
   messages,
-  model = "claude-3-opus-20240229",
+  model = "claude-3-sonnet",
   temperature = 0.7,
-  maxTokens = 4096,
-  responseFormat,
+  maxTokens = 8192,
+  responseFormat = "json",
   topP,
-  topK,
-  stopSequences,
+  stop,
   maxRetries = 3,
 }) {
-  const anthropic = getClient();
-  if (!anthropic) throw new Error("Anthropic API key not configured");
-
-  const { systemMsg, userMessages, assistantMessages } =
-    extractMessages(messages);
-
-  // Convert messages to Anthropic format
-  const anthropicMessages = [];
-  for (const msg of messages) {
-    if (msg.role === "user" || msg.role === "assistant") {
-      anthropicMessages.push({
-        role: msg.role,
-        content: msg.content,
-      });
-    }
-  }
+  console.log("\n[Anthropic] Starting anthropicChat call");
+  console.log("[Anthropic] Model:", model);
+  console.log("[Anthropic] Response format:", responseFormat);
+
+  // Enforce JSON mode - reject calls without proper JSON responseFormat
+  ensureJsonResponseFormat(responseFormat, "Anthropic");
+
+  const { systemMsg, userMsg } = extractMessages(messages);
+  console.log("[Anthropic] System message length:", systemMsg.length);
+  console.log("[Anthropic] User message length:", userMsg.length);
+
+  // Build system guard for JSON enforcement
+  let system = systemMsg;
 
-  // Ensure messages alternate and start with user
-  if (anthropicMessages.length === 0 || anthropicMessages[0].role !== "user") {
-    anthropicMessages.unshift({ role: "user", content: "Hello" });
+  if (responseFormat === "json" || responseFormat?.type === "json_object") {
+    system = `${systemMsg}\n\nYou must output strict JSON only with no extra text.`;
   }
 
   let lastError;
@@ -58,54 +42,84 @@ export async function anthropicChat({
     }
 
     try {
-      const request = {
+      console.log(`[Anthropic] Attempt ${attempt + 1}/${maxRetries + 1}`);
+
+      const requestBody = {
         model,
-        messages: anthropicMessages,
-        max_tokens: maxTokens,
+        system,
+        messages: [{ role: "user", content: userMsg }],
         temperature,
-        top_p: topP,
-        top_k: topK,
-        stop_sequences: stopSequences,
+        max_tokens: maxTokens,
+        ...(topP !== undefined ? { top_p: topP } : {}),
+        ...(stop !== undefined ? { stop_sequences: stop } : {}),
       };
 
-      // Add system message if present
-      if (systemMsg) {
-        request.system = systemMsg;
-      }
-
-      const result = await anthropic.messages.create(request);
+      console.log("[Anthropic] Calling Anthropic API...");
+      const response = await fetch("https://api.anthropic.com/v1/messages", {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          "x-api-key": process.env.ANTHROPIC_API_KEY,
+          "anthropic-version": "2023-06-01",
+        },
+        body: JSON.stringify(requestBody),
+      });
 
-      // Extract text content
-      const content = result.content[0].text;
+      if (!response.ok) {
+        const error = await response
+          .json()
+          .catch(() => ({ error: response.statusText }));
+        throw { status: response.status, ...error };
+      }
 
-      // Try to parse JSON if expected
-      let parsed = null;
-      if (responseFormat?.type === "json_object" || responseFormat === "json") {
-        parsed = tryParseJSON(content);
-        if (!parsed && attempt < maxRetries) {
-          lastError = new Error("Failed to parse JSON response");
-          continue;
-        }
+      const data = await response.json();
+      console.log("[Anthropic] Response received from Anthropic API");
+
+      // Extract text from response.content blocks
+      const blocks = Array.isArray(data?.content) ? data.content : [];
+      const text = blocks
+        .filter((b) => b?.type === "text" && typeof b.text === "string")
+        .map((b) => b.text)
+        .join("");
+      console.log("[Anthropic] Response text length:", text.length);
+
+      // Parse JSON - this is required for all calls
+      const parsed = tryParseJSON(text);
+      if (!parsed) {
+        throw new ProviderJsonParseError(
+          "Anthropic",
+          model,
+          text.substring(0, 200),
+          "Failed to parse JSON response from Anthropic API"
+        );
       }
 
+      // Normalize usage (if provided)
+      const prompt_tokens = data?.usage?.input_tokens;
+      const completion_tokens = data?.usage?.output_tokens;
+      const total_tokens = (prompt_tokens ?? 0) + (completion_tokens ?? 0);
+      const usage =
+        prompt_tokens != null && completion_tokens != null
+          ? { prompt_tokens, completion_tokens, total_tokens }
+          : undefined;
+
+      console.log("[Anthropic] Returning response from Anthropic API");
       return {
-        content: parsed || content,
-        text: content,
-        usage: {
-          prompt_tokens: result.usage.input_tokens,
-          completion_tokens: result.usage.output_tokens,
-          total_tokens: result.usage.input_tokens + result.usage.output_tokens,
-          cache_read_input_tokens: result.usage.cache_creation_input_tokens,
-          cache_write_input_tokens: result.usage.cache_write_input_tokens,
-        },
-        raw: result,
+        content: parsed,
+        text,
+        ...(usage ? { usage } : {}),
+        raw: data,
       };
     } catch (error) {
       lastError = error;
+      const msg = error?.error?.message || error?.message || "";
+      console.error("[Anthropic] Error occurred:", msg);
+      console.error("[Anthropic] Error status:", error?.status);
 
       if (error.status === 401) throw error;
 
       if (isRetryableError(error) && attempt < maxRetries) {
+        console.log("[Anthropic] Retrying due to retryable error");
         continue;
       }
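Net effect of the anthropic.js changes: the @anthropic-ai/sdk client is replaced by a direct fetch to the Messages API, JSON output becomes mandatory, and usage is normalized to prompt/completion/total tokens. A minimal calling sketch under those assumptions (the prompt text and environment setup are illustrative, not part of the package):

import { anthropicChat } from "./providers/anthropic.js";

// Requires ANTHROPIC_API_KEY in the environment.
const { content, usage } = await anthropicChat({
  messages: [
    { role: "system", content: 'Reply as JSON: { "summary": string }.' },
    { role: "user", content: "Summarize what a prompt pipeline does." },
  ],
  responseFormat: "json", // anything else now throws ProviderJsonModeError
});
console.log(content.summary, usage?.total_tokens);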
 
package/src/providers/base.js

@@ -69,3 +69,55 @@ export function tryParseJSON(text) {
     }
   }
 }
+
+/**
+ * Error thrown when JSON response format is required but not provided
+ */
+export class ProviderJsonModeError extends Error {
+  constructor(providerName, message) {
+    super(message);
+    this.name = "ProviderJsonModeError";
+    this.provider = providerName;
+  }
+}
+
+/**
+ * Error thrown when JSON parsing fails and should not be retried
+ */
+export class ProviderJsonParseError extends Error {
+  constructor(provider, model, sample, message = "Failed to parse JSON response") {
+    super(message);
+    this.name = "ProviderJsonParseError";
+    this.provider = provider;
+    this.model = model;
+    this.sample = sample;
+  }
+}
+
+/**
+ * Ensures that responseFormat is configured for JSON output
+ * @param {*} responseFormat - The response format object or string
+ * @param {string} providerName - Name of the provider for error reporting
+ * @throws {ProviderJsonModeError} When JSON format is not properly configured
+ */
+export function ensureJsonResponseFormat(responseFormat, providerName) {
+  if (!responseFormat) {
+    throw new ProviderJsonModeError(
+      providerName,
+      `${providerName} requires responseFormat to be set for JSON mode`
+    );
+  }
+
+  // Check for valid JSON format types
+  const isValidJsonFormat =
+    responseFormat === "json" ||
+    responseFormat?.type === "json_object" ||
+    responseFormat?.type === "json_schema";
+
+  if (!isValidJsonFormat) {
+    throw new ProviderJsonModeError(
+      providerName,
+      `${providerName} only supports JSON response format. Got: ${JSON.stringify(responseFormat)}`
+    );
+  }
+}
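These base.js additions centralize the JSON-only contract shared by the providers above. A short sketch (hypothetical call site, not from the package) of how the two error classes can be told apart:

import {
  ensureJsonResponseFormat,
  ProviderJsonModeError,
  ProviderJsonParseError,
} from "./providers/base.js";

try {
  // Throws: a plain text format is not a valid JSON response format.
  ensureJsonResponseFormat({ type: "text" }, "ExampleProvider");
} catch (err) {
  if (err instanceof ProviderJsonModeError) {
    // Misconfigured call site; fix responseFormat instead of retrying.
    console.error(err.provider, err.message);
  } else if (err instanceof ProviderJsonParseError) {
    // Model returned non-JSON text; err.sample holds the first 200 characters.
    console.error(err.provider, err.model, err.sample);
  } else {
    throw err;
  }
}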
package/src/providers/deepseek.js

@@ -3,6 +3,8 @@ import {
   isRetryableError,
   sleep,
   tryParseJSON,
+  ensureJsonResponseFormat,
+  ProviderJsonParseError,
 } from "./base.js";
 
 export async function deepseekChat({
@@ -10,13 +12,16 @@ export async function deepseekChat({
   model = "deepseek-chat",
   temperature = 0.7,
   maxTokens,
-  responseFormat = "json",
+  responseFormat,
   topP,
   frequencyPenalty,
   presencePenalty,
   stop,
   maxRetries = 3,
 }) {
+  // Enforce JSON mode - reject calls without proper JSON responseFormat
+  ensureJsonResponseFormat(responseFormat, "DeepSeek");
+
   if (!process.env.DEEPSEEK_API_KEY) {
     throw new Error("DeepSeek API key not configured");
   }
@@ -44,7 +49,7 @@ export async function deepseekChat({
     stop,
   };
 
-  // Add response format if needed
+  // Add response format - this is now required for all calls
   if (responseFormat?.type === "json_object" || responseFormat === "json") {
     requestBody.response_format = { type: "json_object" };
   }
@@ -71,23 +76,19 @@ export async function deepseekChat({
       const data = await response.json();
       const content = data.choices[0].message.content;
 
-      // Only try JSON parsing if responseFormat indicates JSON output
-      if (responseFormat?.type === "json_object" || responseFormat === "json") {
-        const parsed = tryParseJSON(content);
-        if (parsed === null && attempt < maxRetries) {
-          // JSON parsing failed, retry
-          lastError = new Error("Failed to parse JSON response");
-          continue;
-        }
-        return {
-          content: parsed,
-          usage: data.usage,
-          raw: data,
-        };
+      // Parse JSON - this is now required for all calls
+      const parsed = tryParseJSON(content);
+      if (!parsed) {
+        throw new ProviderJsonParseError(
+          "DeepSeek",
+          model,
+          content.substring(0, 200),
+          "Failed to parse JSON response from DeepSeek API"
+        );
       }
 
       return {
-        content,
+        content: parsed,
         usage: data.usage,
         raw: data,
       };
@@ -106,21 +107,3 @@ export async function deepseekChat({
 
   throw lastError || new Error(`Failed after ${maxRetries + 1} attempts`);
 }
-
-// Keep backward compatibility
-export async function queryDeepSeek(
-  system,
-  prompt,
-  model = "deepseek-reasoner"
-) {
-  const response = await deepseekChat({
-    messages: [
-      { role: "system", content: system },
-      { role: "user", content: prompt },
-    ],
-    model,
-    responseFormat: "json",
-  });
-
-  return response.content;
-}
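Note that the queryDeepSeek backward-compatibility wrapper is gone in 0.8.0 and responseFormat is no longer defaulted, so callers must pass it explicitly. A rough replacement for the removed helper (the system and prompt values are illustrative):

import { deepseekChat } from "./providers/deepseek.js";

// Roughly equivalent to the removed queryDeepSeek(system, prompt) helper.
const system = "Respond with a JSON object.";
const prompt = 'List three colors as { "colors": [string] }.';
const { content } = await deepseekChat({
  messages: [
    { role: "system", content: system },
    { role: "user", content: prompt },
  ],
  model: "deepseek-reasoner",
  responseFormat: "json", // now mandatory; omitting it throws ProviderJsonModeError
});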
package/src/providers/gemini.js (new file)

@@ -0,0 +1,226 @@
+import {
+  extractMessages,
+  isRetryableError,
+  sleep,
+  tryParseJSON,
+  ensureJsonResponseFormat,
+  ProviderJsonParseError,
+} from "./base.js";
+
+/**
+ * Google Gemini provider implementation
+ *
+ * @param {Object} options - Provider options
+ * @param {Array} options.messages - Message array with system and user roles
+ * @param {string} options.model - Model name (default: "gemini-2.5-flash")
+ * @param {number} options.temperature - Temperature for sampling (default: 0.7)
+ * @param {number} options.maxTokens - Maximum tokens in response
+ * @param {string|Object} options.responseFormat - Response format ("json" or schema object)
+ * @param {number} options.topP - Top-p sampling parameter
+ * @param {string} options.stop - Stop sequence
+ * @param {number} options.maxRetries - Maximum retry attempts (default: 3)
+ * @returns {Promise<Object>} Provider response with content, text, usage, and raw response
+ */
+export async function geminiChat(options) {
+  const {
+    messages,
+    model = "gemini-2.5-flash",
+    temperature = 0.7,
+    maxTokens,
+    responseFormat,
+    topP,
+    frequencyPenalty,
+    presencePenalty,
+    stop,
+    maxRetries = 3
+  } = options;
+
+  // Validate response format (Gemini only supports JSON mode)
+  ensureJsonResponseFormat(responseFormat, "Gemini");
+
+  // Check API key
+  if (!process.env.GEMINI_API_KEY) {
+    throw new Error("Gemini API key not configured");
+  }
+
+  // Extract system and user messages
+  const { systemMsg, userMsg } = extractMessages(messages);
+
+  // Build system instruction for JSON enforcement
+  let systemInstruction = systemMsg;
+  if (responseFormat === "json" || responseFormat?.type === "json_object") {
+    systemInstruction = `${systemMsg}\n\nYou must output strict JSON only with no extra text.`;
+  }
+  if (responseFormat?.json_schema) {
+    systemInstruction = `${systemMsg}\n\nYou must output strict JSON only matching this schema (no extra text):\n${JSON.stringify(responseFormat.json_schema)}`;
+  }
+
+  // Prepare request body
+  const requestBody = {
+    contents: [
+      {
+        parts: [
+          {
+            text: userMsg,
+          },
+        ],
+      },
+    ],
+    generationConfig: {
+      temperature,
+      maxOutputTokens: maxTokens,
+      topP,
+      stopSequences: stop ? [stop] : undefined,
+    },
+    safetySettings: [
+      {
+        category: "HARM_CATEGORY_HARASSMENT",
+        threshold: "BLOCK_NONE",
+      },
+      {
+        category: "HARM_CATEGORY_HATE_SPEECH",
+        threshold: "BLOCK_NONE",
+      },
+      {
+        category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        threshold: "BLOCK_NONE",
+      },
+      {
+        category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+        threshold: "BLOCK_NONE",
+      },
+    ],
+  };
+
+  // Add system instruction if provided
+  if (systemInstruction.trim()) {
+    requestBody.systemInstruction = {
+      parts: [
+        {
+          text: systemInstruction,
+        },
+      ],
+    };
+  }
+
+  // Remove undefined values
+  if (topP === undefined) delete requestBody.generationConfig.topP;
+  if (stop === undefined) delete requestBody.generationConfig.stopSequences;
+
+  let lastError;
+  const baseUrl =
+    process.env.GEMINI_BASE_URL ||
+    "https://generativelanguage.googleapis.com/v1beta";
+  const url = `${baseUrl}/models/${model}:generateContent?key=${process.env.GEMINI_API_KEY}`;
+
+  for (let attempt = 0; attempt <= maxRetries; attempt++) {
+    if (attempt > 0) {
+      await sleep(2 ** attempt * 1000); // Exponential backoff
+    }
+
+    try {
+      console.log(
+        `[Gemini] Starting geminiChat call (attempt ${attempt + 1}/${maxRetries + 1})`
+      );
+      console.log(`[Gemini] Model: ${model}`);
+      console.log(`[Gemini] Response format:`, responseFormat);
+      console.log(
+        `[Gemini] System instruction length: ${systemInstruction.length}`
+      );
+      console.log(`[Gemini] User message length: ${userMsg.length}`);
+
+      const response = await fetch(url, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+        },
+        body: JSON.stringify(requestBody),
+      });
+
+      if (!response.ok) {
+        const errorData = await response.json().catch(() => ({}));
+        const error = new Error(
+          errorData.error?.message || `Gemini API error: ${response.statusText}`
+        );
+        error.status = response.status;
+        error.data = errorData;
+
+        // Don't retry on authentication errors
+        if (response.status === 401) {
+          throw error;
+        }
+
+        // Retry on retryable errors
+        if (isRetryableError(error) && attempt < maxRetries) {
+          console.log(`[Gemini] Retryable error, retrying...`);
+          lastError = error;
+          continue;
+        }
+
+        throw error;
+      }
+
+      const data = await response.json();
+      console.log(
+        `[Gemini] Response received, candidates length: ${data.candidates?.length || 0}`
+      );
+
+      // Extract text from response
+      const candidate = data.candidates?.[0];
+      if (!candidate?.content?.parts?.[0]?.text) {
+        throw new Error("No content returned from Gemini API");
+      }
+
+      const text = candidate.content.parts[0].text;
+      console.log(`[Gemini] Text length: ${text.length}`);
+
+      // Parse JSON if required
+      const parsed = tryParseJSON(text);
+      if (responseFormat && !parsed) {
+        throw new ProviderJsonParseError(
+          "Gemini",
+          model,
+          text.substring(0, 200),
+          "Failed to parse JSON response from Gemini API"
+        );
+      }
+
+      // Normalize usage metrics
+      const usage = data.usageMetadata
+        ? {
+            prompt_tokens: data.usageMetadata.promptTokenCount,
+            completion_tokens: data.usageMetadata.candidatesTokenCount,
+            total_tokens: data.usageMetadata.totalTokenCount,
+          }
+        : undefined;
+
+      console.log(`[Gemini] Usage:`, usage);
+
+      return {
+        content: parsed || text,
+        text,
+        ...(usage ? { usage } : {}),
+        raw: data,
+      };
+    } catch (error) {
+      console.error(`[Gemini] Error occurred: ${error.message}`);
+      console.error(`[Gemini] Error status: ${error.status}`);
+
+      lastError = error;
+
+      // Don't retry on authentication errors
+      if (error.status === 401) {
+        throw error;
+      }
+
+      // Continue retrying for other errors
+      if (attempt < maxRetries) {
+        continue;
+      }
+
+      throw lastError;
+    }
+  }
+
+  throw lastError;
+}
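A minimal sketch of calling the new Gemini provider, assuming GEMINI_API_KEY is set (the message content is illustrative):

import { geminiChat } from "./providers/gemini.js";

const result = await geminiChat({
  messages: [
    { role: "system", content: 'Respond with JSON: { "answer": string }.' },
    { role: "user", content: "Which response format does this provider enforce?" },
  ],
  model: "gemini-2.5-flash", // default shown in the diff
  responseFormat: "json",    // validated by ensureJsonResponseFormat
  maxTokens: 1024,
});
console.log(result.content, result.usage);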