@ryanfw/prompt-orchestration-pipeline 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/README.md +1 -2
  2. package/package.json +1 -2
  3. package/src/api/validators/json.js +39 -0
  4. package/src/components/DAGGrid.jsx +392 -303
  5. package/src/components/JobCard.jsx +13 -11
  6. package/src/components/JobDetail.jsx +41 -71
  7. package/src/components/JobTable.jsx +32 -22
  8. package/src/components/Layout.jsx +0 -21
  9. package/src/components/LiveText.jsx +47 -0
  10. package/src/components/TaskDetailSidebar.jsx +216 -0
  11. package/src/components/TimerText.jsx +82 -0
  12. package/src/components/ui/RestartJobModal.jsx +140 -0
  13. package/src/components/ui/toast.jsx +138 -0
  14. package/src/config/models.js +322 -0
  15. package/src/config/statuses.js +119 -0
  16. package/src/core/config.js +2 -164
  17. package/src/core/file-io.js +1 -1
  18. package/src/core/module-loader.js +54 -40
  19. package/src/core/pipeline-runner.js +52 -20
  20. package/src/core/status-writer.js +147 -3
  21. package/src/core/symlink-bridge.js +57 -0
  22. package/src/core/symlink-utils.js +94 -0
  23. package/src/core/task-runner.js +267 -443
  24. package/src/llm/index.js +167 -52
  25. package/src/pages/Code.jsx +57 -3
  26. package/src/pages/PipelineDetail.jsx +92 -22
  27. package/src/pages/PromptPipelineDashboard.jsx +15 -36
  28. package/src/providers/anthropic.js +83 -69
  29. package/src/providers/base.js +52 -0
  30. package/src/providers/deepseek.js +17 -34
  31. package/src/providers/gemini.js +226 -0
  32. package/src/providers/openai.js +36 -106
  33. package/src/providers/zhipu.js +136 -0
  34. package/src/ui/client/adapters/job-adapter.js +16 -26
  35. package/src/ui/client/api.js +134 -0
  36. package/src/ui/client/hooks/useJobDetailWithUpdates.js +65 -178
  37. package/src/ui/client/index.css +9 -0
  38. package/src/ui/client/index.html +1 -0
  39. package/src/ui/client/main.jsx +18 -15
  40. package/src/ui/client/time-store.js +161 -0
  41. package/src/ui/config-bridge.js +15 -24
  42. package/src/ui/config-bridge.node.js +15 -24
  43. package/src/ui/dist/assets/{index-WgJUlSmE.js → index-DqkbzXZ1.js} +1408 -771
  44. package/src/ui/dist/assets/style-DBF9NQGk.css +62 -0
  45. package/src/ui/dist/index.html +3 -2
  46. package/src/ui/public/favicon.svg +12 -0
  47. package/src/ui/server.js +231 -33
  48. package/src/ui/transformers/status-transformer.js +18 -31
  49. package/src/utils/dag.js +8 -4
  50. package/src/utils/duration.js +13 -19
  51. package/src/utils/formatters.js +27 -0
  52. package/src/utils/geometry-equality.js +83 -0
  53. package/src/utils/pipelines.js +5 -1
  54. package/src/utils/time-utils.js +40 -0
  55. package/src/utils/token-cost-calculator.js +4 -7
  56. package/src/utils/ui.jsx +14 -16
  57. package/src/components/ui/select.jsx +0 -27
  58. package/src/lib/utils.js +0 -6
  59. package/src/ui/client/hooks/useTicker.js +0 -26
  60. package/src/ui/config-bridge.browser.js +0 -149
  61. package/src/ui/dist/assets/style-x0V-5m8e.css +0 -62
package/src/llm/index.js CHANGED
@@ -1,7 +1,15 @@
1
1
  import { openaiChat } from "../providers/openai.js";
2
2
  import { deepseekChat } from "../providers/deepseek.js";
3
+ import { anthropicChat } from "../providers/anthropic.js";
4
+ import { geminiChat } from "../providers/gemini.js";
5
+ import { zhipuChat } from "../providers/zhipu.js";
3
6
  import { EventEmitter } from "node:events";
4
7
  import { getConfig } from "../core/config.js";
8
+ import {
9
+ MODEL_CONFIG,
10
+ DEFAULT_MODEL_BY_PROVIDER,
11
+ aliasToFunctionName,
12
+ } from "../config/models.js";
5
13
  import fs from "node:fs";
6
14
 
7
15
  // Global mock provider instance (for demo/testing)
@@ -44,6 +52,8 @@ export function getAvailableProviders() {
44
52
  openai: !!process.env.OPENAI_API_KEY,
45
53
  deepseek: !!process.env.DEEPSEEK_API_KEY,
46
54
  anthropic: !!process.env.ANTHROPIC_API_KEY,
55
+ gemini: !!process.env.GEMINI_API_KEY,
56
+ zhipu: !!process.env.ZHIPU_API_KEY,
47
57
  mock: !!mockProviderInstance,
48
58
  };
49
59
  }
@@ -53,36 +63,28 @@ export function estimateTokens(text) {
53
63
  return Math.ceil((text || "").length / 4);
54
64
  }
55
65
 
56
- // Calculate cost based on provider and model
66
+ // Calculate cost based on provider and model, derived from config
57
67
  export function calculateCost(provider, model, usage) {
58
- const pricing = {
59
- mock: {
60
- "gpt-3.5-turbo": { prompt: 0.0005, completion: 0.0015 },
61
- "gpt-4": { prompt: 0.03, completion: 0.06 },
62
- "gpt-4-turbo": { prompt: 0.01, completion: 0.03 },
63
- },
64
- openai: {
65
- "gpt-5-chat-latest": { prompt: 0.015, completion: 0.06 },
66
- "gpt-4": { prompt: 0.03, completion: 0.06 },
67
- "gpt-4-turbo": { prompt: 0.01, completion: 0.03 },
68
- "gpt-3.5-turbo": { prompt: 0.0005, completion: 0.0015 },
69
- },
70
- deepseek: {
71
- "deepseek-reasoner": { prompt: 0.001, completion: 0.002 },
72
- "deepseek-chat": { prompt: 0.0005, completion: 0.001 },
73
- },
74
- anthropic: {
75
- "claude-3-opus": { prompt: 0.015, completion: 0.075 },
76
- "claude-3-sonnet": { prompt: 0.003, completion: 0.015 },
77
- },
78
- };
68
+ if (!usage) {
69
+ // Fallback for missing usage
70
+ return 0;
71
+ }
72
+
73
+ const modelConfig = Object.values(MODEL_CONFIG).find(
74
+ (cfg) => cfg.provider === provider && cfg.model === model
75
+ );
76
+
77
+ if (!modelConfig) {
78
+ return 0;
79
+ }
79
80
 
80
- const modelPricing = pricing[provider]?.[model];
81
- if (!modelPricing || !usage) return 0;
81
+ // Convert per-million pricing to per-1k for calculation
82
+ const promptCostPer1k = modelConfig.tokenCostInPerMillion / 1000;
83
+ const completionCostPer1k = modelConfig.tokenCostOutPerMillion / 1000;
82
84
 
83
- const promptCost = ((usage.promptTokens || 0) / 1000) * modelPricing.prompt;
85
+ const promptCost = ((usage.promptTokens || 0) / 1000) * promptCostPer1k;
84
86
  const completionCost =
85
- ((usage.completionTokens || 0) / 1000) * modelPricing.completion;
87
+ ((usage.completionTokens || 0) / 1000) * completionCostPer1k;
86
88
 
87
89
  return promptCost + completionCost;
88
90
  }
@@ -116,12 +118,15 @@ export async function chat(options) {
116
118
  const startTime = Date.now();
117
119
  const requestId = `req_${Date.now()}_${Math.random().toString(36).substring(7)}`;
118
120
 
121
+ // Default to JSON mode if not specified
122
+ const finalResponseFormat = responseFormat ?? "json";
123
+
119
124
  // Extract system and user messages
120
125
  const systemMsg = messages.find((m) => m.role === "system")?.content || "";
121
126
  const userMessages = messages.filter((m) => m.role === "user");
122
127
  const userMsg = userMessages.map((m) => m.content).join("\n");
123
128
 
124
- // DEBUG write the messages to /tmp/messages.log for debugging
129
+ // DEBUG write_to_file messages to /tmp/messages.log for debugging
125
130
  fs.writeFileSync(
126
131
  "/tmp/messages.log",
127
132
  JSON.stringify({ messages, systemMsg, userMsg, provider, model }, null, 2)
@@ -173,8 +178,7 @@ export async function chat(options) {
173
178
  maxTokens,
174
179
  ...rest,
175
180
  };
176
- if (responseFormat !== undefined)
177
- openaiArgs.responseFormat = responseFormat;
181
+ openaiArgs.responseFormat = finalResponseFormat;
178
182
  if (topP !== undefined) openaiArgs.topP = topP;
179
183
  if (frequencyPenalty !== undefined)
180
184
  openaiArgs.frequencyPenalty = frequencyPenalty;
@@ -222,8 +226,7 @@ export async function chat(options) {
222
226
  if (presencePenalty !== undefined)
223
227
  deepseekArgs.presencePenalty = presencePenalty;
224
228
  if (stop !== undefined) deepseekArgs.stop = stop;
225
- if (responseFormat !== undefined)
226
- deepseekArgs.responseFormat = responseFormat;
229
+ deepseekArgs.responseFormat = finalResponseFormat;
227
230
 
228
231
  const result = await deepseekChat(deepseekArgs);
229
232
 
@@ -250,6 +253,128 @@ export async function chat(options) {
250
253
  totalTokens: promptTokens + completionTokens,
251
254
  };
252
255
  }
256
+ } else if (provider === "anthropic") {
257
+ const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.anthropic;
258
+ const defaultModelConfig = MODEL_CONFIG[defaultAlias];
259
+ const defaultModel = defaultModelConfig?.model;
260
+
261
+ const anthropicArgs = {
262
+ messages,
263
+ model: model || defaultModel,
264
+ temperature,
265
+ maxTokens,
266
+ ...rest,
267
+ };
268
+ if (topP !== undefined) anthropicArgs.topP = topP;
269
+ if (stop !== undefined) anthropicArgs.stop = stop;
270
+ anthropicArgs.responseFormat = finalResponseFormat;
271
+
272
+ const result = await anthropicChat(anthropicArgs);
273
+
274
+ response = {
275
+ content: result.content,
276
+ raw: result.raw,
277
+ };
278
+
279
+ // Use actual usage from anthropic API if available; otherwise estimate
280
+ if (result?.usage) {
281
+ const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
282
+ usage = {
283
+ promptTokens: prompt_tokens,
284
+ completionTokens: completion_tokens,
285
+ totalTokens: total_tokens,
286
+ };
287
+ } else {
288
+ const promptTokens = estimateTokens(systemMsg + userMsg);
289
+ const completionTokens = estimateTokens(
290
+ typeof result === "string" ? result : JSON.stringify(result)
291
+ );
292
+ usage = {
293
+ promptTokens,
294
+ completionTokens,
295
+ totalTokens: promptTokens + completionTokens,
296
+ };
297
+ }
298
+ } else if (provider === "gemini") {
299
+ const geminiArgs = {
300
+ messages,
301
+ model: model || "gemini-2.5-flash",
302
+ temperature,
303
+ maxTokens,
304
+ ...rest,
305
+ };
306
+ if (topP !== undefined) geminiArgs.topP = topP;
307
+ if (stop !== undefined) geminiArgs.stop = stop;
308
+ geminiArgs.responseFormat = finalResponseFormat;
309
+
310
+ const result = await geminiChat(geminiArgs);
311
+
312
+ response = {
313
+ content: result.content,
314
+ raw: result.raw,
315
+ };
316
+
317
+ // Use actual usage from gemini API if available; otherwise estimate
318
+ if (result?.usage) {
319
+ const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
320
+ usage = {
321
+ promptTokens: prompt_tokens,
322
+ completionTokens: completion_tokens,
323
+ totalTokens: total_tokens,
324
+ };
325
+ } else {
326
+ const promptTokens = estimateTokens(systemMsg + userMsg);
327
+ const completionTokens = estimateTokens(
328
+ typeof result === "string" ? result : JSON.stringify(result)
329
+ );
330
+ usage = {
331
+ promptTokens,
332
+ completionTokens,
333
+ totalTokens: promptTokens + completionTokens,
334
+ };
335
+ }
336
+ } else if (provider === "zhipu") {
337
+ const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.zhipu;
338
+ const defaultModelConfig = MODEL_CONFIG[defaultAlias];
339
+ const defaultModel = defaultModelConfig?.model;
340
+
341
+ const zhipuArgs = {
342
+ messages,
343
+ model: model || defaultModel,
344
+ temperature,
345
+ maxTokens,
346
+ ...rest,
347
+ };
348
+ if (topP !== undefined) zhipuArgs.topP = topP;
349
+ if (stop !== undefined) zhipuArgs.stop = stop;
350
+ zhipuArgs.responseFormat = finalResponseFormat;
351
+
352
+ const result = await zhipuChat(zhipuArgs);
353
+
354
+ response = {
355
+ content: result.content,
356
+ raw: result.raw,
357
+ };
358
+
359
+ // Use actual usage from zhipu API if available; otherwise estimate
360
+ if (result?.usage) {
361
+ const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
362
+ usage = {
363
+ promptTokens: prompt_tokens,
364
+ completionTokens: completion_tokens,
365
+ totalTokens: total_tokens,
366
+ };
367
+ } else {
368
+ const promptTokens = estimateTokens(systemMsg + userMsg);
369
+ const completionTokens = estimateTokens(
370
+ typeof result === "string" ? result : JSON.stringify(result)
371
+ );
372
+ usage = {
373
+ promptTokens,
374
+ completionTokens,
375
+ totalTokens: promptTokens + completionTokens,
376
+ };
377
+ }
253
378
  } else {
254
379
  throw new Error(`Provider ${provider} not yet implemented`);
255
380
  }
@@ -292,19 +417,6 @@ export async function chat(options) {
292
417
  }
293
418
  }
294
419
 
295
- // Helper to convert model alias to camelCase function name
296
- function toCamelCase(alias) {
297
- const [provider, ...modelParts] = alias.split(":");
298
- const model = modelParts.join("-");
299
-
300
- // Convert to camelCase (handle both letters and numbers after hyphens)
301
- const camelModel = model.replace(/-([a-z0-9])/g, (match, char) =>
302
- char.toUpperCase()
303
- );
304
-
305
- return camelModel;
306
- }
307
-
308
420
  // Build provider-grouped functions from registry
309
421
  function buildProviderFunctions(models) {
310
422
  const functions = {};
@@ -324,7 +436,7 @@ function buildProviderFunctions(models) {
324
436
  functions[provider] = {};
325
437
 
326
438
  for (const [alias, modelConfig] of Object.entries(providerModels)) {
327
- const functionName = toCamelCase(alias);
439
+ const functionName = aliasToFunctionName(alias);
328
440
 
329
441
  functions[provider][functionName] = (options = {}) => {
330
442
  // Respect provider overrides in options (last-write-wins)
@@ -460,25 +572,28 @@ export async function parallel(workerFn, items, concurrency = 5) {
460
572
 
461
573
  // Create a bound LLM interface - for named-models tests, only return provider functions
462
574
  export function createLLM() {
463
- const config = getConfig();
464
-
465
- // Build functions from registry
466
- const providerFunctions = buildProviderFunctions(config.llm.models);
575
+ // Build functions from centralized registry
576
+ const providerFunctions = buildProviderFunctions(MODEL_CONFIG);
467
577
 
468
578
  return providerFunctions;
469
579
  }
470
580
 
581
+ // Create named models API (explicit function for clarity)
582
+ export function createNamedModelsAPI() {
583
+ return buildProviderFunctions(MODEL_CONFIG);
584
+ }
585
+
471
586
  // Separate function for high-level LLM interface (used by llm.test.js)
472
587
  export function createHighLevelLLM(options = {}) {
473
588
  // Skip config check in tests to avoid PO_ROOT requirement
474
589
  const isTest =
475
590
  process.env.NODE_ENV === "test" || process.env.VITEST === "true";
476
- const config = isTest ? { llm: { models: {} } } : getConfig();
591
+ const config = isTest ? { llm: { defaultProvider: "openai" } } : getConfig();
477
592
  const defaultProvider =
478
593
  options.defaultProvider || (isTest ? "openai" : config.llm.defaultProvider);
479
594
 
480
- // Build functions from registry
481
- const providerFunctions = buildProviderFunctions(config.llm.models);
595
+ // Build functions from centralized registry
596
+ const providerFunctions = buildProviderFunctions(MODEL_CONFIG);
482
597
 
483
598
  return {
484
599
  // High-level interface methods
@@ -203,8 +203,6 @@ export default function CodePage() {
203
203
  topP?: number,
204
204
  frequencyPenalty?: number,
205
205
  presencePenalty?: number,
206
- tools?: Array<{type: "function", function: object}>,
207
- toolChoice?: "auto" | "required" | { type: "function", function: { name: string } },
208
206
  seed?: number,
209
207
  provider?: string,
210
208
  model?: string,
@@ -245,6 +243,62 @@ export default function CodePage() {
245
243
  </Box>
246
244
  )}
247
245
 
246
+ <Heading size="6" mt="8" mb="4">
247
+ Validation API
248
+ </Heading>
249
+ <Text as="p" mb="3" size="2">
250
+ Schema validation helper available to task stages via validators.
251
+ </Text>
252
+ <Box mb="4">
253
+ <Heading size="4" mb="2">
254
+ Function Signature
255
+ </Heading>
256
+ <Code size="3">
257
+ {`validateWithSchema(schema: object, data: object | string): { valid: true } | { valid: false, errors: AjvError[] }`}
258
+ </Code>
259
+ <Heading size="4" mb="2" mt="4">
260
+ Behavior
261
+ </Heading>
262
+ <ul className="list-disc list-inside mb-4 space-y-1">
263
+ <li className="text-sm text-gray-700">
264
+ <Text as="span">
265
+ Parses string data to JSON; on parse failure returns{" "}
266
+ {`{ valid:false, errors:[{ keyword:"type", message:"must be a valid JSON object (string parsing failed)"} ]`}
267
+ </Text>
268
+ </li>
269
+ <li className="text-sm text-gray-700">
270
+ <Text as="span">
271
+ Uses Ajv({`{ allErrors: true, strict: false }`}); compiles
272
+ provided schema
273
+ </Text>
274
+ </li>
275
+ <li className="text-sm text-gray-700">
276
+ <Text as="span">Returns AJV errors array when invalid</Text>
277
+ </li>
278
+ </ul>
279
+ <Heading size="4" mb="2">
280
+ Source
281
+ </Heading>
282
+ <Code size="3">src/api/validators/json.js</Code>
283
+ <Heading size="4" mb="2" mt="4">
284
+ Usage Example
285
+ </Heading>
286
+ <Code size="3">{`export const validateStructure = async ({
287
+ io,
288
+ flags,
289
+ validators: { validateWithSchema },
290
+ }) => {
291
+ const content = await io.readArtifact("research-output.json");
292
+ const result = validateWithSchema(researchJsonSchema, content);
293
+
294
+ if (!result.valid) {
295
+ console.warn("[Research:validateStructure] Validation failed", result.errors);
296
+ return { output: {}, flags: { ...flags, validationFailed: true } };
297
+ }
298
+ return { output: {}, flags };
299
+ };`}</Code>
300
+ </Box>
301
+
248
302
  <Heading size="6" mt="8" mb="4">
249
303
  Environment Configuration
250
304
  </Heading>
@@ -284,7 +338,7 @@ export default function CodePage() {
284
338
  </Table.Row>
285
339
  <Table.Row>
286
340
  <Table.RowHeaderCell>
287
- <Code size="3">Z_API_KEY=</Code>
341
+ <Code size="3">ZHIPU_API_KEY=</Code>
288
342
  </Table.RowHeaderCell>
289
343
  </Table.Row>
290
344
  </Table.Body>
@@ -1,11 +1,13 @@
1
1
  import React from "react";
2
- import { useParams } from "react-router-dom";
2
+ import { data, useParams } from "react-router-dom";
3
3
  import { Box, Flex, Text } from "@radix-ui/themes";
4
+ import * as Tooltip from "@radix-ui/react-tooltip";
4
5
  import JobDetail from "../components/JobDetail.jsx";
5
6
  import { useJobDetailWithUpdates } from "../ui/client/hooks/useJobDetailWithUpdates.js";
6
7
  import Layout from "../components/Layout.jsx";
7
8
  import PageSubheader from "../components/PageSubheader.jsx";
8
9
  import { statusBadge } from "../utils/ui.jsx";
10
+ import { formatCurrency4, formatTokensCompact } from "../utils/formatters.js";
9
11
 
10
12
  export default function PipelineDetail() {
11
13
  const { jobId } = useParams();
@@ -15,11 +17,7 @@ export default function PipelineDetail() {
15
17
  return (
16
18
  <Layout
17
19
  pageTitle="Pipeline Details"
18
- breadcrumbs={[
19
- { label: "Home", href: "/" },
20
- { label: "Pipeline Details" },
21
- ]}
22
- showBackButton={true}
20
+ breadcrumbs={[{ label: "Home", href: "/" }]}
23
21
  >
24
22
  <Flex align="center" justify="center" className="min-h-64">
25
23
  <Box className="text-center">
@@ -32,17 +30,30 @@ export default function PipelineDetail() {
32
30
  );
33
31
  }
34
32
 
35
- const { data: job, loading, error } = useJobDetailWithUpdates(jobId);
33
+ const {
34
+ data: job,
35
+ loading,
36
+ error,
37
+ isRefreshing,
38
+ isHydrated,
39
+ } = useJobDetailWithUpdates(jobId);
36
40
 
37
- if (loading) {
41
+ // Only show loading screen on initial load, not during refresh
42
+ const showLoadingScreen = loading && !isHydrated;
43
+
44
+ if (showLoadingScreen) {
38
45
  return (
39
46
  <Layout
40
47
  pageTitle="Pipeline Details"
41
48
  breadcrumbs={[
42
49
  { label: "Home", href: "/" },
43
- { label: "Pipeline Details" },
50
+ {
51
+ label:
52
+ job && job?.pipelineLabel
53
+ ? job.pipelineLabel
54
+ : "Pipeline Details",
55
+ },
44
56
  ]}
45
- showBackButton={true}
46
57
  >
47
58
  <Flex align="center" justify="center" className="min-h-64">
48
59
  <Box className="text-center">
@@ -61,9 +72,13 @@ export default function PipelineDetail() {
61
72
  pageTitle="Pipeline Details"
62
73
  breadcrumbs={[
63
74
  { label: "Home", href: "/" },
64
- { label: "Pipeline Details" },
75
+ {
76
+ label:
77
+ job && job?.pipelineLabel
78
+ ? job.pipelineLabel
79
+ : "Pipeline Details",
80
+ },
65
81
  ]}
66
- showBackButton={true}
67
82
  >
68
83
  <Flex align="center" justify="center" className="min-h-64">
69
84
  <Box className="text-center">
@@ -80,14 +95,14 @@ export default function PipelineDetail() {
80
95
  }
81
96
 
82
97
  if (!job) {
98
+ const pipelineDisplay = "Pipeline Details";
83
99
  return (
84
100
  <Layout
85
101
  pageTitle="Pipeline Details"
86
102
  breadcrumbs={[
87
103
  { label: "Home", href: "/" },
88
- { label: "Pipeline Details" },
104
+ { label: job.pipelineLabel || "Pipeline Details" },
89
105
  ]}
90
- showBackButton={true}
91
106
  >
92
107
  <Flex align="center" justify="center" className="min-h-64">
93
108
  <Box className="text-center">
@@ -119,30 +134,85 @@ export default function PipelineDetail() {
119
134
  })();
120
135
 
121
136
  const pageTitle = job.name || "Pipeline Details";
137
+
122
138
  const breadcrumbs = [
123
139
  { label: "Home", href: "/" },
124
- { label: "Pipeline Details" },
140
+ {
141
+ label: job && job?.pipelineLabel ? job.pipelineLabel : "Pipeline Details",
142
+ },
125
143
  ...(job.name ? [{ label: job.name }] : []),
126
144
  ];
127
145
 
128
- // Right side content for PageSubheader: job ID and status badge
146
+ // Derive cost data from job object with safe fallbacks
147
+ const totalCost = job?.totalCost || job?.costs?.summary?.totalCost || 0;
148
+ const totalTokens = job?.totalTokens || job?.costs?.summary?.totalTokens || 0;
149
+ const totalInputTokens = job?.costs?.summary?.totalInputTokens || 0;
150
+ const totalOutputTokens = job?.costs?.summary?.totalOutputTokens || 0;
151
+
152
+ // Create cost indicator with tooltip when token data is available
153
+ const costIndicator = (
154
+ <Text size="2" color="gray">
155
+ Cost: {totalCost > 0 ? formatCurrency4(totalCost) : "—"}
156
+ </Text>
157
+ );
158
+
159
+ const costIndicatorWithTooltip =
160
+ totalCost > 0 && totalTokens > 0 ? (
161
+ <Tooltip.Provider delayDuration={100}>
162
+ <Tooltip.Root>
163
+ <Tooltip.Trigger asChild>
164
+ <Box
165
+ className="cursor-help border-b border-dotted border-gray-400 hover:border-gray-600 transition-colors"
166
+ aria-label={`Total cost: ${formatCurrency4(totalCost)}, ${formatTokensCompact(totalTokens)}`}
167
+ >
168
+ {costIndicator}
169
+ </Box>
170
+ </Tooltip.Trigger>
171
+ <Tooltip.Portal>
172
+ <Tooltip.Content
173
+ className="bg-gray-900 text-white px-2 py-1 rounded text-xs max-w-xs"
174
+ sideOffset={5}
175
+ >
176
+ <div className="space-y-1">
177
+ <div className="font-semibold">
178
+ {formatTokensCompact(totalTokens)}
179
+ </div>
180
+ {totalInputTokens > 0 && totalOutputTokens > 0 && (
181
+ <div className="text-gray-300">
182
+ Input: {formatTokensCompact(totalInputTokens)} • Output:{" "}
183
+ {formatTokensCompact(totalOutputTokens)}
184
+ </div>
185
+ )}
186
+ </div>
187
+ <Tooltip.Arrow className="fill-gray-900" />
188
+ </Tooltip.Content>
189
+ </Tooltip.Portal>
190
+ </Tooltip.Root>
191
+ </Tooltip.Provider>
192
+ ) : (
193
+ costIndicator
194
+ );
195
+
196
+ // Right side content for PageSubheader: job ID, cost indicator, and status badge
129
197
  const subheaderRightContent = (
130
- <Flex align="center" gap="3" className="shrink-0">
198
+ <Flex align="center" gap="3" className="shrink-0 flex-wrap">
131
199
  <Text size="2" color="gray">
132
200
  ID: {job.id || jobId}
133
201
  </Text>
202
+ {costIndicatorWithTooltip}
134
203
  {statusBadge(job.status)}
135
204
  </Flex>
136
205
  );
137
206
 
138
207
  return (
139
- <Layout
140
- pageTitle={pageTitle}
141
- breadcrumbs={breadcrumbs}
142
- showBackButton={true}
143
- >
208
+ <Layout pageTitle={pageTitle} breadcrumbs={breadcrumbs}>
144
209
  <PageSubheader breadcrumbs={breadcrumbs} maxWidth="max-w-7xl">
145
210
  {subheaderRightContent}
211
+ {isRefreshing && (
212
+ <Text size="2" color="blue" className="ml-3 animate-pulse">
213
+ Refreshing...
214
+ </Text>
215
+ )}
146
216
  </PageSubheader>
147
217
  <JobDetail job={job} pipeline={pipeline} />
148
218
  </Layout>
@@ -1,14 +1,13 @@
1
1
  // PromptPipelineDashboard.jsx
2
- import React, { useEffect, useMemo, useState } from "react";
2
+ import React, { useMemo, useState } from "react";
3
3
  import { useNavigate } from "react-router-dom";
4
4
 
5
- import { Box, Flex, Text, Heading, Tabs } from "@radix-ui/themes";
5
+ import { Box, Flex, Text, Tabs } from "@radix-ui/themes";
6
6
 
7
7
  import { Progress } from "../components/ui/progress";
8
8
  import { useJobListWithUpdates } from "../ui/client/hooks/useJobListWithUpdates";
9
9
  import { adaptJobSummary } from "../ui/client/adapters/job-adapter";
10
- import { jobCumulativeDurationMs } from "../utils/duration";
11
- import { useTicker } from "../ui/client/hooks/useTicker";
10
+ import { TaskState, JobStatus } from "../config/statuses.js";
12
11
 
13
12
  // Referenced components — leave these alone
14
13
  import JobTable from "../components/JobTable";
@@ -55,40 +54,39 @@ export default function PromptPipelineDashboard({ isConnected }) {
55
54
  }, [apiJobs, error]);
56
55
  const [activeTab, setActiveTab] = useState("current");
57
56
 
58
- // Shared ticker for live duration updates
59
- const now = useTicker(10000);
57
+ // Shared ticker for live duration updates - removed useTicker
60
58
 
61
59
  const errorCount = useMemo(
62
- () => jobs.filter((j) => j.status === "failed").length,
60
+ () => jobs.filter((j) => j.status === TaskState.FAILED).length,
63
61
  [jobs]
64
62
  );
65
63
  const currentCount = useMemo(
66
- () => jobs.filter((j) => j.status === "running").length,
64
+ () => jobs.filter((j) => j.status === TaskState.RUNNING).length,
67
65
  [jobs]
68
66
  );
69
67
  const completedCount = useMemo(
70
- () => jobs.filter((j) => j.status === "complete").length,
68
+ () => jobs.filter((j) => j.status === JobStatus.COMPLETE).length,
71
69
  [jobs]
72
70
  );
73
71
 
74
72
  const filteredJobs = useMemo(() => {
75
73
  switch (activeTab) {
76
74
  case "current":
77
- return jobs.filter((j) => j.status === "running");
75
+ return jobs.filter((j) => j.status === TaskState.RUNNING);
78
76
  case "errors":
79
- return jobs.filter((j) => j.status === "failed");
77
+ return jobs.filter((j) => j.status === TaskState.FAILED);
80
78
  case "complete":
81
- return jobs.filter((j) => j.status === "complete");
79
+ return jobs.filter((j) => j.status === JobStatus.COMPLETE);
82
80
  default:
83
81
  return [];
84
82
  }
85
83
  }, [jobs, activeTab]);
86
84
 
87
- const overallElapsed = (job) => jobCumulativeDurationMs(job, now);
85
+ // overallElapsed function removed - JobTable now uses LiveText for duration calculations
88
86
 
89
87
  // Aggregate progress for currently running jobs (for a subtle top progress bar)
90
88
  const runningJobs = useMemo(
91
- () => jobs.filter((j) => j.status === "running"),
89
+ () => jobs.filter((j) => j.status === TaskState.RUNNING),
92
90
  [jobs]
93
91
  );
94
92
  const aggregateProgress = useMemo(() => {
@@ -142,33 +140,14 @@ export default function PromptPipelineDashboard({ isConnected }) {
142
140
  Completed ({completedCount})
143
141
  </Tabs.Trigger>
144
142
  </Tabs.List>
145
-
146
143
  <Tabs.Content value="current">
147
- <JobTable
148
- jobs={filteredJobs}
149
- pipeline={null}
150
- onOpenJob={openJob}
151
- overallElapsed={overallElapsed}
152
- now={now}
153
- />
144
+ <JobTable jobs={filteredJobs} pipeline={null} onOpenJob={openJob} />
154
145
  </Tabs.Content>
155
146
  <Tabs.Content value="errors">
156
- <JobTable
157
- jobs={filteredJobs}
158
- pipeline={null}
159
- onOpenJob={openJob}
160
- overallElapsed={overallElapsed}
161
- now={now}
162
- />
147
+ <JobTable jobs={filteredJobs} pipeline={null} onOpenJob={openJob} />
163
148
  </Tabs.Content>
164
149
  <Tabs.Content value="complete">
165
- <JobTable
166
- jobs={filteredJobs}
167
- pipeline={null}
168
- onOpenJob={openJob}
169
- overallElapsed={overallElapsed}
170
- now={now}
171
- />
150
+ <JobTable jobs={filteredJobs} pipeline={null} onOpenJob={openJob} />
172
151
  </Tabs.Content>
173
152
  </Tabs.Root>
174
153
  </Layout>