@ryanfw/prompt-orchestration-pipeline 0.4.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/package.json +1 -1
  2. package/src/components/JobCard.jsx +1 -1
  3. package/src/components/JobDetail.jsx +45 -12
  4. package/src/components/JobTable.jsx +40 -1
  5. package/src/components/Layout.jsx +146 -22
  6. package/src/components/PageSubheader.jsx +75 -0
  7. package/src/components/UploadSeed.jsx +0 -70
  8. package/src/components/ui/Logo.jsx +16 -0
  9. package/src/core/config.js +145 -13
  10. package/src/core/file-io.js +12 -27
  11. package/src/core/orchestrator.js +92 -78
  12. package/src/core/pipeline-runner.js +13 -6
  13. package/src/core/status-writer.js +63 -52
  14. package/src/core/task-runner.js +61 -1
  15. package/src/llm/index.js +97 -40
  16. package/src/pages/Code.jsx +297 -0
  17. package/src/pages/PipelineDetail.jsx +47 -8
  18. package/src/pages/PromptPipelineDashboard.jsx +6 -53
  19. package/src/providers/deepseek.js +17 -1
  20. package/src/providers/openai.js +1 -1
  21. package/src/ui/client/adapters/job-adapter.js +26 -2
  22. package/src/ui/client/hooks/useJobDetailWithUpdates.js +0 -1
  23. package/src/ui/client/index.css +6 -0
  24. package/src/ui/client/index.html +1 -1
  25. package/src/ui/client/main.jsx +2 -0
  26. package/src/ui/dist/assets/{index-CxcrauYR.js → index-WgJUlSmE.js} +716 -307
  27. package/src/ui/dist/assets/style-x0V-5m8e.css +62 -0
  28. package/src/ui/dist/index.html +3 -3
  29. package/src/ui/job-reader.js +0 -108
  30. package/src/ui/server.js +54 -0
  31. package/src/ui/sse-enhancer.js +0 -1
  32. package/src/ui/transformers/list-transformer.js +32 -12
  33. package/src/ui/transformers/status-transformer.js +11 -11
  34. package/src/utils/token-cost-calculator.js +297 -0
  35. package/src/utils/ui.jsx +4 -4
  36. package/src/ui/dist/assets/style-D6K_oQ12.css +0 -62
package/src/core/task-runner.js CHANGED
@@ -4,11 +4,28 @@ import fs from "fs";
  import { createLLM, getLLMEvents } from "../llm/index.js";
  import { loadFreshModule } from "./module-loader.js";
  import { loadEnvironment } from "./environment.js";
- import { getConfig } from "./config.js";
  import { createTaskFileIO } from "./file-io.js";
  import { writeJobStatus } from "./status-writer.js";
  import { computeDeterministicProgress } from "./progress.js";
 
+ /**
+  * Derives model key and token counts from LLM metric event.
+  * Returns a tuple: [modelKey, inputTokens, outputTokens].
+  *
+  * @param {Object} metric - The LLM metric event from llm:request:complete
+  * @returns {Array<string, number, number>} [modelKey, inputTokens, outputTokens]
+  */
+ export function deriveModelKeyAndTokens(metric) {
+   const provider = metric?.provider || "undefined";
+   const model = metric?.model || "undefined";
+   const modelKey = metric?.metadata?.alias || `${provider}:${model}`;
+   const input = Number.isFinite(metric?.promptTokens) ? metric.promptTokens : 0;
+   const output = Number.isFinite(metric?.completionTokens)
+     ? metric.completionTokens
+     : 0;
+   return [modelKey, input, output];
+ }
+
  /**
   * Validates that a value is a plain object (not array, null, or class instance).
   * @param {*} value - The value to check
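For orientation, here is a minimal sketch (not part of the diff) of what the new helper returns. The metric fields shown are the ones the function actually reads; the sample values are invented:

```js
// Hypothetical metric, limited to the fields deriveModelKeyAndTokens reads
const metric = {
  provider: "openai",
  model: "gpt-5-chat-latest",
  promptTokens: 812,
  completionTokens: 164,
  metadata: { alias: "writer" },
};

deriveModelKeyAndTokens(metric);
// => ["writer", 812, 164]            (metadata.alias wins when present)

deriveModelKeyAndTokens({ ...metric, metadata: {} });
// => ["openai:gpt-5-chat-latest", 812, 164]

deriveModelKeyAndTokens({});
// => ["undefined:undefined", 0, 0]   (missing fields fall back to "undefined" and 0)
```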
@@ -365,12 +382,45 @@ export async function runPipeline(modulePath, initialContext = {}) {
    const llmMetrics = [];
    const llmEvents = getLLMEvents();
 
+   // Per-run write queue for serializing tokenUsage appends
+   let tokenWriteQueue = Promise.resolve();
+
+   /**
+    * Appends token usage tuple to tasks-status.json with serialized writes.
+    * @param {string} workDir - Working directory path
+    * @param {string} taskName - Task identifier
+    * @param {Array<string, number, number>} tuple - [modelKey, inputTokens, outputTokens]
+    */
+   function appendTokenUsage(workDir, taskName, tuple) {
+     tokenWriteQueue = tokenWriteQueue
+       .then(() =>
+         writeJobStatus(workDir, (snapshot) => {
+           if (!snapshot.tasks[taskName]) {
+             snapshot.tasks[taskName] = {};
+           }
+           const task = snapshot.tasks[taskName];
+           if (!Array.isArray(task.tokenUsage)) {
+             task.tokenUsage = [];
+           }
+           task.tokenUsage.push(tuple);
+           return snapshot;
+         })
+       )
+       .catch((e) => console.warn("[task-runner] tokenUsage append failed:", e));
+   }
+
    const onLLMComplete = (metric) => {
      llmMetrics.push({
        ...metric,
        task: context.meta.taskName,
        stage: context.currentStage,
      });
+
+     // Append token usage immediately for each successful LLM completion
+     if (context.meta.workDir && context.meta.taskName) {
+       const tuple = deriveModelKeyAndTokens(metric);
+       appendTokenUsage(context.meta.workDir, context.meta.taskName, tuple);
+     }
    };
 
    llmEvents.on("llm:request:complete", onLLMComplete);
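With this in place, each successful LLM completion appends one tuple to the task's tokenUsage array in tasks-status.json. A hypothetical fragment after two calls for a task named "draft" (values invented for illustration) would look roughly like:

```js
// Sketch of the tasks-status.json fragment maintained by appendTokenUsage
{
  tasks: {
    draft: {
      tokenUsage: [
        ["openai:gpt-5-chat-latest", 812, 164],
        ["writer", 1048, 96], // alias is used as the model key when metadata.alias is set
      ],
    },
  },
}
```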
@@ -531,6 +581,8 @@ export async function runPipeline(modulePath, initialContext = {}) {
        error: errInfo,
        refinementCycle: refinementCount,
      });
+     await tokenWriteQueue.catch(() => {});
+     llmEvents.off("llm:request:complete", onLLMComplete);
      return {
        ok: false,
        failedStage: s,
@@ -824,6 +876,9 @@ export async function runPipeline(modulePath, initialContext = {}) {
        }
      }
 
+     await tokenWriteQueue.catch(() => {});
+     llmEvents.off("llm:request:complete", onLLMComplete);
+
      // For non-validation stages or when refinements are exhausted, fail immediately
      return {
        ok: false,
@@ -859,6 +914,8 @@ export async function runPipeline(modulePath, initialContext = {}) {
      typeof tasks.validateQuality === "function";
 
    if (context.flags.validationFailed && hasValidation) {
+     await tokenWriteQueue.catch(() => {});
+     llmEvents.off("llm:request:complete", onLLMComplete);
      return {
        ok: false,
        failedStage: "final-validation",
@@ -869,6 +926,9 @@ export async function runPipeline(modulePath, initialContext = {}) {
      };
    }
 
+   // Flush any trailing token usage appends before cleanup
+   await tokenWriteQueue.catch(() => {}); // absorb last error to not mask pipeline result
+
    llmEvents.off("llm:request:complete", onLLMComplete);
 
    // Write final status with currentStage: null to indicate completion
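Each of these exit paths drains the same per-run write queue before removing the listener. Stripped of the status-writer specifics, the serialization pattern looks like this (a generic sketch, not code from the package):

```js
// Generic promise-chain write queue: each write waits for the previous one,
// so concurrent completion events cannot interleave read-modify-write cycles
// on the same status file.
let queue = Promise.resolve();

function enqueue(write) {
  queue = queue
    .then(write)
    .catch((e) => console.warn("write failed:", e)); // swallow so one failure doesn't wedge the queue
  return queue;
}

// Before returning from the run:
// await queue.catch(() => {}); // flush pending writes without masking the pipeline result
```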
package/src/llm/index.js CHANGED
@@ -18,8 +18,12 @@ export function registerMockProvider(provider) {
 
  // Auto-register mock provider in test mode when default provider is "mock"
  function autoRegisterMockProvider() {
-   const config = getConfig();
-   if (config.llm.defaultProvider === "mock" && !mockProviderInstance) {
+   // Skip config check in tests to avoid PO_ROOT requirement
+   const isTest =
+     process.env.NODE_ENV === "test" || process.env.VITEST === "true";
+   const defaultProvider = isTest ? "mock" : getConfig().llm.defaultProvider;
+
+   if (defaultProvider === "mock" && !mockProviderInstance) {
      // Auto-register a basic mock provider for testing
      mockProviderInstance = {
        chat: async () => ({
@@ -92,6 +96,11 @@ export async function chat(options) {
      temperature,
      maxTokens,
      metadata = {},
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     stop,
+     responseFormat,
      ...rest
    } = options;
 
@@ -157,52 +166,90 @@ export async function chat(options) {
        totalTokens: result.usage.total_tokens,
      };
    } else if (provider === "openai") {
-     const result = await openaiChat({
+     const openaiArgs = {
        messages,
        model: model || "gpt-5-chat-latest",
-       maxTokens,
        temperature,
+       maxTokens,
        ...rest,
-     });
+     };
+     if (responseFormat !== undefined)
+       openaiArgs.responseFormat = responseFormat;
+     if (topP !== undefined) openaiArgs.topP = topP;
+     if (frequencyPenalty !== undefined)
+       openaiArgs.frequencyPenalty = frequencyPenalty;
+     if (presencePenalty !== undefined)
+       openaiArgs.presencePenalty = presencePenalty;
+     if (stop !== undefined) openaiArgs.stop = stop;
+
+     const result = await openaiChat(openaiArgs);
 
      response = {
-       content: typeof result === "string" ? result : JSON.stringify(result),
-       raw: result,
+       content:
+         result?.content ??
+         (typeof result === "string" ? result : String(result)),
+       raw: result?.raw ?? result,
      };
 
-     // Estimate tokens since GPT-5 responses API might not return usage
-     const promptTokens = estimateTokens(systemMsg + userMsg);
-     const completionTokens = estimateTokens(response.content);
-     usage = {
-       promptTokens,
-       completionTokens,
-       totalTokens: promptTokens + completionTokens,
-     };
+     // Use provider usage if available; otherwise estimate tokens
+     if (result?.usage) {
+       const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+       usage = {
+         promptTokens: prompt_tokens,
+         completionTokens: completion_tokens,
+         totalTokens: total_tokens,
+       };
+     } else {
+       const promptTokens = estimateTokens(systemMsg + userMsg);
+       const completionTokens = estimateTokens(response.content);
+       usage = {
+         promptTokens,
+         completionTokens,
+         totalTokens: promptTokens + completionTokens,
+       };
+     }
    } else if (provider === "deepseek") {
-     const result = await deepseekChat(
-       {
-         messages,
-         model: "deepseek-chat",
-       }
-
-       // systemMsg,
-       // userMsg,
-       // model || "deepseek-reasoner"
-     );
+     const deepseekArgs = {
+       messages,
+       model: model || "deepseek-reasoner",
+       temperature,
+       maxTokens,
+       ...rest,
+     };
+     if (topP !== undefined) deepseekArgs.topP = topP;
+     if (frequencyPenalty !== undefined)
+       deepseekArgs.frequencyPenalty = frequencyPenalty;
+     if (presencePenalty !== undefined)
+       deepseekArgs.presencePenalty = presencePenalty;
+     if (stop !== undefined) deepseekArgs.stop = stop;
+     if (responseFormat !== undefined)
+       deepseekArgs.responseFormat = responseFormat;
+
+     const result = await deepseekChat(deepseekArgs);
 
      response = {
        content: result.content,
      };
 
-     const promptTokens = estimateTokens(systemMsg + userMsg);
-     const completionTokens = estimateTokens(
-       typeof result === "string" ? result : JSON.stringify(result)
-     );
-     usage = {
-       promptTokens,
-       completionTokens,
-       totalTokens: promptTokens + completionTokens,
-     };
+     // Use actual usage from deepseek API if available; otherwise estimate
+     if (result?.usage) {
+       const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+       usage = {
+         promptTokens: prompt_tokens,
+         completionTokens: completion_tokens,
+         totalTokens: total_tokens,
+       };
+     } else {
+       const promptTokens = estimateTokens(systemMsg + userMsg);
+       const completionTokens = estimateTokens(
+         typeof result === "string" ? result : JSON.stringify(result)
+       );
+       usage = {
+         promptTokens,
+         completionTokens,
+         totalTokens: promptTokens + completionTokens,
+       };
+     }
    } else {
      throw new Error(`Provider ${provider} not yet implemented`);
    }
@@ -222,8 +269,11 @@ export async function chat(options) {
      timestamp: new Date().toISOString(),
    });
 
-   // Return clean response - no metrics attached!
-   return response;
+   // Return clean response with usage - no metrics attached!
+   return {
+     ...response,
+     usage,
+   };
  } catch (error) {
    const duration = Date.now() - startTime;
 
@@ -299,8 +349,11 @@ function buildProviderFunctions(models) {
 
  // Helper function for single prompt completion
  export async function complete(prompt, options = {}) {
-   const config = getConfig();
-   const defaultProvider = options.provider || config.llm.defaultProvider;
+   // Skip config check in tests to avoid PO_ROOT requirement
+   const isTest =
+     process.env.NODE_ENV === "test" || process.env.VITEST === "true";
+   const defaultProvider =
+     options.provider || (isTest ? "openai" : getConfig().llm.defaultProvider);
 
    return chat({
      provider: defaultProvider,
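complete() remains the single-prompt convenience wrapper around chat(); the change only affects how the default provider is resolved when no config root is available. A hedged usage sketch (the prompt text is invented):

```js
// Single-prompt helper; provider falls back to the configured default,
// or to "openai" when running under NODE_ENV=test / VITEST=true.
const answer = await complete("List three risks of unbounded token usage.", {
  provider: "deepseek",
});
```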
@@ -417,8 +470,12 @@ export function createLLM() {
 
  // Separate function for high-level LLM interface (used by llm.test.js)
  export function createHighLevelLLM(options = {}) {
-   const config = getConfig();
-   const defaultProvider = options.defaultProvider || config.llm.defaultProvider;
+   // Skip config check in tests to avoid PO_ROOT requirement
+   const isTest =
+     process.env.NODE_ENV === "test" || process.env.VITEST === "true";
+   const config = isTest ? { llm: { models: {} } } : getConfig();
+   const defaultProvider =
+     options.defaultProvider || (isTest ? "openai" : config.llm.defaultProvider);
 
    // Build functions from registry
    const providerFunctions = buildProviderFunctions(config.llm.models);
package/src/pages/Code.jsx ADDED
@@ -0,0 +1,297 @@
+ import React, { useState, useEffect } from "react";
+ import { Box, Heading, Table, Code, Text } from "@radix-ui/themes";
+ import Layout from "../components/Layout.jsx";
+ import PageSubheader from "../components/PageSubheader.jsx";
+ import { Button } from "../components/ui/button.jsx";
+
+ const ioFunctions = [
+   {
+     name: "writeArtifact",
+     description: "Write an artifact file",
+     params:
+       'name: string, content: string, options?: { mode?: "replace"|"append"=replace }',
+     returns: "Promise<string>",
+     notes: "Writes to {workDir}/files/artifacts; updates tasks-status.json",
+   },
+   {
+     name: "writeLog",
+     description: "Write a log file",
+     params:
+       'name: string, content: string, options?: { mode?: "append"|"replace"=append }',
+     returns: "Promise<string>",
+     notes:
+       "Writes to {workDir}/files/logs; default append; updates tasks-status.json",
+   },
+   {
+     name: "writeTmp",
+     description: "Write a temporary file",
+     params:
+       'name: string, content: string, options?: { mode?: "replace"|"append"=replace }',
+     returns: "Promise<string>",
+     notes: "Writes to {workDir}/files/tmp; updates tasks-status.json",
+   },
+   {
+     name: "readArtifact",
+     description: "Read an artifact file",
+     params: "name: string",
+     returns: "Promise<string>",
+     notes: "Reads from {workDir}/files/artifacts",
+   },
+   {
+     name: "readLog",
+     description: "Read a log file",
+     params: "name: string",
+     returns: "Promise<string>",
+     notes: "Reads from {workDir}/files/logs",
+   },
+   {
+     name: "readTmp",
+     description: "Read a temporary file",
+     params: "name: string",
+     returns: "Promise<string>",
+     notes: "Reads from {workDir}/files/tmp",
+   },
+   {
+     name: "getTaskDir",
+     description: "Get the task directory path",
+     params: "",
+     returns: "string",
+     notes: "Returns {workDir}/tasks/{taskName}",
+   },
+   {
+     name: "getCurrentStage",
+     description: "Get the current stage name",
+     params: "",
+     returns: "string",
+     notes: "Calls injected getStage()",
+   },
+ ];
+
+ const sampleSeed = {
+   name: "some-name",
+   pipeline: "content-generation",
+   data: {
+     type: "some-type",
+     contentType: "blog-post",
+     targetAudience: "software-developers",
+     tone: "professional-yet-accessible",
+     length: "1500-2000 words",
+     outputFormat: "blog-post",
+   },
+ };
+
+ export default function CodePage() {
+   const [llmFunctions, setLlmFunctions] = useState(null);
+
+   useEffect(() => {
+     fetch("/api/llm/functions")
+       .then((res) => res.json())
+       .then(setLlmFunctions)
+       .catch(console.error);
+   }, []);
+
+   const breadcrumbs = [{ label: "Home", href: "/" }, { label: "Code" }];
+
+   const handleCopySeed = () => {
+     navigator.clipboard.writeText(JSON.stringify(sampleSeed, null, 2));
+   };
+
+   return (
+     <Layout>
+       <PageSubheader breadcrumbs={breadcrumbs} />
+       <Box>
+         {/* Seed File Example Section */}
+         <Box mb="8">
+           <Heading size="6" mb="4">
+             Seed File Example
+           </Heading>
+           <Text as="p" mb="3" size="2">
+             A seed file is a JSON object used to start a new pipeline job. It
+             defines the job name, the pipeline to run, and any contextual data
+             the pipeline requires to begin.
+           </Text>
+           <Text as="p" mb="3" size="2" weight="bold">
+             Required fields:
+           </Text>
+           <ul className="list-disc list-inside mb-4 space-y-1">
+             <li className="text-sm text-gray-700">
+               <Text as="span" weight="bold">
+                 name
+               </Text>{" "}
+               (string): Human-friendly title; non-empty, printable only, ≤120
+               chars; must be unique.
+             </li>
+             <li className="text-sm text-gray-700">
+               <Text as="span" weight="bold">
+                 pipeline
+               </Text>{" "}
+               (string): Pipeline slug defined in your registry (e.g.,
+               content-generation).
+             </li>
+             <li className="text-sm text-gray-700">
+               <Text as="span" weight="bold">
+                 data
+               </Text>{" "}
+               (object): Required but flexible; include any arbitrary keys your
+               pipeline tasks expect.
+             </li>
+           </ul>
+           <Box mb="3">
+             <Button
+               size="1"
+               onClick={handleCopySeed}
+               data-testid="copy-seed-example"
+             >
+               Copy
+             </Button>
+           </Box>
+           <pre className="text-xs bg-gray-50 p-3 rounded overflow-auto max-h-60 border border-gray-200">
+             {JSON.stringify(sampleSeed, null, 2)}
+           </pre>
+         </Box>
+
+         <Heading size="6" mb="4">
+           Pipeline Task IO API
+         </Heading>
+         <Box overflowX="auto">
+           <Table.Root>
+             <Table.Header>
+               <Table.Row>
+                 <Table.ColumnHeaderCell>Function</Table.ColumnHeaderCell>
+                 <Table.ColumnHeaderCell>Parameters</Table.ColumnHeaderCell>
+                 <Table.ColumnHeaderCell>Returns</Table.ColumnHeaderCell>
+                 <Table.ColumnHeaderCell>Notes</Table.ColumnHeaderCell>
+               </Table.Row>
+             </Table.Header>
+             <Table.Body>
+               {ioFunctions.map((fn) => (
+                 <Table.Row key={fn.name}>
+                   <Table.RowHeaderCell>
+                     <Code size="3">io.{fn.name}</Code>
+                   </Table.RowHeaderCell>
+                   <Table.Cell>
+                     <Code size="3">{fn.params || "—"}</Code>
+                   </Table.Cell>
+                   <Table.Cell>
+                     <Code size="3">{fn.returns}</Code>
+                   </Table.Cell>
+                   <Table.Cell>
+                     {fn.description}
+                     <br />
+                     {fn.notes}
+                   </Table.Cell>
+                 </Table.Row>
+               ))}
+             </Table.Body>
+           </Table.Root>
+         </Box>
+
+         <Heading size="6" mt="8" mb="4">
+           Pipeline Task LLM API
+         </Heading>
+         <Box mb="4">
+           <Heading size="4" mb="2">
+             Arguments
+           </Heading>
+           <Code size="3" mb="4">
+             {`{
+ messages: Array<{role: "system"|"user"|"assistant", content: string }>,
+ temperature?: number,
+ maxTokens?: number,
+ responseFormat?: "json" | { type: "json_object" | { type: "json_schema", name: string, json_schema: object } },
+ stop?: string | string[],
+ topP?: number,
+ frequencyPenalty?: number,
+ presencePenalty?: number,
+ tools?: Array<{type: "function", function: object}>,
+ toolChoice?: "auto" | "required" | { type: "function", function: { name: string } },
+ seed?: number,
+ provider?: string,
+ model?: string,
+ metadata?: object,
+ maxRetries?: number
+ }`}
+           </Code>
+           <Heading size="4" mb="2">
+             Returns
+           </Heading>
+           <Code size="3">{`Promise<{ content: any, usage?: object, raw?: any }>`}</Code>
+         </Box>
+
+         {llmFunctions && (
+           <Box overflowX="auto">
+             <Table.Root>
+               <Table.Header>
+                 <Table.Row>
+                   <Table.ColumnHeaderCell>Function</Table.ColumnHeaderCell>
+                   <Table.ColumnHeaderCell>Model</Table.ColumnHeaderCell>
+                 </Table.Row>
+               </Table.Header>
+               <Table.Body>
+                 {Object.entries(llmFunctions).map(([provider, functions]) =>
+                   functions.map((fn) => (
+                     <Table.Row key={fn.fullPath}>
+                       <Table.RowHeaderCell>
+                         <Code size="3">{fn.fullPath}</Code>
+                       </Table.RowHeaderCell>
+                       <Table.Cell>
+                         <Code size="3">{fn.model}</Code>
+                       </Table.Cell>
+                     </Table.Row>
+                   ))
+                 )}
+               </Table.Body>
+             </Table.Root>
+           </Box>
+         )}
+
+         <Heading size="6" mt="8" mb="4">
+           Environment Configuration
+         </Heading>
+         <Box mb="4">
+           <Heading size="4" mb="2">
+             Example .env Configuration
+           </Heading>
+           <Box overflowX="auto">
+             <Table.Root>
+               <Table.Header>
+                 <Table.Row>
+                   <Table.ColumnHeaderCell>
+                     Environment Variable
+                   </Table.ColumnHeaderCell>
+                 </Table.Row>
+               </Table.Header>
+               <Table.Body>
+                 <Table.Row>
+                   <Table.RowHeaderCell>
+                     <Code size="3">OPENAI_API_KEY=</Code>
+                   </Table.RowHeaderCell>
+                 </Table.Row>
+                 <Table.Row>
+                   <Table.RowHeaderCell>
+                     <Code size="3">DEEPSEEK_API_KEY=</Code>
+                   </Table.RowHeaderCell>
+                 </Table.Row>
+                 <Table.Row>
+                   <Table.RowHeaderCell>
+                     <Code size="3">GEMINI_API_KEY=</Code>
+                   </Table.RowHeaderCell>
+                 </Table.Row>
+                 <Table.Row>
+                   <Table.RowHeaderCell>
+                     <Code size="3">ANTHROPIC_API_KEY=</Code>
+                   </Table.RowHeaderCell>
+                 </Table.Row>
+                 <Table.Row>
+                   <Table.RowHeaderCell>
+                     <Code size="3">Z_API_KEY=</Code>
+                   </Table.RowHeaderCell>
+                 </Table.Row>
+               </Table.Body>
+             </Table.Root>
+           </Box>
+         </Box>
+       </Box>
+     </Layout>
+   );
+ }
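The new page fetches /api/llm/functions and only reads a fullPath and a model from each entry, grouped by provider. A hypothetical payload in that shape (the specific names and values below are invented, not taken from the package):

```js
// Hypothetical response shape consumed by the LLM functions table on the Code page
{
  openai: [
    { fullPath: "llm.openai.writer", model: "gpt-5-chat-latest" },
  ],
  deepseek: [
    { fullPath: "llm.deepseek.reasoner", model: "deepseek-reasoner" },
  ],
}
```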