@ryanfw/prompt-orchestration-pipeline 0.12.0 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/package.json +10 -1
  2. package/src/cli/analyze-task.js +51 -0
  3. package/src/cli/index.js +8 -0
  4. package/src/components/AddPipelineSidebar.jsx +144 -0
  5. package/src/components/AnalysisProgressTray.jsx +87 -0
  6. package/src/components/JobTable.jsx +4 -3
  7. package/src/components/Layout.jsx +142 -139
  8. package/src/components/MarkdownRenderer.jsx +149 -0
  9. package/src/components/PipelineDAGGrid.jsx +404 -0
  10. package/src/components/PipelineTypeTaskSidebar.jsx +96 -0
  11. package/src/components/SchemaPreviewPanel.jsx +97 -0
  12. package/src/components/StageTimeline.jsx +36 -0
  13. package/src/components/TaskAnalysisDisplay.jsx +227 -0
  14. package/src/components/TaskCreationSidebar.jsx +447 -0
  15. package/src/components/TaskDetailSidebar.jsx +119 -117
  16. package/src/components/TaskFilePane.jsx +94 -39
  17. package/src/components/ui/button.jsx +59 -27
  18. package/src/components/ui/sidebar.jsx +118 -0
  19. package/src/config/models.js +99 -67
  20. package/src/core/config.js +4 -1
  21. package/src/llm/index.js +129 -9
  22. package/src/pages/PipelineDetail.jsx +6 -6
  23. package/src/pages/PipelineList.jsx +214 -0
  24. package/src/pages/PipelineTypeDetail.jsx +234 -0
  25. package/src/providers/deepseek.js +76 -16
  26. package/src/providers/openai.js +61 -34
  27. package/src/task-analysis/enrichers/analysis-writer.js +62 -0
  28. package/src/task-analysis/enrichers/schema-deducer.js +145 -0
  29. package/src/task-analysis/enrichers/schema-writer.js +74 -0
  30. package/src/task-analysis/extractors/artifacts.js +137 -0
  31. package/src/task-analysis/extractors/llm-calls.js +176 -0
  32. package/src/task-analysis/extractors/stages.js +51 -0
  33. package/src/task-analysis/index.js +103 -0
  34. package/src/task-analysis/parser.js +28 -0
  35. package/src/task-analysis/utils/ast.js +43 -0
  36. package/src/ui/client/hooks/useAnalysisProgress.js +145 -0
  37. package/src/ui/client/index.css +64 -0
  38. package/src/ui/client/main.jsx +4 -0
  39. package/src/ui/client/sse-fetch.js +120 -0
  40. package/src/ui/dist/assets/index-cjHV9mYW.js +82578 -0
  41. package/src/ui/dist/assets/index-cjHV9mYW.js.map +1 -0
  42. package/src/ui/dist/assets/style-CoM9SoQF.css +180 -0
  43. package/src/ui/dist/index.html +2 -2
  44. package/src/ui/endpoints/create-pipeline-endpoint.js +194 -0
  45. package/src/ui/endpoints/pipeline-analysis-endpoint.js +246 -0
  46. package/src/ui/endpoints/pipeline-type-detail-endpoint.js +181 -0
  47. package/src/ui/endpoints/pipelines-endpoint.js +133 -0
  48. package/src/ui/endpoints/schema-file-endpoint.js +105 -0
  49. package/src/ui/endpoints/task-analysis-endpoint.js +104 -0
  50. package/src/ui/endpoints/task-creation-endpoint.js +114 -0
  51. package/src/ui/endpoints/task-save-endpoint.js +101 -0
  52. package/src/ui/express-app.js +45 -0
  53. package/src/ui/lib/analysis-lock.js +67 -0
  54. package/src/ui/lib/sse.js +30 -0
  55. package/src/ui/server.js +4 -0
  56. package/src/ui/utils/slug.js +31 -0
  57. package/src/ui/watcher.js +28 -2
  58. package/src/ui/dist/assets/index-B320avRx.js +0 -26613
  59. package/src/ui/dist/assets/index-B320avRx.js.map +0 -1
  60. package/src/ui/dist/assets/style-BYCoLBnK.css +0 -62
package/src/llm/index.js CHANGED
@@ -91,6 +91,15 @@ export function calculateCost(provider, model, usage) {
91
91
 
92
92
  // Core chat function - no metrics handling needed!
93
93
  export async function chat(options) {
94
+ console.log("[llm] chat() called with options:", {
95
+ provider: options.provider,
96
+ model: options.model,
97
+ messageCount: options.messages?.length || 0,
98
+ hasTemperature: options.temperature !== undefined,
99
+ hasMaxTokens: options.maxTokens !== undefined,
100
+ responseFormat: options.responseFormat,
101
+ });
102
+
94
103
  const {
95
104
  provider,
96
105
  model,
@@ -103,6 +112,7 @@ export async function chat(options) {
103
112
  presencePenalty,
104
113
  stop,
105
114
  responseFormat,
115
+ stream = false,
106
116
  ...rest
107
117
  } = options;
108
118
 
@@ -111,27 +121,41 @@ export async function chat(options) {
111
121
 
112
122
  const available = getAvailableProviders();
113
123
 
124
+ console.log("[llm] Available providers:", available);
125
+ console.log("[llm] Requested provider:", provider);
126
+
114
127
  if (!available[provider]) {
128
+ console.error("[llm] Provider not available:", provider);
115
129
  throw new Error(`Provider ${provider} not available. Check API keys.`);
116
130
  }
117
131
 
118
132
  const startTime = Date.now();
119
133
  const requestId = `req_${Date.now()}_${Math.random().toString(36).substring(7)}`;
120
134
 
121
- // Default to JSON mode if not specified
122
- const finalResponseFormat = responseFormat ?? "json";
123
-
124
135
  // Extract system and user messages
125
136
  const systemMsg = messages.find((m) => m.role === "system")?.content || "";
126
137
  const userMessages = messages.filter((m) => m.role === "user");
127
138
  const userMsg = userMessages.map((m) => m.content).join("\n");
128
139
 
140
+ console.log("[llm] Message analysis:", {
141
+ hasSystemMessage: !!systemMsg,
142
+ systemMessageLength: systemMsg.length,
143
+ userMessageCount: userMessages.length,
144
+ userMessageLength: userMsg.length,
145
+ totalMessageLength: systemMsg.length + userMsg.length,
146
+ });
147
+
129
148
  // DEBUG write_to_file messages to /tmp/messages.log for debugging
130
149
  fs.writeFileSync(
131
150
  "/tmp/messages.log",
132
151
  JSON.stringify({ messages, systemMsg, userMsg, provider, model }, null, 2)
133
152
  );
134
153
 
154
+ console.log(
155
+ "[llm] Emitting llm:request:start event for requestId:",
156
+ requestId
157
+ );
158
+
135
159
  // Emit request start event
136
160
  llmEvents.emit("llm:request:start", {
137
161
  id: requestId,
@@ -142,10 +166,12 @@ export async function chat(options) {
142
166
  });
143
167
 
144
168
  try {
169
+ console.log("[llm] Starting provider call for:", provider);
145
170
  let response;
146
171
  let usage;
147
172
 
148
173
  if (provider === "mock") {
174
+ console.log("[llm] Using mock provider");
149
175
  if (!mockProviderInstance) {
150
176
  throw new Error(
151
177
  "Mock provider not registered. Call registerMockProvider() first."
@@ -159,6 +185,7 @@ export async function chat(options) {
159
185
  maxTokens,
160
186
  ...rest,
161
187
  });
188
+ console.log("[llm] Mock provider returned result");
162
189
 
163
190
  response = {
164
191
  content: result.content,
@@ -171,6 +198,7 @@ export async function chat(options) {
171
198
  totalTokens: result.usage.total_tokens,
172
199
  };
173
200
  } else if (provider === "openai") {
201
+ console.log("[llm] Using OpenAI provider");
174
202
  const openaiArgs = {
175
203
  messages,
176
204
  model: model || "gpt-5-chat-latest",
@@ -178,7 +206,14 @@ export async function chat(options) {
178
206
  maxTokens,
179
207
  ...rest,
180
208
  };
181
- openaiArgs.responseFormat = finalResponseFormat;
209
+ console.log("[llm] OpenAI call parameters:", {
210
+ model: openaiArgs.model,
211
+ hasMessages: !!openaiArgs.messages,
212
+ messageCount: openaiArgs.messages?.length,
213
+ });
214
+ if (responseFormat !== undefined) {
215
+ openaiArgs.responseFormat = responseFormat;
216
+ }
182
217
  if (topP !== undefined) openaiArgs.topP = topP;
183
218
  if (frequencyPenalty !== undefined)
184
219
  openaiArgs.frequencyPenalty = frequencyPenalty;
@@ -186,7 +221,13 @@ export async function chat(options) {
186
221
  openaiArgs.presencePenalty = presencePenalty;
187
222
  if (stop !== undefined) openaiArgs.stop = stop;
188
223
 
224
+ console.log("[llm] Calling openaiChat()...");
189
225
  const result = await openaiChat(openaiArgs);
226
+ console.log("[llm] openaiChat() returned:", {
227
+ hasResult: !!result,
228
+ hasContent: !!result?.content,
229
+ hasUsage: !!result?.usage,
230
+ });
190
231
 
191
232
  response = {
192
233
  content:
@@ -213,22 +254,43 @@ export async function chat(options) {
213
254
  };
214
255
  }
215
256
  } else if (provider === "deepseek") {
257
+ console.log("[llm] Using DeepSeek provider");
216
258
  const deepseekArgs = {
217
259
  messages,
218
- model: model || "deepseek-reasoner",
260
+ model: model || MODEL_CONFIG[DEFAULT_MODEL_BY_PROVIDER.deepseek].model,
219
261
  temperature,
220
262
  maxTokens,
221
263
  ...rest,
222
264
  };
265
+ console.log("[llm] DeepSeek call parameters:", {
266
+ model: deepseekArgs.model,
267
+ hasMessages: !!deepseekArgs.messages,
268
+ messageCount: deepseekArgs.messages?.length,
269
+ });
270
+ if (stream !== undefined) deepseekArgs.stream = stream;
223
271
  if (topP !== undefined) deepseekArgs.topP = topP;
224
272
  if (frequencyPenalty !== undefined)
225
273
  deepseekArgs.frequencyPenalty = frequencyPenalty;
226
274
  if (presencePenalty !== undefined)
227
275
  deepseekArgs.presencePenalty = presencePenalty;
228
276
  if (stop !== undefined) deepseekArgs.stop = stop;
229
- deepseekArgs.responseFormat = finalResponseFormat;
277
+ if (responseFormat !== undefined) {
278
+ deepseekArgs.responseFormat = responseFormat;
279
+ }
230
280
 
281
+ console.log("[llm] Calling deepseekChat()...");
231
282
  const result = await deepseekChat(deepseekArgs);
283
+ console.log("[llm] deepseekChat() returned:", {
284
+ hasResult: !!result,
285
+ isStream: typeof result?.[Symbol.asyncIterator] !== "undefined",
286
+ hasContent: !!result?.content,
287
+ hasUsage: !!result?.usage,
288
+ });
289
+
290
+ // Streaming mode - return async generator directly
291
+ if (stream && typeof result?.[Symbol.asyncIterator] !== "undefined") {
292
+ return result;
293
+ }
232
294
 
233
295
  response = {
234
296
  content: result.content,
@@ -254,6 +316,7 @@ export async function chat(options) {
254
316
  };
255
317
  }
256
318
  } else if (provider === "anthropic") {
319
+ console.log("[llm] Using Anthropic provider");
257
320
  const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.anthropic;
258
321
  const defaultModelConfig = MODEL_CONFIG[defaultAlias];
259
322
  const defaultModel = defaultModelConfig?.model;
@@ -265,11 +328,24 @@ export async function chat(options) {
265
328
  maxTokens,
266
329
  ...rest,
267
330
  };
331
+ console.log("[llm] Anthropic call parameters:", {
332
+ model: anthropicArgs.model,
333
+ hasMessages: !!anthropicArgs.messages,
334
+ messageCount: anthropicArgs.messages?.length,
335
+ });
268
336
  if (topP !== undefined) anthropicArgs.topP = topP;
269
337
  if (stop !== undefined) anthropicArgs.stop = stop;
270
- anthropicArgs.responseFormat = finalResponseFormat;
338
+ if (responseFormat !== undefined) {
339
+ anthropicArgs.responseFormat = responseFormat;
340
+ }
271
341
 
342
+ console.log("[llm] Calling anthropicChat()...");
272
343
  const result = await anthropicChat(anthropicArgs);
344
+ console.log("[llm] anthropicChat() returned:", {
345
+ hasResult: !!result,
346
+ hasContent: !!result?.content,
347
+ hasUsage: !!result?.usage,
348
+ });
273
349
 
274
350
  response = {
275
351
  content: result.content,
@@ -296,6 +372,7 @@ export async function chat(options) {
296
372
  };
297
373
  }
298
374
  } else if (provider === "gemini") {
375
+ console.log("[llm] Using Gemini provider");
299
376
  const geminiArgs = {
300
377
  messages,
301
378
  model: model || "gemini-2.5-flash",
@@ -303,11 +380,24 @@ export async function chat(options) {
303
380
  maxTokens,
304
381
  ...rest,
305
382
  };
383
+ console.log("[llm] Gemini call parameters:", {
384
+ model: geminiArgs.model,
385
+ hasMessages: !!geminiArgs.messages,
386
+ messageCount: geminiArgs.messages?.length,
387
+ });
306
388
  if (topP !== undefined) geminiArgs.topP = topP;
307
389
  if (stop !== undefined) geminiArgs.stop = stop;
308
- geminiArgs.responseFormat = finalResponseFormat;
390
+ if (responseFormat !== undefined) {
391
+ geminiArgs.responseFormat = responseFormat;
392
+ }
309
393
 
394
+ console.log("[llm] Calling geminiChat()...");
310
395
  const result = await geminiChat(geminiArgs);
396
+ console.log("[llm] geminiChat() returned:", {
397
+ hasResult: !!result,
398
+ hasContent: !!result?.content,
399
+ hasUsage: !!result?.usage,
400
+ });
311
401
 
312
402
  response = {
313
403
  content: result.content,
@@ -334,6 +424,7 @@ export async function chat(options) {
334
424
  };
335
425
  }
336
426
  } else if (provider === "zhipu") {
427
+ console.log("[llm] Using Zhipu provider");
337
428
  const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.zhipu;
338
429
  const defaultModelConfig = MODEL_CONFIG[defaultAlias];
339
430
  const defaultModel = defaultModelConfig?.model;
@@ -345,11 +436,24 @@ export async function chat(options) {
345
436
  maxTokens,
346
437
  ...rest,
347
438
  };
439
+ console.log("[llm] Zhipu call parameters:", {
440
+ model: zhipuArgs.model,
441
+ hasMessages: !!zhipuArgs.messages,
442
+ messageCount: zhipuArgs.messages?.length,
443
+ });
348
444
  if (topP !== undefined) zhipuArgs.topP = topP;
349
445
  if (stop !== undefined) zhipuArgs.stop = stop;
350
- zhipuArgs.responseFormat = finalResponseFormat;
446
+ if (responseFormat !== undefined) {
447
+ zhipuArgs.responseFormat = responseFormat;
448
+ }
351
449
 
450
+ console.log("[llm] Calling zhipuChat()...");
352
451
  const result = await zhipuChat(zhipuArgs);
452
+ console.log("[llm] zhipuChat() returned:", {
453
+ hasResult: !!result,
454
+ hasContent: !!result?.content,
455
+ hasUsage: !!result?.usage,
456
+ });
353
457
 
354
458
  response = {
355
459
  content: result.content,
@@ -376,12 +480,21 @@ export async function chat(options) {
376
480
  };
377
481
  }
378
482
  } else {
483
+ console.error("[llm] Unknown provider:", provider);
379
484
  throw new Error(`Provider ${provider} not yet implemented`);
380
485
  }
381
486
 
487
+ console.log("[llm] Processing response from provider:", provider);
488
+
382
489
  const duration = Date.now() - startTime;
383
490
  const cost = calculateCost(provider, model, usage);
384
491
 
492
+ console.log("[llm] Request completed:", {
493
+ duration: `${duration}ms`,
494
+ cost,
495
+ usage,
496
+ });
497
+
385
498
  // Emit success event with metrics
386
499
  llmEvents.emit("llm:request:complete", {
387
500
  id: requestId,
@@ -402,6 +515,13 @@ export async function chat(options) {
402
515
  } catch (error) {
403
516
  const duration = Date.now() - startTime;
404
517
 
518
+ console.error("[llm] Error in chat():", {
519
+ error: error.message,
520
+ name: error.name,
521
+ stack: error.stack,
522
+ duration: `${duration}ms`,
523
+ });
524
+
405
525
  // Emit error event
406
526
  llmEvents.emit("llm:request:error", {
407
527
  id: requestId,
@@ -1,6 +1,6 @@
1
1
  import React, { useState } from "react";
2
2
  import { data, useParams } from "react-router-dom";
3
- import { Box, Flex, Text, Button } from "@radix-ui/themes";
3
+ import { Box, Flex, Text } from "@radix-ui/themes";
4
4
  import * as Tooltip from "@radix-ui/react-tooltip";
5
5
  import JobDetail from "../components/JobDetail.jsx";
6
6
  import { useJobDetailWithUpdates } from "../ui/client/hooks/useJobDetailWithUpdates.js";
@@ -11,6 +11,7 @@ import { formatCurrency4, formatTokensCompact } from "../utils/formatters.js";
11
11
  import { rescanJob } from "../ui/client/api.js";
12
12
  import StopJobModal from "../components/ui/StopJobModal.jsx";
13
13
  import { stopJob } from "../ui/client/api.js";
14
+ import { Button } from "../components/ui/button.jsx";
14
15
 
15
16
  export default function PipelineDetail() {
16
17
  const { jobId } = useParams();
@@ -257,9 +258,8 @@ export default function PipelineDetail() {
257
258
  {costIndicatorWithTooltip}
258
259
  {isRunning && (
259
260
  <Button
260
- size="1"
261
- variant="solid"
262
- color="red"
261
+ size="sm"
262
+ variant="destructive"
263
263
  disabled={isStopping}
264
264
  onClick={openStopModal}
265
265
  >
@@ -267,8 +267,8 @@ export default function PipelineDetail() {
267
267
  </Button>
268
268
  )}
269
269
  <Button
270
- size="1"
271
- variant="soft"
270
+ size="sm"
271
+ variant="outline"
272
272
  disabled={isRescanning}
273
273
  onClick={handleRescan}
274
274
  >
@@ -0,0 +1,214 @@
1
+ import React, { useState, useEffect } from "react";
2
+ import { useNavigate } from "react-router-dom";
3
+ import { Box, Flex, Text, Heading, Table } from "@radix-ui/themes";
4
+ import { ChevronRight, Plus } from "lucide-react";
5
+ import Layout from "../components/Layout.jsx";
6
+ import PageSubheader from "../components/PageSubheader.jsx";
7
+ import AddPipelineSidebar from "../components/AddPipelineSidebar.jsx";
8
+ import { Button } from "../components/ui/button.jsx";
9
+
10
+ /**
11
+ * PipelineList component displays available pipelines in a table layout
12
+ *
13
+ * Fetches pipeline data from /api/pipelines endpoint and handles:
14
+ * - Loading state during fetch
15
+ * - Error state for failed requests
16
+ * - Empty state when no pipelines are available
17
+ * - Table layout using Radix UI components
18
+ * - Add pipeline type functionality via sidebar
19
+ */
20
+ export default function PipelineList() {
21
+ const [pipelines, setPipelines] = useState([]);
22
+ const [loading, setLoading] = useState(true);
23
+ const [error, setError] = useState(null);
24
+ const [sidebarOpen, setSidebarOpen] = useState(false);
25
+ const navigate = useNavigate();
26
+
27
+ useEffect(() => {
28
+ const fetchPipelines = async () => {
29
+ try {
30
+ setLoading(true);
31
+ setError(null);
32
+
33
+ const response = await fetch("/api/pipelines");
34
+
35
+ if (!response.ok) {
36
+ const errorData = await response.json().catch(() => ({}));
37
+ throw new Error(errorData.message || `HTTP ${response.status}`);
38
+ }
39
+
40
+ const result = await response.json();
41
+
42
+ if (!result.ok) {
43
+ throw new Error(result.message || "Failed to load pipelines");
44
+ }
45
+
46
+ setPipelines(result.data?.pipelines || []);
47
+ } catch (err) {
48
+ setError(err.message || "Failed to load pipelines");
49
+ setPipelines([]);
50
+ } finally {
51
+ setLoading(false);
52
+ }
53
+ };
54
+
55
+ fetchPipelines();
56
+ }, []);
57
+
58
+ const breadcrumbs = [{ label: "Home", href: "/" }, { label: "Pipelines" }];
59
+
60
+ const openPipeline = (slug) => {
61
+ navigate(`/pipelines/${slug}`);
62
+ };
63
+
64
+ // Loading state
65
+ if (loading) {
66
+ return (
67
+ <Layout>
68
+ <PageSubheader breadcrumbs={breadcrumbs} />
69
+ <Box>
70
+ <Box mb="8">
71
+ <Heading size="6" mb="4">
72
+ Loading pipeline types...
73
+ </Heading>
74
+ </Box>
75
+ </Box>
76
+ </Layout>
77
+ );
78
+ }
79
+
80
+ // Error state
81
+ if (error) {
82
+ return (
83
+ <Layout>
84
+ <PageSubheader breadcrumbs={breadcrumbs} />
85
+ <Box>
86
+ <Box mb="8">
87
+ <Heading size="6" mb="4">
88
+ Failed
89
+ </Heading>
90
+ <Flex align="center" justify="center" className="min-h-64">
91
+ <Box className="text-center">
92
+ <Text size="2" color="gray" className="mt-2">
93
+ {error}
94
+ </Text>
95
+ </Box>
96
+ </Flex>
97
+ </Box>
98
+ </Box>
99
+ </Layout>
100
+ );
101
+ }
102
+
103
+ // Empty state
104
+ if (pipelines.length === 0) {
105
+ return (
106
+ <Layout>
107
+ <PageSubheader breadcrumbs={breadcrumbs} />
108
+ <Box>
109
+ <Box mb="8">
110
+ <Heading size="6" mb="4">
111
+ No pipelines available
112
+ </Heading>
113
+ <Flex align="center" justify="center" className="min-h-64">
114
+ <Box className="text-center">
115
+ <Text size="2" color="gray" className="mt-2">
116
+ Check back later for available pipelines.
117
+ </Text>
118
+ </Box>
119
+ </Flex>
120
+ </Box>
121
+ </Box>
122
+ </Layout>
123
+ );
124
+ }
125
+
126
+ // Main content with pipeline table
127
+ return (
128
+ <Layout>
129
+ <PageSubheader breadcrumbs={breadcrumbs}>
130
+ <Button size="md" variant="solid" onClick={() => setSidebarOpen(true)}>
131
+ <Plus className="h-4 w-4 mr-2" />
132
+ Add a Pipeline Type
133
+ </Button>
134
+ </PageSubheader>
135
+ <Box>
136
+ <Box mb="8">
137
+ <Heading size="6" mb="4">
138
+ Pipeline Types
139
+ </Heading>
140
+
141
+ <Table.Root radius="none">
142
+ <Table.Header>
143
+ <Table.Row>
144
+ <Table.ColumnHeaderCell>Pipeline Name</Table.ColumnHeaderCell>
145
+ <Table.ColumnHeaderCell>Description</Table.ColumnHeaderCell>
146
+ <Table.ColumnHeaderCell className="w-12"></Table.ColumnHeaderCell>
147
+ </Table.Row>
148
+ </Table.Header>
149
+
150
+ <Table.Body>
151
+ {pipelines.map((pipeline) => {
152
+ const pipelineName = pipeline.name;
153
+ const pipelineSlug = pipeline.slug;
154
+ const description = pipeline.description || "—";
155
+
156
+ return (
157
+ <Table.Row
158
+ key={pipelineSlug}
159
+ className="group cursor-pointer hover:bg-slate-50/50 transition-colors"
160
+ onClick={() => openPipeline(pipelineSlug)}
161
+ onKeyDown={(e) => {
162
+ if (e.key === " ") {
163
+ e.preventDefault();
164
+ openPipeline(pipelineSlug);
165
+ } else if (e.key === "Enter") {
166
+ openPipeline(pipelineSlug);
167
+ }
168
+ }}
169
+ tabIndex={0}
170
+ aria-label={`Open ${pipelineName} pipeline`}
171
+ >
172
+ <Table.Cell>
173
+ <Flex direction="column" gap="1">
174
+ <Text
175
+ size="2"
176
+ weight="medium"
177
+ className="text-slate-900"
178
+ >
179
+ {pipelineName}
180
+ </Text>
181
+ <Text size="1" className="text-slate-500">
182
+ {pipelineSlug}
183
+ </Text>
184
+ </Flex>
185
+ </Table.Cell>
186
+
187
+ <Table.Cell>
188
+ <Text size="2" className="text-slate-700">
189
+ {description}
190
+ </Text>
191
+ </Table.Cell>
192
+
193
+ <Table.Cell>
194
+ <Button
195
+ variant="ghost"
196
+ size="sm"
197
+ className="opacity-0 group-hover:opacity-100 transition-opacity"
198
+ aria-label={`View ${pipelineName} pipeline`}
199
+ >
200
+ <ChevronRight className="h-4 w-4" />
201
+ </Button>
202
+ </Table.Cell>
203
+ </Table.Row>
204
+ );
205
+ })}
206
+ </Table.Body>
207
+ </Table.Root>
208
+ </Box>
209
+ </Box>
210
+
211
+ <AddPipelineSidebar open={sidebarOpen} onOpenChange={setSidebarOpen} />
212
+ </Layout>
213
+ );
214
+ }