gitlab-ai-provider 5.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,4220 @@
1
// esbuild CommonJS-interop shim: provides a `__require` that forwards to the
// real `require` when one exists (CJS / Node with createRequire), and otherwise
// traps property access via a Proxy so that static member reads still reach a
// live `require` if one appears later. Calling it in a pure-ESM environment
// with no `require` throws, because dynamic require cannot be polyfilled.
var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
  // Re-check `require` on every property read so late-defined globals work.
  get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
}) : x)(function(x) {
  if (typeof require !== "undefined") return require.apply(this, arguments);
  throw Error('Dynamic require of "' + x + '" is not supported');
});
7
+
8
+ // src/gitlab-anthropic-language-model.ts
9
+ import Anthropic from "@anthropic-ai/sdk";
10
+
11
+ // src/gitlab-direct-access.ts
12
+ import { z } from "zod";
13
+
14
+ // src/gitlab-error.ts
15
var GitLabError = class _GitLabError extends Error {
  statusCode;
  responseBody;
  cause;
  /**
   * Error wrapper for failures talking to the GitLab API / AI gateway.
   *
   * @param options.message      Human-readable error message.
   * @param options.statusCode   Optional HTTP status code of the failed call.
   * @param options.responseBody Optional raw response body for diagnostics.
   * @param options.cause        Optional underlying error.
   */
  constructor(options) {
    const { message, statusCode, responseBody, cause } = options;
    super(message);
    this.name = "GitLabError";
    this.statusCode = statusCode;
    this.responseBody = responseBody;
    this.cause = cause;
    // Trim the constructor frame from stack traces where V8 supports it.
    if (Error.captureStackTrace) {
      Error.captureStackTrace(this, _GitLabError);
    }
  }
  /** Build a GitLabError from a fetch Response plus its (already-read) body. */
  static fromResponse(response, body) {
    const { status, statusText } = response;
    return new _GitLabError({
      message: `GitLab API error: ${status} ${statusText}`,
      statusCode: status,
      responseBody: body
    });
  }
  /** True for HTTP 401 (authentication failure). */
  isAuthError() {
    return this.statusCode === 401;
  }
  /** True for HTTP 429 (rate limited). */
  isRateLimitError() {
    return this.statusCode === 429;
  }
  /** True for HTTP 403 (forbidden). */
  isForbiddenError() {
    return this.statusCode === 403;
  }
  /** True for any 5xx status. */
  isServerError() {
    return this.statusCode !== void 0 && this.statusCode >= 500;
  }
  /**
   * Check if this error is a context overflow error (prompt too long).
   * These errors occur when the conversation exceeds the model's token limit.
   * Detection is heuristic: a 400 status whose message matches known phrases.
   */
  isContextOverflowError() {
    if (this.statusCode !== 400) {
      return false;
    }
    const text = this.message?.toLowerCase() || "";
    const tokenLimitHit = text.includes("tokens") && text.includes("maximum");
    return (
      text.includes("context overflow") ||
      text.includes("prompt is too long") ||
      text.includes("prompt too long") ||
      tokenLimitHit
    );
  }
};
60
+
61
+ // src/gitlab-direct-access.ts
62
// Zod schema for the payload returned by the GitLab direct-access endpoint
// (POST /api/v4/ai/third_party_agents/direct_access): a set of headers to
// forward to the AI gateway plus the bearer token itself. Parsing with zod
// rejects malformed responses before the token is cached.
var directAccessTokenSchema = z.object({
  headers: z.record(z.string()),
  token: z.string()
});
// Default GitLab-hosted AI gateway base URL; can be overridden via the
// aiGatewayUrl config option or the GITLAB_AI_GATEWAY_URL env var (see
// GitLabDirectAccessClient constructor).
var DEFAULT_AI_GATEWAY_URL = "https://cloud.gitlab.com";
67
/**
 * Client for the GitLab "direct access" flow: exchanges GitLab credentials
 * for a short-lived AI-gateway token and exposes the gateway proxy URLs.
 * Tokens are cached in memory for 25 minutes (server expiry is 30 minutes).
 */
var GitLabDirectAccessClient = class {
  config;
  fetchFn;
  aiGatewayUrl;
  cachedToken = null;
  // Epoch-millis deadline after which the cached token is considered stale.
  tokenExpiresAt = 0;
  constructor(config) {
    this.config = config;
    // Wrap the global `fetch` in a lambda instead of storing it directly:
    // calling an unbound `fetch` as `this.fetchFn(...)` sets `this` to the
    // client instance, which throws "Illegal invocation" in browsers and
    // some edge runtimes where fetch must be invoked on the global object.
    this.fetchFn = config.fetch ?? ((...args) => fetch(...args));
    this.aiGatewayUrl = config.aiGatewayUrl || process.env["GITLAB_AI_GATEWAY_URL"] || DEFAULT_AI_GATEWAY_URL;
  }
  /**
   * Get a direct access token for the Anthropic proxy.
   * Tokens are cached for 25 minutes (they expire after 30 minutes).
   * On a 401, the configured refreshApiKey callback (if any) is invoked once
   * and the request retried with forceRefresh.
   * @param forceRefresh - If true, ignores the cache and fetches a new token
   * @throws GitLabError on any HTTP or network failure
   */
  async getDirectAccessToken(forceRefresh = false) {
    const now = Date.now();
    if (!forceRefresh && this.cachedToken && this.tokenExpiresAt > now) {
      return this.cachedToken;
    }
    if (forceRefresh) {
      this.invalidateToken();
    }
    const url = `${this.config.instanceUrl}/api/v4/ai/third_party_agents/direct_access`;
    const requestBody = {};
    // Only send feature_flags when there is at least one flag to send.
    if (this.config.featureFlags && Object.keys(this.config.featureFlags).length > 0) {
      requestBody.feature_flags = this.config.featureFlags;
    }
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify(requestBody)
      });
      if (!response.ok) {
        const errorText = await response.text();
        // One-shot credential refresh on 401; `!forceRefresh` prevents an
        // infinite refresh/retry loop.
        if (response.status === 401 && this.config.refreshApiKey && !forceRefresh) {
          try {
            await this.config.refreshApiKey();
            return await this.getDirectAccessToken(true);
          } catch (refreshError) {
            // Refresh failed — surface the original 401, not refreshError.
            throw new GitLabError({
              message: `Failed to get direct access token: ${response.status} ${response.statusText} - ${errorText}`,
              statusCode: response.status,
              responseBody: errorText
            });
          }
        }
        if (response.status === 403) {
          // 403 gets an actionable message: most common causes are Duo not
          // enabled, no AI access for the account, or feature unavailable.
          throw new GitLabError({
            message: `Access denied to GitLab AI features (${this.config.instanceUrl}). This may indicate that: (1) GitLab Duo is not enabled on this instance, (2) Your account does not have access to AI features, or (3) The third-party agents feature is not available. Original error: ${response.status} ${response.statusText} - ${errorText}`,
            statusCode: response.status,
            responseBody: errorText
          });
        }
        throw new GitLabError({
          message: `Failed to get direct access token: ${response.status} ${response.statusText} - ${errorText}`,
          statusCode: response.status,
          responseBody: errorText
        });
      }
      const data = await response.json();
      // Validate the payload shape before trusting/caching it.
      const token = directAccessTokenSchema.parse(data);
      this.cachedToken = token;
      this.tokenExpiresAt = now + 25 * 60 * 1e3;
      return token;
    } catch (error) {
      if (error instanceof GitLabError) {
        throw error;
      }
      // Wrap network/parse failures so callers only ever see GitLabError.
      throw new GitLabError({
        message: `Failed to get direct access token: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Get the Anthropic proxy base URL (trailing slash is significant for the
   * Anthropic SDK's path joining).
   */
  getAnthropicProxyUrl() {
    const baseUrl = this.aiGatewayUrl.replace(/\/$/, "");
    return `${baseUrl}/ai/v1/proxy/anthropic/`;
  }
  /**
   * Get the OpenAI proxy base URL
   * Note: The OpenAI SDK expects a base URL like https://api.openai.com/v1
   * and appends paths like /chat/completions. So we need /v1 at the end.
   */
  getOpenAIProxyUrl() {
    const baseUrl = this.aiGatewayUrl.replace(/\/$/, "");
    return `${baseUrl}/ai/v1/proxy/openai/v1`;
  }
  /**
   * Invalidate the cached token so the next request fetches a fresh one.
   */
  invalidateToken() {
    this.cachedToken = null;
    this.tokenExpiresAt = 0;
  }
};
171
+
172
+ // src/gitlab-anthropic-language-model.ts
173
/**
 * AI SDK LanguageModel (specificationVersion "v2") backed by the Anthropic
 * Messages API reached through the GitLab AI gateway proxy. Handles prompt /
 * tool conversion between the AI SDK and Anthropic formats, one-shot token
 * refresh on auth failures, and streaming via the Anthropic SDK event stream.
 */
var GitLabAnthropicLanguageModel = class {
  specificationVersion = "v2";
  modelId;
  supportedUrls = {};
  config;
  directAccessClient;
  // Most recently constructed Anthropic client; rebuilt on every
  // getAnthropicClient() call so it always carries a current token.
  anthropicClient = null;
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.directAccessClient = new GitLabDirectAccessClient({
      instanceUrl: config.instanceUrl,
      getHeaders: config.getHeaders,
      refreshApiKey: config.refreshApiKey,
      fetch: config.fetch,
      featureFlags: config.featureFlags,
      aiGatewayUrl: config.aiGatewayUrl
    });
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Get or create an Anthropic client with valid credentials.
   * The gateway's `x-api-key` header is stripped and the direct-access token
   * is passed as authToken (bearer auth) instead; apiKey is explicitly null.
   * @param forceRefresh - If true, forces a token refresh before creating the client
   */
  async getAnthropicClient(forceRefresh = false) {
    const tokenData = await this.directAccessClient.getDirectAccessToken(forceRefresh);
    const { "x-api-key": _removed, ...filteredHeaders } = tokenData.headers;
    // Caller-supplied gateway headers win over token-provided headers.
    const mergedHeaders = {
      ...filteredHeaders,
      ...this.config.aiGatewayHeaders
    };
    this.anthropicClient = new Anthropic({
      apiKey: null,
      authToken: tokenData.token,
      baseURL: this.directAccessClient.getAnthropicProxyUrl(),
      defaultHeaders: mergedHeaders
    });
    return this.anthropicClient;
  }
  /**
   * Check if an error is a token-related authentication error that can be
   * retried: any 401, or a message mentioning an expired/revoked/invalid token.
   */
  isTokenError(error) {
    if (error instanceof Anthropic.APIError) {
      if (error.status === 401) {
        return true;
      }
      const message = error.message?.toLowerCase() || "";
      if (message.includes("token") && (message.includes("expired") || message.includes("revoked") || message.includes("invalid"))) {
        return true;
      }
    }
    return false;
  }
  /**
   * Check if an error is a context overflow error (prompt too long).
   * These should NOT trigger token refresh and should be reported to the user.
   * Heuristic: 400 status plus known message phrases.
   */
  isContextOverflowError(error) {
    if (error instanceof Anthropic.APIError) {
      if (error.status === 400) {
        const message = error.message?.toLowerCase() || "";
        if (message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum")) {
          return true;
        }
      }
    }
    return false;
  }
  /**
   * Convert AI SDK tools to Anthropic tool format.
   * Non-"function" tools are dropped; a missing JSON schema becomes an empty
   * object schema. Returns undefined when there is nothing to send.
   */
  convertTools(tools) {
    if (!tools || tools.length === 0) {
      return void 0;
    }
    return tools.filter((tool) => tool.type === "function").map((tool) => {
      const schema = tool.inputSchema;
      return {
        name: tool.name,
        description: tool.description || "",
        input_schema: {
          type: "object",
          properties: schema?.properties || {},
          required: schema?.required || []
        }
      };
    });
  }
  /**
   * Convert AI SDK tool choice to Anthropic format.
   * "none" maps to undefined (no tool_choice field is sent); "required" maps
   * to Anthropic's "any".
   */
  convertToolChoice(toolChoice) {
    if (!toolChoice) {
      return void 0;
    }
    switch (toolChoice.type) {
      case "auto":
        return { type: "auto" };
      case "none":
        return void 0;
      case "required":
        return { type: "any" };
      case "tool":
        return { type: "tool", name: toolChoice.toolName };
      default:
        return void 0;
    }
  }
  /**
   * Convert AI SDK prompt to Anthropic messages format.
   * - System messages are lifted out; a later system message overwrites an
   *   earlier one (only the last survives).
   * - User "file" parts are silently dropped (empty branch below).
   * - Tool results are emitted as user-role tool_result blocks, with
   *   is_error set for "error-*" output types.
   * - Messages that end up with no content are skipped entirely.
   */
  convertPrompt(prompt) {
    let systemMessage;
    const messages = [];
    for (const message of prompt) {
      if (message.role === "system") {
        systemMessage = message.content;
        continue;
      }
      if (message.role === "user") {
        const content = [];
        for (const part of message.content) {
          if (part.type === "text") {
            content.push({ type: "text", text: part.text });
          } else if (part.type === "file") {
            // File parts are intentionally ignored (not supported here).
          }
        }
        if (content.length > 0) {
          messages.push({ role: "user", content });
        }
      } else if (message.role === "assistant") {
        const content = [];
        for (const part of message.content) {
          if (part.type === "text") {
            content.push({ type: "text", text: part.text });
          } else if (part.type === "tool-call") {
            content.push({
              type: "tool_use",
              id: part.toolCallId,
              name: part.toolName,
              // Anthropic expects a parsed object for tool input.
              input: typeof part.input === "string" ? JSON.parse(part.input) : part.input
            });
          }
        }
        if (content.length > 0) {
          messages.push({ role: "assistant", content });
        }
      } else if (message.role === "tool") {
        const content = [];
        for (const part of message.content) {
          if (part.type === "tool-result") {
            // Normalize the various AI SDK output kinds to a string payload.
            let resultContent;
            if (part.output.type === "text") {
              resultContent = part.output.value;
            } else if (part.output.type === "json") {
              resultContent = JSON.stringify(part.output.value);
            } else if (part.output.type === "error-text") {
              resultContent = part.output.value;
            } else if (part.output.type === "error-json") {
              resultContent = JSON.stringify(part.output.value);
            } else {
              resultContent = JSON.stringify(part.output);
            }
            content.push({
              type: "tool_result",
              tool_use_id: part.toolCallId,
              content: resultContent,
              is_error: part.output.type.startsWith("error")
            });
          }
        }
        if (content.length > 0) {
          // Anthropic requires tool results to arrive in a user-role message.
          messages.push({ role: "user", content });
        }
      }
    }
    return { system: systemMessage, messages };
  }
  /**
   * Convert Anthropic finish reason to AI SDK format.
   */
  convertFinishReason(stopReason) {
    switch (stopReason) {
      case "end_turn":
        return "stop";
      case "stop_sequence":
        return "stop";
      case "max_tokens":
        return "length";
      case "tool_use":
        return "tool-calls";
      default:
        return "unknown";
    }
  }
  /** Non-streaming generation entry point (AI SDK LanguageModelV2). */
  async doGenerate(options) {
    return this.doGenerateWithRetry(options, false);
  }
  /**
   * One-shot-retry wrapper around a non-streaming Anthropic request.
   * On a token error the cached token is invalidated and the call retried
   * exactly once with isRetry=true (which also forces a client refresh).
   */
  async doGenerateWithRetry(options, isRetry) {
    const client = await this.getAnthropicClient(isRetry);
    const { system, messages } = this.convertPrompt(options.prompt);
    const tools = this.convertTools(options.tools);
    // toolChoice "none" short-circuits to undefined before conversion.
    const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
    const anthropicModel = this.config.anthropicModel || "claude-sonnet-4-5-20250929";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    try {
      const response = await client.messages.create({
        model: anthropicModel,
        max_tokens: maxTokens,
        system,
        messages,
        tools,
        // tool_choice is only meaningful when tools are present.
        tool_choice: tools ? toolChoice : void 0,
        temperature: options.temperature,
        top_p: options.topP,
        stop_sequences: options.stopSequences
      });
      // Map Anthropic content blocks back to AI SDK content parts.
      const content = [];
      for (const block of response.content) {
        if (block.type === "text") {
          content.push({
            type: "text",
            text: block.text
          });
        } else if (block.type === "tool_use") {
          content.push({
            type: "tool-call",
            toolCallId: block.id,
            toolName: block.name,
            // AI SDK expects tool input as a JSON string.
            input: JSON.stringify(block.input)
          });
        }
      }
      const usage = {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens
      };
      return {
        content,
        finishReason: this.convertFinishReason(response.stop_reason),
        usage,
        warnings: []
      };
    } catch (error) {
      // Context overflow: user-actionable, never retried.
      if (this.isContextOverflowError(error)) {
        const apiError = error;
        throw new GitLabError({
          message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
          statusCode: 400,
          cause: error
        });
      }
      // Auth/token failure: invalidate and retry once.
      if (!isRetry && this.isTokenError(error)) {
        this.directAccessClient.invalidateToken();
        return this.doGenerateWithRetry(options, true);
      }
      if (error instanceof Anthropic.APIError) {
        throw new GitLabError({
          message: `Anthropic API error: ${error.message}`,
          statusCode: error.status,
          cause: error
        });
      }
      throw error;
    }
  }
  /** Streaming generation entry point (AI SDK LanguageModelV2). */
  async doStream(options) {
    return this.doStreamWithRetry(options, false);
  }
  /**
   * Streaming request with the same one-shot token-refresh semantics as
   * doGenerateWithRetry, except that mid-stream errors cannot be retried
   * transparently: a token error is surfaced as a GitLabError with the
   * sentinel message "TOKEN_REFRESH_NEEDED" for the caller to handle.
   *
   * Translates Anthropic stream events into AI SDK stream parts:
   *   message_start -> response-metadata, content_block_start/delta/stop ->
   *   text-*/tool-input-* parts, message_delta -> usage/finish reason,
   *   message_stop -> finish. In-flight blocks are tracked per Anthropic
   *   content-block index in `contentBlocks`.
   */
  async doStreamWithRetry(options, isRetry) {
    const client = await this.getAnthropicClient(isRetry);
    const { system, messages } = this.convertPrompt(options.prompt);
    const tools = this.convertTools(options.tools);
    const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
    const anthropicModel = this.config.anthropicModel || "claude-sonnet-4-5-20250929";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    const requestBody = {
      model: anthropicModel,
      max_tokens: maxTokens,
      system,
      messages,
      tools,
      tool_choice: tools ? toolChoice : void 0,
      temperature: options.temperature,
      top_p: options.topP,
      stop_sequences: options.stopSequences,
      stream: true
    };
    // Captured for use inside the ReadableStream callbacks.
    const self = this;
    const stream = new ReadableStream({
      start: async (controller) => {
        // Open content blocks keyed by Anthropic's event.index.
        const contentBlocks = {};
        const usage = {
          inputTokens: 0,
          outputTokens: 0,
          totalTokens: 0
        };
        let finishReason = "unknown";
        try {
          const anthropicStream = client.messages.stream(requestBody, {
            signal: options.abortSignal
          });
          controller.enqueue({
            type: "stream-start",
            warnings: []
          });
          // Bridge the SDK's event-emitter stream into this promise; it
          // settles on the stream's "end" or "error" event.
          await new Promise((resolve2, reject) => {
            anthropicStream.on("streamEvent", (event) => {
              try {
                switch (event.type) {
                  case "message_start":
                    if (event.message.usage) {
                      usage.inputTokens = event.message.usage.input_tokens;
                    }
                    controller.enqueue({
                      type: "response-metadata",
                      id: event.message.id,
                      modelId: event.message.model
                    });
                    break;
                  case "content_block_start":
                    if (event.content_block.type === "text") {
                      const textId = `text-${event.index}`;
                      contentBlocks[event.index] = { type: "text", id: textId };
                      controller.enqueue({
                        type: "text-start",
                        id: textId
                      });
                    } else if (event.content_block.type === "tool_use") {
                      // Accumulate the tool's JSON input across deltas.
                      contentBlocks[event.index] = {
                        type: "tool-call",
                        toolCallId: event.content_block.id,
                        toolName: event.content_block.name,
                        input: ""
                      };
                      controller.enqueue({
                        type: "tool-input-start",
                        id: event.content_block.id,
                        toolName: event.content_block.name
                      });
                    }
                    break;
                  case "content_block_delta": {
                    const block = contentBlocks[event.index];
                    if (event.delta.type === "text_delta" && block?.type === "text") {
                      controller.enqueue({
                        type: "text-delta",
                        id: block.id,
                        delta: event.delta.text
                      });
                    } else if (event.delta.type === "input_json_delta" && block?.type === "tool-call") {
                      block.input += event.delta.partial_json;
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: block.toolCallId,
                        delta: event.delta.partial_json
                      });
                    }
                    break;
                  }
                  case "content_block_stop": {
                    const block = contentBlocks[event.index];
                    if (block?.type === "text") {
                      controller.enqueue({
                        type: "text-end",
                        id: block.id
                      });
                    } else if (block?.type === "tool-call") {
                      controller.enqueue({
                        type: "tool-input-end",
                        id: block.toolCallId
                      });
                      // Emit the completed tool call; empty input becomes
                      // a valid empty JSON object.
                      controller.enqueue({
                        type: "tool-call",
                        toolCallId: block.toolCallId,
                        toolName: block.toolName,
                        input: block.input === "" ? "{}" : block.input
                      });
                    }
                    delete contentBlocks[event.index];
                    break;
                  }
                  case "message_delta":
                    if (event.usage) {
                      usage.outputTokens = event.usage.output_tokens;
                      usage.totalTokens = (usage.inputTokens || 0) + event.usage.output_tokens;
                    }
                    if (event.delta.stop_reason) {
                      finishReason = self.convertFinishReason(event.delta.stop_reason);
                    }
                    break;
                  case "message_stop": {
                    controller.enqueue({
                      type: "finish",
                      finishReason,
                      usage
                    });
                    break;
                  }
                }
              } catch (error) {
                // Per-event failures are surfaced as error parts; the
                // stream itself keeps running.
                controller.enqueue({
                  type: "error",
                  error: error instanceof Error ? error : new Error(String(error))
                });
              }
            });
            anthropicStream.on("end", () => {
              resolve2();
            });
            anthropicStream.on("error", (error) => {
              reject(error);
            });
          });
          // Flush any tool calls whose content_block_stop never arrived.
          for (const [, block] of Object.entries(contentBlocks)) {
            if (block.type === "tool-call") {
              controller.enqueue({
                type: "tool-input-end",
                id: block.toolCallId
              });
              controller.enqueue({
                type: "tool-call",
                toolCallId: block.toolCallId,
                toolName: block.toolName,
                input: block.input === "" ? "{}" : block.input
              });
            }
          }
          controller.close();
        } catch (error) {
          // Flush in-flight tool calls before reporting the failure so
          // consumers see balanced start/end parts.
          for (const [, block] of Object.entries(contentBlocks)) {
            if (block.type === "tool-call") {
              controller.enqueue({
                type: "tool-input-end",
                id: block.toolCallId
              });
              controller.enqueue({
                type: "tool-call",
                toolCallId: block.toolCallId,
                toolName: block.toolName,
                input: block.input === "" ? "{}" : block.input
              });
            }
          }
          if (self.isContextOverflowError(error)) {
            const apiError = error;
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
                statusCode: 400,
                cause: error
              })
            });
            controller.close();
            return;
          }
          if (!isRetry && self.isTokenError(error)) {
            self.directAccessClient.invalidateToken();
            // Sentinel error: the caller is expected to restart the stream.
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: "TOKEN_REFRESH_NEEDED",
                cause: error
              })
            });
            controller.close();
            return;
          }
          if (error instanceof Anthropic.APIError) {
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Anthropic API error: ${error.message}`,
                statusCode: error.status,
                cause: error
              })
            });
          } else {
            controller.enqueue({
              type: "error",
              error
            });
          }
          controller.close();
        }
      }
    });
    return {
      stream,
      request: { body: requestBody }
    };
  }
};
671
+
672
+ // src/gitlab-openai-language-model.ts
673
+ import OpenAI from "openai";
674
+
675
+ // src/model-mappings.ts
676
/**
 * Registry mapping user-facing duo-* model IDs to their backing provider and
 * provider-specific model identifier. OpenAI entries additionally carry
 * `openaiApiType` ("chat" for Chat Completions, "responses" for the
 * Responses API). Insertion order matters for getValidModelsForProvider.
 */
var MODEL_MAPPINGS = {
  // Anthropic models
  "duo-chat-opus-4-6": { provider: "anthropic", model: "claude-opus-4-6" },
  "duo-chat-sonnet-4-6": { provider: "anthropic", model: "claude-sonnet-4-6" },
  "duo-chat-opus-4-5": { provider: "anthropic", model: "claude-opus-4-5-20251101" },
  "duo-chat-sonnet-4-5": { provider: "anthropic", model: "claude-sonnet-4-5-20250929" },
  "duo-chat-haiku-4-5": { provider: "anthropic", model: "claude-haiku-4-5-20251001" },
  // OpenAI models - Chat Completions API
  "duo-chat-gpt-5-1": { provider: "openai", model: "gpt-5.1-2025-11-13", openaiApiType: "chat" },
  "duo-chat-gpt-5-2": { provider: "openai", model: "gpt-5.2-2025-12-11", openaiApiType: "chat" },
  "duo-chat-gpt-5-mini": {
    provider: "openai",
    model: "gpt-5-mini-2025-08-07",
    openaiApiType: "chat"
  },
  // OpenAI models - Responses API (Codex models)
  "duo-chat-gpt-5-codex": { provider: "openai", model: "gpt-5-codex", openaiApiType: "responses" },
  "duo-chat-gpt-5-2-codex": {
    provider: "openai",
    model: "gpt-5.2-codex",
    openaiApiType: "responses"
  },
  "duo-chat-gpt-5-3-codex": {
    provider: "openai",
    model: "gpt-5.3-codex",
    openaiApiType: "responses"
  },
  // Duo Agent Platform model (server-side agentic via DWS WebSocket).
  // This is the single user-facing model ID. The actual underlying model ref
  // is resolved dynamically at runtime via GitLabModelDiscovery.
  "duo-workflow": { provider: "workflow", model: "default" },
  // Internal model refs — kept for backwards compatibility and direct use.
  // Not intended as user-facing model IDs.
  "duo-workflow-default": { provider: "workflow", model: "default" },
  "duo-workflow-sonnet-4-5": {
    provider: "workflow",
    model: "anthropic/claude-sonnet-4-5-20250929"
  },
  "duo-workflow-sonnet-4-6": { provider: "workflow", model: "claude_sonnet_4_6" },
  "duo-workflow-opus-4-5": {
    provider: "workflow",
    model: "anthropic/claude-opus-4-5-20251101"
  },
  "duo-workflow-haiku-4-5": { provider: "workflow", model: "claude_haiku_4_5_20251001" },
  "duo-workflow-opus-4-6": { provider: "workflow", model: "claude_opus_4_6_20260205" }
};
/** Look up the full mapping entry for a model ID (undefined if unknown). */
function getModelMapping(modelId) {
  return MODEL_MAPPINGS[modelId];
}
/** Provider ("anthropic" | "openai" | "workflow") for a model ID, if mapped. */
function getProviderForModelId(modelId) {
  return getModelMapping(modelId)?.provider;
}
/** All backing model names registered for a provider, in registry order. */
function getValidModelsForProvider(provider) {
  const models = [];
  for (const mapping of Object.values(MODEL_MAPPINGS)) {
    if (mapping.provider === provider) {
      models.push(mapping.model);
    }
  }
  return models;
}
/** Anthropic model name for a model ID; undefined for non-Anthropic IDs. */
function getAnthropicModelForModelId(modelId) {
  const mapping = getModelMapping(modelId);
  if (mapping?.provider !== "anthropic") {
    return void 0;
  }
  return mapping.model;
}
/** OpenAI model name for a model ID; undefined for non-OpenAI IDs. */
function getOpenAIModelForModelId(modelId) {
  const mapping = getModelMapping(modelId);
  if (mapping?.provider !== "openai") {
    return void 0;
  }
  return mapping.model;
}
/** OpenAI API flavor for a model ID; unknown IDs default to "chat". */
function getOpenAIApiType(modelId) {
  return getModelMapping(modelId)?.openaiApiType ?? "chat";
}
/** True when the model ID routes through the OpenAI Responses API. */
function isResponsesApiModel(modelId) {
  return getOpenAIApiType(modelId) === "responses";
}
/** True when the model ID is served by the Duo workflow provider. */
function isWorkflowModel(modelId) {
  return getModelMapping(modelId)?.provider === "workflow";
}
/** Workflow model ref for a model ID; undefined for non-workflow IDs. */
function getWorkflowModelRef(modelId) {
  const mapping = getModelMapping(modelId);
  if (mapping?.provider !== "workflow") {
    return void 0;
  }
  return mapping.model;
}
// Derived lookup: model ID -> Anthropic model name, Anthropic entries only.
var MODEL_ID_TO_ANTHROPIC_MODEL = Object.fromEntries(
  Object.entries(MODEL_MAPPINGS)
    .filter(([, mapping]) => mapping.provider === "anthropic")
    .map(([id, mapping]) => [id, mapping.model])
);
756
+
757
+ // src/gitlab-openai-language-model.ts
758
+ var GitLabOpenAILanguageModel = class {
759
+ specificationVersion = "v2";
760
+ modelId;
761
+ supportedUrls = {};
762
+ config;
763
+ directAccessClient;
764
+ useResponsesApi;
765
+ openaiClient = null;
766
+ constructor(modelId, config) {
767
+ this.modelId = modelId;
768
+ this.config = config;
769
+ this.useResponsesApi = config.useResponsesApi ?? isResponsesApiModel(modelId);
770
+ this.directAccessClient = new GitLabDirectAccessClient({
771
+ instanceUrl: config.instanceUrl,
772
+ getHeaders: config.getHeaders,
773
+ refreshApiKey: config.refreshApiKey,
774
+ fetch: config.fetch,
775
+ featureFlags: config.featureFlags,
776
+ aiGatewayUrl: config.aiGatewayUrl
777
+ });
778
+ }
779
+ get provider() {
780
+ return this.config.provider;
781
+ }
782
+ async getOpenAIClient(forceRefresh = false) {
783
+ const tokenData = await this.directAccessClient.getDirectAccessToken(forceRefresh);
784
+ const { "x-api-key": _removed, ...filteredHeaders } = tokenData.headers;
785
+ const mergedHeaders = {
786
+ ...filteredHeaders,
787
+ ...this.config.aiGatewayHeaders
788
+ };
789
+ this.openaiClient = new OpenAI({
790
+ apiKey: tokenData.token,
791
+ baseURL: this.directAccessClient.getOpenAIProxyUrl(),
792
+ defaultHeaders: mergedHeaders
793
+ });
794
+ return this.openaiClient;
795
+ }
796
+ isTokenError(error) {
797
+ if (error instanceof OpenAI.APIError) {
798
+ if (error.status === 401) {
799
+ return true;
800
+ }
801
+ const message = error.message?.toLowerCase() || "";
802
+ if (message.includes("token") && (message.includes("expired") || message.includes("revoked") || message.includes("invalid"))) {
803
+ return true;
804
+ }
805
+ }
806
+ return false;
807
+ }
808
+ /**
809
+ * Check if an error is a context overflow error (prompt too long)
810
+ * These should NOT trigger token refresh and should be reported to the user.
811
+ */
812
+ isContextOverflowError(error) {
813
+ if (error instanceof OpenAI.APIError) {
814
+ if (error.status === 400) {
815
+ const message = error.message?.toLowerCase() || "";
816
+ if (message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum")) {
817
+ return true;
818
+ }
819
+ }
820
+ }
821
+ return false;
822
+ }
823
+ convertTools(tools) {
824
+ if (!tools || tools.length === 0) {
825
+ return void 0;
826
+ }
827
+ return tools.filter((tool) => tool.type === "function").map((tool) => {
828
+ const schema = tool.inputSchema;
829
+ return {
830
+ type: "function",
831
+ function: {
832
+ name: tool.name,
833
+ description: tool.description || "",
834
+ // Ensure the schema has type: 'object' as OpenAI requires it
835
+ parameters: {
836
+ type: "object",
837
+ ...schema
838
+ }
839
+ }
840
+ };
841
+ });
842
+ }
843
+ convertToolChoice(toolChoice) {
844
+ if (!toolChoice) {
845
+ return void 0;
846
+ }
847
+ switch (toolChoice.type) {
848
+ case "auto":
849
+ return "auto";
850
+ case "none":
851
+ return "none";
852
+ case "required":
853
+ return "required";
854
+ case "tool":
855
+ return { type: "function", function: { name: toolChoice.toolName } };
856
+ default:
857
+ return void 0;
858
+ }
859
+ }
860
+ convertPrompt(prompt) {
861
+ const messages = [];
862
+ for (const message of prompt) {
863
+ if (message.role === "system") {
864
+ messages.push({ role: "system", content: message.content });
865
+ continue;
866
+ }
867
+ if (message.role === "user") {
868
+ const textParts = message.content.filter((part) => part.type === "text").map((part) => part.text);
869
+ if (textParts.length > 0) {
870
+ messages.push({ role: "user", content: textParts.join("\n") });
871
+ }
872
+ } else if (message.role === "assistant") {
873
+ const textParts = [];
874
+ const toolCalls = [];
875
+ for (const part of message.content) {
876
+ if (part.type === "text") {
877
+ textParts.push(part.text);
878
+ } else if (part.type === "tool-call") {
879
+ toolCalls.push({
880
+ id: part.toolCallId,
881
+ type: "function",
882
+ function: {
883
+ name: part.toolName,
884
+ arguments: typeof part.input === "string" ? part.input : JSON.stringify(part.input)
885
+ }
886
+ });
887
+ }
888
+ }
889
+ const assistantMessage = {
890
+ role: "assistant",
891
+ content: textParts.length > 0 ? textParts.join("\n") : null
892
+ };
893
+ if (toolCalls.length > 0) {
894
+ assistantMessage.tool_calls = toolCalls;
895
+ }
896
+ messages.push(assistantMessage);
897
+ } else if (message.role === "tool") {
898
+ for (const part of message.content) {
899
+ if (part.type === "tool-result") {
900
+ let resultContent;
901
+ if (part.output.type === "text") {
902
+ resultContent = part.output.value;
903
+ } else if (part.output.type === "json") {
904
+ resultContent = JSON.stringify(part.output.value);
905
+ } else if (part.output.type === "error-text") {
906
+ resultContent = part.output.value;
907
+ } else if (part.output.type === "error-json") {
908
+ resultContent = JSON.stringify(part.output.value);
909
+ } else {
910
+ resultContent = JSON.stringify(part.output);
911
+ }
912
+ messages.push({
913
+ role: "tool",
914
+ tool_call_id: part.toolCallId,
915
+ content: resultContent
916
+ });
917
+ }
918
+ }
919
+ }
920
+ }
921
+ return messages;
922
+ }
923
+ convertFinishReason(finishReason) {
924
+ switch (finishReason) {
925
+ case "stop":
926
+ return "stop";
927
+ case "length":
928
+ return "length";
929
+ case "tool_calls":
930
+ return "tool-calls";
931
+ case "content_filter":
932
+ return "content-filter";
933
+ default:
934
+ return "unknown";
935
+ }
936
+ }
937
+ /**
938
+ * Convert tools to Responses API format
939
+ */
940
+ convertToolsForResponses(tools) {
941
+ if (!tools || tools.length === 0) {
942
+ return void 0;
943
+ }
944
+ return tools.filter((tool) => tool.type === "function").map((tool) => {
945
+ const schema = { ...tool.inputSchema };
946
+ delete schema["$schema"];
947
+ return {
948
+ type: "function",
949
+ name: tool.name,
950
+ description: tool.description || "",
951
+ parameters: schema,
952
+ strict: false
953
+ };
954
+ });
955
+ }
956
  /**
   * Convert an AI SDK prompt into the Responses API `input` item list.
   *
   * System messages are skipped here (they are carried separately via
   * `instructions` — see extractSystemInstructions). User text parts become
   * a single "message" item; assistant tool calls become "function_call"
   * items; tool results become "function_call_output" items.
   */
  convertPromptForResponses(prompt) {
    const items = [];
    for (const message of prompt) {
      if (message.role === "system") {
        continue;
      }
      if (message.role === "user") {
        // Only text parts are forwarded; other user part types are dropped.
        const textParts = message.content.filter((part) => part.type === "text").map((part) => part.text);
        if (textParts.length > 0) {
          items.push({
            type: "message",
            role: "user",
            content: textParts.map((text) => ({ type: "input_text", text }))
          });
        }
      } else if (message.role === "assistant") {
        const textParts = [];
        for (const part of message.content) {
          if (part.type === "text") {
            textParts.push(part.text);
          } else if (part.type === "tool-call") {
            // Tool-call input may arrive pre-serialized or as an object.
            items.push({
              type: "function_call",
              call_id: part.toolCallId,
              name: part.toolName,
              arguments: typeof part.input === "string" ? part.input : JSON.stringify(part.input)
            });
          }
        }
        // NOTE(review): function_call items are emitted before the assistant
        // text message even if the text preceded the tool call in the
        // original content — confirm this ordering is acceptable to the API.
        if (textParts.length > 0) {
          items.push({
            type: "message",
            role: "assistant",
            content: [{ type: "output_text", text: textParts.join("\n"), annotations: [] }]
          });
        }
      } else if (message.role === "tool") {
        for (const part of message.content) {
          if (part.type === "tool-result") {
            // Flatten each tool output variant to a plain string.
            let resultContent;
            if (part.output.type === "text") {
              resultContent = part.output.value;
            } else if (part.output.type === "json") {
              resultContent = JSON.stringify(part.output.value);
            } else if (part.output.type === "error-text") {
              resultContent = part.output.value;
            } else if (part.output.type === "error-json") {
              resultContent = JSON.stringify(part.output.value);
            } else {
              // Unknown variant: serialize the whole output object.
              resultContent = JSON.stringify(part.output);
            }
            items.push({
              type: "function_call_output",
              call_id: part.toolCallId,
              output: resultContent
            });
          }
        }
      }
    }
    return items;
  }
1021
+ /**
1022
+ * Extract system instructions from prompt
1023
+ */
1024
+ extractSystemInstructions(prompt) {
1025
+ const systemMessages = prompt.filter((m) => m.role === "system").map((m) => m.content).join("\n");
1026
+ return systemMessages || void 0;
1027
+ }
1028
+ /**
1029
+ * Convert Responses API status to finish reason
1030
+ * Note: Responses API returns 'completed' even when making tool calls,
1031
+ * so we need to check the content for tool calls separately.
1032
+ */
1033
+ convertResponsesStatus(status, hasToolCalls = false) {
1034
+ if (hasToolCalls) {
1035
+ return "tool-calls";
1036
+ }
1037
+ switch (status) {
1038
+ case "completed":
1039
+ return "stop";
1040
+ case "incomplete":
1041
+ return "length";
1042
+ case "cancelled":
1043
+ return "stop";
1044
+ case "failed":
1045
+ return "error";
1046
+ default:
1047
+ return "unknown";
1048
+ }
1049
+ }
1050
+ async doGenerate(options) {
1051
+ if (this.useResponsesApi) {
1052
+ return this.doGenerateWithResponsesApi(options, false);
1053
+ }
1054
+ return this.doGenerateWithChatApi(options, false);
1055
+ }
1056
  /**
   * Non-streaming generation via the OpenAI Chat Completions API.
   *
   * @param options - AI SDK generate options (prompt, tools, sampling params).
   * @param isRetry - True when this call is the single retry performed after
   *   invalidating a stale GitLab-issued token.
   * @returns AI SDK result: content parts, finish reason, token usage.
   */
  async doGenerateWithChatApi(options, isRetry) {
    const client = await this.getOpenAIClient(isRetry);
    const messages = this.convertPrompt(options.prompt);
    const tools = this.convertTools(options.tools);
    // "none" is expressed by omitting tool_choice rather than sending it.
    const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
    const openaiModel = this.config.openaiModel || "gpt-4o";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    try {
      const response = await client.chat.completions.create({
        model: openaiModel,
        max_completion_tokens: maxTokens,
        messages,
        tools,
        // tool_choice is only meaningful when tools are present.
        tool_choice: tools ? toolChoice : void 0,
        temperature: options.temperature,
        top_p: options.topP,
        stop: options.stopSequences
      });
      const choice = response.choices[0];
      const content = [];
      if (choice?.message.content) {
        content.push({ type: "text", text: choice.message.content });
      }
      if (choice?.message.tool_calls) {
        for (const toolCall of choice.message.tool_calls) {
          if (toolCall.type === "function") {
            content.push({
              type: "tool-call",
              toolCallId: toolCall.id,
              toolName: toolCall.function.name,
              input: toolCall.function.arguments
            });
          }
        }
      }
      const usage = {
        inputTokens: response.usage?.prompt_tokens || 0,
        outputTokens: response.usage?.completion_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0
      };
      return {
        content,
        finishReason: this.convertFinishReason(choice?.finish_reason),
        usage,
        warnings: []
      };
    } catch (error) {
      // Context overflow becomes a user-actionable GitLabError (HTTP 400).
      if (this.isContextOverflowError(error)) {
        const apiError = error;
        throw new GitLabError({
          message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
          statusCode: 400,
          cause: error
        });
      }
      // Stale token: invalidate and retry exactly once (isRetry guards recursion).
      if (!isRetry && this.isTokenError(error)) {
        this.directAccessClient.invalidateToken();
        return this.doGenerateWithChatApi(options, true);
      }
      if (error instanceof OpenAI.APIError) {
        throw new GitLabError({
          message: `OpenAI API error: ${error.message}`,
          statusCode: error.status,
          cause: error
        });
      }
      throw error;
    }
  }
1125
  /**
   * Non-streaming generation via the OpenAI Responses API.
   *
   * @param options - AI SDK generate options (prompt, tools, sampling params).
   * @param isRetry - True when this call is the single retry performed after
   *   invalidating a stale GitLab-issued token.
   * @returns AI SDK result: content parts, finish reason, token usage.
   */
  async doGenerateWithResponsesApi(options, isRetry) {
    const client = await this.getOpenAIClient(isRetry);
    const input = this.convertPromptForResponses(options.prompt);
    const tools = this.convertToolsForResponses(options.tools);
    // System messages travel as `instructions`, not as input items.
    const instructions = this.extractSystemInstructions(options.prompt);
    const openaiModel = this.config.openaiModel || "gpt-5-codex";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    try {
      const response = await client.responses.create({
        model: openaiModel,
        input,
        instructions,
        tools,
        max_output_tokens: maxTokens,
        temperature: options.temperature,
        top_p: options.topP,
        // Do not persist the response server-side.
        store: false
      });
      const content = [];
      let hasToolCalls = false;
      for (const item of response.output || []) {
        if (item.type === "message" && item.role === "assistant") {
          for (const contentItem of item.content || []) {
            if (contentItem.type === "output_text") {
              content.push({ type: "text", text: contentItem.text });
            }
          }
        } else if (item.type === "function_call") {
          // Track tool calls: the API still reports status "completed" for them.
          hasToolCalls = true;
          content.push({
            type: "tool-call",
            toolCallId: item.call_id,
            toolName: item.name,
            input: item.arguments
          });
        }
      }
      const usage = {
        inputTokens: response.usage?.input_tokens || 0,
        outputTokens: response.usage?.output_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0
      };
      return {
        content,
        finishReason: this.convertResponsesStatus(response.status, hasToolCalls),
        usage,
        warnings: []
      };
    } catch (error) {
      // Context overflow becomes a user-actionable GitLabError (HTTP 400).
      if (this.isContextOverflowError(error)) {
        const apiError = error;
        throw new GitLabError({
          message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
          statusCode: 400,
          cause: error
        });
      }
      // Stale token: invalidate and retry exactly once (isRetry guards recursion).
      if (!isRetry && this.isTokenError(error)) {
        this.directAccessClient.invalidateToken();
        return this.doGenerateWithResponsesApi(options, true);
      }
      if (error instanceof OpenAI.APIError) {
        throw new GitLabError({
          message: `OpenAI API error: ${error.message}`,
          statusCode: error.status,
          cause: error
        });
      }
      throw error;
    }
  }
1196
+ async doStream(options) {
1197
+ if (this.useResponsesApi) {
1198
+ return this.doStreamWithResponsesApi(options, false);
1199
+ }
1200
+ return this.doStreamWithChatApi(options, false);
1201
+ }
1202
  /**
   * Streaming generation via the OpenAI Chat Completions API.
   *
   * Returns a ReadableStream of AI SDK stream parts: stream-start,
   * response-metadata, text-start/delta/end, tool-input-start/delta/end,
   * tool-call, and a terminal finish part. Errors are enqueued as "error"
   * parts rather than thrown, so consumers always see a closed stream.
   *
   * @param options - AI SDK stream options (prompt, tools, sampling params).
   * @param isRetry - True when invoked after a token invalidation; inside the
   *   stream a token error cannot be retried transparently, so it is emitted
   *   as a GitLabError with message "TOKEN_REFRESH_NEEDED".
   */
  async doStreamWithChatApi(options, isRetry) {
    const client = await this.getOpenAIClient(isRetry);
    const messages = this.convertPrompt(options.prompt);
    const tools = this.convertTools(options.tools);
    const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
    const openaiModel = this.config.openaiModel || "gpt-4o";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    const requestBody = {
      model: openaiModel,
      max_completion_tokens: maxTokens,
      messages,
      tools,
      tool_choice: tools ? toolChoice : void 0,
      temperature: options.temperature,
      top_p: options.topP,
      stop: options.stopSequences,
      stream: true,
      // Ask OpenAI to append a final usage chunk to the stream.
      stream_options: { include_usage: true }
    };
    const self = this;
    const stream = new ReadableStream({
      start: async (controller) => {
        // Partial tool calls accumulated by choice index.
        const toolCalls = {};
        const usage = {
          inputTokens: 0,
          outputTokens: 0,
          totalTokens: 0
        };
        let finishReason = "unknown";
        let textStarted = false;
        const textId = "text-0";
        try {
          const openaiStream = await client.chat.completions.create({
            ...requestBody,
            stream: true
          });
          controller.enqueue({ type: "stream-start", warnings: [] });
          for await (const chunk of openaiStream) {
            const choice = chunk.choices?.[0];
            // NOTE(review): metadata is re-emitted for every chunk until text
            // starts (including tool-call-only streams) — confirm whether a
            // single emission was intended.
            if (chunk.id && !textStarted) {
              controller.enqueue({
                type: "response-metadata",
                id: chunk.id,
                modelId: chunk.model
              });
            }
            if (choice?.delta?.content) {
              if (!textStarted) {
                controller.enqueue({ type: "text-start", id: textId });
                textStarted = true;
              }
              controller.enqueue({
                type: "text-delta",
                id: textId,
                delta: choice.delta.content
              });
            }
            if (choice?.delta?.tool_calls) {
              for (const tc of choice.delta.tool_calls) {
                const idx = tc.index;
                // First fragment for this index carries id and name.
                if (!toolCalls[idx]) {
                  toolCalls[idx] = {
                    id: tc.id || "",
                    name: tc.function?.name || "",
                    arguments: ""
                  };
                  controller.enqueue({
                    type: "tool-input-start",
                    id: toolCalls[idx].id,
                    toolName: toolCalls[idx].name
                  });
                }
                if (tc.function?.arguments) {
                  toolCalls[idx].arguments += tc.function.arguments;
                  controller.enqueue({
                    type: "tool-input-delta",
                    id: toolCalls[idx].id,
                    delta: tc.function.arguments
                  });
                }
              }
            }
            if (choice?.finish_reason) {
              finishReason = self.convertFinishReason(choice.finish_reason);
            }
            // Usage arrives in a trailing chunk when include_usage is set.
            if (chunk.usage) {
              usage.inputTokens = chunk.usage.prompt_tokens || 0;
              usage.outputTokens = chunk.usage.completion_tokens || 0;
              usage.totalTokens = chunk.usage.total_tokens || 0;
            }
          }
          if (textStarted) {
            controller.enqueue({ type: "text-end", id: textId });
          }
          // Flush completed tool calls after the stream ends.
          for (const [, tc] of Object.entries(toolCalls)) {
            controller.enqueue({ type: "tool-input-end", id: tc.id });
            controller.enqueue({
              type: "tool-call",
              toolCallId: tc.id,
              toolName: tc.name,
              input: tc.arguments || "{}"
            });
          }
          controller.enqueue({ type: "finish", finishReason, usage });
          controller.close();
        } catch (error) {
          // Errors are enqueued (not thrown) so the consumer sees them in-band.
          if (self.isContextOverflowError(error)) {
            const apiError = error;
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
                statusCode: 400,
                cause: error
              })
            });
            controller.close();
            return;
          }
          // Token errors can't be retried mid-stream; signal the caller instead.
          if (!isRetry && self.isTokenError(error)) {
            self.directAccessClient.invalidateToken();
            controller.enqueue({
              type: "error",
              error: new GitLabError({ message: "TOKEN_REFRESH_NEEDED", cause: error })
            });
            controller.close();
            return;
          }
          if (error instanceof OpenAI.APIError) {
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `OpenAI API error: ${error.message}`,
                statusCode: error.status,
                cause: error
              })
            });
          } else {
            controller.enqueue({ type: "error", error });
          }
          controller.close();
        }
      }
    });
    return { stream, request: { body: requestBody } };
  }
1348
  /**
   * Streaming generation via the OpenAI Responses API.
   *
   * Translates Responses streaming events (response.created,
   * response.output_item.added, response.output_text.delta,
   * response.function_call_arguments.delta/done, response.completed) into
   * AI SDK stream parts. Errors are enqueued as "error" parts rather than
   * thrown, so consumers always see a closed stream.
   *
   * @param options - AI SDK stream options (prompt, tools, sampling params).
   * @param isRetry - True when invoked after a token invalidation; inside the
   *   stream a token error cannot be retried transparently, so it is emitted
   *   as a GitLabError with message "TOKEN_REFRESH_NEEDED".
   */
  async doStreamWithResponsesApi(options, isRetry) {
    const client = await this.getOpenAIClient(isRetry);
    const input = this.convertPromptForResponses(options.prompt);
    const tools = this.convertToolsForResponses(options.tools);
    // System messages travel as `instructions`, not as input items.
    const instructions = this.extractSystemInstructions(options.prompt);
    const openaiModel = this.config.openaiModel || "gpt-5-codex";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    const requestBody = {
      model: openaiModel,
      input,
      instructions,
      tools,
      max_output_tokens: maxTokens,
      temperature: options.temperature,
      top_p: options.topP,
      store: false,
      stream: true
    };
    const self = this;
    const stream = new ReadableStream({
      start: async (controller) => {
        // Partial tool calls accumulated by output index.
        const toolCalls = {};
        const usage = {
          inputTokens: 0,
          outputTokens: 0,
          totalTokens: 0
        };
        let finishReason = "unknown";
        let textStarted = false;
        const textId = "text-0";
        try {
          const openaiStream = await client.responses.create({
            ...requestBody,
            stream: true
          });
          controller.enqueue({ type: "stream-start", warnings: [] });
          for await (const event of openaiStream) {
            if (event.type === "response.created") {
              controller.enqueue({
                type: "response-metadata",
                id: event.response.id,
                modelId: event.response.model
              });
            } else if (event.type === "response.output_item.added") {
              if (event.item.type === "function_call") {
                const outputIndex = event.output_index;
                const callId = event.item.call_id;
                toolCalls[outputIndex] = {
                  callId,
                  name: event.item.name,
                  arguments: ""
                };
                controller.enqueue({
                  type: "tool-input-start",
                  id: callId,
                  toolName: event.item.name
                });
              }
            } else if (event.type === "response.output_text.delta") {
              if (!textStarted) {
                controller.enqueue({ type: "text-start", id: textId });
                textStarted = true;
              }
              controller.enqueue({
                type: "text-delta",
                id: textId,
                delta: event.delta
              });
            } else if (event.type === "response.function_call_arguments.delta") {
              const outputIndex = event.output_index;
              const tc = toolCalls[outputIndex];
              if (tc) {
                tc.arguments += event.delta;
                controller.enqueue({
                  type: "tool-input-delta",
                  id: tc.callId,
                  delta: event.delta
                });
              }
            } else if (event.type === "response.function_call_arguments.done") {
              const outputIndex = event.output_index;
              const tc = toolCalls[outputIndex];
              if (tc) {
                // The "done" event carries the authoritative full argument string.
                tc.arguments = event.arguments;
              }
            } else if (event.type === "response.completed") {
              const hasToolCalls2 = Object.keys(toolCalls).length > 0;
              finishReason = self.convertResponsesStatus(event.response.status, hasToolCalls2);
              if (event.response.usage) {
                usage.inputTokens = event.response.usage.input_tokens || 0;
                usage.outputTokens = event.response.usage.output_tokens || 0;
                usage.totalTokens = event.response.usage.total_tokens || 0;
              }
            }
          }
          if (textStarted) {
            controller.enqueue({ type: "text-end", id: textId });
          }
          // Safety net: force "tool-calls" even if no completed event fired.
          const hasToolCalls = Object.keys(toolCalls).length > 0;
          if (hasToolCalls && finishReason === "stop") {
            finishReason = "tool-calls";
          }
          // Flush completed tool calls after the stream ends.
          for (const tc of Object.values(toolCalls)) {
            controller.enqueue({ type: "tool-input-end", id: tc.callId });
            controller.enqueue({
              type: "tool-call",
              toolCallId: tc.callId,
              toolName: tc.name,
              input: tc.arguments || "{}"
            });
          }
          controller.enqueue({ type: "finish", finishReason, usage });
          controller.close();
        } catch (error) {
          // Errors are enqueued (not thrown) so the consumer sees them in-band.
          if (self.isContextOverflowError(error)) {
            const apiError = error;
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
                statusCode: 400,
                cause: error
              })
            });
            controller.close();
            return;
          }
          // Token errors can't be retried mid-stream; signal the caller instead.
          if (!isRetry && self.isTokenError(error)) {
            self.directAccessClient.invalidateToken();
            controller.enqueue({
              type: "error",
              error: new GitLabError({ message: "TOKEN_REFRESH_NEEDED", cause: error })
            });
            controller.close();
            return;
          }
          if (error instanceof OpenAI.APIError) {
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `OpenAI API error: ${error.message}`,
                statusCode: error.status,
                cause: error
              })
            });
          } else {
            controller.enqueue({ type: "error", error });
          }
          controller.close();
        }
      }
    });
    return { stream, request: { body: requestBody } };
  }
1502
+ };
1503
+
1504
+ // src/gitlab-workflow-client.ts
1505
+ import WebSocket from "isomorphic-ws";
1506
+
1507
// src/version.ts
// Bundler-resolved build constant: the original `true ? "3.6.0" : "0.0.0-dev"`
// ternary had its condition inlined at build time, so it always yields the
// release version.
var VERSION = "3.6.0";
1509
+
1510
// src/gitlab-workflow-types.ts
// Workflow kinds supported by the Duo Workflow Service (compiled TS string enum).
var WorkflowType = /* @__PURE__ */ ((enumObj) => {
  enumObj["CHAT"] = "chat";
  enumObj["SOFTWARE_DEVELOPMENT"] = "software_development";
  return enumObj;
})(WorkflowType || {});
// Transport-level ws.ping() cadence: 45 seconds.
var WS_KEEPALIVE_PING_INTERVAL_MS = 45000;
// Application-level heartbeat cadence: 60 seconds.
var WS_HEARTBEAT_INTERVAL_MS = 60000;
// Workflow definition used when the caller does not pick one.
var DEFAULT_WORKFLOW_DEFINITION = WorkflowType.CHAT;
// Capabilities this client advertises by default.
var DEFAULT_CLIENT_CAPABILITIES = ["shell_command"];
var CLIENT_VERSION = "1.0";
// Stop reason reported when the user explicitly cancels a workflow.
var STOP_REASON_USER = "USER_ACTION_TRIGGERED_STOP";
// Numeric privilege identifiers understood by DWS.
var AGENT_PRIVILEGES = {
  READ_WRITE_FILES: 1,
  READ_ONLY_GITLAB: 2,
  READ_WRITE_GITLAB: 3,
  RUN_COMMANDS: 4,
  USE_GIT: 5,
  RUN_MCP_TOOLS: 6
};
// All privileges granted by default (order preserved from the original build).
var DEFAULT_AGENT_PRIVILEGES = [
  AGENT_PRIVILEGES.READ_WRITE_FILES,
  AGENT_PRIVILEGES.READ_ONLY_GITLAB,
  AGENT_PRIVILEGES.READ_WRITE_GITLAB,
  AGENT_PRIVILEGES.RUN_COMMANDS,
  AGENT_PRIVILEGES.RUN_MCP_TOOLS,
  AGENT_PRIVILEGES.USE_GIT
];
var WORKFLOW_ENVIRONMENT = "ide";
1539
+
1540
+ // src/gitlab-workflow-client.ts
1541
var GitLabWorkflowClient = class {
  // Active WebSocket, or null when disconnected.
  socket = null;
  // Timer for transport-level ws.ping() keepalives.
  keepaliveInterval = null;
  // Timer for application-level heartbeat messages.
  heartbeatInterval = null;
  // Consumer callback receiving WorkflowClientEvents.
  eventCallback = null;
  // Set once the client initiated shutdown; suppresses the "closed" event.
  closed = false;
  // Timestamp (ms) of the last outbound send; paces on-demand heartbeats.
  lastSendTime = 0;
  /**
   * Connect to the DWS WebSocket and start listening for events.
   *
   * @param options - Connection parameters
   * @param onEvent - Callback invoked for each WorkflowClientEvent
   * @returns Promise that resolves when the connection is open
   */
  connect(options, onEvent) {
    this.validateOptions(options);
    this.eventCallback = onEvent;
    this.closed = false;
    this.cleanedUp = false;
    return new Promise((resolve2, reject) => {
      const wsUrl = this.buildWebSocketUrl(options);
      const wsHeaders = this.buildWebSocketHeaders(options);
      this.socket = new WebSocket(wsUrl, { headers: wsHeaders });
      // Once resolved, later errors/closes are reported via events, not reject.
      let resolved = false;
      this.socket.onopen = () => {
        resolved = true;
        this.startKeepalive();
        this.startHeartbeat();
        resolve2();
      };
      this.socket.onmessage = (event) => {
        try {
          const data = typeof event.data === "string" ? event.data : event.data.toString();
          const action = JSON.parse(data);
          if (!action || typeof action !== "object") {
            throw new Error("Invalid message structure: expected object");
          }
          this.handleAction(action);
        } catch (error) {
          // Malformed frames are surfaced as "failed" events rather than thrown.
          this.emit({
            type: "failed",
            error: error instanceof Error ? error : new Error(String(error))
          });
        }
      };
      this.socket.onerror = (event) => {
        const error = new Error(`WebSocket error: ${event.message || "unknown"}`);
        if (!resolved) {
          reject(error);
        } else {
          this.emit({ type: "failed", error });
        }
      };
      this.socket.onclose = (event) => {
        this.cleanup();
        if (!resolved) {
          reject(
            new Error(
              `WebSocket closed before open: code=${event.code} reason=${event.reason || ""}`
            )
          );
          return;
        }
        // Only report server-initiated closes; close()/stop() set this.closed first.
        if (!this.closed) {
          this.emit({
            type: "closed",
            code: event.code,
            reason: event.reason || ""
          });
        }
      };
    });
  }
  /**
   * Send a startRequest to begin the workflow.
   */
  sendStartRequest(request) {
    this.send({ startRequest: request });
  }
  /**
   * Send an actionResponse (tool result) back to DWS.
   */
  sendActionResponse(requestID, response, error) {
    // Long-running tools may have left the app-level heartbeat starved.
    this.sendHeartbeatIfNeeded();
    const payload = {
      requestID,
      plainTextResponse: {
        response,
        error: error ?? null
      }
    };
    this.send({ actionResponse: payload });
  }
  /**
   * Stop the workflow gracefully.
   */
  stop() {
    this.send({ stopWorkflow: { reason: STOP_REASON_USER } });
    // NOTE(review): marks the client closed but leaves the socket open,
    // presumably so DWS can acknowledge the stop — confirm against caller.
    this.closed = true;
  }
  /**
   * Close the WebSocket connection.
   */
  close() {
    if (this.closed) return;
    this.closed = true;
    this.cleanup();
    const sock = this.socket;
    this.socket = null;
    if (sock) {
      if (sock.readyState === WebSocket.OPEN || sock.readyState === WebSocket.CONNECTING) {
        // 1000 = normal closure.
        sock.close(1e3, "Client closing");
      }
    }
  }
  /**
   * Check if the WebSocket is currently connected.
   */
  get isConnected() {
    return this.socket?.readyState === WebSocket.OPEN;
  }
  // ---------------------------------------------------------------------------
  // Private
  // ---------------------------------------------------------------------------
  // Reject obviously invalid options before opening a socket: bad protocol,
  // embedded credentials, missing headers, non-string modelRef.
  validateOptions(options) {
    if (!options.instanceUrl || typeof options.instanceUrl !== "string") {
      throw new Error("instanceUrl is required");
    }
    const parsed = new URL(options.instanceUrl);
    if (parsed.protocol !== "https:" && parsed.protocol !== "http:") {
      throw new Error(`Invalid instanceUrl protocol: ${parsed.protocol}`);
    }
    if (parsed.username || parsed.password) {
      throw new Error(
        "instanceUrl must not contain authentication credentials (username/password)"
      );
    }
    if (!options.headers || typeof options.headers !== "object") {
      throw new Error("headers are required");
    }
    if (options.modelRef && typeof options.modelRef !== "string") {
      throw new Error("modelRef must be a string");
    }
  }
  // Derive the ws(s):// endpoint from the instance URL; "default" modelRef
  // is treated the same as no modelRef.
  buildWebSocketUrl(options) {
    const baseUrl = new URL(options.instanceUrl.replace(/\/?$/, "/"));
    const url = new URL("./api/v4/ai/duo_workflows/ws", baseUrl);
    url.protocol = url.protocol === "https:" ? "wss:" : "ws:";
    if (options.modelRef && options.modelRef !== "default") {
      url.searchParams.set("user_selected_model_identifier", options.modelRef);
    }
    return url.toString();
  }
  // Lower-case and augment caller headers with the GitLab-specific ones the
  // DWS WebSocket endpoint expects. content-type is dropped (no HTTP body).
  buildWebSocketHeaders(options) {
    const headers = {};
    for (const [key, value] of Object.entries(options.headers)) {
      headers[key.toLowerCase()] = value;
    }
    delete headers["content-type"];
    headers["x-gitlab-client-type"] = "node-websocket";
    const parsedUrl = new URL(options.instanceUrl);
    const origin = parsedUrl.origin;
    headers["origin"] = origin;
    if (options.requestId) {
      headers["x-request-id"] = options.requestId;
    }
    if (options.projectId) {
      headers["x-gitlab-project-id"] = options.projectId;
    }
    if (options.namespaceId) {
      headers["x-gitlab-namespace-id"] = options.namespaceId;
    }
    if (options.rootNamespaceId) {
      headers["x-gitlab-root-namespace-id"] = options.rootNamespaceId;
    }
    if (!headers["user-agent"]) {
      headers["user-agent"] = `gitlab-ai-provider/${VERSION}`;
    }
    return headers;
  }
  // Dispatch one inbound DWS action to the appropriate client event.
  handleAction(action) {
    if (action.newCheckpoint) {
      const checkpoint = action.newCheckpoint;
      this.emit({ type: "checkpoint", data: checkpoint });
      if (checkpoint.status === "FINISHED" || checkpoint.status === "COMPLETED") {
        this.emit({ type: "completed" });
      } else if (checkpoint.status === "FAILED") {
        this.emit({
          type: "failed",
          error: new Error(checkpoint.content || "Workflow failed")
        });
      } else if (checkpoint.status === "STOPPED" || checkpoint.status === "CANCELLED") {
        // User-requested stops are treated as normal completion.
        this.emit({ type: "completed" });
      }
      return;
    }
    if (action.runMCPTool && action.requestID) {
      this.emit({
        type: "tool-request",
        requestID: action.requestID,
        data: action.runMCPTool
      });
      return;
    }
    const builtinTools = [
      ["runReadFile", action.runReadFile],
      ["runReadFiles", action.runReadFiles],
      ["runWriteFile", action.runWriteFile],
      ["runShellCommand", action.runShellCommand],
      ["runEditFile", action.runEditFile],
      ["listDirectory", action.listDirectory],
      ["findFiles", action.findFiles],
      ["grep", action.grep],
      ["mkdir", action.mkdir],
      ["runCommand", action.runCommand],
      ["runGitCommand", action.runGitCommand],
      ["runHTTPRequest", action.runHTTPRequest]
    ];
    // Only the first matching builtin per action is emitted (early return).
    for (const [toolName, data] of builtinTools) {
      if (data && action.requestID) {
        this.emit({
          type: "builtin-tool-request",
          requestID: action.requestID,
          toolName,
          data
        });
        return;
      }
    }
  }
  // Serialize and send when the socket is OPEN; silently drops otherwise.
  send(event) {
    if (this.socket?.readyState === WebSocket.OPEN) {
      const json = JSON.stringify(event);
      this.socket.send(json);
      this.lastSendTime = Date.now();
    }
  }
  // Send an extra heartbeat if more than half the heartbeat interval has
  // elapsed since the last outbound message.
  sendHeartbeatIfNeeded() {
    const elapsed = Date.now() - this.lastSendTime;
    if (elapsed >= WS_HEARTBEAT_INTERVAL_MS / 2) {
      this.send({ heartbeat: { timestamp: Date.now() } });
    }
  }
  // Forward an event to the registered consumer callback, if any.
  emit(event) {
    this.eventCallback?.(event);
  }
  /**
   * Start ws.ping() keepalive (45s interval).
   * Keeps TCP connection alive through proxies/load balancers.
   */
  startKeepalive() {
    this.keepaliveInterval = setInterval(() => {
      if (this.socket?.readyState === WebSocket.OPEN) {
        try {
          this.socket.ping();
        } catch {
          // ping() may throw on a closing socket; keepalive is best-effort.
        }
      }
    }, WS_KEEPALIVE_PING_INTERVAL_MS);
  }
  /**
   * Start application-level heartbeat (60s interval).
   * Prevents DWS from timing out the workflow.
   */
  startHeartbeat() {
    this.heartbeatInterval = setInterval(() => {
      this.send({ heartbeat: { timestamp: Date.now() } });
    }, WS_HEARTBEAT_INTERVAL_MS);
  }
  // Guard so cleanup() runs at most once per connection.
  cleanedUp = false;
  /**
   * Clean up intervals. Idempotent — safe to call multiple times.
   */
  cleanup() {
    if (this.cleanedUp) return;
    this.cleanedUp = true;
    if (this.keepaliveInterval) {
      clearInterval(this.keepaliveInterval);
      this.keepaliveInterval = null;
    }
    if (this.heartbeatInterval) {
      clearInterval(this.heartbeatInterval);
      this.heartbeatInterval = null;
    }
  }
};
1827
+
1828
+ // src/gitlab-workflow-builtins.ts
1829
+ function validateNoShellMetachars(value, fieldName) {
1830
+ const dangerousChars = /[;&|`$()<>]/;
1831
+ if (dangerousChars.test(value)) {
1832
+ throw new Error(
1833
+ `Invalid ${fieldName}: contains shell metacharacters. Use structured arguments instead.`
1834
+ );
1835
+ }
1836
+ }
1837
+ function shellEscape(arg) {
1838
+ return "'" + String(arg).replace(/'/g, "'\\''") + "'";
1839
+ }
1840
+ var ALLOWED_URL_SCHEMES = ["http:", "https:"];
1841
+ function sanitizeErrorMessage(message) {
1842
+ if (!message) return "";
1843
+ return message.replace(/\bBearer\s+[A-Za-z0-9\-_.~+/]+=*/gi, "Bearer [REDACTED]").replace(/\bgl(?:pat|oat|cbt|dt|oas|rt|soat|ffct|sapat)-[A-Za-z0-9_-]+/g, "[REDACTED]").replace(/([?&](?:private_token|access_token|token)=)[^&\s"']*/gi, "$1[REDACTED]").replace(/:\/\/([^:@/\s]+):([^@/\s]+)@/g, "://$1:[REDACTED]@");
1844
+ }
1845
+ function mapBuiltinTool(dwsToolName, data) {
1846
+ switch (dwsToolName) {
1847
+ case "runReadFile":
1848
+ return { toolName: "read", args: { filePath: data.filepath } };
1849
+ case "runReadFiles": {
1850
+ const paths = data.filepaths ?? [];
1851
+ if (paths.length <= 1) {
1852
+ return { toolName: "read", args: { filePath: paths[0] ?? "" } };
1853
+ }
1854
+ return {
1855
+ toolName: "read",
1856
+ args: { filePaths: paths }
1857
+ };
1858
+ }
1859
+ case "runWriteFile":
1860
+ return {
1861
+ toolName: "write",
1862
+ args: { filePath: data.filepath, content: data.contents }
1863
+ };
1864
+ case "runEditFile":
1865
+ return {
1866
+ toolName: "edit",
1867
+ args: {
1868
+ filePath: data.filepath,
1869
+ oldString: data.oldString ?? data.old_string,
1870
+ newString: data.newString ?? data.new_string
1871
+ }
1872
+ };
1873
+ case "runShellCommand": {
1874
+ const command = data.command;
1875
+ if (!command || typeof command !== "string") {
1876
+ throw new Error("runShellCommand: command is required and must be a string");
1877
+ }
1878
+ if (command.length > 1e4) {
1879
+ throw new Error("runShellCommand: command exceeds maximum length of 10000 characters");
1880
+ }
1881
+ return {
1882
+ toolName: "bash",
1883
+ args: { command, description: "DWS shell command" }
1884
+ };
1885
+ }
1886
+ case "runCommand": {
1887
+ const program = data.program;
1888
+ if (!program || typeof program !== "string") {
1889
+ throw new Error("runCommand: program is required and must be a string");
1890
+ }
1891
+ validateNoShellMetachars(program, "program");
1892
+ const flags = data.flags ?? [];
1893
+ const cmdArgs = data.arguments ?? [];
1894
+ for (const flag of flags) {
1895
+ if (typeof flag === "string") {
1896
+ validateNoShellMetachars(flag, "flag");
1897
+ }
1898
+ }
1899
+ for (const arg of cmdArgs) {
1900
+ if (typeof arg === "string") {
1901
+ validateNoShellMetachars(arg, "argument");
1902
+ }
1903
+ }
1904
+ return {
1905
+ toolName: "bash",
1906
+ args: {
1907
+ command: [program, ...flags, ...cmdArgs].map((a) => shellEscape(String(a))).join(" "),
1908
+ description: `DWS run: ${program}`
1909
+ }
1910
+ };
1911
+ }
1912
+ case "runGitCommand": {
1913
+ const gitCmd = data.command;
1914
+ if (!gitCmd || typeof gitCmd !== "string") {
1915
+ throw new Error("runGitCommand: command is required and must be a string");
1916
+ }
1917
+ validateNoShellMetachars(gitCmd, "git command");
1918
+ const gitArgs = data.arguments ?? [];
1919
+ for (const arg of gitArgs) {
1920
+ if (typeof arg === "string") {
1921
+ validateNoShellMetachars(arg, "git argument");
1922
+ }
1923
+ }
1924
+ return {
1925
+ toolName: "bash",
1926
+ args: {
1927
+ command: ["git", gitCmd, ...gitArgs].map((a) => shellEscape(String(a))).join(" "),
1928
+ description: `DWS git: ${gitCmd}`
1929
+ }
1930
+ };
1931
+ }
1932
+ case "listDirectory":
1933
+ return { toolName: "read", args: { filePath: data.directory ?? "." } };
1934
+ case "findFiles":
1935
+ return { toolName: "glob", args: { pattern: data.name_pattern ?? data.namePattern } };
1936
+ case "grep":
1937
+ return {
1938
+ toolName: "grep",
1939
+ args: {
1940
+ pattern: data.pattern,
1941
+ path: data.search_directory ?? data.searchDirectory
1942
+ }
1943
+ };
1944
+ case "mkdir": {
1945
+ const dirPath = String(data.directory_path ?? data.directoryPath ?? "");
1946
+ if (!dirPath) {
1947
+ throw new Error("mkdir: directory_path is required");
1948
+ }
1949
+ if (dirPath.includes("\0")) {
1950
+ throw new Error("mkdir: directory_path contains null bytes");
1951
+ }
1952
+ return {
1953
+ toolName: "bash",
1954
+ args: {
1955
+ command: `mkdir -p ${shellEscape(dirPath)}`,
1956
+ description: "DWS mkdir"
1957
+ }
1958
+ };
1959
+ }
1960
+ case "runHTTPRequest": {
1961
+ const methodRaw = String(data.method ?? "GET").toUpperCase();
1962
+ const allowedMethods = ["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"];
1963
+ if (!allowedMethods.includes(methodRaw)) {
1964
+ throw new Error(`runHTTPRequest: invalid HTTP method '${methodRaw}'`);
1965
+ }
1966
+ const urlPath = String(data.path ?? "");
1967
+ if (!urlPath) {
1968
+ throw new Error("runHTTPRequest: path is required");
1969
+ }
1970
+ try {
1971
+ const parsedUrl = new URL(urlPath);
1972
+ if (!ALLOWED_URL_SCHEMES.includes(parsedUrl.protocol)) {
1973
+ throw new Error(
1974
+ `runHTTPRequest: only http:// and https:// schemes are allowed, got '${parsedUrl.protocol}'`
1975
+ );
1976
+ }
1977
+ } catch (e) {
1978
+ if (e instanceof Error && e.message.startsWith("runHTTPRequest:")) throw e;
1979
+ }
1980
+ const method = shellEscape(methodRaw);
1981
+ const escapedPath = shellEscape(urlPath);
1982
+ const bodyArg = data.body ? ` -d ${shellEscape(String(data.body))}` : "";
1983
+ return {
1984
+ toolName: "bash",
1985
+ args: {
1986
+ command: `curl -s -X ${method} -- ${escapedPath}${bodyArg}`,
1987
+ description: `DWS HTTP ${methodRaw}`
1988
+ }
1989
+ };
1990
+ }
1991
+ default:
1992
+ return { toolName: dwsToolName, args: data };
1993
+ }
1994
+ }
1995
+
1996
// src/gitlab-workflow-token-client.ts

// Fetched workflow tokens are treated as fresh for 25 minutes client-side.
var TOKEN_CACHE_DURATION_MS = 25 * 60 * 1e3;
// Upper bound on how much of a raw HTTP error body is kept before sanitizing.
var MAX_ERROR_TEXT_LENGTH = 500;
/**
 * Bound an HTTP error body to MAX_ERROR_TEXT_LENGTH characters (appending
 * "..." when truncated) and run it through sanitizeErrorMessage before it
 * is embedded in an error surfaced to the caller.
 */
function sanitizeErrorText(text) {
  let bounded = text;
  if (bounded.length > MAX_ERROR_TEXT_LENGTH) {
    bounded = bounded.slice(0, MAX_ERROR_TEXT_LENGTH) + "...";
  }
  return sanitizeErrorMessage(bounded);
}
// Cache key shared by all CHAT workflows so their token is reused across sessions.
var CHAT_SHARED_TOKEN_KEY = "__chat_shared__";
2004
// Client for the GitLab Duo Workflow Service (DWS) REST endpoints:
// fetches short-lived direct-access tokens and creates workflows.
var GitLabWorkflowTokenClient = class {
  config;
  fetchFn;
  /**
   * Token cache keyed by workflow definition type.
   *
   * - CHAT workflows use a shared key (CHAT_SHARED_TOKEN_KEY) so tokens
   *   are reused across ALL chat sessions (matching gitlab-lsp behavior).
   * - SOFTWARE_DEVELOPMENT workflows would use per-workflow-id keys,
   *   but since we fetch tokens before creating workflows, we key by type.
   */
  tokenCache = /* @__PURE__ */ new Map();
  constructor(config) {
    this.config = config;
    // Allow a custom fetch implementation (tests, proxies); default to global fetch.
    this.fetchFn = config.fetch ?? fetch;
  }
  /**
   * Resolve the cache key for a given workflow definition.
   * CHAT workflows share a single token per namespace; other types get per-type keys.
   */
  getCacheKey(workflowDefinition, rootNamespaceId) {
    const base = workflowDefinition === "chat" /* CHAT */ ? CHAT_SHARED_TOKEN_KEY : workflowDefinition;
    return rootNamespaceId ? `${base}:${rootNamespaceId}` : base;
  }
  /**
   * Get a DWS token, using cached value if still valid.
   *
   * Token caching strategy (matches gitlab-lsp):
   * - CHAT workflows: shared token across all sessions
   * - Other workflows: per-type token
   *
   * On a 401 the configured refreshApiKey callback (if any) is invoked once
   * and the request retried with forceRefresh=true; all other failures are
   * wrapped in GitLabError with a sanitized, truncated response body.
   *
   * @param workflowDefinition - Workflow type (default: 'chat')
   * @param rootNamespaceId - Optional root namespace for scoping
   * @param forceRefresh - Bypass cache
   */
  async getToken(workflowDefinition = DEFAULT_WORKFLOW_DEFINITION, rootNamespaceId, forceRefresh = false) {
    const now = Date.now();
    const cacheKey = this.getCacheKey(workflowDefinition, rootNamespaceId);
    const cached = this.tokenCache.get(cacheKey);
    // Serve from cache unless the caller forced a refresh or the entry expired.
    if (!forceRefresh && cached && cached.expiresAt > now) {
      return cached.token;
    }
    if (forceRefresh) {
      // Drop the stale entry up front so a failed fetch doesn't leave it behind.
      this.tokenCache.delete(cacheKey);
    }
    const url = `${this.config.instanceUrl}/api/v4/ai/duo_workflows/direct_access`;
    const body = {
      workflow_definition: workflowDefinition
    };
    if (rootNamespaceId) {
      body.root_namespace_id = rootNamespaceId;
    }
    // Only send feature_flags when at least one is configured.
    if (this.config.featureFlags && Object.keys(this.config.featureFlags).length > 0) {
      body.feature_flags = this.config.featureFlags;
    }
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify(body)
      });
      if (!response.ok) {
        const errorText = await response.text();
        // Truncate + sanitize before the body ever reaches an error message.
        const safeError = sanitizeErrorText(errorText);
        if (response.status === 401 && this.config.refreshApiKey && !forceRefresh) {
          // One-shot credential recovery: refresh, then retry with
          // forceRefresh=true so the retry cannot re-enter this branch.
          try {
            await this.config.refreshApiKey();
            return await this.getToken(workflowDefinition, rootNamespaceId, true);
          } catch {
            throw new GitLabError({
              message: `Failed to get workflow token: ${response.status} ${response.statusText} - ${safeError}`,
              statusCode: response.status,
              responseBody: safeError
            });
          }
        }
        if (response.status === 403) {
          // 403 gets a dedicated, actionable message about licensing prerequisites.
          throw new GitLabError({
            message: `GitLab Duo Agent Platform access denied. GitLab Duo Agent Platform requires GitLab Ultimate with Duo Enterprise add-on. Ensure: (1) Your instance has GitLab Ultimate, (2) Duo Enterprise add-on is enabled, (3) Your account has access to AI features.`,
            statusCode: response.status,
            responseBody: safeError
          });
        }
        throw new GitLabError({
          message: `Failed to get workflow token: ${response.status} ${response.statusText} - ${safeError}`,
          statusCode: response.status,
          responseBody: safeError
        });
      }
      const data = await response.json();
      // Cache the full token payload with a client-side TTL.
      this.tokenCache.set(cacheKey, {
        token: data,
        expiresAt: now + TOKEN_CACHE_DURATION_MS
      });
      return data;
    } catch (error) {
      if (error instanceof GitLabError) throw error;
      // Wrap transport-level failures (network errors, JSON parse) in GitLabError.
      throw new GitLabError({
        message: `Failed to get workflow token: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Create a new workflow on the GitLab instance.
   *
   * @param goal - The user's message / goal for this workflow
   * @param options - Additional workflow creation options
   * @returns The created workflow's ID
   * @throws GitLabError when the goal is missing/oversized or the API call fails
   */
  async createWorkflow(goal, options) {
    if (!goal || typeof goal !== "string") {
      throw new GitLabError({ message: "goal is required and must be a non-empty string" });
    }
    // Client-side cap of 10,000 characters guards against oversized payloads.
    if (goal.length > 1e4) {
      throw new GitLabError({ message: "goal exceeds maximum length of 10000 characters" });
    }
    const url = `${this.config.instanceUrl}/api/v4/ai/duo_workflows/workflows`;
    // Undefined optional fields are dropped by JSON.stringify below.
    const body = {
      goal,
      project_id: options?.projectId,
      namespace_id: options?.namespaceId,
      workflow_definition: options?.workflowDefinition ?? DEFAULT_WORKFLOW_DEFINITION,
      agent_privileges: options?.agentPrivileges ?? DEFAULT_AGENT_PRIVILEGES,
      environment: options?.environment ?? WORKFLOW_ENVIRONMENT,
      allow_agent_to_request_user: options?.allowAgentToRequestUser ?? true
    };
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify(body)
      });
      if (!response.ok) {
        const errorText = await response.text();
        const safeError = sanitizeErrorText(errorText);
        throw new GitLabError({
          message: `Failed to create workflow: ${response.status} ${response.statusText} - ${safeError}`,
          statusCode: response.status,
          responseBody: safeError
        });
      }
      const data = await response.json();
      // Workflow IDs are numeric in the API; callers expect a string.
      return data.id.toString();
    } catch (error) {
      if (error instanceof GitLabError) throw error;
      throw new GitLabError({
        message: `Failed to create workflow: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Invalidate cached tokens.
   *
   * @param workflowDefinition - If provided, only invalidate for this type.
   *                             If omitted, clears ALL cached tokens.
   * @param rootNamespaceId - Optional namespace scoping for the cache key.
   */
  invalidateToken(workflowDefinition, rootNamespaceId) {
    if (workflowDefinition) {
      this.tokenCache.delete(this.getCacheKey(workflowDefinition, rootNamespaceId));
    } else {
      this.tokenCache.clear();
    }
  }
};
2176
+
2177
+ // src/gitlab-project-detector.ts
2178
+ import { spawn } from "child_process";
2179
+ import * as path from "path";
2180
+
2181
+ // src/gitlab-project-cache.ts
2182
// In-memory TTL cache mapping a key (typically a working-directory path)
// to a detected GitLab project. Expired entries are evicted lazily on
// read, or in bulk via cleanup().
var GitLabProjectCache = class {
  cache = /* @__PURE__ */ new Map();
  defaultTTL;
  /**
   * Create a new project cache.
   * @param defaultTTL - Default time-to-live in milliseconds (default: 5 minutes)
   */
  constructor(defaultTTL = 5 * 60 * 1e3) {
    this.defaultTTL = defaultTTL;
  }
  /**
   * Look up a cached project.
   * @param key - Cache key (typically the working directory path)
   * @returns The cached project, or null when absent or expired
   */
  get(key) {
    const entry = this.cache.get(key);
    if (entry === void 0) {
      return null;
    }
    const expired = Date.now() > entry.expiresAt;
    if (!expired) {
      return entry.project;
    }
    // Lazy eviction: drop the stale entry as soon as it is observed.
    this.cache.delete(key);
    return null;
  }
  /**
   * Store a project in the cache.
   * @param key - Cache key (typically the working directory path)
   * @param project - The project to cache
   * @param ttl - Optional custom TTL in milliseconds
   */
  set(key, project, ttl) {
    const lifespan = ttl ?? this.defaultTTL;
    this.cache.set(key, { project, expiresAt: Date.now() + lifespan });
  }
  /**
   * Whether a non-expired entry exists for the key.
   * @param key - Cache key to check
   * @returns true if the key exists and is not expired
   */
  has(key) {
    return this.get(key) !== null;
  }
  /**
   * Drop a single entry from the cache.
   * @param key - Cache key to remove
   */
  delete(key) {
    this.cache.delete(key);
  }
  /**
   * Drop every entry from the cache.
   */
  clear() {
    this.cache.clear();
  }
  /**
   * Number of stored entries, counting expired ones not yet evicted.
   */
  get size() {
    return this.cache.size;
  }
  /**
   * Eagerly evict every expired entry; useful in long-running processes
   * to keep the map from accumulating dead entries.
   */
  cleanup() {
    const cutoff = Date.now();
    for (const [key, entry] of this.cache.entries()) {
      if (entry.expiresAt < cutoff) {
        this.cache.delete(key);
      }
    }
  }
};
2260
+
2261
+ // src/gitlab-project-detector.ts
2262
// Detects which GitLab project a local working directory belongs to by
// reading the git remote URL and resolving it against the GitLab REST API.
// Results are memoized in a GitLabProjectCache keyed by absolute directory.
var GitLabProjectDetector = class {
  config;
  fetchFn;
  cache;
  constructor(config) {
    this.config = {
      gitTimeout: 5e3,
      // 5 seconds default
      ...config
    };
    // Allow a custom fetch implementation (tests, proxies); default to global fetch.
    this.fetchFn = config.fetch ?? fetch;
    this.cache = config.cache ?? new GitLabProjectCache();
  }
  /**
   * Auto-detect GitLab project from git remote in the working directory
   *
   * @param workingDirectory - The directory to check for git remote
   * @param remoteName - The git remote name to use (default: 'origin')
   * @returns The detected project or null if not a git repo / no matching remote
   * @throws GitLabError if the API call or an unexpected error occurs
   */
  async detectProject(workingDirectory, remoteName = "origin") {
    // Cache on the absolute path so relative and absolute spellings share an entry.
    const cacheKey = path.resolve(workingDirectory);
    const cached = this.cache.get(cacheKey);
    if (cached) {
      return cached;
    }
    try {
      const remoteUrl = await this.getGitRemoteUrl(workingDirectory, remoteName);
      if (!remoteUrl) {
        // Not a git repo, or the named remote doesn't exist: not an error.
        return null;
      }
      const projectPath = this.parseGitRemoteUrl(remoteUrl, this.config.instanceUrl);
      if (!projectPath) {
        // Remote points at a different host than the configured instance.
        return null;
      }
      const project = await this.getProjectByPath(projectPath);
      this.cache.set(cacheKey, project);
      return project;
    } catch (error) {
      throw error instanceof GitLabError ? error : new GitLabError({
        message: `Project detection failed: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Parse a git remote URL to extract the project path
   *
   * Supports:
   * - SSH: git@gitlab.com:namespace/project.git
   * - HTTPS: https://gitlab.com/namespace/project.git
   * - HTTP: http://gitlab.local/namespace/project.git
   * - Custom domains and ports
   *
   * @param remoteUrl - The git remote URL
   * @param instanceUrl - The GitLab instance URL to match against
   * @returns The project path (e.g., "namespace/project") or null if parsing fails
   */
  parseGitRemoteUrl(remoteUrl, instanceUrl) {
    try {
      // Only remotes whose host matches the configured instance are accepted.
      const instanceHost = new URL(instanceUrl).hostname;
      // scp-style SSH remote: git@HOST:PATH[.git]
      const sshMatch = remoteUrl.match(/^git@([^:]+):(.+?)(?:\.git)?$/);
      if (sshMatch) {
        const [, host, pathPart] = sshMatch;
        // Defensive: [^:]+ cannot actually contain ':', so this split is a no-op safeguard.
        const hostWithoutPort = host.split(":")[0];
        if (hostWithoutPort === instanceHost) {
          // Strip a leading numeric segment ("PORT/") seen in git@host:PORT/group/repo remotes.
          const cleanPath = pathPart.replace(/^\d+\//, "");
          return cleanPath.endsWith(".git") ? cleanPath.slice(0, -4) : cleanPath;
        }
      }
      // HTTP(S) remote: scheme://HOST[:PORT]/PATH[.git]
      const httpsMatch = remoteUrl.match(/^(https?):\/\/([^/]+)\/(.+?)(?:\.git)?$/);
      if (httpsMatch) {
        const [, , hostWithPort, pathPart] = httpsMatch;
        // Drop an explicit port before comparing hostnames.
        const host = hostWithPort.split(":")[0];
        if (host === instanceHost) {
          return pathPart.endsWith(".git") ? pathPart.slice(0, -4) : pathPart;
        }
      }
      return null;
    } catch (error) {
      // Malformed instanceUrl (URL constructor throw) etc.: treat as "no match".
      return null;
    }
  }
  /**
   * Get the git remote URL from a working directory
   *
   * Spawns `git config --get remote.<name>.url` rather than shelling out,
   * and never rejects: any failure (git missing, timeout, non-zero exit)
   * resolves to null.
   *
   * @param workingDirectory - The directory to check
   * @param remoteName - The git remote name (default: 'origin')
   * @returns The remote URL or null if not found
   */
  async getGitRemoteUrl(workingDirectory, remoteName = "origin") {
    return new Promise((resolve2) => {
      const child = spawn("git", ["config", "--get", `remote.${remoteName}.url`], {
        cwd: workingDirectory,
        timeout: this.config.gitTimeout
      });
      let stdout = "";
      let _stderr = "";
      child.stdout?.on("data", (data) => {
        stdout += data.toString();
      });
      // stderr is drained but intentionally unused; failures just resolve to null.
      child.stderr?.on("data", (data) => {
        _stderr += data.toString();
      });
      child.on("close", (exitCode) => {
        if (exitCode === 0 && stdout.trim()) {
          resolve2(stdout.trim());
        } else {
          resolve2(null);
        }
      });
      // 'error' fires when git itself can't be spawned (e.g. not installed).
      child.on("error", () => {
        resolve2(null);
      });
    });
  }
  /**
   * Fetch project details from GitLab API by project path
   *
   * @param projectPath - The project path (e.g., "namespace/project")
   * @returns The project details
   * @throws GitLabError if the API call fails
   */
  async getProjectByPath(projectPath) {
    // Paths contain '/', which must be percent-encoded in the REST URL.
    const encodedPath = encodeURIComponent(projectPath);
    const url = `${this.config.instanceUrl}/api/v4/projects/${encodedPath}`;
    try {
      const response = await this.fetchFn(url, {
        method: "GET",
        headers: this.config.getHeaders()
      });
      if (!response.ok) {
        throw new GitLabError({
          message: `Failed to fetch project '${projectPath}': ${response.status} ${response.statusText}`
        });
      }
      const data = await response.json();
      // Map snake_case API fields to the camelCase shape used internally.
      return {
        id: data.id,
        path: data.path,
        pathWithNamespace: data.path_with_namespace,
        name: data.name,
        namespaceId: data.namespace?.id
      };
    } catch (error) {
      if (error instanceof GitLabError) {
        throw error;
      }
      throw new GitLabError({
        message: `Failed to fetch project '${projectPath}': ${error}`,
        cause: error
      });
    }
  }
  /**
   * Clear the project cache
   */
  clearCache() {
    this.cache.clear();
  }
  /**
   * Get the cache instance (useful for testing)
   */
  getCache() {
    return this.cache;
  }
};
2430
+
2431
+ // src/gitlab-model-discovery.ts
2432
// GraphQL query used by GitLabModelDiscovery: fetches the AI chat models
// available to a root namespace (default / selectable / admin-pinned) plus
// instance metadata — the GitLab version and the "ai_user_model_switching"
// feature flag that gates user-driven model selection.
var AI_CHAT_AVAILABLE_MODELS_QUERY = `
query aiChatAvailableModels($rootNamespaceId: GroupID!) {
  metadata {
    featureFlags(names: ["ai_user_model_switching"]) {
      enabled
      name
    }
    version
  }

  aiChatAvailableModels(rootNamespaceId: $rootNamespaceId) {
    defaultModel {
      name
      ref
    }
    selectableModels {
      name
      ref
    }
    pinnedModel {
      name
      ref
    }
  }
}
`;
// Discovery results are cached in-memory, per root namespace, for 10 minutes.
var DISCOVERY_CACHE_TTL_MS = 10 * 60 * 1e3;
2459
// Discovers which AI chat models are available for a namespace via the
// GitLab GraphQL API, with a per-namespace in-memory TTL cache.
var GitLabModelDiscovery = class {
  config;
  fetchFn;
  // Map<rootNamespaceId, { data, expiresAt }>
  cache = /* @__PURE__ */ new Map();
  constructor(config) {
    this.config = config;
    // Allow a custom fetch implementation (tests, proxies); default to global fetch.
    this.fetchFn = config.fetch ?? fetch;
  }
  /**
   * Discover available models for a given root namespace.
   *
   * Results are cached per `rootNamespaceId` with a 10-minute TTL.
   * Use `invalidateCache()` to force an immediate refresh.
   *
   * @param rootNamespaceId - GitLab group ID (e.g., 'gid://gitlab/Group/12345')
   * @throws GitLabError on HTTP failure, GraphQL errors, or transport errors
   */
  async discover(rootNamespaceId) {
    const cached = this.cache.get(rootNamespaceId);
    if (cached && cached.expiresAt > Date.now()) {
      return cached.data;
    }
    const url = `${this.config.instanceUrl}/api/graphql`;
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          query: AI_CHAT_AVAILABLE_MODELS_QUERY,
          variables: { rootNamespaceId }
        })
      });
      if (!response.ok) {
        const errorText = await response.text();
        // NOTE(review): unlike GitLabWorkflowTokenClient, the raw body is not
        // run through sanitizeErrorText here — confirm whether that is intended.
        throw new GitLabError({
          message: `Model discovery GraphQL request failed: ${response.status} ${response.statusText} - ${errorText}`,
          statusCode: response.status,
          responseBody: errorText
        });
      }
      const json = await response.json();
      // GraphQL can return 200 with errors in the payload; surface those too.
      if (json.errors && json.errors.length > 0) {
        throw new GitLabError({
          message: `Model discovery GraphQL errors: ${json.errors.map((e) => e.message).join(", ")}`
        });
      }
      const models = json.data?.aiChatAvailableModels;
      const metadata = json.data?.metadata;
      // Model switching is only offered when the instance feature flag is on.
      const modelSwitchingEnabled = metadata?.featureFlags?.find((f) => f.name === "ai_user_model_switching")?.enabled ?? false;
      // Normalize missing fields to null/[] so callers never see undefined.
      const result = {
        defaultModel: models?.defaultModel ?? null,
        selectableModels: models?.selectableModels ?? [],
        pinnedModel: models?.pinnedModel ?? null,
        modelSwitchingEnabled,
        instanceVersion: metadata?.version ?? null
      };
      this.cache.set(rootNamespaceId, {
        data: result,
        expiresAt: Date.now() + DISCOVERY_CACHE_TTL_MS
      });
      return result;
    } catch (error) {
      if (error instanceof GitLabError) throw error;
      throw new GitLabError({
        message: `Model discovery failed: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Get the effective model ref to use for a workflow.
   *
   * Priority: pinned > user-selected > default.
   * A user selection is honored only when model switching is enabled AND the
   * ref appears in the selectable list; otherwise it falls through to default.
   *
   * @param rootNamespaceId - GitLab group ID
   * @param userSelectedRef - Optional user preference
   * @returns The model ref string, or null when no model is resolvable
   */
  async getEffectiveModelRef(rootNamespaceId, userSelectedRef) {
    const discovered = await this.discover(rootNamespaceId);
    if (discovered.pinnedModel) {
      return discovered.pinnedModel.ref;
    }
    if (userSelectedRef && discovered.modelSwitchingEnabled) {
      const isValid = discovered.selectableModels.some((m) => m.ref === userSelectedRef);
      if (isValid) {
        return userSelectedRef;
      }
    }
    return discovered.defaultModel?.ref ?? null;
  }
  /**
   * Invalidate the cached discovery results.
   */
  invalidateCache() {
    this.cache.clear();
  }
};
2558
+
2559
+ // src/gitlab-model-cache.ts
2560
+ import * as fs from "fs";
2561
+ import * as path2 from "path";
2562
+ import * as os from "os";
2563
+ import * as crypto from "crypto";
2564
/**
 * Resolve the on-disk location of the shared model cache file.
 * Honors $XDG_CACHE_HOME (when set and non-empty), falling back to ~/.cache.
 */
function getCacheFilePath() {
  const xdg = process.env.XDG_CACHE_HOME;
  const baseDir = xdg ? xdg : path2.join(os.homedir(), ".cache");
  return path2.join(baseDir, "opencode", "gitlab-workflow-model-cache.json");
}
2568
/**
 * Derive a short, stable cache key for a (workDir, instanceUrl) pair.
 * The URL defaults to https://gitlab.com and loses any trailing slash so
 * equivalent spellings map to the same key. Returns the first 12 hex chars
 * of a SHA-256 over the NUL-joined pair.
 */
function computeCacheKey(workDir, instanceUrl) {
  let url = instanceUrl || "https://gitlab.com";
  url = url.replace(/\/$/, "");
  const hash = crypto.createHash("sha256");
  hash.update(`${workDir}\0${url}`);
  return hash.digest("hex").slice(0, 12);
}
2572
// File-backed, best-effort cache of model discovery results and the user's
// selected model, shared across processes. The file holds one entry per
// workspace, keyed by computeCacheKey(workDir, instanceUrl). All disk I/O
// failures are deliberately swallowed: a broken cache must never break chat.
var GitLabModelCache = class {
  filePath;
  key;
  constructor(workDir, instanceUrl) {
    this.filePath = getCacheFilePath();
    this.key = computeCacheKey(workDir, instanceUrl);
  }
  // Read the whole cache file; any error (missing file, bad JSON) yields {}.
  readAll() {
    try {
      if (!fs.existsSync(this.filePath)) {
        return {};
      }
      const raw = fs.readFileSync(this.filePath, "utf-8");
      return JSON.parse(raw);
    } catch {
      return {};
    }
  }
  // Write the whole cache file; failures are silently ignored (best-effort).
  writeAll(data) {
    try {
      const dir = path2.dirname(this.filePath);
      // mode 448 === 0o700: cache dir readable only by the owner.
      fs.mkdirSync(dir, { recursive: true, mode: 448 });
      // mode 384 === 0o600: cache file readable/writable only by the owner.
      fs.writeFileSync(this.filePath, JSON.stringify(data, null, 2), { mode: 384 });
    } catch {
    }
  }
  /**
   * Load the cached entry for this workspace.
   * Returns null if no cache exists or is unreadable.
   */
  load() {
    return this.readAll()[this.key] ?? null;
  }
  /**
   * Persist the full cache entry to disk.
   * Other workspaces' entries in the shared file are preserved.
   */
  save(entry) {
    const data = this.readAll();
    data[this.key] = entry;
    this.writeAll(data);
  }
  /**
   * Update only the discovery portion of the cache, preserving selection.
   */
  saveDiscovery(discovery) {
    const existing = this.load();
    this.save({
      discovery,
      selectedModelRef: existing?.selectedModelRef ?? null,
      selectedModelName: existing?.selectedModelName ?? null,
      updatedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
  }
  /**
   * Update only the selected model, preserving the discovery data.
   */
  saveSelection(ref, name) {
    const existing = this.load();
    this.save({
      discovery: existing?.discovery ?? null,
      selectedModelRef: ref,
      selectedModelName: name,
      updatedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
  }
  /**
   * Remove the entry for this workspace from the cache file.
   * Deletes the file entirely when it was the last remaining entry.
   */
  clear() {
    const data = this.readAll();
    delete data[this.key];
    if (Object.keys(data).length === 0) {
      try {
        if (fs.existsSync(this.filePath)) {
          fs.unlinkSync(this.filePath);
        }
      } catch {
      }
    } else {
      this.writeAll(data);
    }
  }
  /**
   * Convenience: get the cached selected model ref (or null).
   */
  getSelectedModelRef() {
    return this.load()?.selectedModelRef ?? null;
  }
  /**
   * Convenience: get the cached selected model name (or null).
   */
  getSelectedModelName() {
    return this.load()?.selectedModelName ?? null;
  }
  /**
   * Convenience: get the cached discovery result (or null).
   */
  getDiscovery() {
    return this.load()?.discovery ?? null;
  }
};
2673
+
2674
+ // src/gitlab-workflow-language-model.ts
2675
/**
 * Produce a slimmed copy of a JSON-schema-like object, dropping the
 * "description", "examples" and "default" keys. Recurses only into the
 * values of "properties" and into "items"; other nested objects are
 * copied by reference unchanged. Non-object inputs are returned as-is.
 */
function simplifySchemaObj(schema) {
  if (!schema || typeof schema !== "object") return schema;
  const out = {};
  for (const [key, value] of Object.entries(schema)) {
    // Strip purely descriptive metadata at this level.
    if (key === "description" || key === "examples" || key === "default") {
      continue;
    }
    const isObj = typeof value === "object" && value !== null;
    if (key === "properties" && isObj) {
      // Recurse into each property schema, leaving non-object values untouched.
      const cleaned = {};
      for (const [name, sub] of Object.entries(value)) {
        cleaned[name] = typeof sub === "object" && sub !== null ? simplifySchemaObj(sub) : sub;
      }
      out[key] = cleaned;
    } else if (key === "items" && isObj) {
      out[key] = simplifySchemaObj(value);
    } else {
      out[key] = value;
    }
  }
  return out;
}
2700
/**
 * Parse a JSON schema string, strip descriptive metadata via
 * simplifySchemaObj, and re-serialize. If the input is not valid JSON it
 * is returned unchanged rather than throwing.
 */
function simplifySchema(schemaStr) {
  try {
    const parsed = JSON.parse(schemaStr);
    const slimmed = simplifySchemaObj(parsed);
    return JSON.stringify(slimmed);
  } catch {
    return schemaStr;
  }
}
2707
/**
 * Reduce a JSON-schema-like object to its bare skeleton: keep only "type"
 * (defaulting to "object"), "required" when present, and for each property
 * just its "type" (defaulting to "string"). Non-object inputs are returned
 * unchanged.
 */
function minimalSchemaObj(schema) {
  if (!schema || typeof schema !== "object") return schema;
  const minimal = { type: schema.type || "object" };
  if (schema.required) {
    minimal.required = schema.required;
  }
  const source = schema.properties;
  if (source && typeof source === "object") {
    const stripped = {};
    for (const name of Object.keys(source)) {
      const prop = source[name];
      // A property keeps its declared type only when it is an object with a
      // truthy "type"; anything else collapses to "string".
      const declared = typeof prop === "object" && prop !== null && prop.type;
      stripped[name] = { type: declared ? prop.type : "string" };
    }
    minimal.properties = stripped;
  }
  return minimal;
}
2729
/**
 * Parse a JSON schema string and re-serialize its minimal skeleton via
 * minimalSchemaObj. Invalid JSON is returned unchanged rather than throwing.
 */
function minimalSchema(schemaStr) {
  try {
    const parsed = JSON.parse(schemaStr);
    const skeleton = minimalSchemaObj(parsed);
    return JSON.stringify(skeleton);
  } catch {
    return schemaStr;
  }
}
2736
+ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
2737
+ specificationVersion = "v2";
2738
+ modelId;
2739
+ supportedUrls = {};
2740
+ config;
2741
+ workflowOptions;
2742
+ tokenClient;
2743
+ projectDetector;
2744
+ modelDiscovery;
2745
+ modelCache;
2746
+ // Cached detected project path
2747
+ detectedProjectPath = null;
2748
+ // Workflow ID persisted across turns for multi-turn conversations.
2749
+ // When DWS sends INPUT_REQUIRED, the workflow stays alive server-side.
2750
+ // On the next doStream() call we reuse this ID (skip createWorkflow).
2751
+ currentWorkflowId = null;
2752
+ // Persisted across turns so that cumulative DWS chat logs don't re-emit
2753
+ // messages that were already streamed in a previous doStream() call.
2754
+ persistedAgentEmitted = /* @__PURE__ */ new Map();
2755
+ // Track all active stream clients so stopWorkflow() can stop them all.
2756
+ activeClients = /* @__PURE__ */ new Set();
2757
+ // Cache resolved values to avoid redundant GraphQL calls
2758
+ _selectedModelRef;
2759
+ _selectedModelName;
2760
+ _rootNamespaceId;
2761
+ _discoveryPromise;
2762
+ /**
2763
+ * Get the cached selected model ref.
2764
+ */
2765
+ get selectedModelRef() {
2766
+ return this._selectedModelRef ?? null;
2767
+ }
2768
+ /**
2769
+ * Set the selected model ref (e.g., from an eager discover call).
2770
+ * This will be used by resolveModelRef() to skip the picker.
2771
+ * Also persists to the file-based workspace cache.
2772
+ */
2773
+ set selectedModelRef(ref) {
2774
+ this._selectedModelRef = ref ?? void 0;
2775
+ this.modelCache.saveSelection(ref, this._selectedModelName ?? null);
2776
+ }
2777
+ /**
2778
+ * Get the cached selected model display name.
2779
+ */
2780
+ get selectedModelName() {
2781
+ return this._selectedModelName ?? null;
2782
+ }
2783
+ /**
2784
+ * Set the selected model display name.
2785
+ * Also persists to the file-based workspace cache.
2786
+ */
2787
+ set selectedModelName(name) {
2788
+ this._selectedModelName = name ?? void 0;
2789
+ this.modelCache.saveSelection(this._selectedModelRef ?? null, name);
2790
+ }
2791
+ /**
2792
+ * Optional external tool executor. When set, this is called for tool
2793
+ * requests instead of looking up tools from `options.tools`.
2794
+ * This allows the consumer (OpenCode) to wire in its permission system.
2795
+ *
2796
+ * The executor is automatically bound to the async context at the time
2797
+ * it is set, so that AsyncLocalStorage-based contexts (like Instance)
2798
+ * remain available when the executor is invoked from WebSocket callbacks.
2799
+ */
2800
+ _toolExecutor = null;
2801
+ /**
2802
+ * Optional callback invoked with intermediate token usage estimates
2803
+ * after each tool execution completes. This allows the consumer to
2804
+ * display live token counts during long-running DWS workflows, since
2805
+ * the AI SDK only surfaces usage via finish-step at stream end.
2806
+ */
2807
+ onUsageUpdate = null;
2808
+ /**
2809
+ * Optional callback invoked when multiple workflow models are available
2810
+ * and the user should pick one. Set per-stream by the host (e.g., OpenCode)
2811
+ * alongside `toolExecutor`. Takes precedence over `workflowOptions.onSelectModel`.
2812
+ */
2813
+ onSelectModel = null;
2814
+ get toolExecutor() {
2815
+ return this._toolExecutor;
2816
+ }
2817
+ set toolExecutor(executor) {
2818
+ if (executor) {
2819
+ try {
2820
+ const { AsyncResource } = __require("async_hooks");
2821
+ this._toolExecutor = AsyncResource.bind(executor);
2822
+ } catch {
2823
+ this._toolExecutor = executor;
2824
+ }
2825
+ } else {
2826
+ this._toolExecutor = null;
2827
+ }
2828
+ }
2829
+ constructor(modelId, config, workflowOptions = {}) {
2830
+ this.modelId = modelId;
2831
+ this.config = config;
2832
+ this.workflowOptions = workflowOptions;
2833
+ const workDir = workflowOptions.workingDirectory ?? process.cwd();
2834
+ this.modelCache = new GitLabModelCache(workDir, config.instanceUrl);
2835
+ const cached = this.modelCache.load();
2836
+ if (cached?.selectedModelRef) {
2837
+ this._selectedModelRef = cached.selectedModelRef;
2838
+ }
2839
+ if (cached?.selectedModelName) {
2840
+ this._selectedModelName = cached.selectedModelName;
2841
+ }
2842
+ this.tokenClient = new GitLabWorkflowTokenClient({
2843
+ instanceUrl: config.instanceUrl,
2844
+ getHeaders: config.getHeaders,
2845
+ refreshApiKey: config.refreshApiKey,
2846
+ fetch: config.fetch,
2847
+ featureFlags: config.featureFlags
2848
+ });
2849
+ this.projectDetector = new GitLabProjectDetector({
2850
+ instanceUrl: config.instanceUrl,
2851
+ getHeaders: config.getHeaders,
2852
+ fetch: config.fetch
2853
+ });
2854
+ this.modelDiscovery = new GitLabModelDiscovery({
2855
+ instanceUrl: config.instanceUrl,
2856
+ getHeaders: config.getHeaders,
2857
+ fetch: config.fetch
2858
+ });
2859
+ }
2860
+ get provider() {
2861
+ return this.config.provider;
2862
+ }
2863
+ /**
2864
+ * Resolve the project ID (path) to use for workflow creation.
2865
+ * Priority: explicit option > auto-detected from git remote > undefined.
2866
+ */
2867
+ async resolveProjectId() {
2868
+ if (this.workflowOptions.projectId) {
2869
+ return this.workflowOptions.projectId;
2870
+ }
2871
+ if (this.detectedProjectPath) {
2872
+ return this.detectedProjectPath;
2873
+ }
2874
+ const workDir = this.workflowOptions.workingDirectory ?? process.cwd();
2875
+ const project = await this.projectDetector.detectProject(workDir);
2876
+ if (project) {
2877
+ this.detectedProjectPath = project.pathWithNamespace;
2878
+ return project.pathWithNamespace;
2879
+ }
2880
+ return void 0;
2881
+ }
2882
+ /**
2883
+ * Resolve the root namespace GID to use for model discovery.
2884
+ *
2885
+ * Priority:
2886
+ * 1. Explicit `rootNamespaceId` in workflowOptions (caller-provided GID)
2887
+ * 2. Auto-detected from git remote via project detector (namespace.id → GID)
2888
+ * 3. Cached from previous call
2889
+ */
2890
+ async resolveRootNamespaceId() {
2891
+ if (this.workflowOptions.rootNamespaceId) {
2892
+ return this.workflowOptions.rootNamespaceId;
2893
+ }
2894
+ if (this._rootNamespaceId !== void 0) {
2895
+ return this._rootNamespaceId;
2896
+ }
2897
+ const workDir = this.workflowOptions.workingDirectory ?? process.cwd();
2898
+ const project = await this.projectDetector.detectProject(workDir);
2899
+ if (project?.namespaceId) {
2900
+ const gid = `gid://gitlab/Group/${project.namespaceId}`;
2901
+ this._rootNamespaceId = gid;
2902
+ return gid;
2903
+ }
2904
+ this._rootNamespaceId = null;
2905
+ return null;
2906
+ }
2907
+ /**
2908
+ * Resolve the effective DWS model ref to use for this stream.
2909
+ * Deduplicates concurrent calls via a shared promise.
2910
+ *
2911
+ * Priority for the canonical `duo-workflow` model ID:
2912
+ * 1. Admin-pinned model (from GitLabModelDiscovery) — always wins
2913
+ * 2. User selection via onSelectModel callback (if model switching enabled)
2914
+ * 3. Workspace default model
2915
+ * 4. File-cached discovery/selection — used when live discovery fails
2916
+ * 5. Hard-coded 'default' (DWS decides) — fallback when discovery fails
2917
+ *
2918
+ * For all other `duo-workflow-*` model IDs the static mapping is used as-is.
2919
+ */
2920
+ async resolveModelRef() {
2921
+ const staticRef = getWorkflowModelRef(this.modelId);
2922
+ if (this.modelId !== "duo-workflow") {
2923
+ return staticRef ?? "default";
2924
+ }
2925
+ if (this._selectedModelRef) {
2926
+ return this._selectedModelRef;
2927
+ }
2928
+ if (!this._discoveryPromise) {
2929
+ this._discoveryPromise = this.doResolveModelRef();
2930
+ this._discoveryPromise.finally(() => {
2931
+ this._discoveryPromise = void 0;
2932
+ });
2933
+ }
2934
+ return this._discoveryPromise;
2935
+ }
2936
  /**
   * Perform live model discovery and pick the effective model ref.
   * Updates the in-memory selection and the file cache as side effects.
   */
  async doResolveModelRef() {
    const rootNamespaceId = await this.resolveRootNamespaceId();
    if (!rootNamespaceId) {
      // No namespace to query: let the service pick its default.
      this._selectedModelRef = "default";
      return "default";
    }
    try {
      const discovered = await this.modelDiscovery.discover(rootNamespaceId);
      // Persist discovery so a later session can resolve without a live call.
      this.modelCache.saveDiscovery(discovered);
      if (discovered.pinnedModel) {
        // An admin-pinned model always wins over user selection.
        this._selectedModelRef = discovered.pinnedModel.ref;
        this._selectedModelName = discovered.pinnedModel.name;
        this.modelCache.saveSelection(discovered.pinnedModel.ref, discovered.pinnedModel.name);
        return discovered.pinnedModel.ref;
      }
      // Instance-level callback takes precedence over the workflow option.
      const selectFn = this.onSelectModel ?? this.workflowOptions.onSelectModel;
      if (discovered.selectableModels.length > 0 && selectFn) {
        const selected = await selectFn(discovered.selectableModels);
        if (selected) {
          // Only honor the selection if it names a known selectable model.
          const match = discovered.selectableModels.find((m) => m.ref === selected);
          if (match) {
            this._selectedModelRef = match.ref;
            this._selectedModelName = match.name;
            this.modelCache.saveSelection(match.ref, match.name);
            return match.ref;
          }
        }
      }
      if (discovered.defaultModel) {
        this._selectedModelRef = discovered.defaultModel.ref;
        this._selectedModelName = discovered.defaultModel.name;
        this.modelCache.saveSelection(discovered.defaultModel.ref, discovered.defaultModel.name);
        return discovered.defaultModel.ref;
      }
    } catch {
      // Live discovery failed; fall back to the last cached selection.
      const cachedEntry = this.modelCache.load();
      if (cachedEntry?.selectedModelRef) {
        this._selectedModelRef = cachedEntry.selectedModelRef;
        this._selectedModelName = cachedEntry.selectedModelName ?? void 0;
        return cachedEntry.selectedModelRef;
      }
    }
    // Nothing discovered, selected, or cached: defer to the service.
    this._selectedModelRef = "default";
    return "default";
  }
2981
+ /**
2982
+ * Pre-fetch available models for the workspace.
2983
+ * Call this early (e.g., on IDE startup) to avoid blocking the first stream.
2984
+ * Results are persisted to the workspace model cache.
2985
+ *
2986
+ * @param rootNamespaceId - GitLab group ID (e.g., 'gid://gitlab/Group/12345')
2987
+ * @returns Discovered models with default, selectable, and pinned models
2988
+ */
2989
+ async discoverModels(rootNamespaceId) {
2990
+ const result = await this.modelDiscovery.discover(rootNamespaceId);
2991
+ this.modelCache.saveDiscovery(result);
2992
+ return result;
2993
+ }
2994
  /**
   * Get the file-based model cache instance for this workspace.
   * Useful for consumers that need direct cache access (e.g. a discover route).
   */
  getModelCache() {
    return this.modelCache;
  }
3001
+ /**
3002
+ * Stop the active workflow.
3003
+ */
3004
+ stopWorkflow() {
3005
+ for (const client of this.activeClients) {
3006
+ if (client.isConnected) {
3007
+ client.stop();
3008
+ }
3009
+ }
3010
+ }
3011
  /**
   * Reset the workflow state, forcing a new workflow to be created on the
   * next doStream() call. Call this when starting a new conversation.
   */
  resetWorkflow() {
    this.currentWorkflowId = null;
    // Also drop the cross-stream record of already-emitted agent text so the
    // next workflow streams from a clean slate.
    this.persistedAgentEmitted.clear();
  }
3019
  /**
   * Get the current workflow ID (if any).
   * Useful for consumers that need to track workflow state.
   */
  get workflowId() {
    return this.currentWorkflowId;
  }
3026
+ // ---------------------------------------------------------------------------
3027
+ // LanguageModelV2 — doGenerate (non-streaming)
3028
+ // ---------------------------------------------------------------------------
3029
+ async doGenerate(options) {
3030
+ const { stream } = await this.doStream(options);
3031
+ const reader = stream.getReader();
3032
+ const textParts = [];
3033
+ const toolCalls = [];
3034
+ let finishReason = "unknown";
3035
+ const usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
3036
+ try {
3037
+ while (true) {
3038
+ const { done, value } = await reader.read();
3039
+ if (done) break;
3040
+ switch (value.type) {
3041
+ case "text-delta":
3042
+ textParts.push(value.delta);
3043
+ break;
3044
+ case "tool-call":
3045
+ toolCalls.push({
3046
+ type: "tool-call",
3047
+ toolCallId: value.toolCallId,
3048
+ toolName: value.toolName,
3049
+ input: value.input
3050
+ });
3051
+ break;
3052
+ case "finish":
3053
+ finishReason = value.finishReason;
3054
+ if (value.usage) {
3055
+ usage.inputTokens = value.usage.inputTokens ?? 0;
3056
+ usage.outputTokens = value.usage.outputTokens ?? 0;
3057
+ usage.totalTokens = value.usage.totalTokens ?? 0;
3058
+ }
3059
+ break;
3060
+ case "error":
3061
+ throw value.error;
3062
+ }
3063
+ }
3064
+ } finally {
3065
+ reader.releaseLock();
3066
+ }
3067
+ const content = [];
3068
+ const fullText = textParts.join("");
3069
+ if (fullText) {
3070
+ content.push({ type: "text", text: fullText });
3071
+ }
3072
+ content.push(...toolCalls);
3073
+ return { content, finishReason, usage, warnings: [] };
3074
+ }
3075
+ // ---------------------------------------------------------------------------
3076
+ // LanguageModelV2 — doStream (streaming)
3077
+ // ---------------------------------------------------------------------------
3078
+ async doStream(options) {
3079
+ const goal = this.extractGoalFromPrompt(options.prompt);
3080
+ const modelRef = await this.resolveModelRef();
3081
+ const mcpTools = this.extractMcpTools(options);
3082
+ const preapprovedTools = this.workflowOptions.preapprovedTools ?? mcpTools.map((t) => t.name);
3083
+ const additionalContext = this.buildAdditionalContext(options.prompt);
3084
+ const toolExecutor = this.toolExecutor ?? null;
3085
+ await this.tokenClient.getToken(
3086
+ this.workflowOptions.workflowDefinition ?? DEFAULT_WORKFLOW_DEFINITION,
3087
+ this.workflowOptions.rootNamespaceId
3088
+ );
3089
+ const projectId = await this.resolveProjectId();
3090
+ let workflowId;
3091
+ if (this.currentWorkflowId) {
3092
+ workflowId = this.currentWorkflowId;
3093
+ } else {
3094
+ workflowId = await this.tokenClient.createWorkflow(goal, {
3095
+ projectId,
3096
+ namespaceId: this.workflowOptions.namespaceId,
3097
+ workflowDefinition: this.workflowOptions.workflowDefinition
3098
+ });
3099
+ this.currentWorkflowId = workflowId;
3100
+ }
3101
+ const wsClient = new GitLabWorkflowClient();
3102
+ this.activeClients.add(wsClient);
3103
+ let textBlockCounter = 0;
3104
+ const ss = {
3105
+ streamClosed: false,
3106
+ streamedInputChars: 0,
3107
+ streamedOutputChars: 0,
3108
+ pendingToolCount: 0,
3109
+ deferredClose: null,
3110
+ activeTextBlockId: null,
3111
+ agentMessageEmitted: new Map(this.persistedAgentEmitted),
3112
+ currentAgentMessageId: "",
3113
+ activeClient: wsClient
3114
+ };
3115
+ for (const msg of options.prompt) {
3116
+ if (msg.role === "system") {
3117
+ ss.streamedInputChars += msg.content.length;
3118
+ } else if (msg.role === "user") {
3119
+ for (const part of msg.content) {
3120
+ if (part.type === "text") {
3121
+ ss.streamedInputChars += part.text.length;
3122
+ }
3123
+ }
3124
+ }
3125
+ }
3126
+ const stream = new ReadableStream({
3127
+ start: async (controller) => {
3128
+ try {
3129
+ await wsClient.connect(
3130
+ {
3131
+ instanceUrl: this.config.instanceUrl,
3132
+ modelRef,
3133
+ headers: this.config.getHeaders(),
3134
+ projectId: this.workflowOptions.projectId,
3135
+ namespaceId: this.workflowOptions.namespaceId,
3136
+ rootNamespaceId: this.workflowOptions.rootNamespaceId
3137
+ },
3138
+ (event) => {
3139
+ this.handleWorkflowEvent(
3140
+ ss,
3141
+ event,
3142
+ controller,
3143
+ wsClient,
3144
+ toolExecutor,
3145
+ () => `text-${textBlockCounter++}`
3146
+ );
3147
+ }
3148
+ );
3149
+ const workflowDef = this.workflowOptions.workflowDefinition ?? DEFAULT_WORKFLOW_DEFINITION;
3150
+ const capabilities = this.workflowOptions.clientCapabilities ?? DEFAULT_CLIENT_CAPABILITIES;
3151
+ const workflowMetadata = await this.buildWorkflowMetadata();
3152
+ const metadataStr = JSON.stringify(workflowMetadata);
3153
+ const basePayload = {
3154
+ workflowID: workflowId,
3155
+ clientVersion: CLIENT_VERSION,
3156
+ workflowDefinition: workflowDef,
3157
+ goal,
3158
+ workflowMetadata: metadataStr,
3159
+ clientCapabilities: capabilities,
3160
+ preapproved_tools: preapprovedTools
3161
+ };
3162
+ const baseSize = JSON.stringify(basePayload).length + 100;
3163
+ const trimmed = this.trimPayload(mcpTools, additionalContext, baseSize);
3164
+ const trimmedPreapproved = preapprovedTools.filter(
3165
+ (name) => trimmed.mcpTools.some((t) => t.name === name)
3166
+ );
3167
+ const startReq = {
3168
+ workflowID: workflowId,
3169
+ clientVersion: CLIENT_VERSION,
3170
+ workflowDefinition: workflowDef,
3171
+ goal,
3172
+ workflowMetadata: metadataStr,
3173
+ additional_context: trimmed.additionalContext,
3174
+ clientCapabilities: capabilities,
3175
+ mcpTools: trimmed.mcpTools,
3176
+ preapproved_tools: trimmedPreapproved
3177
+ };
3178
+ if (this.workflowOptions.flowConfig) {
3179
+ startReq.flowConfig = this.workflowOptions.flowConfig;
3180
+ }
3181
+ if (this.workflowOptions.flowConfigSchemaVersion) {
3182
+ startReq.flowConfigSchemaVersion = this.workflowOptions.flowConfigSchemaVersion;
3183
+ }
3184
+ wsClient.sendStartRequest(startReq);
3185
+ controller.enqueue({
3186
+ type: "stream-start",
3187
+ warnings: []
3188
+ });
3189
+ controller.enqueue({
3190
+ type: "response-metadata",
3191
+ id: workflowId,
3192
+ modelId: modelRef
3193
+ });
3194
+ } catch (error) {
3195
+ if (!ss.streamClosed) {
3196
+ controller.enqueue({
3197
+ type: "error",
3198
+ error: error instanceof GitLabError ? error : new GitLabError({
3199
+ message: `Workflow connection failed: ${error}`,
3200
+ cause: error
3201
+ })
3202
+ });
3203
+ ss.streamClosed = true;
3204
+ controller.close();
3205
+ }
3206
+ }
3207
+ },
3208
+ cancel: (_reason) => {
3209
+ wsClient.stop();
3210
+ wsClient.close();
3211
+ this.activeClients.delete(wsClient);
3212
+ ss.activeClient = null;
3213
+ this.currentWorkflowId = null;
3214
+ }
3215
+ });
3216
+ return {
3217
+ stream,
3218
+ request: {
3219
+ body: { workflowId, modelRef, goal }
3220
+ }
3221
+ };
3222
+ }
3223
  // ---------------------------------------------------------------------------
  // Event handling
  // ---------------------------------------------------------------------------
  /**
   * Translate one workflow websocket event into AI SDK stream parts.
   *
   * Invariants maintained here:
   * - at most one text block is open at a time (ss.activeTextBlockId);
   * - stream closure is deferred while tool executions are pending
   *   (ss.pendingToolCount / ss.deferredClose);
   * - nothing is enqueued once ss.streamClosed is set.
   */
  handleWorkflowEvent(ss, event, controller, wsClient, toolExecutor, nextTextId) {
    if (ss.streamClosed) {
      return;
    }
    switch (event.type) {
      case "checkpoint": {
        // Incremental agent output arrives as checkpoints.
        this.processCheckpoint(ss, event.data, controller, nextTextId);
        break;
      }
      case "tool-request": {
        const { requestID, data } = event;
        // Keep args as the raw string when it is valid JSON; otherwise fall
        // back to the raw string or an empty object literal.
        let parsedArgs;
        try {
          JSON.parse(data.args);
          parsedArgs = data.args;
        } catch {
          parsedArgs = data.args || "{}";
        }
        // Close any open text block before emitting tool parts.
        if (ss.activeTextBlockId) {
          controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
          ss.activeTextBlockId = null;
        }
        controller.enqueue({
          type: "tool-input-start",
          id: requestID,
          toolName: data.name,
          providerExecuted: true
        });
        controller.enqueue({
          type: "tool-input-delta",
          id: requestID,
          delta: parsedArgs
        });
        controller.enqueue({
          type: "tool-input-end",
          id: requestID
        });
        controller.enqueue({
          type: "tool-call",
          toolCallId: requestID,
          toolName: data.name,
          input: parsedArgs,
          providerExecuted: true
        });
        // Fire-and-forget: the method reports failures through the stream
        // itself, so rejections here are intentionally swallowed.
        this.executeToolAndRespond(
          ss,
          wsClient,
          controller,
          requestID,
          data.name,
          parsedArgs,
          toolExecutor
        ).catch(() => {
        });
        break;
      }
      case "builtin-tool-request": {
        // Built-in tools are mapped to client-side tool names/args first.
        const mapped = mapBuiltinTool(event.toolName, event.data);
        const mappedArgs = JSON.stringify(mapped.args);
        if (ss.activeTextBlockId) {
          controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
          ss.activeTextBlockId = null;
        }
        controller.enqueue({
          type: "tool-input-start",
          id: event.requestID,
          toolName: mapped.toolName,
          providerExecuted: true
        });
        controller.enqueue({
          type: "tool-input-delta",
          id: event.requestID,
          delta: mappedArgs
        });
        controller.enqueue({
          type: "tool-input-end",
          id: event.requestID
        });
        controller.enqueue({
          type: "tool-call",
          toolCallId: event.requestID,
          toolName: mapped.toolName,
          input: mappedArgs,
          providerExecuted: true
        });
        this.executeToolAndRespond(
          ss,
          wsClient,
          controller,
          event.requestID,
          mapped.toolName,
          mappedArgs,
          toolExecutor
        ).catch(() => {
        });
        break;
      }
      case "completed": {
        if (ss.activeTextBlockId) {
          controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
          ss.activeTextBlockId = null;
        }
        const doCompleteClose = () => {
          if (ss.streamClosed) return;
          // chars/4 token estimate — no exact counts from the service.
          const inputTokens = Math.ceil(ss.streamedInputChars / 4);
          const outputTokens = Math.ceil(ss.streamedOutputChars / 4);
          controller.enqueue({
            type: "finish",
            finishReason: "stop",
            usage: { inputTokens, outputTokens, totalTokens: inputTokens + outputTokens }
          });
          ss.streamClosed = true;
          controller.close();
          this.cleanupClient(ss);
        };
        // Defer closing until in-flight tool executions have settled.
        if (ss.pendingToolCount > 0) {
          ss.deferredClose = doCompleteClose;
        } else {
          ss.deferredClose = null;
          doCompleteClose();
        }
        break;
      }
      case "failed": {
        if (ss.activeTextBlockId) {
          controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
          ss.activeTextBlockId = null;
        }
        controller.enqueue({
          type: "error",
          error: new GitLabError({
            message: `Workflow failed: ${sanitizeErrorMessage(event.error.message)}`,
            cause: event.error
          })
        });
        ss.streamClosed = true;
        controller.close();
        // true: a failed workflow cannot be resumed — clear its ID.
        this.cleanupClient(ss, true);
        break;
      }
      case "closed": {
        if (ss.streamClosed) {
          break;
        }
        if (ss.activeTextBlockId) {
          controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
          ss.activeTextBlockId = null;
        }
        const doClose = () => {
          if (ss.streamClosed) return;
          // 1e3 === 1000, the WebSocket "normal closure" code; anything
          // else is surfaced as an error.
          if (event.code !== 1e3) {
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `WebSocket closed unexpectedly: code=${event.code} reason=${sanitizeErrorMessage(event.reason)}`,
                statusCode: event.code
              })
            });
            ss.streamClosed = true;
            controller.close();
            this.cleanupClient(ss, true);
          } else {
            const inTok = Math.ceil(ss.streamedInputChars / 4);
            const outTok = Math.ceil(ss.streamedOutputChars / 4);
            controller.enqueue({
              type: "finish",
              finishReason: "stop",
              usage: { inputTokens: inTok, outputTokens: outTok, totalTokens: inTok + outTok }
            });
            ss.streamClosed = true;
            controller.close();
            this.cleanupClient(ss);
          }
        };
        if (ss.pendingToolCount > 0) {
          ss.deferredClose = doClose;
        } else {
          ss.deferredClose = null;
          doClose();
        }
        break;
      }
    }
  }
3410
  // ---------------------------------------------------------------------------
  // Checkpoint content extraction
  // ---------------------------------------------------------------------------
  /**
   * Emit incremental agent text from a workflow checkpoint.
   *
   * Two shapes are handled:
   * - a plain `content` string (no serialized checkpoint): streamed directly;
   * - a serialized `checkpoint` JSON whose channel_values.ui_chat_log holds
   *   cumulative agent messages — only the unseen suffix of each message is
   *   emitted, tracked per message ID in ss.agentMessageEmitted (and mirrored
   *   into this.persistedAgentEmitted to survive across streams).
   */
  processCheckpoint(ss, checkpoint, controller, nextTextId) {
    if (!checkpoint.checkpoint) {
      if (checkpoint.content) {
        if (!ss.activeTextBlockId) {
          ss.activeTextBlockId = nextTextId();
          controller.enqueue({ type: "text-start", id: ss.activeTextBlockId });
        }
        controller.enqueue({
          type: "text-delta",
          id: ss.activeTextBlockId,
          delta: checkpoint.content
        });
        ss.streamedOutputChars += checkpoint.content.length;
      }
      return;
    }
    let parsed;
    try {
      parsed = JSON.parse(checkpoint.checkpoint);
    } catch (e) {
      // Malformed checkpoint payloads are ignored.
      return;
    }
    const chatLog = parsed.channel_values?.ui_chat_log;
    if (!chatLog || !Array.isArray(chatLog) || chatLog.length === 0) {
      return;
    }
    // Only stream text for statuses that represent live or finished output.
    if (checkpoint.status !== "RUNNING" && checkpoint.status !== "INPUT_REQUIRED" && checkpoint.status !== "FINISHED" && checkpoint.status !== "COMPLETED") {
      return;
    }
    for (let i = 0; i < chatLog.length; i++) {
      const entry = chatLog[i];
      if (entry.message_type !== "agent") continue;
      const content = entry.content || "";
      // Entries without an ID are keyed by their position in the log.
      const msgId = entry.message_id || `idx-${i}`;
      const emittedLen = ss.agentMessageEmitted.get(msgId) ?? 0;
      // Skip messages we have already emitted in full.
      if (content.length <= emittedLen) continue;
      const delta = content.slice(emittedLen);
      // A new message ID closes the current text block and opens a new one.
      const isSameMsg = msgId === ss.currentAgentMessageId;
      if (!isSameMsg && ss.activeTextBlockId) {
        controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
        ss.activeTextBlockId = null;
      }
      if (!ss.activeTextBlockId) {
        ss.activeTextBlockId = nextTextId();
        controller.enqueue({ type: "text-start", id: ss.activeTextBlockId });
      }
      controller.enqueue({
        type: "text-delta",
        id: ss.activeTextBlockId,
        delta
      });
      ss.streamedOutputChars += delta.length;
      ss.agentMessageEmitted.set(msgId, content.length);
      this.persistedAgentEmitted.set(msgId, content.length);
      ss.currentAgentMessageId = msgId;
    }
  }
3470
  /**
   * Run one tool request through the configured executor, report the outcome
   * back to the workflow service, and mirror it into the AI SDK stream as a
   * tool-result part.
   *
   * Maintains ss.pendingToolCount so that a close deferred by the
   * 'completed'/'closed' handlers only fires after the last tool settles.
   */
  async executeToolAndRespond(ss, wsClient, controller, requestID, toolName, argsJson, toolExecutor) {
    ss.pendingToolCount++;
    // Enqueueing on a closed stream throws; swallow that race here.
    const safeEnqueue = (part) => {
      if (ss.streamClosed) {
        return;
      }
      try {
        controller.enqueue(part);
      } catch {
      }
    };
    try {
      if (toolExecutor) {
        const result = await toolExecutor(toolName, argsJson, requestID);
        // Relay the raw result (and any error) to the workflow service.
        wsClient.sendActionResponse(requestID, result.result, result.error);
        ss.streamedInputChars += argsJson.length;
        // NOTE(review): assumes result.result is always a string. If an
        // executor ever returns undefined here, the .length access throws
        // AFTER the response above was sent, and the catch block below sends
        // a second sendActionResponse — confirm the executor contract.
        ss.streamedOutputChars += result.result.length;
        let toolOutput = result.result;
        let toolTitle = `${toolName} result`;
        let toolMetadata = { output: result.result };
        // Best-effort unpacking of structured results shaped like
        // { output, title, metadata }; non-JSON results stay as-is.
        try {
          const parsed = JSON.parse(result.result);
          if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) {
            if (typeof parsed.output === "string") {
              toolOutput = parsed.output;
            } else if (parsed.output != null) {
              toolOutput = JSON.stringify(parsed.output);
            }
            if (typeof parsed.title === "string") toolTitle = parsed.title;
            if (parsed.metadata && typeof parsed.metadata === "object") {
              // Metadata values are coerced to strings for the stream part.
              toolMetadata = {};
              for (const [k, v] of Object.entries(parsed.metadata)) {
                toolMetadata[k] = typeof v === "string" ? v : JSON.stringify(v);
              }
              if (!("output" in toolMetadata)) {
                toolMetadata.output = toolOutput;
              }
            }
          } else if (Array.isArray(parsed)) {
            toolOutput = JSON.stringify(parsed);
            toolMetadata = { output: toolOutput };
          }
        } catch {
          // Not JSON — keep the raw string result.
        }
        if (result.error) {
          // Normalize the executor-reported error to a string.
          let errorText;
          if (typeof result.error === "string") {
            errorText = result.error;
          } else if (result.error && typeof result.error === "object") {
            errorText = JSON.stringify(result.error);
          } else {
            errorText = String(result.error);
          }
          const errorOutput = toolOutput || errorText;
          safeEnqueue({
            type: "tool-result",
            toolCallId: requestID,
            toolName,
            result: {
              output: errorOutput,
              title: toolTitle,
              metadata: { ...toolMetadata, error: errorText }
            },
            isError: true,
            providerExecuted: true
          });
        } else {
          safeEnqueue({
            type: "tool-result",
            toolCallId: requestID,
            toolName,
            result: {
              output: toolOutput,
              title: toolTitle,
              metadata: toolMetadata
            },
            isError: false,
            providerExecuted: true
          });
        }
      } else {
        // No executor configured: report the failure to both sides.
        const errorMsg = `Tool executor not configured for tool: ${toolName}`;
        wsClient.sendActionResponse(requestID, "", errorMsg);
        safeEnqueue({
          type: "tool-result",
          toolCallId: requestID,
          toolName,
          result: {
            output: errorMsg,
            title: `${toolName} error`,
            metadata: { output: errorMsg }
          },
          isError: true,
          providerExecuted: true
        });
      }
    } catch (error) {
      // Executor threw: sanitize and report on both channels.
      const rawMsg = error instanceof Error ? error.message : String(error);
      const errorMsg = sanitizeErrorMessage(rawMsg);
      wsClient.sendActionResponse(requestID, "", errorMsg);
      safeEnqueue({
        type: "tool-result",
        toolCallId: requestID,
        toolName,
        result: {
          output: errorMsg,
          title: `${toolName} error`,
          metadata: { output: errorMsg }
        },
        isError: true,
        providerExecuted: true
      });
    } finally {
      ss.pendingToolCount--;
      // Best-effort usage callback; consumer errors must not break the flow.
      if (this.onUsageUpdate) {
        try {
          this.onUsageUpdate({
            inputTokens: Math.ceil(ss.streamedInputChars / 4),
            outputTokens: Math.ceil(ss.streamedOutputChars / 4)
          });
        } catch {
        }
      }
      // Fire a close that was deferred while tools were still running.
      if (ss.pendingToolCount <= 0 && ss.deferredClose) {
        const close = ss.deferredClose;
        ss.deferredClose = null;
        close();
      }
    }
  }
3600
+ cleanupClient(ss, clearWorkflow = false) {
3601
+ if (ss.activeClient) {
3602
+ ss.activeClient.close();
3603
+ this.activeClients.delete(ss.activeClient);
3604
+ ss.activeClient = null;
3605
+ }
3606
+ if (clearWorkflow) {
3607
+ this.currentWorkflowId = null;
3608
+ this.persistedAgentEmitted.clear();
3609
+ }
3610
+ }
3611
+ // ---------------------------------------------------------------------------
3612
+ // Workflow metadata
3613
+ // ---------------------------------------------------------------------------
3614
+ async buildWorkflowMetadata() {
3615
+ const metadata = {
3616
+ extended_logging: false
3617
+ };
3618
+ try {
3619
+ const workDir = this.workflowOptions.workingDirectory ?? process.cwd();
3620
+ const gitInfo = await this.getGitInfo(workDir);
3621
+ if (gitInfo.url) metadata.git_url = gitInfo.url;
3622
+ if (gitInfo.sha) metadata.git_sha = gitInfo.sha;
3623
+ if (gitInfo.branch) metadata.git_branch = gitInfo.branch;
3624
+ } catch {
3625
+ }
3626
+ return metadata;
3627
+ }
3628
+ async getGitInfo(workDir) {
3629
+ const { execFile } = await import("child_process");
3630
+ const { promisify } = await import("util");
3631
+ const execFileAsync = promisify(execFile);
3632
+ const opts = { cwd: workDir, timeout: 3e3 };
3633
+ const run = async (cmd, args) => {
3634
+ try {
3635
+ const { stdout } = await execFileAsync(cmd, args, opts);
3636
+ return stdout.trim() || void 0;
3637
+ } catch {
3638
+ return void 0;
3639
+ }
3640
+ };
3641
+ const [url, sha, branch] = await Promise.all([
3642
+ run("git", ["remote", "get-url", "origin"]),
3643
+ run("git", ["rev-parse", "HEAD"]),
3644
+ run("git", ["rev-parse", "--abbrev-ref", "HEAD"])
3645
+ ]);
3646
+ return { url, sha, branch };
3647
+ }
3648
+ // ---------------------------------------------------------------------------
3649
+ // Prompt / tool extraction helpers
3650
+ // ---------------------------------------------------------------------------
3651
+ /**
3652
+ * Extract the user's goal (last user message) from the AI SDK prompt.
3653
+ */
3654
+ extractGoalFromPrompt(prompt) {
3655
+ for (let i = prompt.length - 1; i >= 0; i--) {
3656
+ const message = prompt[i];
3657
+ if (message.role === "user") {
3658
+ const textParts = message.content.filter((part) => part.type === "text").map((part) => part.text);
3659
+ if (textParts.length > 0) {
3660
+ return textParts.join("\n");
3661
+ }
3662
+ }
3663
+ }
3664
+ return "";
3665
+ }
3666
+ /**
3667
+ * Convert AI SDK tools to DWS McpToolDefinition format.
3668
+ */
3669
+ extractMcpTools(options) {
3670
+ if (this.workflowOptions.mcpTools && this.workflowOptions.mcpTools.length > 0) {
3671
+ return this.workflowOptions.mcpTools;
3672
+ }
3673
+ if (!options.tools || options.tools.length === 0) {
3674
+ return [];
3675
+ }
3676
+ return options.tools.filter((tool) => tool.type === "function").map((tool) => ({
3677
+ name: tool.name,
3678
+ description: tool.description || "",
3679
+ inputSchema: JSON.stringify(tool.inputSchema || { type: "object", properties: {} })
3680
+ }));
3681
+ }
3682
  // ---------------------------------------------------------------------------
  // Payload size management
  // ---------------------------------------------------------------------------
  // Budget for the serialized start request; mirrors the 4MB gRPC message
  // size limit enforced by the Duo Workflow Service.
  static MAX_START_REQUEST_BYTES = 4 * 1024 * 1024;
3686
  /**
   * Trim mcpTools and additionalContext to fit within the DWS 4MB gRPC
   * message size limit (`MAX_MESSAGE_SIZE` in duo_workflow_service/server.py).
   *
   * DWS has no per-field limits on tool descriptions, schemas, or context
   * items. The only hard constraint is the total serialized message size.
   *
   * Strategy (progressive, only if over budget):
   * 1. Send everything as-is
   * 2. Simplify tool input schemas (strip descriptions from properties)
   * 3. Strip schemas to minimal form (type + property names only)
   * 4. Drop tools from the end until it fits
   *
   * NOTE(review): sizes use String.length (UTF-16 code units), not bytes —
   * multi-byte content undercounts against the 4MB byte limit; confirm
   * whether a byte-accurate measure is needed.
   * NOTE(review): additionalContext is never trimmed, so an oversized
   * context alone can still exceed the budget even after every tool is
   * dropped.
   */
  trimPayload(mcpTools, additionalContext, basePayloadSize) {
    const budget = _GitLabWorkflowLanguageModel.MAX_START_REQUEST_BYTES - basePayloadSize;
    const contextJson = JSON.stringify(additionalContext);
    const toolsJson = JSON.stringify(mcpTools);
    const totalSize = toolsJson.length + contextJson.length;
    // Step 1: everything fits as-is.
    if (totalSize <= budget) {
      return { mcpTools, additionalContext };
    }
    // Step 2: strip property descriptions from each tool's schema.
    const simplifiedTools = mcpTools.map((tool) => ({
      name: tool.name,
      description: tool.description,
      inputSchema: simplifySchema(tool.inputSchema)
    }));
    const simpSize = JSON.stringify(simplifiedTools).length + contextJson.length;
    if (simpSize <= budget) {
      return { mcpTools: simplifiedTools, additionalContext };
    }
    // Step 3: reduce schemas to the minimal type/property-name form.
    const minTools = simplifiedTools.map((tool) => ({
      name: tool.name,
      description: tool.description,
      inputSchema: minimalSchema(tool.inputSchema)
    }));
    const minSize = JSON.stringify(minTools).length + contextJson.length;
    if (minSize <= budget) {
      return { mcpTools: minTools, additionalContext };
    }
    // Step 4: drop tools from the end until the payload fits.
    const keptTools = [...minTools];
    while (keptTools.length > 0) {
      const currentSize = JSON.stringify(keptTools).length + contextJson.length;
      if (currentSize <= budget) break;
      keptTools.pop();
    }
    return { mcpTools: keptTools, additionalContext };
  }
3733
+ buildAdditionalContext(prompt) {
3734
+ const context = [];
3735
+ if (this.workflowOptions.additionalContext) {
3736
+ context.push(...this.workflowOptions.additionalContext);
3737
+ }
3738
+ for (const message of prompt) {
3739
+ if (message.role === "system") {
3740
+ context.push({
3741
+ category: "system_prompt",
3742
+ content: message.content,
3743
+ metadata: JSON.stringify({ role: "system" })
3744
+ });
3745
+ } else if (message.role === "assistant") {
3746
+ const textContent = message.content.filter((part) => part.type === "text").map((part) => part.text).join("\n");
3747
+ if (textContent) {
3748
+ context.push({
3749
+ category: "conversation",
3750
+ content: textContent,
3751
+ metadata: JSON.stringify({ role: "assistant" })
3752
+ });
3753
+ }
3754
+ }
3755
+ }
3756
+ return context;
3757
+ }
3758
+ };
3759
+
3760
// src/gitlab-oauth-types.ts
// OAuth client ID used for GitLab.com (opencode-gitlab-auth flow).
var OPENCODE_GITLAB_AUTH_CLIENT_ID = "1d89f9fdb23ee96d4e603201f6861dab6e143c5c3c00469a018a2d94bdc03d4e";
// Alternate client ID bundled with this provider.
var BUNDLED_CLIENT_ID = "36f2a70cddeb5a0889d4fd8295c241b7e9848e89cf9e599d0eed2d8e5350fbf5";
var GITLAB_COM_URL = "https://gitlab.com";
// Treat tokens as needing refresh this long before actual expiry (5 min).
var TOKEN_EXPIRY_SKEW_MS = 5 * 60 * 1e3;
// Scopes requested during the OAuth flow.
var OAUTH_SCOPES = ["api"];
3767
+ // src/gitlab-oauth-manager.ts
3768
+ var GitLabOAuthManager = class {
3769
  // Injected fetch implementation; defaults to the global fetch.
  fetch;
  constructor(fetchImpl = fetch) {
    this.fetch = fetchImpl;
  }
3773
+ /**
3774
+ * Check if a token is expired
3775
+ */
3776
+ isTokenExpired(expiresAt) {
3777
+ return Date.now() >= expiresAt;
3778
+ }
3779
+ /**
3780
+ * Check if a token needs refresh (within skew window)
3781
+ */
3782
+ needsRefresh(expiresAt) {
3783
+ return Date.now() >= expiresAt - TOKEN_EXPIRY_SKEW_MS;
3784
+ }
3785
+ /**
3786
+ * Refresh tokens if needed
3787
+ * Returns the same tokens if refresh is not needed, or new tokens if refreshed
3788
+ */
3789
+ async refreshIfNeeded(tokens, clientId) {
3790
+ if (!this.needsRefresh(tokens.expiresAt)) {
3791
+ return tokens;
3792
+ }
3793
+ if (this.isTokenExpired(tokens.expiresAt)) {
3794
+ throw new GitLabError({
3795
+ message: "OAuth token has expired and cannot be used"
3796
+ });
3797
+ }
3798
+ return this.exchangeRefreshToken({
3799
+ instanceUrl: tokens.instanceUrl,
3800
+ refreshToken: tokens.refreshToken,
3801
+ clientId
3802
+ });
3803
+ }
3804
+ /**
3805
+ * Exchange authorization code for tokens
3806
+ * Based on gitlab-vscode-extension createOAuthAccountFromCode
3807
+ */
3808
+ async exchangeAuthorizationCode(params) {
3809
+ const { instanceUrl, code, codeVerifier, clientId, redirectUri } = params;
3810
+ const tokenResponse = await this.exchangeToken({
3811
+ instanceUrl,
3812
+ grantType: "authorization_code",
3813
+ code,
3814
+ codeVerifier,
3815
+ clientId: clientId || this.getClientId(instanceUrl),
3816
+ redirectUri
3817
+ });
3818
+ return this.createTokensFromResponse(tokenResponse, instanceUrl);
3819
+ }
3820
+ /**
3821
+ * Exchange refresh token for new tokens
3822
+ * Based on gitlab-vscode-extension TokenExchangeService
3823
+ */
3824
+ async exchangeRefreshToken(params) {
3825
+ const { instanceUrl, refreshToken, clientId } = params;
3826
+ const tokenResponse = await this.exchangeToken({
3827
+ instanceUrl,
3828
+ grantType: "refresh_token",
3829
+ refreshToken,
3830
+ clientId: clientId || this.getClientId(instanceUrl)
3831
+ });
3832
+ return this.createTokensFromResponse(tokenResponse, instanceUrl);
3833
+ }
3834
+ /**
3835
+ * Get the OAuth client ID for an instance.
3836
+ * Priority: env var > opencode-gitlab-auth default (for GitLab.com).
3837
+ * Note: callers (e.g. exchangeRefreshToken) may pass an explicit clientId
3838
+ * that bypasses this method entirely.
3839
+ */
3840
+ getClientId(instanceUrl) {
3841
+ const envClientId = process.env["GITLAB_OAUTH_CLIENT_ID"];
3842
+ if (envClientId) {
3843
+ return envClientId;
3844
+ }
3845
+ if (instanceUrl === GITLAB_COM_URL) {
3846
+ return OPENCODE_GITLAB_AUTH_CLIENT_ID;
3847
+ }
3848
+ throw new GitLabError({
3849
+ message: `No OAuth client ID configured for instance ${instanceUrl}. Please provide a clientId parameter or set GITLAB_OAUTH_CLIENT_ID environment variable.`
3850
+ });
3851
+ }
3852
+ /**
3853
+ * Exchange token with GitLab OAuth endpoint
3854
+ * Based on gitlab-vscode-extension GitLabService.exchangeToken
3855
+ */
3856
+ async exchangeToken(params) {
3857
+ const { instanceUrl, grantType, code, codeVerifier, refreshToken, clientId, redirectUri } = params;
3858
+ const body = {
3859
+ client_id: clientId,
3860
+ grant_type: grantType
3861
+ };
3862
+ if (grantType === "authorization_code") {
3863
+ if (!code || !codeVerifier || !redirectUri) {
3864
+ throw new GitLabError({
3865
+ message: "Authorization code, code verifier, and redirect URI are required for authorization_code grant"
3866
+ });
3867
+ }
3868
+ body.code = code;
3869
+ body.code_verifier = codeVerifier;
3870
+ body.redirect_uri = redirectUri;
3871
+ } else if (grantType === "refresh_token") {
3872
+ if (!refreshToken) {
3873
+ throw new GitLabError({
3874
+ message: "Refresh token is required for refresh_token grant"
3875
+ });
3876
+ }
3877
+ body.refresh_token = refreshToken;
3878
+ }
3879
+ const url = `${instanceUrl}/oauth/token`;
3880
+ try {
3881
+ const response = await this.fetch(url, {
3882
+ method: "POST",
3883
+ headers: {
3884
+ "Content-Type": "application/x-www-form-urlencoded"
3885
+ },
3886
+ body: new URLSearchParams(body).toString()
3887
+ });
3888
+ if (!response.ok) {
3889
+ const errorText = await response.text();
3890
+ throw new GitLabError({
3891
+ message: `OAuth token exchange failed: ${response.status} ${response.statusText}`,
3892
+ cause: new Error(errorText)
3893
+ });
3894
+ }
3895
+ const data = await response.json();
3896
+ return data;
3897
+ } catch (error) {
3898
+ if (error instanceof GitLabError) {
3899
+ throw error;
3900
+ }
3901
+ throw new GitLabError({
3902
+ message: `Failed to exchange OAuth token: ${error instanceof Error ? error.message : String(error)}`,
3903
+ cause: error instanceof Error ? error : void 0
3904
+ });
3905
+ }
3906
+ }
3907
+ /**
3908
+ * Create GitLabOAuthTokens from token response
3909
+ */
3910
+ createTokensFromResponse(response, instanceUrl) {
3911
+ const expiresAt = this.createExpiresTimestamp(response);
3912
+ return {
3913
+ accessToken: response.access_token,
3914
+ refreshToken: response.refresh_token || "",
3915
+ expiresAt,
3916
+ instanceUrl
3917
+ };
3918
+ }
3919
+ /**
3920
+ * Create expiry timestamp from token response
3921
+ * Based on gitlab-vscode-extension createExpiresTimestamp
3922
+ */
3923
+ createExpiresTimestamp(response) {
3924
+ const createdAt = response.created_at * 1e3;
3925
+ const expiresIn = response.expires_in * 1e3;
3926
+ return createdAt + expiresIn;
3927
+ }
3928
+ };
3929
+
3930
+ // src/gitlab-provider.ts
3931
+ import * as fs2 from "fs";
3932
+ import * as path3 from "path";
3933
+ import * as os2 from "os";
3934
/**
 * Locate opencode's auth.json on disk.
 * Resolution order: $XDG_DATA_HOME/opencode/auth.json, then
 * ~/.local/share/opencode/auth.json on non-Windows, and
 * ~/.opencode/auth.json on Windows.
 */
function getOpenCodeAuthPath() {
  const home = os2.homedir();
  const xdgData = process.env.XDG_DATA_HOME;
  if (xdgData) {
    return path3.join(xdgData, "opencode", "auth.json");
  }
  return process.platform !== "win32" ? path3.join(home, ".local", "share", "opencode", "auth.json") : path3.join(home, ".opencode", "auth.json");
}
3945
/**
 * Read opencode's auth.json and return the auth entry for the given GitLab
 * instance, or undefined when the file or a matching entry is absent.
 * Prefers the `gitlab` OAuth entry (matched via enterpriseUrl, with or
 * without a trailing slash), then falls back to an entry keyed by the
 * instance URL itself. Any read/parse failure is rethrown as a plain Error.
 */
async function loadOpenCodeAuth(instanceUrl) {
  try {
    const authPath = getOpenCodeAuthPath();
    if (!fs2.existsSync(authPath)) {
      return void 0;
    }
    const authData = JSON.parse(fs2.readFileSync(authPath, "utf-8"));
    // Compare against the URL both as given and with a trailing slash stripped.
    const trimmedUrl = instanceUrl.replace(/\/$/, "");
    if (authData.gitlab?.type === "oauth") {
      const gitlabAuth = authData.gitlab;
      if (gitlabAuth.enterpriseUrl === instanceUrl || gitlabAuth.enterpriseUrl === trimmedUrl) {
        return gitlabAuth;
      }
    }
    // Fall back to entries keyed directly by instance URL (slash-insensitive).
    return authData[trimmedUrl] || authData[`${trimmedUrl}/`];
  } catch (error) {
    throw new Error(`Failed to load auth.json: ${error instanceof Error ? error.message : error}`);
  }
}
3965
/**
 * Resolve a GitLab API key from, in order: an explicit options.apiKey, an
 * OAuth entry in opencode's auth.json (refreshing it on disk when stale),
 * and finally the environment variable named by
 * options.environmentVariableName. Throws GitLabError when nothing is found
 * or when an OAuth refresh fails with no env-var fallback.
 */
async function loadApiKey(options, instanceUrl, clientId) {
  // 1) Explicit key wins outright.
  if (options.apiKey) {
    return options.apiKey;
  }
  // 2) OAuth tokens persisted by `opencode auth login gitlab`.
  const auth = await loadOpenCodeAuth(instanceUrl);
  if (auth?.type === "oauth") {
    const oauthManager = new GitLabOAuthManager();
    if (oauthManager.needsRefresh(auth.expires)) {
      try {
        const refreshed = await oauthManager.exchangeRefreshToken({
          instanceUrl,
          refreshToken: auth.refresh,
          clientId
        });
        // Persist the rotated tokens back to auth.json.
        // NOTE(review): this always writes to the `gitlab` key, even when the
        // entry was originally found under an instance-URL key — verify this
        // matches the auth plugin's expectations.
        const authPath = getOpenCodeAuthPath();
        const authData = JSON.parse(fs2.readFileSync(authPath, "utf-8"));
        authData.gitlab = {
          type: "oauth",
          refresh: refreshed.refreshToken,
          access: refreshed.accessToken,
          expires: refreshed.expiresAt,
          enterpriseUrl: instanceUrl
          // Use enterpriseUrl to match auth plugin format
        };
        // mode 384 === 0o600 (owner read/write only); note the mode only
        // applies if the file is created here, not to a pre-existing file.
        fs2.writeFileSync(authPath, JSON.stringify(authData, null, 2), { mode: 384 });
        return refreshed.accessToken;
      } catch (error) {
        // Refresh failed: fall back to the env var if set, otherwise surface
        // an actionable error that includes the refresh failure reason.
        const refreshErrorMsg = error instanceof Error ? error.message : String(error);
        const envApiKey = process.env[options.environmentVariableName];
        if (envApiKey) {
          return envApiKey;
        }
        throw new GitLabError({
          message: `OAuth token refresh failed and no fallback ${options.environmentVariableName} environment variable is set. Refresh error: ${refreshErrorMsg}. Re-authenticate with 'opencode auth login gitlab' or set ${options.environmentVariableName}.`
        });
      }
    } else {
      // Stored access token is still fresh enough to use as-is.
      return auth.access;
    }
  }
  // 3) Environment variable fallback.
  const apiKey = process.env[options.environmentVariableName];
  if (!apiKey) {
    throw new GitLabError({
      message: `${options.description} API key is missing. Pass it as the 'apiKey' parameter, set the ${options.environmentVariableName} environment variable, or authenticate with 'opencode auth login gitlab'.`
    });
  }
  return apiKey;
}
4013
/**
 * Return a copy of `headers` whose User-Agent has `suffix` appended
 * (space-separated), or set to `suffix` when no User-Agent was present.
 * The input object is not mutated.
 */
function withUserAgentSuffix(headers, suffix) {
  const existing = headers["User-Agent"];
  const merged = existing ? `${existing} ${suffix}` : suffix;
  return { ...headers, "User-Agent": merged };
}
4020
/**
 * Create a GitLab AI provider instance.
 *
 * Resolves the target instance URL and API key (explicit option, opencode
 * auth.json OAuth tokens, or GITLAB_TOKEN), and exposes factories for
 * agentic-chat models (Anthropic/OpenAI via the AI gateway) and Duo
 * Workflow models. The returned provider is callable: `provider(modelId)`.
 */
function createGitLab(options = {}) {
  const instanceUrl = options.instanceUrl ?? process.env["GITLAB_INSTANCE_URL"] ?? "https://gitlab.com";
  const providerName = options.name ?? "gitlab";
  let cachedApiKey;
  let apiKeyPromise;
  // Resolve the API key once; concurrent callers share the in-flight promise.
  const getApiKey = async () => {
    if (cachedApiKey) {
      return cachedApiKey;
    }
    if (apiKeyPromise) {
      return apiKeyPromise;
    }
    apiKeyPromise = loadApiKey(
      {
        apiKey: options.apiKey,
        environmentVariableName: "GITLAB_TOKEN",
        description: "GitLab"
      },
      instanceUrl,
      options.clientId
    );
    // FIX: clear the in-flight promise even when loadApiKey rejects.
    // Previously the clearing statement ran only after a successful await, so
    // a single failed load left the rejected promise cached and every later
    // getApiKey() call returned that same rejection forever.
    try {
      cachedApiKey = await apiKeyPromise;
      return cachedApiKey;
    } finally {
      apiKeyPromise = void 0;
    }
  };
  // Force a fresh key load (e.g. after a 401), ignoring any cached value.
  const refreshApiKey = async () => {
    cachedApiKey = void 0;
    apiKeyPromise = void 0;
    cachedApiKey = await loadApiKey(
      {
        apiKey: void 0,
        // Bypass stale options.apiKey to force auth.json read
        environmentVariableName: "GITLAB_TOKEN",
        description: "GitLab"
      },
      instanceUrl,
      options.clientId
    );
  };
  // Synchronous header builder used by the models on every request.
  const getHeaders = () => {
    const apiKey = cachedApiKey || options.apiKey || process.env["GITLAB_TOKEN"] || "";
    if (!apiKey) {
      throw new GitLabError({
        message: "GitLab API key is missing. Pass it as the 'apiKey' parameter, set the GITLAB_TOKEN environment variable, or authenticate with 'opencode auth login gitlab'."
      });
    }
    return withUserAgentSuffix(
      {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json",
        ...options.headers
      },
      `ai-sdk-gitlab/${VERSION}`
    );
  };
  // Warm the key cache in the background; failures surface later via
  // getHeaders/refreshApiKey rather than crashing provider construction.
  getApiKey().catch(() => {
  });
  // Build an agentic chat model (Anthropic- or OpenAI-backed per MODEL_MAPPINGS).
  const createAgenticChatModel = (modelId, agenticOptions) => {
    const mapping = getModelMapping(modelId);
    if (!mapping) {
      throw new GitLabError({
        message: `Unknown model ID: ${modelId}. Model must be registered in MODEL_MAPPINGS.`
      });
    }
    // Validate any providerModel override against the backing provider.
    if (agenticOptions?.providerModel) {
      const validModels = getValidModelsForProvider(mapping.provider);
      if (!validModels.includes(agenticOptions.providerModel)) {
        throw new GitLabError({
          message: `Invalid providerModel '${agenticOptions.providerModel}' for provider '${mapping.provider}'. Valid models: ${validModels.join(", ")}`
        });
      }
    }
    // Per-model flags/headers override provider-level ones.
    const featureFlags = {
      DuoAgentPlatformNext: true,
      ...options.featureFlags,
      ...agenticOptions?.featureFlags
    };
    const defaultAiGatewayHeaders = {
      "User-Agent": `gitlab-ai-provider/${VERSION}`
    };
    const aiGatewayHeaders = {
      ...defaultAiGatewayHeaders,
      ...options.aiGatewayHeaders,
      ...agenticOptions?.aiGatewayHeaders
    };
    const baseConfig = {
      provider: `${providerName}.agentic`,
      instanceUrl,
      getHeaders,
      refreshApiKey,
      fetch: options.fetch,
      maxTokens: agenticOptions?.maxTokens,
      featureFlags,
      aiGatewayUrl: options.aiGatewayUrl,
      aiGatewayHeaders
    };
    if (mapping.provider === "openai") {
      return new GitLabOpenAILanguageModel(modelId, {
        ...baseConfig,
        openaiModel: agenticOptions?.providerModel ?? mapping.model
      });
    }
    return new GitLabAnthropicLanguageModel(modelId, {
      ...baseConfig,
      anthropicModel: agenticOptions?.providerModel ?? mapping.model
    });
  };
  // Build a Duo Workflow model; only 'workflow'-mapped model IDs are accepted.
  const createWorkflowChatModel = (modelId, workflowOptions) => {
    const mapping = getModelMapping(modelId);
    if (!mapping || mapping.provider !== "workflow") {
      throw new GitLabError({
        message: `Unknown workflow model ID: ${modelId}. Use 'duo-workflow' or a 'duo-workflow-*' model ID.`
      });
    }
    return new GitLabWorkflowLanguageModel(
      modelId,
      {
        provider: `${providerName}.workflow`,
        instanceUrl,
        getHeaders,
        refreshApiKey,
        fetch: options.fetch,
        featureFlags: {
          ...options.featureFlags,
          ...workflowOptions?.featureFlags
        },
        aiGatewayUrl: options.aiGatewayUrl
      },
      workflowOptions
    );
  };
  // Route a model ID to the workflow or agentic factory.
  const createDefaultModel = (modelId) => {
    if (isWorkflowModel(modelId)) {
      return createWorkflowChatModel(modelId);
    }
    return createAgenticChatModel(modelId);
  };
  const provider = Object.assign((modelId) => createDefaultModel(modelId), {
    specificationVersion: "v2",
    languageModel: createDefaultModel,
    chat: createDefaultModel,
    agenticChat: createAgenticChatModel,
    workflowChat: createWorkflowChatModel
  });
  provider.textEmbeddingModel = (modelId) => {
    throw new GitLabError({
      message: `GitLab provider does not support text embedding models. Model ID: ${modelId}`
    });
  };
  provider.imageModel = (modelId) => {
    throw new GitLabError({
      message: `GitLab provider does not support image models. Model ID: ${modelId}`
    });
  };
  return provider;
}
4176
// Default provider instance targeting GITLAB_INSTANCE_URL or GitLab.com,
// with credentials resolved from options/env/auth.json at first use.
var gitlab = createGitLab();
export {
  AGENT_PRIVILEGES,
  BUNDLED_CLIENT_ID,
  CLIENT_VERSION,
  DEFAULT_AGENT_PRIVILEGES,
  DEFAULT_AI_GATEWAY_URL,
  DEFAULT_CLIENT_CAPABILITIES,
  DEFAULT_WORKFLOW_DEFINITION,
  GITLAB_COM_URL,
  GitLabAnthropicLanguageModel,
  GitLabDirectAccessClient,
  GitLabError,
  GitLabModelCache,
  GitLabModelDiscovery,
  GitLabOAuthManager,
  GitLabOpenAILanguageModel,
  GitLabProjectCache,
  GitLabProjectDetector,
  GitLabWorkflowClient,
  GitLabWorkflowLanguageModel,
  GitLabWorkflowTokenClient,
  MODEL_ID_TO_ANTHROPIC_MODEL,
  MODEL_MAPPINGS,
  OAUTH_SCOPES,
  OPENCODE_GITLAB_AUTH_CLIENT_ID,
  TOKEN_EXPIRY_SKEW_MS,
  VERSION,
  WORKFLOW_ENVIRONMENT,
  WS_HEARTBEAT_INTERVAL_MS,
  WS_KEEPALIVE_PING_INTERVAL_MS,
  WorkflowType,
  createGitLab,
  getAnthropicModelForModelId,
  getModelMapping,
  getOpenAIApiType,
  getOpenAIModelForModelId,
  getProviderForModelId,
  getValidModelsForProvider,
  getWorkflowModelRef,
  gitlab,
  isResponsesApiModel,
  isWorkflowModel
};
//# sourceMappingURL=index.mjs.map