gitlab-ai-provider 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +438 -0
- package/LICENSE +28 -0
- package/README.md +815 -0
- package/dist/index.d.mts +1521 -0
- package/dist/index.d.ts +1658 -0
- package/dist/index.js +4289 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +4220 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +119 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,4289 @@
|
|
|
1
|
+
// esbuild-generated CommonJS interop preamble: cached intrinsics plus helpers
// used below to define lazy module exports and to wrap CJS modules as ESM.
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define a lazy, enumerable getter on `target` for every key in `all`.
// Exports are therefore resolved at access time, not at definition time.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping `except` and
// any key already present on `to`; enumerability mirrors the source
// descriptor when one exists.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wrap a CommonJS module so it can be consumed like an ES module namespace
// (adds a `default` binding when the source is plain CJS).
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
  // If the importer is in node compatibility mode or this is not an ESM
  // file that has been converted to a CommonJS file using a Babel-
  // compatible transform (i.e. "__esModule" has not been set), then set
  // "default" to the CommonJS "module.exports" for node compatibility.
  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
  mod
));
// Mark an exports object as an ES module and copy its getters for CJS consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
28
|
+
|
|
29
|
+
// src/index.ts
// Public export surface of the package. Each entry is registered as a lazy
// getter via __export, so the referenced bindings (defined later in this
// bundle) are resolved when first accessed by a consumer.
var index_exports = {};
__export(index_exports, {
  AGENT_PRIVILEGES: () => AGENT_PRIVILEGES,
  BUNDLED_CLIENT_ID: () => BUNDLED_CLIENT_ID,
  CLIENT_VERSION: () => CLIENT_VERSION,
  DEFAULT_AGENT_PRIVILEGES: () => DEFAULT_AGENT_PRIVILEGES,
  DEFAULT_AI_GATEWAY_URL: () => DEFAULT_AI_GATEWAY_URL,
  DEFAULT_CLIENT_CAPABILITIES: () => DEFAULT_CLIENT_CAPABILITIES,
  DEFAULT_WORKFLOW_DEFINITION: () => DEFAULT_WORKFLOW_DEFINITION,
  GITLAB_COM_URL: () => GITLAB_COM_URL,
  GitLabAnthropicLanguageModel: () => GitLabAnthropicLanguageModel,
  GitLabDirectAccessClient: () => GitLabDirectAccessClient,
  GitLabError: () => GitLabError,
  GitLabModelCache: () => GitLabModelCache,
  GitLabModelDiscovery: () => GitLabModelDiscovery,
  GitLabOAuthManager: () => GitLabOAuthManager,
  GitLabOpenAILanguageModel: () => GitLabOpenAILanguageModel,
  GitLabProjectCache: () => GitLabProjectCache,
  GitLabProjectDetector: () => GitLabProjectDetector,
  GitLabWorkflowClient: () => GitLabWorkflowClient,
  GitLabWorkflowLanguageModel: () => GitLabWorkflowLanguageModel,
  GitLabWorkflowTokenClient: () => GitLabWorkflowTokenClient,
  MODEL_ID_TO_ANTHROPIC_MODEL: () => MODEL_ID_TO_ANTHROPIC_MODEL,
  MODEL_MAPPINGS: () => MODEL_MAPPINGS,
  OAUTH_SCOPES: () => OAUTH_SCOPES,
  OPENCODE_GITLAB_AUTH_CLIENT_ID: () => OPENCODE_GITLAB_AUTH_CLIENT_ID,
  TOKEN_EXPIRY_SKEW_MS: () => TOKEN_EXPIRY_SKEW_MS,
  VERSION: () => VERSION,
  WORKFLOW_ENVIRONMENT: () => WORKFLOW_ENVIRONMENT,
  WS_HEARTBEAT_INTERVAL_MS: () => WS_HEARTBEAT_INTERVAL_MS,
  WS_KEEPALIVE_PING_INTERVAL_MS: () => WS_KEEPALIVE_PING_INTERVAL_MS,
  WorkflowType: () => WorkflowType,
  createGitLab: () => createGitLab,
  getAnthropicModelForModelId: () => getAnthropicModelForModelId,
  getModelMapping: () => getModelMapping,
  getOpenAIApiType: () => getOpenAIApiType,
  getOpenAIModelForModelId: () => getOpenAIModelForModelId,
  getProviderForModelId: () => getProviderForModelId,
  getValidModelsForProvider: () => getValidModelsForProvider,
  getWorkflowModelRef: () => getWorkflowModelRef,
  gitlab: () => gitlab,
  isResponsesApiModel: () => isResponsesApiModel,
  isWorkflowModel: () => isWorkflowModel
});
// Expose the export map through CommonJS, flagged as an ES module.
module.exports = __toCommonJS(index_exports);
|
|
75
|
+
|
|
76
|
+
// src/gitlab-anthropic-language-model.ts
|
|
77
|
+
var import_sdk = __toESM(require("@anthropic-ai/sdk"));
|
|
78
|
+
|
|
79
|
+
// src/gitlab-direct-access.ts
|
|
80
|
+
var import_zod = require("zod");
|
|
81
|
+
|
|
82
|
+
// src/gitlab-error.ts
|
|
83
|
+
var GitLabError = class _GitLabError extends Error {
  statusCode;
  responseBody;
  cause;
  /**
   * Domain error for GitLab API / AI-gateway failures.
   *
   * @param options.message      Human-readable error description.
   * @param options.statusCode   Optional HTTP status code of the failure.
   * @param options.responseBody Optional raw response body text.
   * @param options.cause        Optional underlying error.
   */
  constructor(options) {
    const { message, statusCode, responseBody, cause } = options;
    super(message);
    this.name = "GitLabError";
    this.statusCode = statusCode;
    this.responseBody = responseBody;
    this.cause = cause;
    // Drop the constructor frame from the stack trace where V8 supports it.
    if (Error.captureStackTrace) {
      Error.captureStackTrace(this, _GitLabError);
    }
  }
  /** Build a GitLabError from a fetch-style Response plus its body text. */
  static fromResponse(response, body) {
    const { status, statusText } = response;
    return new _GitLabError({
      message: `GitLab API error: ${status} ${statusText}`,
      statusCode: status,
      responseBody: body
    });
  }
  /** True when the failure was 401 Unauthorized. */
  isAuthError() {
    return this.statusCode === 401;
  }
  /** True when the failure was 429 Too Many Requests. */
  isRateLimitError() {
    return this.statusCode === 429;
  }
  /** True when the failure was 403 Forbidden. */
  isForbiddenError() {
    return this.statusCode === 403;
  }
  /** True for any 5xx status; false when no status code was recorded. */
  isServerError() {
    return this.statusCode !== void 0 && this.statusCode >= 500;
  }
  /**
   * Check if this error is a context overflow error (prompt too long).
   * These errors occur when the conversation exceeds the model's token limit.
   */
  isContextOverflowError() {
    if (this.statusCode !== 400) {
      return false;
    }
    const text = this.message?.toLowerCase() || "";
    const overflowPhrases = ["context overflow", "prompt is too long", "prompt too long"];
    if (overflowPhrases.some((phrase) => text.includes(phrase))) {
      return true;
    }
    return text.includes("tokens") && text.includes("maximum");
  }
};
|
|
128
|
+
|
|
129
|
+
// src/gitlab-direct-access.ts
// Runtime validator for the direct-access response: headers to forward to
// the AI gateway plus the short-lived bearer token itself.
var directAccessTokenSchema = import_zod.z.object({
  headers: import_zod.z.record(import_zod.z.string()),
  token: import_zod.z.string()
});
var DEFAULT_AI_GATEWAY_URL = "https://cloud.gitlab.com";
// Exchanges GitLab instance credentials (supplied via config.getHeaders) for
// a short-lived AI-gateway token via POST /api/v4/ai/third_party_agents/direct_access,
// caching the token in memory between calls.
var GitLabDirectAccessClient = class {
  config;
  // Injected fetch implementation (config.fetch) or the global fetch.
  fetchFn;
  // Resolved gateway base URL: config > GITLAB_AI_GATEWAY_URL env var > default.
  aiGatewayUrl;
  // Last token response, or null when nothing is cached.
  cachedToken = null;
  // Epoch ms after which cachedToken is considered stale (0 = no token).
  tokenExpiresAt = 0;
  constructor(config) {
    this.config = config;
    this.fetchFn = config.fetch ?? fetch;
    this.aiGatewayUrl = config.aiGatewayUrl || process.env["GITLAB_AI_GATEWAY_URL"] || DEFAULT_AI_GATEWAY_URL;
  }
  /**
   * Get a direct access token for the Anthropic proxy.
   * Tokens are cached for 25 minutes (they expire after 30 minutes).
   * @param forceRefresh - If true, ignores the cache and fetches a new token
   * @throws {GitLabError} on any HTTP, network, or schema-validation failure
   */
  async getDirectAccessToken(forceRefresh = false) {
    const now = Date.now();
    if (!forceRefresh && this.cachedToken && this.tokenExpiresAt > now) {
      return this.cachedToken;
    }
    if (forceRefresh) {
      this.invalidateToken();
    }
    const url = `${this.config.instanceUrl}/api/v4/ai/third_party_agents/direct_access`;
    const requestBody = {};
    // Only include feature_flags in the body when the caller supplied any.
    if (this.config.featureFlags && Object.keys(this.config.featureFlags).length > 0) {
      requestBody.feature_flags = this.config.featureFlags;
    }
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify(requestBody)
      });
      if (!response.ok) {
        const errorText = await response.text();
        // On 401, attempt a single credential refresh; the !forceRefresh
        // guard ensures the retried call (forceRefresh=true) cannot recurse.
        if (response.status === 401 && this.config.refreshApiKey && !forceRefresh) {
          try {
            await this.config.refreshApiKey();
            return await this.getDirectAccessToken(true);
          } catch (refreshError) {
            // Refresh failed: surface the original HTTP failure, not the
            // refresh error.
            throw new GitLabError({
              message: `Failed to get direct access token: ${response.status} ${response.statusText} - ${errorText}`,
              statusCode: response.status,
              responseBody: errorText
            });
          }
        }
        // 403 gets a more actionable message listing likely causes.
        if (response.status === 403) {
          throw new GitLabError({
            message: `Access denied to GitLab AI features (${this.config.instanceUrl}). This may indicate that: (1) GitLab Duo is not enabled on this instance, (2) Your account does not have access to AI features, or (3) The third-party agents feature is not available. Original error: ${response.status} ${response.statusText} - ${errorText}`,
            statusCode: response.status,
            responseBody: errorText
          });
        }
        throw new GitLabError({
          message: `Failed to get direct access token: ${response.status} ${response.statusText} - ${errorText}`,
          statusCode: response.status,
          responseBody: errorText
        });
      }
      const data = await response.json();
      // Validate the payload shape before caching it.
      const token = directAccessTokenSchema.parse(data);
      this.cachedToken = token;
      // Cache for 25 minutes (measured from request start).
      this.tokenExpiresAt = now + 25 * 60 * 1e3;
      return token;
    } catch (error) {
      if (error instanceof GitLabError) {
        throw error;
      }
      // Wrap network / JSON / schema failures so callers only see GitLabError.
      throw new GitLabError({
        message: `Failed to get direct access token: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Get the Anthropic proxy base URL
   */
  getAnthropicProxyUrl() {
    // Strip a single trailing slash before appending the proxy path.
    const baseUrl = this.aiGatewayUrl.replace(/\/$/, "");
    return `${baseUrl}/ai/v1/proxy/anthropic/`;
  }
  /**
   * Get the OpenAI proxy base URL
   * Note: The OpenAI SDK expects a base URL like https://api.openai.com/v1
   * and appends paths like /chat/completions. So we need /v1 at the end.
   */
  getOpenAIProxyUrl() {
    const baseUrl = this.aiGatewayUrl.replace(/\/$/, "");
    return `${baseUrl}/ai/v1/proxy/openai/v1`;
  }
  /**
   * Invalidate the cached token
   */
  invalidateToken() {
    this.cachedToken = null;
    this.tokenExpiresAt = 0;
  }
};
|
|
239
|
+
|
|
240
|
+
// src/gitlab-anthropic-language-model.ts
// Language model that routes requests through GitLab's Anthropic proxy using
// short-lived direct-access tokens. Implements the "v2" language-model
// interface (presumably the Vercel AI SDK LanguageModelV2 contract — confirm).
var GitLabAnthropicLanguageModel = class {
  specificationVersion = "v2";
  modelId;
  supportedUrls = {};
  config;
  // Token broker for the AI gateway; created in the constructor.
  directAccessClient;
  // Most recently constructed Anthropic SDK client (rebuilt per request).
  anthropicClient = null;
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.directAccessClient = new GitLabDirectAccessClient({
      instanceUrl: config.instanceUrl,
      getHeaders: config.getHeaders,
      refreshApiKey: config.refreshApiKey,
      fetch: config.fetch,
      featureFlags: config.featureFlags,
      aiGatewayUrl: config.aiGatewayUrl
    });
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Get or create an Anthropic client with valid credentials
   * @param forceRefresh - If true, forces a token refresh before creating the client
   */
  async getAnthropicClient(forceRefresh = false) {
    const tokenData = await this.directAccessClient.getDirectAccessToken(forceRefresh);
    // Strip x-api-key from the gateway-supplied headers: auth goes through
    // authToken below, and the SDK would otherwise see conflicting credentials.
    const { "x-api-key": _removed, ...filteredHeaders } = tokenData.headers;
    const mergedHeaders = {
      ...filteredHeaders,
      ...this.config.aiGatewayHeaders
    };
    this.anthropicClient = new import_sdk.default({
      apiKey: null,
      authToken: tokenData.token,
      baseURL: this.directAccessClient.getAnthropicProxyUrl(),
      defaultHeaders: mergedHeaders
    });
    return this.anthropicClient;
  }
  /**
   * Check if an error is a token-related authentication error that can be retried
   */
  isTokenError(error) {
    if (error instanceof import_sdk.default.APIError) {
      if (error.status === 401) {
        return true;
      }
      // Some token failures arrive without a 401; match on message text.
      const message = error.message?.toLowerCase() || "";
      if (message.includes("token") && (message.includes("expired") || message.includes("revoked") || message.includes("invalid"))) {
        return true;
      }
    }
    return false;
  }
  /**
   * Check if an error is a context overflow error (prompt too long)
   * These should NOT trigger token refresh and should be reported to the user.
   */
  isContextOverflowError(error) {
    if (error instanceof import_sdk.default.APIError) {
      if (error.status === 400) {
        const message = error.message?.toLowerCase() || "";
        if (message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum")) {
          return true;
        }
      }
    }
    return false;
  }
  /**
   * Convert AI SDK tools to Anthropic tool format
   * Returns undefined when no tools are given; non-"function" tools are dropped.
   */
  convertTools(tools) {
    if (!tools || tools.length === 0) {
      return void 0;
    }
    return tools.filter((tool) => tool.type === "function").map((tool) => {
      const schema = tool.inputSchema;
      return {
        name: tool.name,
        description: tool.description || "",
        input_schema: {
          type: "object",
          properties: schema?.properties || {},
          required: schema?.required || []
        }
      };
    });
  }
  /**
   * Convert AI SDK tool choice to Anthropic format
   * "none" and unknown types map to undefined (no tool_choice sent).
   */
  convertToolChoice(toolChoice) {
    if (!toolChoice) {
      return void 0;
    }
    switch (toolChoice.type) {
      case "auto":
        return { type: "auto" };
      case "none":
        return void 0;
      case "required":
        return { type: "any" };
      case "tool":
        return { type: "tool", name: toolChoice.toolName };
      default:
        return void 0;
    }
  }
  /**
   * Convert AI SDK prompt to Anthropic messages format
   * Returns { system, messages }: the last system message (if any) plus the
   * converted conversation. Tool results are emitted as "user" messages with
   * tool_result blocks, per Anthropic's message schema.
   */
  convertPrompt(prompt) {
    let systemMessage;
    const messages = [];
    for (const message of prompt) {
      if (message.role === "system") {
        // Note: a later system message overwrites an earlier one.
        systemMessage = message.content;
        continue;
      }
      if (message.role === "user") {
        const content = [];
        for (const part of message.content) {
          if (part.type === "text") {
            content.push({ type: "text", text: part.text });
          } else if (part.type === "file") {
            // NOTE(review): file parts are currently dropped silently — confirm intended.
          }
        }
        if (content.length > 0) {
          messages.push({ role: "user", content });
        }
      } else if (message.role === "assistant") {
        const content = [];
        for (const part of message.content) {
          if (part.type === "text") {
            content.push({ type: "text", text: part.text });
          } else if (part.type === "tool-call") {
            content.push({
              type: "tool_use",
              id: part.toolCallId,
              name: part.toolName,
              // Tool input may arrive serialized; Anthropic expects an object.
              input: typeof part.input === "string" ? JSON.parse(part.input) : part.input
            });
          }
        }
        if (content.length > 0) {
          messages.push({ role: "assistant", content });
        }
      } else if (message.role === "tool") {
        const content = [];
        for (const part of message.content) {
          if (part.type === "tool-result") {
            let resultContent;
            // Normalize every output variant to a string payload.
            if (part.output.type === "text") {
              resultContent = part.output.value;
            } else if (part.output.type === "json") {
              resultContent = JSON.stringify(part.output.value);
            } else if (part.output.type === "error-text") {
              resultContent = part.output.value;
            } else if (part.output.type === "error-json") {
              resultContent = JSON.stringify(part.output.value);
            } else {
              resultContent = JSON.stringify(part.output);
            }
            content.push({
              type: "tool_result",
              tool_use_id: part.toolCallId,
              content: resultContent,
              // "error-text" / "error-json" outputs become is_error results.
              is_error: part.output.type.startsWith("error")
            });
          }
        }
        if (content.length > 0) {
          messages.push({ role: "user", content });
        }
      }
    }
    return { system: systemMessage, messages };
  }
  /**
   * Convert Anthropic finish reason to AI SDK format
   */
  convertFinishReason(stopReason) {
    switch (stopReason) {
      case "end_turn":
        return "stop";
      case "stop_sequence":
        return "stop";
      case "max_tokens":
        return "length";
      case "tool_use":
        return "tool-calls";
      default:
        return "unknown";
    }
  }
  // Non-streaming generation; delegates to the retry-aware implementation.
  async doGenerate(options) {
    return this.doGenerateWithRetry(options, false);
  }
  // One-shot generation with a single retry on token errors (isRetry guards
  // against infinite recursion).
  async doGenerateWithRetry(options, isRetry) {
    const client = await this.getAnthropicClient(isRetry);
    const { system, messages } = this.convertPrompt(options.prompt);
    const tools = this.convertTools(options.tools);
    const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
    const anthropicModel = this.config.anthropicModel || "claude-sonnet-4-5-20250929";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    try {
      const response = await client.messages.create({
        model: anthropicModel,
        max_tokens: maxTokens,
        system,
        messages,
        tools,
        // tool_choice is only meaningful when tools are present.
        tool_choice: tools ? toolChoice : void 0,
        temperature: options.temperature,
        top_p: options.topP,
        stop_sequences: options.stopSequences
      });
      // Map Anthropic content blocks back to AI SDK content parts.
      const content = [];
      for (const block of response.content) {
        if (block.type === "text") {
          content.push({
            type: "text",
            text: block.text
          });
        } else if (block.type === "tool_use") {
          content.push({
            type: "tool-call",
            toolCallId: block.id,
            toolName: block.name,
            input: JSON.stringify(block.input)
          });
        }
      }
      const usage = {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens
      };
      return {
        content,
        finishReason: this.convertFinishReason(response.stop_reason),
        usage,
        warnings: []
      };
    } catch (error) {
      // Context overflow is user-facing; never retried.
      if (this.isContextOverflowError(error)) {
        const apiError = error;
        throw new GitLabError({
          message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
          statusCode: 400,
          cause: error
        });
      }
      // Token errors: invalidate the cached token and retry exactly once.
      if (!isRetry && this.isTokenError(error)) {
        this.directAccessClient.invalidateToken();
        return this.doGenerateWithRetry(options, true);
      }
      if (error instanceof import_sdk.default.APIError) {
        throw new GitLabError({
          message: `Anthropic API error: ${error.message}`,
          statusCode: error.status,
          cause: error
        });
      }
      throw error;
    }
  }
  // Streaming generation; delegates to the retry-aware implementation.
  async doStream(options) {
    return this.doStreamWithRetry(options, false);
  }
  // Streaming generation. Errors are surfaced as "error" stream parts rather
  // than thrown; a token error emits a sentinel GitLabError("TOKEN_REFRESH_NEEDED")
  // for the caller to act on.
  async doStreamWithRetry(options, isRetry) {
    const client = await this.getAnthropicClient(isRetry);
    const { system, messages } = this.convertPrompt(options.prompt);
    const tools = this.convertTools(options.tools);
    const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
    const anthropicModel = this.config.anthropicModel || "claude-sonnet-4-5-20250929";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    const requestBody = {
      model: anthropicModel,
      max_tokens: maxTokens,
      system,
      messages,
      tools,
      tool_choice: tools ? toolChoice : void 0,
      temperature: options.temperature,
      top_p: options.topP,
      stop_sequences: options.stopSequences,
      stream: true
    };
    const self = this;
    const stream = new ReadableStream({
      start: async (controller) => {
        // Open content blocks keyed by Anthropic's event.index; tool-call
        // blocks accumulate their JSON input across deltas.
        const contentBlocks = {};
        const usage = {
          inputTokens: 0,
          outputTokens: 0,
          totalTokens: 0
        };
        let finishReason = "unknown";
        try {
          const anthropicStream = client.messages.stream(requestBody, {
            signal: options.abortSignal
          });
          controller.enqueue({
            type: "stream-start",
            warnings: []
          });
          // Bridge the SDK's event-emitter stream into this ReadableStream;
          // resolves on "end", rejects on "error".
          await new Promise((resolve2, reject) => {
            anthropicStream.on("streamEvent", (event) => {
              try {
                switch (event.type) {
                  case "message_start":
                    if (event.message.usage) {
                      usage.inputTokens = event.message.usage.input_tokens;
                    }
                    controller.enqueue({
                      type: "response-metadata",
                      id: event.message.id,
                      modelId: event.message.model
                    });
                    break;
                  case "content_block_start":
                    if (event.content_block.type === "text") {
                      const textId = `text-${event.index}`;
                      contentBlocks[event.index] = { type: "text", id: textId };
                      controller.enqueue({
                        type: "text-start",
                        id: textId
                      });
                    } else if (event.content_block.type === "tool_use") {
                      contentBlocks[event.index] = {
                        type: "tool-call",
                        toolCallId: event.content_block.id,
                        toolName: event.content_block.name,
                        input: ""
                      };
                      controller.enqueue({
                        type: "tool-input-start",
                        id: event.content_block.id,
                        toolName: event.content_block.name
                      });
                    }
                    break;
                  case "content_block_delta": {
                    const block = contentBlocks[event.index];
                    if (event.delta.type === "text_delta" && block?.type === "text") {
                      controller.enqueue({
                        type: "text-delta",
                        id: block.id,
                        delta: event.delta.text
                      });
                    } else if (event.delta.type === "input_json_delta" && block?.type === "tool-call") {
                      // Accumulate partial JSON so the final tool-call part
                      // can carry the complete input.
                      block.input += event.delta.partial_json;
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: block.toolCallId,
                        delta: event.delta.partial_json
                      });
                    }
                    break;
                  }
                  case "content_block_stop": {
                    const block = contentBlocks[event.index];
                    if (block?.type === "text") {
                      controller.enqueue({
                        type: "text-end",
                        id: block.id
                      });
                    } else if (block?.type === "tool-call") {
                      controller.enqueue({
                        type: "tool-input-end",
                        id: block.toolCallId
                      });
                      controller.enqueue({
                        type: "tool-call",
                        toolCallId: block.toolCallId,
                        toolName: block.toolName,
                        // Empty input becomes "{}" so it parses as JSON.
                        input: block.input === "" ? "{}" : block.input
                      });
                    }
                    delete contentBlocks[event.index];
                    break;
                  }
                  case "message_delta":
                    if (event.usage) {
                      usage.outputTokens = event.usage.output_tokens;
                      usage.totalTokens = (usage.inputTokens || 0) + event.usage.output_tokens;
                    }
                    if (event.delta.stop_reason) {
                      finishReason = self.convertFinishReason(event.delta.stop_reason);
                    }
                    break;
                  case "message_stop": {
                    controller.enqueue({
                      type: "finish",
                      finishReason,
                      usage
                    });
                    break;
                  }
                }
              } catch (error) {
                // Per-event failures are reported but do not abort the stream.
                controller.enqueue({
                  type: "error",
                  error: error instanceof Error ? error : new Error(String(error))
                });
              }
            });
            anthropicStream.on("end", () => {
              resolve2();
            });
            anthropicStream.on("error", (error) => {
              reject(error);
            });
          });
          // Flush any tool-call blocks the stream left open (no content_block_stop).
          for (const [, block] of Object.entries(contentBlocks)) {
            if (block.type === "tool-call") {
              controller.enqueue({
                type: "tool-input-end",
                id: block.toolCallId
              });
              controller.enqueue({
                type: "tool-call",
                toolCallId: block.toolCallId,
                toolName: block.toolName,
                input: block.input === "" ? "{}" : block.input
              });
            }
          }
          controller.close();
        } catch (error) {
          // Close any still-open tool-call blocks before reporting the error.
          for (const [, block] of Object.entries(contentBlocks)) {
            if (block.type === "tool-call") {
              controller.enqueue({
                type: "tool-input-end",
                id: block.toolCallId
              });
              controller.enqueue({
                type: "tool-call",
                toolCallId: block.toolCallId,
                toolName: block.toolName,
                input: block.input === "" ? "{}" : block.input
              });
            }
          }
          if (self.isContextOverflowError(error)) {
            const apiError = error;
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
                statusCode: 400,
                cause: error
              })
            });
            controller.close();
            return;
          }
          // Unlike doGenerate, streaming cannot transparently retry: emit a
          // sentinel error and let the caller restart the stream.
          if (!isRetry && self.isTokenError(error)) {
            self.directAccessClient.invalidateToken();
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: "TOKEN_REFRESH_NEEDED",
                cause: error
              })
            });
            controller.close();
            return;
          }
          if (error instanceof import_sdk.default.APIError) {
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Anthropic API error: ${error.message}`,
                statusCode: error.status,
                cause: error
              })
            });
          } else {
            controller.enqueue({
              type: "error",
              error
            });
          }
          controller.close();
        }
      }
    });
    return {
      stream,
      request: { body: requestBody }
    };
  }
};
|
|
739
|
+
|
|
740
|
+
// src/gitlab-openai-language-model.ts
|
|
741
|
+
var import_openai = __toESM(require("openai"));
|
|
742
|
+
|
|
743
|
+
// src/model-mappings.ts
// Registry mapping user-facing GitLab Duo model IDs to their upstream
// provider ("anthropic" | "openai" | "workflow") and concrete model name.
// Entry order is significant: getValidModelsForProvider reflects it.
var MODEL_MAPPINGS = {
  // Anthropic models
  "duo-chat-opus-4-6": { provider: "anthropic", model: "claude-opus-4-6" },
  "duo-chat-sonnet-4-6": { provider: "anthropic", model: "claude-sonnet-4-6" },
  "duo-chat-opus-4-5": { provider: "anthropic", model: "claude-opus-4-5-20251101" },
  "duo-chat-sonnet-4-5": { provider: "anthropic", model: "claude-sonnet-4-5-20250929" },
  "duo-chat-haiku-4-5": { provider: "anthropic", model: "claude-haiku-4-5-20251001" },
  // OpenAI models - Chat Completions API
  "duo-chat-gpt-5-1": { provider: "openai", model: "gpt-5.1-2025-11-13", openaiApiType: "chat" },
  "duo-chat-gpt-5-2": { provider: "openai", model: "gpt-5.2-2025-12-11", openaiApiType: "chat" },
  "duo-chat-gpt-5-mini": { provider: "openai", model: "gpt-5-mini-2025-08-07", openaiApiType: "chat" },
  // OpenAI models - Responses API (Codex models)
  "duo-chat-gpt-5-codex": { provider: "openai", model: "gpt-5-codex", openaiApiType: "responses" },
  "duo-chat-gpt-5-2-codex": { provider: "openai", model: "gpt-5.2-codex", openaiApiType: "responses" },
  "duo-chat-gpt-5-3-codex": { provider: "openai", model: "gpt-5.3-codex", openaiApiType: "responses" },
  // Duo Agent Platform model (server-side agentic via DWS WebSocket).
  // This is the single user-facing model ID. The actual underlying model ref
  // is resolved dynamically at runtime via GitLabModelDiscovery.
  "duo-workflow": { provider: "workflow", model: "default" },
  // Internal model refs — kept for backwards compatibility and direct use.
  // Not intended as user-facing model IDs.
  "duo-workflow-default": { provider: "workflow", model: "default" },
  "duo-workflow-sonnet-4-5": { provider: "workflow", model: "anthropic/claude-sonnet-4-5-20250929" },
  "duo-workflow-sonnet-4-6": { provider: "workflow", model: "claude_sonnet_4_6" },
  "duo-workflow-opus-4-5": { provider: "workflow", model: "anthropic/claude-opus-4-5-20251101" },
  "duo-workflow-haiku-4-5": { provider: "workflow", model: "claude_haiku_4_5_20251001" },
  "duo-workflow-opus-4-6": { provider: "workflow", model: "claude_opus_4_6_20260205" }
};
/** Look up the full mapping entry for a model ID (undefined when unknown). */
function getModelMapping(modelId) {
  const mapping = MODEL_MAPPINGS[modelId];
  return mapping;
}
/** Provider name for a model ID, or undefined when the ID is unknown. */
function getProviderForModelId(modelId) {
  return MODEL_MAPPINGS[modelId]?.provider;
}
/** All upstream model names registered for the given provider, in registry order. */
function getValidModelsForProvider(provider) {
  const models = [];
  for (const mapping of Object.values(MODEL_MAPPINGS)) {
    if (mapping.provider === provider) {
      models.push(mapping.model);
    }
  }
  return models;
}
/** Anthropic model name for a model ID; undefined for non-Anthropic or unknown IDs. */
function getAnthropicModelForModelId(modelId) {
  const mapping = MODEL_MAPPINGS[modelId];
  if (mapping === void 0 || mapping.provider !== "anthropic") {
    return void 0;
  }
  return mapping.model;
}
/** OpenAI model name for a model ID; undefined for non-OpenAI or unknown IDs. */
function getOpenAIModelForModelId(modelId) {
  const mapping = MODEL_MAPPINGS[modelId];
  if (mapping === void 0 || mapping.provider !== "openai") {
    return void 0;
  }
  return mapping.model;
}
/** Which OpenAI API surface the model uses; defaults to "chat" when unset/unknown. */
function getOpenAIApiType(modelId) {
  return MODEL_MAPPINGS[modelId]?.openaiApiType ?? "chat";
}
/** True when the model ID targets the OpenAI Responses API. */
function isResponsesApiModel(modelId) {
  return getOpenAIApiType(modelId) === "responses";
}
/** True when the model ID routes through the Duo workflow provider. */
function isWorkflowModel(modelId) {
  const mapping = MODEL_MAPPINGS[modelId];
  return mapping !== void 0 && mapping.provider === "workflow";
}
|
|
817
|
+
function getWorkflowModelRef(modelId) {
|
|
818
|
+
const mapping = MODEL_MAPPINGS[modelId];
|
|
819
|
+
return mapping?.provider === "workflow" ? mapping.model : void 0;
|
|
820
|
+
}
|
|
821
|
+
var MODEL_ID_TO_ANTHROPIC_MODEL = Object.fromEntries(
|
|
822
|
+
Object.entries(MODEL_MAPPINGS).filter(([, v]) => v.provider === "anthropic").map(([k, v]) => [k, v.model])
|
|
823
|
+
);
|
|
824
|
+
|
|
825
|
+
// src/gitlab-openai-language-model.ts
|
|
826
|
+
/**
 * LanguageModelV2 implementation that routes OpenAI-family Duo models through
 * the GitLab AI gateway, using either the Chat Completions or Responses API.
 */
var GitLabOpenAILanguageModel = class {
  // AI SDK language-model specification version implemented by this class.
  specificationVersion = "v2";
  // User-facing model ID this instance was created for (e.g. "duo-chat-gpt-5-1").
  modelId;
  // No URL-based file parts are advertised as supported.
  supportedUrls = {};
  // Provider configuration captured at construction time.
  config;
  // Client used to obtain/refresh GitLab direct-access tokens for the gateway.
  directAccessClient;
  // Whether calls use the OpenAI Responses API instead of Chat Completions.
  useResponsesApi;
  // Latest OpenAI SDK client built by getOpenAIClient (rebuilt on every call).
  openaiClient = null;
  /**
   * @param modelId - Duo model ID to serve.
   * @param config - Provider config; `useResponsesApi` (when set) overrides the
   *   API type otherwise inferred from the model-mapping table. The remaining
   *   fields read here (instanceUrl, getHeaders, refreshApiKey, fetch,
   *   featureFlags, aiGatewayUrl) are forwarded to GitLabDirectAccessClient.
   */
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    // Explicit config wins; otherwise infer from the model mapping.
    this.useResponsesApi = config.useResponsesApi ?? isResponsesApiModel(modelId);
    this.directAccessClient = new GitLabDirectAccessClient({
      instanceUrl: config.instanceUrl,
      getHeaders: config.getHeaders,
      refreshApiKey: config.refreshApiKey,
      fetch: config.fetch,
      featureFlags: config.featureFlags,
      aiGatewayUrl: config.aiGatewayUrl
    });
  }
  // Provider name reported to the AI SDK (taken verbatim from config).
  get provider() {
    return this.config.provider;
  }
  /**
   * Build an OpenAI SDK client authenticated with a GitLab direct-access token
   * and pointed at the gateway's OpenAI proxy URL.
   *
   * @param forceRefresh - When true, forces the direct-access token to be
   *   re-fetched (used on the single retry after a token error).
   *
   * Note: a new client is constructed on every call; `this.openaiClient` only
   * records the most recent one.
   */
  async getOpenAIClient(forceRefresh = false) {
    const tokenData = await this.directAccessClient.getDirectAccessToken(forceRefresh);
    // Drop the gateway's "x-api-key" header: the token is supplied via the
    // client's `apiKey` below — presumably to avoid sending the credential
    // twice (NOTE(review): confirm against the gateway's auth expectations).
    const { "x-api-key": _removed, ...filteredHeaders } = tokenData.headers;
    const mergedHeaders = {
      ...filteredHeaders,
      // Caller-supplied gateway headers take precedence over token headers.
      ...this.config.aiGatewayHeaders
    };
    this.openaiClient = new import_openai.default({
      apiKey: tokenData.token,
      baseURL: this.directAccessClient.getOpenAIProxyUrl(),
      defaultHeaders: mergedHeaders
    });
    return this.openaiClient;
  }
isTokenError(error) {
|
|
865
|
+
if (error instanceof import_openai.default.APIError) {
|
|
866
|
+
if (error.status === 401) {
|
|
867
|
+
return true;
|
|
868
|
+
}
|
|
869
|
+
const message = error.message?.toLowerCase() || "";
|
|
870
|
+
if (message.includes("token") && (message.includes("expired") || message.includes("revoked") || message.includes("invalid"))) {
|
|
871
|
+
return true;
|
|
872
|
+
}
|
|
873
|
+
}
|
|
874
|
+
return false;
|
|
875
|
+
}
|
|
876
|
+
/**
|
|
877
|
+
* Check if an error is a context overflow error (prompt too long)
|
|
878
|
+
* These should NOT trigger token refresh and should be reported to the user.
|
|
879
|
+
*/
|
|
880
|
+
isContextOverflowError(error) {
|
|
881
|
+
if (error instanceof import_openai.default.APIError) {
|
|
882
|
+
if (error.status === 400) {
|
|
883
|
+
const message = error.message?.toLowerCase() || "";
|
|
884
|
+
if (message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum")) {
|
|
885
|
+
return true;
|
|
886
|
+
}
|
|
887
|
+
}
|
|
888
|
+
}
|
|
889
|
+
return false;
|
|
890
|
+
}
|
|
891
|
+
convertTools(tools) {
|
|
892
|
+
if (!tools || tools.length === 0) {
|
|
893
|
+
return void 0;
|
|
894
|
+
}
|
|
895
|
+
return tools.filter((tool) => tool.type === "function").map((tool) => {
|
|
896
|
+
const schema = tool.inputSchema;
|
|
897
|
+
return {
|
|
898
|
+
type: "function",
|
|
899
|
+
function: {
|
|
900
|
+
name: tool.name,
|
|
901
|
+
description: tool.description || "",
|
|
902
|
+
// Ensure the schema has type: 'object' as OpenAI requires it
|
|
903
|
+
parameters: {
|
|
904
|
+
type: "object",
|
|
905
|
+
...schema
|
|
906
|
+
}
|
|
907
|
+
}
|
|
908
|
+
};
|
|
909
|
+
});
|
|
910
|
+
}
|
|
911
|
+
convertToolChoice(toolChoice) {
|
|
912
|
+
if (!toolChoice) {
|
|
913
|
+
return void 0;
|
|
914
|
+
}
|
|
915
|
+
switch (toolChoice.type) {
|
|
916
|
+
case "auto":
|
|
917
|
+
return "auto";
|
|
918
|
+
case "none":
|
|
919
|
+
return "none";
|
|
920
|
+
case "required":
|
|
921
|
+
return "required";
|
|
922
|
+
case "tool":
|
|
923
|
+
return { type: "function", function: { name: toolChoice.toolName } };
|
|
924
|
+
default:
|
|
925
|
+
return void 0;
|
|
926
|
+
}
|
|
927
|
+
}
|
|
928
|
+
convertPrompt(prompt) {
|
|
929
|
+
const messages = [];
|
|
930
|
+
for (const message of prompt) {
|
|
931
|
+
if (message.role === "system") {
|
|
932
|
+
messages.push({ role: "system", content: message.content });
|
|
933
|
+
continue;
|
|
934
|
+
}
|
|
935
|
+
if (message.role === "user") {
|
|
936
|
+
const textParts = message.content.filter((part) => part.type === "text").map((part) => part.text);
|
|
937
|
+
if (textParts.length > 0) {
|
|
938
|
+
messages.push({ role: "user", content: textParts.join("\n") });
|
|
939
|
+
}
|
|
940
|
+
} else if (message.role === "assistant") {
|
|
941
|
+
const textParts = [];
|
|
942
|
+
const toolCalls = [];
|
|
943
|
+
for (const part of message.content) {
|
|
944
|
+
if (part.type === "text") {
|
|
945
|
+
textParts.push(part.text);
|
|
946
|
+
} else if (part.type === "tool-call") {
|
|
947
|
+
toolCalls.push({
|
|
948
|
+
id: part.toolCallId,
|
|
949
|
+
type: "function",
|
|
950
|
+
function: {
|
|
951
|
+
name: part.toolName,
|
|
952
|
+
arguments: typeof part.input === "string" ? part.input : JSON.stringify(part.input)
|
|
953
|
+
}
|
|
954
|
+
});
|
|
955
|
+
}
|
|
956
|
+
}
|
|
957
|
+
const assistantMessage = {
|
|
958
|
+
role: "assistant",
|
|
959
|
+
content: textParts.length > 0 ? textParts.join("\n") : null
|
|
960
|
+
};
|
|
961
|
+
if (toolCalls.length > 0) {
|
|
962
|
+
assistantMessage.tool_calls = toolCalls;
|
|
963
|
+
}
|
|
964
|
+
messages.push(assistantMessage);
|
|
965
|
+
} else if (message.role === "tool") {
|
|
966
|
+
for (const part of message.content) {
|
|
967
|
+
if (part.type === "tool-result") {
|
|
968
|
+
let resultContent;
|
|
969
|
+
if (part.output.type === "text") {
|
|
970
|
+
resultContent = part.output.value;
|
|
971
|
+
} else if (part.output.type === "json") {
|
|
972
|
+
resultContent = JSON.stringify(part.output.value);
|
|
973
|
+
} else if (part.output.type === "error-text") {
|
|
974
|
+
resultContent = part.output.value;
|
|
975
|
+
} else if (part.output.type === "error-json") {
|
|
976
|
+
resultContent = JSON.stringify(part.output.value);
|
|
977
|
+
} else {
|
|
978
|
+
resultContent = JSON.stringify(part.output);
|
|
979
|
+
}
|
|
980
|
+
messages.push({
|
|
981
|
+
role: "tool",
|
|
982
|
+
tool_call_id: part.toolCallId,
|
|
983
|
+
content: resultContent
|
|
984
|
+
});
|
|
985
|
+
}
|
|
986
|
+
}
|
|
987
|
+
}
|
|
988
|
+
}
|
|
989
|
+
return messages;
|
|
990
|
+
}
|
|
991
|
+
convertFinishReason(finishReason) {
|
|
992
|
+
switch (finishReason) {
|
|
993
|
+
case "stop":
|
|
994
|
+
return "stop";
|
|
995
|
+
case "length":
|
|
996
|
+
return "length";
|
|
997
|
+
case "tool_calls":
|
|
998
|
+
return "tool-calls";
|
|
999
|
+
case "content_filter":
|
|
1000
|
+
return "content-filter";
|
|
1001
|
+
default:
|
|
1002
|
+
return "unknown";
|
|
1003
|
+
}
|
|
1004
|
+
}
|
|
1005
|
+
/**
|
|
1006
|
+
* Convert tools to Responses API format
|
|
1007
|
+
*/
|
|
1008
|
+
convertToolsForResponses(tools) {
|
|
1009
|
+
if (!tools || tools.length === 0) {
|
|
1010
|
+
return void 0;
|
|
1011
|
+
}
|
|
1012
|
+
return tools.filter((tool) => tool.type === "function").map((tool) => {
|
|
1013
|
+
const schema = { ...tool.inputSchema };
|
|
1014
|
+
delete schema["$schema"];
|
|
1015
|
+
return {
|
|
1016
|
+
type: "function",
|
|
1017
|
+
name: tool.name,
|
|
1018
|
+
description: tool.description || "",
|
|
1019
|
+
parameters: schema,
|
|
1020
|
+
strict: false
|
|
1021
|
+
};
|
|
1022
|
+
});
|
|
1023
|
+
}
|
|
1024
|
+
  /**
   * Convert prompt to Responses API input format.
   *
   * System messages are skipped here — they are delivered separately via the
   * `instructions` field (see extractSystemInstructions). Within an assistant
   * message, `function_call` items are emitted as they are encountered and the
   * collected text is appended as a single assistant message AFTER them; this
   * ordering is part of the wire format this converter produces.
   */
  convertPromptForResponses(prompt) {
    const items = [];
    for (const message of prompt) {
      if (message.role === "system") {
        // Handled via `instructions`, not as an input item.
        continue;
      }
      if (message.role === "user") {
        // Only text parts survive; a user message with no text is dropped.
        const textParts = message.content.filter((part) => part.type === "text").map((part) => part.text);
        if (textParts.length > 0) {
          items.push({
            type: "message",
            role: "user",
            content: textParts.map((text) => ({ type: "input_text", text }))
          });
        }
      } else if (message.role === "assistant") {
        const textParts = [];
        for (const part of message.content) {
          if (part.type === "text") {
            textParts.push(part.text);
          } else if (part.type === "tool-call") {
            // Tool calls become standalone function_call items.
            items.push({
              type: "function_call",
              call_id: part.toolCallId,
              name: part.toolName,
              arguments: typeof part.input === "string" ? part.input : JSON.stringify(part.input)
            });
          }
        }
        // Joined assistant text goes out as one message after any tool calls.
        if (textParts.length > 0) {
          items.push({
            type: "message",
            role: "assistant",
            content: [{ type: "output_text", text: textParts.join("\n"), annotations: [] }]
          });
        }
      } else if (message.role === "tool") {
        for (const part of message.content) {
          if (part.type === "tool-result") {
            // Serialize the tool output to a string; plain/error text passes
            // through, JSON-ish outputs are stringified.
            let resultContent;
            if (part.output.type === "text") {
              resultContent = part.output.value;
            } else if (part.output.type === "json") {
              resultContent = JSON.stringify(part.output.value);
            } else if (part.output.type === "error-text") {
              resultContent = part.output.value;
            } else if (part.output.type === "error-json") {
              resultContent = JSON.stringify(part.output.value);
            } else {
              resultContent = JSON.stringify(part.output);
            }
            items.push({
              type: "function_call_output",
              call_id: part.toolCallId,
              output: resultContent
            });
          }
        }
      }
    }
    return items;
  }
/**
|
|
1090
|
+
* Extract system instructions from prompt
|
|
1091
|
+
*/
|
|
1092
|
+
extractSystemInstructions(prompt) {
|
|
1093
|
+
const systemMessages = prompt.filter((m) => m.role === "system").map((m) => m.content).join("\n");
|
|
1094
|
+
return systemMessages || void 0;
|
|
1095
|
+
}
|
|
1096
|
+
/**
|
|
1097
|
+
* Convert Responses API status to finish reason
|
|
1098
|
+
* Note: Responses API returns 'completed' even when making tool calls,
|
|
1099
|
+
* so we need to check the content for tool calls separately.
|
|
1100
|
+
*/
|
|
1101
|
+
convertResponsesStatus(status, hasToolCalls = false) {
|
|
1102
|
+
if (hasToolCalls) {
|
|
1103
|
+
return "tool-calls";
|
|
1104
|
+
}
|
|
1105
|
+
switch (status) {
|
|
1106
|
+
case "completed":
|
|
1107
|
+
return "stop";
|
|
1108
|
+
case "incomplete":
|
|
1109
|
+
return "length";
|
|
1110
|
+
case "cancelled":
|
|
1111
|
+
return "stop";
|
|
1112
|
+
case "failed":
|
|
1113
|
+
return "error";
|
|
1114
|
+
default:
|
|
1115
|
+
return "unknown";
|
|
1116
|
+
}
|
|
1117
|
+
}
|
|
1118
|
+
async doGenerate(options) {
|
|
1119
|
+
if (this.useResponsesApi) {
|
|
1120
|
+
return this.doGenerateWithResponsesApi(options, false);
|
|
1121
|
+
}
|
|
1122
|
+
return this.doGenerateWithChatApi(options, false);
|
|
1123
|
+
}
|
|
1124
|
+
async doGenerateWithChatApi(options, isRetry) {
|
|
1125
|
+
const client = await this.getOpenAIClient(isRetry);
|
|
1126
|
+
const messages = this.convertPrompt(options.prompt);
|
|
1127
|
+
const tools = this.convertTools(options.tools);
|
|
1128
|
+
const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
|
|
1129
|
+
const openaiModel = this.config.openaiModel || "gpt-4o";
|
|
1130
|
+
const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
|
|
1131
|
+
try {
|
|
1132
|
+
const response = await client.chat.completions.create({
|
|
1133
|
+
model: openaiModel,
|
|
1134
|
+
max_completion_tokens: maxTokens,
|
|
1135
|
+
messages,
|
|
1136
|
+
tools,
|
|
1137
|
+
tool_choice: tools ? toolChoice : void 0,
|
|
1138
|
+
temperature: options.temperature,
|
|
1139
|
+
top_p: options.topP,
|
|
1140
|
+
stop: options.stopSequences
|
|
1141
|
+
});
|
|
1142
|
+
const choice = response.choices[0];
|
|
1143
|
+
const content = [];
|
|
1144
|
+
if (choice?.message.content) {
|
|
1145
|
+
content.push({ type: "text", text: choice.message.content });
|
|
1146
|
+
}
|
|
1147
|
+
if (choice?.message.tool_calls) {
|
|
1148
|
+
for (const toolCall of choice.message.tool_calls) {
|
|
1149
|
+
if (toolCall.type === "function") {
|
|
1150
|
+
content.push({
|
|
1151
|
+
type: "tool-call",
|
|
1152
|
+
toolCallId: toolCall.id,
|
|
1153
|
+
toolName: toolCall.function.name,
|
|
1154
|
+
input: toolCall.function.arguments
|
|
1155
|
+
});
|
|
1156
|
+
}
|
|
1157
|
+
}
|
|
1158
|
+
}
|
|
1159
|
+
const usage = {
|
|
1160
|
+
inputTokens: response.usage?.prompt_tokens || 0,
|
|
1161
|
+
outputTokens: response.usage?.completion_tokens || 0,
|
|
1162
|
+
totalTokens: response.usage?.total_tokens || 0
|
|
1163
|
+
};
|
|
1164
|
+
return {
|
|
1165
|
+
content,
|
|
1166
|
+
finishReason: this.convertFinishReason(choice?.finish_reason),
|
|
1167
|
+
usage,
|
|
1168
|
+
warnings: []
|
|
1169
|
+
};
|
|
1170
|
+
} catch (error) {
|
|
1171
|
+
if (this.isContextOverflowError(error)) {
|
|
1172
|
+
const apiError = error;
|
|
1173
|
+
throw new GitLabError({
|
|
1174
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
1175
|
+
statusCode: 400,
|
|
1176
|
+
cause: error
|
|
1177
|
+
});
|
|
1178
|
+
}
|
|
1179
|
+
if (!isRetry && this.isTokenError(error)) {
|
|
1180
|
+
this.directAccessClient.invalidateToken();
|
|
1181
|
+
return this.doGenerateWithChatApi(options, true);
|
|
1182
|
+
}
|
|
1183
|
+
if (error instanceof import_openai.default.APIError) {
|
|
1184
|
+
throw new GitLabError({
|
|
1185
|
+
message: `OpenAI API error: ${error.message}`,
|
|
1186
|
+
statusCode: error.status,
|
|
1187
|
+
cause: error
|
|
1188
|
+
});
|
|
1189
|
+
}
|
|
1190
|
+
throw error;
|
|
1191
|
+
}
|
|
1192
|
+
}
|
|
1193
|
+
  /**
   * Non-streaming generation through the OpenAI Responses API.
   *
   * @param options - LanguageModelV2 call options (prompt, tools, sampling).
   * @param isRetry - True when this is the single retry after a token refresh.
   * @returns `{ content, finishReason, usage, warnings }` per LanguageModelV2.
   * @throws GitLabError on context overflow (400) and other OpenAI API errors.
   */
  async doGenerateWithResponsesApi(options, isRetry) {
    // isRetry forces a fresh direct-access token on the retry attempt.
    const client = await this.getOpenAIClient(isRetry);
    const input = this.convertPromptForResponses(options.prompt);
    const tools = this.convertToolsForResponses(options.tools);
    // System messages travel via `instructions`, not in `input`.
    const instructions = this.extractSystemInstructions(options.prompt);
    const openaiModel = this.config.openaiModel || "gpt-5-codex";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    try {
      const response = await client.responses.create({
        model: openaiModel,
        input,
        instructions,
        tools,
        max_output_tokens: maxTokens,
        temperature: options.temperature,
        top_p: options.topP,
        // No server-side response persistence.
        store: false
      });
      const content = [];
      let hasToolCalls = false;
      // Flatten output items: assistant text -> text parts, function calls ->
      // tool-call parts. Other item types are ignored.
      for (const item of response.output || []) {
        if (item.type === "message" && item.role === "assistant") {
          for (const contentItem of item.content || []) {
            if (contentItem.type === "output_text") {
              content.push({ type: "text", text: contentItem.text });
            }
          }
        } else if (item.type === "function_call") {
          hasToolCalls = true;
          content.push({
            type: "tool-call",
            toolCallId: item.call_id,
            toolName: item.name,
            input: item.arguments
          });
        }
      }
      const usage = {
        inputTokens: response.usage?.input_tokens || 0,
        outputTokens: response.usage?.output_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0
      };
      return {
        content,
        // hasToolCalls overrides "completed" -> "tool-calls" (see converter).
        finishReason: this.convertResponsesStatus(response.status, hasToolCalls),
        usage,
        warnings: []
      };
    } catch (error) {
      // Context overflow must surface to the user; never retry with a new token.
      if (this.isContextOverflowError(error)) {
        const apiError = error;
        throw new GitLabError({
          message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
          statusCode: 400,
          cause: error
        });
      }
      // One-shot retry with a refreshed token on auth-style failures.
      if (!isRetry && this.isTokenError(error)) {
        this.directAccessClient.invalidateToken();
        return this.doGenerateWithResponsesApi(options, true);
      }
      if (error instanceof import_openai.default.APIError) {
        throw new GitLabError({
          message: `OpenAI API error: ${error.message}`,
          statusCode: error.status,
          cause: error
        });
      }
      throw error;
    }
  }
async doStream(options) {
|
|
1265
|
+
if (this.useResponsesApi) {
|
|
1266
|
+
return this.doStreamWithResponsesApi(options, false);
|
|
1267
|
+
}
|
|
1268
|
+
return this.doStreamWithChatApi(options, false);
|
|
1269
|
+
}
|
|
1270
|
+
async doStreamWithChatApi(options, isRetry) {
|
|
1271
|
+
const client = await this.getOpenAIClient(isRetry);
|
|
1272
|
+
const messages = this.convertPrompt(options.prompt);
|
|
1273
|
+
const tools = this.convertTools(options.tools);
|
|
1274
|
+
const toolChoice = options.toolChoice?.type !== "none" ? this.convertToolChoice(options.toolChoice) : void 0;
|
|
1275
|
+
const openaiModel = this.config.openaiModel || "gpt-4o";
|
|
1276
|
+
const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
|
|
1277
|
+
const requestBody = {
|
|
1278
|
+
model: openaiModel,
|
|
1279
|
+
max_completion_tokens: maxTokens,
|
|
1280
|
+
messages,
|
|
1281
|
+
tools,
|
|
1282
|
+
tool_choice: tools ? toolChoice : void 0,
|
|
1283
|
+
temperature: options.temperature,
|
|
1284
|
+
top_p: options.topP,
|
|
1285
|
+
stop: options.stopSequences,
|
|
1286
|
+
stream: true,
|
|
1287
|
+
stream_options: { include_usage: true }
|
|
1288
|
+
};
|
|
1289
|
+
const self = this;
|
|
1290
|
+
const stream = new ReadableStream({
|
|
1291
|
+
start: async (controller) => {
|
|
1292
|
+
const toolCalls = {};
|
|
1293
|
+
const usage = {
|
|
1294
|
+
inputTokens: 0,
|
|
1295
|
+
outputTokens: 0,
|
|
1296
|
+
totalTokens: 0
|
|
1297
|
+
};
|
|
1298
|
+
let finishReason = "unknown";
|
|
1299
|
+
let textStarted = false;
|
|
1300
|
+
const textId = "text-0";
|
|
1301
|
+
try {
|
|
1302
|
+
const openaiStream = await client.chat.completions.create({
|
|
1303
|
+
...requestBody,
|
|
1304
|
+
stream: true
|
|
1305
|
+
});
|
|
1306
|
+
controller.enqueue({ type: "stream-start", warnings: [] });
|
|
1307
|
+
for await (const chunk of openaiStream) {
|
|
1308
|
+
const choice = chunk.choices?.[0];
|
|
1309
|
+
if (chunk.id && !textStarted) {
|
|
1310
|
+
controller.enqueue({
|
|
1311
|
+
type: "response-metadata",
|
|
1312
|
+
id: chunk.id,
|
|
1313
|
+
modelId: chunk.model
|
|
1314
|
+
});
|
|
1315
|
+
}
|
|
1316
|
+
if (choice?.delta?.content) {
|
|
1317
|
+
if (!textStarted) {
|
|
1318
|
+
controller.enqueue({ type: "text-start", id: textId });
|
|
1319
|
+
textStarted = true;
|
|
1320
|
+
}
|
|
1321
|
+
controller.enqueue({
|
|
1322
|
+
type: "text-delta",
|
|
1323
|
+
id: textId,
|
|
1324
|
+
delta: choice.delta.content
|
|
1325
|
+
});
|
|
1326
|
+
}
|
|
1327
|
+
if (choice?.delta?.tool_calls) {
|
|
1328
|
+
for (const tc of choice.delta.tool_calls) {
|
|
1329
|
+
const idx = tc.index;
|
|
1330
|
+
if (!toolCalls[idx]) {
|
|
1331
|
+
toolCalls[idx] = {
|
|
1332
|
+
id: tc.id || "",
|
|
1333
|
+
name: tc.function?.name || "",
|
|
1334
|
+
arguments: ""
|
|
1335
|
+
};
|
|
1336
|
+
controller.enqueue({
|
|
1337
|
+
type: "tool-input-start",
|
|
1338
|
+
id: toolCalls[idx].id,
|
|
1339
|
+
toolName: toolCalls[idx].name
|
|
1340
|
+
});
|
|
1341
|
+
}
|
|
1342
|
+
if (tc.function?.arguments) {
|
|
1343
|
+
toolCalls[idx].arguments += tc.function.arguments;
|
|
1344
|
+
controller.enqueue({
|
|
1345
|
+
type: "tool-input-delta",
|
|
1346
|
+
id: toolCalls[idx].id,
|
|
1347
|
+
delta: tc.function.arguments
|
|
1348
|
+
});
|
|
1349
|
+
}
|
|
1350
|
+
}
|
|
1351
|
+
}
|
|
1352
|
+
if (choice?.finish_reason) {
|
|
1353
|
+
finishReason = self.convertFinishReason(choice.finish_reason);
|
|
1354
|
+
}
|
|
1355
|
+
if (chunk.usage) {
|
|
1356
|
+
usage.inputTokens = chunk.usage.prompt_tokens || 0;
|
|
1357
|
+
usage.outputTokens = chunk.usage.completion_tokens || 0;
|
|
1358
|
+
usage.totalTokens = chunk.usage.total_tokens || 0;
|
|
1359
|
+
}
|
|
1360
|
+
}
|
|
1361
|
+
if (textStarted) {
|
|
1362
|
+
controller.enqueue({ type: "text-end", id: textId });
|
|
1363
|
+
}
|
|
1364
|
+
for (const [, tc] of Object.entries(toolCalls)) {
|
|
1365
|
+
controller.enqueue({ type: "tool-input-end", id: tc.id });
|
|
1366
|
+
controller.enqueue({
|
|
1367
|
+
type: "tool-call",
|
|
1368
|
+
toolCallId: tc.id,
|
|
1369
|
+
toolName: tc.name,
|
|
1370
|
+
input: tc.arguments || "{}"
|
|
1371
|
+
});
|
|
1372
|
+
}
|
|
1373
|
+
controller.enqueue({ type: "finish", finishReason, usage });
|
|
1374
|
+
controller.close();
|
|
1375
|
+
} catch (error) {
|
|
1376
|
+
if (self.isContextOverflowError(error)) {
|
|
1377
|
+
const apiError = error;
|
|
1378
|
+
controller.enqueue({
|
|
1379
|
+
type: "error",
|
|
1380
|
+
error: new GitLabError({
|
|
1381
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
1382
|
+
statusCode: 400,
|
|
1383
|
+
cause: error
|
|
1384
|
+
})
|
|
1385
|
+
});
|
|
1386
|
+
controller.close();
|
|
1387
|
+
return;
|
|
1388
|
+
}
|
|
1389
|
+
if (!isRetry && self.isTokenError(error)) {
|
|
1390
|
+
self.directAccessClient.invalidateToken();
|
|
1391
|
+
controller.enqueue({
|
|
1392
|
+
type: "error",
|
|
1393
|
+
error: new GitLabError({ message: "TOKEN_REFRESH_NEEDED", cause: error })
|
|
1394
|
+
});
|
|
1395
|
+
controller.close();
|
|
1396
|
+
return;
|
|
1397
|
+
}
|
|
1398
|
+
if (error instanceof import_openai.default.APIError) {
|
|
1399
|
+
controller.enqueue({
|
|
1400
|
+
type: "error",
|
|
1401
|
+
error: new GitLabError({
|
|
1402
|
+
message: `OpenAI API error: ${error.message}`,
|
|
1403
|
+
statusCode: error.status,
|
|
1404
|
+
cause: error
|
|
1405
|
+
})
|
|
1406
|
+
});
|
|
1407
|
+
} else {
|
|
1408
|
+
controller.enqueue({ type: "error", error });
|
|
1409
|
+
}
|
|
1410
|
+
controller.close();
|
|
1411
|
+
}
|
|
1412
|
+
}
|
|
1413
|
+
});
|
|
1414
|
+
return { stream, request: { body: requestBody } };
|
|
1415
|
+
}
|
|
1416
|
+
  /**
   * Streaming generation through the OpenAI Responses API.
   *
   * Consumes Responses streaming events and re-emits LanguageModelV2 stream
   * parts: stream-start, response-metadata (on response.created), text
   * deltas, tool-input deltas keyed by output_index, then tool-call parts and
   * a terminal finish part. Errors are emitted as "error" parts (the stream
   * never rejects).
   *
   * @param options - LanguageModelV2 call options.
   * @param isRetry - True on the single post-token-refresh retry; mid-stream
   *   token errors emit a TOKEN_REFRESH_NEEDED error part instead of retrying.
   */
  async doStreamWithResponsesApi(options, isRetry) {
    const client = await this.getOpenAIClient(isRetry);
    const input = this.convertPromptForResponses(options.prompt);
    const tools = this.convertToolsForResponses(options.tools);
    // System messages travel via `instructions`, not in `input`.
    const instructions = this.extractSystemInstructions(options.prompt);
    const openaiModel = this.config.openaiModel || "gpt-5-codex";
    const maxTokens = options.maxOutputTokens || this.config.maxTokens || 8192;
    const requestBody = {
      model: openaiModel,
      input,
      instructions,
      tools,
      max_output_tokens: maxTokens,
      temperature: options.temperature,
      top_p: options.topP,
      // No server-side response persistence.
      store: false,
      stream: true
    };
    const self = this;
    const stream = new ReadableStream({
      start: async (controller) => {
        // Accumulators for in-flight function calls, keyed by output_index.
        const toolCalls = {};
        const usage = {
          inputTokens: 0,
          outputTokens: 0,
          totalTokens: 0
        };
        let finishReason = "unknown";
        let textStarted = false;
        const textId = "text-0";
        try {
          const openaiStream = await client.responses.create({
            ...requestBody,
            stream: true
          });
          controller.enqueue({ type: "stream-start", warnings: [] });
          for await (const event of openaiStream) {
            if (event.type === "response.created") {
              // Fires once per response, so metadata is emitted exactly once.
              controller.enqueue({
                type: "response-metadata",
                id: event.response.id,
                modelId: event.response.model
              });
            } else if (event.type === "response.output_item.added") {
              if (event.item.type === "function_call") {
                // Register the call so later argument deltas can find it.
                const outputIndex = event.output_index;
                const callId = event.item.call_id;
                toolCalls[outputIndex] = {
                  callId,
                  name: event.item.name,
                  arguments: ""
                };
                controller.enqueue({
                  type: "tool-input-start",
                  id: callId,
                  toolName: event.item.name
                });
              }
            } else if (event.type === "response.output_text.delta") {
              if (!textStarted) {
                controller.enqueue({ type: "text-start", id: textId });
                textStarted = true;
              }
              controller.enqueue({
                type: "text-delta",
                id: textId,
                delta: event.delta
              });
            } else if (event.type === "response.function_call_arguments.delta") {
              const outputIndex = event.output_index;
              const tc = toolCalls[outputIndex];
              if (tc) {
                tc.arguments += event.delta;
                controller.enqueue({
                  type: "tool-input-delta",
                  id: tc.callId,
                  delta: event.delta
                });
              }
            } else if (event.type === "response.function_call_arguments.done") {
              const outputIndex = event.output_index;
              const tc = toolCalls[outputIndex];
              if (tc) {
                // The done event carries the authoritative full argument
                // string; overwrite the accumulated deltas with it.
                tc.arguments = event.arguments;
              }
            } else if (event.type === "response.completed") {
              const hasToolCalls2 = Object.keys(toolCalls).length > 0;
              finishReason = self.convertResponsesStatus(event.response.status, hasToolCalls2);
              if (event.response.usage) {
                usage.inputTokens = event.response.usage.input_tokens || 0;
                usage.outputTokens = event.response.usage.output_tokens || 0;
                usage.totalTokens = event.response.usage.total_tokens || 0;
              }
            }
          }
          if (textStarted) {
            controller.enqueue({ type: "text-end", id: textId });
          }
          // Safety net: if tool calls arrived but no response.completed event
          // set the reason, still report "tool-calls" rather than "stop".
          const hasToolCalls = Object.keys(toolCalls).length > 0;
          if (hasToolCalls && finishReason === "stop") {
            finishReason = "tool-calls";
          }
          for (const tc of Object.values(toolCalls)) {
            controller.enqueue({ type: "tool-input-end", id: tc.callId });
            controller.enqueue({
              type: "tool-call",
              toolCallId: tc.callId,
              toolName: tc.name,
              input: tc.arguments || "{}"
            });
          }
          controller.enqueue({ type: "finish", finishReason, usage });
          controller.close();
        } catch (error) {
          // Context overflow surfaces to the user; no token retry.
          if (self.isContextOverflowError(error)) {
            const apiError = error;
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
                statusCode: 400,
                cause: error
              })
            });
            controller.close();
            return;
          }
          // Streams can't transparently retry mid-flight; invalidate the
          // token and signal the caller to re-issue the request.
          if (!isRetry && self.isTokenError(error)) {
            self.directAccessClient.invalidateToken();
            controller.enqueue({
              type: "error",
              error: new GitLabError({ message: "TOKEN_REFRESH_NEEDED", cause: error })
            });
            controller.close();
            return;
          }
          if (error instanceof import_openai.default.APIError) {
            controller.enqueue({
              type: "error",
              error: new GitLabError({
                message: `OpenAI API error: ${error.message}`,
                statusCode: error.status,
                cause: error
              })
            });
          } else {
            controller.enqueue({ type: "error", error });
          }
          controller.close();
        }
      }
    });
    return { stream, request: { body: requestBody } };
  }
};
|
|
1571
|
+
|
|
1572
|
+
// src/gitlab-workflow-client.ts
|
|
1573
|
+
var import_isomorphic_ws = __toESM(require("isomorphic-ws"));
|
|
1574
|
+
|
|
1575
|
+
// src/version.ts
|
|
1576
|
+
// Package version baked in at build time.
var VERSION = "3.6.0";

// src/gitlab-workflow-types.ts
// Workflow definition kinds accepted by the Duo Workflow service.
var WorkflowType = /* @__PURE__ */ (() => {
  const values = {};
  values["CHAT"] = "chat";
  values["SOFTWARE_DEVELOPMENT"] = "software_development";
  return values;
})();
// WebSocket keepalive ping cadence (45s) and heartbeat cadence (60s).
var WS_KEEPALIVE_PING_INTERVAL_MS = 45000;
var WS_HEARTBEAT_INTERVAL_MS = 60000;
var DEFAULT_WORKFLOW_DEFINITION = WorkflowType.CHAT;
var DEFAULT_CLIENT_CAPABILITIES = ["shell_command"];
var CLIENT_VERSION = "1.0";
var STOP_REASON_USER = "USER_ACTION_TRIGGERED_STOP";
// Numeric privilege identifiers granted to the agent.
var AGENT_PRIVILEGES = {
  READ_WRITE_FILES: 1,
  READ_ONLY_GITLAB: 2,
  READ_WRITE_GITLAB: 3,
  RUN_COMMANDS: 4,
  USE_GIT: 5,
  RUN_MCP_TOOLS: 6
};
// Privileges enabled by default (order preserved from the original listing).
var DEFAULT_AGENT_PRIVILEGES = [
  AGENT_PRIVILEGES.READ_WRITE_FILES,
  AGENT_PRIVILEGES.READ_ONLY_GITLAB,
  AGENT_PRIVILEGES.READ_WRITE_GITLAB,
  AGENT_PRIVILEGES.RUN_COMMANDS,
  AGENT_PRIVILEGES.RUN_MCP_TOOLS,
  AGENT_PRIVILEGES.USE_GIT
];
var WORKFLOW_ENVIRONMENT = "ide";
|
|
1607
|
+
|
|
1608
|
+
// src/gitlab-workflow-client.ts
|
|
1609
|
+
var GitLabWorkflowClient = class {
  socket = null;
  keepaliveInterval = null;
  heartbeatInterval = null;
  eventCallback = null;
  closed = false;
  lastSendTime = 0;
  // Guards cleanup() so it runs at most once per connection lifecycle.
  cleanedUp = false;
  /**
   * Connect to the DWS WebSocket and start listening for events.
   *
   * Safe to call again on the same instance: any intervals and socket
   * left over from a previous connection are torn down first, so a
   * reconnect cannot leak timers or have a stale socket's close handler
   * tear down the new connection's state.
   *
   * @param options - Connection parameters
   * @param onEvent - Callback invoked for each WorkflowClientEvent
   * @returns Promise that resolves when the connection is open
   */
  connect(options, onEvent) {
    this.validateOptions(options);
    // FIX: clear stale intervals from any previous connection (was a timer
    // leak when connect() was called twice without close()).
    this.cleanup();
    // FIX: detach and close any orphaned previous socket so its late
    // onclose/onerror cannot fire against the new connection's state.
    const previous = this.socket;
    if (previous) {
      previous.onopen = null;
      previous.onmessage = null;
      previous.onerror = null;
      previous.onclose = null;
      if (previous.readyState === import_isomorphic_ws.default.OPEN || previous.readyState === import_isomorphic_ws.default.CONNECTING) {
        previous.close(1e3, "Reconnecting");
      }
      this.socket = null;
    }
    this.eventCallback = onEvent;
    this.closed = false;
    this.cleanedUp = false;
    return new Promise((resolve2, reject) => {
      const wsUrl = this.buildWebSocketUrl(options);
      const wsHeaders = this.buildWebSocketHeaders(options);
      this.socket = new import_isomorphic_ws.default(wsUrl, { headers: wsHeaders });
      // Tracks whether onopen already fired: before open we reject the
      // connect() promise; after open we surface problems as events.
      let resolved = false;
      this.socket.onopen = () => {
        resolved = true;
        this.startKeepalive();
        this.startHeartbeat();
        resolve2();
      };
      this.socket.onmessage = (event) => {
        try {
          const data = typeof event.data === "string" ? event.data : event.data.toString();
          const action = JSON.parse(data);
          if (!action || typeof action !== "object") {
            throw new Error("Invalid message structure: expected object");
          }
          this.handleAction(action);
        } catch (error) {
          // Malformed frames are reported as 'failed' events, never thrown.
          this.emit({
            type: "failed",
            error: error instanceof Error ? error : new Error(String(error))
          });
        }
      };
      this.socket.onerror = (event) => {
        const error = new Error(`WebSocket error: ${event.message || "unknown"}`);
        if (!resolved) {
          reject(error);
        } else {
          this.emit({ type: "failed", error });
        }
      };
      this.socket.onclose = (event) => {
        this.cleanup();
        if (!resolved) {
          reject(
            new Error(
              `WebSocket closed before open: code=${event.code} reason=${event.reason || ""}`
            )
          );
          return;
        }
        // Only emit 'closed' for closes the client did not initiate.
        if (!this.closed) {
          this.emit({
            type: "closed",
            code: event.code,
            reason: event.reason || ""
          });
        }
      };
    });
  }
  /**
   * Send a startRequest to begin the workflow.
   */
  sendStartRequest(request) {
    this.send({ startRequest: request });
  }
  /**
   * Send an actionResponse (tool result) back to DWS.
   *
   * @param requestID - The request this response answers
   * @param response - Plain-text tool output
   * @param error - Optional error string; null is sent when omitted
   */
  sendActionResponse(requestID, response, error) {
    // Piggy-back a heartbeat if we have been quiet for a while.
    this.sendHeartbeatIfNeeded();
    const payload = {
      requestID,
      plainTextResponse: {
        response,
        error: error ?? null
      }
    };
    this.send({ actionResponse: payload });
  }
  /**
   * Stop the workflow gracefully (tells DWS the user requested the stop).
   */
  stop() {
    this.send({ stopWorkflow: { reason: STOP_REASON_USER } });
    this.closed = true;
  }
  /**
   * Close the WebSocket connection. Idempotent.
   */
  close() {
    if (this.closed) return;
    this.closed = true;
    this.cleanup();
    const sock = this.socket;
    this.socket = null;
    if (sock) {
      if (sock.readyState === import_isomorphic_ws.default.OPEN || sock.readyState === import_isomorphic_ws.default.CONNECTING) {
        sock.close(1e3, "Client closing");
      }
    }
  }
  /**
   * Check if the WebSocket is currently connected.
   */
  get isConnected() {
    return this.socket?.readyState === import_isomorphic_ws.default.OPEN;
  }
  // ---------------------------------------------------------------------------
  // Private
  // ---------------------------------------------------------------------------
  /**
   * Validate connection options before opening a socket.
   * Throws on missing/invalid instanceUrl, embedded credentials,
   * missing headers, or a non-string modelRef.
   */
  validateOptions(options) {
    if (!options.instanceUrl || typeof options.instanceUrl !== "string") {
      throw new Error("instanceUrl is required");
    }
    const parsed = new URL(options.instanceUrl);
    if (parsed.protocol !== "https:" && parsed.protocol !== "http:") {
      throw new Error(`Invalid instanceUrl protocol: ${parsed.protocol}`);
    }
    if (parsed.username || parsed.password) {
      throw new Error(
        "instanceUrl must not contain authentication credentials (username/password)"
      );
    }
    if (!options.headers || typeof options.headers !== "object") {
      throw new Error("headers are required");
    }
    if (options.modelRef && typeof options.modelRef !== "string") {
      throw new Error("modelRef must be a string");
    }
  }
  /**
   * Build the ws:// or wss:// URL for the DWS endpoint, mapping the
   * instance's http(s) scheme to the matching WebSocket scheme.
   */
  buildWebSocketUrl(options) {
    const baseUrl = new URL(options.instanceUrl.replace(/\/?$/, "/"));
    const url = new URL("./api/v4/ai/duo_workflows/ws", baseUrl);
    url.protocol = url.protocol === "https:" ? "wss:" : "ws:";
    if (options.modelRef && options.modelRef !== "default") {
      url.searchParams.set("user_selected_model_identifier", options.modelRef);
    }
    return url.toString();
  }
  /**
   * Build the WebSocket handshake headers: caller headers (lowercased,
   * minus content-type) plus GitLab routing/identification headers.
   */
  buildWebSocketHeaders(options) {
    const headers = {};
    for (const [key, value] of Object.entries(options.headers)) {
      headers[key.toLowerCase()] = value;
    }
    // content-type is meaningless on a WebSocket upgrade request.
    delete headers["content-type"];
    headers["x-gitlab-client-type"] = "node-websocket";
    const parsedUrl = new URL(options.instanceUrl);
    const origin = parsedUrl.origin;
    headers["origin"] = origin;
    if (options.requestId) {
      headers["x-request-id"] = options.requestId;
    }
    if (options.projectId) {
      headers["x-gitlab-project-id"] = options.projectId;
    }
    if (options.namespaceId) {
      headers["x-gitlab-namespace-id"] = options.namespaceId;
    }
    if (options.rootNamespaceId) {
      headers["x-gitlab-root-namespace-id"] = options.rootNamespaceId;
    }
    if (!headers["user-agent"]) {
      headers["user-agent"] = `gitlab-ai-provider/${VERSION}`;
    }
    return headers;
  }
  /**
   * Dispatch one parsed DWS action to the matching client event:
   * checkpoints, MCP tool requests, or built-in tool requests.
   */
  handleAction(action) {
    if (action.newCheckpoint) {
      const checkpoint = action.newCheckpoint;
      this.emit({ type: "checkpoint", data: checkpoint });
      if (checkpoint.status === "FINISHED" || checkpoint.status === "COMPLETED") {
        this.emit({ type: "completed" });
      } else if (checkpoint.status === "FAILED") {
        this.emit({
          type: "failed",
          error: new Error(checkpoint.content || "Workflow failed")
        });
      } else if (checkpoint.status === "STOPPED" || checkpoint.status === "CANCELLED") {
        // Stops/cancellations are surfaced as normal completion.
        this.emit({ type: "completed" });
      }
      return;
    }
    if (action.runMCPTool && action.requestID) {
      this.emit({
        type: "tool-request",
        requestID: action.requestID,
        data: action.runMCPTool
      });
      return;
    }
    // A single action carries at most one built-in tool payload; the first
    // populated field wins and the handler returns.
    const builtinTools = [
      ["runReadFile", action.runReadFile],
      ["runReadFiles", action.runReadFiles],
      ["runWriteFile", action.runWriteFile],
      ["runShellCommand", action.runShellCommand],
      ["runEditFile", action.runEditFile],
      ["listDirectory", action.listDirectory],
      ["findFiles", action.findFiles],
      ["grep", action.grep],
      ["mkdir", action.mkdir],
      ["runCommand", action.runCommand],
      ["runGitCommand", action.runGitCommand],
      ["runHTTPRequest", action.runHTTPRequest]
    ];
    for (const [toolName, data] of builtinTools) {
      if (data && action.requestID) {
        this.emit({
          type: "builtin-tool-request",
          requestID: action.requestID,
          toolName,
          data
        });
        return;
      }
    }
  }
  /** Serialize and send an event if the socket is open; silently dropped otherwise. */
  send(event) {
    if (this.socket?.readyState === import_isomorphic_ws.default.OPEN) {
      const json = JSON.stringify(event);
      this.socket.send(json);
      this.lastSendTime = Date.now();
    }
  }
  /** Send a heartbeat when more than half the heartbeat interval has passed since the last send. */
  sendHeartbeatIfNeeded() {
    const elapsed = Date.now() - this.lastSendTime;
    if (elapsed >= WS_HEARTBEAT_INTERVAL_MS / 2) {
      this.send({ heartbeat: { timestamp: Date.now() } });
    }
  }
  /** Forward an event to the registered callback, if any. */
  emit(event) {
    this.eventCallback?.(event);
  }
  /**
   * Start ws.ping() keepalive (45s interval).
   * Keeps TCP connection alive through proxies/load balancers.
   */
  startKeepalive() {
    // FIX: never stack intervals if called twice for one instance.
    if (this.keepaliveInterval) {
      clearInterval(this.keepaliveInterval);
    }
    this.keepaliveInterval = setInterval(() => {
      if (this.socket?.readyState === import_isomorphic_ws.default.OPEN) {
        try {
          this.socket.ping();
        } catch {
          // Best-effort: a failed ping is recovered by the next interval
          // or surfaced via the socket's own error/close handlers.
        }
      }
    }, WS_KEEPALIVE_PING_INTERVAL_MS);
  }
  /**
   * Start application-level heartbeat (60s interval).
   * Prevents DWS from timing out the workflow.
   */
  startHeartbeat() {
    // FIX: never stack intervals if called twice for one instance.
    if (this.heartbeatInterval) {
      clearInterval(this.heartbeatInterval);
    }
    this.heartbeatInterval = setInterval(() => {
      this.send({ heartbeat: { timestamp: Date.now() } });
    }, WS_HEARTBEAT_INTERVAL_MS);
  }
  /**
   * Clean up intervals. Idempotent — safe to call multiple times.
   */
  cleanup() {
    if (this.cleanedUp) return;
    this.cleanedUp = true;
    if (this.keepaliveInterval) {
      clearInterval(this.keepaliveInterval);
      this.keepaliveInterval = null;
    }
    if (this.heartbeatInterval) {
      clearInterval(this.heartbeatInterval);
      this.heartbeatInterval = null;
    }
  }
};
|
|
1895
|
+
|
|
1896
|
+
// src/gitlab-workflow-builtins.ts
|
|
1897
|
+
/**
 * Reject strings containing characters that could alter shell parsing
 * (`; & | ` $ ( ) < >`) when interpolated into a command line.
 *
 * @param value - The candidate string to check
 * @param fieldName - Label used in the thrown error message
 * @throws Error when value contains any forbidden metacharacter
 */
function validateNoShellMetachars(value, fieldName) {
  const forbidden = /[;&|`$()<>]/;
  if (!forbidden.test(value)) {
    return;
  }
  throw new Error(
    `Invalid ${fieldName}: contains shell metacharacters. Use structured arguments instead.`
  );
}
|
|
1905
|
+
/**
 * POSIX single-quote escaping: wrap in single quotes, and for each
 * embedded quote emit close-quote, escaped quote, reopen-quote.
 *
 * @param arg - Value to escape (coerced to string)
 * @returns A single shell word safe to splice into a command line
 */
function shellEscape(arg) {
  const text = String(arg);
  const quoted = text.replace(/'/g, "'\\''");
  return `'${quoted}'`;
}
|
|
1908
|
+
// URL schemes permitted for DWS-requested HTTP calls.
var ALLOWED_URL_SCHEMES = ["http:", "https:"];
/**
 * Redact credentials from an error message before it is logged or rethrown.
 * Scrubs, in order: Bearer tokens, GitLab token literals (glpat-, gloat-,
 * etc.), token query parameters, and basic-auth passwords embedded in URLs.
 *
 * @param message - Raw message text (falsy input yields "")
 * @returns The message with secrets replaced by [REDACTED]
 */
function sanitizeErrorMessage(message) {
  if (!message) return "";
  let scrubbed = message;
  scrubbed = scrubbed.replace(/\bBearer\s+[A-Za-z0-9\-_.~+/]+=*/gi, "Bearer [REDACTED]");
  scrubbed = scrubbed.replace(/\bgl(?:pat|oat|cbt|dt|oas|rt|soat|ffct|sapat)-[A-Za-z0-9_-]+/g, "[REDACTED]");
  scrubbed = scrubbed.replace(/([?&](?:private_token|access_token|token)=)[^&\s"']*/gi, "$1[REDACTED]");
  scrubbed = scrubbed.replace(/:\/\/([^:@/\s]+):([^@/\s]+)@/g, "://$1:[REDACTED]@");
  return scrubbed;
}
|
|
1913
|
+
/**
 * Map a DWS built-in tool request to the local tool name + argument shape.
 *
 * File operations map to `read`/`write`/`edit`; command-like operations
 * (shell, generic command, git, mkdir, HTTP) are rendered into a `bash`
 * tool invocation with every interpolated value passed through
 * `shellEscape`, after `validateNoShellMetachars` has rejected structured
 * arguments containing shell metacharacters.
 *
 * @param dwsToolName - The DWS action field name (e.g. "runReadFile")
 * @param data - The payload carried by that action field
 * @returns `{ toolName, args }` for the local tool runner
 * @throws Error when required fields are missing, too long, contain shell
 *   metacharacters, or (for HTTP) use a disallowed method or URL scheme
 */
function mapBuiltinTool(dwsToolName, data) {
  switch (dwsToolName) {
    case "runReadFile":
      return { toolName: "read", args: { filePath: data.filepath } };
    case "runReadFiles": {
      const paths = data.filepaths ?? [];
      // Zero or one path degrades to a single-file read (empty path for zero).
      if (paths.length <= 1) {
        return { toolName: "read", args: { filePath: paths[0] ?? "" } };
      }
      return {
        toolName: "read",
        args: { filePaths: paths }
      };
    }
    case "runWriteFile":
      return {
        toolName: "write",
        args: { filePath: data.filepath, content: data.contents }
      };
    case "runEditFile":
      return {
        toolName: "edit",
        args: {
          filePath: data.filepath,
          // DWS payloads may use either camelCase or snake_case keys.
          oldString: data.oldString ?? data.old_string,
          newString: data.newString ?? data.new_string
        }
      };
    case "runShellCommand": {
      // Free-form shell command: validated for presence/length only —
      // it is executed as-is by the bash tool, not escaped here.
      const command = data.command;
      if (!command || typeof command !== "string") {
        throw new Error("runShellCommand: command is required and must be a string");
      }
      if (command.length > 1e4) {
        throw new Error("runShellCommand: command exceeds maximum length of 10000 characters");
      }
      return {
        toolName: "bash",
        args: { command, description: "DWS shell command" }
      };
    }
    case "runCommand": {
      // Structured command: program + flags + arguments. Each string part
      // is rejected if it contains shell metacharacters, then escaped.
      const program = data.program;
      if (!program || typeof program !== "string") {
        throw new Error("runCommand: program is required and must be a string");
      }
      validateNoShellMetachars(program, "program");
      const flags = data.flags ?? [];
      const cmdArgs = data.arguments ?? [];
      for (const flag of flags) {
        if (typeof flag === "string") {
          validateNoShellMetachars(flag, "flag");
        }
      }
      for (const arg of cmdArgs) {
        if (typeof arg === "string") {
          validateNoShellMetachars(arg, "argument");
        }
      }
      return {
        toolName: "bash",
        args: {
          command: [program, ...flags, ...cmdArgs].map((a) => shellEscape(String(a))).join(" "),
          description: `DWS run: ${program}`
        }
      };
    }
    case "runGitCommand": {
      // Structured git invocation: subcommand + arguments, validated and
      // escaped like runCommand, always prefixed with "git".
      const gitCmd = data.command;
      if (!gitCmd || typeof gitCmd !== "string") {
        throw new Error("runGitCommand: command is required and must be a string");
      }
      validateNoShellMetachars(gitCmd, "git command");
      const gitArgs = data.arguments ?? [];
      for (const arg of gitArgs) {
        if (typeof arg === "string") {
          validateNoShellMetachars(arg, "git argument");
        }
      }
      return {
        toolName: "bash",
        args: {
          command: ["git", gitCmd, ...gitArgs].map((a) => shellEscape(String(a))).join(" "),
          description: `DWS git: ${gitCmd}`
        }
      };
    }
    case "listDirectory":
      // Directory listing is delegated to the local "read" tool with the
      // directory path (defaults to the current directory).
      return { toolName: "read", args: { filePath: data.directory ?? "." } };
    case "findFiles":
      return { toolName: "glob", args: { pattern: data.name_pattern ?? data.namePattern } };
    case "grep":
      return {
        toolName: "grep",
        args: {
          pattern: data.pattern,
          path: data.search_directory ?? data.searchDirectory
        }
      };
    case "mkdir": {
      const dirPath = String(data.directory_path ?? data.directoryPath ?? "");
      if (!dirPath) {
        throw new Error("mkdir: directory_path is required");
      }
      // Null bytes can truncate paths at the syscall boundary.
      if (dirPath.includes("\0")) {
        throw new Error("mkdir: directory_path contains null bytes");
      }
      return {
        toolName: "bash",
        args: {
          command: `mkdir -p ${shellEscape(dirPath)}`,
          description: "DWS mkdir"
        }
      };
    }
    case "runHTTPRequest": {
      const methodRaw = String(data.method ?? "GET").toUpperCase();
      const allowedMethods = ["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"];
      if (!allowedMethods.includes(methodRaw)) {
        throw new Error(`runHTTPRequest: invalid HTTP method '${methodRaw}'`);
      }
      const urlPath = String(data.path ?? "");
      if (!urlPath) {
        throw new Error("runHTTPRequest: path is required");
      }
      try {
        const parsedUrl = new URL(urlPath);
        if (!ALLOWED_URL_SCHEMES.includes(parsedUrl.protocol)) {
          throw new Error(
            `runHTTPRequest: only http:// and https:// schemes are allowed, got '${parsedUrl.protocol}'`
          );
        }
      } catch (e) {
        // Only the scheme-violation error above is rethrown. A URL parse
        // failure (e.g. a relative path) is deliberately swallowed so the
        // value is still passed to curl — NOTE(review): confirm relative
        // paths are intended to bypass the scheme allow-list here.
        if (e instanceof Error && e.message.startsWith("runHTTPRequest:")) throw e;
      }
      const method = shellEscape(methodRaw);
      const escapedPath = shellEscape(urlPath);
      const bodyArg = data.body ? ` -d ${shellEscape(String(data.body))}` : "";
      return {
        toolName: "bash",
        args: {
          // `--` prevents the URL from being parsed as a curl option.
          command: `curl -s -X ${method} -- ${escapedPath}${bodyArg}`,
          description: `DWS HTTP ${methodRaw}`
        }
      };
    }
    default:
      // Unknown tools pass through untouched for the caller to handle.
      return { toolName: dwsToolName, args: data };
  }
}
|
|
2063
|
+
|
|
2064
|
+
// src/gitlab-workflow-token-client.ts
|
|
2065
|
+
// DWS tokens are cached for 25 minutes before being re-fetched.
var TOKEN_CACHE_DURATION_MS = 25 * 60 * 1e3;
// Upper bound on error text carried inside thrown errors.
var MAX_ERROR_TEXT_LENGTH = 500;
/**
 * Bound the length of raw API error text, then redact any credentials.
 *
 * @param text - Raw response/error text
 * @returns Truncated, secret-scrubbed text safe to embed in error messages
 */
function sanitizeErrorText(text) {
  let capped = text;
  if (capped.length > MAX_ERROR_TEXT_LENGTH) {
    capped = capped.slice(0, MAX_ERROR_TEXT_LENGTH) + "...";
  }
  return sanitizeErrorMessage(capped);
}
// Cache key shared by every CHAT workflow session.
var CHAT_SHARED_TOKEN_KEY = "__chat_shared__";
|
|
2072
|
+
/**
 * Client for the GitLab Duo Workflows token and workflow-creation REST
 * endpoints. Caches direct-access tokens for TOKEN_CACHE_DURATION_MS and
 * sanitizes API error text before embedding it in thrown GitLabErrors.
 */
var GitLabWorkflowTokenClient = class {
  // Caller-provided configuration: instanceUrl, getHeaders(), and optional
  // fetch, featureFlags, refreshApiKey.
  config;
  // fetch implementation (config.fetch or the global fetch).
  fetchFn;
  /**
   * Token cache keyed by workflow definition type.
   *
   * - CHAT workflows use a shared key (CHAT_SHARED_TOKEN_KEY) so tokens
   * are reused across ALL chat sessions (matching gitlab-lsp behavior).
   * - SOFTWARE_DEVELOPMENT workflows would use per-workflow-id keys,
   * but since we fetch tokens before creating workflows, we key by type.
   */
  tokenCache = /* @__PURE__ */ new Map();
  constructor(config) {
    this.config = config;
    this.fetchFn = config.fetch ?? fetch;
  }
  /**
   * Resolve the cache key for a given workflow definition.
   * CHAT workflows share a single token per namespace; other types get per-type keys.
   */
  getCacheKey(workflowDefinition, rootNamespaceId) {
    const base = workflowDefinition === "chat" /* CHAT */ ? CHAT_SHARED_TOKEN_KEY : workflowDefinition;
    // Namespace-scoped tokens get a composite key; unscoped share the base key.
    return rootNamespaceId ? `${base}:${rootNamespaceId}` : base;
  }
  /**
   * Get a DWS token, using cached value if still valid.
   *
   * Token caching strategy (matches gitlab-lsp):
   * - CHAT workflows: shared token across all sessions
   * - Other workflows: per-type token
   *
   * On a 401 with a configured refreshApiKey, refreshes credentials once
   * and retries with forceRefresh=true (the guard on forceRefresh prevents
   * infinite recursion).
   *
   * @param workflowDefinition - Workflow type (default: 'chat')
   * @param rootNamespaceId - Optional root namespace for scoping
   * @param forceRefresh - Bypass cache
   * @returns The direct-access token payload returned by the API
   * @throws GitLabError on any HTTP or network failure
   */
  async getToken(workflowDefinition = DEFAULT_WORKFLOW_DEFINITION, rootNamespaceId, forceRefresh = false) {
    const now = Date.now();
    const cacheKey = this.getCacheKey(workflowDefinition, rootNamespaceId);
    const cached = this.tokenCache.get(cacheKey);
    if (!forceRefresh && cached && cached.expiresAt > now) {
      return cached.token;
    }
    if (forceRefresh) {
      this.tokenCache.delete(cacheKey);
    }
    const url = `${this.config.instanceUrl}/api/v4/ai/duo_workflows/direct_access`;
    const body = {
      workflow_definition: workflowDefinition
    };
    if (rootNamespaceId) {
      body.root_namespace_id = rootNamespaceId;
    }
    if (this.config.featureFlags && Object.keys(this.config.featureFlags).length > 0) {
      body.feature_flags = this.config.featureFlags;
    }
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify(body)
      });
      if (!response.ok) {
        const errorText = await response.text();
        const safeError = sanitizeErrorText(errorText);
        if (response.status === 401 && this.config.refreshApiKey && !forceRefresh) {
          try {
            await this.config.refreshApiKey();
            return await this.getToken(workflowDefinition, rootNamespaceId, true);
          } catch {
            // NOTE(review): this catch also swallows a failure of the
            // retried getToken call and reports the ORIGINAL 401 instead
            // of the retry's (possibly more specific) error — confirm
            // this masking is intended.
            throw new GitLabError({
              message: `Failed to get workflow token: ${response.status} ${response.statusText} - ${safeError}`,
              statusCode: response.status,
              responseBody: safeError
            });
          }
        }
        if (response.status === 403) {
          // 403 gets a dedicated, actionable licensing message.
          throw new GitLabError({
            message: `GitLab Duo Agent Platform access denied. GitLab Duo Agent Platform requires GitLab Ultimate with Duo Enterprise add-on. Ensure: (1) Your instance has GitLab Ultimate, (2) Duo Enterprise add-on is enabled, (3) Your account has access to AI features.`,
            statusCode: response.status,
            responseBody: safeError
          });
        }
        throw new GitLabError({
          message: `Failed to get workflow token: ${response.status} ${response.statusText} - ${safeError}`,
          statusCode: response.status,
          responseBody: safeError
        });
      }
      const data = await response.json();
      this.tokenCache.set(cacheKey, {
        token: data,
        expiresAt: now + TOKEN_CACHE_DURATION_MS
      });
      return data;
    } catch (error) {
      // GitLabErrors thrown above pass through unchanged; anything else
      // (network failure, JSON parse error) is wrapped with a cause.
      if (error instanceof GitLabError) throw error;
      throw new GitLabError({
        message: `Failed to get workflow token: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Create a new workflow on the GitLab instance.
   *
   * @param goal - The user's message / goal for this workflow
   * @param options - Additional workflow creation options
   * @returns The created workflow's ID
   * @throws GitLabError on invalid goal, HTTP failure, or network error
   */
  async createWorkflow(goal, options) {
    if (!goal || typeof goal !== "string") {
      throw new GitLabError({ message: "goal is required and must be a non-empty string" });
    }
    if (goal.length > 1e4) {
      throw new GitLabError({ message: "goal exceeds maximum length of 10000 characters" });
    }
    const url = `${this.config.instanceUrl}/api/v4/ai/duo_workflows/workflows`;
    const body = {
      goal,
      project_id: options?.projectId,
      namespace_id: options?.namespaceId,
      workflow_definition: options?.workflowDefinition ?? DEFAULT_WORKFLOW_DEFINITION,
      agent_privileges: options?.agentPrivileges ?? DEFAULT_AGENT_PRIVILEGES,
      environment: options?.environment ?? WORKFLOW_ENVIRONMENT,
      allow_agent_to_request_user: options?.allowAgentToRequestUser ?? true
    };
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify(body)
      });
      if (!response.ok) {
        const errorText = await response.text();
        const safeError = sanitizeErrorText(errorText);
        throw new GitLabError({
          message: `Failed to create workflow: ${response.status} ${response.statusText} - ${safeError}`,
          statusCode: response.status,
          responseBody: safeError
        });
      }
      const data = await response.json();
      // API returns a numeric id; callers expect a string.
      return data.id.toString();
    } catch (error) {
      if (error instanceof GitLabError) throw error;
      throw new GitLabError({
        message: `Failed to create workflow: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Invalidate cached tokens.
   *
   * @param workflowDefinition - If provided, only invalidate for this type.
   * If omitted, clears ALL cached tokens.
   * @param rootNamespaceId - Optional namespace scope used to build the key
   */
  invalidateToken(workflowDefinition, rootNamespaceId) {
    if (workflowDefinition) {
      this.tokenCache.delete(this.getCacheKey(workflowDefinition, rootNamespaceId));
    } else {
      this.tokenCache.clear();
    }
  }
};
|
|
2244
|
+
|
|
2245
|
+
// src/gitlab-project-detector.ts
|
|
2246
|
+
var import_child_process = require("child_process");
|
|
2247
|
+
var path = __toESM(require("path"));
|
|
2248
|
+
|
|
2249
|
+
// src/gitlab-project-cache.ts
|
|
2250
|
+
/**
 * TTL-based in-memory cache for resolved GitLab projects, keyed by an
 * arbitrary string (typically a working directory path). Expired entries
 * are evicted lazily on read or eagerly via cleanup().
 */
var GitLabProjectCache = class {
  // key -> { project, expiresAt } where expiresAt is an epoch-ms deadline.
  cache = /* @__PURE__ */ new Map();
  defaultTTL;
  /**
   * Create a new project cache
   * @param defaultTTL - Default time-to-live in milliseconds (default: 5 minutes)
   */
  constructor(defaultTTL = 5 * 60 * 1e3) {
    this.defaultTTL = defaultTTL;
  }
  /**
   * Get a cached project by key, lazily evicting it if expired.
   * @param key - Cache key (typically the working directory path)
   * @returns The cached project or null if not found or expired
   */
  get(key) {
    const entry = this.cache.get(key);
    if (entry === undefined) {
      return null;
    }
    const isExpired = entry.expiresAt < Date.now();
    if (isExpired) {
      this.cache.delete(key);
      return null;
    }
    return entry.project;
  }
  /**
   * Store a project in the cache.
   * @param key - Cache key (typically the working directory path)
   * @param project - The project to cache
   * @param ttl - Optional custom TTL in milliseconds
   */
  set(key, project, ttl) {
    const lifetime = ttl ?? this.defaultTTL;
    this.cache.set(key, { project, expiresAt: Date.now() + lifetime });
  }
  /**
   * Check if a key exists in the cache (and is not expired).
   * @param key - Cache key to check
   * @returns true if the key exists and is not expired
   */
  has(key) {
    return this.get(key) !== null;
  }
  /**
   * Remove a specific entry from the cache.
   * @param key - Cache key to remove
   */
  delete(key) {
    this.cache.delete(key);
  }
  /**
   * Clear all entries from the cache.
   */
  clear() {
    this.cache.clear();
  }
  /**
   * Number of entries in the cache (including expired, not-yet-evicted ones).
   */
  get size() {
    return this.cache.size;
  }
  /**
   * Eagerly evict expired entries; useful for long-running processes to
   * prevent memory growth from keys that are never read again.
   */
  cleanup() {
    const cutoff = Date.now();
    for (const [key, entry] of this.cache) {
      if (entry.expiresAt < cutoff) {
        this.cache.delete(key);
      }
    }
  }
};
|
|
2328
|
+
|
|
2329
|
+
// src/gitlab-project-detector.ts
|
|
2330
|
+
var GitLabProjectDetector = class {
|
|
2331
|
+
config;
|
|
2332
|
+
fetchFn;
|
|
2333
|
+
cache;
|
|
2334
|
+
constructor(config) {
|
|
2335
|
+
this.config = {
|
|
2336
|
+
gitTimeout: 5e3,
|
|
2337
|
+
// 5 seconds default
|
|
2338
|
+
...config
|
|
2339
|
+
};
|
|
2340
|
+
this.fetchFn = config.fetch ?? fetch;
|
|
2341
|
+
this.cache = config.cache ?? new GitLabProjectCache();
|
|
2342
|
+
}
|
|
2343
|
+
/**
|
|
2344
|
+
* Auto-detect GitLab project from git remote in the working directory
|
|
2345
|
+
*
|
|
2346
|
+
* @param workingDirectory - The directory to check for git remote
|
|
2347
|
+
* @param remoteName - The git remote name to use (default: 'origin')
|
|
2348
|
+
* @returns The detected project or null if not a git repo / no matching remote
|
|
2349
|
+
* @throws GitLabError if the API call or an unexpected error occurs
|
|
2350
|
+
*/
|
|
2351
|
+
async detectProject(workingDirectory, remoteName = "origin") {
|
|
2352
|
+
const cacheKey = path.resolve(workingDirectory);
|
|
2353
|
+
const cached = this.cache.get(cacheKey);
|
|
2354
|
+
if (cached) {
|
|
2355
|
+
return cached;
|
|
2356
|
+
}
|
|
2357
|
+
try {
|
|
2358
|
+
const remoteUrl = await this.getGitRemoteUrl(workingDirectory, remoteName);
|
|
2359
|
+
if (!remoteUrl) {
|
|
2360
|
+
return null;
|
|
2361
|
+
}
|
|
2362
|
+
const projectPath = this.parseGitRemoteUrl(remoteUrl, this.config.instanceUrl);
|
|
2363
|
+
if (!projectPath) {
|
|
2364
|
+
return null;
|
|
2365
|
+
}
|
|
2366
|
+
const project = await this.getProjectByPath(projectPath);
|
|
2367
|
+
this.cache.set(cacheKey, project);
|
|
2368
|
+
return project;
|
|
2369
|
+
} catch (error) {
|
|
2370
|
+
throw error instanceof GitLabError ? error : new GitLabError({
|
|
2371
|
+
message: `Project detection failed: ${error}`,
|
|
2372
|
+
cause: error
|
|
2373
|
+
});
|
|
2374
|
+
}
|
|
2375
|
+
}
|
|
2376
|
+
/**
|
|
2377
|
+
* Parse a git remote URL to extract the project path
|
|
2378
|
+
*
|
|
2379
|
+
* Supports:
|
|
2380
|
+
* - SSH: git@gitlab.com:namespace/project.git
|
|
2381
|
+
* - HTTPS: https://gitlab.com/namespace/project.git
|
|
2382
|
+
* - HTTP: http://gitlab.local/namespace/project.git
|
|
2383
|
+
* - Custom domains and ports
|
|
2384
|
+
*
|
|
2385
|
+
* @param remoteUrl - The git remote URL
|
|
2386
|
+
* @param instanceUrl - The GitLab instance URL to match against
|
|
2387
|
+
* @returns The project path (e.g., "namespace/project") or null if parsing fails
|
|
2388
|
+
*/
|
|
2389
|
+
parseGitRemoteUrl(remoteUrl, instanceUrl) {
|
|
2390
|
+
try {
|
|
2391
|
+
const instanceHost = new URL(instanceUrl).hostname;
|
|
2392
|
+
const sshMatch = remoteUrl.match(/^git@([^:]+):(.+?)(?:\.git)?$/);
|
|
2393
|
+
if (sshMatch) {
|
|
2394
|
+
const [, host, pathPart] = sshMatch;
|
|
2395
|
+
const hostWithoutPort = host.split(":")[0];
|
|
2396
|
+
if (hostWithoutPort === instanceHost) {
|
|
2397
|
+
const cleanPath = pathPart.replace(/^\d+\//, "");
|
|
2398
|
+
return cleanPath.endsWith(".git") ? cleanPath.slice(0, -4) : cleanPath;
|
|
2399
|
+
}
|
|
2400
|
+
}
|
|
2401
|
+
const httpsMatch = remoteUrl.match(/^(https?):\/\/([^/]+)\/(.+?)(?:\.git)?$/);
|
|
2402
|
+
if (httpsMatch) {
|
|
2403
|
+
const [, , hostWithPort, pathPart] = httpsMatch;
|
|
2404
|
+
const host = hostWithPort.split(":")[0];
|
|
2405
|
+
if (host === instanceHost) {
|
|
2406
|
+
return pathPart.endsWith(".git") ? pathPart.slice(0, -4) : pathPart;
|
|
2407
|
+
}
|
|
2408
|
+
}
|
|
2409
|
+
return null;
|
|
2410
|
+
} catch (error) {
|
|
2411
|
+
return null;
|
|
2412
|
+
}
|
|
2413
|
+
}
|
|
2414
|
+
/**
|
|
2415
|
+
* Get the git remote URL from a working directory
|
|
2416
|
+
*
|
|
2417
|
+
* @param workingDirectory - The directory to check
|
|
2418
|
+
* @param remoteName - The git remote name (default: 'origin')
|
|
2419
|
+
* @returns The remote URL or null if not found
|
|
2420
|
+
*/
|
|
2421
|
+
async getGitRemoteUrl(workingDirectory, remoteName = "origin") {
|
|
2422
|
+
return new Promise((resolve2) => {
|
|
2423
|
+
const child = (0, import_child_process.spawn)("git", ["config", "--get", `remote.${remoteName}.url`], {
|
|
2424
|
+
cwd: workingDirectory,
|
|
2425
|
+
timeout: this.config.gitTimeout
|
|
2426
|
+
});
|
|
2427
|
+
let stdout = "";
|
|
2428
|
+
let _stderr = "";
|
|
2429
|
+
child.stdout?.on("data", (data) => {
|
|
2430
|
+
stdout += data.toString();
|
|
2431
|
+
});
|
|
2432
|
+
child.stderr?.on("data", (data) => {
|
|
2433
|
+
_stderr += data.toString();
|
|
2434
|
+
});
|
|
2435
|
+
child.on("close", (exitCode) => {
|
|
2436
|
+
if (exitCode === 0 && stdout.trim()) {
|
|
2437
|
+
resolve2(stdout.trim());
|
|
2438
|
+
} else {
|
|
2439
|
+
resolve2(null);
|
|
2440
|
+
}
|
|
2441
|
+
});
|
|
2442
|
+
child.on("error", () => {
|
|
2443
|
+
resolve2(null);
|
|
2444
|
+
});
|
|
2445
|
+
});
|
|
2446
|
+
}
|
|
2447
|
+
/**
|
|
2448
|
+
* Fetch project details from GitLab API by project path
|
|
2449
|
+
*
|
|
2450
|
+
* @param projectPath - The project path (e.g., "namespace/project")
|
|
2451
|
+
* @returns The project details
|
|
2452
|
+
* @throws GitLabError if the API call fails
|
|
2453
|
+
*/
|
|
2454
|
+
async getProjectByPath(projectPath) {
|
|
2455
|
+
const encodedPath = encodeURIComponent(projectPath);
|
|
2456
|
+
const url = `${this.config.instanceUrl}/api/v4/projects/${encodedPath}`;
|
|
2457
|
+
try {
|
|
2458
|
+
const response = await this.fetchFn(url, {
|
|
2459
|
+
method: "GET",
|
|
2460
|
+
headers: this.config.getHeaders()
|
|
2461
|
+
});
|
|
2462
|
+
if (!response.ok) {
|
|
2463
|
+
throw new GitLabError({
|
|
2464
|
+
message: `Failed to fetch project '${projectPath}': ${response.status} ${response.statusText}`
|
|
2465
|
+
});
|
|
2466
|
+
}
|
|
2467
|
+
const data = await response.json();
|
|
2468
|
+
return {
|
|
2469
|
+
id: data.id,
|
|
2470
|
+
path: data.path,
|
|
2471
|
+
pathWithNamespace: data.path_with_namespace,
|
|
2472
|
+
name: data.name,
|
|
2473
|
+
namespaceId: data.namespace?.id
|
|
2474
|
+
};
|
|
2475
|
+
} catch (error) {
|
|
2476
|
+
if (error instanceof GitLabError) {
|
|
2477
|
+
throw error;
|
|
2478
|
+
}
|
|
2479
|
+
throw new GitLabError({
|
|
2480
|
+
message: `Failed to fetch project '${projectPath}': ${error}`,
|
|
2481
|
+
cause: error
|
|
2482
|
+
});
|
|
2483
|
+
}
|
|
2484
|
+
}
|
|
2485
|
+
/**
 * Clear the project cache.
 *
 * Forces the next detectProject() call to re-run git remote detection and
 * the API lookup for every working directory.
 */
clearCache() {
  this.cache.clear();
}
|
|
2491
|
+
/**
 * Get the cache instance (useful for testing).
 *
 * @returns The internal project cache, keyed by resolved working-directory
 *   path (see detectProject).
 */
getCache() {
  return this.cache;
}
|
|
2497
|
+
};
|
|
2498
|
+
|
|
2499
|
+
// src/gitlab-model-discovery.ts
|
|
2500
|
+
var AI_CHAT_AVAILABLE_MODELS_QUERY = `
|
|
2501
|
+
query aiChatAvailableModels($rootNamespaceId: GroupID!) {
|
|
2502
|
+
metadata {
|
|
2503
|
+
featureFlags(names: ["ai_user_model_switching"]) {
|
|
2504
|
+
enabled
|
|
2505
|
+
name
|
|
2506
|
+
}
|
|
2507
|
+
version
|
|
2508
|
+
}
|
|
2509
|
+
|
|
2510
|
+
aiChatAvailableModels(rootNamespaceId: $rootNamespaceId) {
|
|
2511
|
+
defaultModel {
|
|
2512
|
+
name
|
|
2513
|
+
ref
|
|
2514
|
+
}
|
|
2515
|
+
selectableModels {
|
|
2516
|
+
name
|
|
2517
|
+
ref
|
|
2518
|
+
}
|
|
2519
|
+
pinnedModel {
|
|
2520
|
+
name
|
|
2521
|
+
ref
|
|
2522
|
+
}
|
|
2523
|
+
}
|
|
2524
|
+
}
|
|
2525
|
+
`;
|
|
2526
|
+
// Discovery results are cached in memory for 10 minutes per namespace.
var DISCOVERY_CACHE_TTL_MS = 10 * 60 * 1e3;
|
|
2527
|
+
// Discovers which AI models are available to a namespace via the GitLab
// GraphQL API, with a per-namespace in-memory TTL cache.
var GitLabModelDiscovery = class {
  config;
  fetchFn;
  // rootNamespaceId -> { data, expiresAt }; TTL = DISCOVERY_CACHE_TTL_MS.
  cache = /* @__PURE__ */ new Map();
  constructor(config) {
    this.config = config;
    // Allow injecting a fetch implementation (tests/proxies); default to global fetch.
    this.fetchFn = config.fetch ?? fetch;
  }
  /**
   * Discover available models for a given root namespace.
   *
   * Results are cached per `rootNamespaceId` with a 10-minute TTL.
   * Use `invalidateCache()` to force an immediate refresh.
   *
   * @param rootNamespaceId - GitLab group ID (e.g., 'gid://gitlab/Group/12345')
   * @throws GitLabError on HTTP failure, GraphQL errors, or unexpected errors
   */
  async discover(rootNamespaceId) {
    const cached = this.cache.get(rootNamespaceId);
    if (cached && cached.expiresAt > Date.now()) {
      return cached.data;
    }
    const url = `${this.config.instanceUrl}/api/graphql`;
    try {
      const response = await this.fetchFn(url, {
        method: "POST",
        headers: {
          ...this.config.getHeaders(),
          "Content-Type": "application/json"
        },
        body: JSON.stringify({
          query: AI_CHAT_AVAILABLE_MODELS_QUERY,
          variables: { rootNamespaceId }
        })
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new GitLabError({
          message: `Model discovery GraphQL request failed: ${response.status} ${response.statusText} - ${errorText}`,
          statusCode: response.status,
          responseBody: errorText
        });
      }
      const json = await response.json();
      // GraphQL may return HTTP 200 with an `errors` array; treat as failure.
      if (json.errors && json.errors.length > 0) {
        throw new GitLabError({
          message: `Model discovery GraphQL errors: ${json.errors.map((e) => e.message).join(", ")}`
        });
      }
      const models = json.data?.aiChatAvailableModels;
      const metadata = json.data?.metadata;
      // Model switching is gated behind the `ai_user_model_switching` flag.
      const modelSwitchingEnabled = metadata?.featureFlags?.find((f) => f.name === "ai_user_model_switching")?.enabled ?? false;
      const result = {
        defaultModel: models?.defaultModel ?? null,
        selectableModels: models?.selectableModels ?? [],
        pinnedModel: models?.pinnedModel ?? null,
        modelSwitchingEnabled,
        instanceVersion: metadata?.version ?? null
      };
      this.cache.set(rootNamespaceId, {
        data: result,
        expiresAt: Date.now() + DISCOVERY_CACHE_TTL_MS
      });
      return result;
    } catch (error) {
      // Preserve GitLabError as-is; wrap anything else with the original cause.
      if (error instanceof GitLabError) throw error;
      throw new GitLabError({
        message: `Model discovery failed: ${error}`,
        cause: error
      });
    }
  }
  /**
   * Get the effective model ref to use for a workflow.
   *
   * Priority: pinned > user-selected > default.
   *
   * @param rootNamespaceId - GitLab group ID
   * @param userSelectedRef - Optional user preference
   * @returns The resolved model ref, or null if the namespace has no default
   */
  async getEffectiveModelRef(rootNamespaceId, userSelectedRef) {
    const discovered = await this.discover(rootNamespaceId);
    // An admin-pinned model always wins.
    if (discovered.pinnedModel) {
      return discovered.pinnedModel.ref;
    }
    // Honor the user's choice only when switching is enabled AND the ref is
    // still one of the selectable models.
    if (userSelectedRef && discovered.modelSwitchingEnabled) {
      const isValid = discovered.selectableModels.some((m) => m.ref === userSelectedRef);
      if (isValid) {
        return userSelectedRef;
      }
    }
    return discovered.defaultModel?.ref ?? null;
  }
  /**
   * Invalidate the cached discovery results.
   */
  invalidateCache() {
    this.cache.clear();
  }
};
|
|
2626
|
+
|
|
2627
|
+
// src/gitlab-model-cache.ts
|
|
2628
|
+
var fs = __toESM(require("fs"));
|
|
2629
|
+
var path2 = __toESM(require("path"));
|
|
2630
|
+
var os = __toESM(require("os"));
|
|
2631
|
+
var crypto = __toESM(require("crypto"));
|
|
2632
|
+
// Location of the shared model-cache JSON file, honoring XDG_CACHE_HOME and
// falling back to ~/.cache.
function getCacheFilePath() {
  const baseDir = process.env.XDG_CACHE_HOME || path2.join(os.homedir(), ".cache");
  return path2.join(baseDir, "opencode", "gitlab-workflow-model-cache.json");
}
|
|
2636
|
+
// Stable 12-hex-char key for a (workDir, instanceUrl) pair. Defaults to
// gitlab.com and drops one trailing slash so "https://x" and "https://x/"
// map to the same key; NUL separates the two inputs unambiguously.
function computeCacheKey(workDir, instanceUrl) {
  const canonicalUrl = (instanceUrl || "https://gitlab.com").replace(/\/$/, "");
  const digest = crypto.createHash("sha256").update(`${workDir}\0${canonicalUrl}`).digest("hex");
  return digest.slice(0, 12);
}
|
|
2640
|
+
// Persists per-workspace model discovery/selection to a JSON file under the
// user cache directory. One file serves all workspaces; each workspace's
// entry is keyed by a hash of (workDir, instanceUrl). All file I/O here is
// deliberately best-effort: failures are swallowed so caching can never
// break the caller.
var GitLabModelCache = class {
  // Absolute path of the shared cache file.
  filePath;
  // 12-hex-char key identifying this (workDir, instanceUrl) pair.
  key;
  constructor(workDir, instanceUrl) {
    this.filePath = getCacheFilePath();
    this.key = computeCacheKey(workDir, instanceUrl);
  }
  // Read and parse the whole cache file; any read/parse failure yields {}.
  readAll() {
    try {
      if (!fs.existsSync(this.filePath)) {
        return {};
      }
      const raw = fs.readFileSync(this.filePath, "utf-8");
      return JSON.parse(raw);
    } catch {
      return {};
    }
  }
  // Write the whole cache file, creating the directory with owner-only
  // permissions (448 = 0o700 dir, 384 = 0o600 file). Failures are ignored.
  writeAll(data) {
    try {
      const dir = path2.dirname(this.filePath);
      fs.mkdirSync(dir, { recursive: true, mode: 448 });
      fs.writeFileSync(this.filePath, JSON.stringify(data, null, 2), { mode: 384 });
    } catch {
    }
  }
  /**
   * Load the cached entry for this workspace.
   * Returns null if no cache exists or is unreadable.
   */
  load() {
    return this.readAll()[this.key] ?? null;
  }
  /**
   * Persist the full cache entry to disk (read-modify-write of this
   * workspace's slot only; other workspaces' entries are preserved).
   */
  save(entry) {
    const data = this.readAll();
    data[this.key] = entry;
    this.writeAll(data);
  }
  /**
   * Update only the discovery portion of the cache, preserving selection.
   */
  saveDiscovery(discovery) {
    const existing = this.load();
    this.save({
      discovery,
      selectedModelRef: existing?.selectedModelRef ?? null,
      selectedModelName: existing?.selectedModelName ?? null,
      updatedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
  }
  /**
   * Update only the selected model, preserving the discovery data.
   */
  saveSelection(ref, name) {
    const existing = this.load();
    this.save({
      discovery: existing?.discovery ?? null,
      selectedModelRef: ref,
      selectedModelName: name,
      updatedAt: (/* @__PURE__ */ new Date()).toISOString()
    });
  }
  /**
   * Remove the entry for this workspace from the cache file.
   * Deletes the file entirely when no other workspace entries remain.
   */
  clear() {
    const data = this.readAll();
    delete data[this.key];
    if (Object.keys(data).length === 0) {
      try {
        if (fs.existsSync(this.filePath)) {
          fs.unlinkSync(this.filePath);
        }
      } catch {
      }
    } else {
      this.writeAll(data);
    }
  }
  /**
   * Convenience: get the cached selected model ref (or null).
   */
  getSelectedModelRef() {
    return this.load()?.selectedModelRef ?? null;
  }
  /**
   * Convenience: get the cached selected model name (or null).
   */
  getSelectedModelName() {
    return this.load()?.selectedModelName ?? null;
  }
  /**
   * Convenience: get the cached discovery result (or null).
   */
  getDiscovery() {
    return this.load()?.discovery ?? null;
  }
};
|
|
2741
|
+
|
|
2742
|
+
// src/gitlab-workflow-language-model.ts
|
|
2743
|
+
// Recursively strip prompt-bloating keys ("description", "examples",
// "default") from a JSON Schema object, preserving everything else.
// Non-object inputs are returned unchanged; arrays are mapped element-wise.
function simplifySchemaObj(schema) {
  if (!schema || typeof schema !== "object") return schema;
  // Bug fix: arrays (tuple-form `items`, `anyOf`, `oneOf`, ...) must stay
  // arrays. The previous code recursed into them with Object.entries, which
  // converted an array into an index-keyed plain object and corrupted the
  // schema (e.g. `items: [{...}]` became `items: {"0": {...}}`).
  if (Array.isArray(schema)) return schema.map(simplifySchemaObj);
  const result = {};
  for (const [key, value] of Object.entries(schema)) {
    if (key === "description" || key === "examples" || key === "default") {
      continue;
    }
    if (key === "properties" && typeof value === "object" && value !== null) {
      // Simplify each property schema; pass non-object values through as-is.
      const props = {};
      for (const [propName, propValue] of Object.entries(value)) {
        if (typeof propValue === "object" && propValue !== null) {
          props[propName] = simplifySchemaObj(propValue);
        } else {
          props[propName] = propValue;
        }
      }
      result[key] = props;
    } else if (key === "items" && typeof value === "object" && value !== null) {
      result[key] = simplifySchemaObj(value);
    } else {
      result[key] = value;
    }
  }
  return result;
}
|
|
2768
|
+
// Apply simplifySchemaObj to a JSON-encoded schema string. If the input is
// not valid JSON, return it unchanged rather than failing.
function simplifySchema(schemaStr) {
  try {
    const parsed = JSON.parse(schemaStr);
    return JSON.stringify(simplifySchemaObj(parsed));
  } catch {
    return schemaStr;
  }
}
|
|
2775
|
+
// Reduce a JSON Schema object to its bare minimum: top-level `type`
// (defaulting to "object"), `required`, and one level of `properties`
// with only each property's `type` (defaulting to "string").
// Non-object inputs are returned unchanged.
function minimalSchemaObj(schema) {
  if (!schema || typeof schema !== "object") {
    return schema;
  }
  const out = { type: schema.type || "object" };
  if (schema.required) {
    out.required = schema.required;
  }
  const srcProps = schema.properties;
  if (srcProps && typeof srcProps === "object") {
    out.properties = Object.fromEntries(
      Object.entries(srcProps).map(([name, def]) => {
        const type = def && typeof def === "object" ? def.type || "string" : "string";
        return [name, { type }];
      })
    );
  }
  return out;
}
|
|
2797
|
+
// Apply minimalSchemaObj to a JSON-encoded schema string. Invalid JSON is
// passed through unchanged instead of throwing.
function minimalSchema(schemaStr) {
  try {
    const parsed = JSON.parse(schemaStr);
    return JSON.stringify(minimalSchemaObj(parsed));
  } catch {
    return schemaStr;
  }
}
|
|
2804
|
+
var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
|
|
2805
|
+
specificationVersion = "v2";
|
|
2806
|
+
modelId;
|
|
2807
|
+
supportedUrls = {};
|
|
2808
|
+
config;
|
|
2809
|
+
workflowOptions;
|
|
2810
|
+
tokenClient;
|
|
2811
|
+
projectDetector;
|
|
2812
|
+
modelDiscovery;
|
|
2813
|
+
modelCache;
|
|
2814
|
+
// Cached detected project path
|
|
2815
|
+
detectedProjectPath = null;
|
|
2816
|
+
// Workflow ID persisted across turns for multi-turn conversations.
|
|
2817
|
+
// When DWS sends INPUT_REQUIRED, the workflow stays alive server-side.
|
|
2818
|
+
// On the next doStream() call we reuse this ID (skip createWorkflow).
|
|
2819
|
+
currentWorkflowId = null;
|
|
2820
|
+
// Persisted across turns so that cumulative DWS chat logs don't re-emit
|
|
2821
|
+
// messages that were already streamed in a previous doStream() call.
|
|
2822
|
+
persistedAgentEmitted = /* @__PURE__ */ new Map();
|
|
2823
|
+
// Track all active stream clients so stopWorkflow() can stop them all.
|
|
2824
|
+
activeClients = /* @__PURE__ */ new Set();
|
|
2825
|
+
// Cache resolved values to avoid redundant GraphQL calls
|
|
2826
|
+
_selectedModelRef;
|
|
2827
|
+
_selectedModelName;
|
|
2828
|
+
_rootNamespaceId;
|
|
2829
|
+
_discoveryPromise;
|
|
2830
|
+
/**
 * Get the cached selected model ref.
 *
 * @returns The ref, or null when none has been selected/discovered yet.
 */
get selectedModelRef() {
  return this._selectedModelRef ?? null;
}
/**
 * Set the selected model ref (e.g., from an eager discover call).
 * This will be used by resolveModelRef() to skip the picker.
 * Also persists to the file-based workspace cache.
 */
set selectedModelRef(ref) {
  this._selectedModelRef = ref ?? void 0;
  // Persist the new ref together with the currently known display name.
  this.modelCache.saveSelection(ref, this._selectedModelName ?? null);
}
/**
 * Get the cached selected model display name.
 *
 * @returns The display name, or null when unknown.
 */
get selectedModelName() {
  return this._selectedModelName ?? null;
}
/**
 * Set the selected model display name.
 * Also persists to the file-based workspace cache.
 */
set selectedModelName(name) {
  this._selectedModelName = name ?? void 0;
  // Persist the new name together with the currently selected ref.
  this.modelCache.saveSelection(this._selectedModelRef ?? null, name);
}
|
|
2859
|
+
/**
|
|
2860
|
+
* Optional external tool executor. When set, this is called for tool
|
|
2861
|
+
* requests instead of looking up tools from `options.tools`.
|
|
2862
|
+
* This allows the consumer (OpenCode) to wire in its permission system.
|
|
2863
|
+
*
|
|
2864
|
+
* The executor is automatically bound to the async context at the time
|
|
2865
|
+
* it is set, so that AsyncLocalStorage-based contexts (like Instance)
|
|
2866
|
+
* remain available when the executor is invoked from WebSocket callbacks.
|
|
2867
|
+
*/
|
|
2868
|
+
_toolExecutor = null;
|
|
2869
|
+
/**
|
|
2870
|
+
* Optional callback invoked with intermediate token usage estimates
|
|
2871
|
+
* after each tool execution completes. This allows the consumer to
|
|
2872
|
+
* display live token counts during long-running DWS workflows, since
|
|
2873
|
+
* the AI SDK only surfaces usage via finish-step at stream end.
|
|
2874
|
+
*/
|
|
2875
|
+
onUsageUpdate = null;
|
|
2876
|
+
/**
|
|
2877
|
+
* Optional callback invoked when multiple workflow models are available
|
|
2878
|
+
* and the user should pick one. Set per-stream by the host (e.g., OpenCode)
|
|
2879
|
+
* alongside `toolExecutor`. Takes precedence over `workflowOptions.onSelectModel`.
|
|
2880
|
+
*/
|
|
2881
|
+
onSelectModel = null;
|
|
2882
|
+
// Returns the currently registered (context-bound) tool executor, or null.
get toolExecutor() {
  return this._toolExecutor;
}
// Registers (or clears, when null) the external tool executor.
set toolExecutor(executor) {
  if (executor) {
    try {
      // Bind the executor to the async context active right now, so
      // AsyncLocalStorage-based state survives when it is later invoked
      // from WebSocket callbacks (see the field's doc comment above _toolExecutor).
      const { AsyncResource } = require("async_hooks");
      this._toolExecutor = AsyncResource.bind(executor);
    } catch {
      // async_hooks unavailable — fall back to the unbound executor.
      this._toolExecutor = executor;
    }
  } else {
    this._toolExecutor = null;
  }
}
|
|
2897
|
+
/**
 * @param modelId - The workflow model identifier (e.g., 'duo-workflow')
 * @param config - Provider config (instanceUrl, getHeaders, fetch, ...)
 * @param workflowOptions - Optional workflow settings (workingDirectory,
 *   projectId, namespaceId, rootNamespaceId, workflowDefinition, ...)
 */
constructor(modelId, config, workflowOptions = {}) {
  this.modelId = modelId;
  this.config = config;
  this.workflowOptions = workflowOptions;
  const workDir = workflowOptions.workingDirectory ?? process.cwd();
  // File-backed per-workspace cache; seed the in-memory selection from it so
  // a previously chosen model survives process restarts.
  this.modelCache = new GitLabModelCache(workDir, config.instanceUrl);
  const cached = this.modelCache.load();
  if (cached?.selectedModelRef) {
    this._selectedModelRef = cached.selectedModelRef;
  }
  if (cached?.selectedModelName) {
    this._selectedModelName = cached.selectedModelName;
  }
  this.tokenClient = new GitLabWorkflowTokenClient({
    instanceUrl: config.instanceUrl,
    getHeaders: config.getHeaders,
    refreshApiKey: config.refreshApiKey,
    fetch: config.fetch,
    featureFlags: config.featureFlags
  });
  this.projectDetector = new GitLabProjectDetector({
    instanceUrl: config.instanceUrl,
    getHeaders: config.getHeaders,
    fetch: config.fetch
  });
  this.modelDiscovery = new GitLabModelDiscovery({
    instanceUrl: config.instanceUrl,
    getHeaders: config.getHeaders,
    fetch: config.fetch
  });
}
|
|
2928
|
+
/**
 * Provider identifier reported to the AI SDK (taken from the config).
 */
get provider() {
  return this.config.provider;
}
|
|
2931
|
+
/**
|
|
2932
|
+
* Resolve the project ID (path) to use for workflow creation.
|
|
2933
|
+
* Priority: explicit option > auto-detected from git remote > undefined.
|
|
2934
|
+
*/
|
|
2935
|
+
async resolveProjectId() {
|
|
2936
|
+
if (this.workflowOptions.projectId) {
|
|
2937
|
+
return this.workflowOptions.projectId;
|
|
2938
|
+
}
|
|
2939
|
+
if (this.detectedProjectPath) {
|
|
2940
|
+
return this.detectedProjectPath;
|
|
2941
|
+
}
|
|
2942
|
+
const workDir = this.workflowOptions.workingDirectory ?? process.cwd();
|
|
2943
|
+
const project = await this.projectDetector.detectProject(workDir);
|
|
2944
|
+
if (project) {
|
|
2945
|
+
this.detectedProjectPath = project.pathWithNamespace;
|
|
2946
|
+
return project.pathWithNamespace;
|
|
2947
|
+
}
|
|
2948
|
+
return void 0;
|
|
2949
|
+
}
|
|
2950
|
+
/**
|
|
2951
|
+
* Resolve the root namespace GID to use for model discovery.
|
|
2952
|
+
*
|
|
2953
|
+
* Priority:
|
|
2954
|
+
* 1. Explicit `rootNamespaceId` in workflowOptions (caller-provided GID)
|
|
2955
|
+
* 2. Auto-detected from git remote via project detector (namespace.id → GID)
|
|
2956
|
+
* 3. Cached from previous call
|
|
2957
|
+
*/
|
|
2958
|
+
async resolveRootNamespaceId() {
|
|
2959
|
+
if (this.workflowOptions.rootNamespaceId) {
|
|
2960
|
+
return this.workflowOptions.rootNamespaceId;
|
|
2961
|
+
}
|
|
2962
|
+
if (this._rootNamespaceId !== void 0) {
|
|
2963
|
+
return this._rootNamespaceId;
|
|
2964
|
+
}
|
|
2965
|
+
const workDir = this.workflowOptions.workingDirectory ?? process.cwd();
|
|
2966
|
+
const project = await this.projectDetector.detectProject(workDir);
|
|
2967
|
+
if (project?.namespaceId) {
|
|
2968
|
+
const gid = `gid://gitlab/Group/${project.namespaceId}`;
|
|
2969
|
+
this._rootNamespaceId = gid;
|
|
2970
|
+
return gid;
|
|
2971
|
+
}
|
|
2972
|
+
this._rootNamespaceId = null;
|
|
2973
|
+
return null;
|
|
2974
|
+
}
|
|
2975
|
+
/**
|
|
2976
|
+
* Resolve the effective DWS model ref to use for this stream.
|
|
2977
|
+
* Deduplicates concurrent calls via a shared promise.
|
|
2978
|
+
*
|
|
2979
|
+
* Priority for the canonical `duo-workflow` model ID:
|
|
2980
|
+
* 1. Admin-pinned model (from GitLabModelDiscovery) — always wins
|
|
2981
|
+
* 2. User selection via onSelectModel callback (if model switching enabled)
|
|
2982
|
+
* 3. Workspace default model
|
|
2983
|
+
* 4. File-cached discovery/selection — used when live discovery fails
|
|
2984
|
+
* 5. Hard-coded 'default' (DWS decides) — fallback when discovery fails
|
|
2985
|
+
*
|
|
2986
|
+
* For all other `duo-workflow-*` model IDs the static mapping is used as-is.
|
|
2987
|
+
*/
|
|
2988
|
+
async resolveModelRef() {
|
|
2989
|
+
const staticRef = getWorkflowModelRef(this.modelId);
|
|
2990
|
+
if (this.modelId !== "duo-workflow") {
|
|
2991
|
+
return staticRef ?? "default";
|
|
2992
|
+
}
|
|
2993
|
+
if (this._selectedModelRef) {
|
|
2994
|
+
return this._selectedModelRef;
|
|
2995
|
+
}
|
|
2996
|
+
if (!this._discoveryPromise) {
|
|
2997
|
+
this._discoveryPromise = this.doResolveModelRef();
|
|
2998
|
+
this._discoveryPromise.finally(() => {
|
|
2999
|
+
this._discoveryPromise = void 0;
|
|
3000
|
+
});
|
|
3001
|
+
}
|
|
3002
|
+
return this._discoveryPromise;
|
|
3003
|
+
}
|
|
3004
|
+
// Perform the actual model-ref resolution for `duo-workflow`.
// Caches the outcome in _selectedModelRef/_selectedModelName and the
// file-based workspace cache. See resolveModelRef() for the priority order.
async doResolveModelRef() {
  const rootNamespaceId = await this.resolveRootNamespaceId();
  // Without a namespace there is nothing to discover against; let DWS decide.
  if (!rootNamespaceId) {
    this._selectedModelRef = "default";
    return "default";
  }
  try {
    const discovered = await this.modelDiscovery.discover(rootNamespaceId);
    this.modelCache.saveDiscovery(discovered);
    // 1. An admin-pinned model always wins.
    if (discovered.pinnedModel) {
      this._selectedModelRef = discovered.pinnedModel.ref;
      this._selectedModelName = discovered.pinnedModel.name;
      this.modelCache.saveSelection(discovered.pinnedModel.ref, discovered.pinnedModel.name);
      return discovered.pinnedModel.ref;
    }
    // 2. Ask the user via the picker callback (per-stream callback takes
    //    precedence over the one configured in workflowOptions). Only a
    //    selection that still exists in selectableModels is accepted.
    const selectFn = this.onSelectModel ?? this.workflowOptions.onSelectModel;
    if (discovered.selectableModels.length > 0 && selectFn) {
      const selected = await selectFn(discovered.selectableModels);
      if (selected) {
        const match = discovered.selectableModels.find((m) => m.ref === selected);
        if (match) {
          this._selectedModelRef = match.ref;
          this._selectedModelName = match.name;
          this.modelCache.saveSelection(match.ref, match.name);
          return match.ref;
        }
      }
    }
    // 3. Fall back to the workspace default model.
    if (discovered.defaultModel) {
      this._selectedModelRef = discovered.defaultModel.ref;
      this._selectedModelName = discovered.defaultModel.name;
      this.modelCache.saveSelection(discovered.defaultModel.ref, discovered.defaultModel.name);
      return discovered.defaultModel.ref;
    }
  } catch {
    // 4. Live discovery failed — reuse whatever the file cache holds.
    const cachedEntry = this.modelCache.load();
    if (cachedEntry?.selectedModelRef) {
      this._selectedModelRef = cachedEntry.selectedModelRef;
      this._selectedModelName = cachedEntry.selectedModelName ?? void 0;
      return cachedEntry.selectedModelRef;
    }
  }
  // 5. Last resort: let DWS pick.
  this._selectedModelRef = "default";
  return "default";
}
|
|
3049
|
+
/**
 * Pre-fetch available models for the workspace.
 * Call this early (e.g., on IDE startup) to avoid blocking the first stream.
 * Results are persisted to the workspace model cache.
 *
 * @param rootNamespaceId - GitLab group ID (e.g., 'gid://gitlab/Group/12345')
 * @returns Discovered models with default, selectable, and pinned models
 */
async discoverModels(rootNamespaceId) {
  const result = await this.modelDiscovery.discover(rootNamespaceId);
  // Persist so later sessions can fall back to it if live discovery fails.
  this.modelCache.saveDiscovery(result);
  return result;
}
|
|
3062
|
+
/**
 * Get the file-based model cache instance for this workspace.
 * Useful for consumers that need direct cache access (e.g., the discover route).
 */
getModelCache() {
  return this.modelCache;
}
|
|
3069
|
+
/**
|
|
3070
|
+
* Stop the active workflow.
|
|
3071
|
+
*/
|
|
3072
|
+
stopWorkflow() {
|
|
3073
|
+
for (const client of this.activeClients) {
|
|
3074
|
+
if (client.isConnected) {
|
|
3075
|
+
client.stop();
|
|
3076
|
+
}
|
|
3077
|
+
}
|
|
3078
|
+
}
|
|
3079
|
+
/**
 * Reset the workflow state, forcing a new workflow to be created on the
 * next doStream() call. Call this when starting a new conversation.
 */
resetWorkflow() {
  this.currentWorkflowId = null;
  // Drop the cross-turn dedup state so a fresh workflow streams everything.
  this.persistedAgentEmitted.clear();
}
|
|
3087
|
+
/**
 * Get the current workflow ID (if any).
 * Useful for consumers that need to track workflow state.
 *
 * @returns The workflow ID, or null before the first doStream()/after reset.
 */
get workflowId() {
  return this.currentWorkflowId;
}
|
|
3094
|
+
// ---------------------------------------------------------------------------
// LanguageModelV2 — doGenerate (non-streaming)
// ---------------------------------------------------------------------------
/**
 * Non-streaming generation: drives doStream() to completion and collects
 * the emitted stream parts into a single result (text, tool calls, finish
 * reason, usage). An `error` part is rethrown to the caller.
 */
async doGenerate(options) {
  const { stream } = await this.doStream(options);
  const reader = stream.getReader();
  const textParts = [];
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      switch (value.type) {
        case "text-delta":
          textParts.push(value.delta);
          break;
        case "tool-call":
          toolCalls.push({
            type: "tool-call",
            toolCallId: value.toolCallId,
            toolName: value.toolName,
            input: value.input
          });
          break;
        case "finish":
          // Last finish part wins; usage may be absent.
          finishReason = value.finishReason;
          if (value.usage) {
            usage.inputTokens = value.usage.inputTokens ?? 0;
            usage.outputTokens = value.usage.outputTokens ?? 0;
            usage.totalTokens = value.usage.totalTokens ?? 0;
          }
          break;
        case "error":
          throw value.error;
      }
    }
  } finally {
    // Always release the reader, even when an `error` part is rethrown.
    reader.releaseLock();
  }
  const content = [];
  const fullText = textParts.join("");
  if (fullText) {
    content.push({ type: "text", text: fullText });
  }
  content.push(...toolCalls);
  return { content, finishReason, usage, warnings: [] };
}
|
|
3143
|
+
// ---------------------------------------------------------------------------
|
|
3144
|
+
// LanguageModelV2 — doStream (streaming)
|
|
3145
|
+
// ---------------------------------------------------------------------------
|
|
3146
|
+
async doStream(options) {
|
|
3147
|
+
const goal = this.extractGoalFromPrompt(options.prompt);
|
|
3148
|
+
const modelRef = await this.resolveModelRef();
|
|
3149
|
+
const mcpTools = this.extractMcpTools(options);
|
|
3150
|
+
const preapprovedTools = this.workflowOptions.preapprovedTools ?? mcpTools.map((t) => t.name);
|
|
3151
|
+
const additionalContext = this.buildAdditionalContext(options.prompt);
|
|
3152
|
+
const toolExecutor = this.toolExecutor ?? null;
|
|
3153
|
+
await this.tokenClient.getToken(
|
|
3154
|
+
this.workflowOptions.workflowDefinition ?? DEFAULT_WORKFLOW_DEFINITION,
|
|
3155
|
+
this.workflowOptions.rootNamespaceId
|
|
3156
|
+
);
|
|
3157
|
+
const projectId = await this.resolveProjectId();
|
|
3158
|
+
let workflowId;
|
|
3159
|
+
if (this.currentWorkflowId) {
|
|
3160
|
+
workflowId = this.currentWorkflowId;
|
|
3161
|
+
} else {
|
|
3162
|
+
workflowId = await this.tokenClient.createWorkflow(goal, {
|
|
3163
|
+
projectId,
|
|
3164
|
+
namespaceId: this.workflowOptions.namespaceId,
|
|
3165
|
+
workflowDefinition: this.workflowOptions.workflowDefinition
|
|
3166
|
+
});
|
|
3167
|
+
this.currentWorkflowId = workflowId;
|
|
3168
|
+
}
|
|
3169
|
+
const wsClient = new GitLabWorkflowClient();
|
|
3170
|
+
this.activeClients.add(wsClient);
|
|
3171
|
+
let textBlockCounter = 0;
|
|
3172
|
+
const ss = {
|
|
3173
|
+
streamClosed: false,
|
|
3174
|
+
streamedInputChars: 0,
|
|
3175
|
+
streamedOutputChars: 0,
|
|
3176
|
+
pendingToolCount: 0,
|
|
3177
|
+
deferredClose: null,
|
|
3178
|
+
activeTextBlockId: null,
|
|
3179
|
+
agentMessageEmitted: new Map(this.persistedAgentEmitted),
|
|
3180
|
+
currentAgentMessageId: "",
|
|
3181
|
+
activeClient: wsClient
|
|
3182
|
+
};
|
|
3183
|
+
for (const msg of options.prompt) {
|
|
3184
|
+
if (msg.role === "system") {
|
|
3185
|
+
ss.streamedInputChars += msg.content.length;
|
|
3186
|
+
} else if (msg.role === "user") {
|
|
3187
|
+
for (const part of msg.content) {
|
|
3188
|
+
if (part.type === "text") {
|
|
3189
|
+
ss.streamedInputChars += part.text.length;
|
|
3190
|
+
}
|
|
3191
|
+
}
|
|
3192
|
+
}
|
|
3193
|
+
}
|
|
3194
|
+
const stream = new ReadableStream({
|
|
3195
|
+
start: async (controller) => {
|
|
3196
|
+
try {
|
|
3197
|
+
await wsClient.connect(
|
|
3198
|
+
{
|
|
3199
|
+
instanceUrl: this.config.instanceUrl,
|
|
3200
|
+
modelRef,
|
|
3201
|
+
headers: this.config.getHeaders(),
|
|
3202
|
+
projectId: this.workflowOptions.projectId,
|
|
3203
|
+
namespaceId: this.workflowOptions.namespaceId,
|
|
3204
|
+
rootNamespaceId: this.workflowOptions.rootNamespaceId
|
|
3205
|
+
},
|
|
3206
|
+
(event) => {
|
|
3207
|
+
this.handleWorkflowEvent(
|
|
3208
|
+
ss,
|
|
3209
|
+
event,
|
|
3210
|
+
controller,
|
|
3211
|
+
wsClient,
|
|
3212
|
+
toolExecutor,
|
|
3213
|
+
() => `text-${textBlockCounter++}`
|
|
3214
|
+
);
|
|
3215
|
+
}
|
|
3216
|
+
);
|
|
3217
|
+
const workflowDef = this.workflowOptions.workflowDefinition ?? DEFAULT_WORKFLOW_DEFINITION;
|
|
3218
|
+
const capabilities = this.workflowOptions.clientCapabilities ?? DEFAULT_CLIENT_CAPABILITIES;
|
|
3219
|
+
const workflowMetadata = await this.buildWorkflowMetadata();
|
|
3220
|
+
const metadataStr = JSON.stringify(workflowMetadata);
|
|
3221
|
+
const basePayload = {
|
|
3222
|
+
workflowID: workflowId,
|
|
3223
|
+
clientVersion: CLIENT_VERSION,
|
|
3224
|
+
workflowDefinition: workflowDef,
|
|
3225
|
+
goal,
|
|
3226
|
+
workflowMetadata: metadataStr,
|
|
3227
|
+
clientCapabilities: capabilities,
|
|
3228
|
+
preapproved_tools: preapprovedTools
|
|
3229
|
+
};
|
|
3230
|
+
const baseSize = JSON.stringify(basePayload).length + 100;
|
|
3231
|
+
const trimmed = this.trimPayload(mcpTools, additionalContext, baseSize);
|
|
3232
|
+
const trimmedPreapproved = preapprovedTools.filter(
|
|
3233
|
+
(name) => trimmed.mcpTools.some((t) => t.name === name)
|
|
3234
|
+
);
|
|
3235
|
+
const startReq = {
|
|
3236
|
+
workflowID: workflowId,
|
|
3237
|
+
clientVersion: CLIENT_VERSION,
|
|
3238
|
+
workflowDefinition: workflowDef,
|
|
3239
|
+
goal,
|
|
3240
|
+
workflowMetadata: metadataStr,
|
|
3241
|
+
additional_context: trimmed.additionalContext,
|
|
3242
|
+
clientCapabilities: capabilities,
|
|
3243
|
+
mcpTools: trimmed.mcpTools,
|
|
3244
|
+
preapproved_tools: trimmedPreapproved
|
|
3245
|
+
};
|
|
3246
|
+
if (this.workflowOptions.flowConfig) {
|
|
3247
|
+
startReq.flowConfig = this.workflowOptions.flowConfig;
|
|
3248
|
+
}
|
|
3249
|
+
if (this.workflowOptions.flowConfigSchemaVersion) {
|
|
3250
|
+
startReq.flowConfigSchemaVersion = this.workflowOptions.flowConfigSchemaVersion;
|
|
3251
|
+
}
|
|
3252
|
+
wsClient.sendStartRequest(startReq);
|
|
3253
|
+
controller.enqueue({
|
|
3254
|
+
type: "stream-start",
|
|
3255
|
+
warnings: []
|
|
3256
|
+
});
|
|
3257
|
+
controller.enqueue({
|
|
3258
|
+
type: "response-metadata",
|
|
3259
|
+
id: workflowId,
|
|
3260
|
+
modelId: modelRef
|
|
3261
|
+
});
|
|
3262
|
+
} catch (error) {
|
|
3263
|
+
if (!ss.streamClosed) {
|
|
3264
|
+
controller.enqueue({
|
|
3265
|
+
type: "error",
|
|
3266
|
+
error: error instanceof GitLabError ? error : new GitLabError({
|
|
3267
|
+
message: `Workflow connection failed: ${error}`,
|
|
3268
|
+
cause: error
|
|
3269
|
+
})
|
|
3270
|
+
});
|
|
3271
|
+
ss.streamClosed = true;
|
|
3272
|
+
controller.close();
|
|
3273
|
+
}
|
|
3274
|
+
}
|
|
3275
|
+
},
|
|
3276
|
+
cancel: (_reason) => {
|
|
3277
|
+
wsClient.stop();
|
|
3278
|
+
wsClient.close();
|
|
3279
|
+
this.activeClients.delete(wsClient);
|
|
3280
|
+
ss.activeClient = null;
|
|
3281
|
+
this.currentWorkflowId = null;
|
|
3282
|
+
}
|
|
3283
|
+
});
|
|
3284
|
+
return {
|
|
3285
|
+
stream,
|
|
3286
|
+
request: {
|
|
3287
|
+
body: { workflowId, modelRef, goal }
|
|
3288
|
+
}
|
|
3289
|
+
};
|
|
3290
|
+
}
|
|
3291
|
+
// ---------------------------------------------------------------------------
|
|
3292
|
+
// Event handling
|
|
3293
|
+
// ---------------------------------------------------------------------------
|
|
3294
|
+
/**
 * Dispatch one workflow event into AI SDK stream parts.
 *
 * State rules maintained here:
 *  - any open text block is ended before tool parts or terminal parts;
 *  - `completed` / `closed` never close the stream while tool executions are
 *    pending — the close is parked in `ss.deferredClose` and fired by
 *    executeToolAndRespond's finally block;
 *  - once `ss.streamClosed` is set, all further events are ignored.
 *
 * @param ss           per-stream session state created in doStream
 * @param event        workflow event from the WebSocket client
 * @param controller   ReadableStream controller for the AI SDK part stream
 * @param wsClient     WebSocket client used to answer tool requests
 * @param toolExecutor caller-supplied tool executor (or null)
 * @param nextTextId   factory for fresh text block ids
 */
handleWorkflowEvent(ss, event, controller, wsClient, toolExecutor, nextTextId) {
  if (ss.streamClosed) {
    return;
  }
  switch (event.type) {
    case "checkpoint": {
      // Text content is extracted from checkpoints separately.
      this.processCheckpoint(ss, event.data, controller, nextTextId);
      break;
    }
    case "tool-request": {
      const { requestID, data } = event;
      let parsedArgs;
      // Validate that the args are JSON; fall back to the raw string (or "{}") otherwise.
      try {
        JSON.parse(data.args);
        parsedArgs = data.args;
      } catch {
        parsedArgs = data.args || "{}";
      }
      // Close any open text block before emitting tool parts.
      if (ss.activeTextBlockId) {
        controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
        ss.activeTextBlockId = null;
      }
      // Emit the full tool-input lifecycle followed by the tool-call part.
      controller.enqueue({
        type: "tool-input-start",
        id: requestID,
        toolName: data.name,
        providerExecuted: true
      });
      controller.enqueue({
        type: "tool-input-delta",
        id: requestID,
        delta: parsedArgs
      });
      controller.enqueue({
        type: "tool-input-end",
        id: requestID
      });
      controller.enqueue({
        type: "tool-call",
        toolCallId: requestID,
        toolName: data.name,
        input: parsedArgs,
        providerExecuted: true
      });
      // Fire-and-forget: executeToolAndRespond handles its own errors and
      // sends the response back over the WebSocket.
      this.executeToolAndRespond(
        ss,
        wsClient,
        controller,
        requestID,
        data.name,
        parsedArgs,
        toolExecutor
      ).catch(() => {
      });
      break;
    }
    case "builtin-tool-request": {
      // Built-in tools are first mapped onto the client-side tool namespace.
      const mapped = mapBuiltinTool(event.toolName, event.data);
      const mappedArgs = JSON.stringify(mapped.args);
      if (ss.activeTextBlockId) {
        controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
        ss.activeTextBlockId = null;
      }
      controller.enqueue({
        type: "tool-input-start",
        id: event.requestID,
        toolName: mapped.toolName,
        providerExecuted: true
      });
      controller.enqueue({
        type: "tool-input-delta",
        id: event.requestID,
        delta: mappedArgs
      });
      controller.enqueue({
        type: "tool-input-end",
        id: event.requestID
      });
      controller.enqueue({
        type: "tool-call",
        toolCallId: event.requestID,
        toolName: mapped.toolName,
        input: mappedArgs,
        providerExecuted: true
      });
      this.executeToolAndRespond(
        ss,
        wsClient,
        controller,
        event.requestID,
        mapped.toolName,
        mappedArgs,
        toolExecutor
      ).catch(() => {
      });
      break;
    }
    case "completed": {
      if (ss.activeTextBlockId) {
        controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
        ss.activeTextBlockId = null;
      }
      // Finish with usage estimated at ~4 characters per token.
      const doCompleteClose = () => {
        if (ss.streamClosed) return;
        const inputTokens = Math.ceil(ss.streamedInputChars / 4);
        const outputTokens = Math.ceil(ss.streamedOutputChars / 4);
        controller.enqueue({
          type: "finish",
          finishReason: "stop",
          usage: { inputTokens, outputTokens, totalTokens: inputTokens + outputTokens }
        });
        ss.streamClosed = true;
        controller.close();
        this.cleanupClient(ss);
      };
      // Defer the close until in-flight tools have reported their results.
      if (ss.pendingToolCount > 0) {
        ss.deferredClose = doCompleteClose;
      } else {
        ss.deferredClose = null;
        doCompleteClose();
      }
      break;
    }
    case "failed": {
      if (ss.activeTextBlockId) {
        controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
        ss.activeTextBlockId = null;
      }
      controller.enqueue({
        type: "error",
        error: new GitLabError({
          message: `Workflow failed: ${sanitizeErrorMessage(event.error.message)}`,
          cause: event.error
        })
      });
      ss.streamClosed = true;
      controller.close();
      // true: also forget the workflow id and replay bookkeeping.
      this.cleanupClient(ss, true);
      break;
    }
    case "closed": {
      if (ss.streamClosed) {
        break;
      }
      if (ss.activeTextBlockId) {
        controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
        ss.activeTextBlockId = null;
      }
      const doClose = () => {
        if (ss.streamClosed) return;
        // 1000 (1e3) is the WebSocket "normal closure" code; anything else is an error.
        if (event.code !== 1e3) {
          controller.enqueue({
            type: "error",
            error: new GitLabError({
              message: `WebSocket closed unexpectedly: code=${event.code} reason=${sanitizeErrorMessage(event.reason)}`,
              statusCode: event.code
            })
          });
          ss.streamClosed = true;
          controller.close();
          this.cleanupClient(ss, true);
        } else {
          const inTok = Math.ceil(ss.streamedInputChars / 4);
          const outTok = Math.ceil(ss.streamedOutputChars / 4);
          controller.enqueue({
            type: "finish",
            finishReason: "stop",
            usage: { inputTokens: inTok, outputTokens: outTok, totalTokens: inTok + outTok }
          });
          ss.streamClosed = true;
          controller.close();
          this.cleanupClient(ss);
        }
      };
      if (ss.pendingToolCount > 0) {
        ss.deferredClose = doClose;
      } else {
        ss.deferredClose = null;
        doClose();
      }
      break;
    }
  }
}
|
|
3478
|
+
// ---------------------------------------------------------------------------
|
|
3479
|
+
// Checkpoint content extraction
|
|
3480
|
+
// ---------------------------------------------------------------------------
|
|
3481
|
+
/**
 * Translate a workflow checkpoint event into text stream parts.
 *
 * Two payload shapes arrive here:
 *  - streaming chunks carrying only `content` (no serialized checkpoint):
 *    forwarded directly as text deltas;
 *  - full checkpoints with a JSON `checkpoint` string: agent messages are
 *    read from `channel_values.ui_chat_log` and only the not-yet-emitted
 *    suffix of each message is streamed. Emitted lengths are tracked per
 *    message id in `ss.agentMessageEmitted` (and mirrored into
 *    `this.persistedAgentEmitted` to survive reconnects), so replayed
 *    checkpoints never duplicate text.
 */
processCheckpoint(ss, checkpoint, controller, nextTextId) {
  if (!checkpoint.checkpoint) {
    // Plain streaming chunk: open a text block if needed and forward the content.
    if (checkpoint.content) {
      if (!ss.activeTextBlockId) {
        ss.activeTextBlockId = nextTextId();
        controller.enqueue({ type: "text-start", id: ss.activeTextBlockId });
      }
      controller.enqueue({
        type: "text-delta",
        id: ss.activeTextBlockId,
        delta: checkpoint.content
      });
      ss.streamedOutputChars += checkpoint.content.length;
    }
    return;
  }
  let parsed;
  try {
    parsed = JSON.parse(checkpoint.checkpoint);
  } catch (e) {
    // Unparseable checkpoint payloads are silently skipped.
    return;
  }
  const chatLog = parsed.channel_values?.ui_chat_log;
  if (!chatLog || !Array.isArray(chatLog) || chatLog.length === 0) {
    return;
  }
  // Only emit for these workflow statuses; everything else is ignored.
  if (checkpoint.status !== "RUNNING" && checkpoint.status !== "INPUT_REQUIRED" && checkpoint.status !== "FINISHED" && checkpoint.status !== "COMPLETED") {
    return;
  }
  for (let i = 0; i < chatLog.length; i++) {
    const entry = chatLog[i];
    if (entry.message_type !== "agent") continue;
    const content = entry.content || "";
    // Entries without a message_id fall back to a position-based key.
    const msgId = entry.message_id || `idx-${i}`;
    const emittedLen = ss.agentMessageEmitted.get(msgId) ?? 0;
    // Nothing new for this message in this checkpoint.
    if (content.length <= emittedLen) continue;
    const delta = content.slice(emittedLen);
    const isSameMsg = msgId === ss.currentAgentMessageId;
    // A different agent message starts a fresh text block.
    if (!isSameMsg && ss.activeTextBlockId) {
      controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
      ss.activeTextBlockId = null;
    }
    if (!ss.activeTextBlockId) {
      ss.activeTextBlockId = nextTextId();
      controller.enqueue({ type: "text-start", id: ss.activeTextBlockId });
    }
    controller.enqueue({
      type: "text-delta",
      id: ss.activeTextBlockId,
      delta
    });
    ss.streamedOutputChars += delta.length;
    ss.agentMessageEmitted.set(msgId, content.length);
    this.persistedAgentEmitted.set(msgId, content.length);
    ss.currentAgentMessageId = msgId;
  }
}
|
|
3538
|
+
/**
 * Execute a provider-side tool request, send the result back to the
 * workflow over the WebSocket, and mirror it into the AI SDK stream as a
 * `tool-result` part.
 *
 * Increments `ss.pendingToolCount` for its duration so that a
 * `completed`/`closed` event arriving mid-execution only closes the stream
 * after the last tool finishes (the parked `ss.deferredClose` is fired
 * from the finally block here).
 *
 * @param requestID    tool request id; reused as the AI SDK toolCallId
 * @param argsJson     JSON-encoded tool arguments
 * @param toolExecutor caller-supplied executor, or null when not configured
 */
async executeToolAndRespond(ss, wsClient, controller, requestID, toolName, argsJson, toolExecutor) {
  ss.pendingToolCount++;
  // Enqueue that tolerates a closed/errored stream (the tool may outlive it).
  const safeEnqueue = (part) => {
    if (ss.streamClosed) {
      return;
    }
    try {
      controller.enqueue(part);
    } catch {
    }
  };
  try {
    if (toolExecutor) {
      const result = await toolExecutor(toolName, argsJson, requestID);
      // Always report back to the workflow first, even if the result is an error.
      wsClient.sendActionResponse(requestID, result.result, result.error);
      ss.streamedInputChars += argsJson.length;
      ss.streamedOutputChars += result.result.length;
      // Defaults when the result is not structured JSON.
      let toolOutput = result.result;
      let toolTitle = `${toolName} result`;
      let toolMetadata = { output: result.result };
      // Best-effort: if the result parses as JSON with {output, title, metadata},
      // lift those fields into the tool-result part.
      try {
        const parsed = JSON.parse(result.result);
        if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) {
          if (typeof parsed.output === "string") {
            toolOutput = parsed.output;
          } else if (parsed.output != null) {
            toolOutput = JSON.stringify(parsed.output);
          }
          if (typeof parsed.title === "string") toolTitle = parsed.title;
          if (parsed.metadata && typeof parsed.metadata === "object") {
            // Metadata values are coerced to strings.
            toolMetadata = {};
            for (const [k, v] of Object.entries(parsed.metadata)) {
              toolMetadata[k] = typeof v === "string" ? v : JSON.stringify(v);
            }
            if (!("output" in toolMetadata)) {
              toolMetadata.output = toolOutput;
            }
          }
        } else if (Array.isArray(parsed)) {
          toolOutput = JSON.stringify(parsed);
          toolMetadata = { output: toolOutput };
        }
      } catch {
      }
      if (result.error) {
        // Normalize the executor-reported error into a string.
        let errorText;
        if (typeof result.error === "string") {
          errorText = result.error;
        } else if (result.error && typeof result.error === "object") {
          errorText = JSON.stringify(result.error);
        } else {
          errorText = String(result.error);
        }
        const errorOutput = toolOutput || errorText;
        safeEnqueue({
          type: "tool-result",
          toolCallId: requestID,
          toolName,
          result: {
            output: errorOutput,
            title: toolTitle,
            metadata: { ...toolMetadata, error: errorText }
          },
          isError: true,
          providerExecuted: true
        });
      } else {
        safeEnqueue({
          type: "tool-result",
          toolCallId: requestID,
          toolName,
          result: {
            output: toolOutput,
            title: toolTitle,
            metadata: toolMetadata
          },
          isError: false,
          providerExecuted: true
        });
      }
    } else {
      // No executor configured: answer the workflow with an error so it can proceed.
      const errorMsg = `Tool executor not configured for tool: ${toolName}`;
      wsClient.sendActionResponse(requestID, "", errorMsg);
      safeEnqueue({
        type: "tool-result",
        toolCallId: requestID,
        toolName,
        result: {
          output: errorMsg,
          title: `${toolName} error`,
          metadata: { output: errorMsg }
        },
        isError: true,
        providerExecuted: true
      });
    }
  } catch (error) {
    // Executor threw: sanitize the message, report it to the workflow and stream.
    const rawMsg = error instanceof Error ? error.message : String(error);
    const errorMsg = sanitizeErrorMessage(rawMsg);
    wsClient.sendActionResponse(requestID, "", errorMsg);
    safeEnqueue({
      type: "tool-result",
      toolCallId: requestID,
      toolName,
      result: {
        output: errorMsg,
        title: `${toolName} error`,
        metadata: { output: errorMsg }
      },
      isError: true,
      providerExecuted: true
    });
  } finally {
    ss.pendingToolCount--;
    // Push an updated usage estimate to the optional callback (best-effort).
    if (this.onUsageUpdate) {
      try {
        this.onUsageUpdate({
          inputTokens: Math.ceil(ss.streamedInputChars / 4),
          outputTokens: Math.ceil(ss.streamedOutputChars / 4)
        });
      } catch {
      }
    }
    // Fire a close that was deferred while this (and sibling) tools ran.
    if (ss.pendingToolCount <= 0 && ss.deferredClose) {
      const close = ss.deferredClose;
      ss.deferredClose = null;
      close();
    }
  }
}
|
|
3668
|
+
cleanupClient(ss, clearWorkflow = false) {
|
|
3669
|
+
if (ss.activeClient) {
|
|
3670
|
+
ss.activeClient.close();
|
|
3671
|
+
this.activeClients.delete(ss.activeClient);
|
|
3672
|
+
ss.activeClient = null;
|
|
3673
|
+
}
|
|
3674
|
+
if (clearWorkflow) {
|
|
3675
|
+
this.currentWorkflowId = null;
|
|
3676
|
+
this.persistedAgentEmitted.clear();
|
|
3677
|
+
}
|
|
3678
|
+
}
|
|
3679
|
+
// ---------------------------------------------------------------------------
|
|
3680
|
+
// Workflow metadata
|
|
3681
|
+
// ---------------------------------------------------------------------------
|
|
3682
|
+
async buildWorkflowMetadata() {
|
|
3683
|
+
const metadata = {
|
|
3684
|
+
extended_logging: false
|
|
3685
|
+
};
|
|
3686
|
+
try {
|
|
3687
|
+
const workDir = this.workflowOptions.workingDirectory ?? process.cwd();
|
|
3688
|
+
const gitInfo = await this.getGitInfo(workDir);
|
|
3689
|
+
if (gitInfo.url) metadata.git_url = gitInfo.url;
|
|
3690
|
+
if (gitInfo.sha) metadata.git_sha = gitInfo.sha;
|
|
3691
|
+
if (gitInfo.branch) metadata.git_branch = gitInfo.branch;
|
|
3692
|
+
} catch {
|
|
3693
|
+
}
|
|
3694
|
+
return metadata;
|
|
3695
|
+
}
|
|
3696
|
+
async getGitInfo(workDir) {
|
|
3697
|
+
const { execFile } = await import("child_process");
|
|
3698
|
+
const { promisify } = await import("util");
|
|
3699
|
+
const execFileAsync = promisify(execFile);
|
|
3700
|
+
const opts = { cwd: workDir, timeout: 3e3 };
|
|
3701
|
+
const run = async (cmd, args) => {
|
|
3702
|
+
try {
|
|
3703
|
+
const { stdout } = await execFileAsync(cmd, args, opts);
|
|
3704
|
+
return stdout.trim() || void 0;
|
|
3705
|
+
} catch {
|
|
3706
|
+
return void 0;
|
|
3707
|
+
}
|
|
3708
|
+
};
|
|
3709
|
+
const [url, sha, branch] = await Promise.all([
|
|
3710
|
+
run("git", ["remote", "get-url", "origin"]),
|
|
3711
|
+
run("git", ["rev-parse", "HEAD"]),
|
|
3712
|
+
run("git", ["rev-parse", "--abbrev-ref", "HEAD"])
|
|
3713
|
+
]);
|
|
3714
|
+
return { url, sha, branch };
|
|
3715
|
+
}
|
|
3716
|
+
// ---------------------------------------------------------------------------
|
|
3717
|
+
// Prompt / tool extraction helpers
|
|
3718
|
+
// ---------------------------------------------------------------------------
|
|
3719
|
+
/**
|
|
3720
|
+
* Extract the user's goal (last user message) from the AI SDK prompt.
|
|
3721
|
+
*/
|
|
3722
|
+
extractGoalFromPrompt(prompt) {
|
|
3723
|
+
for (let i = prompt.length - 1; i >= 0; i--) {
|
|
3724
|
+
const message = prompt[i];
|
|
3725
|
+
if (message.role === "user") {
|
|
3726
|
+
const textParts = message.content.filter((part) => part.type === "text").map((part) => part.text);
|
|
3727
|
+
if (textParts.length > 0) {
|
|
3728
|
+
return textParts.join("\n");
|
|
3729
|
+
}
|
|
3730
|
+
}
|
|
3731
|
+
}
|
|
3732
|
+
return "";
|
|
3733
|
+
}
|
|
3734
|
+
/**
|
|
3735
|
+
* Convert AI SDK tools to DWS McpToolDefinition format.
|
|
3736
|
+
*/
|
|
3737
|
+
extractMcpTools(options) {
|
|
3738
|
+
if (this.workflowOptions.mcpTools && this.workflowOptions.mcpTools.length > 0) {
|
|
3739
|
+
return this.workflowOptions.mcpTools;
|
|
3740
|
+
}
|
|
3741
|
+
if (!options.tools || options.tools.length === 0) {
|
|
3742
|
+
return [];
|
|
3743
|
+
}
|
|
3744
|
+
return options.tools.filter((tool) => tool.type === "function").map((tool) => ({
|
|
3745
|
+
name: tool.name,
|
|
3746
|
+
description: tool.description || "",
|
|
3747
|
+
inputSchema: JSON.stringify(tool.inputSchema || { type: "object", properties: {} })
|
|
3748
|
+
}));
|
|
3749
|
+
}
|
|
3750
|
+
// ---------------------------------------------------------------------------
|
|
3751
|
+
// Payload size management
|
|
3752
|
+
// ---------------------------------------------------------------------------
|
|
3753
|
+
// Hard cap on the serialized start request: 4 MiB, matching the DWS gRPC
// message size limit referenced in the trimPayload documentation below.
static MAX_START_REQUEST_BYTES = 4 * 1024 * 1024;
|
|
3754
|
+
/**
|
|
3755
|
+
* Trim mcpTools and additionalContext to fit within the DWS 4MB gRPC
|
|
3756
|
+
* message size limit (`MAX_MESSAGE_SIZE` in duo_workflow_service/server.py).
|
|
3757
|
+
*
|
|
3758
|
+
* DWS has no per-field limits on tool descriptions, schemas, or context items.
|
|
3759
|
+
* The only hard constraint is the total serialized message size.
|
|
3760
|
+
*
|
|
3761
|
+
* Strategy (progressive, only if over budget):
|
|
3762
|
+
* 1. Send everything as-is
|
|
3763
|
+
* 2. Simplify tool input schemas (strip descriptions from properties)
|
|
3764
|
+
* 3. Strip schemas to minimal form (type + property names only)
|
|
3765
|
+
* 4. Drop tools from the end until it fits
|
|
3766
|
+
*/
|
|
3767
|
+
trimPayload(mcpTools, additionalContext, basePayloadSize) {
|
|
3768
|
+
const budget = _GitLabWorkflowLanguageModel.MAX_START_REQUEST_BYTES - basePayloadSize;
|
|
3769
|
+
const contextJson = JSON.stringify(additionalContext);
|
|
3770
|
+
const toolsJson = JSON.stringify(mcpTools);
|
|
3771
|
+
const totalSize = toolsJson.length + contextJson.length;
|
|
3772
|
+
if (totalSize <= budget) {
|
|
3773
|
+
return { mcpTools, additionalContext };
|
|
3774
|
+
}
|
|
3775
|
+
const simplifiedTools = mcpTools.map((tool) => ({
|
|
3776
|
+
name: tool.name,
|
|
3777
|
+
description: tool.description,
|
|
3778
|
+
inputSchema: simplifySchema(tool.inputSchema)
|
|
3779
|
+
}));
|
|
3780
|
+
const simpSize = JSON.stringify(simplifiedTools).length + contextJson.length;
|
|
3781
|
+
if (simpSize <= budget) {
|
|
3782
|
+
return { mcpTools: simplifiedTools, additionalContext };
|
|
3783
|
+
}
|
|
3784
|
+
const minTools = simplifiedTools.map((tool) => ({
|
|
3785
|
+
name: tool.name,
|
|
3786
|
+
description: tool.description,
|
|
3787
|
+
inputSchema: minimalSchema(tool.inputSchema)
|
|
3788
|
+
}));
|
|
3789
|
+
const minSize = JSON.stringify(minTools).length + contextJson.length;
|
|
3790
|
+
if (minSize <= budget) {
|
|
3791
|
+
return { mcpTools: minTools, additionalContext };
|
|
3792
|
+
}
|
|
3793
|
+
const keptTools = [...minTools];
|
|
3794
|
+
while (keptTools.length > 0) {
|
|
3795
|
+
const currentSize = JSON.stringify(keptTools).length + contextJson.length;
|
|
3796
|
+
if (currentSize <= budget) break;
|
|
3797
|
+
keptTools.pop();
|
|
3798
|
+
}
|
|
3799
|
+
return { mcpTools: keptTools, additionalContext };
|
|
3800
|
+
}
|
|
3801
|
+
buildAdditionalContext(prompt) {
|
|
3802
|
+
const context = [];
|
|
3803
|
+
if (this.workflowOptions.additionalContext) {
|
|
3804
|
+
context.push(...this.workflowOptions.additionalContext);
|
|
3805
|
+
}
|
|
3806
|
+
for (const message of prompt) {
|
|
3807
|
+
if (message.role === "system") {
|
|
3808
|
+
context.push({
|
|
3809
|
+
category: "system_prompt",
|
|
3810
|
+
content: message.content,
|
|
3811
|
+
metadata: JSON.stringify({ role: "system" })
|
|
3812
|
+
});
|
|
3813
|
+
} else if (message.role === "assistant") {
|
|
3814
|
+
const textContent = message.content.filter((part) => part.type === "text").map((part) => part.text).join("\n");
|
|
3815
|
+
if (textContent) {
|
|
3816
|
+
context.push({
|
|
3817
|
+
category: "conversation",
|
|
3818
|
+
content: textContent,
|
|
3819
|
+
metadata: JSON.stringify({ role: "assistant" })
|
|
3820
|
+
});
|
|
3821
|
+
}
|
|
3822
|
+
}
|
|
3823
|
+
}
|
|
3824
|
+
return context;
|
|
3825
|
+
}
|
|
3826
|
+
};
|
|
3827
|
+
|
|
3828
|
+
// src/gitlab-oauth-types.ts
// OAuth application client ID used for GitLab.com (opencode-gitlab-auth).
var OPENCODE_GITLAB_AUTH_CLIENT_ID = "1d89f9fdb23ee96d4e603201f6861dab6e143c5c3c00469a018a2d94bdc03d4e";
// Second bundled client ID; presumably an alternative OAuth application — confirm where it is consumed.
var BUNDLED_CLIENT_ID = "36f2a70cddeb5a0889d4fd8295c241b7e9848e89cf9e599d0eed2d8e5350fbf5";
var GITLAB_COM_URL = "https://gitlab.com";
// Treat tokens as needing refresh this long before actual expiry: 5 minutes, in milliseconds.
var TOKEN_EXPIRY_SKEW_MS = 5 * 60 * 1e3;
// OAuth scopes requested during authorization.
var OAUTH_SCOPES = ["api"];
|
|
3834
|
+
|
|
3835
|
+
// src/gitlab-oauth-manager.ts
|
|
3836
|
+
var GitLabOAuthManager = class {
  /** fetch implementation used for token requests (injectable for testing). */
  fetch;
  constructor(fetchImpl = fetch) {
    this.fetch = fetchImpl;
  }
  /** True once the token's expiry timestamp (epoch ms) has passed. */
  isTokenExpired(expiresAt) {
    return Date.now() >= expiresAt;
  }
  /** True when the token is within the skew window of expiring. */
  needsRefresh(expiresAt) {
    const refreshDeadline = expiresAt - TOKEN_EXPIRY_SKEW_MS;
    return Date.now() >= refreshDeadline;
  }
  /**
   * Return the tokens unchanged when still fresh; otherwise exchange the
   * refresh token for a new set. Throws when the token has fully expired.
   */
  async refreshIfNeeded(tokens, clientId) {
    if (!this.needsRefresh(tokens.expiresAt)) {
      return tokens;
    }
    if (this.isTokenExpired(tokens.expiresAt)) {
      throw new GitLabError({
        message: "OAuth token has expired and cannot be used"
      });
    }
    const { instanceUrl, refreshToken } = tokens;
    return this.exchangeRefreshToken({ instanceUrl, refreshToken, clientId });
  }
  /**
   * Exchange an authorization code (with PKCE code verifier) for tokens.
   * Based on gitlab-vscode-extension createOAuthAccountFromCode.
   */
  async exchangeAuthorizationCode(params) {
    const { instanceUrl, code, codeVerifier, clientId, redirectUri } = params;
    const raw = await this.exchangeToken({
      instanceUrl,
      grantType: "authorization_code",
      code,
      codeVerifier,
      clientId: clientId || this.getClientId(instanceUrl),
      redirectUri
    });
    return this.createTokensFromResponse(raw, instanceUrl);
  }
  /**
   * Exchange a refresh token for a fresh access/refresh token pair.
   * Based on gitlab-vscode-extension TokenExchangeService.
   */
  async exchangeRefreshToken(params) {
    const { instanceUrl, refreshToken, clientId } = params;
    const raw = await this.exchangeToken({
      instanceUrl,
      grantType: "refresh_token",
      refreshToken,
      clientId: clientId || this.getClientId(instanceUrl)
    });
    return this.createTokensFromResponse(raw, instanceUrl);
  }
  /**
   * Resolve the OAuth client ID for an instance.
   * Priority: GITLAB_OAUTH_CLIENT_ID env var, then the bundled default for
   * GitLab.com. Throws for any other instance without a configured ID.
   */
  getClientId(instanceUrl) {
    const fromEnv = process.env["GITLAB_OAUTH_CLIENT_ID"];
    if (fromEnv) {
      return fromEnv;
    }
    if (instanceUrl === GITLAB_COM_URL) {
      return OPENCODE_GITLAB_AUTH_CLIENT_ID;
    }
    throw new GitLabError({
      message: `No OAuth client ID configured for instance ${instanceUrl}. Please provide a clientId parameter or set GITLAB_OAUTH_CLIENT_ID environment variable.`
    });
  }
  /**
   * POST form-encoded grant parameters to `<instance>/oauth/token`.
   * Validates per-grant required fields first; wraps any transport or HTTP
   * failure in a GitLabError.
   */
  async exchangeToken(params) {
    const { instanceUrl, grantType, code, codeVerifier, refreshToken, clientId, redirectUri } = params;
    const body = { client_id: clientId, grant_type: grantType };
    if (grantType === "authorization_code") {
      if (!code || !codeVerifier || !redirectUri) {
        throw new GitLabError({
          message: "Authorization code, code verifier, and redirect URI are required for authorization_code grant"
        });
      }
      Object.assign(body, { code, code_verifier: codeVerifier, redirect_uri: redirectUri });
    } else if (grantType === "refresh_token") {
      if (!refreshToken) {
        throw new GitLabError({
          message: "Refresh token is required for refresh_token grant"
        });
      }
      body.refresh_token = refreshToken;
    }
    const endpoint = `${instanceUrl}/oauth/token`;
    try {
      const response = await this.fetch(endpoint, {
        method: "POST",
        headers: { "Content-Type": "application/x-www-form-urlencoded" },
        body: new URLSearchParams(body).toString()
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new GitLabError({
          message: `OAuth token exchange failed: ${response.status} ${response.statusText}`,
          cause: new Error(errorText)
        });
      }
      return await response.json();
    } catch (error) {
      if (error instanceof GitLabError) {
        throw error;
      }
      throw new GitLabError({
        message: `Failed to exchange OAuth token: ${error instanceof Error ? error.message : String(error)}`,
        cause: error instanceof Error ? error : void 0
      });
    }
  }
  /** Build the provider's token record from a raw /oauth/token response. */
  createTokensFromResponse(response, instanceUrl) {
    return {
      accessToken: response.access_token,
      refreshToken: response.refresh_token || "",
      expiresAt: this.createExpiresTimestamp(response),
      instanceUrl
    };
  }
  /**
   * `created_at` and `expires_in` arrive in seconds; convert to an
   * epoch-millisecond expiry deadline.
   * Based on gitlab-vscode-extension createExpiresTimestamp.
   */
  createExpiresTimestamp(response) {
    return (response.created_at + response.expires_in) * 1e3;
  }
};
|
|
3997
|
+
|
|
3998
|
+
// src/gitlab-provider.ts
|
|
3999
|
+
var fs2 = __toESM(require("fs"));
|
|
4000
|
+
var path3 = __toESM(require("path"));
|
|
4001
|
+
var os2 = __toESM(require("os"));
|
|
4002
|
+
/**
 * Resolve the OpenCode auth.json location.
 *
 * Preference order: $XDG_DATA_HOME/opencode/auth.json when XDG_DATA_HOME is
 * set (and non-empty), otherwise ~/.local/share/opencode/auth.json on
 * POSIX platforms, and ~/.opencode/auth.json on Windows.
 *
 * @returns {string} Absolute path to auth.json.
 */
function getOpenCodeAuthPath() {
  const xdg = process.env.XDG_DATA_HOME;
  if (xdg) {
    return path3.join(xdg, "opencode", "auth.json");
  }
  const home = os2.homedir();
  const segments = process.platform === "win32"
    ? [home, ".opencode", "auth.json"]
    : [home, ".local", "share", "opencode", "auth.json"];
  return path3.join(...segments);
}
|
|
4013
|
+
/**
 * Load stored OpenCode credentials for a GitLab instance from auth.json.
 *
 * Lookup order: the dedicated `gitlab` OAuth entry (when its enterpriseUrl
 * matches `instanceUrl`, with or without a trailing slash), then a key
 * matching the normalized instance URL.
 *
 * @param {string} instanceUrl - GitLab instance URL to look up.
 * @returns {Promise<object|undefined>} The matching auth entry, or undefined
 *   when auth.json does not exist or holds no matching entry.
 * @throws {Error} When auth.json exists but cannot be read or parsed.
 */
async function loadOpenCodeAuth(instanceUrl) {
  try {
    const authPath = getOpenCodeAuthPath();
    if (!fs2.existsSync(authPath)) {
      return void 0;
    }
    const authData = JSON.parse(fs2.readFileSync(authPath, "utf-8"));
    const stripped = instanceUrl.replace(/\/$/, "");
    const gitlabEntry = authData.gitlab;
    // Prefer the dedicated OAuth entry when its URL matches this instance.
    if (
      gitlabEntry?.type === "oauth" &&
      (gitlabEntry.enterpriseUrl === instanceUrl || gitlabEntry.enterpriseUrl === stripped)
    ) {
      return gitlabEntry;
    }
    // Fall back to an entry keyed by instance URL (either slash form).
    return authData[stripped] || authData[`${stripped}/`];
  } catch (error) {
    throw new Error(`Failed to load auth.json: ${error instanceof Error ? error.message : error}`);
  }
}
|
|
4033
|
+
/**
 * Resolve a GitLab API key from (in order): an explicit option, stored
 * OpenCode OAuth credentials (refreshing them when expired), or an
 * environment variable.
 *
 * @param {object} options - { apiKey?, environmentVariableName, description }.
 * @param {string} instanceUrl - GitLab instance URL the key is for.
 * @param {string|undefined} clientId - OAuth client id used for refresh.
 * @returns {Promise<string>} A usable access token / API key.
 * @throws {GitLabError} When no key can be resolved, or when OAuth refresh
 *   fails and no environment-variable fallback is available.
 */
async function loadApiKey(options, instanceUrl, clientId) {
  // An explicitly supplied key always wins.
  if (options.apiKey) {
    return options.apiKey;
  }
  const auth = await loadOpenCodeAuth(instanceUrl);
  if (auth?.type === "oauth") {
    const oauthManager = new GitLabOAuthManager();
    if (oauthManager.needsRefresh(auth.expires)) {
      try {
        const refreshed = await oauthManager.exchangeRefreshToken({
          instanceUrl,
          refreshToken: auth.refresh,
          clientId
        });
        // Persist the rotated tokens back to auth.json so later runs (and
        // other tools reading the same file) see the fresh credentials.
        const authPath = getOpenCodeAuthPath();
        const authData = JSON.parse(fs2.readFileSync(authPath, "utf-8"));
        authData.gitlab = {
          type: "oauth",
          refresh: refreshed.refreshToken,
          access: refreshed.accessToken,
          expires: refreshed.expiresAt,
          enterpriseUrl: instanceUrl
          // Use enterpriseUrl to match auth plugin format
        };
        // mode 384 === 0o600: keep the credentials file owner-readable only.
        fs2.writeFileSync(authPath, JSON.stringify(authData, null, 2), { mode: 384 });
        return refreshed.accessToken;
      } catch (error) {
        const refreshErrorMsg = error instanceof Error ? error.message : String(error);
        // Refresh failed: fall back to the environment variable if present,
        // otherwise surface both the refresh error and the remediation steps.
        const envApiKey = process.env[options.environmentVariableName];
        if (envApiKey) {
          return envApiKey;
        }
        throw new GitLabError({
          message: `OAuth token refresh failed and no fallback ${options.environmentVariableName} environment variable is set. Refresh error: ${refreshErrorMsg}. Re-authenticate with 'opencode auth login gitlab' or set ${options.environmentVariableName}.`
        });
      }
    } else {
      // Stored access token is still valid.
      return auth.access;
    }
  }
  // No OAuth credentials: require the environment variable.
  const apiKey = process.env[options.environmentVariableName];
  if (!apiKey) {
    throw new GitLabError({
      message: `${options.description} API key is missing. Pass it as the 'apiKey' parameter, set the ${options.environmentVariableName} environment variable, or authenticate with 'opencode auth login gitlab'.`
    });
  }
  return apiKey;
}
|
|
4081
|
+
/**
 * Return a copy of `headers` with `suffix` appended to the User-Agent
 * (space-separated), or with User-Agent set to `suffix` when the header is
 * absent or empty. The input object is not mutated.
 *
 * @param {Record<string, string>} headers - Existing HTTP headers.
 * @param {string} suffix - User-Agent fragment to append.
 * @returns {Record<string, string>} New headers object.
 */
function withUserAgentSuffix(headers, suffix) {
  const existing = headers["User-Agent"];
  const result = { ...headers };
  result["User-Agent"] = existing ? `${existing} ${suffix}` : suffix;
  return result;
}
|
|
4088
|
+
/**
 * Create a GitLab AI provider.
 *
 * Resolves the target instance URL from options, then the
 * GITLAB_INSTANCE_URL environment variable, defaulting to gitlab.com.
 * Returns a callable provider object (model-id -> language model) that also
 * exposes `languageModel`, `chat`, `agenticChat`, and `workflowChat`.
 *
 * @param {object} [options] - Provider options (apiKey, instanceUrl, name,
 *   clientId, headers, fetch, featureFlags, aiGatewayUrl, aiGatewayHeaders).
 * @returns {Function & object} The provider.
 */
function createGitLab(options = {}) {
  const instanceUrl = options.instanceUrl ?? process.env["GITLAB_INSTANCE_URL"] ?? "https://gitlab.com";
  const providerName = options.name ?? "gitlab";
  let cachedApiKey;
  let apiKeyPromise;
  // Resolve and cache the API key; concurrent callers share one in-flight
  // promise.
  const getApiKey = async () => {
    if (cachedApiKey) {
      return cachedApiKey;
    }
    if (apiKeyPromise) {
      return apiKeyPromise;
    }
    apiKeyPromise = loadApiKey(
      {
        apiKey: options.apiKey,
        environmentVariableName: "GITLAB_TOKEN",
        description: "GitLab"
      },
      instanceUrl,
      options.clientId
    );
    // FIX: clear the in-flight promise even when loading fails. Previously a
    // rejected promise stayed cached in `apiKeyPromise`, so every later call
    // re-returned the same rejection and the key was never retried (the
    // warm-up call below primes exactly this failure path).
    try {
      cachedApiKey = await apiKeyPromise;
      return cachedApiKey;
    } finally {
      apiKeyPromise = void 0;
    }
  };
  // Force a full re-resolution of the key (e.g. after a 401).
  const refreshApiKey = async () => {
    cachedApiKey = void 0;
    apiKeyPromise = void 0;
    cachedApiKey = await loadApiKey(
      {
        apiKey: void 0,
        // Bypass stale options.apiKey to force auth.json read
        environmentVariableName: "GITLAB_TOKEN",
        description: "GitLab"
      },
      instanceUrl,
      options.clientId
    );
  };
  // Build request headers synchronously from whatever key is available now.
  const getHeaders = () => {
    const apiKey = cachedApiKey || options.apiKey || process.env["GITLAB_TOKEN"] || "";
    if (!apiKey) {
      throw new GitLabError({
        message: "GitLab API key is missing. Pass it as the 'apiKey' parameter, set the GITLAB_TOKEN environment variable, or authenticate with 'opencode auth login gitlab'."
      });
    }
    return withUserAgentSuffix(
      {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json",
        ...options.headers
      },
      `ai-sdk-gitlab/${VERSION}`
    );
  };
  // Warm the key cache in the background; failures here are non-fatal and
  // will surface (and now be retried) on the first real request.
  getApiKey().catch(() => {
  });
  const createAgenticChatModel = (modelId, agenticOptions) => {
    const mapping = getModelMapping(modelId);
    if (!mapping) {
      throw new GitLabError({
        message: `Unknown model ID: ${modelId}. Model must be registered in MODEL_MAPPINGS.`
      });
    }
    if (agenticOptions?.providerModel) {
      const validModels = getValidModelsForProvider(mapping.provider);
      if (!validModels.includes(agenticOptions.providerModel)) {
        throw new GitLabError({
          message: `Invalid providerModel '${agenticOptions.providerModel}' for provider '${mapping.provider}'. Valid models: ${validModels.join(", ")}`
        });
      }
    }
    // Per-call flags/headers override provider-level ones.
    const featureFlags = {
      DuoAgentPlatformNext: true,
      ...options.featureFlags,
      ...agenticOptions?.featureFlags
    };
    const defaultAiGatewayHeaders = {
      "User-Agent": `gitlab-ai-provider/${VERSION}`
    };
    const aiGatewayHeaders = {
      ...defaultAiGatewayHeaders,
      ...options.aiGatewayHeaders,
      ...agenticOptions?.aiGatewayHeaders
    };
    const baseConfig = {
      provider: `${providerName}.agentic`,
      instanceUrl,
      getHeaders,
      refreshApiKey,
      fetch: options.fetch,
      maxTokens: agenticOptions?.maxTokens,
      featureFlags,
      aiGatewayUrl: options.aiGatewayUrl,
      aiGatewayHeaders
    };
    if (mapping.provider === "openai") {
      return new GitLabOpenAILanguageModel(modelId, {
        ...baseConfig,
        openaiModel: agenticOptions?.providerModel ?? mapping.model
      });
    }
    return new GitLabAnthropicLanguageModel(modelId, {
      ...baseConfig,
      anthropicModel: agenticOptions?.providerModel ?? mapping.model
    });
  };
  const createWorkflowChatModel = (modelId, workflowOptions) => {
    const mapping = getModelMapping(modelId);
    if (!mapping || mapping.provider !== "workflow") {
      throw new GitLabError({
        message: `Unknown workflow model ID: ${modelId}. Use 'duo-workflow' or a 'duo-workflow-*' model ID.`
      });
    }
    return new GitLabWorkflowLanguageModel(
      modelId,
      {
        provider: `${providerName}.workflow`,
        instanceUrl,
        getHeaders,
        refreshApiKey,
        fetch: options.fetch,
        featureFlags: {
          ...options.featureFlags,
          ...workflowOptions?.featureFlags
        },
        aiGatewayUrl: options.aiGatewayUrl
      },
      workflowOptions
    );
  };
  // Route a bare model id to workflow or agentic chat.
  const createDefaultModel = (modelId) => {
    if (isWorkflowModel(modelId)) {
      return createWorkflowChatModel(modelId);
    }
    return createAgenticChatModel(modelId);
  };
  const provider = Object.assign((modelId) => createDefaultModel(modelId), {
    specificationVersion: "v2",
    languageModel: createDefaultModel,
    chat: createDefaultModel,
    agenticChat: createAgenticChatModel,
    workflowChat: createWorkflowChatModel
  });
  provider.textEmbeddingModel = (modelId) => {
    throw new GitLabError({
      message: `GitLab provider does not support text embedding models. Model ID: ${modelId}`
    });
  };
  provider.imageModel = (modelId) => {
    throw new GitLabError({
      message: `GitLab provider does not support image models. Model ID: ${modelId}`
    });
  };
  return provider;
}
|
|
4244
|
+
// Default provider instance (gitlab.com unless GITLAB_INSTANCE_URL is set).
var gitlab = createGitLab();
|
|
4245
|
+
// Annotate the CommonJS export names for ESM import in node:
// (esbuild artifact: the `0 &&` guard means this never runs; Node statically
// parses it to expose named ESM imports from this CJS module.)
0 && (module.exports = {
  AGENT_PRIVILEGES,
  BUNDLED_CLIENT_ID,
  CLIENT_VERSION,
  DEFAULT_AGENT_PRIVILEGES,
  DEFAULT_AI_GATEWAY_URL,
  DEFAULT_CLIENT_CAPABILITIES,
  DEFAULT_WORKFLOW_DEFINITION,
  GITLAB_COM_URL,
  GitLabAnthropicLanguageModel,
  GitLabDirectAccessClient,
  GitLabError,
  GitLabModelCache,
  GitLabModelDiscovery,
  GitLabOAuthManager,
  GitLabOpenAILanguageModel,
  GitLabProjectCache,
  GitLabProjectDetector,
  GitLabWorkflowClient,
  GitLabWorkflowLanguageModel,
  GitLabWorkflowTokenClient,
  MODEL_ID_TO_ANTHROPIC_MODEL,
  MODEL_MAPPINGS,
  OAUTH_SCOPES,
  OPENCODE_GITLAB_AUTH_CLIENT_ID,
  TOKEN_EXPIRY_SKEW_MS,
  VERSION,
  WORKFLOW_ENVIRONMENT,
  WS_HEARTBEAT_INTERVAL_MS,
  WS_KEEPALIVE_PING_INTERVAL_MS,
  WorkflowType,
  createGitLab,
  getAnthropicModelForModelId,
  getModelMapping,
  getOpenAIApiType,
  getOpenAIModelForModelId,
  getProviderForModelId,
  getValidModelsForProvider,
  getWorkflowModelRef,
  gitlab,
  isResponsesApiModel,
  isWorkflowModel
});
|
|
4289
|
+
//# sourceMappingURL=index.js.map
|