gitlab-ai-provider 5.3.3 → 6.1.0

This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -242,7 +242,7 @@ var GitLabDirectAccessClient = class {
242
242
 
243
243
  // src/gitlab-anthropic-language-model.ts
244
244
  var GitLabAnthropicLanguageModel = class {
245
- specificationVersion = "v2";
245
+ specificationVersion = "v3";
246
246
  modelId;
247
247
  supportedUrls = {};
248
248
  config;
@@ -401,10 +401,14 @@ ${message.content}` : message.content;
401
401
  resultContent = part.output.value;
402
402
  } else if (part.output.type === "json") {
403
403
  resultContent = JSON.stringify(part.output.value);
404
+ } else if (part.output.type === "content") {
405
+ resultContent = JSON.stringify(part.output.value);
404
406
  } else if (part.output.type === "error-text") {
405
407
  resultContent = part.output.value;
406
408
  } else if (part.output.type === "error-json") {
407
409
  resultContent = JSON.stringify(part.output.value);
410
+ } else if (part.output.type === "execution-denied") {
411
+ resultContent = part.output.reason ?? "Tool execution denied.";
408
412
  } else {
409
413
  resultContent = JSON.stringify(part.output);
410
414
  }
@@ -427,18 +431,41 @@ ${message.content}` : message.content;
427
431
  * Convert Anthropic finish reason to AI SDK format
428
432
  */
429
433
  convertFinishReason(stopReason) {
430
- switch (stopReason) {
431
- case "end_turn":
432
- return "stop";
433
- case "stop_sequence":
434
- return "stop";
435
- case "max_tokens":
436
- return "length";
437
- case "tool_use":
438
- return "tool-calls";
439
- default:
440
- return "unknown";
441
- }
434
+ const unified = (() => {
435
+ switch (stopReason) {
436
+ case "end_turn":
437
+ return "stop";
438
+ case "stop_sequence":
439
+ return "stop";
440
+ case "max_tokens":
441
+ return "length";
442
+ case "tool_use":
443
+ return "tool-calls";
444
+ default:
445
+ return "other";
446
+ }
447
+ })();
448
+ return { unified, raw: stopReason ?? void 0 };
449
+ }
450
+ createUsage(params) {
451
+ const inputTotal = params?.inputTotal;
452
+ const outputTotal = params?.outputTotal;
453
+ const cacheRead = params?.cacheRead;
454
+ const cacheWrite = params?.cacheWrite;
455
+ return {
456
+ inputTokens: {
457
+ total: inputTotal,
458
+ noCache: inputTotal == null ? void 0 : Math.max(0, inputTotal - (cacheRead ?? 0) - (cacheWrite ?? 0)),
459
+ cacheRead,
460
+ cacheWrite
461
+ },
462
+ outputTokens: {
463
+ total: outputTotal,
464
+ text: outputTotal,
465
+ reasoning: params?.outputReasoning
466
+ },
467
+ raw: params?.raw
468
+ };
442
469
  }
443
470
  async doGenerate(options) {
444
471
  return this.doGenerateWithRetry(options, false);
@@ -478,11 +505,13 @@ ${message.content}` : message.content;
478
505
  });
479
506
  }
480
507
  }
481
- const usage = {
482
- inputTokens: response.usage.input_tokens,
483
- outputTokens: response.usage.output_tokens,
484
- totalTokens: response.usage.input_tokens + response.usage.output_tokens
485
- };
508
+ const rawUsage = response.usage;
509
+ const usage = this.createUsage({
510
+ inputTotal: response.usage.input_tokens,
511
+ outputTotal: response.usage.output_tokens,
512
+ cacheRead: rawUsage.cache_read_input_tokens,
513
+ cacheWrite: rawUsage.cache_creation_input_tokens
514
+ });
486
515
  return {
487
516
  content,
488
517
  finishReason: this.convertFinishReason(response.stop_reason),
@@ -538,12 +567,8 @@ ${message.content}` : message.content;
538
567
  const stream = new ReadableStream({
539
568
  start: async (controller) => {
540
569
  const contentBlocks = {};
541
- const usage = {
542
- inputTokens: 0,
543
- outputTokens: 0,
544
- totalTokens: 0
545
- };
546
- let finishReason = "unknown";
570
+ let usage = self.createUsage();
571
+ let finishReason = { unified: "other", raw: void 0 };
547
572
  try {
548
573
  const anthropicStream = client.messages.stream(requestBody, {
549
574
  signal: options.abortSignal
@@ -558,7 +583,14 @@ ${message.content}` : message.content;
558
583
  switch (event.type) {
559
584
  case "message_start":
560
585
  if (event.message.usage) {
561
- usage.inputTokens = event.message.usage.input_tokens;
586
+ usage = self.createUsage({
587
+ inputTotal: event.message.usage.input_tokens,
588
+ outputTotal: usage.outputTokens.total,
589
+ outputReasoning: usage.outputTokens.reasoning,
590
+ cacheRead: usage.inputTokens.cacheRead,
591
+ cacheWrite: usage.inputTokens.cacheWrite,
592
+ raw: usage.raw
593
+ });
562
594
  }
563
595
  controller.enqueue({
564
596
  type: "response-metadata",
@@ -630,8 +662,14 @@ ${message.content}` : message.content;
630
662
  }
631
663
  case "message_delta":
632
664
  if (event.usage) {
633
- usage.outputTokens = event.usage.output_tokens;
634
- usage.totalTokens = (usage.inputTokens || 0) + event.usage.output_tokens;
665
+ usage = self.createUsage({
666
+ inputTotal: usage.inputTokens.total,
667
+ outputTotal: event.usage.output_tokens,
668
+ outputReasoning: usage.outputTokens.reasoning,
669
+ cacheRead: usage.inputTokens.cacheRead,
670
+ cacheWrite: usage.inputTokens.cacheWrite,
671
+ raw: usage.raw
672
+ });
635
673
  }
636
674
  if (event.delta.stop_reason) {
637
675
  finishReason = self.convertFinishReason(event.delta.stop_reason);
@@ -831,7 +869,7 @@ var MODEL_ID_TO_ANTHROPIC_MODEL = Object.fromEntries(
831
869
 
832
870
  // src/gitlab-openai-language-model.ts
833
871
  var GitLabOpenAILanguageModel = class {
834
- specificationVersion = "v2";
872
+ specificationVersion = "v3";
835
873
  modelId;
836
874
  supportedUrls = {};
837
875
  config;
@@ -977,10 +1015,14 @@ var GitLabOpenAILanguageModel = class {
977
1015
  resultContent = part.output.value;
978
1016
  } else if (part.output.type === "json") {
979
1017
  resultContent = JSON.stringify(part.output.value);
1018
+ } else if (part.output.type === "content") {
1019
+ resultContent = JSON.stringify(part.output.value);
980
1020
  } else if (part.output.type === "error-text") {
981
1021
  resultContent = part.output.value;
982
1022
  } else if (part.output.type === "error-json") {
983
1023
  resultContent = JSON.stringify(part.output.value);
1024
+ } else if (part.output.type === "execution-denied") {
1025
+ resultContent = part.output.reason ?? "Tool execution denied.";
984
1026
  } else {
985
1027
  resultContent = JSON.stringify(part.output);
986
1028
  }
@@ -996,18 +1038,40 @@ var GitLabOpenAILanguageModel = class {
996
1038
  return messages;
997
1039
  }
998
1040
  convertFinishReason(finishReason) {
999
- switch (finishReason) {
1000
- case "stop":
1001
- return "stop";
1002
- case "length":
1003
- return "length";
1004
- case "tool_calls":
1005
- return "tool-calls";
1006
- case "content_filter":
1007
- return "content-filter";
1008
- default:
1009
- return "unknown";
1010
- }
1041
+ const unified = (() => {
1042
+ switch (finishReason) {
1043
+ case "stop":
1044
+ return "stop";
1045
+ case "length":
1046
+ return "length";
1047
+ case "tool_calls":
1048
+ return "tool-calls";
1049
+ case "content_filter":
1050
+ return "content-filter";
1051
+ default:
1052
+ return "other";
1053
+ }
1054
+ })();
1055
+ return { unified, raw: finishReason ?? void 0 };
1056
+ }
1057
+ createUsage(params) {
1058
+ const inputTotal = params?.inputTotal;
1059
+ const outputTotal = params?.outputTotal;
1060
+ const cacheRead = params?.cacheRead;
1061
+ return {
1062
+ inputTokens: {
1063
+ total: inputTotal,
1064
+ noCache: inputTotal == null ? void 0 : Math.max(0, inputTotal - (cacheRead ?? 0)),
1065
+ cacheRead,
1066
+ cacheWrite: void 0
1067
+ },
1068
+ outputTokens: {
1069
+ total: outputTotal,
1070
+ text: outputTotal,
1071
+ reasoning: params?.outputReasoning
1072
+ },
1073
+ raw: params?.raw
1074
+ };
1011
1075
  }
1012
1076
  /**
1013
1077
  * Convert tools to Responses API format
@@ -1024,7 +1088,7 @@ var GitLabOpenAILanguageModel = class {
1024
1088
  name: tool.name,
1025
1089
  description: tool.description || "",
1026
1090
  parameters: schema,
1027
- strict: false
1091
+ strict: tool.strict
1028
1092
  };
1029
1093
  });
1030
1094
  }
@@ -1075,10 +1139,14 @@ var GitLabOpenAILanguageModel = class {
1075
1139
  resultContent = part.output.value;
1076
1140
  } else if (part.output.type === "json") {
1077
1141
  resultContent = JSON.stringify(part.output.value);
1142
+ } else if (part.output.type === "content") {
1143
+ resultContent = JSON.stringify(part.output.value);
1078
1144
  } else if (part.output.type === "error-text") {
1079
1145
  resultContent = part.output.value;
1080
1146
  } else if (part.output.type === "error-json") {
1081
1147
  resultContent = JSON.stringify(part.output.value);
1148
+ } else if (part.output.type === "execution-denied") {
1149
+ resultContent = part.output.reason ?? "Tool execution denied.";
1082
1150
  } else {
1083
1151
  resultContent = JSON.stringify(part.output);
1084
1152
  }
@@ -1107,20 +1175,23 @@ var GitLabOpenAILanguageModel = class {
1107
1175
  */
1108
1176
  convertResponsesStatus(status, hasToolCalls = false) {
1109
1177
  if (hasToolCalls) {
1110
- return "tool-calls";
1111
- }
1112
- switch (status) {
1113
- case "completed":
1114
- return "stop";
1115
- case "incomplete":
1116
- return "length";
1117
- case "cancelled":
1118
- return "stop";
1119
- case "failed":
1120
- return "error";
1121
- default:
1122
- return "unknown";
1123
- }
1178
+ return { unified: "tool-calls", raw: status };
1179
+ }
1180
+ const unified = (() => {
1181
+ switch (status) {
1182
+ case "completed":
1183
+ return "stop";
1184
+ case "incomplete":
1185
+ return "length";
1186
+ case "cancelled":
1187
+ return "stop";
1188
+ case "failed":
1189
+ return "error";
1190
+ default:
1191
+ return "other";
1192
+ }
1193
+ })();
1194
+ return { unified, raw: status };
1124
1195
  }
1125
1196
  async doGenerate(options) {
1126
1197
  if (this.useResponsesApi) {
@@ -1163,11 +1234,12 @@ var GitLabOpenAILanguageModel = class {
1163
1234
  }
1164
1235
  }
1165
1236
  }
1166
- const usage = {
1167
- inputTokens: response.usage?.prompt_tokens || 0,
1168
- outputTokens: response.usage?.completion_tokens || 0,
1169
- totalTokens: response.usage?.total_tokens || 0
1170
- };
1237
+ const usage = this.createUsage({
1238
+ inputTotal: response.usage?.prompt_tokens,
1239
+ outputTotal: response.usage?.completion_tokens,
1240
+ cacheRead: response.usage?.prompt_tokens_details?.cached_tokens,
1241
+ raw: { total_tokens: response.usage?.total_tokens }
1242
+ });
1171
1243
  return {
1172
1244
  content,
1173
1245
  finishReason: this.convertFinishReason(choice?.finish_reason),
@@ -1234,11 +1306,13 @@ var GitLabOpenAILanguageModel = class {
1234
1306
  });
1235
1307
  }
1236
1308
  }
1237
- const usage = {
1238
- inputTokens: response.usage?.input_tokens || 0,
1239
- outputTokens: response.usage?.output_tokens || 0,
1240
- totalTokens: response.usage?.total_tokens || 0
1241
- };
1309
+ const usage = this.createUsage({
1310
+ inputTotal: response.usage?.input_tokens,
1311
+ outputTotal: response.usage?.output_tokens,
1312
+ outputReasoning: response.usage?.output_tokens_details?.reasoning_tokens,
1313
+ cacheRead: response.usage?.input_tokens_details?.cached_tokens,
1314
+ raw: { total_tokens: response.usage?.total_tokens }
1315
+ });
1242
1316
  return {
1243
1317
  content,
1244
1318
  finishReason: this.convertResponsesStatus(response.status, hasToolCalls),
@@ -1297,12 +1371,8 @@ var GitLabOpenAILanguageModel = class {
1297
1371
  const stream = new ReadableStream({
1298
1372
  start: async (controller) => {
1299
1373
  const toolCalls = {};
1300
- const usage = {
1301
- inputTokens: 0,
1302
- outputTokens: 0,
1303
- totalTokens: 0
1304
- };
1305
- let finishReason = "unknown";
1374
+ let usage = self.createUsage();
1375
+ let finishReason = { unified: "other", raw: void 0 };
1306
1376
  let textStarted = false;
1307
1377
  const textId = "text-0";
1308
1378
  try {
@@ -1360,9 +1430,12 @@ var GitLabOpenAILanguageModel = class {
1360
1430
  finishReason = self.convertFinishReason(choice.finish_reason);
1361
1431
  }
1362
1432
  if (chunk.usage) {
1363
- usage.inputTokens = chunk.usage.prompt_tokens || 0;
1364
- usage.outputTokens = chunk.usage.completion_tokens || 0;
1365
- usage.totalTokens = chunk.usage.total_tokens || 0;
1433
+ usage = self.createUsage({
1434
+ inputTotal: chunk.usage.prompt_tokens,
1435
+ outputTotal: chunk.usage.completion_tokens,
1436
+ cacheRead: chunk.usage.prompt_tokens_details?.cached_tokens,
1437
+ raw: { total_tokens: chunk.usage.total_tokens }
1438
+ });
1366
1439
  }
1367
1440
  }
1368
1441
  if (textStarted) {
@@ -1442,12 +1515,8 @@ var GitLabOpenAILanguageModel = class {
1442
1515
  const stream = new ReadableStream({
1443
1516
  start: async (controller) => {
1444
1517
  const toolCalls = {};
1445
- const usage = {
1446
- inputTokens: 0,
1447
- outputTokens: 0,
1448
- totalTokens: 0
1449
- };
1450
- let finishReason = "unknown";
1518
+ let usage = self.createUsage();
1519
+ let finishReason = { unified: "other", raw: void 0 };
1451
1520
  let textStarted = false;
1452
1521
  const textId = "text-0";
1453
1522
  try {
@@ -1509,9 +1578,13 @@ var GitLabOpenAILanguageModel = class {
1509
1578
  const hasToolCalls2 = Object.keys(toolCalls).length > 0;
1510
1579
  finishReason = self.convertResponsesStatus(event.response.status, hasToolCalls2);
1511
1580
  if (event.response.usage) {
1512
- usage.inputTokens = event.response.usage.input_tokens || 0;
1513
- usage.outputTokens = event.response.usage.output_tokens || 0;
1514
- usage.totalTokens = event.response.usage.total_tokens || 0;
1581
+ usage = self.createUsage({
1582
+ inputTotal: event.response.usage.input_tokens,
1583
+ outputTotal: event.response.usage.output_tokens,
1584
+ outputReasoning: event.response.usage.output_tokens_details?.reasoning_tokens,
1585
+ cacheRead: event.response.usage.input_tokens_details?.cached_tokens,
1586
+ raw: { total_tokens: event.response.usage.total_tokens }
1587
+ });
1515
1588
  }
1516
1589
  }
1517
1590
  }
@@ -1519,8 +1592,8 @@ var GitLabOpenAILanguageModel = class {
1519
1592
  controller.enqueue({ type: "text-end", id: textId });
1520
1593
  }
1521
1594
  const hasToolCalls = Object.keys(toolCalls).length > 0;
1522
- if (hasToolCalls && finishReason === "stop") {
1523
- finishReason = "tool-calls";
1595
+ if (hasToolCalls && finishReason.unified === "stop") {
1596
+ finishReason = { unified: "tool-calls", raw: finishReason.raw };
1524
1597
  }
1525
1598
  for (const tc of Object.values(toolCalls)) {
1526
1599
  controller.enqueue({ type: "tool-input-end", id: tc.callId });
@@ -1580,7 +1653,7 @@ var GitLabOpenAILanguageModel = class {
1580
1653
  var import_isomorphic_ws = __toESM(require("isomorphic-ws"));
1581
1654
 
1582
1655
  // src/version.ts
1583
- var VERSION = true ? "5.3.2" : "0.0.0-dev";
1656
+ var VERSION = true ? "6.0.0" : "0.0.0-dev";
1584
1657
 
1585
1658
  // src/gitlab-workflow-types.ts
1586
1659
  var WorkflowType = /* @__PURE__ */ ((WorkflowType2) => {
@@ -1806,7 +1879,9 @@ var GitLabWorkflowClient = class {
1806
1879
  });
1807
1880
  } else if (checkpoint.status === "STOPPED" || checkpoint.status === "CANCELLED") {
1808
1881
  this.emit({ type: "completed" });
1809
- } else if (checkpoint.status === "TOOL_CALL_APPROVAL_REQUIRED" || checkpoint.status === "PLAN_APPROVAL_REQUIRED") {
1882
+ } else if (checkpoint.status === "TOOL_CALL_APPROVAL_REQUIRED") {
1883
+ this.emit({ type: "approval-required", tools: this.extractApprovalTools(checkpoint) });
1884
+ } else if (checkpoint.status === "PLAN_APPROVAL_REQUIRED") {
1810
1885
  this.emit({ type: "completed" });
1811
1886
  }
1812
1887
  return;
@@ -1845,6 +1920,18 @@ var GitLabWorkflowClient = class {
1845
1920
  }
1846
1921
  }
1847
1922
  }
1923
+ extractApprovalTools(checkpoint) {
1924
+ if (!checkpoint.checkpoint) return [];
1925
+ let parsed;
1926
+ try {
1927
+ parsed = JSON.parse(checkpoint.checkpoint);
1928
+ } catch {
1929
+ return [];
1930
+ }
1931
+ return (parsed.channel_values?.ui_chat_log ?? []).filter(
1932
+ (e) => e.message_type === "request" && e.tool_info !== null
1933
+ ).map((e) => ({ name: e.tool_info.name, args: JSON.stringify(e.tool_info.args) }));
1934
+ }
1848
1935
  send(event) {
1849
1936
  if (this.socket?.readyState === import_isomorphic_ws.default.OPEN) {
1850
1937
  const json = JSON.stringify(event);
@@ -2944,7 +3031,7 @@ function minimalSchema(schemaStr) {
2944
3031
  }
2945
3032
  }
2946
3033
  var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
2947
- specificationVersion = "v2";
3034
+ specificationVersion = "v3";
2948
3035
  modelId;
2949
3036
  supportedUrls = {};
2950
3037
  config;
@@ -3016,6 +3103,23 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3016
3103
  * the AI SDK only surfaces usage via finish-step at stream end.
3017
3104
  */
3018
3105
  onUsageUpdate = null;
3106
+ /**
3107
+ * Tool names pre-approved for the current session.
3108
+ * Set by the host (e.g., opencode) and merged into preapproved_tools on each StartRequest.
3109
+ * Updated when the user chooses "always" in the approval prompt.
3110
+ */
3111
+ sessionPreapprovedTools = [];
3112
+ /**
3113
+ * Set the approval handler callback.
3114
+ * Called when DWS requires tool call approval. Host (e.g., opencode) wires this
3115
+ * to its permission system each stream call, similar to toolExecutor.
3116
+ */
3117
+ set approvalHandler(handler) {
3118
+ this.workflowOptions.approvalHandler = handler ?? void 0;
3119
+ }
3120
+ get approvalHandler() {
3121
+ return this.workflowOptions.approvalHandler ?? null;
3122
+ }
3019
3123
  /**
3020
3124
  * Optional callback invoked when multiple workflow models are available
3021
3125
  * and the user should pick one. Set per-stream by the host (e.g., OpenCode)
@@ -3245,16 +3349,34 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3245
3349
  get workflowId() {
3246
3350
  return this.currentWorkflowId;
3247
3351
  }
3352
+ createUsage(params) {
3353
+ return {
3354
+ inputTokens: {
3355
+ total: params?.inputTotal,
3356
+ noCache: params?.inputTotal,
3357
+ cacheRead: void 0,
3358
+ cacheWrite: void 0
3359
+ },
3360
+ outputTokens: {
3361
+ total: params?.outputTotal,
3362
+ text: params?.outputTotal,
3363
+ reasoning: void 0
3364
+ }
3365
+ };
3366
+ }
3367
+ createFinishReason(unified, raw) {
3368
+ return { unified, raw };
3369
+ }
3248
3370
  // ---------------------------------------------------------------------------
3249
- // LanguageModelV2 — doGenerate (non-streaming)
3371
+ // LanguageModelV3 — doGenerate (non-streaming)
3250
3372
  // ---------------------------------------------------------------------------
3251
3373
  async doGenerate(options) {
3252
3374
  const { stream } = await this.doStream(options);
3253
3375
  const reader = stream.getReader();
3254
3376
  const textParts = [];
3255
3377
  const toolCalls = [];
3256
- let finishReason = "unknown";
3257
- const usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
3378
+ let finishReason = { unified: "other", raw: void 0 };
3379
+ let usage = this.createUsage();
3258
3380
  try {
3259
3381
  while (true) {
3260
3382
  const { done, value } = await reader.read();
@@ -3274,9 +3396,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3274
3396
  case "finish":
3275
3397
  finishReason = value.finishReason;
3276
3398
  if (value.usage) {
3277
- usage.inputTokens = value.usage.inputTokens ?? 0;
3278
- usage.outputTokens = value.usage.outputTokens ?? 0;
3279
- usage.totalTokens = value.usage.totalTokens ?? 0;
3399
+ usage = value.usage;
3280
3400
  }
3281
3401
  break;
3282
3402
  case "error":
@@ -3295,13 +3415,16 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3295
3415
  return { content, finishReason, usage, warnings: [] };
3296
3416
  }
3297
3417
  // ---------------------------------------------------------------------------
3298
- // LanguageModelV2 — doStream (streaming)
3418
+ // LanguageModelV3 — doStream (streaming)
3299
3419
  // ---------------------------------------------------------------------------
3300
3420
  async doStream(options) {
3301
3421
  const goal = this.extractGoalFromPrompt(options.prompt);
3302
3422
  const modelRef = await this.resolveModelRef();
3303
3423
  const mcpTools = this.extractMcpTools(options);
3304
- const preapprovedTools = this.workflowOptions.preapprovedTools ?? mcpTools.map((t) => t.name);
3424
+ const preapprovedTools = [
3425
+ ...this.workflowOptions.preapprovedTools ?? mcpTools.map((t) => t.name),
3426
+ ...this.sessionPreapprovedTools
3427
+ ];
3305
3428
  const additionalContext = this.buildAdditionalContext(options.prompt);
3306
3429
  const toolExecutor = this.toolExecutor ?? null;
3307
3430
  const availableToolNames = new Set(options.tools?.map((t) => t.name) ?? []);
@@ -3317,7 +3440,8 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3317
3440
  workflowId = await this.tokenClient.createWorkflow(goal, {
3318
3441
  projectId,
3319
3442
  namespaceId: this.workflowOptions.namespaceId,
3320
- workflowDefinition: this.workflowOptions.workflowDefinition
3443
+ workflowDefinition: this.workflowOptions.workflowDefinition,
3444
+ agentPrivileges: this.workflowOptions.agentPrivileges
3321
3445
  });
3322
3446
  this.currentWorkflowId = workflowId;
3323
3447
  }
@@ -3329,11 +3453,13 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3329
3453
  streamedInputChars: 0,
3330
3454
  streamedOutputChars: 0,
3331
3455
  pendingToolCount: 0,
3456
+ approvalPending: false,
3332
3457
  deferredClose: null,
3333
3458
  activeTextBlockId: null,
3334
3459
  agentMessageEmitted: new Map(this.persistedAgentEmitted),
3335
3460
  currentAgentMessageId: "",
3336
- activeClient: wsClient
3461
+ activeClient: wsClient,
3462
+ processedRequestIDs: /* @__PURE__ */ new Set()
3337
3463
  };
3338
3464
  for (const msg of options.prompt) {
3339
3465
  if (msg.role === "system") {
@@ -3346,6 +3472,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3346
3472
  }
3347
3473
  }
3348
3474
  }
3475
+ let startReq;
3349
3476
  const stream = new ReadableStream({
3350
3477
  start: async (controller) => {
3351
3478
  try {
@@ -3366,7 +3493,8 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3366
3493
  wsClient,
3367
3494
  toolExecutor,
3368
3495
  () => `text-${textBlockCounter++}`,
3369
- availableToolNames
3496
+ availableToolNames,
3497
+ startReq
3370
3498
  );
3371
3499
  }
3372
3500
  );
@@ -3388,7 +3516,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3388
3516
  const trimmedPreapproved = preapprovedTools.filter(
3389
3517
  (name) => trimmed.mcpTools.some((t) => t.name === name)
3390
3518
  );
3391
- const startReq = {
3519
+ startReq = {
3392
3520
  workflowID: workflowId,
3393
3521
  clientVersion: CLIENT_VERSION,
3394
3522
  workflowDefinition: workflowDef,
@@ -3453,7 +3581,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3453
3581
  // ---------------------------------------------------------------------------
3454
3582
  // Event handling
3455
3583
  // ---------------------------------------------------------------------------
3456
- handleWorkflowEvent(ss, event, controller, wsClient, toolExecutor, nextTextId, availableToolNames) {
3584
+ handleWorkflowEvent(ss, event, controller, wsClient, toolExecutor, nextTextId, availableToolNames, startReq) {
3457
3585
  if (ss.streamClosed) {
3458
3586
  return;
3459
3587
  }
@@ -3464,6 +3592,8 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3464
3592
  }
3465
3593
  case "tool-request": {
3466
3594
  const { requestID, data } = event;
3595
+ if (ss.processedRequestIDs.has(requestID)) break;
3596
+ ss.processedRequestIDs.add(requestID);
3467
3597
  let parsedArgs;
3468
3598
  try {
3469
3599
  JSON.parse(data.args);
@@ -3510,6 +3640,8 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3510
3640
  break;
3511
3641
  }
3512
3642
  case "builtin-tool-request": {
3643
+ if (ss.processedRequestIDs.has(event.requestID)) break;
3644
+ ss.processedRequestIDs.add(event.requestID);
3513
3645
  const mapped = mapBuiltinTool(event.toolName, event.data, availableToolNames);
3514
3646
  const mappedArgs = JSON.stringify(mapped.args);
3515
3647
  if (ss.activeTextBlockId) {
@@ -3550,6 +3682,26 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3550
3682
  });
3551
3683
  break;
3552
3684
  }
3685
+ case "approval-required": {
3686
+ ss.approvalPending = true;
3687
+ this.approveAndResume(
3688
+ ss,
3689
+ event.tools,
3690
+ startReq,
3691
+ controller,
3692
+ toolExecutor,
3693
+ nextTextId,
3694
+ availableToolNames
3695
+ ).catch(() => {
3696
+ ss.approvalPending = false;
3697
+ if (ss.deferredClose) {
3698
+ const close = ss.deferredClose;
3699
+ ss.deferredClose = null;
3700
+ close();
3701
+ }
3702
+ });
3703
+ break;
3704
+ }
3553
3705
  case "completed": {
3554
3706
  if (ss.activeTextBlockId) {
3555
3707
  controller.enqueue({ type: "text-end", id: ss.activeTextBlockId });
@@ -3561,14 +3713,14 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3561
3713
  const outputTokens = Math.ceil(ss.streamedOutputChars / 4);
3562
3714
  controller.enqueue({
3563
3715
  type: "finish",
3564
- finishReason: "stop",
3565
- usage: { inputTokens, outputTokens, totalTokens: inputTokens + outputTokens }
3716
+ finishReason: this.createFinishReason("stop", "completed"),
3717
+ usage: this.createUsage({ inputTotal: inputTokens, outputTotal: outputTokens })
3566
3718
  });
3567
3719
  ss.streamClosed = true;
3568
3720
  controller.close();
3569
3721
  this.cleanupClient(ss);
3570
3722
  };
3571
- if (ss.pendingToolCount > 0) {
3723
+ if (ss.pendingToolCount > 0 || ss.approvalPending) {
3572
3724
  ss.deferredClose = doCompleteClose;
3573
3725
  } else {
3574
3726
  ss.deferredClose = null;
@@ -3619,15 +3771,15 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3619
3771
  const outTok = Math.ceil(ss.streamedOutputChars / 4);
3620
3772
  controller.enqueue({
3621
3773
  type: "finish",
3622
- finishReason: "stop",
3623
- usage: { inputTokens: inTok, outputTokens: outTok, totalTokens: inTok + outTok }
3774
+ finishReason: this.createFinishReason("stop", "closed"),
3775
+ usage: this.createUsage({ inputTotal: inTok, outputTotal: outTok })
3624
3776
  });
3625
3777
  ss.streamClosed = true;
3626
3778
  controller.close();
3627
3779
  this.cleanupClient(ss);
3628
3780
  }
3629
3781
  };
3630
- if (ss.pendingToolCount > 0) {
3782
+ if (ss.pendingToolCount > 0 || ss.approvalPending) {
3631
3783
  ss.deferredClose = doClose;
3632
3784
  } else {
3633
3785
  ss.deferredClose = null;
@@ -3737,8 +3889,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3737
3889
  toolCallId: requestID,
3738
3890
  toolName,
3739
3891
  result: errorText,
3740
- isError: true,
3741
- providerExecuted: true
3892
+ isError: true
3742
3893
  });
3743
3894
  } else {
3744
3895
  safeEnqueue({
@@ -3750,8 +3901,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3750
3901
  title: toolTitle,
3751
3902
  metadata: toolMetadata
3752
3903
  },
3753
- isError: false,
3754
- providerExecuted: true
3904
+ isError: false
3755
3905
  });
3756
3906
  }
3757
3907
  } else {
@@ -3762,8 +3912,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3762
3912
  toolCallId: requestID,
3763
3913
  toolName,
3764
3914
  result: errorMsg,
3765
- isError: true,
3766
- providerExecuted: true
3915
+ isError: true
3767
3916
  });
3768
3917
  }
3769
3918
  } catch (error) {
@@ -3775,8 +3924,7 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3775
3924
  toolCallId: requestID,
3776
3925
  toolName,
3777
3926
  result: errorMsg,
3778
- isError: true,
3779
- providerExecuted: true
3927
+ isError: true
3780
3928
  });
3781
3929
  } finally {
3782
3930
  ss.pendingToolCount--;
@@ -3807,6 +3955,60 @@ var GitLabWorkflowLanguageModel = class _GitLabWorkflowLanguageModel {
3807
3955
  this.persistedAgentEmitted.clear();
3808
3956
  }
3809
3957
  }
3958
+ async approveAndResume(ss, tools, startReq, controller, toolExecutor, nextTextId, availableToolNames) {
3959
+ const handler = this.workflowOptions.approvalHandler;
3960
+ if (!handler || !startReq) {
3961
+ ss.approvalPending = false;
3962
+ if (ss.deferredClose) {
3963
+ const close = ss.deferredClose;
3964
+ ss.deferredClose = null;
3965
+ close();
3966
+ }
3967
+ return;
3968
+ }
3969
+ let decision;
3970
+ try {
3971
+ decision = await handler(tools);
3972
+ } catch (err) {
3973
+ ss.approvalPending = false;
3974
+ if (!ss.streamClosed) controller.error(err);
3975
+ return;
3976
+ }
3977
+ ss.approvalPending = false;
3978
+ this.cleanupClient(ss, false);
3979
+ const approval = decision.approved ? { approval: { tool_name: tools[0]?.name, tool_args_json: tools[0]?.args } } : { rejection: { message: decision.message ?? "User rejected" } };
3980
+ const newStartReq = { ...startReq, approval };
3981
+ const newClient = new GitLabWorkflowClient();
3982
+ this.activeClients.add(newClient);
3983
+ ss.activeClient = newClient;
3984
+ const modelRef = await this.resolveModelRef();
3985
+ try {
3986
+ await newClient.connect(
3987
+ {
3988
+ instanceUrl: this.config.instanceUrl,
3989
+ modelRef,
3990
+ headers: this.config.getHeaders(),
3991
+ projectId: this.workflowOptions.projectId,
3992
+ namespaceId: this.workflowOptions.namespaceId,
3993
+ rootNamespaceId: this.workflowOptions.rootNamespaceId
3994
+ },
3995
+ (event) => this.handleWorkflowEvent(
3996
+ ss,
3997
+ event,
3998
+ controller,
3999
+ newClient,
4000
+ toolExecutor,
4001
+ nextTextId,
4002
+ availableToolNames,
4003
+ newStartReq
4004
+ )
4005
+ );
4006
+ newClient.sendStartRequest(newStartReq);
4007
+ } catch (err) {
4008
+ this.cleanupClient(ss, true);
4009
+ if (!ss.streamClosed) controller.error(err);
4010
+ }
4011
+ }
3810
4012
  // ---------------------------------------------------------------------------
3811
4013
  // Workflow metadata
3812
4014
  // ---------------------------------------------------------------------------
@@ -4375,13 +4577,13 @@ function createGitLab(options = {}) {
4375
4577
  return createAgenticChatModel(modelId);
4376
4578
  };
4377
4579
  const provider = Object.assign((modelId) => createDefaultModel(modelId), {
4378
- specificationVersion: "v2",
4580
+ specificationVersion: "v3",
4379
4581
  languageModel: createDefaultModel,
4380
4582
  chat: createDefaultModel,
4381
4583
  agenticChat: createAgenticChatModel,
4382
4584
  workflowChat: createWorkflowChatModel
4383
4585
  });
4384
- provider.textEmbeddingModel = (modelId) => {
4586
+ provider.embeddingModel = (modelId) => {
4385
4587
  throw new GitLabError({
4386
4588
  message: `GitLab provider does not support text embedding models. Model ID: ${modelId}`
4387
4589
  });