@gitlab/gitlab-ai-provider 3.4.0 → 3.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +5 -0
- package/dist/gitlab-gitlab-ai-provider-3.4.1.tgz +0 -0
- package/dist/index.d.mts +15 -0
- package/dist/index.d.ts +15 -0
- package/dist/index.js +111 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +111 -1
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/dist/gitlab-gitlab-ai-provider-3.4.0.tgz +0 -0
package/dist/index.mjs
CHANGED
|
@@ -38,6 +38,17 @@ var GitLabError = class _GitLabError extends Error {
|
|
|
38
38
|
isServerError() {
|
|
39
39
|
return this.statusCode !== void 0 && this.statusCode >= 500;
|
|
40
40
|
}
|
|
41
|
+
/**
|
|
42
|
+
* Check if this error is a context overflow error (prompt too long).
|
|
43
|
+
* These errors occur when the conversation exceeds the model's token limit.
|
|
44
|
+
*/
|
|
45
|
+
isContextOverflowError() {
|
|
46
|
+
if (this.statusCode !== 400) {
|
|
47
|
+
return false;
|
|
48
|
+
}
|
|
49
|
+
const message = this.message?.toLowerCase() || "";
|
|
50
|
+
return message.includes("context overflow") || message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum");
|
|
51
|
+
}
|
|
41
52
|
};
|
|
42
53
|
|
|
43
54
|
// src/gitlab-direct-access.ts
|
|
@@ -208,6 +219,21 @@ var GitLabAnthropicLanguageModel = class {
|
|
|
208
219
|
}
|
|
209
220
|
return false;
|
|
210
221
|
}
|
|
222
|
+
/**
|
|
223
|
+
* Check if an error is a context overflow error (prompt too long)
|
|
224
|
+
* These should NOT trigger token refresh and should be reported to the user.
|
|
225
|
+
*/
|
|
226
|
+
isContextOverflowError(error) {
|
|
227
|
+
if (error instanceof Anthropic.APIError) {
|
|
228
|
+
if (error.status === 400) {
|
|
229
|
+
const message = error.message?.toLowerCase() || "";
|
|
230
|
+
if (message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum")) {
|
|
231
|
+
return true;
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
}
|
|
235
|
+
return false;
|
|
236
|
+
}
|
|
211
237
|
/**
|
|
212
238
|
* Convert AI SDK tools to Anthropic tool format
|
|
213
239
|
*/
|
|
@@ -385,6 +411,14 @@ var GitLabAnthropicLanguageModel = class {
|
|
|
385
411
|
warnings: []
|
|
386
412
|
};
|
|
387
413
|
} catch (error) {
|
|
414
|
+
if (this.isContextOverflowError(error)) {
|
|
415
|
+
const apiError = error;
|
|
416
|
+
throw new GitLabError({
|
|
417
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
418
|
+
statusCode: 400,
|
|
419
|
+
cause: error
|
|
420
|
+
});
|
|
421
|
+
}
|
|
388
422
|
if (!isRetry && this.isTokenError(error)) {
|
|
389
423
|
this.directAccessClient.invalidateToken();
|
|
390
424
|
return this.doGenerateWithRetry(options, true);
|
|
@@ -392,6 +426,7 @@ var GitLabAnthropicLanguageModel = class {
|
|
|
392
426
|
if (error instanceof Anthropic.APIError) {
|
|
393
427
|
throw new GitLabError({
|
|
394
428
|
message: `Anthropic API error: ${error.message}`,
|
|
429
|
+
statusCode: error.status,
|
|
395
430
|
cause: error
|
|
396
431
|
});
|
|
397
432
|
}
|
|
@@ -572,6 +607,19 @@ var GitLabAnthropicLanguageModel = class {
|
|
|
572
607
|
});
|
|
573
608
|
}
|
|
574
609
|
}
|
|
610
|
+
if (self.isContextOverflowError(error)) {
|
|
611
|
+
const apiError = error;
|
|
612
|
+
controller.enqueue({
|
|
613
|
+
type: "error",
|
|
614
|
+
error: new GitLabError({
|
|
615
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
616
|
+
statusCode: 400,
|
|
617
|
+
cause: error
|
|
618
|
+
})
|
|
619
|
+
});
|
|
620
|
+
controller.close();
|
|
621
|
+
return;
|
|
622
|
+
}
|
|
575
623
|
if (!isRetry && self.isTokenError(error)) {
|
|
576
624
|
self.directAccessClient.invalidateToken();
|
|
577
625
|
controller.enqueue({
|
|
@@ -589,6 +637,7 @@ var GitLabAnthropicLanguageModel = class {
|
|
|
589
637
|
type: "error",
|
|
590
638
|
error: new GitLabError({
|
|
591
639
|
message: `Anthropic API error: ${error.message}`,
|
|
640
|
+
statusCode: error.status,
|
|
592
641
|
cause: error
|
|
593
642
|
})
|
|
594
643
|
});
|
|
@@ -713,6 +762,21 @@ var GitLabOpenAILanguageModel = class {
|
|
|
713
762
|
}
|
|
714
763
|
return false;
|
|
715
764
|
}
|
|
765
|
+
/**
|
|
766
|
+
* Check if an error is a context overflow error (prompt too long)
|
|
767
|
+
* These should NOT trigger token refresh and should be reported to the user.
|
|
768
|
+
*/
|
|
769
|
+
isContextOverflowError(error) {
|
|
770
|
+
if (error instanceof OpenAI.APIError) {
|
|
771
|
+
if (error.status === 400) {
|
|
772
|
+
const message = error.message?.toLowerCase() || "";
|
|
773
|
+
if (message.includes("prompt is too long") || message.includes("prompt too long") || message.includes("tokens") && message.includes("maximum")) {
|
|
774
|
+
return true;
|
|
775
|
+
}
|
|
776
|
+
}
|
|
777
|
+
}
|
|
778
|
+
return false;
|
|
779
|
+
}
|
|
716
780
|
convertTools(tools) {
|
|
717
781
|
if (!tools || tools.length === 0) {
|
|
718
782
|
return void 0;
|
|
@@ -993,6 +1057,14 @@ var GitLabOpenAILanguageModel = class {
|
|
|
993
1057
|
warnings: []
|
|
994
1058
|
};
|
|
995
1059
|
} catch (error) {
|
|
1060
|
+
if (this.isContextOverflowError(error)) {
|
|
1061
|
+
const apiError = error;
|
|
1062
|
+
throw new GitLabError({
|
|
1063
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
1064
|
+
statusCode: 400,
|
|
1065
|
+
cause: error
|
|
1066
|
+
});
|
|
1067
|
+
}
|
|
996
1068
|
if (!isRetry && this.isTokenError(error)) {
|
|
997
1069
|
this.directAccessClient.invalidateToken();
|
|
998
1070
|
return this.doGenerateWithChatApi(options, true);
|
|
@@ -1000,6 +1072,7 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1000
1072
|
if (error instanceof OpenAI.APIError) {
|
|
1001
1073
|
throw new GitLabError({
|
|
1002
1074
|
message: `OpenAI API error: ${error.message}`,
|
|
1075
|
+
statusCode: error.status,
|
|
1003
1076
|
cause: error
|
|
1004
1077
|
});
|
|
1005
1078
|
}
|
|
@@ -1055,6 +1128,14 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1055
1128
|
warnings: []
|
|
1056
1129
|
};
|
|
1057
1130
|
} catch (error) {
|
|
1131
|
+
if (this.isContextOverflowError(error)) {
|
|
1132
|
+
const apiError = error;
|
|
1133
|
+
throw new GitLabError({
|
|
1134
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
1135
|
+
statusCode: 400,
|
|
1136
|
+
cause: error
|
|
1137
|
+
});
|
|
1138
|
+
}
|
|
1058
1139
|
if (!isRetry && this.isTokenError(error)) {
|
|
1059
1140
|
this.directAccessClient.invalidateToken();
|
|
1060
1141
|
return this.doGenerateWithResponsesApi(options, true);
|
|
@@ -1062,6 +1143,7 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1062
1143
|
if (error instanceof OpenAI.APIError) {
|
|
1063
1144
|
throw new GitLabError({
|
|
1064
1145
|
message: `OpenAI API error: ${error.message}`,
|
|
1146
|
+
statusCode: error.status,
|
|
1065
1147
|
cause: error
|
|
1066
1148
|
});
|
|
1067
1149
|
}
|
|
@@ -1180,6 +1262,19 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1180
1262
|
controller.enqueue({ type: "finish", finishReason, usage });
|
|
1181
1263
|
controller.close();
|
|
1182
1264
|
} catch (error) {
|
|
1265
|
+
if (self.isContextOverflowError(error)) {
|
|
1266
|
+
const apiError = error;
|
|
1267
|
+
controller.enqueue({
|
|
1268
|
+
type: "error",
|
|
1269
|
+
error: new GitLabError({
|
|
1270
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
1271
|
+
statusCode: 400,
|
|
1272
|
+
cause: error
|
|
1273
|
+
})
|
|
1274
|
+
});
|
|
1275
|
+
controller.close();
|
|
1276
|
+
return;
|
|
1277
|
+
}
|
|
1183
1278
|
if (!isRetry && self.isTokenError(error)) {
|
|
1184
1279
|
self.directAccessClient.invalidateToken();
|
|
1185
1280
|
controller.enqueue({
|
|
@@ -1194,6 +1289,7 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1194
1289
|
type: "error",
|
|
1195
1290
|
error: new GitLabError({
|
|
1196
1291
|
message: `OpenAI API error: ${error.message}`,
|
|
1292
|
+
statusCode: error.status,
|
|
1197
1293
|
cause: error
|
|
1198
1294
|
})
|
|
1199
1295
|
});
|
|
@@ -1320,6 +1416,19 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1320
1416
|
controller.enqueue({ type: "finish", finishReason, usage });
|
|
1321
1417
|
controller.close();
|
|
1322
1418
|
} catch (error) {
|
|
1419
|
+
if (self.isContextOverflowError(error)) {
|
|
1420
|
+
const apiError = error;
|
|
1421
|
+
controller.enqueue({
|
|
1422
|
+
type: "error",
|
|
1423
|
+
error: new GitLabError({
|
|
1424
|
+
message: `Context overflow: ${apiError.message}. Please start a new session or use /compact to reduce context.`,
|
|
1425
|
+
statusCode: 400,
|
|
1426
|
+
cause: error
|
|
1427
|
+
})
|
|
1428
|
+
});
|
|
1429
|
+
controller.close();
|
|
1430
|
+
return;
|
|
1431
|
+
}
|
|
1323
1432
|
if (!isRetry && self.isTokenError(error)) {
|
|
1324
1433
|
self.directAccessClient.invalidateToken();
|
|
1325
1434
|
controller.enqueue({
|
|
@@ -1334,6 +1443,7 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1334
1443
|
type: "error",
|
|
1335
1444
|
error: new GitLabError({
|
|
1336
1445
|
message: `OpenAI API error: ${error.message}`,
|
|
1446
|
+
statusCode: error.status,
|
|
1337
1447
|
cause: error
|
|
1338
1448
|
})
|
|
1339
1449
|
});
|
|
@@ -1511,7 +1621,7 @@ var GitLabOAuthManager = class {
|
|
|
1511
1621
|
};
|
|
1512
1622
|
|
|
1513
1623
|
// src/version.ts
|
|
1514
|
-
var VERSION = true ? "3.
|
|
1624
|
+
// Build-time package version constant; must match package.json (this release
// is 3.4.1 per the diff header, but the published artifact still reported
// "3.4.0" — corrected to agree with the actual package version).
var VERSION = true ? "3.4.1" : "0.0.0-dev";
|
|
1515
1625
|
|
|
1516
1626
|
// src/gitlab-provider.ts
|
|
1517
1627
|
import * as fs from "fs";
|