gitlab-ai-provider 6.5.0 → 6.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +5 -0
- package/dist/gitlab-ai-provider-6.6.0.tgz +0 -0
- package/dist/index.d.mts +11 -1
- package/dist/index.d.ts +11 -1
- package/dist/index.js +29 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +29 -3
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/dist/gitlab-ai-provider-6.5.0.tgz +0 -0
package/CHANGELOG.md
CHANGED
|
@@ -2,6 +2,11 @@
|
|
|
2
2
|
|
|
3
3
|
All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
|
|
4
4
|
|
|
5
|
+
## 6.6.0 (2026-04-17)
|
|
6
|
+
|
|
7
|
+
- Merge branch 'feat/anthropic-prompt-caching' into 'main' ([d2125eb](https://gitlab.com/vglafirov/gitlab-ai-provider/commit/d2125eb)), closes [#6](https://gitlab.com/vglafirov/gitlab-ai-provider/issues/6)
|
|
8
|
+
- feat(anthropic): enable prompt caching via cache_control on system prompt ([376556f](https://gitlab.com/vglafirov/gitlab-ai-provider/commit/376556f))
|
|
9
|
+
|
|
5
10
|
## 6.5.0 (2026-04-17)
|
|
6
11
|
|
|
7
12
|
- Merge branch 'add-opus-4-7-model-mapping' into 'main' ([f95e21a](https://gitlab.com/vglafirov/gitlab-ai-provider/commit/f95e21a))
|
|
Binary file
|
package/dist/index.d.mts
CHANGED
|
@@ -79,7 +79,17 @@ declare class GitLabAnthropicLanguageModel implements LanguageModelV3 {
|
|
|
79
79
|
*/
|
|
80
80
|
private convertToolChoice;
|
|
81
81
|
/**
|
|
82
|
-
* Convert AI SDK prompt to Anthropic messages format
|
|
82
|
+
* Convert AI SDK prompt to Anthropic messages format.
|
|
83
|
+
*
|
|
84
|
+
* Cache breakpoints (`cache_control: { type: "ephemeral" }`) are placed on:
|
|
85
|
+
* 1. The system prompt content block — static across all turns.
|
|
86
|
+
* 2. The last content block of the second-to-last message — the boundary
|
|
87
|
+
* between conversation history and the current turn.
|
|
88
|
+
*
|
|
89
|
+
* This lets Anthropic cache the system prompt and the accumulated
|
|
90
|
+
* conversation prefix, so each new turn only pays for the new content.
|
|
91
|
+
*
|
|
92
|
+
* @see https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
|
|
83
93
|
*/
|
|
84
94
|
private convertPrompt;
|
|
85
95
|
/**
|
package/dist/index.d.ts
CHANGED
|
@@ -79,7 +79,17 @@ declare class GitLabAnthropicLanguageModel implements LanguageModelV3 {
|
|
|
79
79
|
*/
|
|
80
80
|
private convertToolChoice;
|
|
81
81
|
/**
|
|
82
|
-
* Convert AI SDK prompt to Anthropic messages format
|
|
82
|
+
* Convert AI SDK prompt to Anthropic messages format.
|
|
83
|
+
*
|
|
84
|
+
* Cache breakpoints (`cache_control: { type: "ephemeral" }`) are placed on:
|
|
85
|
+
* 1. The system prompt content block — static across all turns.
|
|
86
|
+
* 2. The last content block of the second-to-last message — the boundary
|
|
87
|
+
* between conversation history and the current turn.
|
|
88
|
+
*
|
|
89
|
+
* This lets Anthropic cache the system prompt and the accumulated
|
|
90
|
+
* conversation prefix, so each new turn only pays for the new content.
|
|
91
|
+
*
|
|
92
|
+
* @see https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
|
|
83
93
|
*/
|
|
84
94
|
private convertPrompt;
|
|
85
95
|
/**
|
package/dist/index.js
CHANGED
|
@@ -353,7 +353,17 @@ var GitLabAnthropicLanguageModel = class {
|
|
|
353
353
|
}
|
|
354
354
|
}
|
|
355
355
|
/**
|
|
356
|
-
* Convert AI SDK prompt to Anthropic messages format
|
|
356
|
+
* Convert AI SDK prompt to Anthropic messages format.
|
|
357
|
+
*
|
|
358
|
+
* Cache breakpoints (`cache_control: { type: "ephemeral" }`) are placed on:
|
|
359
|
+
* 1. The system prompt content block — static across all turns.
|
|
360
|
+
* 2. The last content block of the second-to-last message — the boundary
|
|
361
|
+
* between conversation history and the current turn.
|
|
362
|
+
*
|
|
363
|
+
* This lets Anthropic cache the system prompt and the accumulated
|
|
364
|
+
* conversation prefix, so each new turn only pays for the new content.
|
|
365
|
+
*
|
|
366
|
+
* @see https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
|
|
357
367
|
*/
|
|
358
368
|
convertPrompt(prompt) {
|
|
359
369
|
let systemMessage;
|
|
@@ -425,7 +435,23 @@ ${message.content}` : message.content;
|
|
|
425
435
|
}
|
|
426
436
|
}
|
|
427
437
|
}
|
|
428
|
-
|
|
438
|
+
const system = systemMessage ? [
|
|
439
|
+
{
|
|
440
|
+
type: "text",
|
|
441
|
+
text: systemMessage,
|
|
442
|
+
cache_control: { type: "ephemeral" }
|
|
443
|
+
}
|
|
444
|
+
] : void 0;
|
|
445
|
+
if (messages.length >= 2) {
|
|
446
|
+
const penultimate = messages[messages.length - 2];
|
|
447
|
+
if (Array.isArray(penultimate.content)) {
|
|
448
|
+
const lastBlock = penultimate.content[penultimate.content.length - 1];
|
|
449
|
+
lastBlock.cache_control = {
|
|
450
|
+
type: "ephemeral"
|
|
451
|
+
};
|
|
452
|
+
}
|
|
453
|
+
}
|
|
454
|
+
return { system, messages };
|
|
429
455
|
}
|
|
430
456
|
/**
|
|
431
457
|
* Convert Anthropic finish reason to AI SDK format
|
|
@@ -1654,7 +1680,7 @@ var GitLabOpenAILanguageModel = class {
|
|
|
1654
1680
|
var import_isomorphic_ws = __toESM(require("isomorphic-ws"));
|
|
1655
1681
|
|
|
1656
1682
|
// src/version.ts
|
|
1657
|
-
var VERSION = true ? "6.5.0" : "0.0.0-dev";
|
|
1683
|
+
var VERSION = true ? "6.6.0" : "0.0.0-dev";
|
|
1658
1684
|
|
|
1659
1685
|
// src/gitlab-workflow-types.ts
|
|
1660
1686
|
var WorkflowType = /* @__PURE__ */ ((WorkflowType2) => {
|