@fairyhunter13/ai-anthropic 3.0.76-fork.1 → 3.0.76-fork.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +4 -5
- package/docs/05-anthropic.mdx +7 -6
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
|
@@ -747,7 +747,6 @@
|
|
|
747
747
|
- 2231e84: fix(anthropic): implement temperature/topP mutual exclusivity
|
|
748
748
|
|
|
749
749
|
Resolves the Anthropic API breaking change where sampling parameters must use only `temperature` OR `top_p`, not both. When both parameters are provided:
|
|
750
|
-
|
|
751
750
|
- Temperature takes priority and topP is ignored
|
|
752
751
|
- A warning is added to inform users: "topP is not supported when temperature is set. topP is ignored."
|
|
753
752
|
- The validation only runs when thinking mode is not enabled (thinking mode has its own parameter validation)
|
|
@@ -813,13 +812,13 @@
|
|
|
813
812
|
Before
|
|
814
813
|
|
|
815
814
|
```ts
|
|
816
|
-
model.textEmbeddingModel(
|
|
815
|
+
model.textEmbeddingModel('my-model-id');
|
|
817
816
|
```
|
|
818
817
|
|
|
819
818
|
After
|
|
820
819
|
|
|
821
820
|
```ts
|
|
822
|
-
model.embeddingModel(
|
|
821
|
+
model.embeddingModel('my-model-id');
|
|
823
822
|
```
|
|
824
823
|
|
|
825
824
|
- f33a018: chore: add model ID for Haiku 4.5
|
|
@@ -1159,13 +1158,13 @@
|
|
|
1159
1158
|
Before
|
|
1160
1159
|
|
|
1161
1160
|
```ts
|
|
1162
|
-
model.textEmbeddingModel(
|
|
1161
|
+
model.textEmbeddingModel('my-model-id');
|
|
1163
1162
|
```
|
|
1164
1163
|
|
|
1165
1164
|
After
|
|
1166
1165
|
|
|
1167
1166
|
```ts
|
|
1168
|
-
model.embeddingModel(
|
|
1167
|
+
model.embeddingModel('my-model-id');
|
|
1169
1168
|
```
|
|
1170
1169
|
|
|
1171
1170
|
- Updated dependencies [8d9e8ad]
|
package/docs/05-anthropic.mdx
CHANGED
|
@@ -149,7 +149,6 @@ The following optional provider options are available for Anthropic models:
|
|
|
149
149
|
- `structuredOutputMode` _"outputFormat" | "jsonTool" | "auto"_
|
|
150
150
|
|
|
151
151
|
Determines how structured outputs are generated. Optional.
|
|
152
|
-
|
|
153
152
|
- `"outputFormat"`: Use the `output_format` parameter to specify the structured output format.
|
|
154
153
|
- `"jsonTool"`: Use a special `"json"` tool to specify the structured output format.
|
|
155
154
|
- `"auto"`: Use `"outputFormat"` when supported, otherwise fall back to `"jsonTool"` (default).
|
|
@@ -157,7 +156,6 @@ The following optional provider options are available for Anthropic models:
|
|
|
157
156
|
- `metadata` _object_
|
|
158
157
|
|
|
159
158
|
Optional. Metadata to include with the request. See the [Anthropic API documentation](https://platform.claude.com/docs/en/api/messages/create) for details.
|
|
160
|
-
|
|
161
159
|
- `userId` _string_ - An external identifier for the end-user. Should be a UUID, hash, or other opaque identifier. Must not contain PII.
|
|
162
160
|
|
|
163
161
|
### Structured Outputs and Tool Input Streaming
|
|
@@ -356,9 +354,9 @@ console.log(text);
|
|
|
356
354
|
```
|
|
357
355
|
|
|
358
356
|
<Note>
|
|
359
|
-
If you stream reasoning to users with `claude-opus-4-7`, the default
|
|
360
|
-
cause a long pause before output begins. Set
|
|
361
|
-
progress during thinking.
|
|
357
|
+
If you stream reasoning to users with `claude-opus-4-7`, the default
|
|
358
|
+
`"omitted"` display will cause a long pause before output begins. Set
|
|
359
|
+
`display: "summarized"` to restore visible progress during thinking.
|
|
362
360
|
</Note>
|
|
363
361
|
|
|
364
362
|
#### Budget-Based Thinking
|
|
@@ -616,7 +614,10 @@ const result = await generateText({
|
|
|
616
614
|
});
|
|
617
615
|
|
|
618
616
|
console.log(result.text);
|
|
619
|
-
console.log(
|
|
617
|
+
console.log(
|
|
618
|
+
'Cache read tokens:',
|
|
619
|
+
result.usage.inputTokenDetails.cacheReadTokens,
|
|
620
|
+
);
|
|
620
621
|
console.log(
|
|
621
622
|
'Cache write tokens:',
|
|
622
623
|
result.usage.inputTokenDetails.cacheWriteTokens,
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@fairyhunter13/ai-anthropic",
|
|
3
|
-
"version": "3.0.76-fork.1",
|
|
3
|
+
"version": "3.0.76-fork.2",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"license": "Apache-2.0",
|
|
6
6
|
"sideEffects": false,
|
|
@@ -48,8 +48,8 @@
|
|
|
48
48
|
}
|
|
49
49
|
},
|
|
50
50
|
"dependencies": {
|
|
51
|
-
"@fairyhunter13/ai-provider": "3.0.10-fork.15",
|
|
52
|
-
"@fairyhunter13/ai-provider-utils": "4.0.27-fork.1"
|
|
51
|
+
"@fairyhunter13/ai-provider": "3.0.10-fork.16",
|
|
52
|
+
"@fairyhunter13/ai-provider-utils": "4.0.27-fork.2"
|
|
53
53
|
},
|
|
54
54
|
"devDependencies": {
|
|
55
55
|
"@ai-sdk/test-server": "workspace:*",
|