@output.ai/llm 0.2.11 → 0.2.12

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@output.ai/llm",
-  "version": "0.2.11",
+  "version": "0.2.12",
   "description": "Framework abstraction to interact with LLM models",
   "type": "module",
   "main": "src/index.js",
package/src/ai_sdk.js CHANGED
@@ -51,6 +51,9 @@ const aiSdkOptionsFromPrompt = prompt => {
 
   if ( prompt.config.maxTokens ) {
     options.maxOutputTokens = prompt.config.maxTokens;
+  } else if ( prompt.config.provider === 'anthropic' ) {
+    // Override Anthropic SDK's low 4096 default - Claude models support up to 64k output tokens
+    options.maxOutputTokens = 64000;
   }
 
   const tools = loadTools( prompt );
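Functionally, the new branch only affects prompts that set no explicit maxTokens: for those, the anthropic provider now gets a 64000-token output ceiling instead of the Anthropic SDK's 4096 default, while other providers keep their SDK defaults. A minimal sketch of the resolution order, assuming the { provider, maxTokens? } config shape visible in the hunk above (resolveMaxOutputTokens is an illustrative name, not part of the package):

// Illustrative sketch only - mirrors the branch added in 0.2.12.
const resolveMaxOutputTokens = config => {
  if ( config.maxTokens ) {
    return config.maxTokens; // an explicit limit always wins
  }
  if ( config.provider === 'anthropic' ) {
    return 64000; // lift the Anthropic SDK's 4096 default
  }
  return undefined; // other providers keep their SDK defaults
};

resolveMaxOutputTokens( { provider: 'anthropic' } );                  // 64000
resolveMaxOutputTokens( { provider: 'anthropic', maxTokens: 1024 } ); // 1024
resolveMaxOutputTokens( { provider: 'openai' } );                     // undefined

Note the design choice: an explicit maxTokens is always honored, so the 64000 default never overrides a caller-supplied limit.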
@@ -221,6 +221,7 @@ describe( 'ai_sdk', () => {
     expect( aiFns.generateText ).toHaveBeenCalledWith( {
       model: 'MODEL',
       messages: promptWithProviderOptions.messages,
+      maxOutputTokens: 64000,
       providerOptions: {
         thinking: {
           type: 'enabled',
@@ -339,6 +340,7 @@ describe( 'ai_sdk', () => {
       enum: [ 'A', 'B', 'C' ],
       model: 'MODEL',
       messages: promptWithMixedOptions.messages,
+      maxOutputTokens: 64000,
       providerOptions: {
         thinking: {
           type: 'enabled',
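The two test hunks pin the same behavior at the call site: fixtures that use the anthropic provider without an explicit maxTokens now expect maxOutputTokens: 64000 in the payload handed to the AI SDK. A hedged sketch of that kind of assertion, assuming a Jest/Vitest-style suite in which aiFns.generateText is mocked; runPrompt is a hypothetical wrapper standing in for whatever exported function builds its options via aiSdkOptionsFromPrompt:

it( 'defaults anthropic output tokens to 64000 when maxTokens is unset', async () => {
  await runPrompt( promptWithProviderOptions ); // fixture name taken from the hunk above

  expect( aiFns.generateText ).toHaveBeenCalledWith(
    expect.objectContaining( { maxOutputTokens: 64000 } )
  );
} );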