codeep 1.2.18 → 1.2.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -23,6 +23,7 @@ export interface ProviderConfig {
   }[];
   defaultModel: string;
   defaultProtocol: 'openai' | 'anthropic';
+  maxOutputTokens?: number;
   envKey?: string;
   subscribeUrl?: string;
   mcpEndpoints?: {
@@ -49,3 +50,8 @@ export declare function getProviderBaseUrl(providerId: string, protocol: 'openai
 export declare function getProviderAuthHeader(providerId: string, protocol: 'openai' | 'anthropic'): 'Bearer' | 'x-api-key';
 export declare function getProviderMcpEndpoints(providerId: string): ProviderConfig['mcpEndpoints'] | null;
 export declare function supportsNativeTools(providerId: string, protocol: 'openai' | 'anthropic'): boolean;
+/**
+ * Returns the effective max output tokens for a provider, capped by the provider's limit.
+ * Falls back to the requested value if no provider limit is set.
+ */
+export declare function getEffectiveMaxTokens(providerId: string, requested: number): number;
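Per its doc comment, the new declaration is a pure clamp on the requested token budget. A hedged sketch of the intended contract (the 8192 cap and the 'deepseek' provider id are taken from the hunks below):

// Expected behavior, per the doc comment above:
getEffectiveMaxTokens('deepseek', 16384); // -> 8192 (clamped to the provider limit)
getEffectiveMaxTokens('deepseek', 4096);  // -> 4096 (already under the limit)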
@@ -124,6 +124,7 @@ export const PROVIDERS = {
   ],
   defaultModel: 'deepseek-chat',
   defaultProtocol: 'openai',
+  maxOutputTokens: 8192, // DeepSeek API limit
   envKey: 'DEEPSEEK_API_KEY',
   subscribeUrl: 'https://platform.deepseek.com/sign_up',
   },
@@ -185,3 +186,13 @@ export function supportsNativeTools(providerId, protocol) {
   return false;
   return provider.protocols[protocol]?.supportsNativeTools ?? true; // Default to true
 }
+/**
+ * Returns the effective max output tokens for a provider, capped by the provider's limit.
+ * Falls back to the requested value if no provider limit is set.
+ */
+export function getEffectiveMaxTokens(providerId, requested) {
+  const provider = PROVIDERS[providerId];
+  if (!provider?.maxOutputTokens)
+    return requested;
+  return Math.min(requested, provider.maxOutputTokens);
+}
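The optional chaining means an unknown provider id and a provider entry without maxOutputTokens both fall through to the requested value. A self-contained sketch for illustration (the two-entry PROVIDERS table here is hypothetical, not the package's full table):

// Hypothetical miniature of the shipped function and its lookup table.
const PROVIDERS = {
  deepseek: { maxOutputTokens: 8192 }, // cap taken from the hunk above
  uncapped: {},                        // hypothetical entry with no limit
};

function getEffectiveMaxTokens(providerId, requested) {
  const provider = PROVIDERS[providerId];
  if (!provider?.maxOutputTokens)
    return requested; // unknown id or no cap: pass the request through
  return Math.min(requested, provider.maxOutputTokens);
}

console.log(getEffectiveMaxTokens('deepseek', 16384)); // 8192
console.log(getEffectiveMaxTokens('uncapped', 16384)); // 16384
console.log(getEffectiveMaxTokens('missing', 16384));  // 16384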
@@ -14,7 +14,7 @@
 import { existsSync, readFileSync } from 'fs';
 import { join } from 'path';
 import { config, getApiKey } from '../config/index.js';
-import { getProviderBaseUrl, getProviderAuthHeader, supportsNativeTools } from '../config/providers.js';
+import { getProviderBaseUrl, getProviderAuthHeader, supportsNativeTools, getEffectiveMaxTokens } from '../config/providers.js';
 import { recordTokenUsage, extractOpenAIUsage, extractAnthropicUsage } from './tokenTracker.js';
 import { parseOpenAIToolCalls, parseAnthropicToolCalls, parseToolCalls } from './toolParsing.js';
 import { formatToolDefinitions, getOpenAITools, getAnthropicTools } from './tools.js';
@@ -174,7 +174,7 @@ export async function agentChat(messages, systemPrompt, onChunk, abortSignal, dy
   body = {
     model, messages: [{ role: 'system', content: systemPrompt }, ...messages],
     tools: getOpenAITools(), tool_choice: 'auto', stream: useStreaming,
-    temperature: config.get('temperature'), max_tokens: Math.max(config.get('maxTokens'), 16384),
+    temperature: config.get('temperature'), max_tokens: getEffectiveMaxTokens(providerId, Math.max(config.get('maxTokens'), 16384)),
   };
 }
 else {
@@ -182,7 +182,7 @@ export async function agentChat(messages, systemPrompt, onChunk, abortSignal, dy
   body = {
     model, system: systemPrompt, messages,
     tools: getAnthropicTools(), stream: useStreaming,
-    temperature: config.get('temperature'), max_tokens: Math.max(config.get('maxTokens'), 16384),
+    temperature: config.get('temperature'), max_tokens: getEffectiveMaxTokens(providerId, Math.max(config.get('maxTokens'), 16384)),
   };
 }
 const response = await fetch(endpoint, {
@@ -290,7 +290,7 @@ export async function agentChatFallback(messages, systemPrompt, onChunk, abortSi
   body = {
     model, messages: [{ role: 'system', content: fallbackPrompt }, ...messages],
     stream: Boolean(onChunk), temperature: config.get('temperature'),
-    max_tokens: Math.max(config.get('maxTokens'), 16384),
+    max_tokens: getEffectiveMaxTokens(providerId, Math.max(config.get('maxTokens'), 16384)),
   };
 }
 else {
@@ -303,7 +303,7 @@ export async function agentChatFallback(messages, systemPrompt, onChunk, abortSi
       ...messages,
     ],
     stream: Boolean(onChunk), temperature: config.get('temperature'),
-    max_tokens: Math.max(config.get('maxTokens'), 16384),
+    max_tokens: getEffectiveMaxTokens(providerId, Math.max(config.get('maxTokens'), 16384)),
   };
 }
 const response = await fetch(endpoint, {
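Every call site composes the same expression: the configured maxTokens is first raised to a 16384 floor, then clamped by the provider cap. A sketch of the net arithmetic for DeepSeek (the configured value of 4096 is illustrative):

// Hypothetical walk-through of the composed expression:
const floored = Math.max(4096, 16384);                        // 16384: the floor wins
const effective = getEffectiveMaxTokens('deepseek', floored); // 8192: the cap wins
// Because the 16384 floor always exceeds DeepSeek's 8192 cap, these call sites
// now send max_tokens: 8192 for DeepSeek instead of a value over the API limit.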
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "codeep",
-  "version": "1.2.18",
+  "version": "1.2.19",
   "description": "AI-powered coding assistant built for the terminal. Multiple LLM providers, project-aware context, and a seamless development workflow.",
   "type": "module",
   "main": "dist/index.js",