codeep 1.2.17 → 1.2.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +20 -7
- package/dist/api/index.d.ts +7 -0
- package/dist/api/index.js +21 -17
- package/dist/renderer/App.d.ts +1 -5
- package/dist/renderer/App.js +106 -486
- package/dist/renderer/agentExecution.d.ts +36 -0
- package/dist/renderer/agentExecution.js +394 -0
- package/dist/renderer/commands.d.ts +16 -0
- package/dist/renderer/commands.js +838 -0
- package/dist/renderer/handlers.d.ts +87 -0
- package/dist/renderer/handlers.js +260 -0
- package/dist/renderer/highlight.d.ts +18 -0
- package/dist/renderer/highlight.js +130 -0
- package/dist/renderer/main.d.ts +4 -2
- package/dist/renderer/main.js +103 -1550
- package/dist/utils/agent.d.ts +5 -15
- package/dist/utils/agent.js +9 -693
- package/dist/utils/agentChat.d.ts +46 -0
- package/dist/utils/agentChat.js +343 -0
- package/dist/utils/agentStream.d.ts +23 -0
- package/dist/utils/agentStream.js +216 -0
- package/dist/utils/keychain.js +3 -2
- package/dist/utils/learning.js +9 -3
- package/dist/utils/mcpIntegration.d.ts +61 -0
- package/dist/utils/mcpIntegration.js +154 -0
- package/dist/utils/project.js +8 -3
- package/dist/utils/skills.js +21 -11
- package/dist/utils/smartContext.d.ts +4 -0
- package/dist/utils/smartContext.js +51 -14
- package/dist/utils/toolExecution.d.ts +27 -0
- package/dist/utils/toolExecution.js +525 -0
- package/dist/utils/toolParsing.d.ts +18 -0
- package/dist/utils/toolParsing.js +302 -0
- package/dist/utils/tools.d.ts +11 -24
- package/dist/utils/tools.js +22 -1187
- package/package.json +3 -1
- package/dist/config/config.test.d.ts +0 -1
- package/dist/config/config.test.js +0 -157
- package/dist/config/providers.test.d.ts +0 -1
- package/dist/config/providers.test.js +0 -187
- package/dist/hooks/index.d.ts +0 -4
- package/dist/hooks/index.js +0 -4
- package/dist/hooks/useAgent.d.ts +0 -29
- package/dist/hooks/useAgent.js +0 -148
- package/dist/utils/agent.test.d.ts +0 -1
- package/dist/utils/agent.test.js +0 -315
- package/dist/utils/git.test.d.ts +0 -1
- package/dist/utils/git.test.js +0 -193
- package/dist/utils/gitignore.test.d.ts +0 -1
- package/dist/utils/gitignore.test.js +0 -167
- package/dist/utils/project.test.d.ts +0 -1
- package/dist/utils/project.test.js +0 -212
- package/dist/utils/ratelimit.test.d.ts +0 -1
- package/dist/utils/ratelimit.test.js +0 -131
- package/dist/utils/retry.test.d.ts +0 -1
- package/dist/utils/retry.test.js +0 -163
- package/dist/utils/smartContext.test.d.ts +0 -1
- package/dist/utils/smartContext.test.js +0 -382
- package/dist/utils/tools.test.d.ts +0 -1
- package/dist/utils/tools.test.js +0 -681
- package/dist/utils/validation.test.d.ts +0 -1
- package/dist/utils/validation.test.js +0 -164
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
/**
 * Agent chat API layer and prompt building.
 *
 * Exported:
 *   loadProjectRules() — loads .codeep/rules.md or CODEEP.md
 *   formatChatHistoryForAgent() — trims history to fit context window
 *   getAgentSystemPrompt() — builds system prompt for native-tool mode
 *   getFallbackSystemPrompt() — builds system prompt for text-tool mode
 *   agentChat() — native tool-calling API call
 *   agentChatFallback() — text-based tool format fallback
 *   AgentChatResponse — response type (re-export from agentStream)
 *   TimeoutError — distinguishes timeout from user abort
 */
import { ProjectContext } from './project';
import { Message } from '../config/index';
import { AgentChatResponse } from './agentStream';
export { AgentChatResponse };
/**
 * Custom error class for timeout
 * Thrown instead of a plain AbortError so callers can tell a timeout
 * apart from a user-initiated abort.
 */
export declare class TimeoutError extends Error {
    constructor(message?: string);
}
/**
 * Load project rules from .codeep/rules.md or CODEEP.md
 * @param projectRoot Directory to search for the rules files.
 * @returns A formatted "## Project Rules" section, or '' when no rules exist.
 */
export declare function loadProjectRules(projectRoot: string): string;
/**
 * Format chat session history for inclusion in agent system prompt.
 * Keeps the most recent messages within a character budget.
 * @param history  Prior user/assistant messages (most recent last).
 * @param maxChars Character budget for the formatted history.
 */
export declare function formatChatHistoryForAgent(history?: Array<{
    role: 'user' | 'assistant';
    content: string;
}>, maxChars?: number): string;
/** Build the agent system prompt for providers with native tool calling. */
export declare function getAgentSystemPrompt(projectContext: ProjectContext): string;
/** Build the text-tool-mode prompt: the native prompt plus textual tool definitions. */
export declare function getFallbackSystemPrompt(projectContext: ProjectContext): string;
/**
 * Make a chat API call for agent mode with native tool support.
 * Falls back to agentChatFallback() if provider doesn't support tools.
 * @throws TimeoutError when the request exceeds its timeout budget.
 */
export declare function agentChat(messages: Message[], systemPrompt: string, onChunk?: (chunk: string) => void, abortSignal?: AbortSignal, dynamicTimeout?: number): Promise<AgentChatResponse>;
/**
 * Fallback chat without native tools (text-based tool format)
 * @throws TimeoutError when the request exceeds its timeout budget.
 */
export declare function agentChatFallback(messages: Message[], systemPrompt: string, onChunk?: (chunk: string) => void, abortSignal?: AbortSignal, dynamicTimeout?: number): Promise<AgentChatResponse>;
|
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent chat API layer and prompt building.
|
|
3
|
+
*
|
|
4
|
+
* Exported:
|
|
5
|
+
* loadProjectRules() — loads .codeep/rules.md or CODEEP.md
|
|
6
|
+
* formatChatHistoryForAgent() — trims history to fit context window
|
|
7
|
+
* getAgentSystemPrompt() — builds system prompt for native-tool mode
|
|
8
|
+
* getFallbackSystemPrompt() — builds system prompt for text-tool mode
|
|
9
|
+
* agentChat() — native tool-calling API call
|
|
10
|
+
* agentChatFallback() — text-based tool format fallback
|
|
11
|
+
* AgentChatResponse — response type (re-export from agentStream)
|
|
12
|
+
* TimeoutError — distinguishes timeout from user abort
|
|
13
|
+
*/
|
|
14
|
+
import { existsSync, readFileSync } from 'fs';
|
|
15
|
+
import { join } from 'path';
|
|
16
|
+
import { config, getApiKey } from '../config/index.js';
|
|
17
|
+
import { getProviderBaseUrl, getProviderAuthHeader, supportsNativeTools } from '../config/providers.js';
|
|
18
|
+
import { recordTokenUsage, extractOpenAIUsage, extractAnthropicUsage } from './tokenTracker.js';
|
|
19
|
+
import { parseOpenAIToolCalls, parseAnthropicToolCalls, parseToolCalls } from './toolParsing.js';
|
|
20
|
+
import { formatToolDefinitions, getOpenAITools, getAnthropicTools } from './tools.js';
|
|
21
|
+
import { handleStream, handleOpenAIAgentStream, handleAnthropicAgentStream } from './agentStream.js';
|
|
22
|
+
// Gated debug logger: writes to stderr only when CODEEP_DEBUG=1.
const debug = (...args) => {
    if (process.env.CODEEP_DEBUG !== '1')
        return;
    console.error('[DEBUG]', ...args);
};
|
|
27
|
+
/**
 * Error raised when an API request exceeds its timeout budget.
 * Lets callers distinguish a timeout from a user-initiated abort.
 */
export class TimeoutError extends Error {
    constructor(msg = 'Request timed out') {
        super(msg);
        this.name = 'TimeoutError';
    }
}
|
|
36
|
+
/**
 * Load project rules from .codeep/rules.md or CODEEP.md.
 * The first candidate file that exists and has non-empty content wins;
 * unreadable or empty candidates are skipped. Returns '' when none apply.
 */
export function loadProjectRules(projectRoot) {
    const ruleFiles = [
        join(projectRoot, '.codeep', 'rules.md'),
        join(projectRoot, 'CODEEP.md'),
    ];
    for (const rulePath of ruleFiles) {
        if (!existsSync(rulePath))
            continue;
        try {
            const text = readFileSync(rulePath, 'utf-8').trim();
            if (!text)
                continue;
            debug('Loaded project rules from', rulePath);
            return `\n\n## Project Rules\nThe following rules are defined by the project owner. You MUST follow these rules:\n\n${text}`;
        }
        catch (err) {
            debug('Failed to read project rules from', rulePath, err);
        }
    }
    return '';
}
|
|
60
|
+
/**
 * Format chat session history for inclusion in the agent system prompt.
 * Agent-generated noise ([AGENT], [DRY RUN], "Agent completed/failed/stopped"
 * status lines) is dropped, then the most recent messages are kept within a
 * character budget. Returns '' when nothing relevant remains.
 */
export function formatChatHistoryForAgent(history, maxChars = 16000) {
    if (!history?.length)
        return '';
    // Messages produced by the agent loop itself are not useful context.
    const isAgentNoise = (text) => {
        const t = text.trimStart();
        return (t.startsWith('[AGENT]') ||
            t.startsWith('[DRY RUN]') ||
            t.startsWith('Agent completed') ||
            t.startsWith('Agent failed') ||
            t.startsWith('Agent stopped'));
    };
    const relevant = history.filter(m => !isAgentNoise(m.content));
    if (relevant.length === 0)
        return '';
    // Walk backwards so the newest messages are preferred under the budget.
    const kept = [];
    let budgetUsed = 0;
    for (let i = relevant.length - 1; i >= 0; i--) {
        const msg = relevant[i];
        const speaker = msg.role === 'user' ? 'User' : 'Assistant';
        const entryLength = `${speaker}: ${msg.content}`.length;
        if (budgetUsed + entryLength > maxChars && kept.length > 0)
            break;
        // A single oversized message is truncated and ends the walk.
        if (entryLength > maxChars) {
            kept.unshift({ role: msg.role, content: msg.content.slice(0, maxChars - 100) + '\n[truncated]' });
            break;
        }
        kept.unshift(msg);
        budgetUsed += entryLength;
    }
    if (kept.length === 0)
        return '';
    const lines = kept.map(m => `**${m.role === 'user' ? 'User' : 'Assistant'}:** ${m.content}`).join('\n\n');
    return `\n\n## Prior Conversation Context\nThe following is the recent chat history from this session. Use it as background context to understand the user's intent, but focus on completing the current task.\n\n${lines}`;
}
|
|
96
|
+
/**
 * Build the agent-mode system prompt for providers with native tool calling.
 *
 * @param projectContext Project metadata ({ name, type, root, structure? });
 *        missing fields fall back to 'Unknown' / 'unknown' / process.cwd().
 * @returns The complete system prompt string sent verbatim to the model.
 */
export function getAgentSystemPrompt(projectContext) {
    // NOTE(review): the template below is model-facing text. The tool names it
    // references (create_directory, edit_file, write_file, …) presumably must
    // match the tools exposed by the executor — confirm before renaming any.
    return `You are an AI coding agent with FULL autonomous access to this project.

## Your Capabilities
- Read, write, edit, and delete files and directories
- Create directories with create_directory tool
- Execute shell commands (npm, git, build tools, etc.)
- Search code in the project
- List directory contents

## IMPORTANT: Follow User Instructions Exactly
- Do EXACTLY what the user asks
- If user says "create a website" -> create ALL necessary files (HTML, CSS, JS, images, etc.)
- If user says "create folder X" -> use create_directory tool to create folder X
- If user says "delete file X" -> use delete_file tool to delete file X
- The user may write in any language - understand their request and execute it
- Tool names and parameters must ALWAYS be in English (e.g., "create_directory", not "kreiraj_direktorij")
- KEEP WORKING until the ENTIRE task is finished - do NOT stop after creating just directories or partial files
- Only stop when you have created ALL files needed for a complete, working solution

## Rules
1. Always read files before editing them to understand the current content
2. Use edit_file for modifications to existing files (preserves other content)
3. Use write_file only for creating new files or complete overwrites
4. Use create_directory to create new folders/directories
5. Use list_files to see directory contents
6. Use search_code to find files or search patterns
7. NEVER use execute_command for: ls, find, cat, grep, mkdir, rm, cp, mv, touch
8. Use execute_command ONLY for: npm, git, composer, pip, cargo (build/package managers)
9. When the task is complete, respond with a summary WITHOUT any tool calls

## Project Information
Name: ${projectContext.name || 'Unknown'}
Type: ${projectContext.type || 'unknown'}
Root: ${projectContext.root || process.cwd()}
${projectContext.structure ? `\n## Project Structure\n${projectContext.structure}` : ''}`;
}
|
|
133
|
+
/**
 * Build the system prompt for text-tool mode: the native agent prompt
 * followed by the textual tool definitions.
 */
export function getFallbackSystemPrompt(projectContext) {
    const basePrompt = getAgentSystemPrompt(projectContext);
    return [basePrompt, formatToolDefinitions()].join('\n\n');
}
|
|
136
|
+
/**
 * Make a chat API call for agent mode with native tool support.
 * Falls back to agentChatFallback() if the provider does not support
 * native tools, or if the provider rejects the tools payload.
 *
 * @param messages        Conversation messages in provider wire format.
 * @param systemPrompt    System prompt (OpenAI: system message; Anthropic: `system` field).
 * @param onChunk         Optional streaming callback; enables SSE streaming when set.
 * @param abortSignal     Optional caller-controlled abort (user cancellation).
 * @param dynamicTimeout  Optional per-call timeout in ms; defaults to config 'apiTimeout'.
 * @returns { content, toolCalls, usedNativeTools }
 * @throws TimeoutError when the request exceeds the timeout budget (vs. user abort).
 */
export async function agentChat(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout) {
    const protocol = config.get('protocol');
    const model = config.get('model');
    const apiKey = getApiKey();
    const providerId = config.get('provider');
    const baseUrl = getProviderBaseUrl(providerId, protocol);
    const authHeader = getProviderAuthHeader(providerId, protocol);
    if (!baseUrl)
        throw new Error(`Provider ${providerId} does not support ${protocol} protocol`);
    if (!supportsNativeTools(providerId, protocol)) {
        // BUGFIX: dynamicTimeout was previously dropped here, silently reverting
        // the fallback call to the default config timeout.
        return await agentChatFallback(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout);
    }
    const controller = new AbortController();
    const timeoutMs = dynamicTimeout || config.get('apiTimeout');
    // Distinguish our own timeout abort from a user-initiated abort.
    let isTimeout = false;
    const timeout = setTimeout(() => { isTimeout = true; controller.abort(); }, timeoutMs);
    if (abortSignal) {
        abortSignal.addEventListener('abort', () => { isTimeout = false; controller.abort(); });
    }
    const headers = { 'Content-Type': 'application/json' };
    if (authHeader === 'Bearer') {
        headers['Authorization'] = `Bearer ${apiKey}`;
    }
    else {
        headers['x-api-key'] = apiKey;
    }
    if (protocol === 'anthropic')
        headers['anthropic-version'] = '2023-06-01';
    try {
        let endpoint;
        let body;
        const useStreaming = Boolean(onChunk);
        if (protocol === 'openai') {
            endpoint = `${baseUrl}/chat/completions`;
            body = {
                model, messages: [{ role: 'system', content: systemPrompt }, ...messages],
                tools: getOpenAITools(), tool_choice: 'auto', stream: useStreaming,
                temperature: config.get('temperature'), max_tokens: Math.max(config.get('maxTokens'), 16384),
            };
        }
        else {
            endpoint = `${baseUrl}/v1/messages`;
            body = {
                model, system: systemPrompt, messages,
                tools: getAnthropicTools(), stream: useStreaming,
                temperature: config.get('temperature'), max_tokens: Math.max(config.get('maxTokens'), 16384),
            };
        }
        const response = await fetch(endpoint, {
            method: 'POST', headers, body: JSON.stringify(body), signal: controller.signal,
        });
        if (!response.ok) {
            const errorText = await response.text();
            // Some providers reject the tools payload with a 400 — retry without native tools.
            if (errorText.includes('tools') || errorText.includes('function') || response.status === 400) {
                // BUGFIX: forward dynamicTimeout to the fallback call.
                return await agentChatFallback(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout);
            }
            throw new Error(`API error: ${response.status} - ${errorText}`);
        }
        if (useStreaming && response.body) {
            if (protocol === 'openai')
                return await handleOpenAIAgentStream(response.body, onChunk, model, providerId);
            else
                return await handleAnthropicAgentStream(response.body, onChunk, model, providerId);
        }
        // Non-streaming path: parse the full JSON body.
        const data = await response.json();
        const usageExtractor = protocol === 'openai' ? extractOpenAIUsage : extractAnthropicUsage;
        const usage = usageExtractor(data);
        if (usage)
            recordTokenUsage(usage, model, providerId);
        if (protocol === 'openai') {
            const message = data.choices?.[0]?.message;
            const content = message?.content || '';
            const rawToolCalls = message?.tool_calls || [];
            const toolCalls = parseOpenAIToolCalls(rawToolCalls);
            debug('Parsed tool calls:', toolCalls.length, toolCalls.map((t) => t.tool));
            // The model may emit text-format tool calls even when native tools are on.
            if (toolCalls.length === 0 && content) {
                const textToolCalls = parseToolCalls(content);
                if (textToolCalls.length > 0)
                    return { content, toolCalls: textToolCalls, usedNativeTools: false };
            }
            if (onChunk && content)
                onChunk(content);
            return { content, toolCalls, usedNativeTools: true };
        }
        else {
            const contentBlocks = data.content || [];
            let textContent = '';
            for (const block of contentBlocks) {
                if (block.type === 'text') {
                    textContent += block.text;
                    if (onChunk)
                        onChunk(block.text);
                }
            }
            const toolCalls = parseAnthropicToolCalls(contentBlocks);
            return { content: textContent, toolCalls, usedNativeTools: true };
        }
    }
    catch (error) {
        const err = error;
        if (err.name === 'AbortError') {
            if (isTimeout)
                throw new TimeoutError(`API request timed out after ${timeoutMs}ms`);
            throw error;
        }
        if (err.message.includes('tools') || err.message.includes('function')) {
            // BUGFIX: forward dynamicTimeout to the fallback call.
            return await agentChatFallback(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout);
        }
        throw error;
    }
    finally {
        clearTimeout(timeout);
    }
}
|
|
254
|
+
/**
 * Fallback chat without native tools (text-based tool format).
 * The tool definitions are appended to the system prompt as text, and the
 * model's reply is scanned with parseToolCalls() for text-format tool calls.
 *
 * @param messages        Conversation messages in provider wire format.
 * @param systemPrompt    Base system prompt; tool definitions appended if absent.
 * @param onChunk         Optional streaming callback; enables SSE streaming when set.
 * @param abortSignal     Optional caller-controlled abort (user cancellation).
 * @param dynamicTimeout  Optional per-call timeout in ms; defaults to config 'apiTimeout'.
 * @returns { content, toolCalls, usedNativeTools: false }
 * @throws TimeoutError when the request exceeds the timeout budget (vs. user abort).
 */
export async function agentChatFallback(messages, systemPrompt, onChunk, abortSignal, dynamicTimeout) {
    const protocol = config.get('protocol');
    const model = config.get('model');
    const apiKey = getApiKey();
    const providerId = config.get('provider');
    const baseUrl = getProviderBaseUrl(providerId, protocol);
    const authHeader = getProviderAuthHeader(providerId, protocol);
    if (!baseUrl)
        throw new Error(`Provider ${providerId} does not support ${protocol} protocol`);
    const controller = new AbortController();
    const timeoutMs = dynamicTimeout || config.get('apiTimeout');
    // isTimeout tells the catch block whether an AbortError came from our timer
    // (-> TimeoutError) or from the caller's abortSignal (-> rethrow as-is).
    let isTimeout = false;
    const timeout = setTimeout(() => { isTimeout = true; controller.abort(); }, timeoutMs);
    if (abortSignal) {
        abortSignal.addEventListener('abort', () => { isTimeout = false; controller.abort(); });
    }
    const headers = { 'Content-Type': 'application/json' };
    if (authHeader === 'Bearer') {
        headers['Authorization'] = `Bearer ${apiKey}`;
    }
    else {
        headers['x-api-key'] = apiKey;
    }
    if (protocol === 'anthropic')
        headers['anthropic-version'] = '2023-06-01';
    // Append textual tool definitions unless the prompt already contains them.
    const fallbackPrompt = systemPrompt.includes('## Available Tools')
        ? systemPrompt
        : systemPrompt + '\n\n' + formatToolDefinitions();
    try {
        let endpoint;
        let body;
        if (protocol === 'openai') {
            endpoint = `${baseUrl}/chat/completions`;
            body = {
                model, messages: [{ role: 'system', content: fallbackPrompt }, ...messages],
                stream: Boolean(onChunk), temperature: config.get('temperature'),
                max_tokens: Math.max(config.get('maxTokens'), 16384),
            };
        }
        else {
            endpoint = `${baseUrl}/v1/messages`;
            // Anthropic path injects the prompt as a user/assistant exchange
            // rather than using the `system` field.
            body = {
                model,
                messages: [
                    { role: 'user', content: fallbackPrompt },
                    { role: 'assistant', content: 'Understood. I will use the tools as specified.' },
                    ...messages,
                ],
                stream: Boolean(onChunk), temperature: config.get('temperature'),
                max_tokens: Math.max(config.get('maxTokens'), 16384),
            };
        }
        const response = await fetch(endpoint, {
            method: 'POST', headers, body: JSON.stringify(body), signal: controller.signal,
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`API error: ${response.status} - ${error}`);
        }
        let content;
        if (onChunk && response.body) {
            // Streaming: accumulate text via the shared SSE handler.
            content = await handleStream(response.body, protocol, onChunk);
        }
        else {
            const data = await response.json();
            const fallbackUsageExtractor = protocol === 'openai' ? extractOpenAIUsage : extractAnthropicUsage;
            const fallbackUsage = fallbackUsageExtractor(data);
            if (fallbackUsage)
                recordTokenUsage(fallbackUsage, model, providerId);
            content = protocol === 'openai' ? (data.choices?.[0]?.message?.content || '') : (data.content?.[0]?.text || '');
        }
        // Text-format tool calls are parsed out of the reply body.
        const toolCalls = parseToolCalls(content);
        return { content, toolCalls, usedNativeTools: false };
    }
    catch (error) {
        const err = error;
        if (err.name === 'AbortError') {
            if (isTimeout)
                throw new TimeoutError(`API request timed out after ${timeoutMs}ms`);
            throw error;
        }
        throw error;
    }
    finally {
        clearTimeout(timeout);
    }
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
/**
 * Agent streaming handlers for OpenAI and Anthropic protocols.
 *
 * Parses SSE streams and accumulates tool calls from deltas.
 */
import { ToolCall } from './tools';
/** Result of one agent chat round (streamed or not). */
export interface AgentChatResponse {
    /** Accumulated assistant text (may be empty when only tool calls were emitted). */
    content: string;
    /** Tool calls parsed from native deltas or from text-format fallback. */
    toolCalls: ToolCall[];
    /** True when the provider's native tool-calling path produced the calls. */
    usedNativeTools: boolean;
}
/**
 * Handle streaming response (text-based fallback, no native tools)
 * @returns The full accumulated text of the stream.
 */
export declare function handleStream(body: ReadableStream<Uint8Array>, protocol: string, onChunk: (chunk: string) => void): Promise<string>;
/**
 * Handle OpenAI streaming response with tool call accumulation
 */
export declare function handleOpenAIAgentStream(body: ReadableStream<Uint8Array>, onChunk: (chunk: string) => void, model: string, providerId: string): Promise<AgentChatResponse>;
/**
 * Handle Anthropic streaming response with tool call accumulation
 */
export declare function handleAnthropicAgentStream(body: ReadableStream<Uint8Array>, onChunk: (chunk: string) => void, model: string, providerId: string): Promise<AgentChatResponse>;
|
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent streaming handlers for OpenAI and Anthropic protocols.
|
|
3
|
+
*
|
|
4
|
+
* Parses SSE streams and accumulates tool calls from deltas.
|
|
5
|
+
*/
|
|
6
|
+
import { recordTokenUsage, extractOpenAIUsage, extractAnthropicUsage } from './tokenTracker.js';
|
|
7
|
+
import { parseOpenAIToolCalls, parseAnthropicToolCalls, parseToolCalls } from './toolParsing.js';
|
|
8
|
+
// Debug logging helper — emits to stderr only when CODEEP_DEBUG=1.
const debug = (...args) => {
    if (process.env.CODEEP_DEBUG !== '1')
        return;
    console.error('[DEBUG]', ...args);
};
|
|
14
|
+
/** Parse a JSON string, yielding an empty object when parsing fails. */
function tryParseJSON(raw) {
    let parsed;
    try {
        parsed = JSON.parse(raw);
    }
    catch {
        parsed = {};
    }
    return parsed;
}
|
|
22
|
+
/**
 * Handle a streaming SSE response in text-only (fallback) mode.
 * Each text delta is forwarded through onChunk; the full accumulated
 * text is returned once the stream ends.
 */
export async function handleStream(body, protocol, onChunk) {
    const reader = body.getReader();
    const decoder = new TextDecoder();
    let pending = '';
    let fullText = '';
    // Pull the text delta out of one parsed SSE event, per protocol.
    const extractDelta = (event) => {
        if (protocol === 'openai')
            return event.choices?.[0]?.delta?.content;
        if (event.type === 'content_block_delta')
            return event.delta?.text;
        return undefined;
    };
    for (;;) {
        const { done, value } = await reader.read();
        if (done)
            break;
        pending += decoder.decode(value, { stream: true });
        // Only complete lines are processed; the trailing partial stays buffered.
        const lines = pending.split('\n');
        pending = lines.pop() || '';
        for (const line of lines) {
            if (!line.startsWith('data: '))
                continue;
            const payload = line.slice(6);
            if (payload === '[DONE]')
                continue;
            try {
                const delta = extractDelta(JSON.parse(payload));
                if (delta) {
                    fullText += delta;
                    onChunk(delta);
                }
            }
            catch {
                // Malformed SSE payload — skip it.
            }
        }
    }
    return fullText;
}
|
|
64
|
+
/**
 * Handle OpenAI streaming response with tool call accumulation.
 * Text deltas are forwarded through onChunk; tool-call fragments are merged
 * by their delta `index` until the stream ends, then parsed into ToolCalls.
 *
 * @param body       SSE response body stream.
 * @param onChunk    Receives each text delta as it arrives.
 * @param model      Model id, used for token-usage accounting.
 * @param providerId Provider id, used for token-usage accounting.
 * @returns { content, toolCalls, usedNativeTools }
 */
export async function handleOpenAIAgentStream(body, onChunk, model, providerId) {
    const reader = body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    let content = '';
    // index -> partially accumulated { id, name, arguments } tool call.
    const toolCallMap = new Map();
    let usageData = null;
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        buffer += decoder.decode(value, { stream: true });
        // Process complete lines only; keep the trailing partial line buffered.
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';
        for (const line of lines) {
            if (!line.startsWith('data: '))
                continue;
            const data = line.slice(6);
            if (data === '[DONE]')
                continue;
            try {
                const parsed = JSON.parse(data);
                // Usage arrives on one of the final events; keep the latest.
                if (parsed.usage) {
                    usageData = parsed;
                }
                const delta = parsed.choices?.[0]?.delta;
                if (!delta)
                    continue;
                if (delta.content) {
                    content += delta.content;
                    onChunk(delta.content);
                }
                if (delta.tool_calls) {
                    // Tool call arguments stream as fragments; merge per index.
                    for (const tc of delta.tool_calls) {
                        const idx = tc.index ?? 0;
                        if (!toolCallMap.has(idx)) {
                            toolCallMap.set(idx, { id: tc.id || '', name: tc.function?.name || '', arguments: '' });
                        }
                        const entry = toolCallMap.get(idx);
                        if (tc.id)
                            entry.id = tc.id;
                        if (tc.function?.name)
                            entry.name = tc.function.name;
                        if (tc.function?.arguments)
                            entry.arguments += tc.function.arguments;
                    }
                }
            }
            catch {
                // Ignore parse errors
            }
        }
    }
    if (usageData) {
        const usage = extractOpenAIUsage(usageData);
        if (usage)
            recordTokenUsage(usage, model, providerId);
    }
    // Re-shape accumulated entries into the raw OpenAI tool_calls format.
    const rawToolCalls = Array.from(toolCallMap.values()).map(tc => ({
        id: tc.id,
        type: 'function',
        function: { name: tc.name, arguments: tc.arguments },
    }));
    const toolCalls = parseOpenAIToolCalls(rawToolCalls);
    debug('Stream parsed tool calls:', toolCalls.length, toolCalls.map(t => t.tool));
    // No native calls? The model may have emitted text-format tool calls instead.
    if (toolCalls.length === 0 && content) {
        const textToolCalls = parseToolCalls(content);
        if (textToolCalls.length > 0) {
            return { content, toolCalls: textToolCalls, usedNativeTools: false };
        }
    }
    return { content, toolCalls, usedNativeTools: true };
}
|
|
140
|
+
/**
 * Handle Anthropic streaming response with tool call accumulation.
 * Tracks the currently open content block (text vs tool_use): text deltas go
 * straight to onChunk, while tool_use input JSON fragments are accumulated
 * until content_block_stop, then parsed into a tool_use block.
 *
 * @param body       SSE response body stream.
 * @param onChunk    Receives each text delta as it arrives.
 * @param model      Model id, used for token-usage accounting.
 * @param providerId Provider id, used for token-usage accounting.
 * @returns { content, toolCalls, usedNativeTools: true }
 */
export async function handleAnthropicAgentStream(body, onChunk, model, providerId) {
    const reader = body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    let content = '';
    const contentBlocks = [];
    // State of the content block currently being streamed.
    let currentBlockType = '';
    let currentToolName = '';
    let currentToolId = '';
    let currentToolInput = '';
    let usageData = null;
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        buffer += decoder.decode(value, { stream: true });
        // Process complete lines only; keep the trailing partial line buffered.
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';
        for (const line of lines) {
            if (!line.startsWith('data: '))
                continue;
            const data = line.slice(6);
            try {
                const parsed = JSON.parse(data);
                // Keep the latest event carrying usage (covers message_delta too).
                // BUGFIX: removed a duplicate `message_delta && usage` check that
                // was fully subsumed by this condition.
                if (parsed.usage)
                    usageData = parsed;
                if (parsed.type === 'content_block_start') {
                    const block = parsed.content_block;
                    if (block.type === 'text') {
                        currentBlockType = 'text';
                    }
                    else if (block.type === 'tool_use') {
                        currentBlockType = 'tool_use';
                        currentToolName = block.name || '';
                        currentToolId = block.id || '';
                        currentToolInput = '';
                    }
                }
                else if (parsed.type === 'content_block_delta') {
                    if (currentBlockType === 'text' && parsed.delta?.text) {
                        content += parsed.delta.text;
                        onChunk(parsed.delta.text);
                    }
                    else if (currentBlockType === 'tool_use' && parsed.delta?.partial_json) {
                        // Tool input JSON streams in fragments; join before parsing.
                        currentToolInput += parsed.delta.partial_json;
                    }
                }
                else if (parsed.type === 'content_block_stop') {
                    if (currentBlockType === 'tool_use') {
                        contentBlocks.push({
                            type: 'tool_use',
                            id: currentToolId,
                            name: currentToolName,
                            input: tryParseJSON(currentToolInput),
                        });
                    }
                    currentBlockType = '';
                }
            }
            catch {
                // Ignore parse errors
            }
        }
    }
    if (usageData) {
        const usage = extractAnthropicUsage(usageData);
        if (usage)
            recordTokenUsage(usage, model, providerId);
    }
    const toolCalls = parseAnthropicToolCalls(contentBlocks);
    return { content, toolCalls, usedNativeTools: true };
}
|
package/dist/utils/keychain.js
CHANGED
|
@@ -90,7 +90,8 @@ class SmartStorage {
|
|
|
90
90
|
}
|
|
91
91
|
catch {
|
|
92
92
|
this.useKeychain = false;
|
|
93
|
-
|
|
93
|
+
logger.warn('System keychain is unavailable. API keys will be stored as plaintext in the config file. ' +
|
|
94
|
+
'Consider installing libsecret (Linux) or ensuring Keychain Access is available (macOS).');
|
|
94
95
|
}
|
|
95
96
|
this.keychainTested = true;
|
|
96
97
|
}
|
|
@@ -113,8 +114,8 @@ class SmartStorage {
|
|
|
113
114
|
return;
|
|
114
115
|
}
|
|
115
116
|
catch (error) {
|
|
116
|
-
// Keychain failed, fall back to config
|
|
117
117
|
this.useKeychain = false;
|
|
118
|
+
logger.warn(`Keychain write failed for '${providerId}'. API key will be stored as plaintext in config. Error: ${error}`);
|
|
118
119
|
}
|
|
119
120
|
}
|
|
120
121
|
await this.fallback.setApiKey(providerId, apiKey);
|
package/dist/utils/learning.js
CHANGED
|
@@ -94,9 +94,15 @@ export function loadProjectPreferences(projectRoot) {
|
|
|
94
94
|
export function saveProjectPreferences(projectRoot, prefs) {
|
|
95
95
|
ensureLearningDir();
|
|
96
96
|
const projectPrefsPath = getProjectPrefsPath(projectRoot);
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
97
|
+
let existing = {};
|
|
98
|
+
if (existsSync(projectPrefsPath)) {
|
|
99
|
+
try {
|
|
100
|
+
existing = JSON.parse(readFileSync(projectPrefsPath, 'utf-8'));
|
|
101
|
+
}
|
|
102
|
+
catch {
|
|
103
|
+
// Corrupt prefs file — start fresh
|
|
104
|
+
}
|
|
105
|
+
}
|
|
100
106
|
const merged = { ...existing, ...prefs, lastUpdated: Date.now() };
|
|
101
107
|
writeFileSync(projectPrefsPath, JSON.stringify(merged, null, 2));
|
|
102
108
|
}
|