codebot-ai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +247 -0
- package/bin/codebot +5 -0
- package/dist/agent.d.ts +31 -0
- package/dist/agent.js +256 -0
- package/dist/banner.d.ts +19 -0
- package/dist/banner.js +148 -0
- package/dist/browser/cdp.d.ts +29 -0
- package/dist/browser/cdp.js +292 -0
- package/dist/cli.d.ts +2 -0
- package/dist/cli.js +518 -0
- package/dist/context/manager.d.ts +27 -0
- package/dist/context/manager.js +139 -0
- package/dist/context/repo-map.d.ts +5 -0
- package/dist/context/repo-map.js +100 -0
- package/dist/history.d.ts +27 -0
- package/dist/history.js +146 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.js +42 -0
- package/dist/memory.d.ts +39 -0
- package/dist/memory.js +168 -0
- package/dist/parser.d.ts +8 -0
- package/dist/parser.js +79 -0
- package/dist/providers/anthropic.d.ts +9 -0
- package/dist/providers/anthropic.js +288 -0
- package/dist/providers/index.d.ts +5 -0
- package/dist/providers/index.js +13 -0
- package/dist/providers/openai.d.ts +11 -0
- package/dist/providers/openai.js +173 -0
- package/dist/providers/registry.d.ts +15 -0
- package/dist/providers/registry.js +115 -0
- package/dist/setup.d.ts +17 -0
- package/dist/setup.js +243 -0
- package/dist/tools/browser.d.ts +43 -0
- package/dist/tools/browser.js +329 -0
- package/dist/tools/edit.d.ts +26 -0
- package/dist/tools/edit.js +73 -0
- package/dist/tools/execute.d.ts +26 -0
- package/dist/tools/execute.js +52 -0
- package/dist/tools/glob.d.ts +24 -0
- package/dist/tools/glob.js +102 -0
- package/dist/tools/grep.d.ts +29 -0
- package/dist/tools/grep.js +125 -0
- package/dist/tools/index.d.ts +10 -0
- package/dist/tools/index.js +49 -0
- package/dist/tools/memory.d.ts +36 -0
- package/dist/tools/memory.js +114 -0
- package/dist/tools/read.d.ts +26 -0
- package/dist/tools/read.js +75 -0
- package/dist/tools/think.d.ts +18 -0
- package/dist/tools/think.js +20 -0
- package/dist/tools/web-fetch.d.ts +36 -0
- package/dist/tools/web-fetch.js +83 -0
- package/dist/tools/write.d.ts +22 -0
- package/dist/tools/write.js +65 -0
- package/dist/types.d.ts +82 -0
- package/dist/types.js +3 -0
- package/package.json +57 -0
|
@@ -0,0 +1,288 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.AnthropicProvider = void 0;
|
|
4
|
+
/**
 * Streaming chat provider for the Anthropic Messages API.
 *
 * Converts the internal OpenAI-style message list to Anthropic's format,
 * POSTs to `/v1/messages` with `stream: true`, and re-emits the SSE stream
 * as internal StreamEvents (`text`, `thinking`, `tool_call_*`, `usage`,
 * `error`, `done`).
 */
class AnthropicProvider {
    name;
    config;
    constructor(config) {
        this.config = config;
        // The provider is surfaced to callers under the configured model name.
        this.name = config.model;
    }
    /**
     * Stream one assistant turn.
     *
     * @param messages internal Message[] (system/user/assistant/tool roles)
     * @param tools optional OpenAI-style tool schemas, mapped to Anthropic's
     *              `{ name, description, input_schema }` shape
     * @yields StreamEvent objects; always ends with `done` or `error`.
     */
    async *chat(messages, tools) {
        const { systemPrompt, apiMessages } = this.convertMessages(messages);
        // Optional fields are only added when present so the payload stays minimal.
        const body = {
            model: this.config.model,
            messages: apiMessages,
            max_tokens: 8192,
            stream: true,
            ...(systemPrompt ? { system: systemPrompt } : {}),
            ...(tools?.length
                ? {
                    tools: tools.map(t => ({
                        name: t.function.name,
                        description: t.function.description,
                        input_schema: t.function.parameters,
                    })),
                }
                : {}),
        };
        // Normalize trailing slashes so `${baseUrl}/v1/messages` never doubles a '/'.
        const baseUrl = this.config.baseUrl.replace(/\/+$/, '');
        let response;
        try {
            response = await fetch(`${baseUrl}/v1/messages`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'x-api-key': this.config.apiKey || '',
                    'anthropic-version': '2023-06-01',
                },
                body: JSON.stringify(body),
            });
        }
        catch (err) {
            const msg = err instanceof Error ? err.message : String(err);
            yield { type: 'error', error: `Connection failed: ${msg}` };
            return;
        }
        if (!response.ok) {
            const text = await response.text();
            yield { type: 'error', error: `Anthropic error ${response.status}: ${text}` };
            return;
        }
        if (!response.body) {
            yield { type: 'error', error: 'No response body from Anthropic' };
            return;
        }
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';
        // In-progress tool_use content blocks, keyed by their stream index.
        const toolBlocks = new Map();
        let currentBlockIndex = -1;
        let currentBlockType = '';
        // SSE `event:` lines apply to the `data:` lines that follow. The event
        // name must survive chunk boundaries (an `event:` line can arrive at the
        // end of one chunk with its `data:` line in the next), so it is kept
        // OUTSIDE the read loop. (FIX: it was previously reset per chunk.)
        let currentEvent = '';
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                // Keep a possibly-incomplete trailing line for the next chunk.
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmed = line.trim();
                    if (trimmed.startsWith('event: ')) {
                        currentEvent = trimmed.slice(7);
                        continue;
                    }
                    if (!trimmed.startsWith('data: '))
                        continue;
                    const dataStr = trimmed.slice(6);
                    if (!dataStr || dataStr === '[DONE]')
                        continue;
                    let data;
                    try {
                        data = JSON.parse(dataStr);
                    }
                    catch {
                        continue; // skip malformed SSE payloads
                    }
                    switch (currentEvent) {
                        case 'content_block_start': {
                            const block = data.content_block;
                            currentBlockIndex = data.index;
                            currentBlockType = block?.type || '';
                            if (currentBlockType === 'tool_use') {
                                // Arguments stream in later as input_json_delta; start empty.
                                toolBlocks.set(currentBlockIndex, {
                                    id: block.id,
                                    name: block.name,
                                    input: '',
                                });
                                yield {
                                    type: 'tool_call_start',
                                    toolCall: {
                                        id: block.id,
                                        type: 'function',
                                        function: { name: block.name, arguments: '' },
                                    },
                                };
                            }
                            else if (currentBlockType === 'thinking') {
                                yield { type: 'thinking', text: '' };
                            }
                            break;
                        }
                        case 'content_block_delta': {
                            const delta = data.delta;
                            const deltaType = delta?.type;
                            if (deltaType === 'text_delta') {
                                yield { type: 'text', text: delta.text };
                            }
                            else if (deltaType === 'input_json_delta') {
                                const partial = delta.partial_json;
                                const block = toolBlocks.get(currentBlockIndex);
                                if (block) {
                                    block.input += partial;
                                }
                                yield { type: 'tool_call_delta', text: partial };
                            }
                            else if (deltaType === 'thinking_delta') {
                                yield { type: 'thinking', text: delta.thinking };
                            }
                            break;
                        }
                        case 'content_block_stop': {
                            if (currentBlockType === 'thinking') {
                                yield { type: 'thinking', text: '\n' };
                            }
                            break;
                        }
                        case 'message_start': {
                            const message = data.message;
                            if (message?.usage) {
                                const usage = message.usage;
                                yield {
                                    type: 'usage',
                                    usage: {
                                        inputTokens: usage.input_tokens,
                                        outputTokens: usage.output_tokens,
                                    },
                                };
                            }
                            break;
                        }
                        case 'message_delta': {
                            // Emit usage from message_delta
                            const deltaUsage = data.usage || {};
                            if (deltaUsage.output_tokens) {
                                yield {
                                    type: 'usage',
                                    usage: { outputTokens: deltaUsage.output_tokens },
                                };
                            }
                            // Message is ending — flush accumulated tool calls.
                            // (FIX: flushing also clears the map so the post-loop
                            // flush cannot emit duplicates on an abnormal end.)
                            yield* this.emitToolCallEnds(toolBlocks);
                            break;
                        }
                        case 'message_stop': {
                            yield { type: 'done' };
                            return;
                        }
                        case 'error': {
                            const error = data.error;
                            yield { type: 'error', error: `Anthropic: ${error?.message || 'Unknown error'}` };
                            return;
                        }
                    }
                }
            }
        }
        finally {
            reader.releaseLock();
        }
        // Stream ended without message_stop — flush anything still pending.
        yield* this.emitToolCallEnds(toolBlocks);
        yield { type: 'done' };
    }
    /**
     * Yield a tool_call_end event for every accumulated tool block, then clear
     * the map so the same call can never be emitted twice.
     */
    *emitToolCallEnds(toolBlocks) {
        for (const [, block] of toolBlocks) {
            yield {
                type: 'tool_call_end',
                toolCall: {
                    id: block.id,
                    type: 'function',
                    function: { name: block.name, arguments: block.input },
                },
            };
        }
        toolBlocks.clear();
    }
    /**
     * Convert internal messages to Anthropic's wire format.
     *
     * - `system` messages are concatenated into a single system prompt.
     * - assistant `tool_calls` become `tool_use` content blocks (arguments are
     *   parsed from JSON; unparsable arguments fall back to `{}`).
     * - `tool` results become `tool_result` blocks inside a user message.
     * - consecutive same-role messages are merged, since the API requires
     *   alternating user/assistant turns.
     *
     * @returns `{ systemPrompt, apiMessages }`
     */
    convertMessages(messages) {
        let systemPrompt = '';
        const apiMessages = [];
        for (const msg of messages) {
            if (msg.role === 'system') {
                systemPrompt += (systemPrompt ? '\n\n' : '') + msg.content;
                continue;
            }
            if (msg.role === 'assistant') {
                const content = [];
                if (msg.content) {
                    content.push({ type: 'text', text: msg.content });
                }
                if (msg.tool_calls) {
                    for (const tc of msg.tool_calls) {
                        let input;
                        try {
                            input = JSON.parse(tc.function.arguments);
                        }
                        catch {
                            input = {};
                        }
                        content.push({
                            type: 'tool_use',
                            id: tc.id,
                            name: tc.function.name,
                            input,
                        });
                    }
                }
                apiMessages.push({
                    role: 'assistant',
                    // A lone text block collapses to a plain string.
                    content: content.length === 1 && content[0].type === 'text'
                        ? content[0].text
                        : content,
                });
                continue;
            }
            if (msg.role === 'tool') {
                // Tool results in Anthropic go as user messages with tool_result content
                const lastMsg = apiMessages[apiMessages.length - 1];
                if (lastMsg?.role === 'user' && Array.isArray(lastMsg.content)) {
                    lastMsg.content.push({
                        type: 'tool_result',
                        tool_use_id: msg.tool_call_id,
                        content: msg.content,
                    });
                }
                else {
                    apiMessages.push({
                        role: 'user',
                        content: [{
                                type: 'tool_result',
                                tool_use_id: msg.tool_call_id,
                                content: msg.content,
                            }],
                    });
                }
                continue;
            }
            // Regular user message
            apiMessages.push({ role: 'user', content: msg.content });
        }
        // Anthropic requires alternating user/assistant. Merge consecutive same-role messages.
        const merged = [];
        for (const msg of apiMessages) {
            const last = merged[merged.length - 1];
            if (last && last.role === msg.role) {
                if (typeof last.content === 'string' && typeof msg.content === 'string') {
                    last.content = last.content + '\n' + msg.content;
                }
                else {
                    // FIX: mixed string/array merges previously dropped `msg`
                    // entirely (e.g. user text following a tool_result was lost).
                    // Normalize both sides to content-block arrays and concatenate.
                    const toBlocks = (c) => typeof c === 'string'
                        ? (c ? [{ type: 'text', text: c }] : [])
                        : c;
                    last.content = [...toBlocks(last.content), ...toBlocks(msg.content)];
                }
            }
            else {
                merged.push(msg);
            }
        }
        return { systemPrompt, apiMessages: merged };
    }
}
|
|
287
|
+
exports.AnthropicProvider = AnthropicProvider;
|
|
288
|
+
//# sourceMappingURL=anthropic.js.map
|
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
// Barrel declarations for the providers module: concrete provider classes
// plus the static model registry and its lookup helpers.
export { OpenAIProvider } from './openai';
export { AnthropicProvider } from './anthropic';
export { MODEL_REGISTRY, PROVIDER_DEFAULTS, getModelInfo, detectProvider } from './registry';
export type { ModelInfo } from './registry';
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
// Compiled CommonJS barrel for the providers module. Re-exports the provider
// classes and registry helpers via live getters (tsc `importsNotUsedAsValues`
// style), so consumers can `require('./providers')` for everything.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.detectProvider = exports.getModelInfo = exports.PROVIDER_DEFAULTS = exports.MODEL_REGISTRY = exports.AnthropicProvider = exports.OpenAIProvider = void 0;
var openai_1 = require("./openai");
Object.defineProperty(exports, "OpenAIProvider", { enumerable: true, get: function () { return openai_1.OpenAIProvider; } });
var anthropic_1 = require("./anthropic");
Object.defineProperty(exports, "AnthropicProvider", { enumerable: true, get: function () { return anthropic_1.AnthropicProvider; } });
var registry_1 = require("./registry");
Object.defineProperty(exports, "MODEL_REGISTRY", { enumerable: true, get: function () { return registry_1.MODEL_REGISTRY; } });
Object.defineProperty(exports, "PROVIDER_DEFAULTS", { enumerable: true, get: function () { return registry_1.PROVIDER_DEFAULTS; } });
Object.defineProperty(exports, "getModelInfo", { enumerable: true, get: function () { return registry_1.getModelInfo; } });
Object.defineProperty(exports, "detectProvider", { enumerable: true, get: function () { return registry_1.detectProvider; } });
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import { LLMProvider, Message, ToolSchema, StreamEvent, ProviderConfig } from '../types';
/**
 * Provider for OpenAI-compatible `/v1/chat/completions` endpoints
 * (OpenAI itself and compatible servers configured via `baseUrl`).
 */
export declare class OpenAIProvider implements LLMProvider {
    /** Display name; set to the configured model name. */
    name: string;
    private config;
    private supportsTools;
    constructor(config: ProviderConfig);
    /** Stream one assistant turn as StreamEvents (text, tool calls, usage, done/error). */
    chat(messages: Message[], tools?: ToolSchema[]): AsyncGenerator<StreamEvent>;
    /** List model ids from GET /v1/models; returns [] on any failure or timeout. */
    listModels(): Promise<string[]>;
    private formatMessage;
}
//# sourceMappingURL=openai.d.ts.map
|
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.OpenAIProvider = void 0;
|
|
4
|
+
const registry_1 = require("./registry");
|
|
5
|
+
/**
 * Streaming chat provider for OpenAI-compatible `/v1/chat/completions` APIs
 * (OpenAI itself, plus local/compatible servers selected via `baseUrl`).
 *
 * Parses the SSE response and re-emits it as internal StreamEvents
 * (`text`, `tool_call_*`, `error`, `done`).
 */
class OpenAIProvider {
    name;
    config;
    supportsTools;
    constructor(config) {
        this.config = config;
        // The provider is surfaced to callers under the configured model name.
        this.name = config.model;
        // Tools are only attached to requests for models known to support them.
        this.supportsTools = (0, registry_1.getModelInfo)(config.model).supportsToolCalling;
    }
    /**
     * Stream one assistant turn.
     *
     * @param messages internal Message[] (passed through formatMessage)
     * @param tools optional OpenAI tool schemas; omitted when the model does
     *              not support tool calling
     * @yields StreamEvent objects; always ends with `done` or `error`.
     */
    async *chat(messages, tools) {
        const body = {
            model: this.config.model,
            messages: messages.map(m => this.formatMessage(m)),
            stream: true,
            // Only attach tools when provided AND the model supports them.
            ...(tools?.length && this.supportsTools ? { tools } : {}),
        };
        const headers = {
            'Content-Type': 'application/json',
        };
        if (this.config.apiKey) {
            headers['Authorization'] = `Bearer ${this.config.apiKey}`;
        }
        // Normalize trailing slashes (consistent with AnthropicProvider) so the
        // URL never contains a double '/'.
        const baseUrl = this.config.baseUrl.replace(/\/+$/, '');
        let response;
        try {
            response = await fetch(`${baseUrl}/v1/chat/completions`, {
                method: 'POST',
                headers,
                body: JSON.stringify(body),
            });
        }
        catch (err) {
            const msg = err instanceof Error ? err.message : String(err);
            yield { type: 'error', error: `Connection failed: ${msg}. Is your LLM server running?` };
            return;
        }
        if (!response.ok) {
            const text = await response.text();
            yield { type: 'error', error: `LLM error ${response.status}: ${text}` };
            return;
        }
        if (!response.body) {
            yield { type: 'error', error: 'No response body from LLM' };
            return;
        }
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';
        // In-progress tool calls keyed by their `index` in the delta stream.
        const toolCalls = new Map();
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                // Keep a possibly-incomplete trailing line for the next chunk.
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmed = line.trim();
                    if (!trimmed || !trimmed.startsWith('data: '))
                        continue;
                    if (trimmed === 'data: [DONE]') {
                        // Terminal marker: flush accumulated tool calls and finish.
                        yield* this.emitToolCallEnds(toolCalls);
                        yield { type: 'done' };
                        return;
                    }
                    try {
                        const data = JSON.parse(trimmed.slice(6));
                        const delta = data.choices?.[0]?.delta;
                        if (!delta)
                            continue;
                        if (delta.content) {
                            yield { type: 'text', text: delta.content };
                        }
                        if (delta.tool_calls) {
                            for (const tc of delta.tool_calls) {
                                const idx = tc.index ?? 0;
                                if (!toolCalls.has(idx)) {
                                    // Some servers omit `id`; synthesize a stable fallback.
                                    toolCalls.set(idx, {
                                        id: tc.id || `call_${idx}`,
                                        name: tc.function?.name || '',
                                        arguments: '',
                                    });
                                    if (tc.function?.name) {
                                        // FIX: yield the stored id (with fallback) rather
                                        // than the raw tc.id, which may be undefined —
                                        // keeps start/end event ids consistent.
                                        const created = toolCalls.get(idx);
                                        yield {
                                            type: 'tool_call_start',
                                            toolCall: {
                                                id: created.id,
                                                type: 'function',
                                                function: { name: tc.function.name, arguments: '' },
                                            },
                                        };
                                    }
                                }
                                const entry = toolCalls.get(idx);
                                if (tc.function?.name && !entry.name) {
                                    entry.name = tc.function.name;
                                }
                                if (tc.function?.arguments) {
                                    entry.arguments += tc.function.arguments;
                                    yield { type: 'tool_call_delta', text: tc.function.arguments };
                                }
                            }
                        }
                    }
                    catch {
                        // Skip malformed SSE lines
                    }
                }
            }
        }
        finally {
            reader.releaseLock();
        }
        // If we reach here without [DONE], emit remaining tool calls
        yield* this.emitToolCallEnds(toolCalls);
        yield { type: 'done' };
    }
    /**
     * Yield a tool_call_end event for every accumulated tool call, then clear
     * the map so nothing can be emitted twice.
     */
    *emitToolCallEnds(toolCalls) {
        for (const [, tc] of toolCalls) {
            yield {
                type: 'tool_call_end',
                toolCall: {
                    id: tc.id,
                    type: 'function',
                    function: { name: tc.name, arguments: tc.arguments },
                },
            };
        }
        toolCalls.clear();
    }
    /**
     * List available model ids from GET /v1/models.
     * Best-effort: returns [] on HTTP error, timeout (5s), or parse failure.
     */
    async listModels() {
        try {
            const headers = {};
            if (this.config.apiKey) {
                headers['Authorization'] = `Bearer ${this.config.apiKey}`;
            }
            const baseUrl = this.config.baseUrl.replace(/\/+$/, '');
            const res = await fetch(`${baseUrl}/v1/models`, {
                headers,
                signal: AbortSignal.timeout(5000),
            });
            if (!res.ok)
                return [];
            const data = await res.json();
            return (data.data || []).map(m => m.id);
        }
        catch {
            return [];
        }
    }
    /**
     * Project an internal Message onto the OpenAI wire shape, copying only the
     * optional fields (tool_calls / tool_call_id / name) that are present.
     */
    formatMessage(msg) {
        const formatted = { role: msg.role, content: msg.content };
        if (msg.tool_calls)
            formatted.tool_calls = msg.tool_calls;
        if (msg.tool_call_id)
            formatted.tool_call_id = msg.tool_call_id;
        if (msg.name)
            formatted.name = msg.name;
        return formatted;
    }
}
|
|
172
|
+
exports.OpenAIProvider = OpenAIProvider;
|
|
173
|
+
//# sourceMappingURL=openai.js.map
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
/** Static metadata describing a known model. */
export interface ModelInfo {
    /** Maximum context window, in tokens. */
    contextWindow: number;
    /** Whether the model supports native (API-level) tool calling. */
    supportsToolCalling: boolean;
    /** Key into PROVIDER_DEFAULTS (e.g. 'anthropic'); absent for local/Ollama models. */
    provider?: string;
}
/** Default base URLs for cloud providers */
export declare const PROVIDER_DEFAULTS: Record<string, {
    baseUrl: string;
    envKey: string;
}>;
/** Known models keyed by model name (exact names, e.g. 'qwen2.5-coder:32b'). */
export declare const MODEL_REGISTRY: Record<string, ModelInfo>;
/**
 * Look up model metadata; falls back to prefix matching and then to a
 * conservative default (8K context, no tool calling) for unknown models.
 */
export declare function getModelInfo(model: string): ModelInfo;
/** Detect provider from model name */
export declare function detectProvider(model: string): string | undefined;
//# sourceMappingURL=registry.d.ts.map
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.MODEL_REGISTRY = exports.PROVIDER_DEFAULTS = void 0;
|
|
4
|
+
exports.getModelInfo = getModelInfo;
|
|
5
|
+
exports.detectProvider = detectProvider;
|
|
6
|
+
/** Default base URLs for cloud providers */
// Each entry pairs the API base URL with the environment variable that
// conventionally holds its API key.
exports.PROVIDER_DEFAULTS = {
    anthropic: { baseUrl: 'https://api.anthropic.com', envKey: 'ANTHROPIC_API_KEY' },
    openai: { baseUrl: 'https://api.openai.com', envKey: 'OPENAI_API_KEY' },
    gemini: { baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai', envKey: 'GEMINI_API_KEY' },
    deepseek: { baseUrl: 'https://api.deepseek.com', envKey: 'DEEPSEEK_API_KEY' },
    groq: { baseUrl: 'https://api.groq.com/openai', envKey: 'GROQ_API_KEY' },
    mistral: { baseUrl: 'https://api.mistral.ai', envKey: 'MISTRAL_API_KEY' },
    xai: { baseUrl: 'https://api.x.ai', envKey: 'XAI_API_KEY' },
};
// Static model table: context window (tokens), native tool-calling support,
// and — for cloud models — the PROVIDER_DEFAULTS key. Entries without
// `provider` are local/Ollama models.
exports.MODEL_REGISTRY = {
    // ── Ollama / Local Models ──────────────────────────────────────────────────
    'qwen2.5-coder:32b': { contextWindow: 32768, supportsToolCalling: true },
    'qwen2.5-coder:14b': { contextWindow: 32768, supportsToolCalling: true },
    'qwen2.5-coder:7b': { contextWindow: 32768, supportsToolCalling: true },
    'qwen2.5-coder:3b': { contextWindow: 32768, supportsToolCalling: true },
    'qwen3:32b': { contextWindow: 32768, supportsToolCalling: true },
    'qwen3:14b': { contextWindow: 32768, supportsToolCalling: true },
    'qwen3:8b': { contextWindow: 32768, supportsToolCalling: true },
    'deepseek-coder-v2:16b': { contextWindow: 16384, supportsToolCalling: true },
    'deepseek-coder:33b': { contextWindow: 16384, supportsToolCalling: false },
    'codellama:34b': { contextWindow: 16384, supportsToolCalling: false },
    'llama3.1:70b': { contextWindow: 131072, supportsToolCalling: true },
    'llama3.1:8b': { contextWindow: 131072, supportsToolCalling: true },
    'llama3.2:3b': { contextWindow: 131072, supportsToolCalling: true },
    'llama3.3:70b': { contextWindow: 131072, supportsToolCalling: true },
    'mistral:7b': { contextWindow: 32768, supportsToolCalling: true },
    'mixtral:8x7b': { contextWindow: 32768, supportsToolCalling: true },
    'phi-3:14b': { contextWindow: 4096, supportsToolCalling: false },
    'phi-4:14b': { contextWindow: 16384, supportsToolCalling: true },
    'starcoder2:15b': { contextWindow: 16384, supportsToolCalling: false },
    'granite-code:34b': { contextWindow: 8192, supportsToolCalling: false },
    'gemma2:27b': { contextWindow: 8192, supportsToolCalling: true },
    'command-r:35b': { contextWindow: 131072, supportsToolCalling: true },
    // ── Anthropic / Claude ─────────────────────────────────────────────────────
    'claude-opus-4-6': { contextWindow: 200000, supportsToolCalling: true, provider: 'anthropic' },
    'claude-sonnet-4-6': { contextWindow: 200000, supportsToolCalling: true, provider: 'anthropic' },
    'claude-sonnet-4-20250514': { contextWindow: 200000, supportsToolCalling: true, provider: 'anthropic' },
    'claude-haiku-4-5-20251001': { contextWindow: 200000, supportsToolCalling: true, provider: 'anthropic' },
    'claude-3-5-sonnet-20241022': { contextWindow: 200000, supportsToolCalling: true, provider: 'anthropic' },
    'claude-3-5-haiku-20241022': { contextWindow: 200000, supportsToolCalling: true, provider: 'anthropic' },
    // ── OpenAI ─────────────────────────────────────────────────────────────────
    'gpt-4o': { contextWindow: 128000, supportsToolCalling: true, provider: 'openai' },
    'gpt-4o-mini': { contextWindow: 128000, supportsToolCalling: true, provider: 'openai' },
    'gpt-4-turbo': { contextWindow: 128000, supportsToolCalling: true, provider: 'openai' },
    'gpt-4.1': { contextWindow: 1047576, supportsToolCalling: true, provider: 'openai' },
    'gpt-4.1-mini': { contextWindow: 1047576, supportsToolCalling: true, provider: 'openai' },
    'gpt-4.1-nano': { contextWindow: 1047576, supportsToolCalling: true, provider: 'openai' },
    'o1': { contextWindow: 200000, supportsToolCalling: true, provider: 'openai' },
    'o1-mini': { contextWindow: 128000, supportsToolCalling: true, provider: 'openai' },
    'o3': { contextWindow: 200000, supportsToolCalling: true, provider: 'openai' },
    'o3-mini': { contextWindow: 200000, supportsToolCalling: true, provider: 'openai' },
    'o4-mini': { contextWindow: 200000, supportsToolCalling: true, provider: 'openai' },
    // ── Google Gemini (OpenAI-compatible endpoint) ─────────────────────────────
    'gemini-2.5-pro': { contextWindow: 1048576, supportsToolCalling: true, provider: 'gemini' },
    'gemini-2.5-flash': { contextWindow: 1048576, supportsToolCalling: true, provider: 'gemini' },
    'gemini-2.0-flash': { contextWindow: 1048576, supportsToolCalling: true, provider: 'gemini' },
    'gemini-1.5-pro': { contextWindow: 2097152, supportsToolCalling: true, provider: 'gemini' },
    'gemini-1.5-flash': { contextWindow: 1048576, supportsToolCalling: true, provider: 'gemini' },
    // ── DeepSeek (OpenAI-compatible) ───────────────────────────────────────────
    'deepseek-chat': { contextWindow: 65536, supportsToolCalling: true, provider: 'deepseek' },
    'deepseek-reasoner': { contextWindow: 65536, supportsToolCalling: true, provider: 'deepseek' },
    // ── Groq (OpenAI-compatible, fast inference) ───────────────────────────────
    'llama-3.3-70b-versatile': { contextWindow: 131072, supportsToolCalling: true, provider: 'groq' },
    'llama-3.1-8b-instant': { contextWindow: 131072, supportsToolCalling: true, provider: 'groq' },
    'mixtral-8x7b-32768': { contextWindow: 32768, supportsToolCalling: true, provider: 'groq' },
    'gemma2-9b-it': { contextWindow: 8192, supportsToolCalling: true, provider: 'groq' },
    // ── Mistral (OpenAI-compatible) ────────────────────────────────────────────
    'mistral-large-latest': { contextWindow: 131072, supportsToolCalling: true, provider: 'mistral' },
    'mistral-small-latest': { contextWindow: 131072, supportsToolCalling: true, provider: 'mistral' },
    'codestral-latest': { contextWindow: 32768, supportsToolCalling: true, provider: 'mistral' },
    // ── xAI / Grok (OpenAI-compatible) ─────────────────────────────────────────
    'grok-3': { contextWindow: 131072, supportsToolCalling: true, provider: 'xai' },
    'grok-3-mini': { contextWindow: 131072, supportsToolCalling: true, provider: 'xai' },
};
|
|
81
|
+
/**
 * Resolve registry metadata for a model name.
 *
 * Resolution order: exact registry key, then a prefix match in registry
 * order (either direction, so "qwen2.5-coder" hits "qwen2.5-coder:32b"),
 * and finally a conservative fallback for unknown models.
 */
function getModelInfo(model) {
    // Fast path: exact registry hit.
    const exact = exports.MODEL_REGISTRY[model];
    if (exact)
        return exact;
    // Prefix match (e.g., "qwen2.5-coder" matches "qwen2.5-coder:32b")
    for (const [key, info] of Object.entries(exports.MODEL_REGISTRY)) {
        const family = key.split(':')[0];
        if (key.startsWith(model) || model.startsWith(family))
            return info;
    }
    // Default: conservative 8K context, no native tool calling
    return { contextWindow: 8192, supportsToolCalling: false };
}
|
|
93
|
+
/** Detect provider from model name */
function detectProvider(model) {
    // Registry metadata wins when available.
    const info = getModelInfo(model);
    if (info.provider)
        return info.provider;
    // Otherwise fall back to name heuristics, checked in priority order.
    const heuristics = [
        { provider: 'anthropic', matches: (m) => m.startsWith('claude') },
        { provider: 'openai', matches: (m) => m.startsWith('gpt-') || m.startsWith('o1') || m.startsWith('o3') || m.startsWith('o4') },
        { provider: 'gemini', matches: (m) => m.startsWith('gemini') },
        { provider: 'deepseek', matches: (m) => m.startsWith('deepseek') },
        { provider: 'xai', matches: (m) => m.startsWith('grok') },
        { provider: 'mistral', matches: (m) => m.startsWith('mistral') || m.startsWith('codestral') },
        { provider: 'groq', matches: (m) => m.includes('groq') || m.startsWith('llama-') },
    ];
    for (const rule of heuristics) {
        if (rule.matches(model))
            return rule.provider;
    }
    return undefined; // local/ollama
}
|
|
115
|
+
//# sourceMappingURL=registry.js.map
|
package/dist/setup.d.ts
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
 * Persisted CLI configuration. All fields are optional so a partially
 * completed setup can still be saved and merged with defaults.
 */
export interface SavedConfig {
    /** Model name, e.g. a key of MODEL_REGISTRY — presumably; confirm against cli usage. */
    model?: string;
    /** Provider key (e.g. 'anthropic', 'openai'); see PROVIDER_DEFAULTS. */
    provider?: string;
    /** API base URL override. */
    baseUrl?: string;
    /** API key; NOTE(review): stored in plain JSON on disk per loadConfig/saveConfig below. */
    apiKey?: string;
    /** Skip interactive approval prompts when true — TODO confirm against agent.js. */
    autoApprove?: boolean;
    /** Upper bound on agent loop iterations — TODO confirm against agent.js. */
    maxIterations?: number;
}
/** Load saved config from ~/.codebot/config.json */
export declare function loadConfig(): SavedConfig;
/** Save config to ~/.codebot/config.json */
export declare function saveConfig(config: SavedConfig): void;
/** Check if this is the first run (no config, no env keys) */
export declare function isFirstRun(): boolean;
/** Interactive setup wizard */
export declare function runSetup(): Promise<SavedConfig>;
//# sourceMappingURL=setup.d.ts.map
|