nex-code 0.3.5 → 0.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,333 +0,0 @@
1
- /**
2
- * cli/providers/anthropic.js — Anthropic Claude Provider
3
- * Supports Claude Sonnet, Opus, Haiku via Anthropic Messages API with SSE streaming.
4
- */
5
-
6
- const axios = require('axios');
7
- const { BaseProvider } = require('./base');
8
-
9
// Claude model registry: alias → { id, name, maxTokens, contextWindow }.
// Aliases map short names (e.g. 'claude-sonnet') to pinned API model ids.
// Every listed model shares a 200k-token context window.
const ANTHROPIC_MODELS = {
  'claude-sonnet': {
    id: 'claude-sonnet-4-6',
    name: 'Claude Sonnet 4.6',
    maxTokens: 64000,
    contextWindow: 200000,
  },
  'claude-opus': {
    id: 'claude-opus-4-6',
    name: 'Claude Opus 4.6',
    maxTokens: 128000,
    contextWindow: 200000,
  },
  'claude-haiku': {
    id: 'claude-haiku-4-5-20251001',
    name: 'Claude Haiku 4.5',
    maxTokens: 64000,
    contextWindow: 200000,
  },
  'claude-sonnet-4-5': {
    id: 'claude-sonnet-4-5-20250929',
    name: 'Claude Sonnet 4.5',
    maxTokens: 64000,
    contextWindow: 200000,
  },
  'claude-sonnet-4': {
    id: 'claude-sonnet-4-20250514',
    name: 'Claude Sonnet 4',
    maxTokens: 64000,
    contextWindow: 200000,
  },
};

// Value sent in the required `anthropic-version` request header.
const ANTHROPIC_VERSION = '2023-06-01';
43
-
44
/**
 * Anthropic Claude provider.
 *
 * Wraps the Anthropic Messages API (POST {baseUrl}/messages) with a
 * non-streaming `chat()` and an SSE-streaming `stream()` call, translating
 * between the CLI's normalized OpenAI-style messages/tools and Anthropic's
 * system-prompt / tool_use / tool_result content-block format.
 */
class AnthropicProvider extends BaseProvider {
  /**
   * @param {object} [config]
   * @param {string} [config.baseUrl] - API root (default 'https://api.anthropic.com/v1').
   * @param {object} [config.models] - Model registry (default ANTHROPIC_MODELS).
   * @param {string} [config.defaultModel] - Default model alias (default 'claude-sonnet').
   * @param {number} [config.timeout] - Request timeout in ms (default 180000).
   * @param {number} [config.temperature] - Sampling temperature (default 0.2; `??` preserves an explicit 0).
   * @param {string} [config.apiVersion] - `anthropic-version` header value (default ANTHROPIC_VERSION).
   */
  constructor(config = {}) {
    super({
      name: 'anthropic',
      baseUrl: config.baseUrl || 'https://api.anthropic.com/v1',
      models: config.models || ANTHROPIC_MODELS,
      defaultModel: config.defaultModel || 'claude-sonnet',
      ...config,
    });
    this.timeout = config.timeout || 180000;
    this.temperature = config.temperature ?? 0.2;
    this.apiVersion = config.apiVersion || ANTHROPIC_VERSION;
  }

  /** @returns {boolean} true when an API key is present in the environment. */
  isConfigured() {
    return !!this.getApiKey();
  }

  /** @returns {string|null} Key from the ANTHROPIC_API_KEY env var, or null. */
  getApiKey() {
    return process.env.ANTHROPIC_API_KEY || null;
  }

  /**
   * Build request headers for the Messages API.
   * @returns {object}
   * @throws {Error} when ANTHROPIC_API_KEY is not set.
   */
  _getHeaders() {
    const key = this.getApiKey();
    if (!key) throw new Error('ANTHROPIC_API_KEY not set');
    return {
      'x-api-key': key,
      'anthropic-version': this.apiVersion,
      'Content-Type': 'application/json',
    };
  }

  /**
   * Convert normalized messages to Anthropic format.
   *
   * Anthropic differs from the OpenAI shape in three ways handled here:
   * - system prompts go in a separate top-level `system` string (multiple
   *   system messages are concatenated with blank lines);
   * - assistant tool calls become `tool_use` content blocks;
   * - tool results become `tool_result` blocks inside a *user* message,
   *   with consecutive results merged into one user message.
   *
   * @param {Array} messages - [{ role, content, tool_calls?, tool_call_id? }]
   * @returns {{messages: Array, system: string}}
   */
  formatMessages(messages) {
    let system = '';
    const formatted = [];

    for (const msg of messages) {
      if (msg.role === 'system') {
        system += (system ? '\n\n' : '') + msg.content;
        continue;
      }

      if (msg.role === 'assistant') {
        const content = [];
        if (msg.content) {
          content.push({ type: 'text', text: msg.content });
        }
        if (msg.tool_calls) {
          for (const [i, tc] of msg.tool_calls.entries()) {
            content.push({
              type: 'tool_use',
              // Fix: include the index in the synthetic fallback id.
              // `Date.now()` alone collides when several tool calls are
              // formatted within the same millisecond, which would break
              // tool_use ↔ tool_result matching on the API side.
              id: tc.id || `toolu-${Date.now()}-${i}`,
              name: tc.function.name,
              input:
                typeof tc.function.arguments === 'string'
                  ? JSON.parse(tc.function.arguments || '{}')
                  : tc.function.arguments || {},
            });
          }
        }
        // NOTE(review): an assistant turn with neither text nor tool calls is
        // sent as a single empty text block; confirm the API accepts empty
        // text blocks, or filter such turns upstream.
        formatted.push({ role: 'assistant', content: content.length > 0 ? content : [{ type: 'text', text: '' }] });
        continue;
      }

      if (msg.role === 'tool') {
        // Anthropic tool results are sent as user messages with tool_result
        // content blocks; merge consecutive results into one user message.
        const last = formatted[formatted.length - 1];
        const toolResult = {
          type: 'tool_result',
          tool_use_id: msg.tool_call_id,
          content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
        };

        if (last && last.role === 'user' && Array.isArray(last.content) && last.content[0]?.type === 'tool_result') {
          last.content.push(toolResult);
        } else {
          formatted.push({ role: 'user', content: [toolResult] });
        }
        continue;
      }

      // Plain user messages pass through unchanged.
      formatted.push({ role: 'user', content: msg.content });
    }

    return { messages: formatted, system };
  }

  /**
   * Convert OpenAI/Ollama tool definitions to Anthropic's tool format
   * (`function.parameters` becomes `input_schema`).
   * @param {Array} tools
   * @returns {Array}
   */
  formatTools(tools) {
    if (!tools || tools.length === 0) return [];
    return tools.map((t) => ({
      name: t.function.name,
      description: t.function.description || '',
      input_schema: t.function.parameters || { type: 'object', properties: {} },
    }));
  }

  /**
   * Map a model alias to its pinned API id; unknown aliases pass through
   * unchanged so callers may use raw model ids directly.
   * @param {string} model
   * @returns {string}
   */
  _resolveModelId(model) {
    const info = this.getModel(model);
    return info?.id || model;
  }

  /**
   * Non-streaming chat call.
   * @param {Array} messages - Normalized messages.
   * @param {Array} tools - OpenAI/Ollama-format tool definitions.
   * @param {object} [options] - { model, temperature, maxTokens, timeout }
   * @returns {Promise<{content: string, tool_calls: Array}>}
   */
  async chat(messages, tools, options = {}) {
    const model = options.model || this.defaultModel;
    const modelId = this._resolveModelId(model);
    const modelInfo = this.getModel(model);
    const maxTokens = options.maxTokens || modelInfo?.maxTokens || 8192;
    const { messages: formatted, system } = this.formatMessages(messages);

    const body = {
      model: modelId,
      messages: formatted,
      max_tokens: maxTokens,
      temperature: options.temperature ?? this.temperature,
    };

    if (system) body.system = system;
    const formattedTools = this.formatTools(tools);
    if (formattedTools.length > 0) body.tools = formattedTools;

    const response = await axios.post(`${this.baseUrl}/messages`, body, {
      timeout: options.timeout || this.timeout,
      headers: this._getHeaders(),
    });

    return this.normalizeResponse(response.data);
  }

  /**
   * Streaming chat call over SSE.
   *
   * Handles the Anthropic stream event types visible below:
   * `content_block_start` (tool_use opens a new accumulator),
   * `content_block_delta` (`text_delta` → onToken + transcript,
   * `input_json_delta` → partial tool-arg JSON), `content_block_stop`,
   * and `message_stop` (resolve). Unknown event types are ignored.
   *
   * @param {Array} messages - Normalized messages.
   * @param {Array} tools - Tool definitions.
   * @param {object} [options] - { model, temperature, maxTokens, timeout,
   *   onToken: (text) => void, signal: AbortSignal }
   * @returns {Promise<{content: string, tool_calls: Array}>}
   */
  async stream(messages, tools, options = {}) {
    const model = options.model || this.defaultModel;
    const modelId = this._resolveModelId(model);
    const modelInfo = this.getModel(model);
    const maxTokens = options.maxTokens || modelInfo?.maxTokens || 8192;
    const onToken = options.onToken || (() => {});
    const { messages: formatted, system } = this.formatMessages(messages);

    const body = {
      model: modelId,
      messages: formatted,
      max_tokens: maxTokens,
      temperature: options.temperature ?? this.temperature,
      stream: true,
    };

    if (system) body.system = system;
    const formattedTools = this.formatTools(tools);
    if (formattedTools.length > 0) body.tools = formattedTools;

    let response;
    try {
      response = await axios.post(`${this.baseUrl}/messages`, body, {
        timeout: options.timeout || this.timeout,
        headers: this._getHeaders(),
        responseType: 'stream',
        signal: options.signal,
      });
    } catch (err) {
      // Propagate cancellations untouched so callers can distinguish them.
      if (err.name === 'CanceledError' || err.name === 'AbortError' || err.code === 'ERR_CANCELED') throw err;
      const msg = err.response?.data?.error?.message || err.message;
      throw new Error(`API Error: ${msg}`);
    }

    return new Promise((resolve, reject) => {
      let content = '';
      const toolUses = []; // accumulators: { id, name, inputJson }
      let currentToolIndex = -1; // index into toolUses while a tool_use block is open
      let buffer = ''; // carries a partial SSE line across chunks

      // Abort listener: destroy the HTTP stream when the signal fires.
      // (Rejecting after a prior resolve is a harmless no-op.)
      if (options.signal) {
        options.signal.addEventListener('abort', () => {
          response.data.destroy();
          reject(new DOMException('The operation was aborted', 'AbortError'));
        }, { once: true });
      }

      response.data.on('data', (chunk) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        buffer = lines.pop() || ''; // keep the trailing partial line

        for (const line of lines) {
          const trimmed = line.trim();

          if (trimmed.startsWith('data: ')) {
            const data = trimmed.slice(6);
            let parsed;
            try {
              parsed = JSON.parse(data);
            } catch {
              continue; // skip malformed/partial JSON payloads
            }

            switch (parsed.type) {
              case 'content_block_start': {
                const block = parsed.content_block;
                if (block?.type === 'tool_use') {
                  currentToolIndex = toolUses.length;
                  toolUses.push({ id: block.id, name: block.name, inputJson: '' });
                }
                break;
              }

              case 'content_block_delta': {
                const delta = parsed.delta;
                if (delta?.type === 'text_delta' && delta.text) {
                  onToken(delta.text);
                  content += delta.text;
                }
                if (delta?.type === 'input_json_delta' && delta.partial_json !== undefined) {
                  if (currentToolIndex >= 0) {
                    toolUses[currentToolIndex].inputJson += delta.partial_json;
                  }
                }
                break;
              }

              case 'content_block_stop':
                currentToolIndex = -1;
                break;

              case 'message_stop':
                resolve({ content, tool_calls: this._buildToolCalls(toolUses) });
                return;
            }
          }
        }
      });

      response.data.on('error', (err) => {
        if (options.signal?.aborted) return; // Ignore errors after abort
        reject(new Error(`Stream error: ${err.message}`));
      });

      // Fallback: resolve with whatever accumulated if the stream ends
      // without an explicit message_stop event.
      response.data.on('end', () => {
        resolve({ content, tool_calls: this._buildToolCalls(toolUses) });
      });
    });
  }

  /**
   * Normalize a non-streaming Messages API response: concatenate text
   * blocks and collect tool_use blocks into OpenAI-style tool calls.
   * @param {object} data - Raw response body.
   * @returns {{content: string, tool_calls: Array}}
   */
  normalizeResponse(data) {
    let content = '';
    const toolCalls = [];

    for (const block of data.content || []) {
      if (block.type === 'text') {
        content += block.text;
      } else if (block.type === 'tool_use') {
        toolCalls.push({
          id: block.id,
          function: {
            name: block.name,
            arguments: block.input,
          },
        });
      }
    }

    return { content, tool_calls: toolCalls };
  }

  /**
   * Convert streamed tool_use accumulators into normalized tool calls.
   * Arguments are parsed from the accumulated JSON when possible; on parse
   * failure the raw string is passed through for the caller to inspect.
   * @param {Array<{id: string, name: string, inputJson: string}>} toolUses
   * @returns {Array}
   */
  _buildToolCalls(toolUses) {
    return toolUses
      .filter((tu) => tu.name)
      .map((tu, i) => {
        let args = {};
        if (tu.inputJson) {
          try {
            args = JSON.parse(tu.inputJson);
          } catch {
            args = tu.inputJson;
          }
        }
        return {
          // Fix: index suffix keeps synthetic fallback ids unique even when
          // several tool calls are built within the same millisecond.
          id: tu.id || `anthropic-${Date.now()}-${i}`,
          function: { name: tu.name, arguments: args },
        };
      });
  }
}
332
-
333
- module.exports = { AnthropicProvider, ANTHROPIC_MODELS };
@@ -1,116 +0,0 @@
1
- /**
2
- * cli/providers/base.js — Abstract Provider Interface
3
- * All providers extend this base class.
4
- */
5
-
6
/**
 * Abstract base class shared by every LLM provider.
 *
 * Concrete providers (e.g. 'ollama', 'openai', 'anthropic') extend this
 * class and must implement the abstract members: isConfigured(), chat(),
 * stream() and normalizeResponse(). The remaining methods are shared
 * defaults that subclasses may override when their wire format differs.
 */
class BaseProvider {
  /**
   * @param {object} [config]
   * @param {string} [config.name] - Provider name (e.g. 'ollama', 'openai').
   * @param {string} [config.baseUrl] - API base URL.
   * @param {object} [config.models] - Registry { id: { name, maxTokens, contextWindow } }.
   * @param {string} [config.defaultModel] - Default model id.
   * @throws {Error} when instantiated directly instead of via a subclass.
   */
  constructor(config = {}) {
    if (new.target === BaseProvider) {
      throw new Error('BaseProvider is abstract — use a concrete provider');
    }
    const { name, baseUrl, models, defaultModel } = config;
    this.name = name || 'unknown';
    this.baseUrl = baseUrl || '';
    this.models = models || {};
    this.defaultModel = defaultModel || null;
  }

  /**
   * Whether the provider is ready to use (API key set, etc.).
   * Abstract — subclasses must implement.
   * @returns {boolean}
   */
  isConfigured() {
    throw new Error(`${this.name}: isConfigured() not implemented`);
  }

  /**
   * API key lookup; providers that require one override this.
   * @returns {string|null}
   */
  getApiKey() {
    return null;
  }

  /**
   * Full model registry for this provider.
   * @returns {object} { modelId: { id, name, maxTokens, contextWindow } }
   */
  getModels() {
    return this.models;
  }

  /**
   * Identifiers of every registered model.
   * @returns {string[]}
   */
  getModelNames() {
    return Object.keys(this.models);
  }

  /**
   * Look up one model's metadata.
   * @param {string} modelId
   * @returns {object|null} null when the id is not registered.
   */
  getModel(modelId) {
    const entry = this.models[modelId];
    return entry || null;
  }

  /**
   * Non-streaming chat call. Abstract — subclasses must implement.
   * @param {Array} messages - Normalized messages [{ role, content, tool_calls?, tool_call_id? }]
   * @param {Array} tools - Tool definitions (OpenAI/Ollama format)
   * @param {object} [options] - { model, temperature, maxTokens }
   * @returns {Promise<{content: string, tool_calls: Array}>}
   */
  async chat(messages, tools, options = {}) {
    throw new Error(`${this.name}: chat() not implemented`);
  }

  /**
   * Streaming chat call. Abstract — subclasses must implement.
   * @param {Array} messages - Normalized messages
   * @param {Array} tools - Tool definitions
   * @param {object} [options] - { model, temperature, maxTokens, onToken: (text) => void }
   * @returns {Promise<{content: string, tool_calls: Array}>}
   */
  async stream(messages, tools, options = {}) {
    throw new Error(`${this.name}: stream() not implemented`);
  }

  /**
   * Convert normalized messages to the provider's wire format.
   * Default is the identity wrapping; override when the provider differs.
   * @param {Array} messages
   * @returns {object} { messages, system? } - provider-specific format
   */
  formatMessages(messages) {
    return { messages };
  }

  /**
   * Convert tool definitions to the provider's tool format.
   * Default passes tools through untouched; override when needed.
   * @param {Array} tools - OpenAI/Ollama format tools
   * @returns {Array}
   */
  formatTools(tools) {
    return tools;
  }

  /**
   * Normalize a raw provider response to the standard shape.
   * Abstract — subclasses must implement.
   * @param {object} raw - Raw provider response
   * @returns {{content: string, tool_calls: Array}}
   */
  normalizeResponse(raw) {
    throw new Error(`${this.name}: normalizeResponse() not implemented`);
  }
}
115
-
116
- module.exports = { BaseProvider };
@@ -1,239 +0,0 @@
1
- /**
2
- * cli/providers/gemini.js — Google Gemini Provider
3
- * Supports Gemini 3.x Preview, 2.5 Pro/Flash/Lite, 2.0 Flash (deprecated) via
4
- * Google's OpenAI-compatible endpoint with SSE streaming.
5
- */
6
-
7
- const axios = require('axios');
8
- const { BaseProvider } = require('./base');
9
-
10
// Gemini model registry: alias → { id, name, maxTokens, contextWindow }.
// For Gemini the alias and the API model id are identical, and every
// listed model has a 1,048,576-token context window.
const GEMINI_MODELS = {
  // Preview — Gemini 3.x (latest)
  'gemini-3.1-pro-preview': {
    id: 'gemini-3.1-pro-preview',
    name: 'Gemini 3.1 Pro Preview',
    maxTokens: 65536,
    contextWindow: 1048576,
  },
  'gemini-3-flash-preview': {
    id: 'gemini-3-flash-preview',
    name: 'Gemini 3 Flash Preview',
    maxTokens: 65536,
    contextWindow: 1048576,
  },
  // Stable — Gemini 2.5 (GA)
  'gemini-2.5-pro': {
    id: 'gemini-2.5-pro',
    name: 'Gemini 2.5 Pro',
    maxTokens: 65536,
    contextWindow: 1048576,
  },
  'gemini-2.5-flash': {
    id: 'gemini-2.5-flash',
    name: 'Gemini 2.5 Flash',
    maxTokens: 65536,
    contextWindow: 1048576,
  },
  'gemini-2.5-flash-lite': {
    id: 'gemini-2.5-flash-lite',
    name: 'Gemini 2.5 Flash Lite',
    maxTokens: 65536,
    contextWindow: 1048576,
  },
  // Deprecated — retiring June 1, 2026
  'gemini-2.0-flash': {
    id: 'gemini-2.0-flash',
    name: 'Gemini 2.0 Flash',
    maxTokens: 8192,
    contextWindow: 1048576,
  },
  'gemini-2.0-flash-lite': {
    id: 'gemini-2.0-flash-lite',
    name: 'Gemini 2.0 Flash Lite',
    maxTokens: 8192,
    contextWindow: 1048576,
  },
};
22
-
23
/**
 * Google Gemini provider.
 *
 * Uses Google's OpenAI-compatible endpoint
 * (POST {baseUrl}/chat/completions) for both non-streaming `chat()` and
 * SSE-streaming `stream()` calls, so messages and tools stay close to the
 * normalized OpenAI shape and only need light reformatting.
 */
class GeminiProvider extends BaseProvider {
  /**
   * @param {object} [config]
   * @param {string} [config.baseUrl] - API root (default Google's OpenAI-compat endpoint).
   * @param {object} [config.models] - Model registry (default GEMINI_MODELS).
   * @param {string} [config.defaultModel] - Default model id (default 'gemini-2.5-flash').
   * @param {number} [config.timeout] - Request timeout in ms (default 180000).
   * @param {number} [config.temperature] - Sampling temperature (default 0.2; `??` preserves an explicit 0).
   */
  constructor(config = {}) {
    super({
      name: 'gemini',
      baseUrl: config.baseUrl || 'https://generativelanguage.googleapis.com/v1beta/openai',
      models: config.models || GEMINI_MODELS,
      defaultModel: config.defaultModel || 'gemini-2.5-flash',
      ...config,
    });
    this.timeout = config.timeout || 180000;
    this.temperature = config.temperature ?? 0.2;
  }

  /** @returns {boolean} true when an API key is present in the environment. */
  isConfigured() {
    return !!this.getApiKey();
  }

  /** @returns {string|null} Key from GEMINI_API_KEY or GOOGLE_API_KEY, or null. */
  getApiKey() {
    return process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY || null;
  }

  /**
   * Build request headers (Bearer auth on the OpenAI-compatible endpoint).
   * @returns {object}
   * @throws {Error} when no API key is set.
   */
  _getHeaders() {
    const key = this.getApiKey();
    if (!key) throw new Error('GEMINI_API_KEY not set');
    return {
      Authorization: `Bearer ${key}`,
      'Content-Type': 'application/json',
    };
  }

  /**
   * Convert normalized messages to the OpenAI-compatible wire format:
   * stringify tool-call arguments and tool-result content, and ensure
   * every assistant tool call carries an id.
   * @param {Array} messages - [{ role, content, tool_calls?, tool_call_id? }]
   * @returns {{messages: Array}}
   */
  formatMessages(messages) {
    return {
      messages: messages.map((msg) => {
        if (msg.role === 'assistant' && msg.tool_calls) {
          return {
            role: 'assistant',
            content: msg.content || null,
            tool_calls: msg.tool_calls.map((tc, i) => ({
              // Fix: include the index in the synthetic fallback id.
              // `Date.now()` alone collides when one assistant turn holds
              // several tool calls, breaking tool_call_id matching.
              id: tc.id || `call-${Date.now()}-${i}`,
              type: 'function',
              function: {
                name: tc.function.name,
                arguments:
                  typeof tc.function.arguments === 'string'
                    ? tc.function.arguments
                    : JSON.stringify(tc.function.arguments),
              },
            })),
          };
        }
        if (msg.role === 'tool') {
          return {
            role: 'tool',
            content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
            tool_call_id: msg.tool_call_id,
          };
        }
        return { role: msg.role, content: msg.content };
      }),
    };
  }

  /**
   * Non-streaming chat call.
   * @param {Array} messages - Normalized messages.
   * @param {Array} tools - OpenAI-format tool definitions (sent as-is).
   * @param {object} [options] - { model, temperature, maxTokens, timeout }
   * @returns {Promise<{content: string, tool_calls: Array}>}
   */
  async chat(messages, tools, options = {}) {
    const model = options.model || this.defaultModel;
    const modelInfo = this.getModel(model);
    const maxTokens = options.maxTokens || modelInfo?.maxTokens || 8192;
    const { messages: formatted } = this.formatMessages(messages);

    const body = {
      model,
      messages: formatted,
      max_tokens: maxTokens,
      temperature: options.temperature ?? this.temperature,
    };

    if (tools && tools.length > 0) {
      body.tools = tools;
    }

    const response = await axios.post(`${this.baseUrl}/chat/completions`, body, {
      timeout: options.timeout || this.timeout,
      headers: this._getHeaders(),
    });

    return this.normalizeResponse(response.data);
  }

  /**
   * Streaming chat call over SSE (OpenAI-compatible `data:` lines ending
   * with `[DONE]`). Text deltas are forwarded to `onToken` and
   * accumulated; tool-call deltas are merged by index.
   * @param {Array} messages - Normalized messages.
   * @param {Array} tools - Tool definitions.
   * @param {object} [options] - { model, temperature, maxTokens, timeout,
   *   onToken: (text) => void, signal: AbortSignal }
   * @returns {Promise<{content: string, tool_calls: Array}>}
   */
  async stream(messages, tools, options = {}) {
    const model = options.model || this.defaultModel;
    const modelInfo = this.getModel(model);
    const maxTokens = options.maxTokens || modelInfo?.maxTokens || 8192;
    const onToken = options.onToken || (() => {});
    const { messages: formatted } = this.formatMessages(messages);

    const body = {
      model,
      messages: formatted,
      max_tokens: maxTokens,
      temperature: options.temperature ?? this.temperature,
      stream: true,
    };

    if (tools && tools.length > 0) {
      body.tools = tools;
    }

    let response;
    try {
      response = await axios.post(`${this.baseUrl}/chat/completions`, body, {
        timeout: options.timeout || this.timeout,
        headers: this._getHeaders(),
        responseType: 'stream',
        signal: options.signal,
      });
    } catch (err) {
      // Propagate cancellations untouched so callers can distinguish them.
      if (err.name === 'CanceledError' || err.name === 'AbortError' || err.code === 'ERR_CANCELED') throw err;
      const msg = err.response?.data?.error?.message || err.message;
      throw new Error(`API Error: ${msg}`);
    }

    return new Promise((resolve, reject) => {
      let content = '';
      const toolCallsMap = {}; // index -> { id, name, arguments }
      let buffer = ''; // carries a partial SSE line across chunks

      // Abort listener: destroy the HTTP stream when the signal fires.
      // (Rejecting after a prior resolve is a harmless no-op.)
      if (options.signal) {
        options.signal.addEventListener('abort', () => {
          response.data.destroy();
          reject(new DOMException('The operation was aborted', 'AbortError'));
        }, { once: true });
      }

      response.data.on('data', (chunk) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        buffer = lines.pop() || ''; // keep the trailing partial line

        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed || !trimmed.startsWith('data: ')) continue;
          const data = trimmed.slice(6);
          if (data === '[DONE]') {
            resolve({ content, tool_calls: this._buildToolCalls(toolCallsMap) });
            return;
          }

          let parsed;
          try {
            parsed = JSON.parse(data);
          } catch {
            continue; // skip malformed/partial JSON payloads
          }

          const delta = parsed.choices?.[0]?.delta;
          if (!delta) continue;

          if (delta.content) {
            onToken(delta.content);
            content += delta.content;
          }

          if (delta.tool_calls) {
            // Tool-call fragments arrive keyed by index; concatenate the
            // name/arguments pieces until the stream completes.
            for (const tc of delta.tool_calls) {
              const idx = tc.index ?? 0;
              if (!toolCallsMap[idx]) {
                toolCallsMap[idx] = { id: tc.id || '', name: '', arguments: '' };
              }
              if (tc.id) toolCallsMap[idx].id = tc.id;
              if (tc.function?.name) toolCallsMap[idx].name += tc.function.name;
              if (tc.function?.arguments) toolCallsMap[idx].arguments += tc.function.arguments;
            }
          }
        }
      });

      response.data.on('error', (err) => {
        if (options.signal?.aborted) return; // Ignore errors after abort
        reject(new Error(`Stream error: ${err.message}`));
      });

      // Fallback: resolve with whatever accumulated if the stream ends
      // without an explicit [DONE] marker.
      response.data.on('end', () => {
        resolve({ content, tool_calls: this._buildToolCalls(toolCallsMap) });
      });
    });
  }

  /**
   * Normalize a non-streaming OpenAI-compatible response to the standard
   * { content, tool_calls } shape (arguments stay as JSON strings).
   * @param {object} data - Raw response body.
   * @returns {{content: string, tool_calls: Array}}
   */
  normalizeResponse(data) {
    const choice = data.choices?.[0]?.message || {};
    const toolCalls = (choice.tool_calls || []).map((tc) => ({
      id: tc.id,
      function: {
        name: tc.function.name,
        arguments: tc.function.arguments,
      },
    }));

    return {
      content: choice.content || '',
      tool_calls: toolCalls,
    };
  }

  /**
   * Convert streamed tool-call accumulators into normalized tool calls,
   * dropping entries that never received a name.
   * @param {object} toolCallsMap - index -> { id, name, arguments }
   * @returns {Array}
   */
  _buildToolCalls(toolCallsMap) {
    return Object.values(toolCallsMap)
      .filter((tc) => tc.name)
      .map((tc, i) => ({
        // Fix: index suffix keeps synthetic fallback ids unique even when
        // several tool calls are built within the same millisecond.
        id: tc.id || `gemini-${Date.now()}-${i}`,
        function: {
          name: tc.name,
          arguments: tc.arguments,
        },
      }));
  }
}
238
-
239
- module.exports = { GeminiProvider, GEMINI_MODELS };