neoagent 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/.env.example +28 -0
  2. package/LICENSE +21 -0
  3. package/README.md +42 -0
  4. package/bin/neoagent.js +8 -0
  5. package/com.neoagent.plist +45 -0
  6. package/docs/configuration.md +45 -0
  7. package/docs/skills.md +45 -0
  8. package/lib/manager.js +459 -0
  9. package/package.json +61 -0
  10. package/server/db/database.js +239 -0
  11. package/server/index.js +442 -0
  12. package/server/middleware/auth.js +35 -0
  13. package/server/public/app.html +559 -0
  14. package/server/public/css/app.css +608 -0
  15. package/server/public/css/styles.css +472 -0
  16. package/server/public/favicon.svg +17 -0
  17. package/server/public/js/app.js +3283 -0
  18. package/server/public/login.html +313 -0
  19. package/server/routes/agents.js +125 -0
  20. package/server/routes/auth.js +105 -0
  21. package/server/routes/browser.js +116 -0
  22. package/server/routes/mcp.js +164 -0
  23. package/server/routes/memory.js +193 -0
  24. package/server/routes/messaging.js +153 -0
  25. package/server/routes/protocols.js +87 -0
  26. package/server/routes/scheduler.js +63 -0
  27. package/server/routes/settings.js +98 -0
  28. package/server/routes/skills.js +107 -0
  29. package/server/routes/store.js +1192 -0
  30. package/server/services/ai/compaction.js +82 -0
  31. package/server/services/ai/engine.js +1690 -0
  32. package/server/services/ai/models.js +46 -0
  33. package/server/services/ai/multiStep.js +112 -0
  34. package/server/services/ai/providers/anthropic.js +181 -0
  35. package/server/services/ai/providers/base.js +40 -0
  36. package/server/services/ai/providers/google.js +187 -0
  37. package/server/services/ai/providers/grok.js +121 -0
  38. package/server/services/ai/providers/ollama.js +162 -0
  39. package/server/services/ai/providers/openai.js +167 -0
  40. package/server/services/ai/toolRunner.js +218 -0
  41. package/server/services/browser/controller.js +320 -0
  42. package/server/services/cli/executor.js +204 -0
  43. package/server/services/mcp/client.js +260 -0
  44. package/server/services/memory/embeddings.js +126 -0
  45. package/server/services/memory/manager.js +431 -0
  46. package/server/services/messaging/base.js +23 -0
  47. package/server/services/messaging/discord.js +238 -0
  48. package/server/services/messaging/manager.js +328 -0
  49. package/server/services/messaging/telegram.js +243 -0
  50. package/server/services/messaging/telnyx.js +693 -0
  51. package/server/services/messaging/whatsapp.js +304 -0
  52. package/server/services/scheduler/cron.js +312 -0
  53. package/server/services/websocket.js +191 -0
  54. package/server/utils/security.js +71 -0
@@ -0,0 +1,46 @@
1
+ const { GrokProvider } = require('./providers/grok');
2
+ const { OpenAIProvider } = require('./providers/openai');
3
+ const { GoogleProvider } = require('./providers/google');
4
+
5
/**
 * Catalog of models exposed to the app, plus a factory for the provider
 * backends that serve them. Provider API keys are read from the environment.
 */
const SUPPORTED_MODELS = [
  {
    id: 'grok-4-1-fast-reasoning',
    label: 'Grok 4.1 (Personality / Default)',
    provider: 'grok',
    purpose: 'general'
  },
  {
    id: 'gpt-5-nano',
    label: 'GPT-5 Nano (Fast / Subagents)',
    provider: 'openai',
    purpose: 'fast'
  },
  {
    id: 'gpt-5-mini',
    label: 'GPT-5 Mini (Planning / Complex)',
    provider: 'openai',
    purpose: 'planning'
  },
  {
    id: 'gemini-3.1-flash-lite-preview',
    label: 'Gemini 3.1 Flash Lite (Preview)',
    provider: 'google',
    purpose: 'general'
  }
];

/**
 * Instantiate the backend for a provider key ('grok' | 'openai' | 'google').
 * @param {string} providerStr - provider key from SUPPORTED_MODELS.
 * @returns a configured provider instance.
 * @throws {Error} when the key is not one of the known providers.
 */
function createProviderInstance(providerStr) {
  switch (providerStr) {
    case 'grok':
      return new GrokProvider({ apiKey: process.env.XAI_API_KEY });
    case 'openai':
      return new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });
    case 'google':
      return new GoogleProvider({ apiKey: process.env.GOOGLE_AI_KEY });
    default:
      throw new Error(`Unknown provider: ${providerStr}`);
  }
}
42
+
43
+ module.exports = {
44
+ SUPPORTED_MODELS,
45
+ createProviderInstance
46
+ };
@@ -0,0 +1,112 @@
1
+ const { v4: uuidv4 } = require('uuid');
2
+ const db = require('../../db/database');
3
+ const { AgentEngine } = require('./engine');
4
+
5
/**
 * Coordinates agent-engine runs for multi-step tasks: a single orchestrated
 * run (planAndExecute) or a batch of independent runs (runParallel), with
 * progress broadcast to the owning user's socket.io room.
 */
class MultiStepOrchestrator {
  /**
   * @param {object} engine - agent engine exposing run() and stopRun().
   * @param {object} io - socket.io server; when absent, events are dropped.
   */
  constructor(engine, io) {
    this.engine = engine;
    this.io = io;
    // orchestrationId -> { userId, task, status, steps, currentStep }
    this.activeOrchestrations = new Map();
  }

  /**
   * Run a single task through the engine, tracking it as an active
   * orchestration and emitting start/complete/error events.
   * @returns the engine result ({ content, totalTokens, iterations, ... }).
   */
  async planAndExecute(userId, task, options = {}) {
    const orchestrationId = uuidv4();

    this.activeOrchestrations.set(orchestrationId, {
      userId,
      task,
      status: 'planning',
      steps: [],
      currentStep: 0
    });

    this.emit(userId, 'orchestration:start', { orchestrationId, task });

    try {
      const result = await this.engine.run(userId, task, {
        runId: orchestrationId,
        conversationId: options.conversationId,
        app: options.app,
        triggerType: options.triggerType || 'user',
        triggerSource: options.triggerSource || 'web',
        context: options.context,
        stream: options.stream
      });

      this.activeOrchestrations.delete(orchestrationId);
      this.emit(userId, 'orchestration:complete', {
        orchestrationId,
        result: result.content,
        totalTokens: result.totalTokens,
        iterations: result.iterations
      });

      return result;
    } catch (err) {
      // Always clear tracking before propagating the failure.
      this.activeOrchestrations.delete(orchestrationId);
      this.emit(userId, 'orchestration:error', { orchestrationId, error: err.message });
      throw err;
    }
  }

  /**
   * Run several independent tasks concurrently. Per-task failures are
   * captured as { status: 'failed' } entries rather than rejecting.
   * @returns array of { index, status, result | error } entries, in task order.
   */
  async runParallel(userId, tasks, options = {}) {
    const groupId = uuidv4();

    this.emit(userId, 'parallel:start', { groupId, taskCount: tasks.length });

    // Each task catches its own failure, so these promises never reject.
    const runOne = async (task, index) => {
      try {
        const result = await this.engine.run(userId, task, {
          app: options.app,
          triggerType: options.triggerType || 'user',
          triggerSource: options.triggerSource || 'parallel',
          context: options.context
        });
        return { index, status: 'completed', result };
      } catch (err) {
        return { index, status: 'failed', error: err.message };
      }
    };

    const settled = await Promise.allSettled(tasks.map(runOne));
    const results = settled.map((item) =>
      item.status === 'fulfilled'
        ? item.value
        : { status: 'failed', error: item.reason?.message }
    );

    this.emit(userId, 'parallel:complete', { groupId, results });
    return results;
  }

  /** Stop a tracked orchestration and forget it; unknown ids are ignored. */
  stop(orchestrationId) {
    if (!this.activeOrchestrations.has(orchestrationId)) return;
    this.engine.stopRun(orchestrationId);
    this.activeOrchestrations.delete(orchestrationId);
  }

  /** Snapshot of currently tracked orchestrations for status displays. */
  getActive() {
    const snapshot = [];
    for (const [id, orch] of this.activeOrchestrations) {
      snapshot.push({
        id,
        userId: orch.userId,
        task: orch.task,
        status: orch.status,
        currentStep: orch.currentStep
      });
    }
    return snapshot;
  }

  /** Emit to the user's room; a no-op when no socket.io server was supplied. */
  emit(userId, event, data) {
    if (!this.io) return;
    this.io.to(`user:${userId}`).emit(event, data);
  }
}
111
+
112
+ module.exports = { MultiStepOrchestrator };
@@ -0,0 +1,181 @@
1
+ const Anthropic = require('@anthropic-ai/sdk');
2
+ const { BaseProvider } = require('./base');
3
+
4
/**
 * Anthropic (Claude) chat provider.
 *
 * Translates the app's internal OpenAI-style message/tool format to the
 * Anthropic Messages API and normalizes responses back into the shared
 * { content, toolCalls, finishReason, usage, model } shape used by the
 * other providers in this directory.
 */
class AnthropicProvider extends BaseProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'anthropic';
    // Known model ids; models[0] is the default (see BaseProvider.getDefaultModel()).
    this.models = [
      'claude-sonnet-4-20250514',
      'claude-3-5-sonnet-20241022',
      'claude-3-5-haiku-20241022',
      'claude-3-opus-20240229'
    ];
    // Context window (tokens) per model id.
    this.contextWindows = {
      'claude-sonnet-4-20250514': 200000,
      'claude-3-5-sonnet-20241022': 200000,
      'claude-3-5-haiku-20241022': 200000,
      'claude-3-opus-20240229': 200000
    };
    this.client = new Anthropic({
      apiKey: config.apiKey || process.env.ANTHROPIC_API_KEY
    });
  }

  /** Context window in tokens; unknown models fall back to 200k. */
  getContextWindow(model) {
    return this.contextWindows[model] || 200000;
  }

  /** Convert OpenAI-style tool defs to Anthropic's { name, description, input_schema }. */
  formatTools(tools) {
    return tools.map(tool => ({
      name: tool.name,
      description: tool.description,
      input_schema: tool.parameters || { type: 'object', properties: {} }
    }));
  }

  /**
   * Split internal messages into Anthropic's (system, messages) pair:
   * - 'system' messages are concatenated into a single system string;
   * - 'tool' results become user messages carrying a tool_result block;
   * - assistant messages with tool_calls become tool_use content blocks;
   * - everything else maps directly to 'assistant'/'user' text turns.
   */
  convertMessages(messages) {
    let system = '';
    const converted = [];

    for (const msg of messages) {
      if (msg.role === 'system') {
        system += (system ? '\n\n' : '') + msg.content;
        continue;
      }

      if (msg.role === 'tool') {
        converted.push({
          role: 'user',
          content: [{
            type: 'tool_result',
            tool_use_id: msg.tool_call_id,
            // Anthropic requires string content in tool_result blocks.
            content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
          }]
        });
        continue;
      }

      if (msg.role === 'assistant' && msg.tool_calls) {
        const content = [];
        if (msg.content) content.push({ type: 'text', text: msg.content });
        for (const tc of msg.tool_calls) {
          content.push({
            type: 'tool_use',
            id: tc.id,
            name: tc.function.name,
            // Internal format stores arguments as a JSON string; Anthropic wants an object.
            input: JSON.parse(tc.function.arguments || '{}')
          });
        }
        converted.push({ role: 'assistant', content });
        continue;
      }

      converted.push({
        role: msg.role === 'assistant' ? 'assistant' : 'user',
        content: msg.content
      });
    }

    return { system, messages: converted };
  }

  /**
   * One-shot (non-streaming) completion.
   * @param {Array} messages - internal OpenAI-style messages.
   * @param {Array} tools - internal tool definitions (optional).
   * @param {object} options - { model, maxTokens }.
   * @returns {Promise<object>} normalized { content, toolCalls, finishReason, usage, model }.
   */
  async chat(messages, tools = [], options = {}) {
    const model = options.model || this.config.model || this.getDefaultModel();
    const { system, messages: converted } = this.convertMessages(messages);

    const params = {
      model,
      max_tokens: options.maxTokens || 16384,
      messages: converted
    };

    if (system) params.system = system;
    if (tools.length > 0) params.tools = this.formatTools(tools);

    const response = await this.client.messages.create(params);

    // Flatten the response content blocks into text + OpenAI-style tool calls.
    let content = '';
    const toolCalls = [];

    for (const block of response.content) {
      if (block.type === 'text') {
        content += block.text;
      } else if (block.type === 'tool_use') {
        toolCalls.push({
          id: block.id,
          type: 'function',
          function: {
            name: block.name,
            arguments: JSON.stringify(block.input)
          }
        });
      }
    }

    return {
      content,
      toolCalls,
      // Map Anthropic's stop_reason onto OpenAI-style finish reasons.
      finishReason: response.stop_reason === 'tool_use' ? 'tool_calls' : 'stop',
      usage: {
        promptTokens: response.usage.input_tokens,
        completionTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens
      },
      model: response.model
    };
  }

  /**
   * Streaming completion. Yields { type: 'content', content } for each text
   * delta, then a final { type: 'done', ... } event on message_stop.
   */
  async *stream(messages, tools = [], options = {}) {
    const model = options.model || this.config.model || this.getDefaultModel();
    const { system, messages: converted } = this.convertMessages(messages);

    const params = {
      model,
      max_tokens: options.maxTokens || 16384,
      messages: converted,
      stream: true
    };

    if (system) params.system = system;
    if (tools.length > 0) params.tools = this.formatTools(tools);

    const stream = await this.client.messages.stream(params);

    let content = '';
    let currentToolCalls = [];
    // Index of the most recently started tool_use block. Anthropic streams one
    // content block at a time, so input_json_delta fragments are appended to
    // the last-started tool call.
    let currentToolIndex = -1;

    for await (const event of stream) {
      if (event.type === 'content_block_start') {
        if (event.content_block.type === 'tool_use') {
          currentToolIndex++;
          currentToolCalls.push({
            id: event.content_block.id,
            type: 'function',
            function: { name: event.content_block.name, arguments: '' }
          });
        }
      } else if (event.type === 'content_block_delta') {
        if (event.delta.type === 'text_delta') {
          content += event.delta.text;
          yield { type: 'content', content: event.delta.text };
        } else if (event.delta.type === 'input_json_delta') {
          // Accumulate partial JSON arguments for the in-progress tool call.
          if (currentToolCalls[currentToolIndex]) {
            currentToolCalls[currentToolIndex].function.arguments += event.delta.partial_json;
          }
        }
      } else if (event.type === 'message_stop') {
        yield {
          type: 'done',
          content,
          toolCalls: currentToolCalls,
          finishReason: currentToolCalls.length > 0 ? 'tool_calls' : 'stop',
          // NOTE(review): token usage arrives in message_delta events and is
          // not captured here — confirm callers tolerate a null usage.
          usage: null
        };
      }
    }
  }
}
180
+
181
+ module.exports = { AnthropicProvider };
@@ -0,0 +1,40 @@
1
/**
 * Abstract base class for AI chat providers. Concrete providers override
 * chat()/stream() and typically populate this.models and per-model context
 * window tables.
 */
class BaseProvider {
  constructor(config = {}) {
    // Provider-specific configuration (apiKey, model, ...).
    this.config = config;
    // Overridden by subclasses with their provider key.
    this.name = 'base';
    // Known model ids; index 0 is treated as the default model.
    this.models = [];
  }

  /** First registered model id, or the empty string when none exist. */
  getDefaultModel() {
    const [first] = this.models;
    return first || '';
  }

  /**
   * Map internal tool definitions onto the OpenAI function-calling schema.
   * Tools without a parameter schema get an empty object schema.
   */
  formatTools(tools) {
    return tools.map(({ name, description, parameters }) => ({
      type: 'function',
      function: {
        name,
        description,
        parameters: parameters || { type: 'object', properties: {} }
      }
    }));
  }

  /** Abstract: one-shot completion. Subclasses must override. */
  async chat(messages, tools = [], options = {}) {
    throw new Error('chat() not implemented');
  }

  /** Abstract: streaming completion. Subclasses must override. */
  async *stream(messages, tools = [], options = {}) {
    throw new Error('stream() not implemented');
  }

  /** Rough token estimate: roughly four characters per token. */
  countTokensEstimate(text) {
    return Math.ceil(text.length / 4);
  }

  /** Conservative default context window; subclasses override per model. */
  getContextWindow(model) {
    return 128000;
  }
}
39
+
40
+ module.exports = { BaseProvider };
@@ -0,0 +1,187 @@
1
+ const { GoogleGenerativeAI } = require('@google/generative-ai');
2
+ const { BaseProvider } = require('./base');
3
+
4
/**
 * Google Gemini chat provider (via the @google/generative-ai SDK).
 *
 * Translates the app's internal OpenAI-style message/tool format into
 * Gemini's (systemInstruction, history) chat model and normalizes results
 * back into the shared { content, toolCalls, finishReason, usage, model }
 * shape used by the other providers in this directory.
 */
class GoogleProvider extends BaseProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'google';
    // Known model ids; models[0] is the default (see BaseProvider.getDefaultModel()).
    this.models = [
      'gemini-2.0-flash',
      'gemini-2.0-pro',
      'gemini-1.5-pro',
      'gemini-1.5-flash',
      'gemini-3.1-flash-lite-preview'
    ];
    // Context window (tokens) per model id — TODO confirm for the preview model.
    this.contextWindows = {
      'gemini-2.0-flash': 1048576,
      'gemini-2.0-pro': 2097152,
      'gemini-1.5-pro': 2097152,
      'gemini-1.5-flash': 1048576,
      'gemini-3.1-flash-lite-preview': 1048576
    };
    this.genAI = new GoogleGenerativeAI(config.apiKey || process.env.GOOGLE_AI_KEY);
  }

  /** Context window in tokens; unknown models fall back to 1M. */
  getContextWindow(model) {
    return this.contextWindows[model] || 1048576;
  }

  /** Wrap OpenAI-style tool defs in Gemini's single functionDeclarations group. */
  formatTools(tools) {
    return [{
      functionDeclarations: tools.map(tool => ({
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters || { type: 'object', properties: {} }
      }))
    }];
  }

  /**
   * Split internal messages into Gemini's (systemInstruction, history) pair:
   * - 'system' messages are concatenated into one instruction string;
   * - 'tool' results become role 'function' turns with a functionResponse part;
   * - assistant messages with tool_calls become functionCall parts on a 'model' turn;
   * - everything else maps to plain 'model'/'user' text turns.
   */
  convertMessages(messages) {
    let systemInstruction = '';
    const history = [];

    for (const msg of messages) {
      if (msg.role === 'system') {
        systemInstruction += (systemInstruction ? '\n\n' : '') + msg.content;
        continue;
      }
      if (msg.role === 'tool') {
        history.push({
          role: 'function',
          parts: [{
            functionResponse: {
              name: msg.name || 'tool',
              response: { result: msg.content }
            }
          }]
        });
        continue;
      }
      if (msg.role === 'assistant' && msg.tool_calls) {
        const parts = [];
        if (msg.content) parts.push({ text: msg.content });
        for (const tc of msg.tool_calls) {
          const functionCallPart = {
            functionCall: {
              name: tc.function.name,
              // Internal format stores arguments as a JSON string; Gemini wants an object.
              args: JSON.parse(tc.function.arguments || '{}')
            }
          };
          // Round-trip the thought signature captured when this call was
          // produced — presumably required by Gemini for multi-turn tool
          // use; TODO confirm against the SDK docs.
          if (tc.function.thought_signature) {
            functionCallPart.thoughtSignature = tc.function.thought_signature;
          }
          parts.push(functionCallPart);
        }
        history.push({ role: 'model', parts });
        continue;
      }
      history.push({
        role: msg.role === 'assistant' ? 'model' : 'user',
        parts: [{ text: msg.content || '' }]
      });
    }

    return { systemInstruction, history };
  }

  /**
   * One-shot (non-streaming) completion.
   * @param {Array} messages - internal OpenAI-style messages.
   * @param {Array} tools - internal tool definitions (optional).
   * @param {object} options - { model }.
   * @returns {Promise<object>} normalized { content, toolCalls, finishReason, usage, model }.
   */
  async chat(messages, tools = [], options = {}) {
    const model = options.model || this.config.model || this.getDefaultModel();
    const { systemInstruction, history } = this.convertMessages(messages);

    const genModel = this.genAI.getGenerativeModel({
      model,
      systemInstruction: systemInstruction || undefined,
      tools: tools.length > 0 ? this.formatTools(tools) : undefined
    });

    // Last turn is sent as the new message; everything before it is history.
    // NOTE(review): assumes at least one non-system message — with an empty
    // history, lastMessage is undefined and .parts below throws. Confirm
    // callers always supply a user message.
    const lastMessage = history.pop();
    const chat = genModel.startChat({ history });
    const result = await chat.sendMessage(lastMessage.parts);
    const response = result.response;

    let content = '';
    const toolCalls = [];

    for (const candidate of response.candidates || []) {
      for (const part of candidate.content?.parts || []) {
        if (part.text) content += part.text;
        if (part.functionCall) {
          toolCalls.push({
            // Gemini does not supply call ids, so synthesize a unique one.
            id: `call_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
            type: 'function',
            function: {
              name: part.functionCall.name,
              arguments: JSON.stringify(part.functionCall.args || {}),
              thought_signature: part.thoughtSignature
            }
          });
        }
      }
    }

    const usage = response.usageMetadata;
    return {
      content,
      toolCalls,
      finishReason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
      usage: usage ? {
        promptTokens: usage.promptTokenCount || 0,
        completionTokens: usage.candidatesTokenCount || 0,
        totalTokens: usage.totalTokenCount || 0
      } : null,
      model
    };
  }

  /**
   * Streaming completion. Yields { type: 'content', content } per text chunk,
   * then a final { type: 'done', ... } event after the stream ends.
   */
  async *stream(messages, tools = [], options = {}) {
    const model = options.model || this.config.model || this.getDefaultModel();
    const { systemInstruction, history } = this.convertMessages(messages);

    const genModel = this.genAI.getGenerativeModel({
      model,
      systemInstruction: systemInstruction || undefined,
      tools: tools.length > 0 ? this.formatTools(tools) : undefined
    });

    // Same history/lastMessage split (and empty-history caveat) as chat().
    const lastMessage = history.pop();
    const chat = genModel.startChat({ history });
    const result = await chat.sendMessageStream(lastMessage.parts);

    let content = '';
    const toolCalls = [];

    for await (const chunk of result.stream) {
      const text = chunk.text();
      if (text) {
        content += text;
        yield { type: 'content', content: text };
      }

      // Collect any function calls attached to this chunk's candidates.
      for (const candidate of chunk.candidates || []) {
        for (const part of candidate.content?.parts || []) {
          if (part.functionCall) {
            toolCalls.push({
              id: `call_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
              type: 'function',
              function: {
                name: part.functionCall.name,
                arguments: JSON.stringify(part.functionCall.args || {}),
                thought_signature: part.thoughtSignature
              }
            });
          }
        }
      }
    }

    yield {
      type: 'done',
      content,
      toolCalls,
      finishReason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
      // NOTE(review): usageMetadata from the aggregated response is not
      // surfaced here — confirm callers tolerate a null usage.
      usage: null
    };
  }
}
186
+
187
+ module.exports = { GoogleProvider };
@@ -0,0 +1,121 @@
1
+ const OpenAI = require('openai');
2
+ const { BaseProvider } = require('./base');
3
+
4
/**
 * xAI Grok provider.
 *
 * Grok exposes an OpenAI-compatible chat-completions API, so this provider
 * drives it through the OpenAI SDK pointed at https://api.x.ai/v1 and
 * normalizes results into the shared { content, toolCalls, finishReason,
 * usage, model } shape used by the other providers in this directory.
 */
class GrokProvider extends BaseProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'grok';
    // Register the default model so BaseProvider.getDefaultModel() works,
    // consistent with the sibling providers.
    this.models = ['grok-4-1-fast-reasoning'];
    this.client = new OpenAI({
      apiKey: config.apiKey || process.env.XAI_API_KEY,
      baseURL: 'https://api.x.ai/v1'
    });
  }

  /** Context window in tokens (same for all supported grok-4 models). */
  getContextWindow(model) {
    return 131072; // grok-4 context window
  }

  /** Resolve the model like the sibling providers: option, then config, then default. */
  _resolveModel(options) {
    return options.model || this.config.model || this.getDefaultModel();
  }

  /** Build the request payload shared by chat() and stream(). */
  _buildParams(model, messages, tools, options) {
    const params = {
      model,
      messages,
      max_tokens: options.maxTokens || 16384
    };

    // grok-4-1-fast-reasoning is a reasoning model: no temperature
    const isReasoning = model.includes('reasoning') || model.startsWith('grok-4');
    if (!isReasoning) {
      params.temperature = options.temperature ?? 0.9;
    }

    if (tools && tools.length > 0) {
      params.tools = this.formatTools(tools);
      params.tool_choice = 'auto';
    }

    return params;
  }

  /**
   * One-shot (non-streaming) completion.
   * @returns {Promise<object>} normalized { content, toolCalls, finishReason, usage, model }.
   */
  async chat(messages, tools = [], options = {}) {
    const model = this._resolveModel(options);
    const params = this._buildParams(model, messages, tools, options);

    const response = await this.client.chat.completions.create(params);
    return this.normalizeResponse(response);
  }

  /**
   * Streaming completion. Yields { type: 'content', content } deltas, then a
   * final { type: 'tool_calls', ... } or { type: 'done', ... } event.
   */
  async *stream(messages, tools = [], options = {}) {
    const model = this._resolveModel(options);
    const params = { ...this._buildParams(model, messages, tools, options), stream: true };

    const stream = await this.client.chat.completions.create(params);

    const toolCalls = [];
    let content = '';

    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta;
      if (!delta) continue;

      if (delta.content) {
        content += delta.content;
        yield { type: 'content', content: delta.content };
      }

      if (delta.tool_calls) {
        // Tool-call fragments arrive keyed by index; accumulate id/name/arguments.
        for (const tc of delta.tool_calls) {
          if (!toolCalls[tc.index]) {
            toolCalls[tc.index] = { id: tc.id || '', type: 'function', function: { name: tc.function?.name || '', arguments: '' } };
          }
          if (tc.id) toolCalls[tc.index].id = tc.id;
          if (tc.function?.name) toolCalls[tc.index].function.name = tc.function.name;
          if (tc.function?.arguments) toolCalls[tc.index].function.arguments += tc.function.arguments;
        }
      }

      const finishReason = chunk.choices[0]?.finish_reason;
      // filter(Boolean) drops holes left if fragment indices arrived out of
      // order (previously such holes leaked as undefined entries).
      if (finishReason === 'tool_calls' || (finishReason === 'stop' && toolCalls.length > 0)) {
        yield { type: 'tool_calls', toolCalls: toolCalls.filter(Boolean), content };
        return;
      }
      if (finishReason === 'stop') {
        yield { type: 'done', content };
        return;
      }
    }

    // Stream ended without an explicit finish_reason; flush what we collected.
    if (toolCalls.length > 0) {
      yield { type: 'tool_calls', toolCalls: toolCalls.filter(Boolean), content };
    } else {
      yield { type: 'done', content };
    }
  }

  /**
   * Normalize an OpenAI-SDK response into the shared provider result shape.
   * Usage is mapped from the API's snake_case fields to the camelCase shape
   * the sibling providers (anthropic/google) return — previously the raw
   * snake_case usage object leaked through — and the responding model id is
   * included for parity.
   */
  normalizeResponse(response) {
    const choice = response.choices[0];
    const msg = choice.message;
    return {
      content: msg.content || '',
      toolCalls: msg.tool_calls?.map(tc => ({
        id: tc.id,
        type: 'function',
        function: { name: tc.function.name, arguments: tc.function.arguments }
      })) || [],
      finishReason: choice.finish_reason,
      usage: response.usage ? {
        promptTokens: response.usage.prompt_tokens || 0,
        completionTokens: response.usage.completion_tokens || 0,
        totalTokens: response.usage.total_tokens || 0
      } : null,
      model: response.model
    };
  }

  /** OpenAI function-calling tool schema (Grok is API-compatible). */
  formatTools(tools) {
    return tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters
      }
    }));
  }
}
121
+ module.exports = { GrokProvider };