protoagent 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +39 -0
- package/dist/agentic-loop.js +277 -0
- package/dist/config/client.js +166 -0
- package/dist/config/commands.js +208 -0
- package/dist/config/manager.js +117 -0
- package/dist/config/mcp-commands.js +266 -0
- package/dist/config/mcp-manager.js +240 -0
- package/dist/config/mcp-types.js +28 -0
- package/dist/config/providers.js +170 -0
- package/dist/config/setup.js +175 -0
- package/dist/config/system-prompt.js +301 -0
- package/dist/config/types.js +4 -0
- package/dist/index.js +156 -0
- package/dist/tools/create-directory.js +76 -0
- package/dist/tools/directory-operations.js +195 -0
- package/dist/tools/edit-file.js +144 -0
- package/dist/tools/file-operations.js +211 -0
- package/dist/tools/index.js +95 -0
- package/dist/tools/list-directory.js +84 -0
- package/dist/tools/read-file.js +111 -0
- package/dist/tools/run-shell-command.js +340 -0
- package/dist/tools/search-files.js +177 -0
- package/dist/tools/search-operations.js +179 -0
- package/dist/tools/shell-operations.js +342 -0
- package/dist/tools/todo.js +177 -0
- package/dist/tools/view-directory-tree.js +125 -0
- package/dist/tools/write-file.js +136 -0
- package/dist/tools.js +2 -0
- package/dist/utils/conversation-compactor.js +139 -0
- package/dist/utils/cost-tracker.js +106 -0
- package/dist/utils/file-operations-approval.js +163 -0
- package/dist/utils/logger.js +149 -0
- package/package.json +61 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Thomas Gauvin
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# ProtoAgent
|
|
2
|
+
|
|
3
|
+
🤖 **Interactive AI coding agent that can read, write, and execute files in your terminal.**
|
|
4
|
+
|
|
5
|
+
## Get Started
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install -g protoagent
|
|
9
|
+
protoagent
|
|
10
|
+
```
|
|
11
|
+
|
|
12
|
+
That's it! ProtoAgent will guide you through setup and start helping with your coding tasks.
|
|
13
|
+
|
|
14
|
+
## What it does
|
|
15
|
+
|
|
16
|
+
- **File Operations** - Read, write, edit, and search files
|
|
17
|
+
- **Shell Commands** - Execute terminal commands safely
|
|
18
|
+
- **Code Analysis** - Understand and analyze your codebase
|
|
19
|
+
- **Project Generation** - Create files, folders, and entire project structures
|
|
20
|
+
- **Interactive Chat** - Natural language interface for all coding tasks
|
|
21
|
+
|
|
22
|
+
## Example Usage
|
|
23
|
+
|
|
24
|
+
```
|
|
25
|
+
protoagent> Create a React component called Button.tsx
|
|
26
|
+
protoagent> Find all TODO comments in my project
|
|
27
|
+
protoagent> Set up a Node.js Express server with TypeScript
|
|
28
|
+
protoagent> Debug this error in my package.json
|
|
29
|
+
protoagent> Show me the structure of my src folder
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Requirements
|
|
33
|
+
|
|
34
|
+
- Node.js 16+
|
|
35
|
+
- OpenAI API key (you'll be prompted to set this up on first run)
|
|
36
|
+
|
|
37
|
+
## License
|
|
38
|
+
|
|
39
|
+
MIT
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
import { tools, handleToolCall } from './tools/index.js';
|
|
2
|
+
import { createChatCompletion } from './config/client.js';
|
|
3
|
+
import { generateSystemPrompt } from './config/system-prompt.js';
|
|
4
|
+
import { getModelConfig } from './config/providers.js';
|
|
5
|
+
import { estimateTokens, createUsageInfo, logUsageInfo, getContextInfo } from './utils/cost-tracker.js';
|
|
6
|
+
import { checkAndCompactIfNeeded } from './utils/conversation-compactor.js';
|
|
7
|
+
/**
 * Build ProtoAgent's system message at runtime from the dynamically
 * generated system prompt.
 * @returns {Promise<{ role: string, content: string }>} Chat message with role 'system'.
 */
async function createSystemMessage() {
    const content = await generateSystemPrompt();
    return { role: 'system', content };
}
|
|
15
|
+
export class AgenticLoop {
    /**
     * Drives the chat/tool-use loop: sends conversation state to the model,
     * executes requested tools, and feeds results back until the model
     * answers without tool calls or the iteration cap is hit.
     *
     * @param {object} openaiClient - OpenAI-compatible SDK client.
     * @param {object} config - User configuration (provider, model).
     * @param {object} [options]
     * @param {number} [options.maxIterations=100] - Cap on model/tool round trips per user input.
     * @param {boolean} [options.streamOutput=true] - Stream assistant text to stdout as it arrives.
     */
    constructor(openaiClient, config, options = {}) {
        this.systemMessage = null;
        this.openaiClient = openaiClient;
        this.config = config;
        this.options = {
            maxIterations: options.maxIterations || 100,
            streamOutput: options.streamOutput !== false, // Default to true
        };
        this.messages = []; // Will be initialized with system message in initialize()
    }
    /**
     * Initialize the agentic loop with the dynamically generated system message.
     */
    async initialize() {
        this.systemMessage = await createSystemMessage();
        this.messages = [this.systemMessage];
    }
    /**
     * Get a defensive copy of the current conversation history.
     */
    getMessages() {
        return [...this.messages];
    }
    /**
     * Clear the conversation history (keeps the system message).
     */
    clearHistory() {
        this.messages = this.systemMessage ? [this.systemMessage] : [];
    }
    /**
     * Append a message to the conversation history.
     */
    addMessage(message) {
        this.messages.push(message);
    }
    /**
     * Process a user input and run the agentic loop.
     *
     * Fixes vs. previous revision:
     * - Newline escapes in console output and in `result.split()` were
     *   double-escaped (`'\\n'`), which printed literal "\n" text and never
     *   split tool output into lines; they are now real newlines.
     * - Tool-call delta index uses `??` instead of `||` so the value 0 is
     *   handled by intent rather than by falsiness.
     */
    async processUserInput(userInput) {
        try {
            // Add user message to conversation history
            this.addMessage({
                role: 'user',
                content: userInput
            });
            console.log('🤖 Thinking...');
            // Start the agentic loop
            let continueProcessing = true;
            let iterationCount = 0;
            while (continueProcessing && iterationCount < this.options.maxIterations) {
                iterationCount++;
                try {
                    // Check if conversation needs compaction before making API call
                    const modelConfig = getModelConfig(this.config.provider, this.config.model);
                    if (modelConfig) {
                        const contextInfo = getContextInfo(this.messages, modelConfig);
                        if (contextInfo.needsCompaction) {
                            console.log('\n🗜️ Context window approaching limit, compacting conversation...');
                            this.messages = await checkAndCompactIfNeeded(this.openaiClient, this.config.model, this.messages, modelConfig.contextWindow, contextInfo.currentTokens);
                        }
                    }
                    // Create completion using OpenAI with built-in retry logic and cost tracking
                    const { stream, estimatedInputTokens } = await createChatCompletion(this.openaiClient, {
                        model: this.config.model,
                        messages: this.messages,
                        tools: tools,
                        tool_choice: 'auto',
                        stream: true
                    }, this.config, this.messages);
                    // Accumulate the streamed chunks into one assistant message
                    let assistantMessage = {
                        role: 'assistant',
                        content: '',
                        tool_calls: []
                    };
                    let streamedContent = '';
                    let hasToolCalls = false;
                    let actualUsage = undefined;
                    for await (const chunk of stream) {
                        const delta = chunk.choices[0]?.delta;
                        // Final chunk carries usage when stream_options.include_usage is set
                        if (chunk.usage) {
                            actualUsage = chunk.usage;
                        }
                        if (delta?.content) {
                            streamedContent += delta.content;
                            assistantMessage.content = streamedContent;
                            // Stream content to user in real-time if no tool calls are being made
                            if (this.options.streamOutput && !hasToolCalls && !delta.tool_calls) {
                                if (streamedContent === delta.content) {
                                    // First content chunk: print the prefix once
                                    process.stdout.write('\n🤖 ProtoAgent: ');
                                }
                                process.stdout.write(delta.content);
                            }
                        }
                        if (delta?.tool_calls) {
                            hasToolCalls = true;
                            // Initialize tool_calls array if not exists
                            if (!assistantMessage.tool_calls) {
                                assistantMessage.tool_calls = [];
                            }
                            // Tool calls stream as partial deltas keyed by index;
                            // concatenate id/name/arguments fragments per index.
                            for (const toolCallDelta of delta.tool_calls) {
                                const index = toolCallDelta.index ?? 0;
                                // Ensure we have an entry at this index
                                if (!assistantMessage.tool_calls[index]) {
                                    assistantMessage.tool_calls[index] = {
                                        id: '',
                                        type: 'function',
                                        function: { name: '', arguments: '' }
                                    };
                                }
                                if (toolCallDelta.id) {
                                    assistantMessage.tool_calls[index].id = toolCallDelta.id;
                                }
                                if (toolCallDelta.function?.name) {
                                    assistantMessage.tool_calls[index].function.name += toolCallDelta.function.name;
                                }
                                if (toolCallDelta.function?.arguments) {
                                    assistantMessage.tool_calls[index].function.arguments += toolCallDelta.function.arguments;
                                }
                            }
                        }
                    }
                    const message = assistantMessage;
                    // Calculate and log cost information (prefer actual usage over estimates)
                    if (modelConfig) {
                        const finalInputTokens = actualUsage?.prompt_tokens ?? estimatedInputTokens;
                        const finalOutputTokens = actualUsage?.completion_tokens ?? estimateTokens(message.content || '');
                        const usageInfo = createUsageInfo(finalInputTokens, finalOutputTokens, modelConfig);
                        const contextInfo = getContextInfo(this.messages, modelConfig);
                        logUsageInfo(usageInfo, contextInfo, modelConfig);
                    }
                    // Check if AI wants to use tools
                    if (message.tool_calls && message.tool_calls.length > 0) {
                        // Add the AI's message (with tool calls) to conversation
                        this.addMessage(message);
                        console.log(`🔧 Using ${message.tool_calls.length} tool(s)...`);
                        // Execute each tool call sequentially and record results
                        for (const toolCall of message.tool_calls) {
                            const { name, arguments: args } = toolCall.function;
                            console.log(`🛠️ ${name}`);
                            try {
                                const toolArgs = JSON.parse(args);
                                const result = await handleToolCall(name, toolArgs);
                                // Add tool result to conversation
                                this.addMessage({
                                    role: 'tool',
                                    tool_call_id: toolCall.id,
                                    content: result
                                });
                                // Show abbreviated result to user (head/tail for long output)
                                const lines = result.split('\n');
                                if (lines.length > 10) {
                                    console.log(`   ✅\n${lines.slice(0, 3).join('\n   ')}\n   ... (${lines.length - 6} more lines) ...\n   ${lines.slice(-3).join('\n   ')}`);
                                }
                                else {
                                    console.log(`   ✅\n${result.slice(0, 200)}${result.length > 200 ? '...' : ''}`);
                                }
                            }
                            catch (error) {
                                const errorMessage = error instanceof Error ? error.message : String(error);
                                console.log(`   ❌ Error: ${errorMessage}`);
                                // Record the failure so the model can react to it
                                this.addMessage({
                                    role: 'tool',
                                    tool_call_id: toolCall.id,
                                    content: `Error: ${errorMessage}`
                                });
                            }
                        }
                        // Continue the loop to let AI process tool results
                        continue;
                    }
                    else {
                        // AI provided a regular response
                        if (message.content && !hasToolCalls) {
                            // Content was already streamed to user during the loop above
                            if (this.options.streamOutput) {
                                console.log('\n'); // Add newline after streaming is complete
                            }
                            else {
                                console.log(`\n🤖 ProtoAgent: ${message.content}\n`);
                            }
                            // Add AI response to conversation history
                            this.addMessage({
                                role: 'assistant',
                                content: message.content
                            });
                        }
                        else if (message.content && hasToolCalls) {
                            // Tool-call deltas appeared (suppressing live streaming) but no
                            // complete calls resulted (rare case) — print the content now
                            if (this.options.streamOutput) {
                                process.stdout.write('\n🤖 ProtoAgent: ');
                                process.stdout.write(message.content);
                                console.log('\n');
                            }
                            else {
                                console.log(`\n🤖 ProtoAgent: ${message.content}\n`);
                            }
                            // Add AI response to conversation history
                            this.addMessage({
                                role: 'assistant',
                                content: message.content
                            });
                        }
                        continueProcessing = false;
                    }
                }
                catch (apiError) {
                    // Handle API errors that weren't caught by the retry logic
                    console.error('\n❌ API Error:', apiError?.message || 'Unknown API error');
                    // Check for specific error types and provide helpful messages
                    if (apiError?.status === 401) {
                        console.log('💡 Authentication failed. Your API key may be invalid or expired.');
                        console.log('   Run: protoagent config --update-key');
                    }
                    else if (apiError?.status === 403) {
                        console.log('💡 Access forbidden. Check your API key permissions or billing status.');
                    }
                    else if (apiError?.status === 400) {
                        console.log('💡 Bad request. There may be an issue with the request format.');
                        console.log('   This could be a bug in ProtoAgent. Please check for updates.');
                    }
                    else {
                        console.log('💡 An unexpected API error occurred. Please try again.');
                    }
                    // Exit the processing loop for API errors
                    break;
                }
            }
            if (iterationCount >= this.options.maxIterations) {
                console.log('\n⚠️ Maximum iteration limit reached. Task may be incomplete.');
            }
        }
        catch (error) {
            // Handle general processing errors
            console.error('\n❌ Error during processing:', error?.message || 'Unknown error');
            // Provide helpful error messages for common issues
            if (error?.message?.includes('API key')) {
                console.log('💡 There seems to be an issue with your API key configuration.');
                console.log('   Run: protoagent config --show');
            }
            else if (error?.message?.includes('model')) {
                console.log('💡 There seems to be an issue with the selected model.');
                console.log('   Run: protoagent config --update-model');
            }
            else {
                console.log('💡 An unexpected error occurred. Please check your configuration and try again.');
            }
            console.log('\n🤖 ProtoAgent: I encountered an error and cannot continue processing this request.\n');
        }
    }
}
|
|
270
|
+
/**
 * Factory: construct an AgenticLoop and run its async initialization
 * (loads the dynamic system prompt) before handing it to the caller.
 * @returns {Promise<AgenticLoop>} Ready-to-use loop instance.
 */
export async function createAgenticLoop(openaiClient, config, options) {
    const agenticLoop = new AgenticLoop(openaiClient, config, options);
    await agenticLoop.initialize();
    return agenticLoop;
}
|
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI client manager for ProtoAgent
|
|
3
|
+
*/
|
|
4
|
+
import OpenAI from 'openai';
|
|
5
|
+
import { geminiProvider, cerebrasProvider, getModelConfig } from './providers.js';
|
|
6
|
+
import { estimateTokens, getContextInfo } from '../utils/cost-tracker.js';
|
|
7
|
+
/**
 * Build an OpenAI-SDK client for the configured provider.
 * OpenAI uses the SDK's default base URL; Gemini and Cerebras point the
 * SDK at their OpenAI-compatible endpoints.
 * @param {{ provider: string, credentials: Record<string, string> }} config
 * @returns {OpenAI} Configured client instance.
 * @throws {Error} When the provider is not openai, gemini, or cerebras.
 */
export function createOpenAIClient(config) {
    switch (config.provider) {
        case 'openai':
            return new OpenAI({
                apiKey: config.credentials.OPENAI_API_KEY
            });
        case 'gemini':
            return new OpenAI({
                apiKey: config.credentials.GEMINI_API_KEY,
                baseURL: geminiProvider.baseURL
            });
        case 'cerebras':
            return new OpenAI({
                apiKey: config.credentials.CEREBRAS_API_KEY,
                baseURL: cerebrasProvider.baseURL
            });
        default:
            throw new Error(`Unsupported provider: ${config.provider}`);
    }
}
|
|
30
|
+
/**
 * Pause execution for the given duration.
 * @param {number} ms - Delay in milliseconds.
 * @returns {Promise<void>} Resolves once the delay has elapsed.
 */
function sleep(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
|
|
36
|
+
/**
 * Decide whether a failed API request is worth retrying.
 * Retryable: HTTP 429 (rate limit), any 5xx server error, and common
 * network-level failures (connection reset/refused, DNS lookup failure,
 * or messages mentioning "network"/"timeout").
 * @param {any} error - Error thrown by the SDK or the Node runtime.
 * @returns {boolean} True when the request should be attempted again.
 */
function isRetryableError(error) {
    const status = error?.status;
    // Rate limiting and server-side errors
    if (status === 429 || (status >= 500 && status < 600)) {
        return true;
    }
    // Connection-level failures surfaced as Node error codes
    const transientCodes = ['ECONNRESET', 'ENOTFOUND', 'ECONNREFUSED'];
    if (transientCodes.includes(error?.code)) {
        return true;
    }
    // Fallback: heuristic match on the error message text
    const text = error?.message;
    return Boolean(text?.includes('network') || text?.includes('timeout'));
}
|
|
58
|
+
/**
 * Compute how long to wait before retrying a rate-limited (429) request.
 * Honors the server-supplied Retry-After header when present; otherwise
 * falls back to exponential backoff (2^attempt seconds, capped at 60s)
 * plus up to 1s of random jitter, with a 2s floor.
 * @param {any} error - The rate-limit error (may carry response headers).
 * @param {number} attempt - 1-based retry attempt number.
 * @returns {number} Delay in milliseconds.
 */
function getRateLimitDelay(error, attempt) {
    // Prefer the API's own guidance (OpenAI sets Retry-After on 429s)
    const retryAfterHeader = error?.headers?.['retry-after'];
    if (retryAfterHeader) {
        const seconds = Number.parseInt(retryAfterHeader, 10);
        if (!Number.isNaN(seconds)) {
            return seconds * 1000; // header value is in seconds
        }
    }
    // Exponential backoff with jitter to avoid thundering-herd retries
    const backoff = Math.min(Math.pow(2, attempt) * 1000, 60000);
    return Math.max(backoff + Math.random() * 1000, 2000);
}
|
|
74
|
+
/**
 * Backoff delay for general retryable errors (non-429).
 * Exponential: ~1s, ~2s, ~4s with up to 0.5s jitter, capped at 4s total.
 * @param {number} attempt - 1-based retry attempt number.
 * @returns {number} Delay in milliseconds.
 */
function getRetryDelay(attempt) {
    const backoff = 2 ** (attempt - 1) * 1000;
    const jitter = Math.random() * 500;
    return Math.min(backoff + jitter, 4000);
}
|
|
83
|
+
/**
 * Create a streaming chat completion with retry logic and cost tracking.
 *
 * Streaming is always forced on (with stream_options.include_usage) so the
 * final chunk carries token usage for the caller. Retries up to 3 times:
 * 429s wait per getRateLimitDelay (Retry-After aware), other retryable
 * errors wait per getRetryDelay; non-retryable errors abort immediately.
 * On final failure this logs targeted guidance and terminates the process.
 *
 * @param {object} client - OpenAI-compatible SDK client.
 * @param {object} params - chat.completions.create parameters (model, messages, tools, ...).
 * @param {object} config - User config; provider/model are used for cost-model lookup.
 * @param {Array<object>} messages - Conversation history, used for token estimation and context logging.
 * @returns {Promise<{ stream: AsyncIterable<object>, estimatedInputTokens: number }>}
 */
export async function createChatCompletion(client, params, config, messages) {
    const maxRetries = 3;
    let lastError;
    // Get model configuration for cost tracking
    const modelConfig = getModelConfig(config.provider, config.model);
    // Estimate input tokens for cost calculation.
    // NOTE(review): only plain string `content` is counted — tool-call
    // arguments and non-string content are skipped, so this undercounts.
    const estimatedInputTokens = messages.reduce((total, msg) => {
        if ('content' in msg && msg.content && typeof msg.content === 'string') {
            return total + estimateTokens(msg.content);
        }
        return total;
    }, 0);
    // Log context information before making the request
    if (modelConfig) {
        const contextInfo = getContextInfo(messages, modelConfig);
        console.log(`📊 Context: ${contextInfo.currentTokens}/${contextInfo.maxTokens} tokens (${contextInfo.utilizationPercentage.toFixed(1)}%)`);
        if (contextInfo.needsCompaction) {
            console.log(`⚠️ Context approaching limit - automatic compaction will trigger soon`);
        }
    }
    for (let attempt = 1; attempt <= maxRetries; attempt++) {
        try {
            // Force streaming + usage reporting regardless of caller's params
            const stream = await client.chat.completions.create({
                ...params,
                stream: true,
                stream_options: { include_usage: true }
            });
            return { stream, estimatedInputTokens };
        }
        catch (error) {
            lastError = error;
            // Handle rate limiting (429) specially: honor Retry-After / longer backoff
            if (error?.status === 429) {
                const delay = getRateLimitDelay(error, attempt);
                const seconds = Math.round(delay / 1000);
                if (attempt < maxRetries) {
                    console.log(`\n⏳ Rate limited by API. Waiting ${seconds} seconds before retry (attempt ${attempt}/${maxRetries})...`);
                    await sleep(delay);
                    continue;
                }
                else {
                    // Out of attempts while rate limited: fall through to final reporting
                    console.error('\n❌ Rate limit exceeded. Maximum retries reached.');
                    console.log('💡 Tip: Consider upgrading your API plan or waiting before making more requests.');
                    break;
                }
            }
            // Handle other retryable errors (5xx, network failures) with shorter backoff
            if (isRetryableError(error) && attempt < maxRetries) {
                const delay = getRetryDelay(attempt);
                const seconds = Math.round(delay / 1000);
                console.log(`\n⚠️ API error (${error?.status || error?.code || 'unknown'}). Retrying in ${seconds} seconds (attempt ${attempt}/${maxRetries})...`);
                await sleep(delay);
                continue;
            }
            // For non-retryable errors or max retries reached, break immediately
            break;
        }
    }
    // If we get here, all retries failed — explain the most likely cause
    console.error('\n❌ API request failed after all retry attempts.');
    // Provide user-friendly error messages
    if (lastError?.status === 401) {
        console.log('💡 This looks like an authentication error. Check your API key configuration.');
        console.log('   Run: protoagent config --update-key');
    }
    else if (lastError?.status === 429) {
        console.log('💡 Rate limit exceeded. Try again later or upgrade your API plan.');
    }
    else if (lastError?.status === 403) {
        console.log('💡 Access forbidden. Check your API key permissions and billing status.');
    }
    else if (lastError?.status >= 500) {
        console.log('💡 Server error. The API service may be temporarily unavailable.');
    }
    else if (lastError?.code === 'ENOTFOUND' || lastError?.message?.includes('network')) {
        console.log('💡 Network connection error. Check your internet connection.');
    }
    // Suggest graceful shutdown
    console.log('\n🚪 ProtoAgent will now exit. Please resolve the issue and try again.');
    // NOTE(review): process.exit(1) from a library helper prevents callers from
    // recovering or cleaning up — consider throwing lastError instead.
    process.exit(1);
}
|