@compilr-dev/agents 0.0.1 → 0.1.0
- package/dist/agent.d.ts +20 -0
- package/dist/agent.js +16 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.js +4 -0
- package/dist/providers/gemini.d.ts +91 -0
- package/dist/providers/gemini.js +140 -0
- package/dist/providers/index.d.ts +8 -0
- package/dist/providers/index.js +7 -3
- package/dist/providers/ollama.d.ts +87 -0
- package/dist/providers/ollama.js +133 -0
- package/dist/providers/openai-compatible.d.ts +182 -0
- package/dist/providers/openai-compatible.js +359 -0
- package/dist/providers/openai.d.ts +93 -0
- package/dist/providers/openai.js +133 -0
- package/dist/tools/builtin/glob.d.ts +11 -0
- package/dist/tools/builtin/glob.js +44 -2
- package/dist/tools/builtin/grep.d.ts +11 -1
- package/dist/tools/builtin/grep.js +38 -2
- package/package.json +2 -2
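
The headline change in 0.1.0 is a set of new provider modules — OpenAI, Ollama, and Gemini — built on a shared OpenAI-compatible base class, alongside expanded glob and grep built-in tools. A minimal construction sketch follows, using the `createOpenAIProvider` factory and the config fields declared later in this diff; it assumes the package root re-exports the provider factories (the index files change in this release, but their exports are not reproduced in this section):

```typescript
import { createOpenAIProvider } from '@compilr-dev/agents'; // assumed root re-export

// Reads OPENAI_API_KEY from the environment; defaults to gpt-4o
const provider = createOpenAIProvider();

// Or configure explicitly (every field below is declared in OpenAIProviderConfig)
const customProvider = createOpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
  model: 'gpt-4o-mini',
  maxTokens: 4096,   // default
  timeout: 120000,   // default, in milliseconds
});
```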
package/dist/providers/openai-compatible.d.ts

@@ -0,0 +1,182 @@
+/**
+ * OpenAI-Compatible LLM Provider Base Class
+ *
+ * Abstract base class for LLM providers that use OpenAI-compatible REST APIs.
+ * Provides shared implementation for:
+ * - Message conversion (library format → OpenAI format)
+ * - Tool definition conversion
+ * - SSE stream parsing
+ * - Tool call delta accumulation
+ * - Token counting (approximation)
+ *
+ * Extended by: OllamaProvider, OpenAIProvider, GeminiProvider
+ *
+ * @example
+ * ```typescript
+ * class MyProvider extends OpenAICompatibleProvider {
+ *   readonly name = 'my-provider';
+ *   protected getAuthHeaders() { return { 'Authorization': 'Bearer xxx' }; }
+ *   protected getEndpointPath() { return '/v1/chat/completions'; }
+ *   // ... other abstract methods
+ * }
+ * ```
+ */
+import type { LLMProvider, Message, StreamChunk, ChatOptions, ToolDefinition } from './types.js';
+import { ProviderError } from '../errors.js';
+/**
+ * OpenAI-compatible message format
+ */
+export interface OpenAIMessage {
+    role: 'system' | 'user' | 'assistant' | 'tool';
+    content: string | null;
+    tool_calls?: OpenAIToolCall[];
+    tool_call_id?: string;
+}
+/**
+ * OpenAI-compatible tool call format
+ */
+export interface OpenAIToolCall {
+    id: string;
+    type: 'function';
+    function: {
+        name: string;
+        arguments: string;
+    };
+}
+/**
+ * OpenAI-compatible tool definition format
+ */
+export interface OpenAITool {
+    type: 'function';
+    function: {
+        name: string;
+        description: string;
+        parameters: Record<string, unknown>;
+    };
+}
+/**
+ * OpenAI streaming response chunk format
+ */
+export interface OpenAIStreamChunk {
+    id: string;
+    object: string;
+    created: number;
+    model: string;
+    choices: Array<{
+        index: number;
+        delta: {
+            role?: string;
+            content?: string | null;
+            tool_calls?: Array<{
+                index: number;
+                id?: string;
+                type?: string;
+                function?: {
+                    name?: string;
+                    arguments?: string;
+                };
+            }>;
+        };
+        finish_reason: string | null;
+    }>;
+    usage?: {
+        prompt_tokens: number;
+        completion_tokens: number;
+        total_tokens: number;
+    };
+}
+/**
+ * Base configuration for OpenAI-compatible providers
+ */
+export interface OpenAICompatibleConfig {
+    /** Base URL for the API */
+    baseUrl: string;
+    /** Default model to use */
+    model: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+}
+/**
+ * Abstract base class for OpenAI-compatible LLM providers
+ *
+ * Provides shared implementation for providers that use the OpenAI
+ * chat completions API format (OpenAI, Ollama, Azure OpenAI, Gemini).
+ */
+export declare abstract class OpenAICompatibleProvider implements LLMProvider {
+    /**
+     * Provider name (e.g., 'openai', 'ollama', 'gemini')
+     */
+    abstract readonly name: string;
+    protected readonly baseUrl: string;
+    protected readonly defaultModel: string;
+    protected readonly defaultMaxTokens: number;
+    protected readonly timeout: number;
+    constructor(config: OpenAICompatibleConfig);
+    /**
+     * Get authentication headers for API requests
+     * @returns Headers object with auth credentials
+     */
+    protected abstract getAuthHeaders(): Record<string, string>;
+    /**
+     * Get the API endpoint path (e.g., '/v1/chat/completions')
+     * @returns API endpoint path
+     */
+    protected abstract getEndpointPath(): string;
+    /**
+     * Build provider-specific request body extensions
+     * @param options Chat options
+     * @returns Additional body fields for the request
+     */
+    protected abstract buildProviderSpecificBody(options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP error to ProviderError with provider-specific messages
+     * @param status HTTP status code
+     * @param body Response body
+     * @param model Model name
+     * @returns ProviderError with appropriate message
+     */
+    protected abstract mapHttpError(status: number, body: string, model: string): ProviderError;
+    /**
+     * Map connection errors with provider-specific messages
+     * @param error Original error
+     * @returns ProviderError with appropriate message
+     */
+    protected abstract mapConnectionError(error: Error): ProviderError;
+    /**
+     * Stream chat completion from the provider
+     *
+     * @param messages - Conversation messages
+     * @param options - Chat options (thinking is ignored for non-Claude providers)
+     */
+    chat(messages: Message[], options?: ChatOptions): AsyncIterable<StreamChunk>;
+    /**
+     * Convert library messages to OpenAI format
+     */
+    protected convertMessages(messages: Message[]): OpenAIMessage[];
+    /**
+     * Map library role to OpenAI role
+     */
+    protected mapRole(role: string): 'system' | 'user' | 'assistant' | 'tool';
+    /**
+     * Convert tool definitions to OpenAI format
+     */
+    protected convertTools(tools: ToolDefinition[]): OpenAITool[];
+    /**
+     * Process a stream chunk into StreamChunk events
+     */
+    protected processStreamChunk(chunk: OpenAIStreamChunk, toolCalls: Map<number, {
+        id: string;
+        name: string;
+        arguments: string;
+    }>): StreamChunk[];
+    /**
+     * Estimate token count (rough approximation)
+     *
+     * @remarks
+     * Most providers don't have a native token counting endpoint.
+     * This uses a rough approximation of ~4 characters per token.
+     */
+    countTokens(messages: Message[]): Promise<number>;
+}
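
The `@example` in the file header above stops at "// ... other abstract methods". For orientation, here is one way a custom provider could satisfy the full abstract contract declared in this file. The gateway URL, model name, and error messages are invented for illustration, the import path assumes the package root re-exports these symbols, and `ProviderError`'s `(message, provider, status?)` signature is inferred from how the compiled base class constructs it:

```typescript
import {
  OpenAICompatibleProvider,
  ProviderError,
  type ChatOptions,
} from '@compilr-dev/agents'; // assumed root re-exports

// Hypothetical provider for a self-hosted, OpenAI-compatible gateway.
class GatewayProvider extends OpenAICompatibleProvider {
  readonly name = 'gateway';

  constructor(private readonly apiKey: string) {
    // Illustrative values, not defaults shipped by the package
    super({ baseUrl: 'http://localhost:8080', model: 'my-model' });
  }

  protected getAuthHeaders(): Record<string, string> {
    return { Authorization: `Bearer ${this.apiKey}` };
  }

  protected getEndpointPath(): string {
    return '/v1/chat/completions';
  }

  protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown> {
    return {}; // no extra request fields for a plain OpenAI-compatible server
  }

  protected mapHttpError(status: number, body: string, model: string): ProviderError {
    return new ProviderError(`Gateway returned ${status} for ${model}: ${body}`, this.name, status);
  }

  protected mapConnectionError(error: Error): ProviderError {
    return new ProviderError(`Cannot reach gateway: ${error.message}`, this.name);
  }
}
```

Everything else (request building, SSE parsing, tool-call accumulation, timeouts) comes from the base class, which is what the compiled implementation below provides.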
package/dist/providers/openai-compatible.js

@@ -0,0 +1,359 @@
+/**
+ * OpenAI-Compatible LLM Provider Base Class
+ *
+ * Abstract base class for LLM providers that use OpenAI-compatible REST APIs.
+ * Provides shared implementation for:
+ * - Message conversion (library format → OpenAI format)
+ * - Tool definition conversion
+ * - SSE stream parsing
+ * - Tool call delta accumulation
+ * - Token counting (approximation)
+ *
+ * Extended by: OllamaProvider, OpenAIProvider, GeminiProvider
+ *
+ * @example
+ * ```typescript
+ * class MyProvider extends OpenAICompatibleProvider {
+ *   readonly name = 'my-provider';
+ *   protected getAuthHeaders() { return { 'Authorization': 'Bearer xxx' }; }
+ *   protected getEndpointPath() { return '/v1/chat/completions'; }
+ *   // ... other abstract methods
+ * }
+ * ```
+ */
+import { ProviderError } from '../errors.js';
+// Default configuration
+const DEFAULT_MAX_TOKENS = 4096;
+const DEFAULT_TIMEOUT = 120000;
+/**
+ * Abstract base class for OpenAI-compatible LLM providers
+ *
+ * Provides shared implementation for providers that use the OpenAI
+ * chat completions API format (OpenAI, Ollama, Azure OpenAI, Gemini).
+ */
+export class OpenAICompatibleProvider {
+    baseUrl;
+    defaultModel;
+    defaultMaxTokens;
+    timeout;
+    constructor(config) {
+        this.baseUrl = config.baseUrl;
+        this.defaultModel = config.model;
+        this.defaultMaxTokens = config.maxTokens ?? DEFAULT_MAX_TOKENS;
+        this.timeout = config.timeout ?? DEFAULT_TIMEOUT;
+    }
+    // ==================== SHARED IMPLEMENTATION ====================
+    /**
+     * Stream chat completion from the provider
+     *
+     * @param messages - Conversation messages
+     * @param options - Chat options (thinking is ignored for non-Claude providers)
+     */
+    async *chat(messages, options) {
+        const model = options?.model ?? this.defaultModel;
+        const maxTokens = options?.maxTokens ?? this.defaultMaxTokens;
+        // Note: options.thinking is ignored - it's a Claude-specific feature
+        // Convert messages to OpenAI format
+        const openaiMessages = this.convertMessages(messages);
+        // Convert tools if provided
+        const tools = options?.tools ? this.convertTools(options.tools) : undefined;
+        // Build request body
+        const body = {
+            model,
+            messages: openaiMessages,
+            stream: true,
+            stream_options: { include_usage: true }, // Request usage stats in stream
+            max_tokens: maxTokens,
+            ...this.buildProviderSpecificBody(options),
+        };
+        if (options?.temperature !== undefined) {
+            body.temperature = options.temperature;
+        }
+        if (options?.stopSequences && options.stopSequences.length > 0) {
+            body.stop = options.stopSequences;
+        }
+        if (tools && tools.length > 0) {
+            body.tools = tools;
+        }
+        // Track tool calls being assembled
+        const toolCalls = new Map();
+        let usage;
+        try {
+            const controller = new AbortController();
+            const timeoutId = setTimeout(() => {
+                controller.abort();
+            }, this.timeout);
+            const response = await fetch(`${this.baseUrl}${this.getEndpointPath()}`, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                    ...this.getAuthHeaders(),
+                },
+                body: JSON.stringify(body),
+                signal: controller.signal,
+            });
+            clearTimeout(timeoutId);
+            if (!response.ok) {
+                const errorBody = await response.text();
+                throw this.mapHttpError(response.status, errorBody, model);
+            }
+            const reader = response.body?.getReader();
+            if (!reader) {
+                throw new ProviderError('No response body', this.name);
+            }
+            const decoder = new TextDecoder();
+            let buffer = '';
+            let streamDone = false;
+            while (!streamDone) {
+                const readResult = (await reader.read());
+                if (readResult.done) {
+                    streamDone = true;
+                    continue;
+                }
+                buffer += decoder.decode(readResult.value, { stream: true });
+                const lines = buffer.split('\n');
+                buffer = lines.pop() ?? '';
+                for (const line of lines) {
+                    if (!line.trim() || line.startsWith(':'))
+                        continue;
+                    if (line === 'data: [DONE]')
+                        continue;
+                    const data = line.replace(/^data: /, '');
+                    if (!data.trim())
+                        continue;
+                    try {
+                        const chunk = JSON.parse(data);
+                        const chunks = this.processStreamChunk(chunk, toolCalls);
+                        for (const streamChunk of chunks) {
+                            yield streamChunk;
+                        }
+                        // Track usage from final chunk
+                        if (chunk.usage) {
+                            usage = {
+                                inputTokens: chunk.usage.prompt_tokens,
+                                outputTokens: chunk.usage.completion_tokens,
+                            };
+                        }
+                    }
+                    catch {
+                        // Skip malformed JSON chunks
+                    }
+                }
+            }
+            // Yield done chunk with usage
+            yield {
+                type: 'done',
+                usage,
+            };
+        }
+        catch (error) {
+            if (error instanceof ProviderError) {
+                throw error;
+            }
+            if (error instanceof Error) {
+                if (error.name === 'AbortError') {
+                    throw new ProviderError('Request timeout', this.name, 408);
+                }
+                // Check for connection errors
+                if (error.message.includes('fetch') ||
+                    error.message.includes('ECONNREFUSED') ||
+                    error.message.includes('network')) {
+                    throw this.mapConnectionError(error);
+                }
+                throw new ProviderError(error.message, this.name);
+            }
+            throw new ProviderError('Unknown error', this.name);
+        }
+    }
+    /**
+     * Convert library messages to OpenAI format
+     */
+    convertMessages(messages) {
+        const result = [];
+        for (const msg of messages) {
+            if (typeof msg.content === 'string') {
+                result.push({
+                    role: this.mapRole(msg.role),
+                    content: msg.content,
+                });
+            }
+            else if (Array.isArray(msg.content)) {
+                // Handle content blocks
+                const blocks = msg.content;
+                const textParts = [];
+                const toolCallsList = [];
+                const toolResults = [];
+                for (const block of blocks) {
+                    if (block.type === 'text') {
+                        textParts.push(block.text);
+                    }
+                    else if (block.type === 'tool_use') {
+                        toolCallsList.push({
+                            id: block.id,
+                            type: 'function',
+                            function: {
+                                name: block.name,
+                                arguments: JSON.stringify(block.input),
+                            },
+                        });
+                    }
+                    else if (block.type === 'tool_result') {
+                        const content = typeof block.content === 'string'
+                            ? block.content
+                            : JSON.stringify(block.content);
+                        toolResults.push({
+                            id: block.toolUseId,
+                            content,
+                        });
+                    }
+                    // Note: 'thinking' blocks are ignored (Claude-specific)
+                }
+                // Handle tool results - each needs its own message
+                if (toolResults.length > 0) {
+                    for (const tr of toolResults) {
+                        result.push({
+                            role: 'tool',
+                            content: tr.content,
+                            tool_call_id: tr.id,
+                        });
+                    }
+                }
+                else if (toolCallsList.length > 0) {
+                    // Assistant message with tool calls
+                    result.push({
+                        role: 'assistant',
+                        content: textParts.length > 0 ? textParts.join('\n') : null,
+                        tool_calls: toolCallsList,
+                    });
+                }
+                else if (textParts.length > 0) {
+                    // Regular text message
+                    result.push({
+                        role: this.mapRole(msg.role),
+                        content: textParts.join('\n'),
+                    });
+                }
+            }
+        }
+        return result;
+    }
+    /**
+     * Map library role to OpenAI role
+     */
+    mapRole(role) {
+        switch (role) {
+            case 'system':
+                return 'system';
+            case 'user':
+                return 'user';
+            case 'assistant':
+                return 'assistant';
+            default:
+                return 'user';
+        }
+    }
+    /**
+     * Convert tool definitions to OpenAI format
+     */
+    convertTools(tools) {
+        return tools.map((tool) => ({
+            type: 'function',
+            function: {
+                name: tool.name,
+                description: tool.description,
+                parameters: tool.inputSchema,
+            },
+        }));
+    }
+    /**
+     * Process a stream chunk into StreamChunk events
+     */
+    processStreamChunk(chunk, toolCalls) {
+        const results = [];
+        const choices = chunk.choices;
+        if (choices.length === 0)
+            return results;
+        const choice = choices[0];
+        const delta = choice.delta;
+        // Handle text content
+        if (delta.content) {
+            results.push({
+                type: 'text',
+                text: delta.content,
+            });
+        }
+        // Handle tool calls
+        if (delta.tool_calls) {
+            for (const tc of delta.tool_calls) {
+                const index = tc.index;
+                let call = toolCalls.get(index);
+                // New tool call
+                const fn = tc.function;
+                if (tc.id && fn?.name) {
+                    call = {
+                        id: tc.id,
+                        name: fn.name,
+                        arguments: fn.arguments ?? '',
+                    };
+                    toolCalls.set(index, call);
+                    results.push({
+                        type: 'tool_use_start',
+                        toolUse: {
+                            id: tc.id,
+                            name: fn.name,
+                        },
+                    });
+                }
+                // Streaming arguments
+                if (call && fn?.arguments) {
+                    call.arguments += fn.arguments;
+                    results.push({
+                        type: 'tool_use_delta',
+                        text: fn.arguments,
+                    });
+                }
+            }
+        }
+        // Handle finish reason - emit tool_use_end for completed tool calls
+        // Note: The agent accumulates tool_use_delta chunks and parses the JSON
+        if (choice.finish_reason === 'tool_calls' || choice.finish_reason === 'stop') {
+            for (const [,] of toolCalls) {
+                results.push({ type: 'tool_use_end' });
+            }
+            toolCalls.clear();
+        }
+        return results;
+    }
+    /**
+     * Estimate token count (rough approximation)
+     *
+     * @remarks
+     * Most providers don't have a native token counting endpoint.
+     * This uses a rough approximation of ~4 characters per token.
+     */
+    countTokens(messages) {
+        let charCount = 0;
+        for (const msg of messages) {
+            if (typeof msg.content === 'string') {
+                charCount += msg.content.length;
+            }
+            else if (Array.isArray(msg.content)) {
+                for (const block of msg.content) {
+                    if (block.type === 'text') {
+                        charCount += block.text.length;
+                    }
+                    else if (block.type === 'tool_use') {
+                        charCount += JSON.stringify(block.input).length;
+                    }
+                    else if (block.type === 'tool_result') {
+                        charCount +=
+                            typeof block.content === 'string'
+                                ? block.content.length
+                                : JSON.stringify(block.content).length;
+                    }
+                }
+            }
+        }
+        return Promise.resolve(Math.ceil(charCount / 4));
+    }
+}
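
Putting the compiled output above to work: `chat()` is an async generator, so a caller iterates it with `for await` and switches on the chunk types the implementation emits ('text', 'tool_use_start', 'tool_use_delta', 'tool_use_end', 'done'). A consumption sketch follows; the exact `Message` and `ToolDefinition` shapes live in types.js, which is not part of this section, so only the fields the compiled code reads are used here, and the glob tool schema is hypothetical:

```typescript
import { createOpenAIProvider } from '@compilr-dev/agents'; // assumed root re-export

const provider = createOpenAIProvider({ model: 'gpt-4o-mini' });

let argsJson = '';
const stream = provider.chat(
  [{ role: 'user', content: 'Find every TypeScript file under src/' }],
  {
    tools: [
      {
        // Hypothetical schema; the real glob tool lives in dist/tools/builtin/glob.js
        name: 'glob',
        description: 'Find files matching a glob pattern',
        inputSchema: { type: 'object', properties: { pattern: { type: 'string' } } },
      },
    ],
  },
);

for await (const chunk of stream) {
  switch (chunk.type) {
    case 'text':
      process.stdout.write(chunk.text); // assistant text as it streams
      break;
    case 'tool_use_start':
      console.log(`\n[tool] ${chunk.toolUse.name} (${chunk.toolUse.id})`);
      argsJson = '';
      break;
    case 'tool_use_delta':
      argsJson += chunk.text; // arguments arrive as JSON fragments
      break;
    case 'tool_use_end':
      console.log('[tool arguments]', argsJson);
      break;
    case 'done':
      console.log('\n[usage]', chunk.usage); // { inputTokens, outputTokens } when the API reports it
      break;
  }
}
```

The OpenAI-specific subclass declared next wires this machinery to api.openai.com.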
package/dist/providers/openai.d.ts

@@ -0,0 +1,93 @@
+/**
+ * OpenAI LLM Provider
+ *
+ * Implements LLMProvider interface for OpenAI models (GPT-4o, GPT-4o-mini, etc.)
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createOpenAIProvider({
+ *   model: 'gpt-4o',
+ *   apiKey: process.env.OPENAI_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid OpenAI API key
+ * - Default model is gpt-4o
+ * - Extended thinking is not supported (Claude-specific feature)
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for OpenAIProvider
+ */
+export interface OpenAIProviderConfig {
+    /** OpenAI API key (falls back to OPENAI_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for OpenAI API (default: https://api.openai.com) */
+    baseUrl?: string;
+    /** Default model to use (default: gpt-4o) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+    /** OpenAI organization ID (optional) */
+    organization?: string;
+}
+/**
+ * OpenAI LLM Provider
+ *
+ * Provides streaming chat completion using OpenAI models.
+ * Supports GPT-4o, GPT-4o-mini, and other compatible models.
+ */
+export declare class OpenAIProvider extends OpenAICompatibleProvider {
+    readonly name = "openai";
+    private readonly apiKey;
+    private readonly organization?;
+    constructor(config?: OpenAIProviderConfig);
+    /**
+     * OpenAI authentication with Bearer token
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * OpenAI chat completions endpoint
+     */
+    protected getEndpointPath(): string;
+    /**
+     * OpenAI uses standard body format (no provider-specific extensions needed)
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with OpenAI-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with OpenAI-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create an OpenAI provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (OPENAI_API_KEY)
+ * const provider = createOpenAIProvider();
+ *
+ * // With explicit API key
+ * const provider = createOpenAIProvider({ apiKey: 'sk-...' });
+ *
+ * // With custom model
+ * const provider = createOpenAIProvider({ model: 'gpt-4o-mini' });
+ *
+ * // With organization
+ * const provider = createOpenAIProvider({
+ *   apiKey: 'sk-...',
+ *   organization: 'org-...'
+ * });
+ * ```
+ */
+export declare function createOpenAIProvider(config?: OpenAIProviderConfig): OpenAIProvider;