@compilr-dev/agents 0.0.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/agent.d.ts +188 -1
  2. package/dist/agent.js +284 -14
  3. package/dist/context/file-tracker.d.ts +156 -0
  4. package/dist/context/file-tracker.js +358 -0
  5. package/dist/context/file-tracking-hook.d.ts +29 -0
  6. package/dist/context/file-tracking-hook.js +103 -0
  7. package/dist/context/index.d.ts +5 -1
  8. package/dist/context/index.js +3 -0
  9. package/dist/context/manager.d.ts +69 -1
  10. package/dist/context/manager.js +304 -0
  11. package/dist/context/types.d.ts +95 -0
  12. package/dist/index.d.ts +13 -5
  13. package/dist/index.js +11 -3
  14. package/dist/messages/index.d.ts +13 -0
  15. package/dist/messages/index.js +51 -0
  16. package/dist/permissions/manager.js +6 -1
  17. package/dist/providers/gemini.d.ts +91 -0
  18. package/dist/providers/gemini.js +138 -0
  19. package/dist/providers/index.d.ts +8 -0
  20. package/dist/providers/index.js +7 -3
  21. package/dist/providers/mock.js +8 -0
  22. package/dist/providers/ollama.d.ts +87 -0
  23. package/dist/providers/ollama.js +133 -0
  24. package/dist/providers/openai-compatible.d.ts +182 -0
  25. package/dist/providers/openai-compatible.js +357 -0
  26. package/dist/providers/openai.d.ts +93 -0
  27. package/dist/providers/openai.js +133 -0
  28. package/dist/skills/index.js +691 -0
  29. package/dist/tools/builtin/glob.d.ts +11 -0
  30. package/dist/tools/builtin/glob.js +44 -2
  31. package/dist/tools/builtin/grep.d.ts +11 -1
  32. package/dist/tools/builtin/grep.js +38 -2
  33. package/dist/tools/builtin/index.d.ts +6 -1
  34. package/dist/tools/builtin/index.js +7 -0
  35. package/dist/tools/builtin/suggest.d.ts +57 -0
  36. package/dist/tools/builtin/suggest.js +99 -0
  37. package/dist/tools/builtin/task.js +13 -8
  38. package/dist/tools/builtin/tool-names.d.ts +44 -0
  39. package/dist/tools/builtin/tool-names.js +51 -0
  40. package/dist/tools/index.d.ts +2 -2
  41. package/dist/tools/index.js +5 -1
  42. package/dist/tools/registry.d.ts +4 -0
  43. package/dist/tools/registry.js +9 -0
  44. package/package.json +2 -2
@@ -0,0 +1,357 @@
1
+ /**
2
+ * OpenAI-Compatible LLM Provider Base Class
3
+ *
4
+ * Abstract base class for LLM providers that use OpenAI-compatible REST APIs.
5
+ * Provides shared implementation for:
6
+ * - Message conversion (library format → OpenAI format)
7
+ * - Tool definition conversion
8
+ * - SSE stream parsing
9
+ * - Tool call delta accumulation
10
+ * - Token counting (approximation)
11
+ *
12
+ * Extended by: OllamaProvider, OpenAIProvider, GeminiProvider
13
+ *
14
+ * @example
15
+ * ```typescript
16
+ * class MyProvider extends OpenAICompatibleProvider {
17
+ * readonly name = 'my-provider';
18
+ * protected getAuthHeaders() { return { 'Authorization': 'Bearer xxx' }; }
19
+ * protected getEndpointPath() { return '/v1/chat/completions'; }
20
+ * // ... other abstract methods
21
+ * }
22
+ * ```
23
+ */
24
+ import { ProviderError } from '../errors.js';
25
+ // Default configuration
26
+ const DEFAULT_MAX_TOKENS = 4096;
27
+ const DEFAULT_TIMEOUT = 120000;
28
+ /**
29
+ * Abstract base class for OpenAI-compatible LLM providers
30
+ *
31
+ * Provides shared implementation for providers that use the OpenAI
32
+ * chat completions API format (OpenAI, Ollama, Azure OpenAI, Gemini).
33
+ */
34
+ export class OpenAICompatibleProvider {
35
+ baseUrl;
36
+ defaultModel;
37
+ defaultMaxTokens;
38
+ timeout;
39
+ constructor(config) {
40
+ this.baseUrl = config.baseUrl;
41
+ this.defaultModel = config.model;
42
+ this.defaultMaxTokens = config.maxTokens ?? DEFAULT_MAX_TOKENS;
43
+ this.timeout = config.timeout ?? DEFAULT_TIMEOUT;
44
+ }
45
+ // ==================== SHARED IMPLEMENTATION ====================
46
+ /**
47
+ * Stream chat completion from the provider
48
+ *
49
+ * @param messages - Conversation messages
50
+ * @param options - Chat options (thinking is ignored for non-Claude providers)
51
+ */
52
+ async *chat(messages, options) {
53
+ const model = options?.model ?? this.defaultModel;
54
+ const maxTokens = options?.maxTokens ?? this.defaultMaxTokens;
55
+ // Note: options.thinking is ignored - it's a Claude-specific feature
56
+ // Convert messages to OpenAI format
57
+ const openaiMessages = this.convertMessages(messages);
58
+ // Convert tools if provided
59
+ const tools = options?.tools ? this.convertTools(options.tools) : undefined;
60
+ // Build request body
61
+ const body = {
62
+ model,
63
+ messages: openaiMessages,
64
+ stream: true,
65
+ stream_options: { include_usage: true }, // Request usage stats in stream
66
+ max_tokens: maxTokens,
67
+ ...this.buildProviderSpecificBody(options),
68
+ };
69
+ if (options?.temperature !== undefined) {
70
+ body.temperature = options.temperature;
71
+ }
72
+ if (options?.stopSequences && options.stopSequences.length > 0) {
73
+ body.stop = options.stopSequences;
74
+ }
75
+ if (tools && tools.length > 0) {
76
+ body.tools = tools;
77
+ }
78
+ // Track tool calls being assembled
79
+ const toolCalls = new Map();
80
+ let usage;
81
+ try {
82
+ const controller = new AbortController();
83
+ const timeoutId = setTimeout(() => {
84
+ controller.abort();
85
+ }, this.timeout);
86
+ const response = await fetch(`${this.baseUrl}${this.getEndpointPath()}`, {
87
+ method: 'POST',
88
+ headers: {
89
+ 'Content-Type': 'application/json',
90
+ ...this.getAuthHeaders(),
91
+ },
92
+ body: JSON.stringify(body),
93
+ signal: controller.signal,
94
+ });
95
+ clearTimeout(timeoutId);
96
+ if (!response.ok) {
97
+ const errorBody = await response.text();
98
+ throw this.mapHttpError(response.status, errorBody, model);
99
+ }
100
+ const reader = response.body?.getReader();
101
+ if (!reader) {
102
+ throw new ProviderError('No response body', this.name);
103
+ }
104
+ const decoder = new TextDecoder();
105
+ let buffer = '';
106
+ let streamDone = false;
107
+ while (!streamDone) {
108
+ const readResult = (await reader.read());
109
+ if (readResult.done) {
110
+ streamDone = true;
111
+ continue;
112
+ }
113
+ buffer += decoder.decode(readResult.value, { stream: true });
114
+ const lines = buffer.split('\n');
115
+ buffer = lines.pop() ?? '';
116
+ for (const line of lines) {
117
+ if (!line.trim() || line.startsWith(':'))
118
+ continue;
119
+ if (line === 'data: [DONE]')
120
+ continue;
121
+ const data = line.replace(/^data: /, '');
122
+ if (!data.trim())
123
+ continue;
124
+ try {
125
+ const chunk = JSON.parse(data);
126
+ const chunks = this.processStreamChunk(chunk, toolCalls);
127
+ for (const streamChunk of chunks) {
128
+ yield streamChunk;
129
+ }
130
+ // Track usage from final chunk
131
+ if (chunk.usage) {
132
+ usage = {
133
+ inputTokens: chunk.usage.prompt_tokens,
134
+ outputTokens: chunk.usage.completion_tokens,
135
+ };
136
+ }
137
+ }
138
+ catch {
139
+ // Skip malformed JSON chunks
140
+ }
141
+ }
142
+ }
143
+ // Yield done chunk with usage
144
+ yield {
145
+ type: 'done',
146
+ usage,
147
+ };
148
+ }
149
+ catch (error) {
150
+ if (error instanceof ProviderError) {
151
+ throw error;
152
+ }
153
+ if (error instanceof Error) {
154
+ if (error.name === 'AbortError') {
155
+ throw new ProviderError('Request timeout', this.name, 408);
156
+ }
157
+ // Check for connection errors
158
+ if (error.message.includes('fetch') ||
159
+ error.message.includes('ECONNREFUSED') ||
160
+ error.message.includes('network')) {
161
+ throw this.mapConnectionError(error);
162
+ }
163
+ throw new ProviderError(error.message, this.name);
164
+ }
165
+ throw new ProviderError('Unknown error', this.name);
166
+ }
167
+ }
168
+ /**
169
+ * Convert library messages to OpenAI format
170
+ */
171
+ convertMessages(messages) {
172
+ const result = [];
173
+ for (const msg of messages) {
174
+ if (typeof msg.content === 'string') {
175
+ result.push({
176
+ role: this.mapRole(msg.role),
177
+ content: msg.content,
178
+ });
179
+ }
180
+ else if (Array.isArray(msg.content)) {
181
+ // Handle content blocks
182
+ const blocks = msg.content;
183
+ const textParts = [];
184
+ const toolCallsList = [];
185
+ const toolResults = [];
186
+ for (const block of blocks) {
187
+ if (block.type === 'text') {
188
+ textParts.push(block.text);
189
+ }
190
+ else if (block.type === 'tool_use') {
191
+ toolCallsList.push({
192
+ id: block.id,
193
+ type: 'function',
194
+ function: {
195
+ name: block.name,
196
+ arguments: JSON.stringify(block.input),
197
+ },
198
+ });
199
+ }
200
+ else if (block.type === 'tool_result') {
201
+ const content = typeof block.content === 'string' ? block.content : JSON.stringify(block.content);
202
+ toolResults.push({
203
+ id: block.toolUseId,
204
+ content,
205
+ });
206
+ }
207
+ // Note: 'thinking' blocks are ignored (Claude-specific)
208
+ }
209
+ // Handle tool results - each needs its own message
210
+ if (toolResults.length > 0) {
211
+ for (const tr of toolResults) {
212
+ result.push({
213
+ role: 'tool',
214
+ content: tr.content,
215
+ tool_call_id: tr.id,
216
+ });
217
+ }
218
+ }
219
+ else if (toolCallsList.length > 0) {
220
+ // Assistant message with tool calls
221
+ result.push({
222
+ role: 'assistant',
223
+ content: textParts.length > 0 ? textParts.join('\n') : null,
224
+ tool_calls: toolCallsList,
225
+ });
226
+ }
227
+ else if (textParts.length > 0) {
228
+ // Regular text message
229
+ result.push({
230
+ role: this.mapRole(msg.role),
231
+ content: textParts.join('\n'),
232
+ });
233
+ }
234
+ }
235
+ }
236
+ return result;
237
+ }
238
+ /**
239
+ * Map library role to OpenAI role
240
+ */
241
+ mapRole(role) {
242
+ switch (role) {
243
+ case 'system':
244
+ return 'system';
245
+ case 'user':
246
+ return 'user';
247
+ case 'assistant':
248
+ return 'assistant';
249
+ default:
250
+ return 'user';
251
+ }
252
+ }
253
+ /**
254
+ * Convert tool definitions to OpenAI format
255
+ */
256
+ convertTools(tools) {
257
+ return tools.map((tool) => ({
258
+ type: 'function',
259
+ function: {
260
+ name: tool.name,
261
+ description: tool.description,
262
+ parameters: tool.inputSchema,
263
+ },
264
+ }));
265
+ }
266
+ /**
267
+ * Process a stream chunk into StreamChunk events
268
+ */
269
+ processStreamChunk(chunk, toolCalls) {
270
+ const results = [];
271
+ const choices = chunk.choices;
272
+ if (choices.length === 0)
273
+ return results;
274
+ const choice = choices[0];
275
+ const delta = choice.delta;
276
+ // Handle text content
277
+ if (delta.content) {
278
+ results.push({
279
+ type: 'text',
280
+ text: delta.content,
281
+ });
282
+ }
283
+ // Handle tool calls
284
+ if (delta.tool_calls) {
285
+ for (const tc of delta.tool_calls) {
286
+ const index = tc.index;
287
+ let call = toolCalls.get(index);
288
+ // New tool call
289
+ const fn = tc.function;
290
+ if (tc.id && fn?.name) {
291
+ call = {
292
+ id: tc.id,
293
+ name: fn.name,
294
+ arguments: fn.arguments ?? '',
295
+ };
296
+ toolCalls.set(index, call);
297
+ results.push({
298
+ type: 'tool_use_start',
299
+ toolUse: {
300
+ id: tc.id,
301
+ name: fn.name,
302
+ },
303
+ });
304
+ }
305
+ // Streaming arguments
306
+ if (call && fn?.arguments) {
307
+ call.arguments += fn.arguments;
308
+ results.push({
309
+ type: 'tool_use_delta',
310
+ text: fn.arguments,
311
+ });
312
+ }
313
+ }
314
+ }
315
+ // Handle finish reason - emit tool_use_end for completed tool calls
316
+ // Note: The agent accumulates tool_use_delta chunks and parses the JSON
317
+ if (choice.finish_reason === 'tool_calls' || choice.finish_reason === 'stop') {
318
+ for (const [,] of toolCalls) {
319
+ results.push({ type: 'tool_use_end' });
320
+ }
321
+ toolCalls.clear();
322
+ }
323
+ return results;
324
+ }
325
+ /**
326
+ * Estimate token count (rough approximation)
327
+ *
328
+ * @remarks
329
+ * Most providers don't have a native token counting endpoint.
330
+ * This uses a rough approximation of ~4 characters per token.
331
+ */
332
+ countTokens(messages) {
333
+ let charCount = 0;
334
+ for (const msg of messages) {
335
+ if (typeof msg.content === 'string') {
336
+ charCount += msg.content.length;
337
+ }
338
+ else if (Array.isArray(msg.content)) {
339
+ for (const block of msg.content) {
340
+ if (block.type === 'text') {
341
+ charCount += block.text.length;
342
+ }
343
+ else if (block.type === 'tool_use') {
344
+ charCount += JSON.stringify(block.input).length;
345
+ }
346
+ else if (block.type === 'tool_result') {
347
+ charCount +=
348
+ typeof block.content === 'string'
349
+ ? block.content.length
350
+ : JSON.stringify(block.content).length;
351
+ }
352
+ }
353
+ }
354
+ }
355
+ return Promise.resolve(Math.ceil(charCount / 4));
356
+ }
357
+ }
@@ -0,0 +1,93 @@
1
+ /**
2
+ * OpenAI LLM Provider
3
+ *
4
+ * Implements LLMProvider interface for OpenAI models (GPT-4o, GPT-4o-mini, etc.)
5
+ * Extends OpenAICompatibleProvider for shared functionality.
6
+ *
7
+ * @example
8
+ * ```typescript
9
+ * const provider = createOpenAIProvider({
10
+ * model: 'gpt-4o',
11
+ * apiKey: process.env.OPENAI_API_KEY
12
+ * });
13
+ * ```
14
+ *
15
+ * @remarks
16
+ * - Requires valid OpenAI API key
17
+ * - Default model is gpt-4o
18
+ * - Extended thinking is not supported (Claude-specific feature)
19
+ */
20
+ import type { ChatOptions } from './types.js';
21
+ import { ProviderError } from '../errors.js';
22
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
23
+ /**
24
+ * Configuration for OpenAIProvider
25
+ */
26
+ export interface OpenAIProviderConfig {
27
+ /** OpenAI API key (falls back to OPENAI_API_KEY env var) */
28
+ apiKey?: string;
29
+ /** Base URL for OpenAI API (default: https://api.openai.com) */
30
+ baseUrl?: string;
31
+ /** Default model to use (default: gpt-4o) */
32
+ model?: string;
33
+ /** Default max tokens (default: 4096) */
34
+ maxTokens?: number;
35
+ /** Request timeout in milliseconds (default: 120000) */
36
+ timeout?: number;
37
+ /** OpenAI organization ID (optional) */
38
+ organization?: string;
39
+ }
40
+ /**
41
+ * OpenAI LLM Provider
42
+ *
43
+ * Provides streaming chat completion using OpenAI models.
44
+ * Supports GPT-4o, GPT-4o-mini, and other compatible models.
45
+ */
46
+ export declare class OpenAIProvider extends OpenAICompatibleProvider {
47
+ readonly name = "openai";
48
+ private readonly apiKey;
49
+ private readonly organization?;
50
+ constructor(config?: OpenAIProviderConfig);
51
+ /**
52
+ * OpenAI authentication with Bearer token
53
+ */
54
+ protected getAuthHeaders(): Record<string, string>;
55
+ /**
56
+ * OpenAI chat completions endpoint
57
+ */
58
+ protected getEndpointPath(): string;
59
+ /**
60
+ * OpenAI uses standard body format (no provider-specific extensions needed)
61
+ */
62
+ protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
63
+ /**
64
+ * Map HTTP errors with OpenAI-specific messages
65
+ */
66
+ protected mapHttpError(status: number, body: string, _model: string): ProviderError;
67
+ /**
68
+ * Map connection errors with OpenAI-specific messages
69
+ */
70
+ protected mapConnectionError(_error: Error): ProviderError;
71
+ }
72
+ /**
73
+ * Create an OpenAI provider instance
74
+ *
75
+ * @example
76
+ * ```typescript
77
+ * // Using environment variable (OPENAI_API_KEY)
78
+ * const provider = createOpenAIProvider();
79
+ *
80
+ * // With explicit API key
81
+ * const provider = createOpenAIProvider({ apiKey: 'sk-...' });
82
+ *
83
+ * // With custom model
84
+ * const provider = createOpenAIProvider({ model: 'gpt-4o-mini' });
85
+ *
86
+ * // With organization
87
+ * const provider = createOpenAIProvider({
88
+ * apiKey: 'sk-...',
89
+ * organization: 'org-...'
90
+ * });
91
+ * ```
92
+ */
93
+ export declare function createOpenAIProvider(config?: OpenAIProviderConfig): OpenAIProvider;
@@ -0,0 +1,133 @@
1
+ /**
2
+ * OpenAI LLM Provider
3
+ *
4
+ * Implements LLMProvider interface for OpenAI models (GPT-4o, GPT-4o-mini, etc.)
5
+ * Extends OpenAICompatibleProvider for shared functionality.
6
+ *
7
+ * @example
8
+ * ```typescript
9
+ * const provider = createOpenAIProvider({
10
+ * model: 'gpt-4o',
11
+ * apiKey: process.env.OPENAI_API_KEY
12
+ * });
13
+ * ```
14
+ *
15
+ * @remarks
16
+ * - Requires valid OpenAI API key
17
+ * - Default model is gpt-4o
18
+ * - Extended thinking is not supported (Claude-specific feature)
19
+ */
20
+ import { ProviderError } from '../errors.js';
21
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
22
+ // Default configuration
23
+ const DEFAULT_MODEL = 'gpt-4o';
24
+ const DEFAULT_BASE_URL = 'https://api.openai.com';
25
+ /**
26
+ * OpenAI LLM Provider
27
+ *
28
+ * Provides streaming chat completion using OpenAI models.
29
+ * Supports GPT-4o, GPT-4o-mini, and other compatible models.
30
+ */
31
+ export class OpenAIProvider extends OpenAICompatibleProvider {
32
+ name = 'openai';
33
+ apiKey;
34
+ organization;
35
+ constructor(config = {}) {
36
+ const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY;
37
+ if (!apiKey) {
38
+ throw new ProviderError('OpenAI API key not found. Set OPENAI_API_KEY environment variable or pass apiKey in config.', 'openai');
39
+ }
40
+ const baseConfig = {
41
+ baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
42
+ model: config.model ?? DEFAULT_MODEL,
43
+ maxTokens: config.maxTokens,
44
+ timeout: config.timeout,
45
+ };
46
+ super(baseConfig);
47
+ this.apiKey = apiKey;
48
+ this.organization = config.organization;
49
+ }
50
+ /**
51
+ * OpenAI authentication with Bearer token
52
+ */
53
+ getAuthHeaders() {
54
+ const headers = {
55
+ Authorization: `Bearer ${this.apiKey}`,
56
+ };
57
+ if (this.organization) {
58
+ headers['OpenAI-Organization'] = this.organization;
59
+ }
60
+ return headers;
61
+ }
62
+ /**
63
+ * OpenAI chat completions endpoint
64
+ */
65
+ getEndpointPath() {
66
+ return '/v1/chat/completions';
67
+ }
68
+ /**
69
+ * OpenAI uses standard body format (no provider-specific extensions needed)
70
+ */
71
+ buildProviderSpecificBody(_options) {
72
+ return {};
73
+ }
74
+ /**
75
+ * Map HTTP errors with OpenAI-specific messages
76
+ */
77
+ mapHttpError(status, body, _model) {
78
+ let message = `OpenAI error (${String(status)})`;
79
+ try {
80
+ const parsed = JSON.parse(body);
81
+ if (parsed.error?.message) {
82
+ message = parsed.error.message;
83
+ }
84
+ }
85
+ catch {
86
+ message = body || message;
87
+ }
88
+ switch (status) {
89
+ case 401:
90
+ return new ProviderError('Invalid OpenAI API key. Check your OPENAI_API_KEY.', 'openai', 401);
91
+ case 403:
92
+ return new ProviderError('Access denied. Check your OpenAI API key permissions.', 'openai', 403);
93
+ case 429:
94
+ return new ProviderError('OpenAI rate limit exceeded. Please wait and try again.', 'openai', 429);
95
+ case 500:
96
+ case 502:
97
+ case 503:
98
+ return new ProviderError('OpenAI service temporarily unavailable. Please try again later.', 'openai', status);
99
+ default:
100
+ return new ProviderError(message, 'openai', status);
101
+ }
102
+ }
103
+ /**
104
+ * Map connection errors with OpenAI-specific messages
105
+ */
106
+ mapConnectionError(_error) {
107
+ return new ProviderError('Failed to connect to OpenAI API. Check your internet connection.', 'openai');
108
+ }
109
+ }
110
+ /**
111
+ * Create an OpenAI provider instance
112
+ *
113
+ * @example
114
+ * ```typescript
115
+ * // Using environment variable (OPENAI_API_KEY)
116
+ * const provider = createOpenAIProvider();
117
+ *
118
+ * // With explicit API key
119
+ * const provider = createOpenAIProvider({ apiKey: 'sk-...' });
120
+ *
121
+ * // With custom model
122
+ * const provider = createOpenAIProvider({ model: 'gpt-4o-mini' });
123
+ *
124
+ * // With organization
125
+ * const provider = createOpenAIProvider({
126
+ * apiKey: 'sk-...',
127
+ * organization: 'org-...'
128
+ * });
129
+ * ```
130
+ */
131
+ export function createOpenAIProvider(config = {}) {
132
+ return new OpenAIProvider(config);
133
+ }