@compilr-dev/agents 0.0.1 → 0.2.0

This diff shows the content of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (44)
  1. package/dist/agent.d.ts +188 -1
  2. package/dist/agent.js +284 -14
  3. package/dist/context/file-tracker.d.ts +156 -0
  4. package/dist/context/file-tracker.js +358 -0
  5. package/dist/context/file-tracking-hook.d.ts +29 -0
  6. package/dist/context/file-tracking-hook.js +103 -0
  7. package/dist/context/index.d.ts +5 -1
  8. package/dist/context/index.js +3 -0
  9. package/dist/context/manager.d.ts +69 -1
  10. package/dist/context/manager.js +304 -0
  11. package/dist/context/types.d.ts +95 -0
  12. package/dist/index.d.ts +13 -5
  13. package/dist/index.js +11 -3
  14. package/dist/messages/index.d.ts +13 -0
  15. package/dist/messages/index.js +51 -0
  16. package/dist/permissions/manager.js +6 -1
  17. package/dist/providers/gemini.d.ts +91 -0
  18. package/dist/providers/gemini.js +138 -0
  19. package/dist/providers/index.d.ts +8 -0
  20. package/dist/providers/index.js +7 -3
  21. package/dist/providers/mock.js +8 -0
  22. package/dist/providers/ollama.d.ts +87 -0
  23. package/dist/providers/ollama.js +133 -0
  24. package/dist/providers/openai-compatible.d.ts +182 -0
  25. package/dist/providers/openai-compatible.js +357 -0
  26. package/dist/providers/openai.d.ts +93 -0
  27. package/dist/providers/openai.js +133 -0
  28. package/dist/skills/index.js +691 -0
  29. package/dist/tools/builtin/glob.d.ts +11 -0
  30. package/dist/tools/builtin/glob.js +44 -2
  31. package/dist/tools/builtin/grep.d.ts +11 -1
  32. package/dist/tools/builtin/grep.js +38 -2
  33. package/dist/tools/builtin/index.d.ts +6 -1
  34. package/dist/tools/builtin/index.js +7 -0
  35. package/dist/tools/builtin/suggest.d.ts +57 -0
  36. package/dist/tools/builtin/suggest.js +99 -0
  37. package/dist/tools/builtin/task.js +13 -8
  38. package/dist/tools/builtin/tool-names.d.ts +44 -0
  39. package/dist/tools/builtin/tool-names.js +51 -0
  40. package/dist/tools/index.d.ts +2 -2
  41. package/dist/tools/index.js +5 -1
  42. package/dist/tools/registry.d.ts +4 -0
  43. package/dist/tools/registry.js +9 -0
  44. package/package.json +2 -2
package/dist/providers/gemini.d.ts
@@ -0,0 +1,91 @@
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Implements LLMProvider interface for Google Gemini models using
+  * the OpenAI-compatible endpoint for maximum compatibility.
+  *
+  * @example
+  * ```typescript
+  * const provider = createGeminiProvider({
+  *   model: 'gemini-2.0-flash',
+  *   apiKey: process.env.GOOGLE_AI_API_KEY
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires valid Google AI API key
+  * - Uses OpenAI-compatible endpoint for streaming and tool calling
+  * - Default model is gemini-2.0-flash
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import type { ChatOptions } from './types.js';
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ /**
+  * Configuration for GeminiProvider
+  */
+ export interface GeminiProviderConfig {
+   /** Google AI API key (falls back to GOOGLE_AI_API_KEY or GEMINI_API_KEY env var) */
+   apiKey?: string;
+   /** Base URL for Gemini API (default: https://generativelanguage.googleapis.com/v1beta/openai) */
+   baseUrl?: string;
+   /** Default model to use (default: gemini-2.0-flash) */
+   model?: string;
+   /** Default max tokens (default: 4096) */
+   maxTokens?: number;
+   /** Request timeout in milliseconds (default: 120000) */
+   timeout?: number;
+ }
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Provides streaming chat completion using Google's Gemini models
+  * via the OpenAI-compatible API endpoint.
+  * Supports gemini-2.0-flash, gemini-1.5-flash, gemini-1.5-pro, and others.
+  */
+ export declare class GeminiProvider extends OpenAICompatibleProvider {
+   readonly name = "gemini";
+   private readonly apiKey;
+   constructor(config?: GeminiProviderConfig);
+   /**
+    * Gemini authentication with Bearer token
+    */
+   protected getAuthHeaders(): Record<string, string>;
+   /**
+    * Gemini's OpenAI-compatible endpoint path
+    */
+   protected getEndpointPath(): string;
+   /**
+    * Gemini uses standard body format (no provider-specific extensions needed)
+    */
+   protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+   /**
+    * Map HTTP errors with Gemini-specific messages
+    */
+   protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+   /**
+    * Map connection errors with Gemini-specific messages
+    */
+   protected mapConnectionError(_error: Error): ProviderError;
+ }
+ /**
+  * Create a Gemini provider instance
+  *
+  * @example
+  * ```typescript
+  * // Using environment variable (GOOGLE_AI_API_KEY or GEMINI_API_KEY)
+  * const provider = createGeminiProvider();
+  *
+  * // With explicit API key
+  * const provider = createGeminiProvider({ apiKey: 'AI...' });
+  *
+  * // With custom model
+  * const provider = createGeminiProvider({ model: 'gemini-1.5-pro' });
+  *
+  * // Available models:
+  * // - gemini-2.0-flash (default, fast)
+  * // - gemini-1.5-flash (faster, cheaper)
+  * // - gemini-1.5-pro (more capable)
+  * ```
+  */
+ export declare function createGeminiProvider(config?: GeminiProviderConfig): GeminiProvider;
package/dist/providers/gemini.js
@@ -0,0 +1,138 @@
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Implements LLMProvider interface for Google Gemini models using
+  * the OpenAI-compatible endpoint for maximum compatibility.
+  *
+  * @example
+  * ```typescript
+  * const provider = createGeminiProvider({
+  *   model: 'gemini-2.0-flash',
+  *   apiKey: process.env.GOOGLE_AI_API_KEY
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires valid Google AI API key
+  * - Uses OpenAI-compatible endpoint for streaming and tool calling
+  * - Default model is gemini-2.0-flash
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ // Default configuration
+ const DEFAULT_MODEL = 'gemini-2.0-flash';
+ const DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai';
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Provides streaming chat completion using Google's Gemini models
+  * via the OpenAI-compatible API endpoint.
+  * Supports gemini-2.0-flash, gemini-1.5-flash, gemini-1.5-pro, and others.
+  */
+ export class GeminiProvider extends OpenAICompatibleProvider {
+   name = 'gemini';
+   apiKey;
+   constructor(config = {}) {
+     const apiKey = config.apiKey ?? process.env.GOOGLE_AI_API_KEY ?? process.env.GEMINI_API_KEY;
+     if (!apiKey) {
+       throw new ProviderError('Gemini API key not found. Set GOOGLE_AI_API_KEY or GEMINI_API_KEY environment variable, or pass apiKey in config.', 'gemini');
+     }
+     const baseConfig = {
+       baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+       model: config.model ?? DEFAULT_MODEL,
+       maxTokens: config.maxTokens,
+       timeout: config.timeout,
+     };
+     super(baseConfig);
+     this.apiKey = apiKey;
+   }
+   /**
+    * Gemini authentication with Bearer token
+    */
+   getAuthHeaders() {
+     return {
+       Authorization: `Bearer ${this.apiKey}`,
+     };
+   }
+   /**
+    * Gemini's OpenAI-compatible endpoint path
+    */
+   getEndpointPath() {
+     return '/chat/completions';
+   }
+   /**
+    * Gemini uses standard body format (no provider-specific extensions needed)
+    */
+   buildProviderSpecificBody(_options) {
+     return {};
+   }
+   /**
+    * Map HTTP errors with Gemini-specific messages
+    */
+   mapHttpError(status, body, _model) {
+     let message = `Gemini error (${String(status)})`;
+     try {
+       const parsed = JSON.parse(body);
+       // Gemini sometimes wraps errors in an array
+       if (Array.isArray(parsed)) {
+         const first = parsed[0];
+         if (first?.error?.message) {
+           message = first.error.message;
+         }
+       }
+       else if (parsed.error?.message) {
+         message = parsed.error.message;
+       }
+     }
+     catch {
+       // If body isn't JSON, show it directly (truncated)
+       message = body ? body.slice(0, 500) : message;
+     }
+     switch (status) {
+       case 400:
+         return new ProviderError(`Invalid request to Gemini API: ${message}`, 'gemini', 400);
+       case 401:
+         return new ProviderError('Invalid Gemini API key. Check your GOOGLE_AI_API_KEY or GEMINI_API_KEY.', 'gemini', 401);
+       case 403:
+         return new ProviderError('Access denied to Gemini API. Check your API key permissions.', 'gemini', 403);
+       case 429:
+         return new ProviderError('Gemini rate limit exceeded. Please wait and try again.', 'gemini', 429);
+       case 500:
+       case 502:
+       case 503:
+         return new ProviderError('Gemini service temporarily unavailable. Please try again later.', 'gemini', status);
+       default:
+         return new ProviderError(message, 'gemini', status);
+     }
+   }
+   /**
+    * Map connection errors with Gemini-specific messages
+    */
+   mapConnectionError(_error) {
+     return new ProviderError('Failed to connect to Gemini API. Check your internet connection.', 'gemini');
+   }
+ }
+ /**
+  * Create a Gemini provider instance
+  *
+  * @example
+  * ```typescript
+  * // Using environment variable (GOOGLE_AI_API_KEY or GEMINI_API_KEY)
+  * const provider = createGeminiProvider();
+  *
+  * // With explicit API key
+  * const provider = createGeminiProvider({ apiKey: 'AI...' });
+  *
+  * // With custom model
+  * const provider = createGeminiProvider({ model: 'gemini-1.5-pro' });
+  *
+  * // Available models:
+  * // - gemini-2.0-flash (default, fast)
+  * // - gemini-1.5-flash (faster, cheaper)
+  * // - gemini-1.5-pro (more capable)
+  * ```
+  */
+ export function createGeminiProvider(config = {}) {
+   return new GeminiProvider(config);
+ }
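
For orientation, a minimal sketch of how the constructor's key check and the error mapping above surface to a caller. The root-level import path is an assumption; this diff only shows `ProviderError` living at `dist/errors.js`:

```typescript
// Assumption: createGeminiProvider and ProviderError are re-exported from
// the package root (the root index changes are not shown in this hunk).
import { createGeminiProvider, ProviderError } from '@compilr-dev/agents';

try {
  // Throws when neither GOOGLE_AI_API_KEY nor GEMINI_API_KEY is set
  // and no apiKey is passed in config.
  const provider = createGeminiProvider();
  console.log(provider.name); // 'gemini'
} catch (err) {
  if (err instanceof ProviderError) {
    console.error(`[gemini] ${err.message}`);
  } else {
    throw err;
  }
}
```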
package/dist/providers/index.d.ts
@@ -6,3 +6,11 @@ export { MockProvider, createMockProvider } from './mock.js';
  export type { MockProviderConfig, MockResponse, MockToolCall } from './mock.js';
  export { ClaudeProvider, createClaudeProvider } from './claude.js';
  export type { ClaudeProviderConfig } from './claude.js';
+ export { OllamaProvider, createOllamaProvider } from './ollama.js';
+ export type { OllamaProviderConfig } from './ollama.js';
+ export { OpenAICompatibleProvider } from './openai-compatible.js';
+ export type { OpenAICompatibleConfig, OpenAIMessage, OpenAIToolCall, OpenAITool, OpenAIStreamChunk, } from './openai-compatible.js';
+ export { OpenAIProvider, createOpenAIProvider } from './openai.js';
+ export type { OpenAIProviderConfig } from './openai.js';
+ export { GeminiProvider, createGeminiProvider } from './gemini.js';
+ export type { GeminiProviderConfig } from './gemini.js';
package/dist/providers/index.js
@@ -6,6 +6,10 @@ export * from './types.js';
  export { MockProvider, createMockProvider } from './mock.js';
  // Provider implementations
  export { ClaudeProvider, createClaudeProvider } from './claude.js';
- // Future providers
- // export { OpenAIProvider } from './openai.js';
- // export { GeminiProvider } from './gemini.js';
+ export { OllamaProvider, createOllamaProvider } from './ollama.js';
+ // Base class for OpenAI-compatible providers
+ export { OpenAICompatibleProvider } from './openai-compatible.js';
+ // OpenAI provider
+ export { OpenAIProvider, createOpenAIProvider } from './openai.js';
+ // Gemini provider
+ export { GeminiProvider, createGeminiProvider } from './gemini.js';
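
The net effect of the two barrel changes: the providers previously stubbed out as "Future providers" are now exported alongside the base class. A usage sketch, assuming these re-exports also surface at the package root (the root `index.js` diff is not shown in this excerpt):

```typescript
import {
  createOllamaProvider,
  createOpenAIProvider,
  createGeminiProvider,
} from '@compilr-dev/agents';

// All three extend OpenAICompatibleProvider, so they can be swapped
// wherever an LLMProvider is expected.
const gemini = createGeminiProvider();                      // needs GOOGLE_AI_API_KEY or GEMINI_API_KEY
const openai = createOpenAIProvider();                      // config shape is in openai.d.ts (+93, not in this excerpt)
const ollama = createOllamaProvider({ model: 'llama3.1' }); // local, no API key
```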
package/dist/providers/mock.js
@@ -185,6 +185,14 @@ export class MockProvider {
          if (block.type === 'text') {
              charCount += block.text.length;
          }
+         else if (block.type === 'tool_result') {
+             // Count tool result content (always a string per our type definition)
+             charCount += block.content.length;
+         }
+         else if (block.type === 'tool_use') {
+             // Count tool use input
+             charCount += JSON.stringify(block.input).length;
+         }
      }
  }
  }
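
The new branches extend MockProvider's character count to tool blocks. Standalone, the heuristic looks roughly like this; the block shapes below are simplified from this hunk (the real types live in `dist/providers/types.d.ts`, which this excerpt doesn't show), and the ~4-chars-per-token divisor comes from the base class's `countTokens` remarks further down:

```typescript
// Simplified content-block shapes inferred from the hunk above.
type ContentBlock =
  | { type: 'text'; text: string }
  | { type: 'tool_use'; input: unknown }
  | { type: 'tool_result'; content: string };

function estimateTokens(blocks: ContentBlock[]): number {
  let charCount = 0;
  for (const block of blocks) {
    if (block.type === 'text') charCount += block.text.length;
    else if (block.type === 'tool_result') charCount += block.content.length;
    else charCount += JSON.stringify(block.input).length; // tool_use input
  }
  // ~4 characters per token, per the OpenAICompatibleProvider remarks
  return Math.ceil(charCount / 4);
}
```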
package/dist/providers/ollama.d.ts
@@ -0,0 +1,87 @@
+ /**
+  * Ollama LLM Provider
+  *
+  * Implements LLMProvider interface for local Ollama models.
+  * Extends OpenAICompatibleProvider for shared functionality.
+  *
+  * @example
+  * ```typescript
+  * const provider = createOllamaProvider({
+  *   model: 'llama3.1',
+  *   baseUrl: 'http://localhost:11434'
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires Ollama to be running locally
+  * - Default model is llama3.1 (supports tool calling)
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import type { ChatOptions } from './types.js';
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ /**
+  * Configuration for OllamaProvider
+  */
+ export interface OllamaProviderConfig {
+   /** Base URL for Ollama server (default: http://localhost:11434) */
+   baseUrl?: string;
+   /** Default model to use (default: llama3.1) */
+   model?: string;
+   /** Default max tokens (default: 4096) */
+   maxTokens?: number;
+   /** Request timeout in milliseconds (default: 120000) */
+   timeout?: number;
+   /** Keep alive duration for model in memory (default: '5m') */
+   keepAlive?: string;
+ }
+ /**
+  * Ollama LLM Provider
+  *
+  * Provides streaming chat completion using local Ollama models.
+  * Supports tool calling with compatible models (llama3.1, mistral, etc.)
+  */
+ export declare class OllamaProvider extends OpenAICompatibleProvider {
+   readonly name = "ollama";
+   private readonly keepAlive;
+   constructor(config?: OllamaProviderConfig);
+   /**
+    * Ollama has no authentication by default
+    */
+   protected getAuthHeaders(): Record<string, string>;
+   /**
+    * Ollama's OpenAI-compatible endpoint
+    */
+   protected getEndpointPath(): string;
+   /**
+    * Add Ollama-specific request options
+    */
+   protected buildProviderSpecificBody(options?: ChatOptions): Record<string, unknown>;
+   /**
+    * Map HTTP errors with Ollama-specific messages
+    */
+   protected mapHttpError(status: number, body: string, model: string): ProviderError;
+   /**
+    * Map connection errors with Ollama-specific messages
+    */
+   protected mapConnectionError(_error: Error): ProviderError;
+ }
+ /**
+  * Create an Ollama provider instance
+  *
+  * @example
+  * ```typescript
+  * // Default configuration (llama3.1 on localhost:11434)
+  * const provider = createOllamaProvider();
+  *
+  * // Custom model
+  * const provider = createOllamaProvider({ model: 'mistral' });
+  *
+  * // Custom server
+  * const provider = createOllamaProvider({
+  *   baseUrl: 'http://192.168.1.100:11434',
+  *   model: 'llama3.1:70b'
+  * });
+  * ```
+  */
+ export declare function createOllamaProvider(config?: OllamaProviderConfig): OllamaProvider;
package/dist/providers/ollama.js
@@ -0,0 +1,133 @@
+ /**
+  * Ollama LLM Provider
+  *
+  * Implements LLMProvider interface for local Ollama models.
+  * Extends OpenAICompatibleProvider for shared functionality.
+  *
+  * @example
+  * ```typescript
+  * const provider = createOllamaProvider({
+  *   model: 'llama3.1',
+  *   baseUrl: 'http://localhost:11434'
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires Ollama to be running locally
+  * - Default model is llama3.1 (supports tool calling)
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ // Default configuration
+ const DEFAULT_MODEL = 'llama3.1';
+ const DEFAULT_BASE_URL = 'http://localhost:11434';
+ /**
+  * Ollama LLM Provider
+  *
+  * Provides streaming chat completion using local Ollama models.
+  * Supports tool calling with compatible models (llama3.1, mistral, etc.)
+  */
+ export class OllamaProvider extends OpenAICompatibleProvider {
+   name = 'ollama';
+   keepAlive;
+   constructor(config = {}) {
+     const baseConfig = {
+       baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+       model: config.model ?? DEFAULT_MODEL,
+       maxTokens: config.maxTokens,
+       timeout: config.timeout,
+     };
+     super(baseConfig);
+     this.keepAlive = config.keepAlive ?? '5m';
+   }
+   /**
+    * Ollama has no authentication by default
+    */
+   getAuthHeaders() {
+     return {};
+   }
+   /**
+    * Ollama's OpenAI-compatible endpoint
+    */
+   getEndpointPath() {
+     return '/v1/chat/completions';
+   }
+   /**
+    * Add Ollama-specific request options
+    */
+   buildProviderSpecificBody(options) {
+     const body = {
+       keep_alive: this.keepAlive,
+     };
+     // Ollama uses 'options' wrapper for model parameters
+     const modelOptions = {};
+     if (options?.maxTokens !== undefined) {
+       modelOptions.num_predict = options.maxTokens;
+     }
+     if (options?.temperature !== undefined) {
+       modelOptions.temperature = options.temperature;
+     }
+     if (Object.keys(modelOptions).length > 0) {
+       body.options = modelOptions;
+     }
+     return body;
+   }
+   /**
+    * Map HTTP errors with Ollama-specific messages
+    */
+   mapHttpError(status, body, model) {
+     let message = `Ollama error (${String(status)})`;
+     try {
+       const parsed = JSON.parse(body);
+       if (typeof parsed.error === 'string') {
+         message = parsed.error;
+       }
+       else if (parsed.error?.message) {
+         message = parsed.error.message;
+       }
+     }
+     catch {
+       message = body || message;
+     }
+     switch (status) {
+       case 404:
+         return new ProviderError(`Model "${model}" not found. Run: ollama pull ${model}`, 'ollama', 404);
+       case 429:
+         return new ProviderError('Rate limited', 'ollama', 429);
+       case 500:
+       case 502:
+       case 503:
+         return new ProviderError(message, 'ollama', status);
+       default:
+         return new ProviderError(message, 'ollama', status);
+     }
+   }
+   /**
+    * Map connection errors with Ollama-specific messages
+    */
+   mapConnectionError(_error) {
+     return new ProviderError(`Failed to connect to Ollama at ${this.baseUrl}. Is Ollama running? Try: ollama serve`, 'ollama');
+   }
+ }
+ /**
+  * Create an Ollama provider instance
+  *
+  * @example
+  * ```typescript
+  * // Default configuration (llama3.1 on localhost:11434)
+  * const provider = createOllamaProvider();
+  *
+  * // Custom model
+  * const provider = createOllamaProvider({ model: 'mistral' });
+  *
+  * // Custom server
+  * const provider = createOllamaProvider({
+  *   baseUrl: 'http://192.168.1.100:11434',
+  *   model: 'llama3.1:70b'
+  * });
+  * ```
+  */
+ export function createOllamaProvider(config = {}) {
+   return new OllamaProvider(config);
+ }
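
One detail worth calling out from `buildProviderSpecificBody`: Ollama takes sampling parameters inside an `options` wrapper (`num_predict`, `temperature`) rather than at the OpenAI top level, plus a `keep_alive` hint. An illustrative sketch of the extra fields contributed per request; the merge into the full body happens in the base class's `chat()`, whose implementation is not shown in this excerpt, and the root import path is an assumption:

```typescript
import { createOllamaProvider } from '@compilr-dev/agents';

const provider = createOllamaProvider({ keepAlive: '10m' });

// For a chat call with { maxTokens: 256, temperature: 0.2 }, the provider
// adds these fields on top of the standard model/messages/tools/stream body:
const extraBodyFields = {
  keep_alive: '10m',
  options: {
    num_predict: 256, // Ollama's name for max tokens
    temperature: 0.2,
  },
};
```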
package/dist/providers/openai-compatible.d.ts
@@ -0,0 +1,182 @@
+ /**
+  * OpenAI-Compatible LLM Provider Base Class
+  *
+  * Abstract base class for LLM providers that use OpenAI-compatible REST APIs.
+  * Provides shared implementation for:
+  * - Message conversion (library format → OpenAI format)
+  * - Tool definition conversion
+  * - SSE stream parsing
+  * - Tool call delta accumulation
+  * - Token counting (approximation)
+  *
+  * Extended by: OllamaProvider, OpenAIProvider, GeminiProvider
+  *
+  * @example
+  * ```typescript
+  * class MyProvider extends OpenAICompatibleProvider {
+  *   readonly name = 'my-provider';
+  *   protected getAuthHeaders() { return { 'Authorization': 'Bearer xxx' }; }
+  *   protected getEndpointPath() { return '/v1/chat/completions'; }
+  *   // ... other abstract methods
+  * }
+  * ```
+  */
+ import type { LLMProvider, Message, StreamChunk, ChatOptions, ToolDefinition } from './types.js';
+ import { ProviderError } from '../errors.js';
+ /**
+  * OpenAI-compatible message format
+  */
+ export interface OpenAIMessage {
+   role: 'system' | 'user' | 'assistant' | 'tool';
+   content: string | null;
+   tool_calls?: OpenAIToolCall[];
+   tool_call_id?: string;
+ }
+ /**
+  * OpenAI-compatible tool call format
+  */
+ export interface OpenAIToolCall {
+   id: string;
+   type: 'function';
+   function: {
+     name: string;
+     arguments: string;
+   };
+ }
+ /**
+  * OpenAI-compatible tool definition format
+  */
+ export interface OpenAITool {
+   type: 'function';
+   function: {
+     name: string;
+     description: string;
+     parameters: Record<string, unknown>;
+   };
+ }
+ /**
+  * OpenAI streaming response chunk format
+  */
+ export interface OpenAIStreamChunk {
+   id: string;
+   object: string;
+   created: number;
+   model: string;
+   choices: Array<{
+     index: number;
+     delta: {
+       role?: string;
+       content?: string | null;
+       tool_calls?: Array<{
+         index: number;
+         id?: string;
+         type?: string;
+         function?: {
+           name?: string;
+           arguments?: string;
+         };
+       }>;
+     };
+     finish_reason: string | null;
+   }>;
+   usage?: {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+   };
+ }
+ /**
+  * Base configuration for OpenAI-compatible providers
+  */
+ export interface OpenAICompatibleConfig {
+   /** Base URL for the API */
+   baseUrl: string;
+   /** Default model to use */
+   model: string;
+   /** Default max tokens (default: 4096) */
+   maxTokens?: number;
+   /** Request timeout in milliseconds (default: 120000) */
+   timeout?: number;
+ }
+ /**
+  * Abstract base class for OpenAI-compatible LLM providers
+  *
+  * Provides shared implementation for providers that use the OpenAI
+  * chat completions API format (OpenAI, Ollama, Azure OpenAI, Gemini).
+  */
+ export declare abstract class OpenAICompatibleProvider implements LLMProvider {
+   /**
+    * Provider name (e.g., 'openai', 'ollama', 'gemini')
+    */
+   abstract readonly name: string;
+   protected readonly baseUrl: string;
+   protected readonly defaultModel: string;
+   protected readonly defaultMaxTokens: number;
+   protected readonly timeout: number;
+   constructor(config: OpenAICompatibleConfig);
+   /**
+    * Get authentication headers for API requests
+    * @returns Headers object with auth credentials
+    */
+   protected abstract getAuthHeaders(): Record<string, string>;
+   /**
+    * Get the API endpoint path (e.g., '/v1/chat/completions')
+    * @returns API endpoint path
+    */
+   protected abstract getEndpointPath(): string;
+   /**
+    * Build provider-specific request body extensions
+    * @param options Chat options
+    * @returns Additional body fields for the request
+    */
+   protected abstract buildProviderSpecificBody(options?: ChatOptions): Record<string, unknown>;
+   /**
+    * Map HTTP error to ProviderError with provider-specific messages
+    * @param status HTTP status code
+    * @param body Response body
+    * @param model Model name
+    * @returns ProviderError with appropriate message
+    */
+   protected abstract mapHttpError(status: number, body: string, model: string): ProviderError;
+   /**
+    * Map connection errors with provider-specific messages
+    * @param error Original error
+    * @returns ProviderError with appropriate message
+    */
+   protected abstract mapConnectionError(error: Error): ProviderError;
+   /**
+    * Stream chat completion from the provider
+    *
+    * @param messages - Conversation messages
+    * @param options - Chat options (thinking is ignored for non-Claude providers)
+    */
+   chat(messages: Message[], options?: ChatOptions): AsyncIterable<StreamChunk>;
+   /**
+    * Convert library messages to OpenAI format
+    */
+   protected convertMessages(messages: Message[]): OpenAIMessage[];
+   /**
+    * Map library role to OpenAI role
+    */
+   protected mapRole(role: string): 'system' | 'user' | 'assistant' | 'tool';
+   /**
+    * Convert tool definitions to OpenAI format
+    */
+   protected convertTools(tools: ToolDefinition[]): OpenAITool[];
+   /**
+    * Process a stream chunk into StreamChunk events
+    */
+   protected processStreamChunk(chunk: OpenAIStreamChunk, toolCalls: Map<number, {
+     id: string;
+     name: string;
+     arguments: string;
+   }>): StreamChunk[];
+   /**
+    * Estimate token count (rough approximation)
+    *
+    * @remarks
+    * Most providers don't have a native token counting endpoint.
+    * This uses a rough approximation of ~4 characters per token.
+    */
+   countTokens(messages: Message[]): Promise<number>;
+ }
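
Taken together, the abstract surface is deliberately small: a concrete provider supplies auth headers, an endpoint path, optional body extensions, and two error mappers, and inherits streaming, message/tool conversion, and token estimation. A minimal subclass sketch, assuming the base class, `ProviderError`, and `ChatOptions` are importable from the package root (only the `dist`-internal paths appear in this diff); the provider and endpoint below are hypothetical:

```typescript
import { OpenAICompatibleProvider, ProviderError } from '@compilr-dev/agents';
import type { ChatOptions } from '@compilr-dev/agents';

// Hypothetical provider against an imaginary endpoint, for illustration only.
class AcmeProvider extends OpenAICompatibleProvider {
  readonly name = 'acme';

  constructor() {
    super({ baseUrl: 'https://llm.acme.example', model: 'acme-small' });
  }
  protected getAuthHeaders(): Record<string, string> {
    return { Authorization: `Bearer ${process.env.ACME_API_KEY ?? ''}` };
  }
  protected getEndpointPath(): string {
    return '/v1/chat/completions';
  }
  protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown> {
    return {}; // no vendor-specific extensions
  }
  protected mapHttpError(status: number, body: string, _model: string): ProviderError {
    return new ProviderError(body || `Acme error (${status})`, 'acme', status);
  }
  protected mapConnectionError(_error: Error): ProviderError {
    return new ProviderError('Failed to connect to Acme API.', 'acme');
  }
}
```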