@compilr-dev/agents 0.0.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/agent.d.ts CHANGED
@@ -449,6 +449,21 @@ export interface RunOptions {
       * Event handler for this run (in addition to config handler)
       */
      onEvent?: AgentEventHandler;
+     /**
+      * Filter tools for this run.
+      * - If provided, only these tool names will be available
+      * - Reduces token usage by not sending unused tool definitions
+      * - Tools must be registered with the agent
+      *
+      * @example
+      * ```typescript
+      * // Only allow file and search tools for this request
+      * await agent.run(message, {
+      *   toolFilter: ['read_file', 'write_file', 'grep', 'glob'],
+      * });
+      * ```
+      */
+     toolFilter?: string[];
  }
  /**
   * Agent run result
@@ -1033,6 +1048,11 @@ export declare class Agent {
       * Get the current conversation history
       */
      getHistory(): Message[];
+     /**
+      * Set the conversation history (for manual compaction/restoration)
+      * Also updates the context manager's token count if configured.
+      */
+     setHistory(messages: Message[]): Promise<this>;
      /**
       * Get the context manager (if configured)
       */
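The new `setHistory()` pairs with the existing `getHistory()` for manual history compaction or restoration. A minimal sketch of that round trip, assuming an already-constructed `agent` (the `Agent` constructor is not part of this diff, so only the two methods are typed here):

```typescript
import type { Message } from '@compilr-dev/agents';

// Trim the conversation to its most recent messages. setHistory() also
// refreshes the context manager's token count when one is configured.
async function keepRecentMessages(
  agent: { getHistory(): Message[]; setHistory(messages: Message[]): Promise<unknown> },
  maxMessages = 20,
): Promise<void> {
  const history = agent.getHistory();
  if (history.length > maxMessages) {
    await agent.setHistory(history.slice(-maxMessages));
  }
}
```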
package/dist/agent.js CHANGED
@@ -684,6 +684,17 @@ export class Agent {
      getHistory() {
          return [...this.conversationHistory];
      }
+     /**
+      * Set the conversation history (for manual compaction/restoration)
+      * Also updates the context manager's token count if configured.
+      */
+     async setHistory(messages) {
+         this.conversationHistory = [...messages];
+         if (this.contextManager) {
+             await this.contextManager.updateTokenCount(messages);
+         }
+         return this;
+     }
      /**
       * Get the context manager (if configured)
       */
@@ -1290,6 +1301,11 @@ export class Agent {
      }
      // Get tool definitions
      let tools = this.toolRegistry.getDefinitions();
+     // Apply tool filter if specified (reduces token usage)
+     if (options?.toolFilter && options.toolFilter.length > 0) {
+         const filterSet = new Set(options.toolFilter);
+         tools = tools.filter((tool) => filterSet.has(tool.name));
+     }
      // Run beforeLLM hooks (can modify messages and tools)
      if (this.hooksManager) {
          const llmHookResult = await this.hooksManager.runBeforeLLM({
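As the implementation shows, filtering only happens when `toolFilter` is a non-empty array; omitting it (or passing an empty array) sends every registered tool definition, and names that match no registered tool are silently ignored by the `Set`-based filter. A usage sketch, where the surrounding `agent`, the string prompt form of `run()`, and the tool names are assumed:

```typescript
// Full tool set for a broad request (no filter supplied).
await agent.run('Refactor the error handling in src/agent.ts');

// Restrict a focused request to a few registered tools; only their
// definitions are sent to the model, trimming prompt tokens.
await agent.run('List every TODO comment under src/', {
  toolFilter: ['read_file', 'grep', 'glob'],
});
```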
package/dist/index.d.ts CHANGED
@@ -9,6 +9,14 @@ export type { LLMProvider, Message, MessageRole, ContentBlock, ContentBlockType,
  export { MockProvider, createMockProvider } from './providers/index.js';
  export { ClaudeProvider, createClaudeProvider } from './providers/index.js';
  export type { ClaudeProviderConfig } from './providers/index.js';
+ export { OllamaProvider, createOllamaProvider } from './providers/index.js';
+ export type { OllamaProviderConfig } from './providers/index.js';
+ export { OpenAIProvider, createOpenAIProvider } from './providers/index.js';
+ export type { OpenAIProviderConfig } from './providers/index.js';
+ export { GeminiProvider, createGeminiProvider } from './providers/index.js';
+ export type { GeminiProviderConfig } from './providers/index.js';
+ export { OpenAICompatibleProvider } from './providers/index.js';
+ export type { OpenAICompatibleConfig } from './providers/index.js';
  export type { Tool, ToolHandler, ToolRegistry, ToolInputSchema, ToolExecutionResult, ToolRegistryOptions, DefineToolOptions, ReadFileInput, WriteFileInput, BashInput, BashResult, FifoDetectionResult, GrepInput, GlobInput, EditInput, TodoWriteInput, TodoReadInput, TodoItem, TodoStatus, TodoContextCleanupOptions, TaskInput, TaskResult, AgentTypeConfig, TaskToolOptions, ContextMode, ThoroughnessLevel, SubAgentEventInfo, } from './tools/index.js';
  export { defineTool, createSuccessResult, createErrorResult, wrapToolExecute, DefaultToolRegistry, createToolRegistry, } from './tools/index.js';
  export { readFileTool, createReadFileTool, writeFileTool, createWriteFileTool, bashTool, createBashTool, execStream, detectFifoUsage, bashOutputTool, createBashOutputTool, killShellTool, createKillShellTool, ShellManager, getDefaultShellManager, setDefaultShellManager, grepTool, createGrepTool, globTool, createGlobTool, editTool, createEditTool, todoWriteTool, todoReadTool, createTodoTools, TodoStore, resetDefaultTodoStore, getDefaultTodoStore, createIsolatedTodoStore, cleanupTodoContextMessages, getTodoContextStats, webFetchTool, createWebFetchTool, createTaskTool, defaultAgentTypes, builtinTools, allBuiltinTools, } from './tools/index.js';
package/dist/index.js CHANGED
@@ -8,6 +8,10 @@ export { Agent } from './agent.js';
  // Providers
  export { MockProvider, createMockProvider } from './providers/index.js';
  export { ClaudeProvider, createClaudeProvider } from './providers/index.js';
+ export { OllamaProvider, createOllamaProvider } from './providers/index.js';
+ export { OpenAIProvider, createOpenAIProvider } from './providers/index.js';
+ export { GeminiProvider, createGeminiProvider } from './providers/index.js';
+ export { OpenAICompatibleProvider } from './providers/index.js';
  // Tool utilities
  export { defineTool, createSuccessResult, createErrorResult, wrapToolExecute, DefaultToolRegistry, createToolRegistry, } from './tools/index.js';
  // Built-in tools
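With these barrel changes, the new providers are importable from the package root alongside the existing Claude provider, for example:

```typescript
import {
  createClaudeProvider,
  createGeminiProvider,
  createOllamaProvider,
  createOpenAIProvider,
  OpenAICompatibleProvider,
} from '@compilr-dev/agents';
```

`OpenAICompatibleProvider` is the shared base class for the new providers; a subclassing sketch follows the providers/index.js hunk further down.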
package/dist/providers/gemini.d.ts ADDED
@@ -0,0 +1,91 @@
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Implements LLMProvider interface for Google Gemini models using
+  * the OpenAI-compatible endpoint for maximum compatibility.
+  *
+  * @example
+  * ```typescript
+  * const provider = createGeminiProvider({
+  *   model: 'gemini-2.0-flash',
+  *   apiKey: process.env.GOOGLE_AI_API_KEY
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires valid Google AI API key
+  * - Uses OpenAI-compatible endpoint for streaming and tool calling
+  * - Default model is gemini-2.0-flash
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import type { ChatOptions } from './types.js';
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ /**
+  * Configuration for GeminiProvider
+  */
+ export interface GeminiProviderConfig {
+     /** Google AI API key (falls back to GOOGLE_AI_API_KEY or GEMINI_API_KEY env var) */
+     apiKey?: string;
+     /** Base URL for Gemini API (default: https://generativelanguage.googleapis.com/v1beta/openai) */
+     baseUrl?: string;
+     /** Default model to use (default: gemini-2.0-flash) */
+     model?: string;
+     /** Default max tokens (default: 4096) */
+     maxTokens?: number;
+     /** Request timeout in milliseconds (default: 120000) */
+     timeout?: number;
+ }
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Provides streaming chat completion using Google's Gemini models
+  * via the OpenAI-compatible API endpoint.
+  * Supports gemini-2.0-flash, gemini-1.5-flash, gemini-1.5-pro, and others.
+  */
+ export declare class GeminiProvider extends OpenAICompatibleProvider {
+     readonly name = "gemini";
+     private readonly apiKey;
+     constructor(config?: GeminiProviderConfig);
+     /**
+      * Gemini authentication with Bearer token
+      */
+     protected getAuthHeaders(): Record<string, string>;
+     /**
+      * Gemini's OpenAI-compatible endpoint path
+      */
+     protected getEndpointPath(): string;
+     /**
+      * Gemini uses standard body format (no provider-specific extensions needed)
+      */
+     protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+     /**
+      * Map HTTP errors with Gemini-specific messages
+      */
+     protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+     /**
+      * Map connection errors with Gemini-specific messages
+      */
+     protected mapConnectionError(_error: Error): ProviderError;
+ }
+ /**
+  * Create a Gemini provider instance
+  *
+  * @example
+  * ```typescript
+  * // Using environment variable (GOOGLE_AI_API_KEY or GEMINI_API_KEY)
+  * const provider = createGeminiProvider();
+  *
+  * // With explicit API key
+  * const provider = createGeminiProvider({ apiKey: 'AI...' });
+  *
+  * // With custom model
+  * const provider = createGeminiProvider({ model: 'gemini-1.5-pro' });
+  *
+  * // Available models:
+  * // - gemini-2.0-flash (default, fast)
+  * // - gemini-1.5-flash (faster, cheaper)
+  * // - gemini-1.5-pro (more capable)
+  * ```
+  */
+ export declare function createGeminiProvider(config?: GeminiProviderConfig): GeminiProvider;
package/dist/providers/gemini.js ADDED
@@ -0,0 +1,140 @@
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Implements LLMProvider interface for Google Gemini models using
+  * the OpenAI-compatible endpoint for maximum compatibility.
+  *
+  * @example
+  * ```typescript
+  * const provider = createGeminiProvider({
+  *   model: 'gemini-2.0-flash',
+  *   apiKey: process.env.GOOGLE_AI_API_KEY
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires valid Google AI API key
+  * - Uses OpenAI-compatible endpoint for streaming and tool calling
+  * - Default model is gemini-2.0-flash
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ // Default configuration
+ const DEFAULT_MODEL = 'gemini-2.0-flash';
+ const DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai';
+ /**
+  * Google Gemini LLM Provider
+  *
+  * Provides streaming chat completion using Google's Gemini models
+  * via the OpenAI-compatible API endpoint.
+  * Supports gemini-2.0-flash, gemini-1.5-flash, gemini-1.5-pro, and others.
+  */
+ export class GeminiProvider extends OpenAICompatibleProvider {
+     name = 'gemini';
+     apiKey;
+     constructor(config = {}) {
+         const apiKey = config.apiKey ??
+             process.env.GOOGLE_AI_API_KEY ??
+             process.env.GEMINI_API_KEY;
+         if (!apiKey) {
+             throw new ProviderError('Gemini API key not found. Set GOOGLE_AI_API_KEY or GEMINI_API_KEY environment variable, or pass apiKey in config.', 'gemini');
+         }
+         const baseConfig = {
+             baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+             model: config.model ?? DEFAULT_MODEL,
+             maxTokens: config.maxTokens,
+             timeout: config.timeout,
+         };
+         super(baseConfig);
+         this.apiKey = apiKey;
+     }
+     /**
+      * Gemini authentication with Bearer token
+      */
+     getAuthHeaders() {
+         return {
+             Authorization: `Bearer ${this.apiKey}`,
+         };
+     }
+     /**
+      * Gemini's OpenAI-compatible endpoint path
+      */
+     getEndpointPath() {
+         return '/chat/completions';
+     }
+     /**
+      * Gemini uses standard body format (no provider-specific extensions needed)
+      */
+     buildProviderSpecificBody(_options) {
+         return {};
+     }
+     /**
+      * Map HTTP errors with Gemini-specific messages
+      */
+     mapHttpError(status, body, _model) {
+         let message = `Gemini error (${String(status)})`;
+         try {
+             const parsed = JSON.parse(body);
+             // Gemini sometimes wraps errors in an array
+             if (Array.isArray(parsed)) {
+                 const first = parsed[0];
+                 if (first?.error?.message) {
+                     message = first.error.message;
+                 }
+             }
+             else if (parsed.error?.message) {
+                 message = parsed.error.message;
+             }
+         }
+         catch {
+             // If body isn't JSON, show it directly (truncated)
+             message = body ? body.slice(0, 500) : message;
+         }
+         switch (status) {
+             case 400:
+                 return new ProviderError(`Invalid request to Gemini API: ${message}`, 'gemini', 400);
+             case 401:
+                 return new ProviderError('Invalid Gemini API key. Check your GOOGLE_AI_API_KEY or GEMINI_API_KEY.', 'gemini', 401);
+             case 403:
+                 return new ProviderError('Access denied to Gemini API. Check your API key permissions.', 'gemini', 403);
+             case 429:
+                 return new ProviderError('Gemini rate limit exceeded. Please wait and try again.', 'gemini', 429);
+             case 500:
+             case 502:
+             case 503:
+                 return new ProviderError('Gemini service temporarily unavailable. Please try again later.', 'gemini', status);
+             default:
+                 return new ProviderError(message, 'gemini', status);
+         }
+     }
+     /**
+      * Map connection errors with Gemini-specific messages
+      */
+     mapConnectionError(_error) {
+         return new ProviderError('Failed to connect to Gemini API. Check your internet connection.', 'gemini');
+     }
+ }
+ /**
+  * Create a Gemini provider instance
+  *
+  * @example
+  * ```typescript
+  * // Using environment variable (GOOGLE_AI_API_KEY or GEMINI_API_KEY)
+  * const provider = createGeminiProvider();
+  *
+  * // With explicit API key
+  * const provider = createGeminiProvider({ apiKey: 'AI...' });
+  *
+  * // With custom model
+  * const provider = createGeminiProvider({ model: 'gemini-1.5-pro' });
+  *
+  * // Available models:
+  * // - gemini-2.0-flash (default, fast)
+  * // - gemini-1.5-flash (faster, cheaper)
+  * // - gemini-1.5-pro (more capable)
+  * ```
+  */
+ export function createGeminiProvider(config = {}) {
+     return new GeminiProvider(config);
+ }
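Per the constructor above, the API key is resolved from `config.apiKey`, then `GOOGLE_AI_API_KEY`, then `GEMINI_API_KEY`, and a `ProviderError` is thrown when none is set. A small construction sketch restating that fallback order; the values shown are the documented defaults, not new behavior:

```typescript
import { createGeminiProvider } from '@compilr-dev/agents';

// Key resolution: config.apiKey -> GOOGLE_AI_API_KEY -> GEMINI_API_KEY.
// baseUrl, maxTokens, and timeout fall back to the defaults noted in
// GeminiProviderConfig when omitted.
const gemini = createGeminiProvider({
  apiKey: process.env.GOOGLE_AI_API_KEY,
  model: 'gemini-2.0-flash',
});
```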
package/dist/providers/index.d.ts CHANGED
@@ -6,3 +6,11 @@ export { MockProvider, createMockProvider } from './mock.js';
  export type { MockProviderConfig, MockResponse, MockToolCall } from './mock.js';
  export { ClaudeProvider, createClaudeProvider } from './claude.js';
  export type { ClaudeProviderConfig } from './claude.js';
+ export { OllamaProvider, createOllamaProvider } from './ollama.js';
+ export type { OllamaProviderConfig } from './ollama.js';
+ export { OpenAICompatibleProvider } from './openai-compatible.js';
+ export type { OpenAICompatibleConfig, OpenAIMessage, OpenAIToolCall, OpenAITool, OpenAIStreamChunk, } from './openai-compatible.js';
+ export { OpenAIProvider, createOpenAIProvider } from './openai.js';
+ export type { OpenAIProviderConfig } from './openai.js';
+ export { GeminiProvider, createGeminiProvider } from './gemini.js';
+ export type { GeminiProviderConfig } from './gemini.js';
package/dist/providers/index.js CHANGED
@@ -6,6 +6,10 @@ export * from './types.js';
  export { MockProvider, createMockProvider } from './mock.js';
  // Provider implementations
  export { ClaudeProvider, createClaudeProvider } from './claude.js';
- // Future providers
- // export { OpenAIProvider } from './openai.js';
- // export { GeminiProvider } from './gemini.js';
+ export { OllamaProvider, createOllamaProvider } from './ollama.js';
+ // Base class for OpenAI-compatible providers
+ export { OpenAICompatibleProvider } from './openai-compatible.js';
+ // OpenAI provider
+ export { OpenAIProvider, createOpenAIProvider } from './openai.js';
+ // Gemini provider
+ export { GeminiProvider, createGeminiProvider } from './gemini.js';
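The newly exported `OpenAICompatibleProvider` is the base class both `GeminiProvider` and `OllamaProvider` build on. A sketch of a custom subclass for another OpenAI-compatible endpoint, following the same five overrides shown in this release. The gateway itself is hypothetical, and the root-level imports of `ChatOptions` and `ProviderError` are assumptions (inside `dist` they live in `providers/types.js` and `errors.js`):

```typescript
// Assumed root re-exports; adjust the import paths if these types are not exposed.
import { OpenAICompatibleProvider, ProviderError, type ChatOptions } from '@compilr-dev/agents';

// Hypothetical provider for an internal OpenAI-compatible gateway.
class GatewayProvider extends OpenAICompatibleProvider {
  readonly name = 'gateway';

  constructor(private readonly apiKey: string) {
    // Base config shape taken from the GeminiProvider constructor above.
    super({ baseUrl: 'https://llm.example.internal', model: 'gpt-4o-mini' });
  }

  protected getAuthHeaders(): Record<string, string> {
    return { Authorization: `Bearer ${this.apiKey}` };
  }

  protected getEndpointPath(): string {
    return '/v1/chat/completions';
  }

  protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown> {
    return {}; // no extra request fields for this endpoint
  }

  protected mapHttpError(status: number, body: string, _model: string): ProviderError {
    return new ProviderError(body || `Gateway error (${status})`, 'gateway', status);
  }

  protected mapConnectionError(_error: Error): ProviderError {
    return new ProviderError('Failed to reach the gateway endpoint.', 'gateway');
  }
}
```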
package/dist/providers/ollama.d.ts ADDED
@@ -0,0 +1,87 @@
+ /**
+  * Ollama LLM Provider
+  *
+  * Implements LLMProvider interface for local Ollama models.
+  * Extends OpenAICompatibleProvider for shared functionality.
+  *
+  * @example
+  * ```typescript
+  * const provider = createOllamaProvider({
+  *   model: 'llama3.1',
+  *   baseUrl: 'http://localhost:11434'
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires Ollama to be running locally
+  * - Default model is llama3.1 (supports tool calling)
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import type { ChatOptions } from './types.js';
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ /**
+  * Configuration for OllamaProvider
+  */
+ export interface OllamaProviderConfig {
+     /** Base URL for Ollama server (default: http://localhost:11434) */
+     baseUrl?: string;
+     /** Default model to use (default: llama3.1) */
+     model?: string;
+     /** Default max tokens (default: 4096) */
+     maxTokens?: number;
+     /** Request timeout in milliseconds (default: 120000) */
+     timeout?: number;
+     /** Keep alive duration for model in memory (default: '5m') */
+     keepAlive?: string;
+ }
+ /**
+  * Ollama LLM Provider
+  *
+  * Provides streaming chat completion using local Ollama models.
+  * Supports tool calling with compatible models (llama3.1, mistral, etc.)
+  */
+ export declare class OllamaProvider extends OpenAICompatibleProvider {
+     readonly name = "ollama";
+     private readonly keepAlive;
+     constructor(config?: OllamaProviderConfig);
+     /**
+      * Ollama has no authentication by default
+      */
+     protected getAuthHeaders(): Record<string, string>;
+     /**
+      * Ollama's OpenAI-compatible endpoint
+      */
+     protected getEndpointPath(): string;
+     /**
+      * Add Ollama-specific request options
+      */
+     protected buildProviderSpecificBody(options?: ChatOptions): Record<string, unknown>;
+     /**
+      * Map HTTP errors with Ollama-specific messages
+      */
+     protected mapHttpError(status: number, body: string, model: string): ProviderError;
+     /**
+      * Map connection errors with Ollama-specific messages
+      */
+     protected mapConnectionError(_error: Error): ProviderError;
+ }
+ /**
+  * Create an Ollama provider instance
+  *
+  * @example
+  * ```typescript
+  * // Default configuration (llama3.1 on localhost:11434)
+  * const provider = createOllamaProvider();
+  *
+  * // Custom model
+  * const provider = createOllamaProvider({ model: 'mistral' });
+  *
+  * // Custom server
+  * const provider = createOllamaProvider({
+  *   baseUrl: 'http://192.168.1.100:11434',
+  *   model: 'llama3.1:70b'
+  * });
+  * ```
+  */
+ export declare function createOllamaProvider(config?: OllamaProviderConfig): OllamaProvider;
package/dist/providers/ollama.js ADDED
@@ -0,0 +1,133 @@
+ /**
+  * Ollama LLM Provider
+  *
+  * Implements LLMProvider interface for local Ollama models.
+  * Extends OpenAICompatibleProvider for shared functionality.
+  *
+  * @example
+  * ```typescript
+  * const provider = createOllamaProvider({
+  *   model: 'llama3.1',
+  *   baseUrl: 'http://localhost:11434'
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires Ollama to be running locally
+  * - Default model is llama3.1 (supports tool calling)
+  * - Extended thinking is not supported (Claude-specific feature)
+  */
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ // Default configuration
+ const DEFAULT_MODEL = 'llama3.1';
+ const DEFAULT_BASE_URL = 'http://localhost:11434';
+ /**
+  * Ollama LLM Provider
+  *
+  * Provides streaming chat completion using local Ollama models.
+  * Supports tool calling with compatible models (llama3.1, mistral, etc.)
+  */
+ export class OllamaProvider extends OpenAICompatibleProvider {
+     name = 'ollama';
+     keepAlive;
+     constructor(config = {}) {
+         const baseConfig = {
+             baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+             model: config.model ?? DEFAULT_MODEL,
+             maxTokens: config.maxTokens,
+             timeout: config.timeout,
+         };
+         super(baseConfig);
+         this.keepAlive = config.keepAlive ?? '5m';
+     }
+     /**
+      * Ollama has no authentication by default
+      */
+     getAuthHeaders() {
+         return {};
+     }
+     /**
+      * Ollama's OpenAI-compatible endpoint
+      */
+     getEndpointPath() {
+         return '/v1/chat/completions';
+     }
+     /**
+      * Add Ollama-specific request options
+      */
+     buildProviderSpecificBody(options) {
+         const body = {
+             keep_alive: this.keepAlive,
+         };
+         // Ollama uses 'options' wrapper for model parameters
+         const modelOptions = {};
+         if (options?.maxTokens !== undefined) {
+             modelOptions.num_predict = options.maxTokens;
+         }
+         if (options?.temperature !== undefined) {
+             modelOptions.temperature = options.temperature;
+         }
+         if (Object.keys(modelOptions).length > 0) {
+             body.options = modelOptions;
+         }
+         return body;
+     }
+     /**
+      * Map HTTP errors with Ollama-specific messages
+      */
+     mapHttpError(status, body, model) {
+         let message = `Ollama error (${String(status)})`;
+         try {
+             const parsed = JSON.parse(body);
+             if (typeof parsed.error === 'string') {
+                 message = parsed.error;
+             }
+             else if (parsed.error?.message) {
+                 message = parsed.error.message;
+             }
+         }
+         catch {
+             message = body || message;
+         }
+         switch (status) {
+             case 404:
+                 return new ProviderError(`Model "${model}" not found. Run: ollama pull ${model}`, 'ollama', 404);
+             case 429:
+                 return new ProviderError('Rate limited', 'ollama', 429);
+             case 500:
+             case 502:
+             case 503:
+                 return new ProviderError(message, 'ollama', status);
+             default:
+                 return new ProviderError(message, 'ollama', status);
+         }
+     }
+     /**
+      * Map connection errors with Ollama-specific messages
+      */
+     mapConnectionError(_error) {
+         return new ProviderError(`Failed to connect to Ollama at ${this.baseUrl}. Is Ollama running? Try: ollama serve`, 'ollama');
+     }
+ }
+ /**
+  * Create an Ollama provider instance
+  *
+  * @example
+  * ```typescript
+  * // Default configuration (llama3.1 on localhost:11434)
+  * const provider = createOllamaProvider();
+  *
+  * // Custom model
+  * const provider = createOllamaProvider({ model: 'mistral' });
+  *
+  * // Custom server
+  * const provider = createOllamaProvider({
+  *   baseUrl: 'http://192.168.1.100:11434',
+  *   model: 'llama3.1:70b'
+  * });
+  * ```
+  */
+ export function createOllamaProvider(config = {}) {
+     return new OllamaProvider(config);
+ }
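The Ollama-specific request shaping above sends `keepAlive` as `keep_alive` on every request and forwards per-call `maxTokens` / `temperature` inside Ollama's `options` object as `num_predict` / `temperature`. A construction sketch for a remote Ollama host, mirroring the file's own example:

```typescript
import { createOllamaProvider } from '@compilr-dev/agents';

// Remote Ollama server; the model stays loaded for 30 minutes between calls
// (sent as keep_alive on each request).
const ollama = createOllamaProvider({
  baseUrl: 'http://192.168.1.100:11434',
  model: 'llama3.1:70b',
  keepAlive: '30m',
});
```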