specvector 0.0.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,146 @@
1
+ /**
2
+ * Provider Factory - Creates LLM providers based on configuration.
3
+ */
4
+
5
+ import type { Result } from "../types/result";
6
+ import { ok, err } from "../types/result";
7
+ import type { LLMProvider, LLMError } from "./provider";
8
+ import { LLMErrors } from "./provider";
9
+ import { OpenRouterProvider } from "./openrouter";
10
+ import { OllamaProvider } from "./ollama";
11
+
12
/**
 * Supported provider types.
 *
 * Keep in sync with `getSupportedProviders()` and the switch in
 * `createProvider()`.
 */
export type ProviderType = "openrouter" | "ollama";
16
+
17
/**
 * Unified configuration for creating any LLM provider.
 */
export interface ProviderConfig {
  /** Provider type */
  provider: ProviderType;
  /** Model identifier */
  model: string;
  /**
   * API key (required for OpenRouter). If omitted, the factory falls back
   * to the OPENROUTER_API_KEY environment variable. Ignored for Ollama.
   */
  apiKey?: string;
  /** Custom host URL (optional, for Ollama; ignored for OpenRouter) */
  host?: string;
}
30
+
31
+ /**
32
+ * Create an LLM provider based on configuration.
33
+ *
34
+ * @example
35
+ * ```typescript
36
+ * // OpenRouter
37
+ * const result = createProvider({
38
+ * provider: "openrouter",
39
+ * model: "anthropic/claude-sonnet-4.5",
40
+ * apiKey: process.env.OPENROUTER_API_KEY,
41
+ * });
42
+ *
43
+ * // Ollama
44
+ * const result = createProvider({
45
+ * provider: "ollama",
46
+ * model: "llama3.2",
47
+ * });
48
+ * ```
49
+ */
50
+ export function createProvider(
51
+ config: ProviderConfig
52
+ ): Result<LLMProvider, LLMError> {
53
+ // Validate common fields
54
+ if (!config.provider) {
55
+ return err(LLMErrors.providerError("Provider type is required"));
56
+ }
57
+
58
+ if (!config.model) {
59
+ return err(LLMErrors.invalidModel("Model is required"));
60
+ }
61
+
62
+ // Route to appropriate provider
63
+ switch (config.provider) {
64
+ case "openrouter":
65
+ return createOpenRouterFromConfig(config);
66
+
67
+ case "ollama":
68
+ return createOllamaFromConfig(config);
69
+
70
+ default:
71
+ return err(
72
+ LLMErrors.providerError(
73
+ `Unsupported provider: "${(config as { provider: string }).provider}". ` +
74
+ `Supported providers: openrouter, ollama`
75
+ )
76
+ );
77
+ }
78
+ }
79
+
80
+ /**
81
+ * Create OpenRouter provider from unified config.
82
+ */
83
+ function createOpenRouterFromConfig(
84
+ config: ProviderConfig
85
+ ): Result<LLMProvider, LLMError> {
86
+ // Check for API key
87
+ const apiKey = config.apiKey ?? process.env.OPENROUTER_API_KEY;
88
+
89
+ if (!apiKey) {
90
+ return err(
91
+ LLMErrors.authFailed(
92
+ "OpenRouter requires an API key. Set OPENROUTER_API_KEY environment variable " +
93
+ "or provide apiKey in config."
94
+ )
95
+ );
96
+ }
97
+
98
+ return ok(new OpenRouterProvider({
99
+ apiKey,
100
+ model: config.model,
101
+ }));
102
+ }
103
+
104
+ /**
105
+ * Create Ollama provider from unified config.
106
+ */
107
+ function createOllamaFromConfig(
108
+ config: ProviderConfig
109
+ ): Result<LLMProvider, LLMError> {
110
+ return ok(new OllamaProvider({
111
+ model: config.model,
112
+ host: config.host,
113
+ }));
114
+ }
115
+
116
+ /**
117
+ * Create provider from environment variables.
118
+ *
119
+ * Reads SPECVECTOR_PROVIDER (default: "openrouter") and SPECVECTOR_MODEL.
120
+ */
121
+ export function createProviderFromEnv(): Result<LLMProvider, LLMError> {
122
+ const provider = (process.env.SPECVECTOR_PROVIDER ?? "openrouter") as ProviderType;
123
+ const model = process.env.SPECVECTOR_MODEL;
124
+
125
+ if (!model) {
126
+ return err(
127
+ LLMErrors.invalidModel(
128
+ "SPECVECTOR_MODEL environment variable is required"
129
+ )
130
+ );
131
+ }
132
+
133
+ return createProvider({
134
+ provider,
135
+ model,
136
+ apiKey: process.env.OPENROUTER_API_KEY,
137
+ host: process.env.OLLAMA_HOST,
138
+ });
139
+ }
140
+
141
+ /**
142
+ * Get list of supported providers.
143
+ */
144
+ export function getSupportedProviders(): ProviderType[] {
145
+ return ["openrouter", "ollama"];
146
+ }
@@ -0,0 +1,50 @@
1
+ /**
2
+ * LLM Provider module - Public API
3
+ *
4
+ * @example
5
+ * ```typescript
6
+ * import { createProvider, type ProviderConfig } from "./llm";
7
+ *
8
+ * const config: ProviderConfig = {
9
+ * provider: "openrouter",
10
+ * model: "anthropic/claude-sonnet-4.5",
11
+ * apiKey: process.env.OPENROUTER_API_KEY,
12
+ * };
13
+ *
14
+ * const result = createProvider(config);
15
+ * if (result.ok) {
16
+ * const response = await result.value.chat([
17
+ * { role: "user", content: "Hello!" }
18
+ * ]);
19
+ * }
20
+ * ```
21
+ */
22
+
23
+ // Re-export types
24
+ export type {
25
+ Message,
26
+ MessageRole,
27
+ Tool,
28
+ ToolCall,
29
+ ChatResponse,
30
+ ChatOptions,
31
+ TokenUsage,
32
+ JSONSchema,
33
+ } from "../types/llm";
34
+
35
+ // Re-export provider interface and errors
36
+ export type { LLMProvider, LLMError, LLMErrorCode } from "./provider";
37
+ export { LLMErrors, isRetryableError, createLLMError } from "./provider";
38
+
39
+ // Re-export factory
40
+ export {
41
+ createProvider,
42
+ createProviderFromEnv,
43
+ getSupportedProviders,
44
+ type ProviderConfig,
45
+ type ProviderType,
46
+ } from "./factory";
47
+
48
+ // Re-export individual providers for direct use
49
+ export { OpenRouterProvider, type OpenRouterConfig } from "./openrouter";
50
+ export { OllamaProvider, type OllamaConfig } from "./ollama";
@@ -0,0 +1,313 @@
1
+ /**
2
+ * Ollama LLM Provider implementation.
3
+ * Uses local Ollama API at /api/chat for self-hosted models.
4
+ */
5
+
6
+ import type { Result } from "../types/result";
7
+ import { ok, err } from "../types/result";
8
+ import type { Message, ChatResponse, ChatOptions, Tool, ToolCall } from "../types/llm";
9
+ import type { LLMProvider, LLMError } from "./provider";
10
+ import { LLMErrors } from "./provider";
11
+
12
// Default endpoint when neither config.host nor OLLAMA_HOST is set.
const DEFAULT_OLLAMA_HOST = "http://localhost:11434";
// Chat request timeout.
const DEFAULT_TIMEOUT_MS = 120000; // 2 minutes (local models can be slow)
14
+
15
/**
 * Configuration for Ollama provider.
 */
export interface OllamaConfig {
  /** Model name (e.g., "llama3.2", "mistral", "codellama") */
  model: string;
  /**
   * Ollama host URL. When omitted, the OLLAMA_HOST env var is used,
   * then http://localhost:11434.
   */
  host?: string;
}
24
+
25
/**
 * Ollama /api/chat request body (non-streaming).
 */
interface OllamaRequest {
  model: string;
  messages: OllamaMessage[];
  tools?: OllamaTool[];
  // Always false here: the provider consumes a single JSON response.
  stream: false;
  options?: {
    temperature?: number;
    // Ollama's name for the max-tokens limit.
    num_predict?: number;
  };
}

// Single chat message in Ollama's wire format.
interface OllamaMessage {
  role: "system" | "user" | "assistant" | "tool";
  content: string;
  tool_calls?: OllamaToolCall[];
  // Set on role "tool" messages to name the tool that produced the result.
  tool_name?: string;
}

// Tool (function) definition advertised to the model.
interface OllamaTool {
  type: "function";
  function: {
    name: string;
    description: string;
    parameters: object;
  };
}

// Tool invocation emitted by the model.
interface OllamaToolCall {
  function: {
    name: string;
    arguments: Record<string, unknown>; // Object, not JSON string
  };
}

/**
 * Ollama API response format.
 */
interface OllamaResponse {
  model: string;
  message: {
    role: string;
    content: string;
    tool_calls?: OllamaToolCall[];
  };
  done: boolean;
  done_reason?: string;
  // Token counts; optional on the wire, defaulted to 0 when parsed.
  prompt_eval_count?: number;
  eval_count?: number;
  total_duration?: number;
}

// Error payload Ollama returns on non-2xx responses.
interface OllamaErrorResponse {
  error: string;
}
82
+
83
+ /**
84
+ * Ollama LLM Provider for local/self-hosted models.
85
+ */
86
+ export class OllamaProvider implements LLMProvider {
87
+ readonly name = "ollama";
88
+ readonly model: string;
89
+
90
+ private readonly host: string;
91
+
92
+ constructor(config: OllamaConfig) {
93
+ this.model = config.model;
94
+ this.host = config.host ?? process.env.OLLAMA_HOST ?? DEFAULT_OLLAMA_HOST;
95
+ }
96
+
97
+ /**
98
+ * Create provider from environment variables.
99
+ */
100
+ static fromEnv(model: string): OllamaProvider {
101
+ return new OllamaProvider({ model });
102
+ }
103
+
104
+ /**
105
+ * Check if Ollama is running.
106
+ */
107
+ async isAvailable(): Promise<boolean> {
108
+ try {
109
+ const response = await fetch(`${this.host}/api/version`, {
110
+ signal: AbortSignal.timeout(5000),
111
+ });
112
+ return response.ok;
113
+ } catch {
114
+ return false;
115
+ }
116
+ }
117
+
118
+ async chat(
119
+ messages: Message[],
120
+ options?: ChatOptions
121
+ ): Promise<Result<ChatResponse, LLMError>> {
122
+ const request = this.buildRequest(messages, options);
123
+
124
+ // Create abort controller for timeout
125
+ const controller = new AbortController();
126
+ const timeoutId = setTimeout(() => controller.abort(), DEFAULT_TIMEOUT_MS);
127
+
128
+ try {
129
+ const response = await fetch(`${this.host}/api/chat`, {
130
+ method: "POST",
131
+ headers: {
132
+ "Content-Type": "application/json",
133
+ },
134
+ body: JSON.stringify(request),
135
+ signal: controller.signal,
136
+ });
137
+
138
+ clearTimeout(timeoutId);
139
+
140
+ if (!response.ok) {
141
+ return this.handleErrorResponse(response);
142
+ }
143
+
144
+ const data = (await response.json()) as OllamaResponse;
145
+ return this.parseResponse(data);
146
+ } catch (error) {
147
+ clearTimeout(timeoutId);
148
+
149
+ // Check for abort/timeout
150
+ if (error instanceof Error && error.name === "AbortError") {
151
+ return err(LLMErrors.timeout());
152
+ }
153
+
154
+ // Check for connection refused (Ollama not running)
155
+ if (error instanceof TypeError) {
156
+ if (error.message.includes("fetch") ||
157
+ error.message.includes("ECONNREFUSED") ||
158
+ error.message.includes("Failed to fetch")) {
159
+ return err(LLMErrors.networkError(error));
160
+ }
161
+ }
162
+
163
+ return err(
164
+ LLMErrors.providerError(
165
+ `Ollama request failed: ${error instanceof Error ? error.message : "Unknown error"}`,
166
+ error instanceof Error ? error : undefined
167
+ )
168
+ );
169
+ }
170
+ }
171
+
172
+ private buildRequest(
173
+ messages: Message[],
174
+ options?: ChatOptions
175
+ ): OllamaRequest {
176
+ const request: OllamaRequest = {
177
+ model: this.model,
178
+ messages: messages.map(this.mapMessage),
179
+ stream: false,
180
+ };
181
+
182
+ if (options?.tools && options.tools.length > 0) {
183
+ request.tools = options.tools.map(this.mapTool);
184
+ }
185
+
186
+ // Build options object if needed
187
+ const ollamaOptions: OllamaRequest["options"] = {};
188
+
189
+ if (options?.temperature !== undefined) {
190
+ ollamaOptions.temperature = options.temperature;
191
+ }
192
+
193
+ if (options?.max_tokens !== undefined) {
194
+ ollamaOptions.num_predict = options.max_tokens;
195
+ }
196
+
197
+ if (Object.keys(ollamaOptions).length > 0) {
198
+ request.options = ollamaOptions;
199
+ }
200
+
201
+ return request;
202
+ }
203
+
204
+ private mapMessage = (message: Message): OllamaMessage => {
205
+ const mapped: OllamaMessage = {
206
+ role: message.role,
207
+ content: message.content ?? "",
208
+ };
209
+
210
+ // Convert tool calls: our format uses JSON string, Ollama uses object
211
+ if (message.tool_calls && message.tool_calls.length > 0) {
212
+ mapped.tool_calls = message.tool_calls.map((tc) => ({
213
+ function: {
214
+ name: tc.name,
215
+ arguments: JSON.parse(tc.arguments), // Parse JSON string to object
216
+ },
217
+ }));
218
+ }
219
+
220
+ // For tool responses, use tool_name instead of name
221
+ if (message.role === "tool" && message.name) {
222
+ mapped.tool_name = message.name;
223
+ }
224
+
225
+ return mapped;
226
+ };
227
+
228
+ private mapTool = (tool: Tool): OllamaTool => ({
229
+ type: "function",
230
+ function: {
231
+ name: tool.name,
232
+ description: tool.description,
233
+ parameters: tool.parameters,
234
+ },
235
+ });
236
+
237
+ private parseResponse(data: OllamaResponse): Result<ChatResponse, LLMError> {
238
+ if (!data.message) {
239
+ return err(LLMErrors.providerError("Ollama returned empty response"));
240
+ }
241
+
242
+ let toolCalls: ToolCall[] | undefined;
243
+ if (data.message.tool_calls && data.message.tool_calls.length > 0) {
244
+ // Generate unique IDs and convert object args to JSON string
245
+ toolCalls = data.message.tool_calls.map((tc, index) => ({
246
+ id: `ollama_call_${Date.now()}_${index}`,
247
+ name: tc.function.name,
248
+ arguments: JSON.stringify(tc.function.arguments), // Convert object to JSON string
249
+ }));
250
+ }
251
+
252
+ // Determine finish reason
253
+ let finishReason: ChatResponse["finish_reason"] = "stop";
254
+ if (toolCalls && toolCalls.length > 0) {
255
+ finishReason = "tool_calls";
256
+ } else if (data.done_reason === "length") {
257
+ finishReason = "length";
258
+ }
259
+
260
+ return ok({
261
+ content: data.message.content || null,
262
+ tool_calls: toolCalls,
263
+ usage: {
264
+ prompt_tokens: data.prompt_eval_count ?? 0,
265
+ completion_tokens: data.eval_count ?? 0,
266
+ total_tokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0),
267
+ },
268
+ model: data.model,
269
+ finish_reason: finishReason,
270
+ });
271
+ }
272
+
273
+ private async handleErrorResponse(
274
+ response: Response
275
+ ): Promise<Result<ChatResponse, LLMError>> {
276
+ let errorMessage = `Ollama API error: ${response.status}`;
277
+
278
+ try {
279
+ const errorData = (await response.json()) as OllamaErrorResponse;
280
+ if (errorData.error) {
281
+ errorMessage = errorData.error;
282
+ }
283
+ } catch {
284
+ // Use default error message
285
+ }
286
+
287
+ // Check for model not found
288
+ if (response.status === 404 ||
289
+ errorMessage.toLowerCase().includes("model") ||
290
+ errorMessage.toLowerCase().includes("not found")) {
291
+ return err(LLMErrors.invalidModel(this.model));
292
+ }
293
+
294
+ // Check for timeout
295
+ if (response.status === 408 || response.status === 504) {
296
+ return err(LLMErrors.timeout());
297
+ }
298
+
299
+ return err(LLMErrors.providerError(errorMessage));
300
+ }
301
+ }
302
+
303
+ /**
304
+ * Create an Ollama provider with validation.
305
+ */
306
+ export function createOllamaProvider(
307
+ config: OllamaConfig
308
+ ): Result<OllamaProvider, LLMError> {
309
+ if (!config.model) {
310
+ return err(LLMErrors.invalidModel("Model is required"));
311
+ }
312
+ return ok(new OllamaProvider(config));
313
+ }