@hazeljs/ai 0.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +496 -0
  2. package/dist/ai-enhanced.service.d.ts +108 -0
  3. package/dist/ai-enhanced.service.d.ts.map +1 -0
  4. package/dist/ai-enhanced.service.js +345 -0
  5. package/dist/ai-enhanced.types.d.ts +269 -0
  6. package/dist/ai-enhanced.types.d.ts.map +1 -0
  7. package/dist/ai-enhanced.types.js +2 -0
  8. package/dist/ai.decorator.d.ts +4 -0
  9. package/dist/ai.decorator.d.ts.map +1 -0
  10. package/dist/ai.decorator.js +57 -0
  11. package/dist/ai.module.d.ts +12 -0
  12. package/dist/ai.module.d.ts.map +1 -0
  13. package/dist/ai.module.js +44 -0
  14. package/dist/ai.service.d.ts +10 -0
  15. package/dist/ai.service.d.ts.map +1 -0
  16. package/dist/ai.service.js +261 -0
  17. package/dist/ai.types.d.ts +30 -0
  18. package/dist/ai.types.d.ts.map +1 -0
  19. package/dist/ai.types.js +2 -0
  20. package/dist/context/context.manager.d.ts +69 -0
  21. package/dist/context/context.manager.d.ts.map +1 -0
  22. package/dist/context/context.manager.js +168 -0
  23. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  24. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  25. package/dist/decorators/ai-function.decorator.js +80 -0
  26. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  27. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  28. package/dist/decorators/ai-validate.decorator.js +83 -0
  29. package/dist/index.d.ts +17 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +38 -0
  32. package/dist/providers/anthropic.provider.d.ts +48 -0
  33. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  34. package/dist/providers/anthropic.provider.js +194 -0
  35. package/dist/providers/cohere.provider.d.ts +57 -0
  36. package/dist/providers/cohere.provider.d.ts.map +1 -0
  37. package/dist/providers/cohere.provider.js +230 -0
  38. package/dist/providers/gemini.provider.d.ts +45 -0
  39. package/dist/providers/gemini.provider.d.ts.map +1 -0
  40. package/dist/providers/gemini.provider.js +180 -0
  41. package/dist/providers/ollama.provider.d.ts +45 -0
  42. package/dist/providers/ollama.provider.d.ts.map +1 -0
  43. package/dist/providers/ollama.provider.js +232 -0
  44. package/dist/providers/openai.provider.d.ts +47 -0
  45. package/dist/providers/openai.provider.d.ts.map +1 -0
  46. package/dist/providers/openai.provider.js +273 -0
  47. package/dist/tracking/token.tracker.d.ts +72 -0
  48. package/dist/tracking/token.tracker.d.ts.map +1 -0
  49. package/dist/tracking/token.tracker.js +222 -0
  50. package/dist/vector/vector.service.d.ts +50 -0
  51. package/dist/vector/vector.service.d.ts.map +1 -0
  52. package/dist/vector/vector.service.js +163 -0
  53. package/package.json +52 -0
@@ -0,0 +1,230 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.CohereProvider = void 0;
const core_1 = __importDefault(require("@hazeljs/core"));
const cohere_ai_1 = require("cohere-ai");
/**
 * Cohere AI Provider
 *
 * Production-ready implementation using the Cohere AI SDK.
 *
 * Setup:
 * 1. Install the SDK: `npm install cohere-ai`
 * 2. Set COHERE_API_KEY environment variable
 * 3. Use the provider in your application
 *
 * Supported models:
 * - command-r-plus: Most powerful model for complex tasks
 * - command-r: Balanced performance and cost
 * - command: Standard text generation
 * - command-light: Fast, cost-effective model
 * - embed-english-v3.0: English text embeddings
 * - embed-multilingual-v3.0: Multilingual embeddings
 * - rerank-english-v3.0: Document reranking
 */
class CohereProvider {
    /**
     * @param {string} [apiKey] - Cohere API key; falls back to the COHERE_API_KEY
     *   environment variable. A missing key only logs a warning here — calls will
     *   fail later at the API.
     * @param {string} [endpoint] - Optional endpoint override. NOTE(review): the
     *   endpoint is stored but the SDK client below is constructed with the token
     *   only — confirm whether a custom endpoint should be passed to CohereClient.
     */
    constructor(apiKey, endpoint) {
        this.name = 'cohere';
        this.apiKey = apiKey || process.env.COHERE_API_KEY || '';
        this.endpoint = endpoint || 'https://api.cohere.ai/v1';
        if (!this.apiKey) {
            core_1.default.warn('Cohere API key not provided. Set COHERE_API_KEY environment variable.');
        }
        this.cohere = new cohere_ai_1.CohereClient({ token: this.apiKey });
        core_1.default.info('Cohere provider initialized');
    }
    /**
     * Generate a non-streaming completion.
     *
     * Chat messages are flattened into a single "role: content" prompt because
     * the generate endpoint takes one prompt string, not a message list.
     *
     * @param request - Completion request (model, messages, sampling params).
     * @returns Normalized completion response with token usage.
     * @throws {Error} Wrapped `Cohere API error: …` on any SDK/network failure.
     */
    async complete(request) {
        const modelName = request.model || 'command';
        core_1.default.debug(`Cohere completion request for model: ${modelName}`);
        try {
            // Convert messages to prompt format
            const prompt = request.messages.map((m) => `${m.role}: ${m.content}`).join('\n\n');
            // Generate completion
            const response = await this.cohere.generate({
                model: modelName,
                prompt,
                temperature: request.temperature,
                maxTokens: request.maxTokens,
                p: request.topP,
            });
            // Guard against an empty generations array so callers get a clear
            // provider error instead of a TypeError on `generations[0].text`.
            const generation = response.generations && response.generations[0];
            if (!generation) {
                throw new Error('Cohere API returned no generations');
            }
            // Billed units may be absent; default each side to 0 before summing.
            const inputTokens = response.meta?.billedUnits?.inputTokens || 0;
            const outputTokens = response.meta?.billedUnits?.outputTokens || 0;
            return {
                id: response.id || `cohere-${Date.now()}`,
                content: generation.text,
                role: 'assistant',
                model: modelName,
                usage: {
                    promptTokens: inputTokens,
                    completionTokens: outputTokens,
                    totalTokens: inputTokens + outputTokens,
                },
                finishReason: 'COMPLETE',
            };
        }
        catch (error) {
            core_1.default.error('Cohere completion error:', error);
            throw new Error(`Cohere API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate a streaming completion.
     *
     * Yields one chunk per `text-generation` event (cumulative `content` plus
     * the incremental `delta`), then a final `done: true` chunk on `stream-end`
     * carrying token usage when the API reports billed units.
     *
     * @param request - Completion request (model, messages, sampling params).
     * @throws {Error} Wrapped `Cohere streaming error: …` on failure.
     */
    async *streamComplete(request) {
        const modelName = request.model || 'command';
        core_1.default.debug('Cohere streaming completion started');
        try {
            // Convert messages to prompt format
            const prompt = request.messages.map((m) => `${m.role}: ${m.content}`).join('\n\n');
            // Generate streaming completion
            const stream = await this.cohere.generateStream({
                model: modelName,
                prompt,
                temperature: request.temperature,
                maxTokens: request.maxTokens,
                p: request.topP,
            });
            let fullContent = '';
            const streamId = `cohere-stream-${Date.now()}`;
            for await (const chunk of stream) {
                if (chunk.eventType === 'text-generation') {
                    const text = chunk.text || '';
                    fullContent += text;
                    yield {
                        id: streamId,
                        content: fullContent,
                        delta: text,
                        done: false,
                    };
                }
                else if (chunk.eventType === 'stream-end') {
                    const response = chunk;
                    const billed = response.response?.meta?.billedUnits;
                    yield {
                        id: streamId,
                        content: fullContent,
                        delta: '',
                        done: true,
                        // Usage is only attached when the terminal event includes billed units.
                        usage: billed
                            ? {
                                promptTokens: billed.inputTokens || 0,
                                completionTokens: billed.outputTokens || 0,
                                totalTokens: (billed.inputTokens || 0) + (billed.outputTokens || 0),
                            }
                            : undefined,
                    };
                }
            }
            core_1.default.debug('Cohere streaming completed');
        }
        catch (error) {
            core_1.default.error('Cohere streaming error:', error);
            throw new Error(`Cohere streaming error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate embeddings for one string or an array of strings.
     *
     * @param request - Embedding request (`input` string or string[], optional model).
     * @returns Embedding vectors plus an *estimated* token count (~4 chars/token;
     *   Cohere does not report exact counts for embeddings).
     * @throws {Error} Wrapped `Cohere embedding error: …` on failure.
     */
    async embed(request) {
        const modelName = request.model || 'embed-english-v3.0';
        core_1.default.debug(`Cohere embedding request for model: ${modelName}`);
        try {
            const inputs = Array.isArray(request.input) ? request.input : [request.input];
            // Generate embeddings
            const response = await this.cohere.embed({
                texts: inputs,
                model: modelName,
                inputType: 'search_document',
            });
            // Estimate token usage (Cohere doesn't provide exact counts for embeddings)
            const estimatedTokens = inputs.reduce((sum, text) => sum + Math.ceil(text.length / 4), 0);
            // Handle different response formats: v1 returns a plain array, newer
            // responses nest vectors under `embeddings.float`.
            const embeddings = Array.isArray(response.embeddings)
                ? response.embeddings
                : response.embeddings.float || [];
            return {
                embeddings,
                model: modelName,
                usage: {
                    promptTokens: estimatedTokens,
                    totalTokens: estimatedTokens,
                },
            };
        }
        catch (error) {
            core_1.default.error('Cohere embedding error:', error);
            throw new Error(`Cohere embedding error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Check whether the provider is usable: key configured and API reachable.
     *
     * NOTE(review): this issues a real (billed) `generate` call against
     * command-light; consider whether a cheaper health check is preferable.
     *
     * @returns true when a minimal test request succeeds, false otherwise.
     */
    async isAvailable() {
        if (!this.apiKey) {
            core_1.default.warn('Cohere API key not configured');
            return false;
        }
        try {
            // Test with a minimal request
            await this.cohere.generate({
                model: 'command-light',
                prompt: 'test',
                maxTokens: 10,
            });
            return true;
        }
        catch (error) {
            core_1.default.error('Cohere availability check failed:', error);
            return false;
        }
    }
    /**
     * Get the list of model names this provider recognizes
     * (generation, embedding, and rerank models).
     */
    getSupportedModels() {
        return [
            'command-r-plus',
            'command-r',
            'command',
            'command-light',
            'command-nightly',
            'embed-english-v3.0',
            'embed-multilingual-v3.0',
            'embed-english-light-v3.0',
            'embed-multilingual-light-v3.0',
            'rerank-english-v3.0',
            'rerank-multilingual-v3.0',
        ];
    }
    /**
     * Rerank documents against a query (Cohere-specific feature).
     * Useful for RAG applications to improve retrieval quality.
     *
     * @param query - The search query.
     * @param documents - Candidate documents to score.
     * @param topN - Maximum number of results to return.
     * @param model - Rerank model name (defaults to rerank-english-v3.0).
     * @returns Results as `{ index, score, document }`, where `index` points
     *   back into the input `documents` array.
     * @throws {Error} Wrapped `Cohere rerank error: …` on failure.
     */
    async rerank(query, documents, topN, model) {
        const modelName = model || 'rerank-english-v3.0';
        core_1.default.debug(`Cohere rerank request for model: ${modelName}`);
        try {
            const response = await this.cohere.rerank({
                query,
                documents,
                topN,
                model: modelName,
            });
            return response.results.map((r) => ({
                index: r.index,
                score: r.relevanceScore,
                document: documents[r.index],
            }));
        }
        catch (error) {
            core_1.default.error('Cohere rerank error:', error);
            throw new Error(`Cohere rerank error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
}
exports.CohereProvider = CohereProvider;
@@ -0,0 +1,45 @@
import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse } from '../ai-enhanced.types';
/**
 * Google Gemini AI Provider
 *
 * Production-ready implementation using Google Generative AI SDK.
 *
 * Setup:
 * 1. Install the SDK: `npm install @google/generative-ai`
 * 2. Set GEMINI_API_KEY environment variable
 * 3. Use the provider in your application
 *
 * Supported models:
 * - gemini-pro: Text generation
 * - gemini-pro-vision: Multimodal (text + images)
 * - gemini-1.5-pro: Latest model with extended context
 * - text-embedding-004: Text embeddings
 */
export declare class GeminiProvider implements IAIProvider {
    /** Provider identifier used to select this provider. */
    readonly name: AIProvider;
    /** API key from the constructor argument or the GEMINI_API_KEY env var. */
    private apiKey;
    /** Underlying Google Generative AI SDK client. */
    private genAI;
    /** API endpoint (constructor override or default). */
    private endpoint;
    /**
     * Both arguments are optional; when omitted, the key is read from the
     * environment and a default endpoint is used.
     */
    constructor(apiKey?: string, endpoint?: string);
    /**
     * Generate completion (non-streaming).
     */
    complete(request: AICompletionRequest): Promise<AICompletionResponse>;
    /**
     * Generate streaming completion; yields incremental chunks.
     */
    streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
    /**
     * Generate embeddings for one or more input strings.
     */
    embed(request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
    /**
     * Check if provider is available (key configured and API reachable).
     */
    isAvailable(): Promise<boolean>;
    /**
     * Get supported models (generation and embedding).
     */
    getSupportedModels(): string[];
}
//# sourceMappingURL=gemini.provider.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"gemini.provider.d.ts","sourceRoot":"","sources":["../../src/providers/gemini.provider.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EACpB,MAAM,sBAAsB,CAAC;AAI9B;;;;;;;;;;;;;;;GAeG;AACH,qBAAa,cAAe,YAAW,WAAW;IAChD,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAY;IACrC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,KAAK,CAAqB;IAClC,OAAO,CAAC,QAAQ,CAAS;gBAEb,MAAM,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM;IAY9C;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC;IAwC3E;;OAEG;IACI,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC;IAoDlF;;OAEG;IACG,KAAK,CAAC,OAAO,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAmCtE;;OAEG;IACG,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC;IAiBrC;;OAEG;IACH,kBAAkB,IAAI,MAAM,EAAE;CAS/B"}
@@ -0,0 +1,180 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.GeminiProvider = void 0;
const core_1 = __importDefault(require("@hazeljs/core"));
const generative_ai_1 = require("@google/generative-ai");
/**
 * Google Gemini AI Provider
 *
 * Production-ready implementation using Google Generative AI SDK.
 *
 * Setup:
 * 1. Install the SDK: `npm install @google/generative-ai`
 * 2. Set GEMINI_API_KEY environment variable
 * 3. Use the provider in your application
 *
 * Supported models:
 * - gemini-pro: Text generation
 * - gemini-pro-vision: Multimodal (text + images)
 * - gemini-1.5-pro: Latest model with extended context
 * - text-embedding-004: Text embeddings
 */
/**
 * Flatten chat messages into Gemini's single-prompt text format.
 * The 'assistant' role is mapped to Gemini's 'model' role; every other
 * role is passed through unchanged.
 */
function toGeminiPrompt(messages) {
    return messages
        .map((msg) => `${msg.role === 'assistant' ? 'model' : msg.role}: ${msg.content}`)
        .join('\n\n');
}
class GeminiProvider {
    /**
     * @param {string} [apiKey] - API key; falls back to GEMINI_API_KEY env var.
     * @param {string} [endpoint] - Optional endpoint override (stored; the SDK
     *   client itself is constructed from the key only).
     */
    constructor(apiKey, endpoint) {
        this.name = 'gemini';
        this.apiKey = apiKey || process.env.GEMINI_API_KEY || '';
        this.endpoint = endpoint || 'https://generativelanguage.googleapis.com/v1';
        if (!this.apiKey) {
            core_1.default.warn('Gemini API key not provided. Set GEMINI_API_KEY environment variable.');
        }
        this.genAI = new generative_ai_1.GoogleGenerativeAI(this.apiKey);
        core_1.default.info('Gemini provider initialized');
    }
    /**
     * Generate a non-streaming completion and normalize the SDK response
     * (content, token usage, finish reason) into the provider contract.
     */
    async complete(request) {
        const targetModel = request.model || 'gemini-pro';
        core_1.default.debug(`Gemini completion request for model: ${targetModel}`);
        try {
            const client = this.genAI.getGenerativeModel({ model: targetModel });
            const result = await client.generateContent(toGeminiPrompt(request.messages));
            const response = result.response;
            const meta = response.usageMetadata;
            return {
                id: `gemini-${Date.now()}`,
                content: response.text(),
                role: 'assistant',
                model: targetModel,
                usage: {
                    promptTokens: meta?.promptTokenCount || 0,
                    completionTokens: meta?.candidatesTokenCount || 0,
                    totalTokens: meta?.totalTokenCount || 0,
                },
                finishReason: response.candidates?.[0]?.finishReason || 'STOP',
            };
        }
        catch (error) {
            core_1.default.error('Gemini completion error:', error);
            throw new Error(`Gemini API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate a streaming completion. Each yielded chunk carries the text
     * accumulated so far (`content`), the new fragment (`delta`), and — on the
     * chunk that reports a finish reason — `done: true` plus usage metadata
     * when the SDK supplies it.
     */
    async *streamComplete(request) {
        const targetModel = request.model || 'gemini-pro';
        core_1.default.debug('Gemini streaming completion started');
        try {
            const client = this.genAI.getGenerativeModel({ model: targetModel });
            const result = await client.generateContentStream(toGeminiPrompt(request.messages));
            let aggregated = '';
            let chunkIndex = 0;
            for await (const part of result.stream) {
                const delta = part.text();
                aggregated += delta;
                chunkIndex += 1;
                // A finish reason on a candidate marks the terminal chunk.
                const finished = part.candidates?.[0]?.finishReason !== undefined;
                const meta = finished ? part.usageMetadata : undefined;
                yield {
                    id: `gemini-stream-${Date.now()}-${chunkIndex}`,
                    content: aggregated,
                    delta,
                    done: finished,
                    usage: meta
                        ? {
                            promptTokens: meta.promptTokenCount || 0,
                            completionTokens: meta.candidatesTokenCount || 0,
                            totalTokens: meta.totalTokenCount || 0,
                        }
                        : undefined,
                };
            }
            core_1.default.debug('Gemini streaming completed');
        }
        catch (error) {
            core_1.default.error('Gemini streaming error:', error);
            throw new Error(`Gemini streaming error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate embeddings for a string or an array of strings. Token usage is
     * estimated at ~4 characters per token because the embedding API does not
     * report exact counts.
     */
    async embed(request) {
        const targetModel = request.model || 'text-embedding-004';
        core_1.default.debug(`Gemini embedding request for model: ${targetModel}`);
        try {
            const client = this.genAI.getGenerativeModel({ model: targetModel });
            const texts = Array.isArray(request.input) ? request.input : [request.input];
            // Embed all inputs concurrently.
            const embeddings = await Promise.all(texts.map(async (text) => (await client.embedContent(text)).embedding.values));
            let estimatedTokens = 0;
            for (const text of texts) {
                estimatedTokens += Math.ceil(text.length / 4);
            }
            return {
                embeddings,
                model: targetModel,
                usage: {
                    promptTokens: estimatedTokens,
                    totalTokens: estimatedTokens,
                },
            };
        }
        catch (error) {
            core_1.default.error('Gemini embedding error:', error);
            throw new Error(`Gemini embedding error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Check availability: false when no key is configured, otherwise attempt a
     * minimal generateContent call and report success/failure.
     */
    async isAvailable() {
        if (!this.apiKey) {
            core_1.default.warn('Gemini API key not configured');
            return false;
        }
        try {
            await this.genAI.getGenerativeModel({ model: 'gemini-pro' }).generateContent('test');
            return true;
        }
        catch (error) {
            core_1.default.error('Gemini availability check failed:', error);
            return false;
        }
    }
    /**
     * List the model names this provider recognizes (generation + embedding).
     */
    getSupportedModels() {
        const models = [
            'gemini-pro',
            'gemini-pro-vision',
            'gemini-1.5-pro',
            'gemini-1.5-flash',
            'text-embedding-004',
        ];
        return models;
    }
}
exports.GeminiProvider = GeminiProvider;
@@ -0,0 +1,45 @@
import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse } from '../ai-enhanced.types';
/**
 * Ollama Provider
 * Production-ready implementation for local LLM support via Ollama
 * Supports models like Llama 2, Mistral, CodeLlama, and other open-source models
 */
export declare class OllamaProvider implements IAIProvider {
    /** Provider identifier used to select this provider. */
    readonly name: AIProvider;
    /** Base URL of the local Ollama server. */
    private baseURL;
    /** Model used when a request does not specify one. */
    private defaultModel;
    /**
     * Both config fields are optional; defaults are applied when omitted.
     */
    constructor(config?: {
        baseURL?: string;
        defaultModel?: string;
    });
    /**
     * Transform messages to Ollama prompt format
     */
    private transformMessages;
    /**
     * Generate completion (non-streaming)
     */
    complete(request: AICompletionRequest): Promise<AICompletionResponse>;
    /**
     * Generate streaming completion; yields incremental chunks
     */
    streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
    /**
     * Generate embeddings for one or more input strings
     */
    embed(request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
    /**
     * Check if provider is available (local Ollama server reachable)
     */
    isAvailable(): Promise<boolean>;
    /**
     * Get supported models
     * Note: This returns common models, but Ollama supports any model you pull
     */
    getSupportedModels(): string[];
    /**
     * Get supported embedding models
     */
    getSupportedEmbeddingModels(): string[];
}
//# sourceMappingURL=ollama.provider.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ollama.provider.d.ts","sourceRoot":"","sources":["../../src/providers/ollama.provider.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EAEpB,MAAM,sBAAsB,CAAC;AAiC9B;;;;GAIG;AACH,qBAAa,cAAe,YAAW,WAAW;IAChD,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAY;IACrC,OAAO,CAAC,OAAO,CAAS;IACxB,OAAO,CAAC,YAAY,CAAS;gBAEjB,MAAM,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC;QAAC,YAAY,CAAC,EAAE,MAAM,CAAA;KAAE;IAMhE;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAUzB;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC;IA+C3E;;OAEG;IACI,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC;IAqFlF;;OAEG;IACG,KAAK,CAAC,OAAO,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAqCtE;;OAEG;IACG,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC;IAWrC;;;OAGG;IACH,kBAAkB,IAAI,MAAM,EAAE;IAkB9B;;OAEG;IACH,2BAA2B,IAAI,MAAM,EAAE;CAGxC"}