@hazeljs/ai 0.2.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/LICENSE +192 -0
  2. package/README.md +497 -0
  3. package/dist/ai-enhanced.service.d.ts +108 -0
  4. package/dist/ai-enhanced.service.d.ts.map +1 -0
  5. package/dist/ai-enhanced.service.js +345 -0
  6. package/dist/ai-enhanced.service.test.d.ts +2 -0
  7. package/dist/ai-enhanced.service.test.d.ts.map +1 -0
  8. package/dist/ai-enhanced.service.test.js +501 -0
  9. package/dist/ai-enhanced.test.d.ts +2 -0
  10. package/dist/ai-enhanced.test.d.ts.map +1 -0
  11. package/dist/ai-enhanced.test.js +587 -0
  12. package/dist/ai-enhanced.types.d.ts +277 -0
  13. package/dist/ai-enhanced.types.d.ts.map +1 -0
  14. package/dist/ai-enhanced.types.js +2 -0
  15. package/dist/ai.decorator.d.ts +4 -0
  16. package/dist/ai.decorator.d.ts.map +1 -0
  17. package/dist/ai.decorator.js +57 -0
  18. package/dist/ai.decorator.test.d.ts +2 -0
  19. package/dist/ai.decorator.test.d.ts.map +1 -0
  20. package/dist/ai.decorator.test.js +189 -0
  21. package/dist/ai.module.d.ts +12 -0
  22. package/dist/ai.module.d.ts.map +1 -0
  23. package/dist/ai.module.js +44 -0
  24. package/dist/ai.module.test.d.ts +2 -0
  25. package/dist/ai.module.test.d.ts.map +1 -0
  26. package/dist/ai.module.test.js +23 -0
  27. package/dist/ai.service.d.ts +11 -0
  28. package/dist/ai.service.d.ts.map +1 -0
  29. package/dist/ai.service.js +266 -0
  30. package/dist/ai.service.test.d.ts +2 -0
  31. package/dist/ai.service.test.d.ts.map +1 -0
  32. package/dist/ai.service.test.js +222 -0
  33. package/dist/ai.types.d.ts +30 -0
  34. package/dist/ai.types.d.ts.map +1 -0
  35. package/dist/ai.types.js +2 -0
  36. package/dist/context/context.manager.d.ts +69 -0
  37. package/dist/context/context.manager.d.ts.map +1 -0
  38. package/dist/context/context.manager.js +168 -0
  39. package/dist/context/context.manager.test.d.ts +2 -0
  40. package/dist/context/context.manager.test.d.ts.map +1 -0
  41. package/dist/context/context.manager.test.js +180 -0
  42. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  43. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  44. package/dist/decorators/ai-function.decorator.js +80 -0
  45. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  46. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  47. package/dist/decorators/ai-validate.decorator.js +83 -0
  48. package/dist/index.d.ts +18 -0
  49. package/dist/index.d.ts.map +1 -0
  50. package/dist/index.js +40 -0
  51. package/dist/prompts/task.prompt.d.ts +12 -0
  52. package/dist/prompts/task.prompt.d.ts.map +1 -0
  53. package/dist/prompts/task.prompt.js +12 -0
  54. package/dist/providers/anthropic.provider.d.ts +48 -0
  55. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  56. package/dist/providers/anthropic.provider.js +194 -0
  57. package/dist/providers/anthropic.provider.test.d.ts +2 -0
  58. package/dist/providers/anthropic.provider.test.d.ts.map +1 -0
  59. package/dist/providers/anthropic.provider.test.js +222 -0
  60. package/dist/providers/cohere.provider.d.ts +57 -0
  61. package/dist/providers/cohere.provider.d.ts.map +1 -0
  62. package/dist/providers/cohere.provider.js +230 -0
  63. package/dist/providers/cohere.provider.test.d.ts +2 -0
  64. package/dist/providers/cohere.provider.test.d.ts.map +1 -0
  65. package/dist/providers/cohere.provider.test.js +267 -0
  66. package/dist/providers/gemini.provider.d.ts +45 -0
  67. package/dist/providers/gemini.provider.d.ts.map +1 -0
  68. package/dist/providers/gemini.provider.js +180 -0
  69. package/dist/providers/gemini.provider.test.d.ts +2 -0
  70. package/dist/providers/gemini.provider.test.d.ts.map +1 -0
  71. package/dist/providers/gemini.provider.test.js +219 -0
  72. package/dist/providers/ollama.provider.d.ts +45 -0
  73. package/dist/providers/ollama.provider.d.ts.map +1 -0
  74. package/dist/providers/ollama.provider.js +232 -0
  75. package/dist/providers/ollama.provider.test.d.ts +2 -0
  76. package/dist/providers/ollama.provider.test.d.ts.map +1 -0
  77. package/dist/providers/ollama.provider.test.js +267 -0
  78. package/dist/providers/openai.provider.d.ts +57 -0
  79. package/dist/providers/openai.provider.d.ts.map +1 -0
  80. package/dist/providers/openai.provider.js +320 -0
  81. package/dist/providers/openai.provider.test.d.ts +2 -0
  82. package/dist/providers/openai.provider.test.d.ts.map +1 -0
  83. package/dist/providers/openai.provider.test.js +364 -0
  84. package/dist/tracking/token.tracker.d.ts +72 -0
  85. package/dist/tracking/token.tracker.d.ts.map +1 -0
  86. package/dist/tracking/token.tracker.js +222 -0
  87. package/dist/tracking/token.tracker.test.d.ts +2 -0
  88. package/dist/tracking/token.tracker.test.d.ts.map +1 -0
  89. package/dist/tracking/token.tracker.test.js +272 -0
  90. package/dist/vector/vector.service.d.ts +50 -0
  91. package/dist/vector/vector.service.d.ts.map +1 -0
  92. package/dist/vector/vector.service.js +163 -0
  93. package/package.json +60 -0
@@ -0,0 +1,108 @@
1
import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse, AIModelConfig } from './ai-enhanced.types';
import { AIContextManager } from './context/context.manager';
import { TokenTracker } from './tracking/token.tracker';
import { CacheService } from '@hazeljs/cache';
/**
 * Enhanced AI Service
 * Production-ready AI service with provider management, caching, and rate limiting
 */
export declare class AIEnhancedService {
    /** Registry of provider instances keyed by provider name. */
    private providers;
    /** Provider used when a call does not specify one (implementation defaults to 'openai'). */
    private defaultProvider;
    /** Conversation context; undefined until createContext() is called. */
    private contextManager?;
    /** Tracks token usage and enforces rate limits. */
    private tokenTracker;
    /** Optional cache backend; all caching is skipped when absent. */
    private cacheService?;
    /** Number of attempts executeWithRetry makes before giving up. */
    private retryAttempts;
    /** Base backoff delay in milliseconds (doubled on each retry). */
    private retryDelay;
    constructor(tokenTracker?: TokenTracker, cacheService?: CacheService);
    /**
     * Initialize AI providers from environment variables (API keys);
     * Ollama is registered unconditionally.
     */
    private initializeProviders;
    /**
     * Register a custom provider under its own `name` property.
     */
    registerProvider(provider: IAIProvider): void;
    /**
     * Set default provider. Throws if the provider is not registered.
     */
    setDefaultProvider(provider: AIProvider): void;
    /**
     * Create a context manager for conversation.
     * Replaces any previously created context on this instance.
     */
    createContext(maxTokens?: number): AIContextManager;
    /**
     * Get current context manager (undefined until createContext is called).
     */
    getContext(): AIContextManager | undefined;
    /**
     * Generate completion with retry logic and caching.
     * Caching only takes effect when `cacheKey` is supplied AND a cache
     * service was injected; throws on rate-limit rejection.
     */
    complete(request: AICompletionRequest, config?: {
        provider?: AIProvider;
        userId?: string;
        cacheKey?: string;
        cacheTTL?: number;
    }): Promise<AICompletionResponse>;
    /**
     * Generate streaming completion. Usage is tracked from the final chunk;
     * streaming responses are not cached.
     */
    streamComplete(request: AICompletionRequest, config?: {
        provider?: AIProvider;
        userId?: string;
    }): AsyncGenerator<AIStreamChunk>;
    /**
     * Generate embeddings. Mirrors complete() with a 24h default cache TTL.
     */
    embed(request: AIEmbeddingRequest, config?: {
        provider?: AIProvider;
        userId?: string;
        cacheKey?: string;
        cacheTTL?: number;
    }): Promise<AIEmbeddingResponse>;
    /**
     * Check if a provider is available (registered AND its own
     * isAvailable() check passes).
     */
    isProviderAvailable(provider: AIProvider): Promise<boolean>;
    /**
     * Get list of registered provider names (registration, not liveness).
     */
    getAvailableProviders(): AIProvider[];
    /**
     * Get token usage statistics — per-user when userId is given, global otherwise.
     */
    getTokenStats(userId?: string, days?: number): unknown;
    /**
     * Configure model settings. Currently validates provider existence only.
     */
    configureModel(config: AIModelConfig): void;
    /**
     * Get provider instance, falling back to the default provider.
     */
    private getProvider;
    /**
     * Execute function with retry logic (exponential backoff).
     */
    private executeWithRetry;
    /**
     * Generate cache key for completion request
     * (messages/model/temperature/maxTokens).
     */
    private generateCacheKey;
    /**
     * Generate cache key for embedding request (input/model).
     */
    private generateEmbeddingCacheKey;
    /**
     * Simple string hash function (non-cryptographic, 32-bit, base-36 output).
     */
    private hashString;
    /**
     * Estimate tokens for a request (rough estimation: ~4 chars per token
     * plus per-message overhead plus the completion budget).
     */
    private estimateRequestTokens;
    /**
     * Set retry configuration (attempt count, base delay in ms).
     */
    setRetryConfig(attempts: number, delay: number): void;
}
//# sourceMappingURL=ai-enhanced.service.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-enhanced.service.d.ts","sourceRoot":"","sources":["../src/ai-enhanced.service.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EACnB,aAAa,EACd,MAAM,qBAAqB,CAAC;AAO7B,OAAO,EAAE,gBAAgB,EAAE,MAAM,2BAA2B,CAAC;AAC7D,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AACxD,OAAO,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAG9C;;;GAGG;AACH,qBACa,iBAAiB;IAC5B,OAAO,CAAC,SAAS,CAA2C;IAC5D,OAAO,CAAC,eAAe,CAAwB;IAC/C,OAAO,CAAC,cAAc,CAAC,CAAmB;IAC1C,OAAO,CAAC,YAAY,CAAe;IACnC,OAAO,CAAC,YAAY,CAAC,CAAe;IACpC,OAAO,CAAC,aAAa,CAAa;IAClC,OAAO,CAAC,UAAU,CAAgB;gBAEtB,YAAY,CAAC,EAAE,YAAY,EAAE,YAAY,CAAC,EAAE,YAAY;IAOpE;;OAEG;IACH,OAAO,CAAC,mBAAmB;IA6C3B;;OAEG;IACH,gBAAgB,CAAC,QAAQ,EAAE,WAAW,GAAG,IAAI;IAK7C;;OAEG;IACH,kBAAkB,CAAC,QAAQ,EAAE,UAAU,GAAG,IAAI;IAQ9C;;OAEG;IACH,aAAa,CAAC,SAAS,CAAC,EAAE,MAAM,GAAG,gBAAgB;IAKnD;;OAEG;IACH,UAAU,IAAI,gBAAgB,GAAG,SAAS;IAI1C;;OAEG;IACG,QAAQ,CACZ,OAAO,EAAE,mBAAmB,EAC5B,MAAM,CAAC,EAAE;QACP,QAAQ,CAAC,EAAE,UAAU,CAAC;QACtB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;KACnB,GACA,OAAO,CAAC,oBAAoB,CAAC;IAgDhC;;OAEG;IACI,cAAc,CACnB,OAAO,EAAE,mBAAmB,EAC5B,MAAM,CAAC,EAAE;QACP,QAAQ,CAAC,EAAE,UAAU,CAAC;QACtB,MAAM,CAAC,EAAE,MAAM,CAAC;KACjB,GACA,cAAc,CAAC,aAAa,CAAC;IAmChC;;OAEG;IACG,KAAK,CACT,OAAO,EAAE,kBAAkB,EAC3B,MAAM,CAAC,EAAE;QACP,QAAQ,CAAC,EAAE,UAAU,CAAC;QACtB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;KACnB,GACA,OAAO,CAAC,mBAAmB,CAAC;IAwC/B;;OAEG;IACG,mBAAmB,CAAC,QAAQ,EAAE,UAAU,GAAG,OAAO,CAAC,OAAO,CAAC;IAQjE;;OAEG;IACH,qBAAqB,IAAI,UAAU,EAAE;IAIrC;;OAEG;IACH,aAAa,CAAC,MAAM,CAAC,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,GAAG,OAAO;IAOtD;;OAEG;IACH,cAAc,CAAC,MAAM,EAAE,aAAa,GAAG,IAAI;IAU3C;;OAEG;IACH,OAAO,CAAC,WAAW;IAWnB;;OAEG;YACW,gBAAgB;IAoB9B;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAUxB;;OAEG;IACH,OAAO,CAAC,yBAAyB;IAQjC;;OAEG;IACH,OAAO,CAAC,UAAU;IAUlB;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAe7B;;OAEG;IACH,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,KAAK,
EAAE,MAAM,GAAG,IAAI;CAKtD"}
@@ -0,0 +1,345 @@
1
+ "use strict";
2
+ var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
3
+ var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
4
+ if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
5
+ else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
6
+ return c > 3 && r && Object.defineProperty(target, key, r), r;
7
+ };
8
+ var __metadata = (this && this.__metadata) || function (k, v) {
9
+ if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
10
+ };
11
+ var __importDefault = (this && this.__importDefault) || function (mod) {
12
+ return (mod && mod.__esModule) ? mod : { "default": mod };
13
+ };
14
+ Object.defineProperty(exports, "__esModule", { value: true });
15
+ exports.AIEnhancedService = void 0;
16
+ const core_1 = require("@hazeljs/core");
17
+ const openai_provider_1 = require("./providers/openai.provider");
18
+ const anthropic_provider_1 = require("./providers/anthropic.provider");
19
+ const gemini_provider_1 = require("./providers/gemini.provider");
20
+ const cohere_provider_1 = require("./providers/cohere.provider");
21
+ const ollama_provider_1 = require("./providers/ollama.provider");
22
+ const context_manager_1 = require("./context/context.manager");
23
+ const token_tracker_1 = require("./tracking/token.tracker");
24
+ const cache_1 = require("@hazeljs/cache");
25
+ const core_2 = __importDefault(require("@hazeljs/core"));
26
/**
 * Enhanced AI Service
 * Production-ready AI service with provider management, caching, and rate limiting
 *
 * Responsibilities visible in this file:
 *  - keeps a registry of provider instances keyed by provider name
 *  - routes complete / streamComplete / embed calls to the chosen provider
 *  - enforces token rate limits via TokenTracker and records usage
 *  - optionally caches completion/embedding responses via CacheService
 *  - retries failed provider calls with exponential backoff
 */
let AIEnhancedService = class AIEnhancedService {
    /**
     * @param tokenTracker optional; a fresh TokenTracker is created when omitted.
     * @param cacheService optional; when absent every caching path is skipped.
     */
    constructor(tokenTracker, cacheService) {
        this.providers = new Map();
        this.defaultProvider = 'openai';
        this.retryAttempts = 3;
        this.retryDelay = 1000; // base backoff delay in ms (doubled each retry)
        this.tokenTracker = tokenTracker || new token_tracker_1.TokenTracker();
        this.cacheService = cacheService;
        this.initializeProviders();
        core_2.default.info('AI Enhanced Service initialized');
    }
    /**
     * Initialize AI providers.
     * Keyed providers (OpenAI/Anthropic/Gemini/Cohere) register only when their
     * API-key env var is set; Ollama registers unconditionally because it talks
     * to a local server. Errors are logged and swallowed so construction never
     * throws from here.
     */
    initializeProviders() {
        try {
            // Initialize OpenAI
            if (process.env.OPENAI_API_KEY) {
                this.providers.set('openai', new openai_provider_1.OpenAIProvider());
                core_2.default.info('OpenAI provider registered');
            }
            // Initialize Anthropic
            if (process.env.ANTHROPIC_API_KEY) {
                this.providers.set('anthropic', new anthropic_provider_1.AnthropicProvider());
                core_2.default.info('Anthropic provider registered');
            }
            // Initialize Gemini
            if (process.env.GEMINI_API_KEY) {
                this.providers.set('gemini', new gemini_provider_1.GeminiProvider());
                core_2.default.info('Gemini provider registered');
            }
            // Initialize Cohere
            if (process.env.COHERE_API_KEY) {
                this.providers.set('cohere', new cohere_provider_1.CohereProvider());
                core_2.default.info('Cohere provider registered');
            }
            // Initialize Ollama (always available if Ollama server is running)
            // Ollama doesn't require an API key, just a running server
            const ollamaProvider = new ollama_provider_1.OllamaProvider({
                baseURL: process.env.OLLAMA_BASE_URL,
                defaultModel: process.env.OLLAMA_DEFAULT_MODEL,
            });
            this.providers.set('ollama', ollamaProvider);
            core_2.default.info('Ollama provider registered (will check availability on first use)');
            // NOTE(review): unreachable — Ollama was registered unconditionally just
            // above, so providers.size is always >= 1 at this point.
            if (this.providers.size === 0) {
                core_2.default.warn('No AI providers configured. Set API keys in environment variables or start Ollama server.');
            }
        }
        catch (error) {
            core_2.default.error('Error initializing AI providers:', error);
        }
    }
    /**
     * Register a custom provider under its own `name` property.
     * Overwrites any existing provider registered under the same name.
     */
    registerProvider(provider) {
        this.providers.set(provider.name, provider);
        core_2.default.info(`Custom provider registered: ${provider.name}`);
    }
    /**
     * Set default provider used when a call does not specify one.
     * @throws Error when the provider has not been registered.
     */
    setDefaultProvider(provider) {
        if (!this.providers.has(provider)) {
            throw new Error(`Provider ${provider} is not registered`);
        }
        this.defaultProvider = provider;
        core_2.default.info(`Default provider set to: ${provider}`);
    }
    /**
     * Create a context manager for conversation.
     * Replaces any previously created context on this service instance.
     */
    createContext(maxTokens) {
        this.contextManager = new context_manager_1.AIContextManager(maxTokens);
        return this.contextManager;
    }
    /**
     * Get current context manager (undefined until createContext is called).
     */
    getContext() {
        return this.contextManager;
    }
    /**
     * Generate completion with retry logic and caching.
     * Order of operations: cache lookup -> rate-limit check -> provider call
     * (with retries) -> usage tracking -> cache write.
     * @throws Error when the token tracker rejects the request or all retries fail.
     */
    async complete(request, config) {
        const provider = this.getProvider(config?.provider);
        // NOTE(review): the generated fallback key is effectively dead — both
        // cache branches below additionally require config?.cacheKey, so caching
        // only happens when the caller supplies an explicit key. Confirm whether
        // the intent was `if (this.cacheService && cacheKey)`.
        const cacheKey = config?.cacheKey || this.generateCacheKey(request);
        // Check cache first
        if (this.cacheService && config?.cacheKey) {
            const cached = await this.cacheService.get(cacheKey);
            if (cached) {
                core_2.default.debug('Returning cached AI response');
                return cached;
            }
        }
        // Check rate limits
        const estimatedTokens = this.estimateRequestTokens(request);
        const limitCheck = await this.tokenTracker.checkLimits(config?.userId, estimatedTokens);
        if (!limitCheck.allowed) {
            throw new Error(`Rate limit exceeded: ${limitCheck.reason}`);
        }
        // Execute with retry logic
        const response = await this.executeWithRetry(async () => {
            return await provider.complete(request);
        });
        // Track token usage (only when the provider reported usage)
        if (response.usage) {
            this.tokenTracker.track({
                userId: config?.userId,
                promptTokens: response.usage.promptTokens,
                completionTokens: response.usage.completionTokens,
                totalTokens: response.usage.totalTokens,
                timestamp: Date.now(),
            }, request.model);
        }
        // Cache response
        if (this.cacheService && config?.cacheKey) {
            await this.cacheService.set(cacheKey, response, config.cacheTTL || 3600);
        }
        return response;
    }
    /**
     * Generate streaming completion.
     * Rate limits are checked up front; usage is tracked from the final chunk
     * (chunk.done && chunk.usage). Streaming responses are never cached.
     */
    async *streamComplete(request, config) {
        const provider = this.getProvider(config?.provider);
        // Check rate limits
        const estimatedTokens = this.estimateRequestTokens(request);
        const limitCheck = await this.tokenTracker.checkLimits(config?.userId, estimatedTokens);
        if (!limitCheck.allowed) {
            throw new Error(`Rate limit exceeded: ${limitCheck.reason}`);
        }
        try {
            for await (const chunk of provider.streamComplete(request)) {
                yield chunk;
                // Track final usage
                if (chunk.done && chunk.usage) {
                    this.tokenTracker.track({
                        userId: config?.userId,
                        promptTokens: chunk.usage.promptTokens,
                        completionTokens: chunk.usage.completionTokens,
                        totalTokens: chunk.usage.totalTokens,
                        timestamp: Date.now(),
                    }, request.model);
                }
            }
        }
        catch (error) {
            core_2.default.error('Streaming completion failed:', error);
            throw error;
        }
    }
    /**
     * Generate embeddings.
     * Mirrors complete() with completionTokens fixed to 0 and a 24h default
     * cache TTL. NOTE(review): unlike complete(), there is no rate-limit check
     * here — confirm whether embeddings should also be limited.
     */
    async embed(request, config) {
        const provider = this.getProvider(config?.provider);
        // NOTE(review): as in complete(), the generated fallback key is unused
        // because caching is gated on an explicit config.cacheKey.
        const cacheKey = config?.cacheKey || this.generateEmbeddingCacheKey(request);
        // Check cache first
        if (this.cacheService && config?.cacheKey) {
            const cached = await this.cacheService.get(cacheKey);
            if (cached) {
                core_2.default.debug('Returning cached embeddings');
                return cached;
            }
        }
        // Execute with retry logic
        const response = await this.executeWithRetry(async () => {
            return await provider.embed(request);
        });
        // Track token usage
        if (response.usage) {
            this.tokenTracker.track({
                userId: config?.userId,
                promptTokens: response.usage.promptTokens,
                completionTokens: 0,
                totalTokens: response.usage.totalTokens,
                timestamp: Date.now(),
            }, request.model);
        }
        // Cache response
        if (this.cacheService && config?.cacheKey) {
            await this.cacheService.set(cacheKey, response, config.cacheTTL || 86400); // 24 hours
        }
        return response;
    }
    /**
     * Check if a provider is available.
     * Returns false for unregistered names; otherwise defers to the provider's
     * own isAvailable() check (e.g. Ollama pinging its local server).
     */
    async isProviderAvailable(provider) {
        const providerInstance = this.providers.get(provider);
        if (!providerInstance) {
            return false;
        }
        return await providerInstance.isAvailable();
    }
    /**
     * Get list of registered provider names. This reflects registration, not
     * liveness — use isProviderAvailable() to actually probe a provider.
     */
    getAvailableProviders() {
        return Array.from(this.providers.keys());
    }
    /**
     * Get token usage statistics — per-user when userId is given, otherwise
     * global. Shape of the result is defined by TokenTracker.
     */
    getTokenStats(userId, days) {
        if (userId) {
            return this.tokenTracker.getUserStats(userId, days);
        }
        return this.tokenTracker.getGlobalStats(days);
    }
    /**
     * Configure model settings.
     * Currently only validates that the provider exists and logs; the actual
     * per-provider configuration is not implemented yet (see comment below).
     * @throws Error when config.provider is not registered.
     */
    configureModel(config) {
        const provider = this.providers.get(config.provider);
        if (!provider) {
            throw new Error(`Provider ${config.provider} not found`);
        }
        // Provider-specific configuration would go here
        core_2.default.info(`Model configured for provider: ${config.provider}`);
    }
    /**
     * Get provider instance, falling back to the default provider when no
     * name is given.
     * @throws Error when the resolved name is not registered.
     */
    getProvider(providerName) {
        const name = providerName || this.defaultProvider;
        const provider = this.providers.get(name);
        if (!provider) {
            throw new Error(`Provider ${name} is not registered or available`);
        }
        return provider;
    }
    /**
     * Execute function with retry logic.
     * Tries up to retryAttempts times, sleeping retryDelay * 2^(attempt-1) ms
     * between attempts; rethrows the last error when all attempts fail.
     */
    async executeWithRetry(fn) {
        let lastError;
        for (let attempt = 1; attempt <= this.retryAttempts; attempt++) {
            try {
                return await fn();
            }
            catch (error) {
                lastError = error instanceof Error ? error : new Error('Unknown error');
                core_2.default.warn(`Attempt ${attempt} failed:`, lastError.message);
                if (attempt < this.retryAttempts) {
                    const delay = this.retryDelay * Math.pow(2, attempt - 1); // Exponential backoff
                    await new Promise((resolve) => setTimeout(resolve, delay));
                }
            }
        }
        throw lastError || new Error('All retry attempts failed');
    }
    /**
     * Generate cache key for completion request.
     * Keyed on messages/model/temperature/maxTokens only — other request
     * fields do not affect the key.
     */
    generateCacheKey(request) {
        const key = JSON.stringify({
            messages: request.messages,
            model: request.model,
            temperature: request.temperature,
            maxTokens: request.maxTokens,
        });
        return `ai:completion:${this.hashString(key)}`;
    }
    /**
     * Generate cache key for embedding request (input + model only).
     */
    generateEmbeddingCacheKey(request) {
        const key = JSON.stringify({
            input: request.input,
            model: request.model,
        });
        return `ai:embedding:${this.hashString(key)}`;
    }
    /**
     * Simple string hash function: 31-multiplier rolling hash folded to a
     * 32-bit integer, returned base-36. Non-cryptographic — collisions are
     * possible but acceptable for cache keys.
     */
    hashString(str) {
        let hash = 0;
        for (let i = 0; i < str.length; i++) {
            const char = str.charCodeAt(i);
            hash = (hash << 5) - hash + char; // equivalent to hash * 31 + char
            hash = hash & hash; // Convert to 32-bit integer
        }
        return Math.abs(hash).toString(36);
    }
    /**
     * Estimate tokens for a request (rough estimation).
     * ~4 characters per token per message, plus 4 tokens of per-message
     * overhead, plus the requested (or assumed 1000) completion budget.
     */
    estimateRequestTokens(request) {
        let tokens = 0;
        for (const message of request.messages) {
            // Rough estimate: 1 token ≈ 4 characters
            tokens += Math.ceil(message.content.length / 4);
            tokens += 4; // Message overhead
        }
        // Add estimated completion tokens
        tokens += request.maxTokens || 1000;
        return tokens;
    }
    /**
     * Set retry configuration: attempt count and base backoff delay in ms.
     */
    setRetryConfig(attempts, delay) {
        this.retryAttempts = attempts;
        this.retryDelay = delay;
        core_2.default.info(`Retry config updated: ${attempts} attempts, ${delay}ms delay`);
    }
};
exports.AIEnhancedService = AIEnhancedService;
// tsc decorator emit: applies @Service() plus constructor param metadata and
// reassigns the export so consumers receive the decorated class.
exports.AIEnhancedService = AIEnhancedService = __decorate([
    (0, core_1.Service)(),
    __metadata("design:paramtypes", [token_tracker_1.TokenTracker, cache_1.CacheService])
], AIEnhancedService);
@@ -0,0 +1,2 @@
1
// Empty export marks this declaration file as a module with no public exports
// (the test file itself exports nothing).
export {};
//# sourceMappingURL=ai-enhanced.service.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-enhanced.service.test.d.ts","sourceRoot":"","sources":["../src/ai-enhanced.service.test.ts"],"names":[],"mappings":""}