@hazeljs/ai 0.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. package/README.md +496 -0
  2. package/dist/ai-enhanced.service.d.ts +108 -0
  3. package/dist/ai-enhanced.service.d.ts.map +1 -0
  4. package/dist/ai-enhanced.service.js +345 -0
  5. package/dist/ai-enhanced.types.d.ts +269 -0
  6. package/dist/ai-enhanced.types.d.ts.map +1 -0
  7. package/dist/ai-enhanced.types.js +2 -0
  8. package/dist/ai.decorator.d.ts +4 -0
  9. package/dist/ai.decorator.d.ts.map +1 -0
  10. package/dist/ai.decorator.js +57 -0
  11. package/dist/ai.module.d.ts +12 -0
  12. package/dist/ai.module.d.ts.map +1 -0
  13. package/dist/ai.module.js +44 -0
  14. package/dist/ai.service.d.ts +10 -0
  15. package/dist/ai.service.d.ts.map +1 -0
  16. package/dist/ai.service.js +261 -0
  17. package/dist/ai.types.d.ts +30 -0
  18. package/dist/ai.types.d.ts.map +1 -0
  19. package/dist/ai.types.js +2 -0
  20. package/dist/context/context.manager.d.ts +69 -0
  21. package/dist/context/context.manager.d.ts.map +1 -0
  22. package/dist/context/context.manager.js +168 -0
  23. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  24. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  25. package/dist/decorators/ai-function.decorator.js +80 -0
  26. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  27. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  28. package/dist/decorators/ai-validate.decorator.js +83 -0
  29. package/dist/index.d.ts +17 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +38 -0
  32. package/dist/providers/anthropic.provider.d.ts +48 -0
  33. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  34. package/dist/providers/anthropic.provider.js +194 -0
  35. package/dist/providers/cohere.provider.d.ts +57 -0
  36. package/dist/providers/cohere.provider.d.ts.map +1 -0
  37. package/dist/providers/cohere.provider.js +230 -0
  38. package/dist/providers/gemini.provider.d.ts +45 -0
  39. package/dist/providers/gemini.provider.d.ts.map +1 -0
  40. package/dist/providers/gemini.provider.js +180 -0
  41. package/dist/providers/ollama.provider.d.ts +45 -0
  42. package/dist/providers/ollama.provider.d.ts.map +1 -0
  43. package/dist/providers/ollama.provider.js +232 -0
  44. package/dist/providers/openai.provider.d.ts +47 -0
  45. package/dist/providers/openai.provider.d.ts.map +1 -0
  46. package/dist/providers/openai.provider.js +273 -0
  47. package/dist/tracking/token.tracker.d.ts +72 -0
  48. package/dist/tracking/token.tracker.d.ts.map +1 -0
  49. package/dist/tracking/token.tracker.js +222 -0
  50. package/dist/vector/vector.service.d.ts +50 -0
  51. package/dist/vector/vector.service.d.ts.map +1 -0
  52. package/dist/vector/vector.service.js +163 -0
  53. package/package.json +52 -0
@@ -0,0 +1,345 @@
1
"use strict";
// --- TypeScript compiler emit helpers (generated boilerplate) ---
// __decorate: applies a list of decorators to a class (c < 3 args) or to a
// method/property (c >= 3 args), preferring the reflect-metadata
// Reflect.decorate implementation when it is available.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// __metadata: records design-time type metadata (e.g. "design:paramtypes")
// via reflect-metadata; a no-op when Reflect.metadata is not polyfilled.
var __metadata = (this && this.__metadata) || function (k, v) {
    if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
// __importDefault: wraps a CommonJS module so it can be consumed through a
// default import (`mod.default`) regardless of whether it is an ES module.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
14
+ Object.defineProperty(exports, "__esModule", { value: true });
15
+ exports.AIEnhancedService = void 0;
16
+ const core_1 = require("@hazeljs/core");
17
+ const openai_provider_1 = require("./providers/openai.provider");
18
+ const anthropic_provider_1 = require("./providers/anthropic.provider");
19
+ const gemini_provider_1 = require("./providers/gemini.provider");
20
+ const cohere_provider_1 = require("./providers/cohere.provider");
21
+ const ollama_provider_1 = require("./providers/ollama.provider");
22
+ const context_manager_1 = require("./context/context.manager");
23
+ const token_tracker_1 = require("./tracking/token.tracker");
24
+ const cache_1 = require("@hazeljs/cache");
25
+ const core_2 = __importDefault(require("@hazeljs/core"));
26
/**
 * Enhanced AI Service
 * Production-ready AI service with provider management, caching, and rate limiting.
 * (Compiled CommonJS output; the authoring source is TypeScript.)
 */
let AIEnhancedService = class AIEnhancedService {
    /**
     * @param tokenTracker - Optional usage tracker; a fresh TokenTracker is
     *   created when none is injected.
     * @param cacheService - Optional cache used by complete()/embed(); caching
     *   is skipped entirely when absent.
     */
    constructor(tokenTracker, cacheService) {
        // Registered providers keyed by provider name ('openai', 'ollama', ...).
        this.providers = new Map();
        this.defaultProvider = 'openai';
        // Retry policy for provider calls; see executeWithRetry().
        this.retryAttempts = 3;
        this.retryDelay = 1000; // base delay in ms, doubled each attempt
        this.tokenTracker = tokenTracker || new token_tracker_1.TokenTracker();
        this.cacheService = cacheService;
        this.initializeProviders();
        core_2.default.info('AI Enhanced Service initialized');
    }
    /**
     * Initialize AI providers.
     * Each cloud provider is registered only when its API key is present in the
     * environment; Ollama is registered unconditionally (its availability is
     * checked lazily on first use).
     */
    initializeProviders() {
        try {
            // Initialize OpenAI
            if (process.env.OPENAI_API_KEY) {
                this.providers.set('openai', new openai_provider_1.OpenAIProvider());
                core_2.default.info('OpenAI provider registered');
            }
            // Initialize Anthropic
            if (process.env.ANTHROPIC_API_KEY) {
                this.providers.set('anthropic', new anthropic_provider_1.AnthropicProvider());
                core_2.default.info('Anthropic provider registered');
            }
            // Initialize Gemini
            if (process.env.GEMINI_API_KEY) {
                this.providers.set('gemini', new gemini_provider_1.GeminiProvider());
                core_2.default.info('Gemini provider registered');
            }
            // Initialize Cohere
            if (process.env.COHERE_API_KEY) {
                this.providers.set('cohere', new cohere_provider_1.CohereProvider());
                core_2.default.info('Cohere provider registered');
            }
            // Initialize Ollama (always available if Ollama server is running)
            // Ollama doesn't require an API key, just a running server
            const ollamaProvider = new ollama_provider_1.OllamaProvider({
                baseURL: process.env.OLLAMA_BASE_URL,
                defaultModel: process.env.OLLAMA_DEFAULT_MODEL,
            });
            this.providers.set('ollama', ollamaProvider);
            core_2.default.info('Ollama provider registered (will check availability on first use)');
            // NOTE(review): because Ollama is registered unconditionally above,
            // providers.size is always >= 1 here and this warning is unreachable
            // as written — TODO confirm whether it should check cloud providers only.
            if (this.providers.size === 0) {
                core_2.default.warn('No AI providers configured. Set API keys in environment variables or start Ollama server.');
            }
        }
        catch (error) {
            // Construction failures are logged, not rethrown: the service still
            // starts with whichever providers registered successfully.
            core_2.default.error('Error initializing AI providers:', error);
        }
    }
    /**
     * Register a custom provider under its own `name` (overwrites any existing
     * provider registered with the same name).
     */
    registerProvider(provider) {
        this.providers.set(provider.name, provider);
        core_2.default.info(`Custom provider registered: ${provider.name}`);
    }
    /**
     * Set the default provider used when a request's config omits `provider`.
     * @throws {Error} if the provider has not been registered.
     */
    setDefaultProvider(provider) {
        if (!this.providers.has(provider)) {
            throw new Error(`Provider ${provider} is not registered`);
        }
        this.defaultProvider = provider;
        core_2.default.info(`Default provider set to: ${provider}`);
    }
    /**
     * Create a context manager for conversation.
     * Replaces any previously created context — the service keeps a single
     * shared context slot.
     */
    createContext(maxTokens) {
        this.contextManager = new context_manager_1.AIContextManager(maxTokens);
        return this.contextManager;
    }
    /**
     * Get current context manager (undefined until createContext() is called).
     */
    getContext() {
        return this.contextManager;
    }
    /**
     * Generate completion with retry logic and caching.
     * Order of operations: cache lookup -> rate-limit check -> provider call
     * with retries -> usage tracking -> cache write.
     * @throws {Error} when the rate limit is exceeded or all retries fail.
     */
    async complete(request, config) {
        const provider = this.getProvider(config?.provider);
        // NOTE(review): the generated fallback key is dead code — both the cache
        // read and write below run only when the caller supplied config.cacheKey,
        // in which case cacheKey === config.cacheKey. TODO confirm intent.
        const cacheKey = config?.cacheKey || this.generateCacheKey(request);
        // Check cache first
        if (this.cacheService && config?.cacheKey) {
            const cached = await this.cacheService.get(cacheKey);
            if (cached) {
                core_2.default.debug('Returning cached AI response');
                return cached;
            }
        }
        // Check rate limits
        const estimatedTokens = this.estimateRequestTokens(request);
        const limitCheck = await this.tokenTracker.checkLimits(config?.userId, estimatedTokens);
        if (!limitCheck.allowed) {
            throw new Error(`Rate limit exceeded: ${limitCheck.reason}`);
        }
        // Execute with retry logic
        const response = await this.executeWithRetry(async () => {
            return await provider.complete(request);
        });
        // Track token usage (track() is not awaited — presumably synchronous;
        // TODO confirm against TokenTracker).
        if (response.usage) {
            this.tokenTracker.track({
                userId: config?.userId,
                promptTokens: response.usage.promptTokens,
                completionTokens: response.usage.completionTokens,
                totalTokens: response.usage.totalTokens,
                timestamp: Date.now(),
            }, request.model);
        }
        // Cache response
        if (this.cacheService && config?.cacheKey) {
            await this.cacheService.set(cacheKey, response, config.cacheTTL || 3600);
        }
        return response;
    }
    /**
     * Generate streaming completion.
     * Rate limits are checked up front; chunks are yielded as the provider
     * produces them, and usage is tracked from the final (done) chunk.
     * Note: streaming responses are never cached.
     */
    async *streamComplete(request, config) {
        const provider = this.getProvider(config?.provider);
        // Check rate limits
        const estimatedTokens = this.estimateRequestTokens(request);
        const limitCheck = await this.tokenTracker.checkLimits(config?.userId, estimatedTokens);
        if (!limitCheck.allowed) {
            throw new Error(`Rate limit exceeded: ${limitCheck.reason}`);
        }
        try {
            for await (const chunk of provider.streamComplete(request)) {
                yield chunk;
                // Track final usage
                if (chunk.done && chunk.usage) {
                    this.tokenTracker.track({
                        userId: config?.userId,
                        promptTokens: chunk.usage.promptTokens,
                        completionTokens: chunk.usage.completionTokens,
                        totalTokens: chunk.usage.totalTokens,
                        timestamp: Date.now(),
                    }, request.model);
                }
            }
        }
        catch (error) {
            core_2.default.error('Streaming completion failed:', error);
            throw error;
        }
    }
    /**
     * Generate embeddings.
     * Mirrors complete(): cache lookup -> provider call with retries ->
     * usage tracking (completionTokens is always 0 for embeddings) -> cache write.
     * NOTE(review): unlike complete(), no rate-limit check is performed here —
     * TODO confirm whether that is intentional.
     */
    async embed(request, config) {
        const provider = this.getProvider(config?.provider);
        // NOTE(review): same dead-fallback pattern as complete() — the generated
        // key is only ever equal to config.cacheKey when it is actually used.
        const cacheKey = config?.cacheKey || this.generateEmbeddingCacheKey(request);
        // Check cache first
        if (this.cacheService && config?.cacheKey) {
            const cached = await this.cacheService.get(cacheKey);
            if (cached) {
                core_2.default.debug('Returning cached embeddings');
                return cached;
            }
        }
        // Execute with retry logic
        const response = await this.executeWithRetry(async () => {
            return await provider.embed(request);
        });
        // Track token usage
        if (response.usage) {
            this.tokenTracker.track({
                userId: config?.userId,
                promptTokens: response.usage.promptTokens,
                completionTokens: 0,
                totalTokens: response.usage.totalTokens,
                timestamp: Date.now(),
            }, request.model);
        }
        // Cache response
        if (this.cacheService && config?.cacheKey) {
            await this.cacheService.set(cacheKey, response, config.cacheTTL || 86400); // 24 hours
        }
        return response;
    }
    /**
     * Check if a provider is available.
     * Returns false for unregistered providers; otherwise delegates to the
     * provider's own isAvailable() probe.
     */
    async isProviderAvailable(provider) {
        const providerInstance = this.providers.get(provider);
        if (!providerInstance) {
            return false;
        }
        return await providerInstance.isAvailable();
    }
    /**
     * Get the list of registered provider names (registered, not necessarily
     * reachable — see isProviderAvailable()).
     */
    getAvailableProviders() {
        return Array.from(this.providers.keys());
    }
    /**
     * Get token usage statistics: per-user when userId is given, global otherwise.
     */
    getTokenStats(userId, days) {
        if (userId) {
            return this.tokenTracker.getUserStats(userId, days);
        }
        return this.tokenTracker.getGlobalStats(days);
    }
    /**
     * Configure model settings for a registered provider.
     * Currently only validates that the provider exists and logs; no settings
     * are actually applied yet.
     * @throws {Error} if the provider is not registered.
     */
    configureModel(config) {
        const provider = this.providers.get(config.provider);
        if (!provider) {
            throw new Error(`Provider ${config.provider} not found`);
        }
        // Provider-specific configuration would go here
        core_2.default.info(`Model configured for provider: ${config.provider}`);
    }
    /**
     * Get provider instance by name, falling back to the default provider.
     * @throws {Error} if the resolved provider is not registered.
     */
    getProvider(providerName) {
        const name = providerName || this.defaultProvider;
        const provider = this.providers.get(name);
        if (!provider) {
            throw new Error(`Provider ${name} is not registered or available`);
        }
        return provider;
    }
    /**
     * Execute a function with retry logic and exponential backoff
     * (retryDelay * 2^(attempt-1) ms between attempts).
     * Rethrows the last error once retryAttempts is exhausted.
     */
    async executeWithRetry(fn) {
        let lastError;
        for (let attempt = 1; attempt <= this.retryAttempts; attempt++) {
            try {
                return await fn();
            }
            catch (error) {
                lastError = error instanceof Error ? error : new Error('Unknown error');
                core_2.default.warn(`Attempt ${attempt} failed:`, lastError.message);
                if (attempt < this.retryAttempts) {
                    const delay = this.retryDelay * Math.pow(2, attempt - 1); // Exponential backoff
                    await new Promise((resolve) => setTimeout(resolve, delay));
                }
            }
        }
        throw lastError || new Error('All retry attempts failed');
    }
    /**
     * Generate a cache key for a completion request from the fields that
     * determine the response (messages, model, temperature, maxTokens).
     */
    generateCacheKey(request) {
        const key = JSON.stringify({
            messages: request.messages,
            model: request.model,
            temperature: request.temperature,
            maxTokens: request.maxTokens,
        });
        return `ai:completion:${this.hashString(key)}`;
    }
    /**
     * Generate a cache key for an embedding request (input + model).
     */
    generateEmbeddingCacheKey(request) {
        const key = JSON.stringify({
            input: request.input,
            model: request.model,
        });
        return `ai:embedding:${this.hashString(key)}`;
    }
    /**
     * Simple 32-bit string hash, rendered in base 36.
     * Non-cryptographic; collisions are possible but acceptable for cache keys.
     */
    hashString(str) {
        let hash = 0;
        for (let i = 0; i < str.length; i++) {
            const char = str.charCodeAt(i);
            hash = (hash << 5) - hash + char;
            hash = hash & hash; // Convert to 32-bit integer
        }
        return Math.abs(hash).toString(36);
    }
    /**
     * Estimate tokens for a request (rough estimation), used only for the
     * pre-flight rate-limit check — actual usage comes from provider responses.
     */
    estimateRequestTokens(request) {
        let tokens = 0;
        for (const message of request.messages) {
            // Rough estimate: 1 token ≈ 4 characters
            tokens += Math.ceil(message.content.length / 4);
            tokens += 4; // Message overhead
        }
        // Add estimated completion tokens
        tokens += request.maxTokens || 1000;
        return tokens;
    }
    /**
     * Set retry configuration (number of attempts and base backoff delay in ms).
     */
    setRetryConfig(attempts, delay) {
        this.retryAttempts = attempts;
        this.retryDelay = delay;
        core_2.default.info(`Retry config updated: ${attempts} attempts, ${delay}ms delay`);
    }
};
exports.AIEnhancedService = AIEnhancedService;
// Apply the @Injectable() decorator and record constructor parameter types
// for dependency injection (TokenTracker, CacheService).
exports.AIEnhancedService = AIEnhancedService = __decorate([
    (0, core_1.Injectable)(),
    __metadata("design:paramtypes", [token_tracker_1.TokenTracker, cache_1.CacheService])
], AIEnhancedService);
@@ -0,0 +1,269 @@
1
/**
 * AI Provider types
 */
export type AIProvider = 'openai' | 'anthropic' | 'gemini' | 'cohere' | 'ollama' | 'huggingface';
/**
 * AI model configuration
 */
export interface AIModelConfig {
    /**
     * Provider name
     */
    provider: AIProvider;
    /**
     * Model name
     */
    model: string;
    /**
     * API key
     */
    apiKey?: string;
    /**
     * Temperature (0-1)
     */
    temperature?: number;
    /**
     * Max tokens
     */
    maxTokens?: number;
    /**
     * Top P
     */
    topP?: number;
    /**
     * Enable streaming
     */
    streaming?: boolean;
    /**
     * Custom endpoint
     */
    endpoint?: string;
}
/**
 * AI message role
 */
export type AIMessageRole = 'system' | 'user' | 'assistant' | 'function' | 'tool';
/**
 * A single tool invocation requested by the model (OpenAI-style tool call).
 */
export interface AIToolCall {
    id: string;
    type: 'function';
    function: {
        name: string;
        /** JSON-encoded argument string; the caller is expected to parse it. */
        arguments: string;
    };
}
/**
 * A single chat message exchanged with the model.
 */
export interface AIMessage {
    role: AIMessageRole;
    content: string;
    name?: string;
    /** Links a 'tool'-role message back to the tool call it answers. */
    toolCallId?: string;
    /** Legacy single function-call form (pre-tools API). */
    functionCall?: {
        name: string;
        arguments: string;
    };
    toolCalls?: AIToolCall[];
}
/**
 * AI completion request
 */
export interface AICompletionRequest {
    messages: AIMessage[];
    model?: string;
    temperature?: number;
    maxTokens?: number;
    topP?: number;
    stream?: boolean;
    functions?: AIFunction[];
    functionCall?: 'auto' | 'none' | {
        name: string;
    };
}
/**
 * AI completion response
 */
export interface AICompletionResponse {
    id: string;
    content: string;
    role: AIMessageRole;
    model: string;
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
    functionCall?: {
        name: string;
        arguments: string;
    };
    toolCalls?: AIToolCall[];
    finishReason?: string;
}
/**
 * AI streaming chunk.
 * `content` accumulates the text so far, `delta` is this chunk's increment;
 * `usage` is only populated on the final chunk (done === true).
 */
export interface AIStreamChunk {
    id: string;
    content: string;
    delta: string;
    done: boolean;
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
}
/**
 * AI function definition (JSON-Schema-style parameter description).
 */
export interface AIFunction {
    name: string;
    description: string;
    parameters: {
        type: 'object';
        properties: Record<string, {
            type: string;
            description?: string;
            enum?: string[];
        }>;
        required?: string[];
    };
}
/**
 * AI embedding request
 */
export interface AIEmbeddingRequest {
    input: string | string[];
    model?: string;
}
/**
 * AI embedding response
 */
export interface AIEmbeddingResponse {
    embeddings: number[][];
    model: string;
    usage?: {
        promptTokens: number;
        totalTokens: number;
    };
}
/**
 * AI provider interface
 */
export interface IAIProvider {
    /**
     * Provider name
     */
    readonly name: AIProvider;
    /**
     * Generate completion
     */
    complete(request: AICompletionRequest): Promise<AICompletionResponse>;
    /**
     * Generate streaming completion
     */
    streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
    /**
     * Generate embeddings
     */
    embed(request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
    /**
     * Check if provider is available
     */
    isAvailable(): Promise<boolean>;
}
/**
 * Vector database types
 */
export type VectorDatabase = 'pinecone' | 'weaviate' | 'qdrant' | 'chroma';
/**
 * Vector store configuration
 */
export interface VectorStoreConfig {
    database: VectorDatabase;
    apiKey?: string;
    endpoint?: string;
    index?: string;
    namespace?: string;
}
/**
 * Vector document
 */
export interface VectorDocument {
    id: string;
    content: string;
    embedding?: number[];
    metadata?: Record<string, unknown>;
}
/**
 * Vector search request
 */
export interface VectorSearchRequest {
    query: string;
    topK?: number;
    filter?: Record<string, unknown>;
    namespace?: string;
}
/**
 * Vector search result
 */
export interface VectorSearchResult {
    id: string;
    content: string;
    score: number;
    metadata?: Record<string, unknown>;
}
/**
 * AI context manager contract (conversation window with a token budget).
 */
export interface AIContext {
    messages: AIMessage[];
    maxTokens: number;
    currentTokens: number;
    addMessage(message: AIMessage): void;
    getMessages(): AIMessage[];
    clear(): void;
    trimToLimit(): void;
}
/**
 * A single token-usage record.
 */
export interface TokenUsage {
    userId?: string;
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    cost?: number;
    /** Unix epoch milliseconds (Date.now()). */
    timestamp: number;
}
/**
 * Token limit configuration
 */
export interface TokenLimitConfig {
    maxTokensPerRequest?: number;
    maxTokensPerDay?: number;
    maxTokensPerMonth?: number;
    costPerToken?: number;
}
/**
 * AI function decorator options
 */
export interface AIFunctionOptions {
    provider: AIProvider;
    model: string;
    streaming?: boolean;
    temperature?: number;
    maxTokens?: number;
    systemPrompt?: string;
}
/**
 * AI validation options
 */
export interface AIValidationOptions {
    provider: AIProvider;
    model?: string;
    instruction: string;
    failOnInvalid?: boolean;
}
//# sourceMappingURL=ai-enhanced.types.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-enhanced.types.d.ts","sourceRoot":"","sources":["../src/ai-enhanced.types.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,MAAM,UAAU,GAAG,QAAQ,GAAG,WAAW,GAAG,QAAQ,GAAG,QAAQ,GAAG,QAAQ,GAAG,aAAa,CAAC;AAEjG;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B;;OAEG;IACH,QAAQ,EAAE,UAAU,CAAC;IAErB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IAEnB;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB;;OAEG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB;AAED;;GAEG;AACH,MAAM,MAAM,aAAa,GAAG,QAAQ,GAAG,MAAM,GAAG,WAAW,GAAG,UAAU,GAAG,MAAM,CAAC;AAElF;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;CACH;AAED,MAAM,WAAW,SAAS;IACxB,IAAI,EAAE,aAAa,CAAC;IACpB,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,YAAY,CAAC,EAAE;QACb,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,SAAS,CAAC,EAAE,UAAU,EAAE,CAAC;CAC1B;AAED;;GAEG;AACH,MAAM,WAAW,mBAAmB;IAClC,QAAQ,EAAE,SAAS,EAAE,CAAC;IACtB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,SAAS,CAAC,EAAE,UAAU,EAAE,CAAC;IACzB,YAAY,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG;QAAE,IAAI,EAAE,MAAM,CAAA;KAAE,CAAC;CACnD;AAED;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACnC,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,EAAE,aAAa,CAAC;IACpB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE;QACN,YAAY,EAAE,MAAM,CAAC;QACrB,gBAAgB,EAAE,MAAM,CAAC;QACzB,WAAW,EAAE,MAAM,CAAC;KACrB,CAAC;IACF,YAAY,CAAC,EAAE;QACb,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;IACF,SAAS,CAAC,EAAE,UAAU,EAAE,CAAC;IACzB,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,OAAO,CAAC;IACd,KAAK,CAAC,EAAE;QACN,YAAY,EAAE,MAAM,
CAAC;QACrB,gBAAgB,EAAE,MAAM,CAAC;QACzB,WAAW,EAAE,MAAM,CAAC;KACrB,CAAC;CACH;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE;QACV,IAAI,EAAE,QAAQ,CAAC;QACf,UAAU,EAAE,MAAM,CAChB,MAAM,EACN;YACE,IAAI,EAAE,MAAM,CAAC;YACb,WAAW,CAAC,EAAE,MAAM,CAAC;YACrB,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;SACjB,CACF,CAAC;QACF,QAAQ,CAAC,EAAE,MAAM,EAAE,CAAC;KACrB,CAAC;CACH;AAED;;GAEG;AACH,MAAM,WAAW,kBAAkB;IACjC,KAAK,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;IACzB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED;;GAEG;AACH,MAAM,WAAW,mBAAmB;IAClC,UAAU,EAAE,MAAM,EAAE,EAAE,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE;QACN,YAAY,EAAE,MAAM,CAAC;QACrB,WAAW,EAAE,MAAM,CAAC;KACrB,CAAC;CACH;AAED;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B;;OAEG;IACH,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAC;IAE1B;;OAEG;IACH,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC,CAAC;IAEtE;;OAEG;IACH,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC,CAAC;IAE5E;;OAEG;IACH,KAAK,CAAC,OAAO,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAAC;IAEjE;;OAEG;IACH,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC,CAAC;CACjC;AAED;;GAEG;AACH,MAAM,MAAM,cAAc,GAAG,UAAU,GAAG,UAAU,GAAG,QAAQ,GAAG,QAAQ,CAAC;AAE3E;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAChC,QAAQ,EAAE,cAAc,CAAC;IACzB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC7B,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,CAAC,EAAE,MAAM,EAAE,CAAC;IACrB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACpC;AAED;;GAEG;AACH,MAAM,WAAW,mBAAmB;IAClC,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACjC,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,kBAAkB;IACjC,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACpC;AAED;;GAEG;AACH,MAAM,WAAW,SAAS;IACxB,QAAQ,EAAE,SAAS,EAAE,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,aAAa,EAAE,MAAM,CAAC;IAEtB,UAAU,CAAC,OAAO,EAAE,SAAS,GAAG,IAAI,C
AAC;IACrC,WAAW,IAAI,SAAS,EAAE,CAAC;IAC3B,KAAK,IAAI,IAAI,CAAC;IACd,WAAW,IAAI,IAAI,CAAC;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,CAAC;IACrB,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,SAAS,EAAE,MAAM,CAAC;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC/B,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAChC,QAAQ,EAAE,UAAU,CAAC;IACrB,KAAK,EAAE,MAAM,CAAC;IACd,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,MAAM,WAAW,mBAAmB;IAClC,QAAQ,EAAE,UAAU,CAAC;IACrB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,WAAW,EAAE,MAAM,CAAC;IACpB,aAAa,CAAC,EAAE,OAAO,CAAC;CACzB"}
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,4 @@
1
import { AITaskConfig } from './ai.types';
import 'reflect-metadata';
/**
 * Method decorator that replaces the decorated method with an AI-powered
 * implementation driven by the given task configuration.
 * The host class must have an injected `aiService` property at call time.
 */
export declare function AITask(config: AITaskConfig): MethodDecorator;
//# sourceMappingURL=ai.decorator.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai.decorator.d.ts","sourceRoot":"","sources":["../src/ai.decorator.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,YAAY,CAAC;AAC1C,OAAO,kBAAkB,CAAC;AAM1B,wBAAgB,MAAM,CAAC,MAAM,EAAE,YAAY,GAAG,eAAe,CA2D5D"}
@@ -0,0 +1,57 @@
1
"use strict";
// TypeScript compiler emit helper: wraps a CommonJS module so it can be
// consumed through a default import (`mod.default`).
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.AITask = AITask;
require("reflect-metadata");
const core_1 = __importDefault(require("@hazeljs/core"));
// Reflect-metadata key under which the AITask configuration is stored
// per decorated method.
const AI_TASK_METADATA_KEY = 'hazel:ai-task';
10
/**
 * Method decorator factory: stores the task configuration as metadata on the
 * decorated method, then swaps the method body for an async wrapper that
 * delegates execution to the host instance's injected `aiService`.
 *
 * The wrapper returns the task's stream (when streaming is enabled and one is
 * produced) or its data, and rethrows any failure wrapped in a descriptive
 * Error.
 */
function AITask(config) {
    return (target, propertyKey, descriptor) => {
        // Persist the configuration so the wrapper can read it back at call time.
        Reflect.defineMetadata(AI_TASK_METADATA_KEY, config, target, propertyKey);
        // Must stay a `function` expression: the wrapper reads `this.aiService`
        // from the instance the method is invoked on.
        descriptor.value = async function (...args) {
            try {
                const storedConfig = Reflect.getMetadata(AI_TASK_METADATA_KEY, target, propertyKey);
                if (!storedConfig) {
                    throw new Error('AI task configuration not found');
                }
                // The host class is expected to have AIService injected.
                const service = this.aiService;
                if (!service) {
                    throw new Error('AI service not found. Make sure to inject AIService in the constructor.');
                }
                core_1.default.debug('Executing AI task with config:', {
                    name: storedConfig.name,
                    stream: storedConfig.stream,
                });
                // Only the first argument of the original call is forwarded as input.
                const outcome = await service.executeTask(storedConfig, args[0]);
                // Streaming mode: hand the stream straight back to the caller.
                if (storedConfig.stream && outcome.stream) {
                    core_1.default.debug('Returning stream from AI task');
                    return outcome.stream;
                }
                // Task-level failures are surfaced as thrown errors.
                if (outcome.error) {
                    core_1.default.error('AI task error:', outcome.error);
                    throw new Error(outcome.error);
                }
                core_1.default.debug('Returning data from AI task:', outcome.data);
                return outcome.data;
            }
            catch (err) {
                core_1.default.error('AI task execution failed:', err);
                throw new Error(err instanceof Error
                    ? `AI task execution failed: ${err.message}`
                    : 'AI task execution failed: Unknown error');
            }
        };
        return descriptor;
    };
}