@hazeljs/ai 0.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +496 -0
  2. package/dist/ai-enhanced.service.d.ts +108 -0
  3. package/dist/ai-enhanced.service.d.ts.map +1 -0
  4. package/dist/ai-enhanced.service.js +345 -0
  5. package/dist/ai-enhanced.types.d.ts +269 -0
  6. package/dist/ai-enhanced.types.d.ts.map +1 -0
  7. package/dist/ai-enhanced.types.js +2 -0
  8. package/dist/ai.decorator.d.ts +4 -0
  9. package/dist/ai.decorator.d.ts.map +1 -0
  10. package/dist/ai.decorator.js +57 -0
  11. package/dist/ai.module.d.ts +12 -0
  12. package/dist/ai.module.d.ts.map +1 -0
  13. package/dist/ai.module.js +44 -0
  14. package/dist/ai.service.d.ts +10 -0
  15. package/dist/ai.service.d.ts.map +1 -0
  16. package/dist/ai.service.js +261 -0
  17. package/dist/ai.types.d.ts +30 -0
  18. package/dist/ai.types.d.ts.map +1 -0
  19. package/dist/ai.types.js +2 -0
  20. package/dist/context/context.manager.d.ts +69 -0
  21. package/dist/context/context.manager.d.ts.map +1 -0
  22. package/dist/context/context.manager.js +168 -0
  23. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  24. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  25. package/dist/decorators/ai-function.decorator.js +80 -0
  26. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  27. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  28. package/dist/decorators/ai-validate.decorator.js +83 -0
  29. package/dist/index.d.ts +17 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +38 -0
  32. package/dist/providers/anthropic.provider.d.ts +48 -0
  33. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  34. package/dist/providers/anthropic.provider.js +194 -0
  35. package/dist/providers/cohere.provider.d.ts +57 -0
  36. package/dist/providers/cohere.provider.d.ts.map +1 -0
  37. package/dist/providers/cohere.provider.js +230 -0
  38. package/dist/providers/gemini.provider.d.ts +45 -0
  39. package/dist/providers/gemini.provider.d.ts.map +1 -0
  40. package/dist/providers/gemini.provider.js +180 -0
  41. package/dist/providers/ollama.provider.d.ts +45 -0
  42. package/dist/providers/ollama.provider.d.ts.map +1 -0
  43. package/dist/providers/ollama.provider.js +232 -0
  44. package/dist/providers/openai.provider.d.ts +47 -0
  45. package/dist/providers/openai.provider.d.ts.map +1 -0
  46. package/dist/providers/openai.provider.js +273 -0
  47. package/dist/tracking/token.tracker.d.ts +72 -0
  48. package/dist/tracking/token.tracker.d.ts.map +1 -0
  49. package/dist/tracking/token.tracker.js +222 -0
  50. package/dist/vector/vector.service.d.ts +50 -0
  51. package/dist/vector/vector.service.d.ts.map +1 -0
  52. package/dist/vector/vector.service.js +163 -0
  53. package/package.json +52 -0
package/README.md ADDED
@@ -0,0 +1,496 @@
1
+ # @hazeljs/ai
2
+
3
+ **AI Integration Module for HazelJS - OpenAI, Anthropic, Gemini, Cohere, and Ollama Support**
4
+
5
+ Build AI-powered applications with first-class LLM integration, streaming support, and decorator-based APIs.
6
+
7
+ [![npm version](https://img.shields.io/npm/v/@hazeljs/ai.svg)](https://www.npmjs.com/package/@hazeljs/ai)
8
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
9
+
10
+ ## Features
11
+
12
+ - 🤖 **Multiple Providers** - OpenAI, Anthropic, Gemini, Cohere, Ollama
13
+ - 🎨 **Decorator-Based API** - `@AITask` decorator for clean integration
14
+ - 📡 **Streaming Support** - Real-time response streaming
15
+ - 🔄 **Retry Logic** - Automatic retries with exponential backoff
16
+ - 💾 **Response Caching** - Built-in caching with @hazeljs/cache
17
+ - 🎯 **Type Safety** - Full TypeScript support with output types
18
+ - 🔧 **Flexible Configuration** - Per-task or global configuration
19
+ - 📊 **Token Tracking** - Monitor usage and costs
20
+
21
+ ## Installation
22
+
23
+ ```bash
24
+ npm install @hazeljs/ai
25
+ ```
26
+
27
+ ### Peer Dependencies
28
+
29
+ Install the provider(s) you want to use:
30
+
31
+ ```bash
32
+ # OpenAI
33
+ npm install openai
34
+
35
+ # Anthropic
36
+ npm install @anthropic-ai/sdk
37
+
38
+ # Google Gemini
39
+ npm install @google/generative-ai
40
+
41
+ # Cohere
42
+ npm install cohere-ai
43
+
44
+ # Ollama (local LLMs)
45
+ npm install ollama
46
+ ```
47
+
48
+ ## Quick Start
49
+
50
+ ### Basic Usage with Decorator
51
+
52
+ ```typescript
53
+ import { Injectable } from '@hazeljs/core';
54
+ import { AIService, AITask } from '@hazeljs/ai';
55
+
56
+ @Injectable()
57
+ export class ChatService {
58
+ constructor(private aiService: AIService) {}
59
+
60
+ @AITask({
61
+ name: 'chat',
62
+ prompt: 'You are a helpful assistant. Respond to: {{input}}',
63
+ provider: 'openai',
64
+ model: 'gpt-4',
65
+ outputType: 'string',
66
+ })
67
+ async chat(message: string): Promise<string> {
68
+ return message; // Decorator handles AI execution
69
+ }
70
+ }
71
+
72
+ // Usage
73
+ const response = await chatService.chat('Hello, how are you?');
74
+ console.log(response);
75
+ ```
76
+
77
+ ### Direct AI Service Usage
78
+
79
+ ```typescript
80
+ import { AIEnhancedService } from '@hazeljs/ai';
81
+
82
+ const aiService = new AIEnhancedService();
83
+
84
+ const response = await aiService.complete({
85
+ messages: [
86
+ { role: 'system', content: 'You are a helpful assistant' },
87
+ { role: 'user', content: 'What is TypeScript?' }
88
+ ],
89
+ model: 'gpt-4',
90
+ provider: 'openai',
91
+ temperature: 0.7,
92
+ maxTokens: 500,
93
+ });
94
+
95
+ console.log(response.content);
96
+ console.log('Tokens used:', response.usage);
97
+ ```
98
+
99
+ ## Providers
100
+
101
+ ### OpenAI
102
+
103
+ ```typescript
104
+ import { AIEnhancedService } from '@hazeljs/ai';
105
+
106
+ const aiService = new AIEnhancedService();
107
+
108
+ // GPT-4
109
+ const response = await aiService.complete({
110
+ messages: [{ role: 'user', content: 'Hello!' }],
111
+ model: 'gpt-4',
112
+ provider: 'openai',
113
+ });
114
+
115
+ // GPT-3.5 Turbo
116
+ const response2 = await aiService.complete({
117
+ messages: [{ role: 'user', content: 'Hello!' }],
118
+ model: 'gpt-3.5-turbo',
119
+ provider: 'openai',
120
+ });
121
+ ```
122
+
123
+ ### Anthropic Claude
124
+
125
+ ```typescript
126
+ const response = await aiService.complete({
127
+ messages: [{ role: 'user', content: 'Explain quantum computing' }],
128
+ model: 'claude-3-opus-20240229',
129
+ provider: 'anthropic',
130
+ maxTokens: 1000,
131
+ });
132
+ ```
133
+
134
+ ### Google Gemini
135
+
136
+ ```typescript
137
+ const response = await aiService.complete({
138
+ messages: [{ role: 'user', content: 'Write a poem' }],
139
+ model: 'gemini-pro',
140
+ provider: 'gemini',
141
+ });
142
+ ```
143
+
144
+ ### Cohere
145
+
146
+ ```typescript
147
+ const response = await aiService.complete({
148
+ messages: [{ role: 'user', content: 'Summarize this text' }],
149
+ model: 'command',
150
+ provider: 'cohere',
151
+ });
152
+ ```
153
+
154
+ ### Ollama (Local LLMs)
155
+
156
+ ```typescript
157
+ const response = await aiService.complete({
158
+ messages: [{ role: 'user', content: 'Hello!' }],
159
+ model: 'llama2',
160
+ provider: 'ollama',
161
+ baseURL: 'http://localhost:11434',
162
+ });
163
+ ```
164
+
165
+ ## Streaming
166
+
167
+ ```typescript
168
+ import { AIEnhancedService } from '@hazeljs/ai';
169
+
170
+ const aiService = new AIEnhancedService();
171
+
172
+ // Stream responses in real-time
173
+ for await (const chunk of aiService.streamComplete({
174
+ messages: [{ role: 'user', content: 'Tell me a long story' }],
175
+ provider: 'openai',
176
+ model: 'gpt-4',
177
+ })) {
178
+ process.stdout.write(chunk.delta);
179
+ }
180
+ ```
181
+
182
+ ### Streaming with Decorator
183
+
184
+ ```typescript
185
+ @AITask({
186
+ name: 'stream-chat',
187
+ prompt: 'You are a storyteller. Tell a story about: {{topic}}',
188
+ provider: 'openai',
189
+ model: 'gpt-4',
190
+ stream: true,
191
+ })
192
+ async *streamStory(topic: string): AsyncGenerator<string> {
193
+ return topic; // Returns async generator
194
+ }
195
+
196
+ // Usage
197
+ for await (const chunk of chatService.streamStory('dragons')) {
198
+ console.log(chunk);
199
+ }
200
+ ```
201
+
202
+ ## Advanced Features
203
+
204
+ ### Response Caching
205
+
206
+ ```typescript
207
+ import { AITask } from '@hazeljs/ai';
208
+
209
+ @AITask({
210
+ name: 'cached-completion',
211
+ prompt: 'Explain {{concept}}',
212
+ provider: 'openai',
213
+ model: 'gpt-4',
214
+ cache: {
215
+ enabled: true,
216
+ ttl: 3600, // 1 hour
217
+ key: 'explain-{{concept}}',
218
+ },
219
+ })
220
+ async explainConcept(concept: string): Promise<string> {
221
+ return concept;
222
+ }
223
+ ```
224
+
225
+ ### Retry Logic
226
+
227
+ ```typescript
228
+ const response = await aiService.complete({
229
+ messages: [{ role: 'user', content: 'Hello' }],
230
+ provider: 'openai',
231
+ model: 'gpt-4',
232
+ retry: {
233
+ maxRetries: 3,
234
+ initialDelay: 1000,
235
+ maxDelay: 10000,
236
+ backoffMultiplier: 2,
237
+ },
238
+ });
239
+ ```
240
+
241
+ ### Output Type Validation
242
+
243
+ ```typescript
244
+ interface UserProfile {
245
+ name: string;
246
+ age: number;
247
+ interests: string[];
248
+ }
249
+
250
+ @AITask({
251
+ name: 'extract-profile',
252
+ prompt: 'Extract user profile from: {{text}}',
253
+ provider: 'openai',
254
+ model: 'gpt-4',
255
+ outputType: 'json',
256
+ })
257
+ async extractProfile(text: string): Promise<UserProfile> {
258
+ return text;
259
+ }
260
+
261
+ const profile = await service.extractProfile('John is 25 and loves coding');
262
+ console.log(profile.name); // Type-safe!
263
+ ```
264
+
265
+ ### Function Calling
266
+
267
+ ```typescript
268
+ const response = await aiService.complete({
269
+ messages: [{ role: 'user', content: 'What is the weather in NYC?' }],
270
+ provider: 'openai',
271
+ model: 'gpt-4',
272
+ functions: [
273
+ {
274
+ name: 'get_weather',
275
+ description: 'Get the current weather in a location',
276
+ parameters: {
277
+ type: 'object',
278
+ properties: {
279
+ location: {
280
+ type: 'string',
281
+ description: 'The city and state, e.g. San Francisco, CA',
282
+ },
283
+ unit: { type: 'string', enum: ['celsius', 'fahrenheit'] },
284
+ },
285
+ required: ['location'],
286
+ },
287
+ },
288
+ ],
289
+ functionCall: 'auto',
290
+ });
291
+
292
+ if (response.functionCall) {
293
+ console.log('Function:', response.functionCall.name);
294
+ console.log('Arguments:', response.functionCall.arguments);
295
+ }
296
+ ```
297
+
298
+ ## Configuration
299
+
300
+ ### Global Configuration
301
+
302
+ ```typescript
303
+ import { AIModule } from '@hazeljs/ai';
304
+
305
+ @HazelModule({
306
+ imports: [
307
+ AIModule.forRoot({
308
+ providers: {
309
+ openai: {
310
+ apiKey: process.env.OPENAI_API_KEY,
311
+ organization: process.env.OPENAI_ORG,
312
+ },
313
+ anthropic: {
314
+ apiKey: process.env.ANTHROPIC_API_KEY,
315
+ },
316
+ gemini: {
317
+ apiKey: process.env.GEMINI_API_KEY,
318
+ },
319
+ },
320
+ defaultProvider: 'openai',
321
+ defaultModel: 'gpt-4',
322
+ cache: {
323
+ enabled: true,
324
+ ttl: 3600,
325
+ },
326
+ }),
327
+ ],
328
+ })
329
+ export class AppModule {}
330
+ ```
331
+
332
+ ### Per-Task Configuration
333
+
334
+ ```typescript
335
+ @AITask({
336
+ name: 'custom-task',
337
+ prompt: 'Process: {{input}}',
338
+ provider: 'openai',
339
+ model: 'gpt-4',
340
+ temperature: 0.7,
341
+ maxTokens: 1000,
342
+ topP: 0.9,
343
+ frequencyPenalty: 0.5,
344
+ presencePenalty: 0.5,
345
+ stop: ['\n\n'],
346
+ })
347
+ async processInput(input: string): Promise<string> {
348
+ return input;
349
+ }
350
+ ```
351
+
352
+ ## Use Cases
353
+
354
+ ### Chatbot
355
+
356
+ ```typescript
357
+ @Injectable()
358
+ export class ChatbotService {
359
+ private conversationHistory: Array<{ role: string; content: string }> = [];
+
+ constructor(private aiService: AIService) {}
360
+
361
+ @AITask({
362
+ name: 'chat',
363
+ provider: 'openai',
364
+ model: 'gpt-4',
365
+ })
366
+ async chat(message: string): Promise<string> {
367
+ this.conversationHistory.push({ role: 'user', content: message });
368
+
369
+ const response = await this.aiService.complete({
370
+ messages: this.conversationHistory,
371
+ provider: 'openai',
372
+ model: 'gpt-4',
373
+ });
374
+
375
+ this.conversationHistory.push({
376
+ role: 'assistant',
377
+ content: response.content
378
+ });
379
+
380
+ return response.content;
381
+ }
382
+ }
383
+ ```
384
+
385
+ ### Content Generation
386
+
387
+ ```typescript
388
+ @Injectable()
389
+ export class ContentService {
390
+ @AITask({
391
+ name: 'generate-blog',
392
+ prompt: `Write a blog post about {{topic}}.
393
+
394
+ Requirements:
395
+ - Length: {{length}} words
396
+ - Tone: {{tone}}
397
+ - Include SEO keywords: {{keywords}}`,
398
+ provider: 'openai',
399
+ model: 'gpt-4',
400
+ outputType: 'string',
401
+ })
402
+ async generateBlogPost(
403
+ topic: string,
404
+ length: number,
405
+ tone: string,
406
+ keywords: string[]
407
+ ): Promise<string> {
408
+ return topic;
409
+ }
410
+ }
411
+ ```
412
+
413
+ ### Data Extraction
414
+
415
+ ```typescript
416
+ interface ExtractedData {
417
+ entities: string[];
418
+ sentiment: 'positive' | 'negative' | 'neutral';
419
+ summary: string;
420
+ }
421
+
422
+ @Injectable()
423
+ export class AnalysisService {
424
+ @AITask({
425
+ name: 'analyze-text',
426
+ prompt: `Analyze the following text and extract:
427
+ 1. Named entities (people, places, organizations)
428
+ 2. Overall sentiment
429
+ 3. Brief summary
430
+
431
+ Text: {{text}}
432
+
433
+ Return as JSON.`,
434
+ provider: 'openai',
435
+ model: 'gpt-4',
436
+ outputType: 'json',
437
+ })
438
+ async analyzeText(text: string): Promise<ExtractedData> {
439
+ return text;
440
+ }
441
+ }
442
+ ```
443
+
444
+ ## API Reference
445
+
446
+ ### AIEnhancedService
447
+
448
+ ```typescript
449
+ class AIEnhancedService {
450
+ complete(options: AICompletionOptions): Promise<AIResponse>;
451
+ streamComplete(options: AICompletionOptions): AsyncGenerator<AIStreamChunk>;
452
+ embed(text: string, options?: EmbedOptions): Promise<number[]>;
453
+ }
454
+ ```
455
+
456
+ ### @AITask Decorator
457
+
458
+ ```typescript
459
+ @AITask({
460
+ name: string;
461
+ prompt?: string;
462
+ provider: 'openai' | 'anthropic' | 'gemini' | 'cohere' | 'ollama';
463
+ model: string;
464
+ outputType?: 'string' | 'json' | 'number' | 'boolean';
465
+ stream?: boolean;
466
+ temperature?: number;
467
+ maxTokens?: number;
468
+ cache?: CacheOptions;
469
+ retry?: RetryOptions;
470
+ })
471
+ ```
472
+
473
+ ## Examples
474
+
475
+ See the [examples](../../example/src/ai) directory for complete working examples.
476
+
477
+ ## Testing
478
+
479
+ ```bash
480
+ npm test
481
+ ```
482
+
483
+ ## Contributing
484
+
485
+ Contributions are welcome! Please read our [Contributing Guide](../../CONTRIBUTING.md) for details.
486
+
487
+ ## License
488
+
489
+ MIT © [HazelJS](https://hazeljs.com)
490
+
491
+ ## Links
492
+
493
+ - [Documentation](https://hazeljs.com/docs/packages/ai)
494
+ - [GitHub](https://github.com/hazel-js/hazeljs)
495
+ - [Issues](https://github.com/hazel-js/hazeljs/issues)
496
+ - [Discord](https://discord.gg/hazeljs)
@@ -0,0 +1,108 @@
1
+ import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse, AIModelConfig } from './ai-enhanced.types';
2
+ import { AIContextManager } from './context/context.manager';
3
+ import { TokenTracker } from './tracking/token.tracker';
4
+ import { CacheService } from '@hazeljs/cache';
5
+ /**
6
+ * Enhanced AI Service
7
+ * Production-ready AI service with provider management, caching, and rate limiting
8
+ */
9
+ export declare class AIEnhancedService {
10
+ private providers;
11
+ private defaultProvider;
12
+ private contextManager?;
13
+ private tokenTracker;
14
+ private cacheService?;
15
+ private retryAttempts;
16
+ private retryDelay;
17
+ constructor(tokenTracker?: TokenTracker, cacheService?: CacheService);
18
+ /**
19
+ * Initialize AI providers
20
+ */
21
+ private initializeProviders;
22
+ /**
23
+ * Register a custom provider
24
+ */
25
+ registerProvider(provider: IAIProvider): void;
26
+ /**
27
+ * Set default provider
28
+ */
29
+ setDefaultProvider(provider: AIProvider): void;
30
+ /**
31
+ * Create a context manager for conversation
32
+ */
33
+ createContext(maxTokens?: number): AIContextManager;
34
+ /**
35
+ * Get current context manager
36
+ */
37
+ getContext(): AIContextManager | undefined;
38
+ /**
39
+ * Generate completion with retry logic and caching
40
+ */
41
+ complete(request: AICompletionRequest, config?: {
42
+ provider?: AIProvider;
43
+ userId?: string;
44
+ cacheKey?: string;
45
+ cacheTTL?: number;
46
+ }): Promise<AICompletionResponse>;
47
+ /**
48
+ * Generate streaming completion
49
+ */
50
+ streamComplete(request: AICompletionRequest, config?: {
51
+ provider?: AIProvider;
52
+ userId?: string;
53
+ }): AsyncGenerator<AIStreamChunk>;
54
+ /**
55
+ * Generate embeddings
56
+ */
57
+ embed(request: AIEmbeddingRequest, config?: {
58
+ provider?: AIProvider;
59
+ userId?: string;
60
+ cacheKey?: string;
61
+ cacheTTL?: number;
62
+ }): Promise<AIEmbeddingResponse>;
63
+ /**
64
+ * Check if a provider is available
65
+ */
66
+ isProviderAvailable(provider: AIProvider): Promise<boolean>;
67
+ /**
68
+ * Get list of available providers
69
+ */
70
+ getAvailableProviders(): AIProvider[];
71
+ /**
72
+ * Get token usage statistics
73
+ */
74
+ getTokenStats(userId?: string, days?: number): unknown;
75
+ /**
76
+ * Configure model settings
77
+ */
78
+ configureModel(config: AIModelConfig): void;
79
+ /**
80
+ * Get provider instance
81
+ */
82
+ private getProvider;
83
+ /**
84
+ * Execute function with retry logic
85
+ */
86
+ private executeWithRetry;
87
+ /**
88
+ * Generate cache key for completion request
89
+ */
90
+ private generateCacheKey;
91
+ /**
92
+ * Generate cache key for embedding request
93
+ */
94
+ private generateEmbeddingCacheKey;
95
+ /**
96
+ * Simple string hash function
97
+ */
98
+ private hashString;
99
+ /**
100
+ * Estimate tokens for a request (rough estimation)
101
+ */
102
+ private estimateRequestTokens;
103
+ /**
104
+ * Set retry configuration
105
+ */
106
+ setRetryConfig(attempts: number, delay: number): void;
107
+ }
108
+ //# sourceMappingURL=ai-enhanced.service.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-enhanced.service.d.ts","sourceRoot":"","sources":["../src/ai-enhanced.service.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EACnB,aAAa,EACd,MAAM,qBAAqB,CAAC;AAO7B,OAAO,EAAE,gBAAgB,EAAE,MAAM,2BAA2B,CAAC;AAC7D,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AACxD,OAAO,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAG9C;;;GAGG;AACH,qBACa,iBAAiB;IAC5B,OAAO,CAAC,SAAS,CAA2C;IAC5D,OAAO,CAAC,eAAe,CAAwB;IAC/C,OAAO,CAAC,cAAc,CAAC,CAAmB;IAC1C,OAAO,CAAC,YAAY,CAAe;IACnC,OAAO,CAAC,YAAY,CAAC,CAAe;IACpC,OAAO,CAAC,aAAa,CAAa;IAClC,OAAO,CAAC,UAAU,CAAgB;gBAEtB,YAAY,CAAC,EAAE,YAAY,EAAE,YAAY,CAAC,EAAE,YAAY;IAOpE;;OAEG;IACH,OAAO,CAAC,mBAAmB;IA6C3B;;OAEG;IACH,gBAAgB,CAAC,QAAQ,EAAE,WAAW,GAAG,IAAI;IAK7C;;OAEG;IACH,kBAAkB,CAAC,QAAQ,EAAE,UAAU,GAAG,IAAI;IAQ9C;;OAEG;IACH,aAAa,CAAC,SAAS,CAAC,EAAE,MAAM,GAAG,gBAAgB;IAKnD;;OAEG;IACH,UAAU,IAAI,gBAAgB,GAAG,SAAS;IAI1C;;OAEG;IACG,QAAQ,CACZ,OAAO,EAAE,mBAAmB,EAC5B,MAAM,CAAC,EAAE;QACP,QAAQ,CAAC,EAAE,UAAU,CAAC;QACtB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;KACnB,GACA,OAAO,CAAC,oBAAoB,CAAC;IAgDhC;;OAEG;IACI,cAAc,CACnB,OAAO,EAAE,mBAAmB,EAC5B,MAAM,CAAC,EAAE;QACP,QAAQ,CAAC,EAAE,UAAU,CAAC;QACtB,MAAM,CAAC,EAAE,MAAM,CAAC;KACjB,GACA,cAAc,CAAC,aAAa,CAAC;IAmChC;;OAEG;IACG,KAAK,CACT,OAAO,EAAE,kBAAkB,EAC3B,MAAM,CAAC,EAAE;QACP,QAAQ,CAAC,EAAE,UAAU,CAAC;QACtB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;KACnB,GACA,OAAO,CAAC,mBAAmB,CAAC;IAwC/B;;OAEG;IACG,mBAAmB,CAAC,QAAQ,EAAE,UAAU,GAAG,OAAO,CAAC,OAAO,CAAC;IAQjE;;OAEG;IACH,qBAAqB,IAAI,UAAU,EAAE;IAIrC;;OAEG;IACH,aAAa,CAAC,MAAM,CAAC,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,GAAG,OAAO;IAOtD;;OAEG;IACH,cAAc,CAAC,MAAM,EAAE,aAAa,GAAG,IAAI;IAU3C;;OAEG;IACH,OAAO,CAAC,WAAW;IAWnB;;OAEG;YACW,gBAAgB;IAoB9B;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAUxB;;OAEG;IACH,OAAO,CAAC,yBAAyB;IAQjC;;OAEG;IACH,OAAO,CAAC,UAAU;IAUlB;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAe7B;;OAEG;IACH,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,KAAK,
EAAE,MAAM,GAAG,IAAI;CAKtD"}