expo-ai-kit 0.1.16 → 0.1.18

This diff shows the changes between publicly available package versions as published to their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -29,7 +29,9 @@ On-device AI for Expo apps. Run language models locally—no API keys, no cloud,
  - **Native performance** — Built on Apple Foundation Models (iOS) and Google ML Kit Prompt API (Android)
  - **Multi-turn conversations** — Full conversation context support
  - **Streaming support** — Progressive token streaming for responsive UIs
- - **Simple API** — Just 3 functions: `isAvailable()`, `sendMessage()`, and `streamMessage()`
+ - **Simple API** — Core functions plus prompt helpers for common tasks
+ - **Prompt helpers** — Built-in `summarize()`, `translate()`, `rewrite()`, and more
+ - **Chat memory** — Built-in `ChatMemoryManager` for managing conversation history
 
  ## Requirements
 
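Taken together, the bullets above describe the core flow. A minimal sketch, using only the `isAvailable()`, `sendMessage()`, and `LLMMessage` exports whose signatures appear later in this diff:

```tsx
import { isAvailable, sendMessage, type LLMMessage } from 'expo-ai-kit';

// Not every platform has on-device AI (e.g. web), so gate on availability first.
if (await isAvailable()) {
  const messages: LLMMessage[] = [
    { role: 'user', content: 'Write a haiku about the sea.' },
  ];
  // sendMessage resolves with an LLMResponse; the generated text is on .text
  const { text } = await sendMessage(messages);
  console.log(text);
}
```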
@@ -125,7 +127,38 @@ console.log(response.text);
 
  ### Multi-turn Conversations
 
- For conversations with context, pass the full conversation history:
+ For conversations with context, use `ChatMemoryManager` to manage history:
+
+ ```tsx
+ import { ChatMemoryManager, streamMessage } from 'expo-ai-kit';
+
+ // Create a memory manager (handles history automatically)
+ const memory = new ChatMemoryManager({
+   maxTurns: 10,
+   systemPrompt: 'You are a helpful assistant.',
+ });
+
+ // Add user message and get response
+ memory.addUserMessage('My name is Alice.');
+ const { promise } = streamMessage(
+   memory.getAllMessages(),
+   (event) => console.log(event.accumulatedText)
+ );
+ const response = await promise;
+
+ // Store assistant response in memory
+ memory.addAssistantMessage(response.text);
+
+ // Continue the conversation (memory includes full history)
+ memory.addUserMessage('What is my name?');
+ const { promise: p2 } = streamMessage(
+   memory.getAllMessages(),
+   (event) => console.log(event.accumulatedText)
+ );
+ const reply = await p2;
+ // reply.text: "Your name is Alice."
+ ```
+
+ Or manually manage the conversation array:
 
  ```tsx
  import { sendMessage, type LLMMessage } from 'expo-ai-kit';
@@ -172,6 +205,39 @@ const { promise, stop } = streamMessage(
  await promise;
  ```
 
+ ### Prompt Helpers
+
+ Use built-in helpers for common AI tasks without crafting prompts:
+
+ ```tsx
+ import { summarize, translate, rewrite, extractKeyPoints, answerQuestion } from 'expo-ai-kit';
+
+ // Summarize text
+ const summary = await summarize(longArticle, { length: 'short', style: 'bullets' });
+
+ // Translate text
+ const translated = await translate('Hello, world!', { to: 'Spanish' });
+
+ // Rewrite in a different style
+ const formal = await rewrite('hey whats up', { style: 'formal' });
+
+ // Extract key points
+ const points = await extractKeyPoints(article, { maxPoints: 5 });
+
+ // Answer questions about content
+ const answer = await answerQuestion('What is the main topic?', documentText);
+ ```
+
+ All helpers also have streaming variants (`streamSummarize`, `streamTranslate`, etc.):
+
+ ```tsx
+ const { promise, stop } = streamSummarize(
+   longArticle,
+   (event) => setSummary(event.accumulatedText),
+   { style: 'bullets' }
+ );
+ ```
+
  ### Streaming with Cancel Button
 
  ```tsx
@@ -380,6 +446,190 @@ const response = await promise;
 
  ---
 
+ ### `summarize(text, options?)`
+
+ Summarizes text using on-device AI.
+
+ ```typescript
+ function summarize(text: string, options?: LLMSummarizeOptions): Promise<LLMResponse>
+ ```
+
+ | Parameter | Type | Description |
+ |-----------|------|-------------|
+ | `text` | `string` | Text to summarize |
+ | `options.length` | `'short' \| 'medium' \| 'long'` | Summary length (default: `'medium'`) |
+ | `options.style` | `'paragraph' \| 'bullets' \| 'tldr'` | Output format (default: `'paragraph'`) |
+
+ **Streaming:** `streamSummarize(text, onToken, options?)`
+
+ ---
+
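A short usage sketch for this entry. Note the helper resolves with an `LLMResponse` rather than a plain string; `longArticle` is a placeholder variable:

```tsx
import { summarize } from 'expo-ai-kit';

// summarize() resolves with an LLMResponse, not a string:
// the generated summary is on the .text property.
const result = await summarize(longArticle, { length: 'short', style: 'bullets' });
console.log(result.text);
```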
+ ### `translate(text, options)`
+
+ Translates text to another language.
+
+ ```typescript
+ function translate(text: string, options: LLMTranslateOptions): Promise<LLMResponse>
+ ```
+
+ | Parameter | Type | Description |
+ |-----------|------|-------------|
+ | `text` | `string` | Text to translate |
+ | `options.to` | `string` | Target language (required) |
+ | `options.from` | `string` | Source language (auto-detected if omitted) |
+ | `options.tone` | `'formal' \| 'informal' \| 'neutral'` | Translation tone (default: `'neutral'`) |
+
+ **Streaming:** `streamTranslate(text, onToken, options)`
+
+ ---
+
+ ### `rewrite(text, options)`
+
+ Rewrites text in a different style.
+
+ ```typescript
+ function rewrite(text: string, options: LLMRewriteOptions): Promise<LLMResponse>
+ ```
+
+ | Parameter | Type | Description |
+ |-----------|------|-------------|
+ | `text` | `string` | Text to rewrite |
+ | `options.style` | `string` | Target style (required) |
+
+ **Available styles:** `'formal'`, `'casual'`, `'professional'`, `'friendly'`, `'concise'`, `'detailed'`, `'simple'`, `'academic'`
+
+ **Streaming:** `streamRewrite(text, onToken, options)`
+
+ ---
+
+ ### `extractKeyPoints(text, options?)`
+
+ Extracts key points from text as bullet points.
+
+ ```typescript
+ function extractKeyPoints(text: string, options?: LLMExtractKeyPointsOptions): Promise<LLMResponse>
+ ```
+
+ | Parameter | Type | Description |
+ |-----------|------|-------------|
+ | `text` | `string` | Text to analyze |
+ | `options.maxPoints` | `number` | Maximum points to extract (default: `5`) |
+
+ **Streaming:** `streamExtractKeyPoints(text, onToken, options?)`
+
+ ---
+
+ ### `answerQuestion(question, context, options?)`
+
+ Answers a question based on provided context.
+
+ ```typescript
+ function answerQuestion(question: string, context: string, options?: LLMAnswerQuestionOptions): Promise<LLMResponse>
+ ```
+
+ | Parameter | Type | Description |
+ |-----------|------|-------------|
+ | `question` | `string` | Question to answer |
+ | `context` | `string` | Context/document to base answer on |
+ | `options.detail` | `'brief' \| 'medium' \| 'detailed'` | Answer detail level (default: `'medium'`) |
+
+ **Streaming:** `streamAnswerQuestion(question, context, onToken, options?)`
+
+ ---
+
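A hedged sketch pairing the two analysis helpers above on one document; `documentText` is a placeholder, and the pairing is illustrative rather than required:

```tsx
import { extractKeyPoints, answerQuestion } from 'expo-ai-kit';

// Both helpers take the raw document text as input.
const points = await extractKeyPoints(documentText, { maxPoints: 3 });
const answer = await answerQuestion('What are the key findings?', documentText, {
  detail: 'brief',
});

console.log(points.text); // bullet-point list
console.log(answer.text); // brief answer grounded in documentText
```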
+ ### `ChatMemoryManager`
+
+ Manages conversation history for stateless on-device AI models. Automatically handles turn limits and provides the full message array for each request.
+
+ ```typescript
+ class ChatMemoryManager {
+   constructor(options?: ChatMemoryOptions);
+
+   addUserMessage(content: string): void;
+   addAssistantMessage(content: string): void;
+   addMessage(message: LLMMessage): void;
+
+   getAllMessages(): LLMMessage[];
+   getMessages(): LLMMessage[];
+   getPrompt(): string;
+   getSnapshot(): ChatMemorySnapshot;
+   getTurnCount(): number;
+
+   setSystemPrompt(prompt: string | undefined): void;
+   getSystemPrompt(): string | undefined;
+   setMaxTurns(maxTurns: number): void;
+
+   clear(): void;
+   reset(): void;
+ }
+ ```
+
+ | Option | Type | Description |
+ |--------|------|-------------|
+ | `maxTurns` | `number` | Maximum conversation turns to keep (default: `10`) |
+ | `systemPrompt` | `string` | System prompt to include in every request |
+
+ **Why use ChatMemoryManager?**
+
+ On-device models are stateless — they have no built-in memory. Each request must include the full conversation history. `ChatMemoryManager` handles this automatically (see the sketch after the list):
+
+ - Stores messages client-side
+ - Automatically trims old messages when the turn limit is reached
+ - Preserves the system prompt (never trimmed)
+ - Provides `getAllMessages()` for API calls
+
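A minimal sketch of the trimming behavior, assuming older turns are dropped first once `maxTurns` is exceeded (this diff does not spell out the exact trim policy); only members from the signature above are used:

```tsx
import { ChatMemoryManager } from 'expo-ai-kit';

const memory = new ChatMemoryManager({ maxTurns: 2, systemPrompt: 'Be terse.' });

memory.addUserMessage('First question');
memory.addAssistantMessage('First answer');
memory.addUserMessage('Second question');
memory.addAssistantMessage('Second answer');
memory.addUserMessage('Third question'); // a third turn begins

// Inspect the state that would be sent with the next request.
// Assumption: the oldest turn is trimmed; the system prompt survives trimming.
const snapshot = memory.getSnapshot();
console.log(snapshot.turnCount, snapshot.maxTurns);
console.log(memory.getAllMessages()); // trimmed history, ready for streamMessage()
```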
+ **Example with React:**
+
+ ```tsx
+ import { useRef, useState } from 'react';
+ import { ChatMemoryManager, streamMessage } from 'expo-ai-kit';
+
+ function Chat() {
+   const [response, setResponse] = useState('');
+   const memoryRef = useRef(new ChatMemoryManager({
+     maxTurns: 10,
+     systemPrompt: 'You are a helpful assistant.',
+   }));
+
+   const sendMessage = async (text: string) => {
+     memoryRef.current.addUserMessage(text);
+
+     const { promise } = streamMessage(
+       memoryRef.current.getAllMessages(),
+       (event) => setResponse(event.accumulatedText)
+     );
+
+     const reply = await promise;
+     memoryRef.current.addAssistantMessage(reply.text);
+   };
+
+   const clearChat = () => memoryRef.current.clear();
+
+   // ... render UI that displays `response` and wires up sendMessage / clearChat
+ }
+ ```
+
+ ---
+
+ ### `buildPrompt(messages)`
+
+ Converts a message array to a single prompt string. Useful for debugging or custom implementations.
+
+ ```typescript
+ function buildPrompt(messages: LLMMessage[]): string
+ ```
+
+ **Example:**
+ ```tsx
+ import { buildPrompt } from 'expo-ai-kit';
+
+ const prompt = buildPrompt([
+   { role: 'system', content: 'You are helpful.' },
+   { role: 'user', content: 'Hi!' },
+   { role: 'assistant', content: 'Hello!' },
+ ]);
+ // "SYSTEM: You are helpful.\nUSER: Hi!\nASSISTANT: Hello!"
+ ```
+
+ ---
+
  ### Types
 
  ```typescript
@@ -417,6 +667,45 @@ type LLMStreamEvent = {
  };
 
  type LLMStreamCallback = (event: LLMStreamEvent) => void;
+
+ // Prompt Helper Types
+ type LLMSummarizeOptions = {
+   length?: 'short' | 'medium' | 'long';
+   style?: 'paragraph' | 'bullets' | 'tldr';
+ };
+
+ type LLMTranslateOptions = {
+   to: string;
+   from?: string;
+   tone?: 'formal' | 'informal' | 'neutral';
+ };
+
+ type LLMRewriteOptions = {
+   style: 'formal' | 'casual' | 'professional' | 'friendly' | 'concise' | 'detailed' | 'simple' | 'academic';
+ };
+
+ type LLMExtractKeyPointsOptions = {
+   maxPoints?: number;
+ };
+
+ type LLMAnswerQuestionOptions = {
+   detail?: 'brief' | 'medium' | 'detailed';
+ };
+
+ // Chat Memory Types
+ type ChatMemoryOptions = {
+   /** Maximum conversation turns to keep (default: 10) */
+   maxTurns?: number;
+   /** System prompt to include in every request */
+   systemPrompt?: string;
+ };
+
+ type ChatMemorySnapshot = {
+   messages: LLMMessage[];
+   systemPrompt: string | undefined;
+   turnCount: number;
+   maxTurns: number;
+ };
  ```
 
  ## Feature Comparison
@@ -426,6 +715,8 @@ type LLMStreamCallback = (event: LLMStreamEvent) => void;
  | `isAvailable()` | ✅ | ✅ |
  | `sendMessage()` | ✅ | ✅ |
  | `streamMessage()` | ✅ | ✅ |
+ | Prompt helpers | ✅ | ✅ |
+ | `ChatMemoryManager` | ✅ | ✅ |
  | System prompts | ✅ Native | ✅ Prepended |
  | Multi-turn context | ✅ | ✅ |
  | Cancel streaming | ✅ | ✅ |
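The "System prompts" row maps to the `systemPrompt` option of `LLMSendOptions`. A small sketch, reusing the `sendMessage` call shape visible in the hunk header below:

```tsx
import { sendMessage, type LLMMessage } from 'expo-ai-kit';

const messages: LLMMessage[] = [
  { role: 'user', content: 'Summarize our refund policy in one sentence.' },
];

// Per the table above: passed natively on iOS, prepended to the prompt on Android.
const { text } = await sendMessage(messages, {
  systemPrompt: 'You are a concise support assistant.',
});
console.log(text);
```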
@@ -476,7 +767,8 @@ const { text } = await sendMessage(messages, { systemPrompt: '...' });
  | Feature | Status | Priority |
  |---------|--------|----------|
  | ✅ Streaming responses | Done | - |
- | Prompt helpers (summarize, translate, etc.) | Planned | Medium |
+ | ✅ Prompt helpers (summarize, translate, etc.) | Done | - |
+ | ✅ Chat memory management | Done | - |
  | Web/generic fallback | Idea | Medium |
  | Configurable hyperparameters (temperature, etc.) | Idea | Low |
 
package/build/index.d.ts CHANGED
@@ -1,5 +1,6 @@
- import { LLMMessage, LLMSendOptions, LLMResponse, LLMStreamOptions, LLMStreamCallback } from './types';
+ import { LLMMessage, LLMSendOptions, LLMResponse, LLMStreamOptions, LLMStreamCallback, LLMSummarizeOptions, LLMTranslateOptions, LLMRewriteOptions, LLMExtractKeyPointsOptions, LLMAnswerQuestionOptions } from './types';
  export * from './types';
+ export * from './memory';
  /**
   * Check if on-device AI is available on the current device.
   * Returns false on unsupported platforms (web, etc.).
@@ -89,4 +90,247 @@ export declare function streamMessage(messages: LLMMessage[], onToken: LLMStream
      promise: Promise<LLMResponse>;
      stop: () => void;
  };
+ /**
+  * Summarize text content using on-device AI.
+  *
+  * @param text - The text to summarize
+  * @param options - Optional settings for summary style and length
+  * @returns Promise with the generated summary
+  *
+  * @example
+  * ```ts
+  * // Basic summarization
+  * const result = await summarize(longArticle);
+  * console.log(result.text);
+  * ```
+  *
+  * @example
+  * ```ts
+  * // Short bullet-point summary
+  * const result = await summarize(longArticle, {
+  *   length: 'short',
+  *   style: 'bullets'
+  * });
+  * ```
+  *
+  * @example
+  * ```ts
+  * // TL;DR style
+  * const result = await summarize(longArticle, {
+  *   style: 'tldr'
+  * });
+  * ```
+  */
+ export declare function summarize(text: string, options?: LLMSummarizeOptions): Promise<LLMResponse>;
+ /**
+  * Summarize text with streaming output.
+  *
+  * @param text - The text to summarize
+  * @param onToken - Callback for each token received
+  * @param options - Optional settings for summary style and length
+  * @returns Object with stop() function and promise
+  *
+  * @example
+  * ```ts
+  * const { promise } = streamSummarize(
+  *   longArticle,
+  *   (event) => setSummary(event.accumulatedText),
+  *   { style: 'bullets' }
+  * );
+  * await promise;
+  * ```
+  */
+ export declare function streamSummarize(text: string, onToken: LLMStreamCallback, options?: LLMSummarizeOptions): {
+     promise: Promise<LLMResponse>;
+     stop: () => void;
+ };
+ /**
+  * Translate text to another language using on-device AI.
+  *
+  * @param text - The text to translate
+  * @param options - Translation options including target language
+  * @returns Promise with the translated text
+  *
+  * @example
+  * ```ts
+  * // Basic translation
+  * const result = await translate('Hello, world!', { to: 'Spanish' });
+  * console.log(result.text); // "¡Hola, mundo!"
+  * ```
+  *
+  * @example
+  * ```ts
+  * // Formal translation with source language
+  * const result = await translate('Hey, what\'s up?', {
+  *   to: 'French',
+  *   from: 'English',
+  *   tone: 'formal'
+  * });
+  * ```
+  */
+ export declare function translate(text: string, options: LLMTranslateOptions): Promise<LLMResponse>;
+ /**
+  * Translate text with streaming output.
+  *
+  * @param text - The text to translate
+  * @param onToken - Callback for each token received
+  * @param options - Translation options including target language
+  * @returns Object with stop() function and promise
+  *
+  * @example
+  * ```ts
+  * const { promise } = streamTranslate(
+  *   'Hello, world!',
+  *   (event) => setTranslation(event.accumulatedText),
+  *   { to: 'Japanese' }
+  * );
+  * await promise;
+  * ```
+  */
+ export declare function streamTranslate(text: string, onToken: LLMStreamCallback, options: LLMTranslateOptions): {
+     promise: Promise<LLMResponse>;
+     stop: () => void;
+ };
+ /**
+  * Rewrite text in a different style using on-device AI.
+  *
+  * @param text - The text to rewrite
+  * @param options - Rewrite options specifying the target style
+  * @returns Promise with the rewritten text
+  *
+  * @example
+  * ```ts
+  * // Make text more formal
+  * const result = await rewrite('hey can u help me out?', {
+  *   style: 'formal'
+  * });
+  * console.log(result.text); // "Would you be able to assist me?"
+  * ```
+  *
+  * @example
+  * ```ts
+  * // Simplify complex text
+  * const result = await rewrite(technicalText, { style: 'simple' });
+  * ```
+  */
+ export declare function rewrite(text: string, options: LLMRewriteOptions): Promise<LLMResponse>;
+ /**
+  * Rewrite text with streaming output.
+  *
+  * @param text - The text to rewrite
+  * @param onToken - Callback for each token received
+  * @param options - Rewrite options specifying the target style
+  * @returns Object with stop() function and promise
+  *
+  * @example
+  * ```ts
+  * const { promise } = streamRewrite(
+  *   'hey whats up',
+  *   (event) => setRewritten(event.accumulatedText),
+  *   { style: 'professional' }
+  * );
+  * await promise;
+  * ```
+  */
+ export declare function streamRewrite(text: string, onToken: LLMStreamCallback, options: LLMRewriteOptions): {
+     promise: Promise<LLMResponse>;
+     stop: () => void;
+ };
+ /**
+  * Extract key points from text using on-device AI.
+  *
+  * @param text - The text to extract key points from
+  * @param options - Optional settings for extraction
+  * @returns Promise with the key points as text
+  *
+  * @example
+  * ```ts
+  * // Extract key points from an article
+  * const result = await extractKeyPoints(article);
+  * console.log(result.text);
+  * // "• Point 1\n• Point 2\n• Point 3"
+  * ```
+  *
+  * @example
+  * ```ts
+  * // Limit to 3 key points
+  * const result = await extractKeyPoints(article, { maxPoints: 3 });
+  * ```
+  */
+ export declare function extractKeyPoints(text: string, options?: LLMExtractKeyPointsOptions): Promise<LLMResponse>;
+ /**
+  * Extract key points with streaming output.
+  *
+  * @param text - The text to extract key points from
+  * @param onToken - Callback for each token received
+  * @param options - Optional settings for extraction
+  * @returns Object with stop() function and promise
+  *
+  * @example
+  * ```ts
+  * const { promise } = streamExtractKeyPoints(
+  *   article,
+  *   (event) => setKeyPoints(event.accumulatedText),
+  *   { maxPoints: 5 }
+  * );
+  * await promise;
+  * ```
+  */
+ export declare function streamExtractKeyPoints(text: string, onToken: LLMStreamCallback, options?: LLMExtractKeyPointsOptions): {
+     promise: Promise<LLMResponse>;
+     stop: () => void;
+ };
+ /**
+  * Answer a question based on provided context using on-device AI.
+  *
+  * @param question - The question to answer
+  * @param context - The context/document to base the answer on
+  * @param options - Optional settings for the answer
+  * @returns Promise with the answer
+  *
+  * @example
+  * ```ts
+  * // Answer a question about a document
+  * const result = await answerQuestion(
+  *   'What is the main topic?',
+  *   documentText
+  * );
+  * console.log(result.text);
+  * ```
+  *
+  * @example
+  * ```ts
+  * // Get a detailed answer
+  * const result = await answerQuestion(
+  *   'Explain the methodology',
+  *   researchPaper,
+  *   { detail: 'detailed' }
+  * );
+  * ```
+  */
+ export declare function answerQuestion(question: string, context: string, options?: LLMAnswerQuestionOptions): Promise<LLMResponse>;
+ /**
+  * Answer a question with streaming output.
+  *
+  * @param question - The question to answer
+  * @param context - The context/document to base the answer on
+  * @param onToken - Callback for each token received
+  * @param options - Optional settings for the answer
+  * @returns Object with stop() function and promise
+  *
+  * @example
+  * ```ts
+  * const { promise } = streamAnswerQuestion(
+  *   'What are the key findings?',
+  *   documentText,
+  *   (event) => setAnswer(event.accumulatedText),
+  *   { detail: 'detailed' }
+  * );
+  * await promise;
+  * ```
+  */
+ export declare function streamAnswerQuestion(question: string, context: string, onToken: LLMStreamCallback, options?: LLMAnswerQuestionOptions): {
+     promise: Promise<LLMResponse>;
+     stop: () => void;
+ };
  //# sourceMappingURL=index.d.ts.map
package/build/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAEA,OAAO,EACL,UAAU,EACV,cAAc,EACd,WAAW,EACX,gBAAgB,EAEhB,iBAAiB,EAClB,MAAM,SAAS,CAAC;AAEjB,cAAc,SAAS,CAAC;AAUxB;;;GAGG;AACH,wBAAsB,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC,CAKpD;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAkCG;AACH,wBAAsB,WAAW,CAC/B,QAAQ,EAAE,UAAU,EAAE,EACtB,OAAO,CAAC,EAAE,cAAc,GACvB,OAAO,CAAC,WAAW,CAAC,CAgBtB;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,wBAAgB,aAAa,CAC3B,QAAQ,EAAE,UAAU,EAAE,EACtB,OAAO,EAAE,iBAAiB,EAC1B,OAAO,CAAC,EAAE,gBAAgB,GACzB;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CAiErD"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAEA,OAAO,EACL,UAAU,EACV,cAAc,EACd,WAAW,EACX,gBAAgB,EAEhB,iBAAiB,EACjB,mBAAmB,EACnB,mBAAmB,EACnB,iBAAiB,EACjB,0BAA0B,EAC1B,wBAAwB,EACzB,MAAM,SAAS,CAAC;AAEjB,cAAc,SAAS,CAAC;AACxB,cAAc,UAAU,CAAC;AAkGzB;;;GAGG;AACH,wBAAsB,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC,CAKpD;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAkCG;AACH,wBAAsB,WAAW,CAC/B,QAAQ,EAAE,UAAU,EAAE,EACtB,OAAO,CAAC,EAAE,cAAc,GACvB,OAAO,CAAC,WAAW,CAAC,CAgBtB;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2CG;AACH,wBAAgB,aAAa,CAC3B,QAAQ,EAAE,UAAU,EAAE,EACtB,OAAO,EAAE,iBAAiB,EAC1B,OAAO,CAAC,EAAE,gBAAgB,GACzB;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CAiErD;AAMD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,wBAAsB,SAAS,CAC7B,IAAI,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,mBAAmB,GAC5B,OAAO,CAAC,WAAW,CAAC,CActB;AAED;;;;;;;;;;;;;;;;;GAiBG;AACH,wBAAgB,eAAe,CAC7B,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,iBAAiB,EAC1B,OAAO,CAAC,EAAE,mBAAmB,GAC5B;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CAmBrD;AAED;;;;;;;;;;;;;;;;;;;;;;;GAuBG;AACH,wBAAsB,SAAS,CAC7B,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,mBAAmB,GAC3B,OAAO,CAAC,WAAW,CAAC,CAatB;AAED;;;;;;;;;;;;;;;;;GAiBG;AACH,wBAAgB,eAAe,CAC7B,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,iBAAiB,EAC1B,OAAO,EAAE,mBAAmB,GAC3B;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CAkBrD;AAED;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,wBAAsB,OAAO,CAC3B,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,iBAAiB,GACzB,OAAO,CAAC,WAAW,CAAC,CAatB;AAED;;;;;;;;;;;;;;;;;GAiBG;AACH,wBAAgB,aAAa,CAC3B,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,iBAAiB,EAC1B,OAAO,EAAE,iBAAiB,GACzB;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CAkBrD;AAED;;;;;;;;;;;;;;;;;;;;GAoBG;AACH,wBAAsB,gBAAgB,CACpC,IAAI,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,0BAA0B,GACnC,OAAO,CAAC,WAAW,CAAC,CAatB;AAED;;;;;;;;;;;;;;;;;GAiBG;AACH,wBAAgB,sBAAsB,CACpC,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,iBAAiB,EAC1B,OAAO,CAAC,EAAE,0BAA0B,GACnC;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CAkBrD;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2BG;AACH,wBAAsB,cAAc,CAClC,QAAQ,EAAE,MAAM,EAChB,OAAO,EAAE,MAAM,EACf,OAAO,CAAC,EAAE,wBAAwB,GACjC,OAAO,CAAC,WAAW,CAAC,CAkBtB;AAED;;;;;;;;;;;;;;;;;;;GAmBG;AACH,wBAAgB,oBAAoB,CAClC,QAAQ,EAAE,MAAM,EAChB,OAAO,EAAE,MAAM,EACf,OAAO,EAAE,iBAAiB,EAC1B,OAAO,CAAC,EAAE,wBAAwB,GACjC;IAAE,OAAO,EAAE,OAAO,CAAC,WAAW,CAAC,CAAC;IAAC,IAAI,EAAE,MAAM,IAAI,CAAA;CAAE,CA0BrD"}