@ank1015/providers 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99) hide show
  1. package/README.md +453 -0
  2. package/biome.json +43 -0
  3. package/dist/agent/agent-loop.d.ts +5 -0
  4. package/dist/agent/agent-loop.d.ts.map +1 -0
  5. package/dist/agent/agent-loop.js +219 -0
  6. package/dist/agent/agent-loop.js.map +1 -0
  7. package/dist/agent/types.d.ts +67 -0
  8. package/dist/agent/types.d.ts.map +1 -0
  9. package/dist/agent/types.js +3 -0
  10. package/dist/agent/types.js.map +1 -0
  11. package/dist/index.d.ts +10 -0
  12. package/dist/index.d.ts.map +1 -0
  13. package/dist/index.js +29 -0
  14. package/dist/index.js.map +1 -0
  15. package/dist/models.d.ts +3 -0
  16. package/dist/models.d.ts.map +1 -0
  17. package/dist/models.generated.d.ts +247 -0
  18. package/dist/models.generated.d.ts.map +1 -0
  19. package/dist/models.generated.js +315 -0
  20. package/dist/models.generated.js.map +1 -0
  21. package/dist/models.js +41 -0
  22. package/dist/models.js.map +1 -0
  23. package/dist/providers/convert.d.ts +6 -0
  24. package/dist/providers/convert.d.ts.map +1 -0
  25. package/dist/providers/convert.js +207 -0
  26. package/dist/providers/convert.js.map +1 -0
  27. package/dist/providers/google.d.ts +26 -0
  28. package/dist/providers/google.d.ts.map +1 -0
  29. package/dist/providers/google.js +434 -0
  30. package/dist/providers/google.js.map +1 -0
  31. package/dist/providers/openai.d.ts +17 -0
  32. package/dist/providers/openai.d.ts.map +1 -0
  33. package/dist/providers/openai.js +396 -0
  34. package/dist/providers/openai.js.map +1 -0
  35. package/dist/stream.d.ts +4 -0
  36. package/dist/stream.d.ts.map +1 -0
  37. package/dist/stream.js +40 -0
  38. package/dist/stream.js.map +1 -0
  39. package/dist/test-google-agent-loop.d.ts +2 -0
  40. package/dist/test-google-agent-loop.d.ts.map +1 -0
  41. package/dist/test-google-agent-loop.js +186 -0
  42. package/dist/test-google-agent-loop.js.map +1 -0
  43. package/dist/test-google.d.ts +2 -0
  44. package/dist/test-google.d.ts.map +1 -0
  45. package/dist/test-google.js +41 -0
  46. package/dist/test-google.js.map +1 -0
  47. package/dist/types.d.ts +187 -0
  48. package/dist/types.d.ts.map +1 -0
  49. package/dist/types.js +10 -0
  50. package/dist/types.js.map +1 -0
  51. package/dist/utils/event-stream.d.ts +16 -0
  52. package/dist/utils/event-stream.d.ts.map +1 -0
  53. package/dist/utils/event-stream.js +61 -0
  54. package/dist/utils/event-stream.js.map +1 -0
  55. package/dist/utils/json-parse.d.ts +9 -0
  56. package/dist/utils/json-parse.d.ts.map +1 -0
  57. package/dist/utils/json-parse.js +32 -0
  58. package/dist/utils/json-parse.js.map +1 -0
  59. package/dist/utils/sanitize-unicode.d.ts +22 -0
  60. package/dist/utils/sanitize-unicode.d.ts.map +1 -0
  61. package/dist/utils/sanitize-unicode.js +29 -0
  62. package/dist/utils/sanitize-unicode.js.map +1 -0
  63. package/dist/utils/validation.d.ts +11 -0
  64. package/dist/utils/validation.d.ts.map +1 -0
  65. package/dist/utils/validation.js +61 -0
  66. package/dist/utils/validation.js.map +1 -0
  67. package/package.json +33 -0
  68. package/src/agent/agent-loop.ts +275 -0
  69. package/src/agent/types.ts +80 -0
  70. package/src/index.ts +72 -0
  71. package/src/models.generated.ts +314 -0
  72. package/src/models.ts +45 -0
  73. package/src/providers/convert.ts +222 -0
  74. package/src/providers/google.ts +496 -0
  75. package/src/providers/openai.ts +437 -0
  76. package/src/stream.ts +60 -0
  77. package/src/types.ts +198 -0
  78. package/src/utils/event-stream.ts +60 -0
  79. package/src/utils/json-parse.ts +28 -0
  80. package/src/utils/sanitize-unicode.ts +25 -0
  81. package/src/utils/validation.ts +69 -0
  82. package/test/core/agent-loop.test.ts +958 -0
  83. package/test/core/stream.test.ts +409 -0
  84. package/test/data/red-circle.png +0 -0
  85. package/test/data/superintelligentwill.pdf +0 -0
  86. package/test/edge-cases/general.test.ts +565 -0
  87. package/test/integration/e2e.test.ts +530 -0
  88. package/test/models/cost.test.ts +499 -0
  89. package/test/models/registry.test.ts +298 -0
  90. package/test/providers/convert.test.ts +846 -0
  91. package/test/providers/google-schema.test.ts +666 -0
  92. package/test/providers/google-stream.test.ts +369 -0
  93. package/test/providers/openai-stream.test.ts +251 -0
  94. package/test/utils/event-stream.test.ts +289 -0
  95. package/test/utils/json-parse.test.ts +344 -0
  96. package/test/utils/sanitize-unicode.test.ts +329 -0
  97. package/test/utils/validation.test.ts +614 -0
  98. package/tsconfig.json +21 -0
  99. package/vitest.config.ts +9 -0
package/README.md ADDED
@@ -0,0 +1,453 @@
1
+ # @ank1015/providers
2
+
3
+ A provider abstraction layer for building agentic systems with multiple LLM providers. Built with a philosophy that **harnesses should be model-specific** while maintaining the ability to test and compose different models together.
4
+
5
+ This documentation is written by Claude. Most of the coding patterns are taken from and inspired by [PI-mono](https://github.com/badlogic/pi-mono/tree/main)
6
+
7
+ ## Philosophy
8
+
9
+ This library is designed around a key insight: **effective AI systems should be built for specific models**, following their unique best practices and capabilities. However, practical development requires:
10
+
11
+ 1. **Testing flexibility**: Try different models during development
12
+ 2. **Multi-model systems**: Compose systems where different models work together
13
+ 3. **Forking capability**: Convert state between providers when needed
14
+
15
+ We achieve this by:
16
+ - Storing **user messages** and **tool results** in a standardized format (can be built for any provider)
17
+ - Storing **assistant messages** in their **native provider format** (preserving caching, thinking traces, and internal state)
18
+ - Providing **standardized streaming** for UI display without losing provider-specific data
19
+
20
+ **This is NOT** about switching models mid-session (which loses critical state). It's about building provider-specific implementations while avoiding vendor lock-in during development. However, the library still allows hand-offs mid-conversation using conversion functions that transform one provider's messages into another's.
21
+
22
+ ## Features
23
+
24
+ - **🎯 Provider-Specific Implementations**: Each provider follows its own best practices
25
+ - **🔄 Unified Streaming API**: Standardized event stream across all providers
26
+ - **🤖 Agent Loop**: Multi-turn conversations with automatic tool execution
27
+ - **🛠️ Type-Safe Tools**: TypeBox-powered JSON Schema validation
28
+ - **💰 Cost Tracking**: Automatic token usage and cost calculation
29
+ - **⚡ Real-Time Streaming**: 21 granular event types for UI updates
30
+ - **🎨 Extended Thinking**: Support for reasoning/thinking modes
31
+
32
+ ## Supported Providers
33
+
34
+ - **OpenAI**: GPT-5 series (Codex, Mini, Pro) with prompt caching and reasoning
35
+ - **Google**: Gemini 2.5 Flash, Gemini 3 Pro with extended thinking
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ npm install @ank1015/providers
41
+ ```
42
+
43
+ Set up your API keys:
44
+ ```bash
45
+ export OPENAI_API_KEY="your-key"
46
+ export GEMINI_API_KEY="your-key"
47
+ ```
48
+
49
+ ## Quick Start
50
+
51
+ ### Basic Streaming
52
+
53
+ ```typescript
54
+ import { stream, MODELS } from "@ank1015/providers";
55
+
56
+ const context = {
57
+ systemPrompt: "You are a helpful assistant.",
58
+ messages: [
59
+ {
60
+ role: "user",
61
+ content: [{ type: "text", content: "Hello!" }],
62
+ timestamp: Date.now(),
63
+ },
64
+ ],
65
+ };
66
+
67
+ const response = stream(MODELS.OPENAI_GPT5_MINI, context);
68
+
69
+ for await (const event of response) {
70
+ if (event.type === "text_delta") {
71
+ process.stdout.write(event.delta);
72
+ }
73
+ }
74
+
75
+ // Get the final native message (preserves provider-specific state)
76
+ const nativeMessage = await response.result();
77
+ ```
78
+
79
+ ### Agent Loop with Tools
80
+
81
+ ```typescript
82
+ import { agentLoop, defineTool, MODELS } from "@ank1015/providers";
83
+ import { Type } from "@sinclair/typebox";
84
+
85
+ // Define tools with type-safe schemas
86
+ const tools = [
87
+ defineTool({
88
+ name: "calculator",
89
+ description: "Perform mathematical calculations",
90
+ parameters: Type.Object({
91
+ expression: Type.String({ description: "Math expression to evaluate" }),
92
+ }),
93
+ }),
94
+ ] as const;
95
+
96
+ // Create agent tools with execution logic
97
+ const agentTools = tools.map((tool) => ({
98
+ ...tool,
99
+ label: "Calculator",
100
+ async execute(toolCallId, params, signal) {
101
+ const result = eval(params.expression); // Use a safe eval in production!
102
+ return {
103
+ content: [{ type: "text", content: `Result: ${result}` }],
104
+ details: { result },
105
+ };
106
+ },
107
+ }));
108
+
109
+ const context = {
110
+ systemPrompt: "You are a helpful assistant with access to a calculator.",
111
+ messages: [],
112
+ tools: agentTools,
113
+ };
114
+
115
+ const prompt = {
116
+ role: "user" as const,
117
+ content: [{ type: "text" as const, content: "What is 156 * 234?" }],
118
+ timestamp: Date.now(),
119
+ };
120
+
121
+ const config = {
122
+ model: MODELS.OPENAI_GPT5_MINI,
123
+ providerOptions: {},
124
+ };
125
+
126
+ const eventStream = agentLoop(prompt, context, config);
127
+
128
+ for await (const event of eventStream) {
129
+ switch (event.type) {
130
+ case "message_update":
131
+ // Handle streaming assistant message
132
+ console.log("Assistant:", event.message);
133
+ break;
134
+ case "tool_execution_start":
135
+ console.log(`Executing tool: ${event.toolName}`);
136
+ break;
137
+ case "tool_execution_end":
138
+ console.log(`Result:`, event.result);
139
+ break;
140
+ case "agent_end":
141
+ console.log("Agent completed with status:", event.status);
142
+ break;
143
+ }
144
+ }
145
+
146
+ // Get all messages for conversation history
147
+ const allMessages = await eventStream.result();
148
+ ```
149
+
150
+ ### Working with Different Providers
151
+
152
+ ```typescript
153
+ import { stream, MODELS } from "@ank1015/providers";
154
+
155
+ // OpenAI with reasoning
156
+ const openaiResponse = stream(MODELS.OPENAI_GPT5_MINI, context, {
157
+ reasoning: {
158
+ effort: "medium",
159
+ summaryStyle: "concise",
160
+ }
161
+ });
162
+
163
+ // Google with thinking
164
+ const googleResponse = stream(MODELS.GOOGLE_GEMINI_2_5_FLASH, context, {
165
+ thinkingConfig: { extendedThinking: { level: "EXTENDED_THINKING_THINK_MODE" } },
166
+ temperature: 0.7,
167
+ });
168
+
169
+ // Both return the same standardized stream format
170
+ for await (const event of openaiResponse) {
171
+ // Handle events the same way regardless of provider
172
+ }
173
+ ```
174
+
175
+ ### Cost Tracking
176
+
177
+ ```typescript
178
+ for await (const event of response) {
179
+ if (event.type === "done") {
180
+ const { usage } = event.message;
181
+ console.log(`Tokens: ${usage.totalTokens}`);
182
+ console.log(`Cost: $${usage.cost.total.toFixed(4)}`);
183
+ console.log(`Input: ${usage.input} ($${usage.cost.input.toFixed(4)})`);
184
+ console.log(`Output: ${usage.output} ($${usage.cost.output.toFixed(4)})`);
185
+ if (usage.cacheRead > 0) {
186
+ console.log(`Cache Read: ${usage.cacheRead} ($${usage.cost.cacheRead.toFixed(4)})`);
187
+ }
188
+ }
189
+ }
190
+ ```
191
+
192
+ ## Architecture
193
+
194
+ ### Message Storage Strategy
195
+
196
+ ```
197
+ User Message ────────> Assistant Message ────────> Tool Result
198
+ Standardized Native Provider Standardized
199
+ ✓ Can rebuild ✗ Store as-is ✓ Can rebuild
200
+ ```
201
+
202
+ - **User Messages** & **Tool Results**: Stored in standardized format, can be converted to any provider
203
+ - **Assistant Messages**: Stored in native provider format to preserve:
204
+ - Prompt caching state
205
+ - Thinking traces
206
+ - Internal provider state
207
+ - Response metadata
208
+
209
+ ### Streaming Events
210
+
211
+ The library provides 21 event types for granular control:
212
+
213
+ ```typescript
214
+ type AssistantMessageEvent =
215
+ | { type: "start"; partial: AssistantMessage }
216
+ | { type: "text_start"; contentIndex: number; partial: AssistantMessage }
217
+ | { type: "text_delta"; contentIndex: number; delta: string; partial: AssistantMessage }
218
+ | { type: "text_end"; contentIndex: number; content: string; partial: AssistantMessage }
219
+ | { type: "thinking_start"; contentIndex: number; partial: AssistantMessage }
220
+ | { type: "thinking_delta"; contentIndex: number; delta: string; partial: AssistantMessage }
221
+ | { type: "thinking_end"; contentIndex: number; content: string; partial: AssistantMessage }
222
+ | { type: "toolcall_start"; contentIndex: number; partial: AssistantMessage }
223
+ | { type: "toolcall_delta"; contentIndex: number; delta: string; partial: AssistantMessage }
224
+ | { type: "toolcall_end"; contentIndex: number; toolCall: AssistantToolCall; partial: AssistantMessage }
225
+ | { type: "done"; reason: StopReason; message: AssistantMessage }
226
+ | { type: "error"; reason: StopReason; error: AssistantMessage };
227
+ ```
228
+
229
+ ### Agent Loop Flow
230
+
231
+ ```
232
+ Initial Prompt
233
+
234
+ ┌─────────────────────────┐
235
+ │ While tool calls exist:│
236
+ │ 1. Stream response │
237
+ │ 2. Execute tools │
238
+ │ 3. Inject results │
239
+ │ 4. Repeat │
240
+ └─────────────────────────┘
241
+
242
+ Return all messages
243
+ ```
244
+
245
+ The agent loop automatically handles multi-turn conversations with tool execution, preserving full conversation state.
246
+
247
+ ## API Reference
248
+
249
+ ### Core Functions
250
+
251
+ #### `stream(model, context, options)`
252
+
253
+ Stream an LLM response with standardized events.
254
+
255
+ - **model**: Model object from `MODELS` registry
256
+ - **context**: Conversation context with messages, system prompt, and tools
257
+ - **options**: Provider-specific options (apiKey, temperature, reasoning, etc.)
258
+ - **returns**: `AssistantMessageEventStream` (async iterable)
259
+
260
+ #### `agentLoop(prompt, context, config, signal)`
261
+
262
+ Run a multi-turn agent loop with automatic tool execution.
263
+
264
+ - **prompt**: Initial user message
265
+ - **context**: Agent context with messages, system prompt, and agent tools
266
+ - **config**: Configuration with model, provider options, and optional preprocessor
267
+ - **signal**: Optional AbortSignal for cancellation
268
+ - **returns**: `EventStream<AgentEvent>` (async iterable)
269
+
270
+ ### Types
271
+
272
+ #### `Message`
273
+
274
+ Union type for all message types:
275
+ ```typescript
276
+ type Message = UserMessage | NativeAssistantMessage | ToolResultMessage
277
+ ```
278
+
279
+ #### `Tool`
280
+
281
+ Type-safe tool definition:
282
+ ```typescript
283
+ interface Tool<TParameters extends TSchema = TSchema> {
284
+ name: string;
285
+ description: string;
286
+ parameters: TParameters; // TypeBox JSON Schema
287
+ }
288
+ ```
289
+
290
+ #### `AgentTool`
291
+
292
+ Extended tool with execution logic:
293
+ ```typescript
294
+ interface AgentTool extends Tool {
295
+ label: string;
296
+ execute(
297
+ toolCallId: string,
298
+ params: Static<TParameters>,
299
+ signal?: AbortSignal
300
+ ): Promise<AgentToolResult>;
301
+ }
302
+ ```
303
+
304
+ ### Utilities
305
+
306
+ #### `defineTool(tool)`
307
+
308
+ Helper for creating tools with better type inference:
309
+ ```typescript
310
+ const tool = defineTool({
311
+ name: "search",
312
+ description: "Search the web",
313
+ parameters: Type.Object({
314
+ query: Type.String(),
315
+ }),
316
+ });
317
+ ```
318
+
319
+ #### `calculateCost(model, usage)`
320
+
321
+ Calculate costs based on token usage:
322
+ ```typescript
323
+ import { calculateCost, MODELS } from "@ank1015/providers";
324
+
325
+ const usage = {
326
+ input: 1000,
327
+ output: 500,
328
+ cacheRead: 200,
329
+ cacheWrite: 0,
330
+ totalTokens: 1700,
331
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
332
+ };
333
+
334
+ calculateCost(MODELS.OPENAI_GPT5_MINI, usage);
335
+ console.log(usage.cost.total); // Calculated cost in USD
336
+ ```
337
+
338
+ ## Provider-Specific Features
339
+
340
+ ### OpenAI
341
+
342
+ ```typescript
343
+ stream(MODELS.OPENAI_GPT5_MINI, context, {
344
+ reasoning: {
345
+ effort: "low" | "medium" | "high",
346
+ summaryStyle: "concise" | "detailed",
347
+ },
348
+ parallelToolCalls: true,
349
+ prompt_cache_key: "unique-cache-key",
350
+ promptCacheRetention: "in-memory" | "24h",
351
+ maxOutputTokens: 4096,
352
+ temperature: 0.7,
353
+ });
354
+ ```
355
+
356
+ ### Google
357
+
358
+ ```typescript
359
+ stream(MODELS.GOOGLE_GEMINI_2_5_FLASH, context, {
360
+ thinkingConfig: {
361
+ extendedThinking: {
362
+ level: "EXTENDED_THINKING_THINK_MODE",
363
+ },
364
+ },
365
+ responseMimeType: "application/json",
366
+ imageConfig: {
367
+ aspectRatio: "ASPECT_RATIO_16_9",
368
+ size: "LARGE",
369
+ },
370
+ maxOutputTokens: 8192,
371
+ temperature: 0.7,
372
+ });
373
+ ```
374
+
375
+ ## Design Principles
376
+
377
+ 1. **Model-Specific Best Practices**: Each provider implementation follows the provider's recommended patterns
378
+ 2. **State Preservation**: Native assistant messages preserve all provider-specific state
379
+ 3. **Type Safety**: TypeBox schemas provide compile-time and runtime validation
380
+ 4. **Stream-First**: All operations are async and support real-time updates
381
+ 5. **Cost Transparency**: Every response includes detailed token usage and costs
382
+ 6. **Graceful Degradation**: Validation falls back gracefully in restricted environments
383
+ 7. **Developer Experience**: Rich type inference and autocomplete support
384
+
385
+ ## Development
386
+
387
+ ```bash
388
+ # Install dependencies
389
+ npm install
390
+
391
+ # Build
392
+ npm run build
393
+
394
+ # Watch mode
395
+ npm run dev
396
+
397
+ # Run tests
398
+ npm run test
399
+
400
+ # Lint and type check
401
+ npm run check
402
+ ```
403
+
404
+ ## Project Structure
405
+
406
+ ```
407
+ src/
408
+ ├── index.ts # Main exports
409
+ ├── types.ts # Core type definitions
410
+ ├── stream.ts # Provider routing
411
+ ├── models.ts # Cost calculation
412
+ ├── models.generated.ts # Model registry
413
+ ├── agent/
414
+ │ ├── agent-loop.ts # Multi-turn agent orchestration
415
+ │ └── types.ts # Agent-specific types
416
+ ├── providers/
417
+ │ ├── openai.ts # OpenAI implementation
418
+ │ ├── google.ts # Google implementation
419
+ │ └── convert.ts # Message format conversion
420
+ └── utils/
421
+ ├── event-stream.ts # Async event streaming
422
+ ├── validation.ts # Tool argument validation
423
+ ├── json-parse.ts # Streaming JSON parser
424
+ └── sanitize-unicode.ts # Unicode sanitization
425
+ ```
426
+
427
+ ## Why This Architecture?
428
+
429
+ Traditional LLM abstraction layers try to make all models interchangeable, leading to:
430
+ - Lost provider-specific features (caching, thinking traces)
431
+ - Lowest-common-denominator APIs
432
+ - Poor utilization of each model's strengths
433
+
434
+ Our approach:
435
+ - ✅ Build provider-specific implementations following best practices
436
+ - ✅ Preserve native state for optimal performance
437
+ - ✅ Provide standardized interfaces for development flexibility
438
+ - ✅ Enable model composition without forcing model switching
439
+ - ✅ Support forking/conversion when truly needed
440
+
441
+ The result: **You get the best of both worlds** - full provider capabilities without vendor lock-in.
442
+
443
+ ## Contributing
444
+
445
+ Contributions are welcome! Please feel free to submit issues or pull requests.
446
+
447
+ ## License
448
+
449
+ MIT
450
+
451
+ ---
452
+
453
+ **Built for developers who want to harness the full power of frontier models without sacrificing flexibility.**
package/biome.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "$schema": "https://biomejs.dev/schemas/2.3.8/schema.json",
3
+ "linter": {
4
+ "enabled": true,
5
+ "rules": {
6
+ "recommended": true,
7
+ "style": {
8
+ "noNonNullAssertion": "off",
9
+ "useConst": "error",
10
+ "useNodejsImportProtocol": "off",
11
+ "useTemplate": "off"
12
+ },
13
+ "correctness": {
14
+ "noUnusedVariables": "off"
15
+ },
16
+ "suspicious": {
17
+ "noExplicitAny": "off",
18
+ "noControlCharactersInRegex": "off",
19
+ "noEmptyInterface": "off"
20
+ }
21
+ }
22
+ },
23
+ "formatter": {
24
+ "enabled": true,
25
+ "formatWithErrors": false,
26
+ "indentStyle": "tab",
27
+ "indentWidth": 3,
28
+ "lineWidth": 120
29
+ },
30
+ "files": {
31
+ "includes": [
32
+ "packages/*/src/**/*",
33
+ "packages/*/test/**/*",
34
+ "*.json",
35
+ "*.md",
36
+ "!**/node_modules/**/*",
37
+ "!**/test-sessions.ts",
38
+ "!packages/web-ui/src/app.css",
39
+ "!packages/mom/data/**/*",
40
+ "!!**/node_modules"
41
+ ]
42
+ }
43
+ }
@@ -0,0 +1,5 @@
1
+ import { EventStream } from "../utils/event-stream";
2
+ import { UserMessage, Api } from "../types";
3
+ import { AgentContext, AgentEvent, AgentLoopConfig } from "./types";
4
+ export declare function agentLoop<TApi extends Api>(prompt: UserMessage, context: AgentContext, config: AgentLoopConfig<TApi>, signal?: AbortSignal): EventStream<AgentEvent, AgentContext["messages"]>;
5
+ //# sourceMappingURL=agent-loop.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"agent-loop.d.ts","sourceRoot":"","sources":["../../src/agent/agent-loop.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,uBAAuB,CAAC;AACpD,OAAO,EAAE,WAAW,EAAE,GAAG,EAAwE,MAAM,UAAU,CAAC;AAClH,OAAO,EAAE,YAAY,EAAE,UAAU,EAAE,eAAe,EAA6C,MAAM,SAAS,CAAC;AAK/G,wBAAgB,SAAS,CAAC,IAAI,SAAS,GAAG,EACzC,MAAM,EAAE,WAAW,EACnB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,CAAC,IAAI,CAAC,EAC7B,MAAM,CAAC,EAAE,WAAW,GAClB,WAAW,CAAC,UAAU,EAAE,YAAY,CAAC,UAAU,CAAC,CAAC,CA4GnD"}