@mariozechner/pi-ai 0.5.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +231 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +13 -0
- package/dist/index.js.map +1 -0
- package/dist/models.d.ts +72 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/models.generated.d.ts +2890 -0
- package/dist/models.generated.d.ts.map +1 -0
- package/dist/models.generated.js +2889 -0
- package/dist/models.generated.js.map +1 -0
- package/dist/models.js +71 -0
- package/dist/models.js.map +1 -0
- package/dist/providers/anthropic.d.ts +23 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +312 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/google.d.ts +20 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +337 -0
- package/dist/providers/google.js.map +1 -0
- package/dist/providers/openai-completions.d.ts +21 -0
- package/dist/providers/openai-completions.d.ts.map +1 -0
- package/dist/providers/openai-completions.js +304 -0
- package/dist/providers/openai-completions.js.map +1 -0
- package/dist/providers/openai-responses.d.ts +16 -0
- package/dist/providers/openai-responses.d.ts.map +1 -0
- package/dist/providers/openai-responses.js +261 -0
- package/dist/providers/openai-responses.js.map +1 -0
- package/dist/types.d.ts +119 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/package.json +50 -0
package/README.md
ADDED
@@ -0,0 +1,231 @@
# @mariozechner/pi-ai

Unified LLM API with automatic model discovery, provider configuration, token and cost tracking, and simple context persistence and hand-off to other models mid-session.

## Supported Providers

- **OpenAI**
- **Anthropic**
- **Google**
- **Groq**
- **Cerebras**
- **xAI**
- **OpenRouter**
- **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.

## Installation

```bash
npm install @mariozechner/pi-ai
```

## Quick Start

```typescript
import { createLLM } from '@mariozechner/pi-ai';

const llm = createLLM('openai', 'gpt-4o-mini');

const response = await llm.complete({
  messages: [{ role: 'user', content: 'Hello!' }]
});

console.log(response.content);
```
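
Because contexts are plain message arrays, the mid-session hand-off mentioned above needs no conversion step. A minimal sketch, assuming the same `messages` array is accepted by any provider's `complete` and that responses can be pushed back into the context (as the Tool Calling example below does):

```typescript
import { createLLM } from '@mariozechner/pi-ai';

const messages = [{ role: 'user', content: 'Summarize the plot of Hamlet.' }];

// Start the session on one provider...
const openai = createLLM('openai', 'gpt-4o-mini');
messages.push(await openai.complete({ messages }));

// ...and continue it on another, reusing the same context.
// Assumption: assistant responses are valid context entries, as in
// the Tool Calling example below.
messages.push({ role: 'user', content: 'Now shorten that to one sentence.' });
const anthropic = createLLM('anthropic', 'claude-3-7-sonnet-latest');
const followUp = await anthropic.complete({ messages });
console.log(followUp.content);
```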

## Image Input

```typescript
import { readFileSync } from 'fs';

const imageBuffer = readFileSync('image.png');
const base64Image = imageBuffer.toString('base64');

const response = await llm.complete({
  messages: [{
    role: 'user',
    content: [
      { type: 'text', text: 'What is in this image?' },
      { type: 'image', data: base64Image, mimeType: 'image/png' }
    ]
  }]
});
```

## Tool Calling

```typescript
const tools = [{
  name: 'get_weather',
  description: 'Get current weather for a location',
  parameters: {
    type: 'object',
    properties: {
      location: { type: 'string' }
    },
    required: ['location']
  }
}];

const messages = [];
messages.push({ role: 'user', content: 'What is the weather in Paris?' });

const response = await llm.complete({ messages, tools });
messages.push(response);

if (response.toolCalls) {
  for (const call of response.toolCalls) {
    // Call your actual function
    const result = await getWeather(call.arguments.location);

    // Add tool result to context
    messages.push({
      role: 'toolResult',
      content: JSON.stringify(result),
      toolCallId: call.id,
      isError: false
    });
  }

  // Continue conversation with tool results
  const followUp = await llm.complete({ messages, tools });
  messages.push(followUp);
  console.log(followUp.content);
}
```
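
The example leaves `getWeather` up to you. A hypothetical stand-in, so the loop above can run end to end; any async function returning JSON-serializable data works:

```typescript
// Hypothetical tool implementation: returns fixed data in place of a
// real weather API call.
async function getWeather(location: string) {
  return { location, temperatureC: 18, condition: 'partly cloudy' };
}
```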

## Streaming

```typescript
const response = await llm.complete({
  messages: [{ role: 'user', content: 'Write a story' }]
}, {
  onText: (chunk, complete) => {
    process.stdout.write(chunk);
    if (complete) console.log('\n[Text streaming complete]');
  },
  onThinking: (chunk, complete) => {
    process.stderr.write(chunk);
    if (complete) console.error('\n[Thinking complete]');
  }
});
```
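
If you want the streamed text as a value rather than printed output, a small accumulator works. A sketch, assuming `onText` delivers every text chunk in order:

```typescript
let streamed = '';
const response = await llm.complete({
  messages: [{ role: 'user', content: 'Write a story' }]
}, {
  // Assumption: chunks arrive in order and cover the full text.
  onText: (chunk) => { streamed += chunk; }
});
// `streamed` now holds the text the callbacks delivered.
```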

## Abort Signal

```typescript
const controller = new AbortController();

// Abort after 5 seconds
setTimeout(() => controller.abort(), 5000);

try {
  const response = await llm.complete({
    messages: [{ role: 'user', content: 'Write a long story' }]
  }, {
    signal: controller.signal,
    onText: (chunk) => process.stdout.write(chunk)
  });
} catch (error) {
  if (error.name === 'AbortError') {
    console.log('Request was aborted');
  }
}
```

## Provider-Specific Options

### OpenAI Reasoning (o1, o3)
```typescript
const llm = createLLM('openai', 'o1-mini');

await llm.complete(context, {
  reasoningEffort: 'medium' // 'minimal' | 'low' | 'medium' | 'high'
});
```

### Anthropic Thinking
```typescript
const llm = createLLM('anthropic', 'claude-3-7-sonnet-latest');

await llm.complete(context, {
  thinking: {
    enabled: true,
    budgetTokens: 2048 // Optional thinking token limit
  }
});
```

### Google Gemini Thinking
```typescript
const llm = createLLM('google', 'gemini-2.0-flash-thinking-exp');

await llm.complete(context, {
  thinking: { enabled: true }
});
```

## Custom Models

### Local Models (Ollama, vLLM, etc.)
```typescript
import { OpenAICompletionsLLM } from '@mariozechner/pi-ai';

const model = {
  id: 'llama3.1:8b',
  provider: 'ollama',
  baseUrl: 'http://localhost:11434/v1',
  reasoning: false,
  input: ['text'],
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 8192,
  maxTokens: 4096,
  name: 'Llama 3.1 8B'
};

const llm = new OpenAICompletionsLLM(model, 'dummy-key');
```
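
Once constructed, the instance exposes the same `complete` API as the built-in providers. A sketch, assuming an Ollama server is listening at the `baseUrl` above:

```typescript
// Assumes Ollama is running locally with llama3.1:8b pulled.
const response = await llm.complete({
  messages: [{ role: 'user', content: 'Hello from a local model!' }]
});
console.log(response.content);
```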

### Custom OpenAI-Compatible Endpoints
```typescript
const model = {
  id: 'custom-model',
  provider: 'custom',
  baseUrl: 'https://your-api.com/v1',
  reasoning: true,
  input: ['text', 'image'],
  cost: { input: 0.5, output: 1.5, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 32768,
  maxTokens: 8192,
  name: 'Custom Model'
};

const llm = new OpenAICompletionsLLM(model, 'your-api-key');
```

## Environment Variables

Set these environment variables to use `createLLM` without passing API keys:

```bash
OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
GEMINI_API_KEY=...
GROQ_API_KEY=gsk_...
CEREBRAS_API_KEY=csk-...
XAI_API_KEY=xai-...
OPENROUTER_API_KEY=sk-or-...
```

When set, you can omit the API key parameter:
```typescript
// Uses OPENAI_API_KEY from environment
const llm = createLLM('openai', 'gpt-4o-mini');

// Or pass the key explicitly
const llmWithKey = createLLM('openai', 'gpt-4o-mini', 'sk-...');
```

## License

MIT
package/dist/index.d.ts
ADDED
@@ -0,0 +1,9 @@
export declare const version = "0.5.8";
export { PROVIDERS } from "./models.generated.js";
export { type AnthropicModel, type CerebrasModel, createLLM, type GoogleModel, type GroqModel, type Model, type OpenAIModel, type OpenRouterModel, PROVIDER_CONFIG, type ProviderModels, type ProviderToLLM, type XAIModel, } from "./models.js";
export { AnthropicLLM } from "./providers/anthropic.js";
export { GoogleLLM } from "./providers/google.js";
export { OpenAICompletionsLLM } from "./providers/openai-completions.js";
export { OpenAIResponsesLLM } from "./providers/openai-responses.js";
export type * from "./types.js";
//# sourceMappingURL=index.d.ts.map

package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAGA,eAAO,MAAM,OAAO,UAAU,CAAC;AAG/B,OAAO,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AAGlD,OAAO,EACN,KAAK,cAAc,EACnB,KAAK,aAAa,EAClB,SAAS,EACT,KAAK,WAAW,EAChB,KAAK,SAAS,EACd,KAAK,KAAK,EACV,KAAK,WAAW,EAChB,KAAK,eAAe,EACpB,eAAe,EACf,KAAK,cAAc,EACnB,KAAK,aAAa,EAClB,KAAK,QAAQ,GACb,MAAM,aAAa,CAAC;AAGrB,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AACxD,OAAO,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AAClD,OAAO,EAAE,oBAAoB,EAAE,MAAM,mCAAmC,CAAC;AACzE,OAAO,EAAE,kBAAkB,EAAE,MAAM,iCAAiC,CAAC;AAGrE,mBAAmB,YAAY,CAAC"}
package/dist/index.js
ADDED
@@ -0,0 +1,13 @@
// @mariozechner/pi-ai - Unified LLM API with automatic model discovery
// This package provides a common interface for working with multiple LLM providers
export const version = "0.5.8";
// Export generated models data
export { PROVIDERS } from "./models.generated.js";
// Export models utilities and types
export { createLLM, PROVIDER_CONFIG, } from "./models.js";
// Export providers
export { AnthropicLLM } from "./providers/anthropic.js";
export { GoogleLLM } from "./providers/google.js";
export { OpenAICompletionsLLM } from "./providers/openai-completions.js";
export { OpenAIResponsesLLM } from "./providers/openai-responses.js";
//# sourceMappingURL=index.js.map

package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,uEAAuE;AACvE,mFAAmF;AAEnF,MAAM,CAAC,MAAM,OAAO,GAAG,OAAO,CAAC;AAE/B,+BAA+B;AAC/B,OAAO,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AAElD,oCAAoC;AACpC,OAAO,EAGN,SAAS,EAMT,eAAe,GAIf,MAAM,aAAa,CAAC;AAErB,mBAAmB;AACnB,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AACxD,OAAO,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AAClD,OAAO,EAAE,oBAAoB,EAAE,MAAM,mCAAmC,CAAC;AACzE,OAAO,EAAE,kBAAkB,EAAE,MAAM,iCAAiC,CAAC"}
package/dist/models.d.ts
ADDED
@@ -0,0 +1,72 @@
import { PROVIDERS } from "./models.generated.js";
import { AnthropicLLM } from "./providers/anthropic.js";
import { GoogleLLM } from "./providers/google.js";
import { OpenAICompletionsLLM } from "./providers/openai-completions.js";
import { OpenAIResponsesLLM } from "./providers/openai-responses.js";
import type { Model, Usage } from "./types.js";
export declare const PROVIDER_CONFIG: {
    readonly google: {
        readonly envKey: "GEMINI_API_KEY";
        readonly create: (model: Model, apiKey: string) => GoogleLLM;
    };
    readonly openai: {
        readonly envKey: "OPENAI_API_KEY";
        readonly create: (model: Model, apiKey: string) => OpenAIResponsesLLM;
    };
    readonly anthropic: {
        readonly envKey: "ANTHROPIC_API_KEY";
        readonly create: (model: Model, apiKey: string) => AnthropicLLM;
    };
    readonly xai: {
        readonly envKey: "XAI_API_KEY";
        readonly create: (model: Model, apiKey: string) => OpenAICompletionsLLM;
    };
    readonly groq: {
        readonly envKey: "GROQ_API_KEY";
        readonly create: (model: Model, apiKey: string) => OpenAICompletionsLLM;
    };
    readonly cerebras: {
        readonly envKey: "CEREBRAS_API_KEY";
        readonly create: (model: Model, apiKey: string) => OpenAICompletionsLLM;
    };
    readonly openrouter: {
        readonly envKey: "OPENROUTER_API_KEY";
        readonly create: (model: Model, apiKey: string) => OpenAICompletionsLLM;
    };
};
export type ProviderToLLM = {
    google: GoogleLLM;
    openai: OpenAIResponsesLLM;
    anthropic: AnthropicLLM;
    xai: OpenAICompletionsLLM;
    groq: OpenAICompletionsLLM;
    cerebras: OpenAICompletionsLLM;
    openrouter: OpenAICompletionsLLM;
};
export type GoogleModel = keyof typeof PROVIDERS.google.models;
export type OpenAIModel = keyof typeof PROVIDERS.openai.models;
export type AnthropicModel = keyof typeof PROVIDERS.anthropic.models;
export type XAIModel = keyof typeof PROVIDERS.xai.models;
export type GroqModel = keyof typeof PROVIDERS.groq.models;
export type CerebrasModel = keyof typeof PROVIDERS.cerebras.models;
export type OpenRouterModel = keyof typeof PROVIDERS.openrouter.models;
export type ProviderModels = {
    google: GoogleModel;
    openai: OpenAIModel;
    anthropic: AnthropicModel;
    xai: XAIModel;
    groq: GroqModel;
    cerebras: CerebrasModel;
    openrouter: OpenRouterModel;
};
export declare function createLLM<P extends keyof typeof PROVIDERS, M extends keyof (typeof PROVIDERS)[P]["models"]>(provider: P, model: M, apiKey?: string): ProviderToLLM[P];
export declare function getModel<P extends keyof typeof PROVIDERS>(provider: P, modelId: keyof (typeof PROVIDERS)[P]["models"]): Model | undefined;
export declare function calculateCost(model: Model, usage: Usage): {
    input: number;
    output: number;
    cacheRead: number;
    cacheWrite: number;
    total: number;
};
export type { Model };
//# sourceMappingURL=models.d.ts.map

package/dist/models.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"models.d.ts","sourceRoot":"","sources":["../src/models.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AAClD,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AACxD,OAAO,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AAClD,OAAO,EAAE,oBAAoB,EAAE,MAAM,mCAAmC,CAAC;AACzE,OAAO,EAAE,kBAAkB,EAAE,MAAM,iCAAiC,CAAC;AACrE,OAAO,KAAK,EAAE,KAAK,EAAE,KAAK,EAAE,MAAM,YAAY,CAAC;AAG/C,eAAO,MAAM,eAAe;;;iCAGV,KAAK,UAAU,MAAM;;;;iCAIrB,KAAK,UAAU,MAAM;;;;iCAIrB,KAAK,UAAU,MAAM;;;;iCAIrB,KAAK,UAAU,MAAM;;;;iCAIrB,KAAK,UAAU,MAAM;;;;iCAIrB,KAAK,UAAU,MAAM;;;;iCAIrB,KAAK,UAAU,MAAM;;CAE7B,CAAC;AAGX,MAAM,MAAM,aAAa,GAAG;IAC3B,MAAM,EAAE,SAAS,CAAC;IAClB,MAAM,EAAE,kBAAkB,CAAC;IAC3B,SAAS,EAAE,YAAY,CAAC;IACxB,GAAG,EAAE,oBAAoB,CAAC;IAC1B,IAAI,EAAE,oBAAoB,CAAC;IAC3B,QAAQ,EAAE,oBAAoB,CAAC;IAC/B,UAAU,EAAE,oBAAoB,CAAC;CACjC,CAAC;AAGF,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,SAAS,CAAC,MAAM,CAAC,MAAM,CAAC;AAC/D,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,SAAS,CAAC,MAAM,CAAC,MAAM,CAAC;AAC/D,MAAM,MAAM,cAAc,GAAG,MAAM,OAAO,SAAS,CAAC,SAAS,CAAC,MAAM,CAAC;AACrE,MAAM,MAAM,QAAQ,GAAG,MAAM,OAAO,SAAS,CAAC,GAAG,CAAC,MAAM,CAAC;AACzD,MAAM,MAAM,SAAS,GAAG,MAAM,OAAO,SAAS,CAAC,IAAI,CAAC,MAAM,CAAC;AAC3D,MAAM,MAAM,aAAa,GAAG,MAAM,OAAO,SAAS,CAAC,QAAQ,CAAC,MAAM,CAAC;AACnE,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,SAAS,CAAC,UAAU,CAAC,MAAM,CAAC;AAGvE,MAAM,MAAM,cAAc,GAAG;IAC5B,MAAM,EAAE,WAAW,CAAC;IACpB,MAAM,EAAE,WAAW,CAAC;IACpB,SAAS,EAAE,cAAc,CAAC;IAC1B,GAAG,EAAE,QAAQ,CAAC;IACd,IAAI,EAAE,SAAS,CAAC;IAChB,QAAQ,EAAE,aAAa,CAAC;IACxB,UAAU,EAAE,eAAe,CAAC;CAC5B,CAAC;AAGF,wBAAgB,SAAS,CAAC,CAAC,SAAS,MAAM,OAAO,SAAS,EAAE,CAAC,SAAS,MAAM,CAAC,OAAO,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,EAC1G,QAAQ,EAAE,CAAC,EACX,KAAK,EAAE,CAAC,EACR,MAAM,CAAC,EAAE,MAAM,GACb,aAAa,CAAC,CAAC,CAAC,CAgBlB;AAGD,wBAAgB,QAAQ,CAAC,CAAC,SAAS,MAAM,OAAO,SAAS,EACxD,QAAQ,EAAE,CAAC,EACX,OAAO,EAAE,MAAM,CAAC,OAAO,SAAS,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,GAC5C,KAAK,GAAG,SAAS,CAKnB;AAED,wBAAgB,aAAa,CAAC,KAAK,EAAE,KAAK,EAAE,KAAK,EAAE,KAAK;;;;;;EAOvD;AAGD,YAAY,EAAE,KAAK,EAAE,CAAC"}