primellm 0.1.0 → 0.2.0

package/README.md CHANGED
@@ -2,20 +2,10 @@
 
 Official JavaScript/TypeScript SDK for the PrimeLLM unified AI API.
 
- PrimeLLM lets you access multiple AI models (GPT-5.1, Claude, Gemini) through a single, simple API. This SDK makes it easy to call PrimeLLM from JavaScript or TypeScript.
+ PrimeLLM lets you access multiple AI models (GPT-5.1, Claude, Gemini) through a single, simple API.
 
 ## Installation
 
- ### Local Development (Not on npm yet)
-
- ```bash
- cd js-sdk
- npm install
- npm run build
- ```
-
- ### Future npm Usage (Coming Soon)
-
 ```bash
 npm install primellm
 ```
@@ -23,11 +13,11 @@ npm install primellm
 ## Quick Start
 
 ```javascript
- import PrimeLLMClient from "primellm";
+ import PrimeLLM from "primellm";
 
 // Create a client with your API key
- const client = new PrimeLLMClient({
-   apiKey: "primellm_live_XXX", // Get from https://primellm.in/dashboard
+ const client = new PrimeLLM({
+   apiKey: "primellm_XXX", // Get from https://primellm.in/dashboard
 });
 
 // Send a chat message
@@ -58,8 +48,8 @@ console.log("Credits left:", response.credits.remaining);
 ### Creating a Client
 
 ```typescript
- const client = new PrimeLLMClient({
-   apiKey: "primellm_live_XXX", // Required
+ const client = new PrimeLLM({
+   apiKey: "primellm_XXX", // Required
   baseURL: "https://api.primellm.in", // Optional, this is the default
   timeoutMs: 60000, // Optional, 60 seconds default
 });
@@ -67,7 +57,7 @@ const client = new PrimeLLMClient({
 
 ### client.chat(request)
 
- Send a chat completion request to `/v1/chat`. This is the recommended method.
+ Send a chat completion request.
 
 ```javascript
 const response = await client.chat({
@@ -102,69 +92,14 @@ const response = await client.chat({
 }
 ```
 
- ### client.completions(request)
-
- Same as `chat()`, but uses the `/v1/chat/completions` endpoint.
- Use this for OpenAI API path compatibility.
-
- ```javascript
- const response = await client.completions({
-   model: "claude-sonnet-4.5",
-   messages: [{ role: "user", content: "Hello!" }],
- });
- ```
-
- ### client.generate(request)
-
- Legacy endpoint using `/generate`. Returns a simpler response format.
-
- ```javascript
- const response = await client.generate({
-   model: "gpt-5.1",
-   messages: [{ role: "user", content: "Hello!" }],
- });
-
- // Response format is different:
- console.log(response.reply); // The AI's response
- console.log(response.tokens_used); // Total tokens
- console.log(response.credits_remaining); // Credits left
- ```
-
- ## Examples
-
- Run the included examples:
-
- ```bash
- cd js-sdk
- npm install
- npm run build
-
- # Edit examples to add your API key, then:
- node ./examples/chat-basic.mjs
- node ./examples/completions-basic.mjs
- node ./examples/generate-basic.mjs
- ```
-
- ## Understanding the Response
-
- - **model**: Which AI model generated the response
- - **messages**: The conversation, including the AI's reply
- - **usage**: Token counts (how much "text" was processed)
-   - `prompt_tokens`: Your input
-   - `completion_tokens`: AI's output
-   - `total_tokens`: Total
- - **credits**: Your PrimeLLM account balance
-   - `remaining`: Credits left
-   - `cost`: Cost of this request
-
 ## TypeScript Support
 
 This SDK is written in TypeScript and includes full type definitions.
 
 ```typescript
- import { PrimeLLMClient, ChatRequest, ChatResponse } from "primellm";
+ import PrimeLLM, { ChatRequest, ChatResponse } from "primellm";
 
- const client = new PrimeLLMClient({ apiKey: "..." });
+ const client = new PrimeLLM({ apiKey: "..." });
 
 const request: ChatRequest = {
   model: "gpt-5.1",
@@ -184,15 +119,9 @@ try {
   });
 } catch (error) {
   console.error("API Error:", error.message);
-   // Example: "PrimeLLM API error: 401 Unauthorized - Invalid API key"
 }
 ```
 
- ## Notes
-
- - **Streaming**: Not yet supported. Calling `streamChat()` will throw an error.
- - **Publishing**: This SDK will be published to npm as `primellm` in a future release.
-
 ## License
 
 MIT
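
The README changes above amount to a rename of the public entry point and a simpler key format. A minimal sketch of what that looks like for callers, based only on this diff (the type declarations further down keep `PrimeLLMClient` as a named alias of `PrimeLLM`); reading the key from the `PRIMELLM_API_KEY` environment variable mirrors the new CLI below and is illustrative, not something the README prescribes:

```typescript
import PrimeLLM, { PrimeLLMClient } from "primellm";

// v0.2.0 style: the default export is the PrimeLLM class, keys look like "primellm_XXX".
const client = new PrimeLLM({
  apiKey: process.env.PRIMELLM_API_KEY ?? "primellm_XXX",
  baseURL: "https://api.primellm.in", // optional, default shown in the README
  timeoutMs: 60000,                   // optional, default shown in the README
});

// v0.1.0 style keeps compiling: PrimeLLMClient is re-exported as an alias in dist/index.d.ts.
const legacyClient = new PrimeLLMClient({ apiKey: "primellm_XXX" });

console.log(client instanceof PrimeLLM, legacyClient instanceof PrimeLLM); // true true
```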
package/cli/index.js ADDED
@@ -0,0 +1,34 @@
+ #!/usr/bin/env node
+
+ import PrimeLLM from "../dist/index.js";
+
+ const args = process.argv.slice(2);
+ if (args.length === 0) {
+   console.log("Usage: npx primellm \"your question here\"");
+   process.exit(1);
+ }
+
+ const prompt = args.join(" ");
+
+ const apiKey = process.env.PRIMELLM_API_KEY;
+ if (!apiKey) {
+   console.error("Error: Set PRIMELLM_API_KEY environment variable before using npx primellm");
+   process.exit(1);
+ }
+
+ const client = new PrimeLLM({ apiKey });
+
+ const run = async () => {
+   try {
+     const res = await client.chat({
+       model: "gpt-5.1",
+       messages: [{ role: "user", content: prompt }]
+     });
+
+     console.log("\n" + res.choices[0].message.content + "\n");
+   } catch (err) {
+     console.error("Error:", err.message);
+   }
+ };
+
+ run();
package/dist/errors.d.ts ADDED
@@ -0,0 +1,59 @@
+ /**
+  * PrimeLLM SDK Error Classes
+  *
+  * Typed errors for better error handling in applications.
+  */
+ export interface ErrorMeta {
+     status?: number;
+     detail?: string;
+     [key: string]: unknown;
+ }
+ /**
+  * Base error class for all PrimeLLM SDK errors
+  */
+ export declare class PrimeLLMError extends Error {
+     meta?: ErrorMeta;
+     constructor(message: string, meta?: ErrorMeta);
+ }
+ /**
+  * Authentication failed (401)
+  */
+ export declare class AuthenticationError extends PrimeLLMError {
+     constructor(message?: string, meta?: ErrorMeta);
+ }
+ /**
+  * Insufficient credits (402)
+  */
+ export declare class InsufficientCreditsError extends PrimeLLMError {
+     constructor(message?: string, meta?: ErrorMeta);
+ }
+ /**
+  * Rate limit exceeded (429)
+  */
+ export declare class RateLimitError extends PrimeLLMError {
+     retryAfter?: number;
+     constructor(message?: string, meta?: ErrorMeta, retryAfter?: number);
+ }
+ /**
+  * Resource not found (404)
+  */
+ export declare class NotFoundError extends PrimeLLMError {
+     constructor(message?: string, meta?: ErrorMeta);
+ }
+ /**
+  * Validation error (400)
+  */
+ export declare class ValidationError extends PrimeLLMError {
+     constructor(message?: string, meta?: ErrorMeta);
+ }
+ /**
+  * Server error (5xx)
+  */
+ export declare class ServerError extends PrimeLLMError {
+     constructor(message?: string, meta?: ErrorMeta);
+ }
+ /**
+  * Map HTTP status code to appropriate error class
+  */
+ export declare function createErrorFromStatus(status: number, message: string, detail?: string): PrimeLLMError;
+ //# sourceMappingURL=errors.d.ts.map
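
Because dist/index.d.ts (later in this diff) re-exports everything from `./errors.js`, these classes are importable from the package root and can be distinguished with `instanceof`. A minimal sketch of that pattern, assuming some SDK call from this diff rejects with one of the errors declared above; `doRequest` is a hypothetical stand-in, not part of the package:

```typescript
import {
  AuthenticationError,
  InsufficientCreditsError,
  RateLimitError,
  PrimeLLMError,
} from "primellm";

// `doRequest` is a hypothetical stand-in for any SDK call, e.g. a chat request.
async function withErrorHandling<T>(doRequest: () => Promise<T>): Promise<T | undefined> {
  try {
    return await doRequest();
  } catch (err) {
    if (err instanceof RateLimitError) {
      // RateLimitError optionally carries retryAfter, per the declaration above.
      console.warn("Rate limited, retry after:", err.retryAfter);
    } else if (err instanceof AuthenticationError) {
      console.error("Invalid or missing API key:", err.message);
    } else if (err instanceof InsufficientCreditsError) {
      console.error("Out of credits:", err.message);
    } else if (err instanceof PrimeLLMError) {
      // The base class exposes meta?.status and meta?.detail.
      console.error("PrimeLLM error:", err.meta?.status, err.message);
    } else {
      throw err; // not an SDK error
    }
    return undefined;
  }
}
```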
package/dist/errors.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"errors.d.ts","sourceRoot":"","sources":["../src/errors.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,MAAM,WAAW,SAAS;IACtB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CAC1B;AAED;;GAEG;AACH,qBAAa,aAAc,SAAQ,KAAK;IAC7B,IAAI,CAAC,EAAE,SAAS,CAAC;gBAEZ,OAAO,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,SAAS;CAKhD;AAED;;GAEG;AACH,qBAAa,mBAAoB,SAAQ,aAAa;gBACtC,OAAO,SAA+B,EAAE,IAAI,CAAC,EAAE,SAAS;CAIvE;AAED;;GAEG;AACH,qBAAa,wBAAyB,SAAQ,aAAa;gBAC3C,OAAO,SAAyB,EAAE,IAAI,CAAC,EAAE,SAAS;CAIjE;AAED;;GAEG;AACH,qBAAa,cAAe,SAAQ,aAAa;IACtC,UAAU,CAAC,EAAE,MAAM,CAAC;gBAEf,OAAO,SAAwB,EAAE,IAAI,CAAC,EAAE,SAAS,EAAE,UAAU,CAAC,EAAE,MAAM;CAKrF;AAED;;GAEG;AACH,qBAAa,aAAc,SAAQ,aAAa;gBAChC,OAAO,SAAuB,EAAE,IAAI,CAAC,EAAE,SAAS;CAI/D;AAED;;GAEG;AACH,qBAAa,eAAgB,SAAQ,aAAa;gBAClC,OAAO,SAAoB,EAAE,IAAI,CAAC,EAAE,SAAS;CAI5D;AAED;;GAEG;AACH,qBAAa,WAAY,SAAQ,aAAa;gBAC9B,OAAO,SAAiB,EAAE,IAAI,CAAC,EAAE,SAAS;CAIzD;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,MAAM,GAAG,aAAa,CAoBrG"}
package/dist/errors.js ADDED
@@ -0,0 +1,93 @@
+ /**
+  * PrimeLLM SDK Error Classes
+  *
+  * Typed errors for better error handling in applications.
+  */
+ /**
+  * Base error class for all PrimeLLM SDK errors
+  */
+ export class PrimeLLMError extends Error {
+     constructor(message, meta) {
+         super(message);
+         this.name = 'PrimeLLMError';
+         this.meta = meta;
+     }
+ }
+ /**
+  * Authentication failed (401)
+  */
+ export class AuthenticationError extends PrimeLLMError {
+     constructor(message = 'Invalid or missing API key', meta) {
+         super(message, meta);
+         this.name = 'AuthenticationError';
+     }
+ }
+ /**
+  * Insufficient credits (402)
+  */
+ export class InsufficientCreditsError extends PrimeLLMError {
+     constructor(message = 'Insufficient credits', meta) {
+         super(message, meta);
+         this.name = 'InsufficientCreditsError';
+     }
+ }
+ /**
+  * Rate limit exceeded (429)
+  */
+ export class RateLimitError extends PrimeLLMError {
+     constructor(message = 'Rate limit exceeded', meta, retryAfter) {
+         super(message, meta);
+         this.name = 'RateLimitError';
+         this.retryAfter = retryAfter;
+     }
+ }
+ /**
+  * Resource not found (404)
+  */
+ export class NotFoundError extends PrimeLLMError {
+     constructor(message = 'Resource not found', meta) {
+         super(message, meta);
+         this.name = 'NotFoundError';
+     }
+ }
+ /**
+  * Validation error (400)
+  */
+ export class ValidationError extends PrimeLLMError {
+     constructor(message = 'Invalid request', meta) {
+         super(message, meta);
+         this.name = 'ValidationError';
+     }
+ }
+ /**
+  * Server error (5xx)
+  */
+ export class ServerError extends PrimeLLMError {
+     constructor(message = 'Server error', meta) {
+         super(message, meta);
+         this.name = 'ServerError';
+     }
+ }
+ /**
+  * Map HTTP status code to appropriate error class
+  */
+ export function createErrorFromStatus(status, message, detail) {
+     const meta = { status, detail };
+     switch (status) {
+         case 400:
+             return new ValidationError(message, meta);
+         case 401:
+             return new AuthenticationError(message, meta);
+         case 402:
+             return new InsufficientCreditsError(message, meta);
+         case 404:
+             return new NotFoundError(message, meta);
+         case 429:
+             return new RateLimitError(message, meta);
+         default:
+             if (status >= 500) {
+                 return new ServerError(message, meta);
+             }
+             return new PrimeLLMError(message, meta);
+     }
+ }
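
The `createErrorFromStatus` helper above is what maps an HTTP status onto these subclasses (400 → ValidationError, 401 → AuthenticationError, 402 → InsufficientCreditsError, 404 → NotFoundError, 429 → RateLimitError, 5xx → ServerError, anything else → PrimeLLMError). A small sketch of that mapping in isolation, assuming the helper is re-exported from the package root as the type declarations indicate:

```typescript
import { createErrorFromStatus, RateLimitError, ServerError } from "primellm";

// 429 maps to RateLimitError and carries the status/detail in err.meta.
const rateLimited = createErrorFromStatus(429, "Rate limit exceeded", "try again later");
console.log(rateLimited instanceof RateLimitError); // true
console.log(rateLimited.meta);                      // { status: 429, detail: "try again later" }

// Any 5xx falls through to ServerError.
const backendDown = createErrorFromStatus(503, "Upstream unavailable");
console.log(backendDown instanceof ServerError);    // true
console.log(backendDown.name);                      // "ServerError"
```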
package/dist/index.d.ts CHANGED
@@ -1,34 +1,40 @@
 /**
- * PrimeLLM JavaScript SDK - Main Client
+ * PrimeLLM JavaScript SDK v0.2.0
  *
- * This is the main SDK file. Developers import this to talk to PrimeLLM
- * from JavaScript or TypeScript.
+ * Production-grade SDK with streaming, retries, and full API parity.
  *
- * Example usage:
+ * @example
+ * import PrimeLLM from "primellm";
  *
- * import { PrimeLLMClient } from "primellm";
- *
- * const client = new PrimeLLMClient({ apiKey: "primellm_live_XXX" });
- *
- * const response = await client.chat({
- *   model: "gpt-5.1",
- *   messages: [{ role: "user", content: "Hello!" }],
- * });
- *
- * console.log(response.choices[0].message.content);
+ * const client = new PrimeLLM({ apiKey: "primellm_XXX" });
+ * const response = await client.chat({
+ *   model: "gpt-5.1",
+ *   messages: [{ role: "user", content: "Hello!" }],
+ * });
+ * console.log(response.choices[0].message.content);
  */
- import { ChatRequest, ChatResponse, GenerateRequest, GenerateResponse, PrimeLLMClientOptions } from "./types.js";
+ import { ChatRequest, ChatResponse, Message, PrimeLLMClientOptions, EmbeddingsRequest, EmbeddingsResponse, ModelsResponse, CreditsResponse, KeysResponse, KeyCreateResponse } from "./types.js";
+ import { StreamChunk } from "./streaming.js";
 export * from "./types.js";
+ export * from "./errors.js";
+ export { countTokens, setTokenizerAdapter } from "./tokenizer.js";
+ export { StreamChunk } from "./streaming.js";
 /**
  * PrimeLLM API Client
  *
- * This class handles all communication with the PrimeLLM API.
- * It provides methods for chat, completions, and the legacy generate endpoint.
+ * Production-grade client with streaming, retries, and full API access.
  */
- export declare class PrimeLLMClient {
+ export declare class PrimeLLM {
     private apiKey;
     private baseURL;
     private timeoutMs;
+     private retry;
+     embeddings: EmbeddingsClient;
+     models: ModelsClient;
+     keys: KeysClient;
+     credits: CreditsClient;
+     tokens: TokensClient;
+     chat: ChatClient;
     /**
      * Create a new PrimeLLM client.
      *
@@ -36,89 +42,107 @@ export declare class PrimeLLMClient {
      * @param options.apiKey - Your PrimeLLM API key (required)
      * @param options.baseURL - API base URL (default: "https://api.primellm.in")
      * @param options.timeoutMs - Request timeout in ms (default: 60000)
-      *
-      * @example
-      * const client = new PrimeLLMClient({
-      *   apiKey: "primellm_live_XXX",
-      * });
+      * @param options.maxRetries - Max retry attempts (default: 3)
      */
     constructor(options: PrimeLLMClientOptions);
     /**
-      * Internal helper to make API requests.
-      * Handles authentication, JSON parsing, and error handling.
+      * Internal HTTP request with retries and error handling
      */
-     private request;
+     request<TResponse>(path: string, body?: unknown, options?: {
+         method?: string;
+     }): Promise<TResponse>;
     /**
-      * Send a chat completion request using /v1/chat endpoint.
-      *
-      * This is the recommended method for most use cases.
-      * Returns an OpenAI-compatible response format.
-      *
-      * @param request - The chat request with model and messages
-      * @returns The chat response with choices, usage, and credits
-      *
-      * @example
-      * const response = await client.chat({
-      *   model: "gpt-5.1",
-      *   messages: [
-      *     { role: "system", content: "You are a helpful assistant." },
-      *     { role: "user", content: "What is TypeScript?" },
-      *   ],
-      * });
-      * console.log(response.choices[0].message.content);
+      * Internal streaming request
      */
-     chat(request: ChatRequest): Promise<ChatResponse>;
+     streamRequest(path: string, body: unknown): AsyncGenerator<StreamChunk, void, unknown>;
+ }
+ /**
+  * Chat sub-client
+  */
+ declare class ChatClient {
+     private client;
+     constructor(client: PrimeLLM);
     /**
-      * Send a chat completion request using /v1/chat/completions endpoint.
-      *
-      * This is an alternative endpoint that also returns OpenAI-compatible format.
-      * Use this if you need compatibility with OpenAI's exact endpoint path.
-      *
-      * @param request - The chat request with model and messages
-      * @returns The chat response with choices, usage, and credits
+      * Send a chat completion request
      */
-     completions(request: ChatRequest): Promise<ChatResponse>;
+     create(request: ChatRequest): Promise<ChatResponse>;
     /**
-      * Send a request to the legacy /generate endpoint.
-      *
-      * This endpoint returns a different response format than chat().
-      * Use chat() for new projects; this is for backwards compatibility.
-      *
-      * @param request - The generate request with model and messages
-      * @returns The generate response with reply, tokens_used, cost
+      * Stream chat completion (async iterator)
      *
      * @example
-      * const response = await client.generate({
-      *   model: "gpt-5.1",
-      *   messages: [{ role: "user", content: "Hello!" }],
-      * });
-      * console.log(response.reply);
+      * for await (const chunk of client.chat.stream({...})) {
+      *   console.log(chunk.delta?.content);
+      * }
      */
-     generate(request: GenerateRequest): Promise<GenerateResponse>;
+     stream(request: ChatRequest): AsyncGenerator<StreamChunk, void, unknown>;
+ }
+ /**
+  * Embeddings sub-client
+  */
+ declare class EmbeddingsClient {
+     private client;
+     constructor(client: PrimeLLM);
     /**
-      * Stream a chat completion response.
-      *
-      * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
-      *
-      * @throws Error always - streaming not supported in this version
+      * Create embeddings for input text
      */
-     streamChat(_request: ChatRequest): AsyncGenerator<ChatResponse, void, unknown>;
+     create(request: EmbeddingsRequest): Promise<EmbeddingsResponse>;
+ }
+ /**
+  * Models sub-client
+  */
+ declare class ModelsClient {
+     private client;
+     constructor(client: PrimeLLM);
     /**
-      * Stream a completions response.
-      *
-      * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
-      *
-      * @throws Error always - streaming not supported in this version
+      * List available models
      */
-     streamCompletions(_request: ChatRequest): AsyncGenerator<ChatResponse, void, unknown>;
+     list(): Promise<ModelsResponse>;
+ }
+ /**
+  * Keys sub-client
+  */
+ declare class KeysClient {
+     private client;
+     constructor(client: PrimeLLM);
     /**
-      * Stream a generate response.
-      *
-      * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
-      *
-      * @throws Error always - streaming not supported in this version
+      * List API keys
+      */
+     list(): Promise<KeysResponse>;
+     /**
+      * Create a new API key
+      */
+     create(label?: string): Promise<KeyCreateResponse>;
+     /**
+      * Revoke an API key
+      */
+     revoke(keyId: number): Promise<{
+         ok: boolean;
+     }>;
+ }
+ /**
+  * Credits sub-client
+  */
+ declare class CreditsClient {
+     private client;
+     constructor(client: PrimeLLM);
+     /**
+      * Get current credit balance
+      */
+     get(): Promise<CreditsResponse>;
+ }
+ /**
+  * Tokens sub-client (utility)
+  */
+ declare class TokensClient {
+     /**
+      * Count tokens in text or messages
+      */
+     count(input: string | Message[]): number;
+     /**
+      * Set custom tokenizer adapter
      */
-     streamGenerate(_request: GenerateRequest): AsyncGenerator<GenerateResponse, void, unknown>;
+     setAdapter(adapter: ((text: string) => number) | null): void;
 }
- export default PrimeLLMClient;
+ export { PrimeLLM as PrimeLLMClient };
+ export default PrimeLLM;
 //# sourceMappingURL=index.d.ts.map
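
The rewritten declarations describe a namespaced client: `chat`, `embeddings`, `models`, `keys`, `credits`, and `tokens` sub-clients hang off the `PrimeLLM` instance, and `chat.stream()` is declared as an async generator of `StreamChunk` values (the JSDoc example above reads `chunk.delta?.content`). A minimal sketch exercising that surface, based only on the signatures in this file; the model name and prompts are placeholders:

```typescript
import PrimeLLM from "primellm";

const client = new PrimeLLM({ apiKey: "primellm_XXX" });

async function demo(): Promise<void> {
  // Non-streaming chat via the ChatClient sub-client.
  const reply = await client.chat.create({
    model: "gpt-5.1",
    messages: [{ role: "user", content: "Hello!" }],
  });
  console.log(reply.choices[0].message.content);

  // Streaming chat: stream() yields StreamChunk values.
  for await (const chunk of client.chat.stream({
    model: "gpt-5.1",
    messages: [{ role: "user", content: "Tell me a short joke." }],
  })) {
    process.stdout.write(chunk.delta?.content ?? "");
  }

  // Other sub-clients declared above.
  const models = await client.models.list();
  const credits = await client.credits.get();
  const approxTokens = client.tokens.count("How many tokens is this?");
  console.log(models, credits, approxTokens);
}

demo().catch((err) => console.error(err));
```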
package/dist/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;GAkBG;AAEH,OAAO,EACH,WAAW,EACX,YAAY,EACZ,eAAe,EACf,gBAAgB,EAChB,qBAAqB,EACxB,MAAM,YAAY,CAAC;AAGpB,cAAc,YAAY,CAAC;AAE3B;;;;;GAKG;AACH,qBAAa,cAAc;IACvB,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,OAAO,CAAS;IACxB,OAAO,CAAC,SAAS,CAAS;IAE1B;;;;;;;;;;;;OAYG;gBACS,OAAO,EAAE,qBAAqB;IAS1C;;;OAGG;YACW,OAAO;IAsCrB;;;;;;;;;;;;;;;;;;OAkBG;IACG,IAAI,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IAIvD;;;;;;;;OAQG;IACG,WAAW,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IAI9D;;;;;;;;;;;;;;;OAeG;IACG,QAAQ,CAAC,OAAO,EAAE,eAAe,GAAG,OAAO,CAAC,gBAAgB,CAAC;IAQnE;;;;;;OAMG;IACI,UAAU,CACb,QAAQ,EAAE,WAAW,GACtB,cAAc,CAAC,YAAY,EAAE,IAAI,EAAE,OAAO,CAAC;IAQ9C;;;;;;OAMG;IACI,iBAAiB,CACpB,QAAQ,EAAE,WAAW,GACtB,cAAc,CAAC,YAAY,EAAE,IAAI,EAAE,OAAO,CAAC;IAO9C;;;;;;OAMG;IACI,cAAc,CACjB,QAAQ,EAAE,eAAe,GAC1B,cAAc,CAAC,gBAAgB,EAAE,IAAI,EAAE,OAAO,CAAC;CAMrD;AAGD,eAAe,cAAc,CAAC"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG;AAEH,OAAO,EACH,WAAW,EACX,YAAY,EACZ,OAAO,EACP,qBAAqB,EACrB,iBAAiB,EACjB,kBAAkB,EAClB,cAAc,EACd,eAAe,EACf,YAAY,EACZ,iBAAiB,EACpB,MAAM,YAAY,CAAC;AAWpB,OAAO,EAAgB,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAG3D,cAAc,YAAY,CAAC;AAC3B,cAAc,aAAa,CAAC;AAC5B,OAAO,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAClE,OAAO,EAAE,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAiC7C;;;;GAIG;AACH,qBAAa,QAAQ;IACjB,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,OAAO,CAAS;IACxB,OAAO,CAAC,SAAS,CAAS;IAC1B,OAAO,CAAC,KAAK,CAAc;IAGpB,UAAU,EAAE,gBAAgB,CAAC;IAC7B,MAAM,EAAE,YAAY,CAAC;IACrB,IAAI,EAAE,UAAU,CAAC;IACjB,OAAO,EAAE,aAAa,CAAC;IACvB,MAAM,EAAE,YAAY,CAAC;IACrB,IAAI,EAAE,UAAU,CAAC;IAExB;;;;;;;;OAQG;gBACS,OAAO,EAAE,qBAAqB;IAqB1C;;OAEG;IACG,OAAO,CAAC,SAAS,EACnB,IAAI,EAAE,MAAM,EACZ,IAAI,CAAC,EAAE,OAAO,EACd,OAAO,CAAC,EAAE;QAAE,MAAM,CAAC,EAAE,MAAM,CAAA;KAAE,GAC9B,OAAO,CAAC,SAAS,CAAC;IAsErB;;OAEG;IACI,aAAa,CAChB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACd,cAAc,CAAC,WAAW,EAAE,IAAI,EAAE,OAAO,CAAC;CA8BhD;AAED;;GAEG;AACH,cAAM,UAAU;IACA,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,QAAQ;IAEpC;;OAEG;IACG,MAAM,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IAIzD;;;;;;;OAOG;IACI,MAAM,CAAC,OAAO,EAAE,WAAW,GAAG,cAAc,CAAC,WAAW,EAAE,IAAI,EAAE,OAAO,CAAC;CAGlF;AAED;;GAEG;AACH,cAAM,gBAAgB;IACN,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,QAAQ;IAEpC;;OAEG;IACG,MAAM,CAAC,OAAO,EAAE,iBAAiB,GAAG,OAAO,CAAC,kBAAkB,CAAC;CAGxE;AAED;;GAEG;AACH,cAAM,YAAY;IACF,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,QAAQ;IAEpC;;OAEG;IACG,IAAI,IAAI,OAAO,CAAC,cAAc,CAAC;CAGxC;AAED;;GAEG;AACH,cAAM,UAAU;IACA,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,QAAQ;IAEpC;;OAEG;IACG,IAAI,IAAI,OAAO,CAAC,YAAY,CAAC;IAInC;;OAEG;IACG,MAAM,CAAC,KAAK,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC,iBAAiB,CAAC;IAIxD;;OAEG;IACG,MAAM,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC;QAAE,EAAE,EAAE,OAAO,CAAA;KAAE,CAAC;CAGxD;AAED;;GAEG;AACH,cAAM,aAAa;IACH,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,QAAQ;IAEpC;;OAEG;IACG,GAAG,IAAI,OAAO,CAAC,eAAe,CAAC;CAGxC;AAED;;GAEG;AACH,cAAM,YAAY;IACd;;OAEG;IACH,KAAK,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,EAAE,GAAG,MAAM;IAIxC;;OAEG;IACH,UAAU,CAAC,OAAO,EAAE,CAAC,CAAC,IAAI,EAAE,MAAM,KAAK,MAAM,CAAC,GAAG,IAAI,GAAG,IAAI;CAG/D;AAGD,OAAO,EAAE,QAAQ,IAAI,cAAc,EAAE,CAAC;AAGtC,eAAe,QAAQ,CAAC"}