@krutai/ai-provider 0.2.4 → 0.2.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AI_REFERENCE.md CHANGED
@@ -85,6 +85,9 @@ await ai.initialize();
85
85
  - `initialize(): Promise<void>` — validates key against server, marks provider ready
86
86
  - `generate(prompt, opts?): Promise<string>` — single response (non-streaming)
87
87
  - `stream(prompt, opts?)` — `AsyncGenerator<string>` — SSE-based streaming
88
+ - `streamResponse(prompt, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying
89
+ - `streamChat(messages, opts?)` — `AsyncGenerator<string>` — SSE multi-turn streaming
90
+ - `streamChatResponse(messages, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying
88
91
  - `chat(messages, opts?): Promise<string>` — multi-turn conversation
89
92
  - `getModel(): string` — active model name
90
93
  - `isInitialized(): boolean`
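The streaming entries above come in two flavors: `stream()`/`streamChat()` yield decoded text chunks, while the `*Response` variants resolve to the untouched fetch `Response` so it can be forwarded elsewhere. A minimal sketch, assuming an already-initialized provider `ai`:

```typescript
// Consume decoded chunks in-process (async generator).
for await (const chunk of ai.stream('Summarize this repository')) {
  process.stdout.write(chunk);
}

// Or take the raw SSE Response and hand it off, e.g. return it from an API route.
const sse: Response = await ai.streamResponse('Summarize this repository');
```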
@@ -102,12 +105,14 @@ interface KrutAIProviderConfig {
102
105
  }
103
106
  ```
104
107
 
105
- ### `ChatMessage`
106
-
107
108
  ```typescript
109
+ type ContentPart =
110
+ | { type: 'text'; text: string }
111
+ | { type: 'image_url'; image_url: { url: string; detail?: 'low' | 'high' | 'auto' } };
112
+
108
113
  interface ChatMessage {
109
114
  role: 'user' | 'assistant' | 'system';
110
- content: string;
115
+ content: string | ContentPart[];
111
116
  }
112
117
  ```
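Concretely, a message can keep the plain-string form or switch to content parts. A minimal sketch of the widened shape (the URL is illustrative):

```typescript
const msg: ChatMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'What is in this picture?' },
    // image_url accepts an HTTP(S) URL or a base64 data URI
    { type: 'image_url', image_url: { url: 'https://example.com/cat.png', detail: 'low' } },
  ],
};
```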
113
118
 
@@ -119,6 +124,9 @@ interface GenerateOptions {
119
124
  system?: string; // system prompt
120
125
  maxTokens?: number;
121
126
  temperature?: number;
127
+ images?: string[]; // Array of image URLs or base64 data URIs
128
+ documents?: string[]; // Array of document URLs or base64 data URIs
129
+ pdf?: string[]; // Array of PDF URLs or base64 data URIs
122
130
  }
123
131
  ```
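The new attachment options ride along on any call that accepts `GenerateOptions`. A minimal sketch using `generate()`, assuming an initialized provider `ai` and illustrative URLs:

```typescript
const summary = await ai.generate('Summarize the attached material.', {
  model: 'gpt-4o',                               // per-call model override
  images: ['https://example.com/chart.png'],     // image URLs or base64 data URIs
  documents: ['https://example.com/notes.docx'], // document URLs or base64 data URIs
  pdf: ['https://example.com/report.pdf'],       // PDF URLs or base64 data URIs
  maxTokens: 512,
});
console.log(summary);
```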
124
132
 
package/README.md CHANGED
@@ -91,6 +91,68 @@ const response = await ai.chat([
91
91
  console.log(response);
92
92
  ```
93
93
 
94
+ ### Multimodal Messages (Images)
95
+
96
+ For vision-capable models, you can pass an array of `ContentPart`s instead of a plain string:
97
+
98
+ ```typescript
99
+ const response = await ai.chat([
100
+ {
101
+ role: 'user',
102
+ content: [
103
+ { type: 'text', text: 'Describe this image for me.' },
104
+ {
105
+ type: 'image_url',
106
+ image_url: { url: 'https://example.com/logo.png' }
107
+ }
108
+ ]
109
+ }
110
+ ], {
111
+ model: 'gpt-4o',
112
+ // You can also pass images, documents, or PDFs via GenerateOptions
113
+ images: ['https://example.com/photo.jpg'],
114
+ documents: ['https://example.com/doc.docx'],
115
+ pdf: ['https://example.com/report.pdf']
116
+ });
117
+ ```
118
+
119
+ ### Streaming Multi-turn Chat
120
+
121
+ ```typescript
122
+ const ai = krutAI({
123
+ apiKey: process.env.KRUTAI_API_KEY!,
124
+ });
125
+
126
+ await ai.initialize();
127
+
128
+ const stream = ai.streamChat([
129
+ { role: 'user', content: 'What is the capital of France?' },
130
+ { role: 'assistant', content: 'Paris.' },
131
+ { role: 'user', content: 'What is it famous for?' },
132
+ ]);
133
+
134
+ for await (const chunk of stream) {
135
+ process.stdout.write(chunk);
136
+ }
137
+ ```
138
+
139
+ ### Proxying Streams to the Frontend (Next.js / API Routes)
140
+
141
+ If you are building an API route (e.g., in Next.js) and want to pipe the raw Server-Sent Events (SSE) stream straight down to your frontend, use the `Response` variants:
142
+
143
+ ```typescript
144
+ // app/api/chat/route.ts
145
+ export async function POST(req: Request) {
146
+ const { messages } = await req.json();
147
+
148
+ // Returns the native fetch Response (with text/event-stream headers and body)
149
+ const response = await ai.streamChatResponse(messages);
150
+
151
+ // Proxy it directly to the frontend!
152
+ return response;
153
+ }
154
+ ```
155
+
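On the other side of that proxy, the browser still has to read the SSE body itself. A minimal client-side sketch, assuming the route above lives at `/api/chat` and that the upstream server emits `data: ...` lines (with a final `data: [DONE]`), matching what the provider's own stream parser expects; names and payload shapes here are assumptions:

```typescript
// Hypothetical client helper for consuming the proxied SSE stream.
async function readChat(messages: { role: string; content: string }[]) {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep the last, possibly incomplete line
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const raw = line.slice(6).trim();
      if (raw === '[DONE]') return;
      try {
        const parsed = JSON.parse(raw);
        const chunk = parsed.text ?? parsed.content ?? parsed.delta?.content ?? '';
        if (chunk) console.log(chunk); // render incrementally in your UI instead
      } catch {
        if (raw) console.log(raw); // non-JSON SSE payloads pass through as-is
      }
    }
  }
}
```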
94
156
  ### Skip validation (useful for tests)
95
157
 
96
158
  ```typescript
package/dist/index.d.mts CHANGED
@@ -37,12 +37,28 @@ interface KrutAIProviderConfig {
37
37
  */
38
38
  validateOnInit?: boolean;
39
39
  }
40
+ /**
41
+ * A part of a multimodal message
42
+ */
43
+ interface TextContentPart {
44
+ type: 'text';
45
+ text: string;
46
+ }
47
+ interface ImageContentPart {
48
+ type: 'image_url';
49
+ image_url: {
50
+ url: string;
51
+ detail?: 'low' | 'high' | 'auto';
52
+ };
53
+ }
54
+ type ContentPart = TextContentPart | ImageContentPart;
55
+ type MessageContent = string | ContentPart[];
40
56
  /**
41
57
  * A single chat message
42
58
  */
43
59
  interface ChatMessage {
44
60
  role: 'user' | 'assistant' | 'system';
45
- content: string;
61
+ content: MessageContent;
46
62
  }
47
63
  /**
48
64
  * Options for a single generate / stream / chat call
@@ -64,6 +80,18 @@ interface GenerateOptions {
64
80
  * Temperature (0–2).
65
81
  */
66
82
  temperature?: number;
83
+ /**
84
+ * Array of image URLs or base64 data URIs to include with the request.
85
+ */
86
+ images?: string[];
87
+ /**
88
+ * Array of document URLs or base64 data URIs (e.g. PDFs) to include with the request.
89
+ */
90
+ documents?: string[];
91
+ /**
92
+ * Array of PDF URLs or base64 data URIs to include with the request.
93
+ */
94
+ pdf?: string[];
67
95
  }
68
96
 
69
97
  /**
@@ -150,6 +178,34 @@ declare class KrutAIProvider {
150
178
  * ```
151
179
  */
152
180
  stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
181
+ /**
182
+ * Similar to stream() but returns the raw fetch Response object.
183
+ * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
184
+ * (e.g., returning this directly from a Next.js API route).
185
+ *
186
+ * @param prompt - The user prompt string
187
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
188
+ * @returns A Promise resolving to the native fetch Response
189
+ */
190
+ streamResponse(prompt: string, options?: GenerateOptions): Promise<Response>;
191
+ /**
192
+ * Multi-turn conversation streaming: pass a full message history.
193
+ * Calls POST /stream with the full { messages } payload.
194
+ *
195
+ * @param messages - Full conversation history
196
+ * @param options - Optional overrides (model, maxTokens, temperature)
197
+ * @returns An async generator yielding string chunks from the server
198
+ */
199
+ streamChat(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string>;
200
+ /**
201
+ * Similar to streamChat() but returns the raw fetch Response object.
202
+ * Useful for proxying the Server-Sent Events stream directly to a frontend client.
203
+ *
204
+ * @param messages - Full conversation history
205
+ * @param options - Optional overrides (model, maxTokens, temperature)
206
+ * @returns A Promise resolving to the native fetch Response
207
+ */
208
+ streamChatResponse(messages: ChatMessage[], options?: GenerateOptions): Promise<Response>;
153
209
  /**
154
210
  * Multi-turn conversation: pass a full message history.
155
211
  *
package/dist/index.d.ts CHANGED
@@ -37,12 +37,28 @@ interface KrutAIProviderConfig {
37
37
  */
38
38
  validateOnInit?: boolean;
39
39
  }
40
+ /**
41
+ * A part of a multimodal message
42
+ */
43
+ interface TextContentPart {
44
+ type: 'text';
45
+ text: string;
46
+ }
47
+ interface ImageContentPart {
48
+ type: 'image_url';
49
+ image_url: {
50
+ url: string;
51
+ detail?: 'low' | 'high' | 'auto';
52
+ };
53
+ }
54
+ type ContentPart = TextContentPart | ImageContentPart;
55
+ type MessageContent = string | ContentPart[];
40
56
  /**
41
57
  * A single chat message
42
58
  */
43
59
  interface ChatMessage {
44
60
  role: 'user' | 'assistant' | 'system';
45
- content: string;
61
+ content: MessageContent;
46
62
  }
47
63
  /**
48
64
  * Options for a single generate / stream / chat call
@@ -64,6 +80,18 @@ interface GenerateOptions {
64
80
  * Temperature (0–2).
65
81
  */
66
82
  temperature?: number;
83
+ /**
84
+ * Array of image URLs or base64 data URIs to include with the request.
85
+ */
86
+ images?: string[];
87
+ /**
88
+ * Array of document URLs or base64 data URIs (e.g. PDFs) to include with the request.
89
+ */
90
+ documents?: string[];
91
+ /**
92
+ * Array of PDF URLs or base64 data URIs to include with the request.
93
+ */
94
+ pdf?: string[];
67
95
  }
68
96
 
69
97
  /**
@@ -150,6 +178,34 @@ declare class KrutAIProvider {
150
178
  * ```
151
179
  */
152
180
  stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
181
+ /**
182
+ * Similar to stream() but returns the raw fetch Response object.
183
+ * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
184
+ * (e.g., returning this directly from a Next.js API route).
185
+ *
186
+ * @param prompt - The user prompt string
187
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
188
+ * @returns A Promise resolving to the native fetch Response
189
+ */
190
+ streamResponse(prompt: string, options?: GenerateOptions): Promise<Response>;
191
+ /**
192
+ * Multi-turn conversation streaming: pass a full message history.
193
+ * Calls POST /stream with the full { messages } payload.
194
+ *
195
+ * @param messages - Full conversation history
196
+ * @param options - Optional overrides (model, maxTokens, temperature)
197
+ * @returns An async generator yielding string chunks from the server
198
+ */
199
+ streamChat(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string>;
200
+ /**
201
+ * Similar to streamChat() but returns the raw fetch Response object.
202
+ * Useful for proxying the Server-Sent Events stream directly to a frontend client.
203
+ *
204
+ * @param messages - Full conversation history
205
+ * @param options - Optional overrides (model, maxTokens, temperature)
206
+ * @returns A Promise resolving to the native fetch Response
207
+ */
208
+ streamChatResponse(messages: ChatMessage[], options?: GenerateOptions): Promise<Response>;
153
209
  /**
154
210
  * Multi-turn conversation: pass a full message history.
155
211
  *
package/dist/index.js CHANGED
@@ -88,14 +88,22 @@ var KrutAIProvider = class {
88
88
  prompt,
89
89
  model,
90
90
  ...options.system !== void 0 ? { system: options.system } : {},
91
+ ...options.images !== void 0 ? { images: options.images } : {},
92
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
93
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
91
94
  ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
92
95
  ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
93
96
  })
94
97
  });
95
98
  if (!response.ok) {
96
- throw new Error(
97
- `AI server returned HTTP ${response.status} for /generate`
98
- );
99
+ let errorMessage = `AI server returned HTTP ${response.status} for /generate`;
100
+ try {
101
+ const errorData = await response.json();
102
+ if (errorData?.error) errorMessage = errorData.error;
103
+ else if (errorData?.message) errorMessage = errorData.message;
104
+ } catch {
105
+ }
106
+ throw new Error(errorMessage);
99
107
  }
100
108
  const data = await response.json();
101
109
  return data.text ?? data.content ?? data.message ?? "";
@@ -132,14 +140,22 @@ var KrutAIProvider = class {
132
140
  prompt,
133
141
  model,
134
142
  ...options.system !== void 0 ? { system: options.system } : {},
143
+ ...options.images !== void 0 ? { images: options.images } : {},
144
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
145
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
135
146
  ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
136
147
  ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
137
148
  })
138
149
  });
139
150
  if (!response.ok) {
140
- throw new Error(
141
- `AI server returned HTTP ${response.status} for /stream`
142
- );
151
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
152
+ try {
153
+ const errorData = await response.json();
154
+ if (errorData?.error) errorMessage = errorData.error;
155
+ else if (errorData?.message) errorMessage = errorData.message;
156
+ } catch {
157
+ }
158
+ throw new Error(errorMessage);
143
159
  }
144
160
  if (!response.body) {
145
161
  throw new Error("AI server returned no response body for /stream");
@@ -172,6 +188,163 @@ var KrutAIProvider = class {
172
188
  reader.releaseLock();
173
189
  }
174
190
  }
191
+ /**
192
+ * Similar to stream() but returns the raw fetch Response object.
193
+ * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
194
+ * (e.g., returning this directly from a Next.js API route).
195
+ *
196
+ * @param prompt - The user prompt string
197
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
198
+ * @returns A Promise resolving to the native fetch Response
199
+ */
200
+ async streamResponse(prompt, options = {}) {
201
+ this.assertInitialized();
202
+ const model = options.model ?? this.resolvedModel;
203
+ const response = await fetch(`${this.serverUrl}/stream`, {
204
+ method: "POST",
205
+ headers: {
206
+ ...this.authHeaders(),
207
+ Accept: "text/event-stream"
208
+ },
209
+ body: JSON.stringify({
210
+ prompt,
211
+ model,
212
+ ...options.system !== void 0 ? { system: options.system } : {},
213
+ ...options.images !== void 0 ? { images: options.images } : {},
214
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
215
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
216
+ ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
217
+ ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
218
+ })
219
+ });
220
+ if (!response.ok) {
221
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
222
+ try {
223
+ const errorData = await response.json();
224
+ if (errorData?.error) errorMessage = errorData.error;
225
+ else if (errorData?.message) errorMessage = errorData.message;
226
+ } catch {
227
+ }
228
+ throw new Error(errorMessage);
229
+ }
230
+ console.log(response);
231
+ return response;
232
+ }
233
+ /**
234
+ * Multi-turn conversation streaming: pass a full message history.
235
+ * Calls POST /stream with the full { messages } payload.
236
+ *
237
+ * @param messages - Full conversation history
238
+ * @param options - Optional overrides (model, maxTokens, temperature)
239
+ * @returns An async generator yielding string chunks from the server
240
+ */
241
+ async *streamChat(messages, options = {}) {
242
+ this.assertInitialized();
243
+ if (!messages.length) {
244
+ throw new Error("Messages array cannot be empty for streamChat");
245
+ }
246
+ const model = options.model ?? this.resolvedModel;
247
+ const response = await fetch(`${this.serverUrl}/stream`, {
248
+ method: "POST",
249
+ headers: {
250
+ ...this.authHeaders(),
251
+ Accept: "text/event-stream"
252
+ },
253
+ body: JSON.stringify({
254
+ messages,
255
+ model,
256
+ ...options.system !== void 0 ? { system: options.system } : {},
257
+ ...options.images !== void 0 ? { images: options.images } : {},
258
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
259
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
260
+ ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
261
+ ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
262
+ })
263
+ });
264
+ if (!response.ok) {
265
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
266
+ try {
267
+ const errorData = await response.json();
268
+ if (errorData?.error) errorMessage = errorData.error;
269
+ else if (errorData?.message) errorMessage = errorData.message;
270
+ } catch {
271
+ }
272
+ throw new Error(errorMessage);
273
+ }
274
+ if (!response.body) {
275
+ throw new Error("AI server returned no response body for /stream");
276
+ }
277
+ const reader = response.body.getReader();
278
+ const decoder = new TextDecoder();
279
+ let buffer = "";
280
+ try {
281
+ while (true) {
282
+ const { done, value } = await reader.read();
283
+ if (done) break;
284
+ buffer += decoder.decode(value, { stream: true });
285
+ const lines = buffer.split("\n");
286
+ buffer = lines.pop() ?? "";
287
+ for (const line of lines) {
288
+ if (line.startsWith("data: ")) {
289
+ const raw = line.slice(6).trim();
290
+ if (raw === "[DONE]") return;
291
+ try {
292
+ const parsed = JSON.parse(raw);
293
+ const chunk = parsed.text ?? parsed.content ?? parsed.delta?.content ?? "";
294
+ if (chunk) yield chunk;
295
+ } catch {
296
+ if (raw) yield raw;
297
+ }
298
+ }
299
+ }
300
+ }
301
+ } finally {
302
+ reader.releaseLock();
303
+ }
304
+ }
305
+ /**
306
+ * Similar to streamChat() but returns the raw fetch Response object.
307
+ * Useful for proxying the Server-Sent Events stream directly to a frontend client.
308
+ *
309
+ * @param messages - Full conversation history
310
+ * @param options - Optional overrides (model, maxTokens, temperature)
311
+ * @returns A Promise resolving to the native fetch Response
312
+ */
313
+ async streamChatResponse(messages, options = {}) {
314
+ this.assertInitialized();
315
+ if (!messages.length) {
316
+ throw new Error("Messages array cannot be empty for streamChatResponse");
317
+ }
318
+ const model = options.model ?? this.resolvedModel;
319
+ const response = await fetch(`${this.serverUrl}/stream`, {
320
+ method: "POST",
321
+ headers: {
322
+ ...this.authHeaders(),
323
+ Accept: "text/event-stream"
324
+ },
325
+ body: JSON.stringify({
326
+ messages,
327
+ model,
328
+ ...options.system !== void 0 ? { system: options.system } : {},
329
+ ...options.images !== void 0 ? { images: options.images } : {},
330
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
331
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
332
+ ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
333
+ ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
334
+ })
335
+ });
336
+ if (!response.ok) {
337
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
338
+ try {
339
+ const errorData = await response.json();
340
+ if (errorData?.error) errorMessage = errorData.error;
341
+ else if (errorData?.message) errorMessage = errorData.message;
342
+ } catch {
343
+ }
344
+ throw new Error(errorMessage);
345
+ }
346
+ return response;
347
+ }
175
348
  /**
176
349
  * Multi-turn conversation: pass a full message history.
177
350
  *
@@ -192,14 +365,22 @@ var KrutAIProvider = class {
192
365
  body: JSON.stringify({
193
366
  messages,
194
367
  model,
368
+ ...options.images !== void 0 ? { images: options.images } : {},
369
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
370
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
195
371
  ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
196
372
  ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
197
373
  })
198
374
  });
199
375
  if (!response.ok) {
200
- throw new Error(
201
- `AI server returned HTTP ${response.status} for /chat`
202
- );
376
+ let errorMessage = `AI server returned HTTP ${response.status} for /chat`;
377
+ try {
378
+ const errorData = await response.json();
379
+ if (errorData?.error) errorMessage = errorData.error;
380
+ else if (errorData?.message) errorMessage = errorData.message;
381
+ } catch {
382
+ }
383
+ throw new Error(errorMessage);
203
384
  }
204
385
  const data = await response.json();
205
386
  return data.text ?? data.content ?? data.message ?? "";
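For reference, the chunk extraction in `stream()`/`streamChat()` above accepts several payload shapes on each `data:` line. A short sketch of that fallback chain, using a hypothetical helper that mirrors the dist parser (the exact wire format is up to the backend):

```typescript
// Hypothetical helper mirroring the parser's fallback chain in dist/index.js.
function extractChunk(raw: string): string | null {
  if (raw === '[DONE]') return null; // end-of-stream sentinel
  try {
    const parsed = JSON.parse(raw);
    return parsed.text ?? parsed.content ?? parsed.delta?.content ?? '';
  } catch {
    return raw; // non-JSON payloads are yielded verbatim
  }
}

extractChunk('{"text":"Hel"}');                // "Hel"
extractChunk('{"delta":{"content":"world"}}'); // "world"
extractChunk('[DONE]');                        // null (generator returns)
```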
package/dist/index.js.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"sources":["../src/types.ts","../src/client.ts","../src/index.ts"],"names":["validateApiKeyFormat","validateApiKey"],"mappings":";;;;;AAQO,IAAM,aAAA,GAAgB;AAMtB,IAAM,kBAAA,GAAqB,uBAAA;ACuB3B,IAAM,iBAAN,MAAqB;AAAA,EACP,MAAA;AAAA,EACA,SAAA;AAAA,EACA,aAAA;AAAA,EACA,MAAA;AAAA,EAET,WAAA,GAAc,KAAA;AAAA,EAEtB,YAAY,MAAA,EAA8B;AACtC,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA,CAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,cAAA,IAAkB,EAAA;AAC7D,IAAA,IAAA,CAAK,aAAa,MAAA,CAAO,SAAA,IAAa,kBAAA,EAAoB,OAAA,CAAQ,OAAO,EAAE,CAAA;AAC3E,IAAA,IAAA,CAAK,aAAA,GAAgB,OAAO,KAAA,IAAS,aAAA;AAGrC,IAAAA,2BAAA,CAAqB,KAAK,MAAM,CAAA;AAGhC,IAAA,IAAI,MAAA,CAAO,mBAAmB,KAAA,EAAO;AACjC,MAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,UAAA,GAA4B;AAC9B,IAAA,IAAI,KAAK,WAAA,EAAa;AAEtB,IAAA,IAAI,IAAA,CAAK,MAAA,CAAO,cAAA,KAAmB,KAAA,EAAO;AACtC,MAAA,MAAMC,gCAAA,CAAe,IAAA,CAAK,MAAA,EAAQ,IAAA,CAAK,SAAS,CAAA;AAAA,IACpD;AAEA,IAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA,EAKA,QAAA,GAAmB;AACf,IAAA,OAAO,IAAA,CAAK,aAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,aAAA,GAAyB;AACrB,IAAA,OAAO,IAAA,CAAK,WAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAA,GAA0B;AAC9B,IAAA,IAAI,CAAC,KAAK,WAAA,EAAa;AACnB,MAAA,MAAM,IAAI,KAAA;AAAA,QACN;AAAA,OACJ;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA,EAGQ,WAAA,GAAsC;AAC1C,IAAA,OAAO;AAAA,MACH,cAAA,EAAgB,kBAAA;AAAA,MAChB,aAAA,EAAe,CAAA,OAAA,EAAU,IAAA,CAAK,MAAM,CAAA,CAAA;AAAA,MACpC,aAAa,IAAA,CAAK;AAAA,KACtB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,QAAA,CAAS,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAAoB;AAC3E,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,SAAA,CAAA,EAAa;AAAA,MACvD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,IAAI,KAAA;AAAA,QACN,CAAA,wBAAA,EAA2B,SAAS,MAAM,CAAA,cAAA;AAAA,OAC9C;AAAA,IACJ;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,OAAO,MAAA,CAAO,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAA2B;AACjF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,IAAI,KAAA;AAAA,QACN,CAAA,wBAAA,EAA2B,SAAS,MAAM,CAAA,YAAA;AAAA,OAC9C;AAAA,IACJ;AAEA,IAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAChB,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACrE;AAEA,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,IAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,IAAA,IAAI,MAAA,GAA
S,EAAA;AAEb,IAAA,IAAI;AACA,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAE/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,GAAA,GAAM,IAAA,CAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK;AAC/B,YAAA,IAAI,QAAQ,QAAA,EAAU;AACtB,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAG,CAAA;AAK7B,cAAA,MAAM,QACF,MAAA,CAAO,IAAA,IACP,OAAO,OAAA,IACP,MAAA,CAAO,OAAO,OAAA,IACd,EAAA;AACJ,cAAA,IAAI,OAAO,MAAM,KAAA;AAAA,YACrB,CAAA,CAAA,MAAQ;AAEJ,cAAA,IAAI,KAAK,MAAM,GAAA;AAAA,YACnB;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA,SAAE;AACE,MAAA,MAAA,CAAO,WAAA,EAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAAoB;AAChF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,KAAA,CAAA,EAAS;AAAA,MACnD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,IAAI,KAAA;AAAA,QACN,CAAA,wBAAA,EAA2B,SAAS,MAAM,CAAA,UAAA;AAAA,OAC9C;AAAA,IACJ;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AACJ;AC5MO,SAAS,OACZ,MAAA,EACc;AACd,EAAA,OAAO,IAAI,cAAA,CAAe;AAAA,IACtB,KAAA,EAAO,aAAA;AAAA,IACP,GAAG;AAAA,GACN,CAAA;AACL;AAGO,IAAM,OAAA,GAAU","file":"index.js","sourcesContent":["/**\n * Types for @krutai/ai-provider\n */\n\n/**\n * Default model identifier sent to the LangChain server when no model is specified.\n * Your server can use this value to route to its own default model.\n */\nexport const DEFAULT_MODEL = 'default' as const;\n\n/**\n * Default base URL for the LangChain backend server.\n * Used when no serverUrl is provided in the config.\n */\nexport const DEFAULT_SERVER_URL = 'http://localhost:8000' as const;\n\n/**\n * Configuration options for KrutAIProvider\n */\nexport interface KrutAIProviderConfig {\n /**\n * KrutAI API key.\n * Validated against the LangChain server before use.\n * Optional: defaults to process.env.KRUTAI_API_KEY\n */\n apiKey?: string;\n\n /**\n * Base URL of your deployed LangChain backend server.\n * @default \"http://localhost:8000\"\n * @example \"https://ai.krut.ai\"\n */\n serverUrl?: string;\n\n /**\n * The AI model to use (passed to the server).\n * The server decides what to do with this value.\n * @default \"default\"\n */\n model?: string;\n\n /**\n * Whether to validate the API key against the server on initialization.\n * Set to false to skip the validation round-trip (e.g. 
in tests).\n * @default true\n */\n validateOnInit?: boolean;\n}\n\n/**\n * A single chat message\n */\nexport interface ChatMessage {\n role: 'user' | 'assistant' | 'system';\n content: string;\n}\n\n/**\n * Options for a single generate / stream / chat call\n */\nexport interface GenerateOptions {\n /**\n * Override the model for this specific call.\n */\n model?: string;\n\n /**\n * System prompt (prepended as a system message).\n */\n system?: string;\n\n /**\n * Maximum tokens to generate.\n */\n maxTokens?: number;\n\n /**\n * Temperature (0–2).\n */\n temperature?: number;\n}\n","import type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nimport { DEFAULT_MODEL, DEFAULT_SERVER_URL } from './types';\nimport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n ApiKeyValidationError as KrutAIKeyValidationError,\n} from 'krutai';\n\nexport { KrutAIKeyValidationError };\n\n/**\n * KrutAIProvider — fetch-based AI provider for KrutAI\n *\n * Calls your deployed LangChain backend server for all AI operations.\n * The API key is validated against the server before use.\n *\n * @example\n * ```typescript\n * import { KrutAIProvider } from '@krutai/ai-provider';\n *\n * // Using local dev server (http://localhost:8000 by default)\n * const ai = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * });\n *\n * // Or point to a production server\n * const aiProd = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://ai.krut.ai',\n * });\n *\n * await ai.initialize(); // validates key against server\n *\n * const text = await ai.generate('Tell me a joke');\n * console.log(text);\n * ```\n */\nexport class KrutAIProvider {\n private readonly apiKey: string;\n private readonly serverUrl: string;\n private readonly resolvedModel: string;\n private readonly config: KrutAIProviderConfig;\n\n private initialized = false;\n\n constructor(config: KrutAIProviderConfig) {\n this.config = config;\n this.apiKey = config.apiKey || process.env.KRUTAI_API_KEY || '';\n this.serverUrl = (config.serverUrl ?? DEFAULT_SERVER_URL).replace(/\\/$/, ''); // strip trailing slash\n this.resolvedModel = config.model ?? DEFAULT_MODEL;\n\n // Basic format check immediately on construction\n validateApiKeyFormat(this.apiKey);\n\n // If validation is disabled, mark as ready immediately\n if (config.validateOnInit === false) {\n this.initialized = true;\n }\n }\n\n /**\n * Initialize the provider.\n * Validates the API key against the LangChain server, then marks provider as ready.\n *\n * @throws {KrutAIKeyValidationError} if the key is rejected or the server is unreachable\n */\n async initialize(): Promise<void> {\n if (this.initialized) return;\n\n if (this.config.validateOnInit !== false) {\n await validateApiKey(this.apiKey, this.serverUrl);\n }\n\n this.initialized = true;\n }\n\n /**\n * Returns the currently configured default model.\n */\n getModel(): string {\n return this.resolvedModel;\n }\n\n /**\n * Returns whether the provider has been initialized.\n */\n isInitialized(): boolean {\n return this.initialized;\n }\n\n // ---------------------------------------------------------------------------\n // Private helpers\n // ---------------------------------------------------------------------------\n\n private assertInitialized(): void {\n if (!this.initialized) {\n throw new Error(\n 'KrutAIProvider not initialized. 
Call initialize() first or set validateOnInit to false.'\n );\n }\n }\n\n /** Common request headers sent to the server on every AI call. */\n private authHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this.apiKey}`,\n 'x-api-key': this.apiKey,\n };\n }\n\n // ---------------------------------------------------------------------------\n // Public AI Methods\n // ---------------------------------------------------------------------------\n\n /**\n * Generate a response for a prompt (non-streaming).\n *\n * Calls: POST {serverUrl}/generate\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async generate(prompt: string, options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/generate`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n throw new Error(\n `AI server returned HTTP ${response.status} for /generate`\n );\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? '';\n }\n\n /**\n * Generate a streaming response for a prompt via Server-Sent Events (SSE).\n *\n * Calls: POST {serverUrl}/stream\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: `text/event-stream` with `data: <chunk>` lines.\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns An async generator yielding string chunks from the server\n *\n * @example\n * ```typescript\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n */\n async *stream(prompt: string, options: GenerateOptions = {}): AsyncGenerator<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? 
{ temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n throw new Error(\n `AI server returned HTTP ${response.status} for /stream`\n );\n }\n\n if (!response.body) {\n throw new Error('AI server returned no response body for /stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n // Keep the last (potentially incomplete) line in the buffer\n buffer = lines.pop() ?? '';\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const raw = line.slice(6).trim();\n if (raw === '[DONE]') return;\n try {\n const parsed = JSON.parse(raw) as {\n text?: string;\n content?: string;\n delta?: { content?: string };\n };\n const chunk =\n parsed.text ??\n parsed.content ??\n parsed.delta?.content ??\n '';\n if (chunk) yield chunk;\n } catch {\n // raw string chunk (non-JSON SSE)\n if (raw) yield raw;\n }\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Multi-turn conversation: pass a full message history.\n *\n * Calls: POST {serverUrl}/chat\n * Body: { messages, model, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async chat(messages: ChatMessage[], options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/chat`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n messages,\n model,\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n throw new Error(\n `AI server returned HTTP ${response.status} for /chat`\n );\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? 
'';\n }\n}\n","/**\n * @krutai/ai-provider — AI Provider package for KrutAI\n *\n * A fetch-based wrapper that calls your deployed LangChain backend server.\n * The user's API key is validated against the server before any AI call is made.\n *\n * @example Basic usage\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize(); // validates key with server\n *\n * const text = await ai.generate('Write a poem about TypeScript');\n * console.log(text);\n * ```\n *\n * @example With custom model\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * model: 'gpt-4o',\n * });\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n *\n * @example Streaming\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n * await ai.initialize();\n *\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n *\n * @packageDocumentation\n */\n\nimport type { KrutAIProviderConfig } from './types';\nimport { DEFAULT_MODEL } from './types';\nimport { KrutAIProvider } from './client';\n\nexport { KrutAIProvider } from './client';\nexport { KrutAIKeyValidationError } from './client';\nexport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n} from 'krutai';\nexport type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nexport { DEFAULT_MODEL } from './types';\n\n/**\n * krutAI — convenience factory (mirrors `krutAuth` in @krutai/auth).\n *\n * Creates a `KrutAIProvider` instance configured to call your LangChain server.\n *\n * @param config - Provider configuration (`apiKey` and `serverUrl` are required)\n * @returns A `KrutAIProvider` instance — call `.initialize()` before use\n *\n * @example\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n */\nexport function krutAI(\n config: KrutAIProviderConfig & { model?: string }\n): KrutAIProvider {\n return new KrutAIProvider({\n model: DEFAULT_MODEL,\n ...config,\n });\n}\n\n// Package metadata\nexport const VERSION = '0.2.0';\n"]}
1
+ {"version":3,"sources":["../src/types.ts","../src/client.ts","../src/index.ts"],"names":["validateApiKeyFormat","validateApiKey"],"mappings":";;;;;AAQO,IAAM,aAAA,GAAgB;AAMtB,IAAM,kBAAA,GAAqB,uBAAA;ACuB3B,IAAM,iBAAN,MAAqB;AAAA,EACP,MAAA;AAAA,EACA,SAAA;AAAA,EACA,aAAA;AAAA,EACA,MAAA;AAAA,EAET,WAAA,GAAc,KAAA;AAAA,EAEtB,YAAY,MAAA,EAA8B;AACtC,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA,CAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,cAAA,IAAkB,EAAA;AAC7D,IAAA,IAAA,CAAK,aAAa,MAAA,CAAO,SAAA,IAAa,kBAAA,EAAoB,OAAA,CAAQ,OAAO,EAAE,CAAA;AAC3E,IAAA,IAAA,CAAK,aAAA,GAAgB,OAAO,KAAA,IAAS,aAAA;AAGrC,IAAAA,2BAAA,CAAqB,KAAK,MAAM,CAAA;AAGhC,IAAA,IAAI,MAAA,CAAO,mBAAmB,KAAA,EAAO;AACjC,MAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,UAAA,GAA4B;AAC9B,IAAA,IAAI,KAAK,WAAA,EAAa;AAEtB,IAAA,IAAI,IAAA,CAAK,MAAA,CAAO,cAAA,KAAmB,KAAA,EAAO;AACtC,MAAA,MAAMC,gCAAA,CAAe,IAAA,CAAK,MAAA,EAAQ,IAAA,CAAK,SAAS,CAAA;AAAA,IACpD;AAEA,IAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA,EAKA,QAAA,GAAmB;AACf,IAAA,OAAO,IAAA,CAAK,aAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,aAAA,GAAyB;AACrB,IAAA,OAAO,IAAA,CAAK,WAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAA,GAA0B;AAC9B,IAAA,IAAI,CAAC,KAAK,WAAA,EAAa;AACnB,MAAA,MAAM,IAAI,KAAA;AAAA,QACN;AAAA,OACJ;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA,EAGQ,WAAA,GAAsC;AAC1C,IAAA,OAAO;AAAA,MACH,cAAA,EAAgB,kBAAA;AAAA,MAChB,aAAA,EAAe,CAAA,OAAA,EAAU,IAAA,CAAK,MAAM,CAAA,CAAA;AAAA,MACpC,aAAa,IAAA,CAAK;AAAA,KACtB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,QAAA,CAAS,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAAoB;AAC3E,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,SAAA,CAAA,EAAa;AAAA,MACvD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,cAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,OAAO,MAAA,CAAO,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAA2B;AACjF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAA
Q,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAChB,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACrE;AAEA,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,IAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,IAAA,IAAI,MAAA,GAAS,EAAA;AAEb,IAAA,IAAI;AACA,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAE/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,GAAA,GAAM,IAAA,CAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK;AAC/B,YAAA,IAAI,QAAQ,QAAA,EAAU;AACtB,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAG,CAAA;AAK7B,cAAA,MAAM,QACF,MAAA,CAAO,IAAA,IACP,OAAO,OAAA,IACP,MAAA,CAAO,OAAO,OAAA,IACd,EAAA;AACJ,cAAA,IAAI,OAAO,MAAM,KAAA;AAAA,YACrB,CAAA,CAAA,MAAQ;AAEJ,cAAA,IAAI,KAAK,MAAM,GAAA;AAAA,YACnB;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA,SAAE;AACE,MAAA,MAAA,CAAO,WAAA,EAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,cAAA,CAAe,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAAsB;AACnF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AACA,IAAA,OAAA,CAAQ,IAAI,QAAQ,CAAA;AACpB,IAAA,OAAO,QAAA;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,OAAO,UAAA,CAAW,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAA2B;AAC9F,IAAA,IAAA,CAAK,iBAAA,EAAkB;AAEvB,IAAA,IAAI,CAAC,SAAS,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,+CAA+C,CAAA;AAAA,IACnE;AAEA,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,
IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAChB,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACrE;AAEA,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,IAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,IAAA,IAAI,MAAA,GAAS,EAAA;AAEb,IAAA,IAAI;AACA,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAC/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,GAAA,GAAM,IAAA,CAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK;AAC/B,YAAA,IAAI,QAAQ,QAAA,EAAU;AACtB,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAG,CAAA;AAK7B,cAAA,MAAM,QACF,MAAA,CAAO,IAAA,IACP,OAAO,OAAA,IACP,MAAA,CAAO,OAAO,OAAA,IACd,EAAA;AACJ,cAAA,IAAI,OAAO,MAAM,KAAA;AAAA,YACrB,CAAA,CAAA,MAAQ;AACJ,cAAA,IAAI,KAAK,MAAM,GAAA;AAAA,YACnB;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA,SAAE;AACE,MAAA,MAAA,CAAO,WAAA,EAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,kBAAA,CAAmB,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAAsB;AAChG,IAAA,IAAA,CAAK,iBAAA,EAAkB;AAEvB,IAAA,IAAI,CAAC,SAAS,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,uDAAuD,CAAA;AAAA,IAC3E;AAEA,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,
SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,OAAO,QAAA;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAAoB;AAChF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,KAAA,CAAA,EAAS;AAAA,MACnD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,UAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AACJ;ACtZO,SAAS,OACZ,MAAA,EACc;AACd,EAAA,OAAO,IAAI,cAAA,CAAe;AAAA,IACtB,KAAA,EAAO,aAAA;AAAA,IACP,GAAG;AAAA,GACN,CAAA;AACL;AAGO,IAAM,OAAA,GAAU","file":"index.js","sourcesContent":["/**\n * Types for @krutai/ai-provider\n */\n\n/**\n * Default model identifier sent to the LangChain server when no model is specified.\n * Your server can use this value to route to its own default model.\n */\nexport const DEFAULT_MODEL = 'default' as const;\n\n/**\n * Default base URL for the LangChain backend server.\n * Used when no serverUrl is provided in the config.\n */\nexport const DEFAULT_SERVER_URL = 'http://localhost:8000' as const;\n\n/**\n * Configuration options for KrutAIProvider\n */\nexport interface KrutAIProviderConfig {\n /**\n * KrutAI API key.\n * Validated against the LangChain server before use.\n * Optional: defaults to process.env.KRUTAI_API_KEY\n */\n apiKey?: string;\n\n /**\n * Base URL of your deployed LangChain backend server.\n * @default \"http://localhost:8000\"\n * @example \"https://ai.krut.ai\"\n */\n serverUrl?: string;\n\n /**\n * The AI model to use (passed to the server).\n * The server decides what to do with this value.\n * @default \"default\"\n */\n model?: string;\n\n /**\n * Whether to validate the API key against the server on initialization.\n * Set to false to skip the validation round-trip (e.g. 
in tests).\n * @default true\n */\n validateOnInit?: boolean;\n}\n\n/**\n * A part of a multimodal message\n */\nexport interface TextContentPart {\n type: 'text';\n text: string;\n}\n\nexport interface ImageContentPart {\n type: 'image_url';\n image_url: {\n url: string; // Base64 data URI or HTTP(S) URL\n detail?: 'low' | 'high' | 'auto';\n };\n}\n\nexport type ContentPart = TextContentPart | ImageContentPart;\nexport type MessageContent = string | ContentPart[];\n\n/**\n * A single chat message\n */\nexport interface ChatMessage {\n role: 'user' | 'assistant' | 'system';\n content: MessageContent;\n}\n\n/**\n * Options for a single generate / stream / chat call\n */\nexport interface GenerateOptions {\n /**\n * Override the model for this specific call.\n */\n model?: string;\n\n /**\n * System prompt (prepended as a system message).\n */\n system?: string;\n\n /**\n * Maximum tokens to generate.\n */\n maxTokens?: number;\n\n /**\n * Temperature (0–2).\n */\n temperature?: number;\n\n /**\n * Array of image URLs or base64 data URIs to include with the request.\n */\n images?: string[];\n\n /**\n * Array of document URLs or base64 data URIs (e.g. PDFs) to include with the request.\n */\n documents?: string[];\n\n /**\n * Array of PDF URLs or base64 data URIs to include with the request.\n */\n pdf?: string[];\n}\n","import type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nimport { DEFAULT_MODEL, DEFAULT_SERVER_URL } from './types';\nimport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n ApiKeyValidationError as KrutAIKeyValidationError,\n} from 'krutai';\n\nexport { KrutAIKeyValidationError };\n\n/**\n * KrutAIProvider — fetch-based AI provider for KrutAI\n *\n * Calls your deployed LangChain backend server for all AI operations.\n * The API key is validated against the server before use.\n *\n * @example\n * ```typescript\n * import { KrutAIProvider } from '@krutai/ai-provider';\n *\n * // Using local dev server (http://localhost:8000 by default)\n * const ai = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * });\n *\n * // Or point to a production server\n * const aiProd = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://ai.krut.ai',\n * });\n *\n * await ai.initialize(); // validates key against server\n *\n * const text = await ai.generate('Tell me a joke');\n * console.log(text);\n * ```\n */\nexport class KrutAIProvider {\n private readonly apiKey: string;\n private readonly serverUrl: string;\n private readonly resolvedModel: string;\n private readonly config: KrutAIProviderConfig;\n\n private initialized = false;\n\n constructor(config: KrutAIProviderConfig) {\n this.config = config;\n this.apiKey = config.apiKey || process.env.KRUTAI_API_KEY || '';\n this.serverUrl = (config.serverUrl ?? DEFAULT_SERVER_URL).replace(/\\/$/, ''); // strip trailing slash\n this.resolvedModel = config.model ?? 
DEFAULT_MODEL;\n\n // Basic format check immediately on construction\n validateApiKeyFormat(this.apiKey);\n\n // If validation is disabled, mark as ready immediately\n if (config.validateOnInit === false) {\n this.initialized = true;\n }\n }\n\n /**\n * Initialize the provider.\n * Validates the API key against the LangChain server, then marks provider as ready.\n *\n * @throws {KrutAIKeyValidationError} if the key is rejected or the server is unreachable\n */\n async initialize(): Promise<void> {\n if (this.initialized) return;\n\n if (this.config.validateOnInit !== false) {\n await validateApiKey(this.apiKey, this.serverUrl);\n }\n\n this.initialized = true;\n }\n\n /**\n * Returns the currently configured default model.\n */\n getModel(): string {\n return this.resolvedModel;\n }\n\n /**\n * Returns whether the provider has been initialized.\n */\n isInitialized(): boolean {\n return this.initialized;\n }\n\n // ---------------------------------------------------------------------------\n // Private helpers\n // ---------------------------------------------------------------------------\n\n private assertInitialized(): void {\n if (!this.initialized) {\n throw new Error(\n 'KrutAIProvider not initialized. Call initialize() first or set validateOnInit to false.'\n );\n }\n }\n\n /** Common request headers sent to the server on every AI call. */\n private authHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this.apiKey}`,\n 'x-api-key': this.apiKey,\n };\n }\n\n // ---------------------------------------------------------------------------\n // Public AI Methods\n // ---------------------------------------------------------------------------\n\n /**\n * Generate a response for a prompt (non-streaming).\n *\n * Calls: POST {serverUrl}/generate\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async generate(prompt: string, options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/generate`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /generate`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? 
'';\n }\n\n /**\n * Generate a streaming response for a prompt via Server-Sent Events (SSE).\n *\n * Calls: POST {serverUrl}/stream\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: `text/event-stream` with `data: <chunk>` lines.\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns An async generator yielding string chunks from the server\n *\n * @example\n * ```typescript\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n */\n async *stream(prompt: string, options: GenerateOptions = {}): AsyncGenerator<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n if (!response.body) {\n throw new Error('AI server returned no response body for /stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n // Keep the last (potentially incomplete) line in the buffer\n buffer = lines.pop() ?? '';\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const raw = line.slice(6).trim();\n if (raw === '[DONE]') return;\n try {\n const parsed = JSON.parse(raw) as {\n text?: string;\n content?: string;\n delta?: { content?: string };\n };\n const chunk =\n parsed.text ??\n parsed.content ??\n parsed.delta?.content ??\n '';\n if (chunk) yield chunk;\n } catch {\n // raw string chunk (non-JSON SSE)\n if (raw) yield raw;\n }\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Similar to stream() but returns the raw fetch Response object.\n * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client\n * (e.g., returning this directly from a Next.js API route).\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns A Promise resolving to the native fetch Response\n */\n async streamResponse(prompt: string, options: GenerateOptions = {}): Promise<Response> {\n this.assertInitialized();\n const model = options.model ?? 
this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n console.log(response)\n return response;\n }\n\n /**\n * Multi-turn conversation streaming: pass a full message history.\n * Calls POST /stream with the full { messages } payload.\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns An async generator yielding string chunks from the server\n */\n async *streamChat(messages: ChatMessage[], options: GenerateOptions = {}): AsyncGenerator<string> {\n this.assertInitialized();\n\n if (!messages.length) {\n throw new Error('Messages array cannot be empty for streamChat');\n }\n\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n messages,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n if (!response.body) {\n throw new Error('AI server returned no response body for /stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() ?? 
'';\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const raw = line.slice(6).trim();\n if (raw === '[DONE]') return;\n try {\n const parsed = JSON.parse(raw) as {\n text?: string;\n content?: string;\n delta?: { content?: string };\n };\n const chunk =\n parsed.text ??\n parsed.content ??\n parsed.delta?.content ??\n '';\n if (chunk) yield chunk;\n } catch {\n if (raw) yield raw;\n }\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Similar to streamChat() but returns the raw fetch Response object.\n * Useful for proxying the Server-Sent Events stream directly to a frontend client.\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns A Promise resolving to the native fetch Response\n */\n async streamChatResponse(messages: ChatMessage[], options: GenerateOptions = {}): Promise<Response> {\n this.assertInitialized();\n\n if (!messages.length) {\n throw new Error('Messages array cannot be empty for streamChatResponse');\n }\n\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n messages,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n return response;\n }\n\n /**\n * Multi-turn conversation: pass a full message history.\n *\n * Calls: POST {serverUrl}/chat\n * Body: { messages, model, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async chat(messages: ChatMessage[], options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/chat`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n messages,\n model,\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? 
{ temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /chat`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? '';\n }\n}\n","/**\n * @krutai/ai-provider — AI Provider package for KrutAI\n *\n * A fetch-based wrapper that calls your deployed LangChain backend server.\n * The user's API key is validated against the server before any AI call is made.\n *\n * @example Basic usage\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize(); // validates key with server\n *\n * const text = await ai.generate('Write a poem about TypeScript');\n * console.log(text);\n * ```\n *\n * @example With custom model\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * model: 'gpt-4o',\n * });\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n *\n * @example Streaming\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n * await ai.initialize();\n *\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n *\n * @packageDocumentation\n */\n\nimport type { KrutAIProviderConfig } from './types';\nimport { DEFAULT_MODEL } from './types';\nimport { KrutAIProvider } from './client';\n\nexport { KrutAIProvider } from './client';\nexport { KrutAIKeyValidationError } from './client';\nexport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n} from 'krutai';\nexport type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nexport { DEFAULT_MODEL } from './types';\n\n/**\n * krutAI — convenience factory (mirrors `krutAuth` in @krutai/auth).\n *\n * Creates a `KrutAIProvider` instance configured to call your LangChain server.\n *\n * @param config - Provider configuration (`apiKey` and `serverUrl` are required)\n * @returns A `KrutAIProvider` instance — call `.initialize()` before use\n *\n * @example\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n */\nexport function krutAI(\n config: KrutAIProviderConfig & { model?: string }\n): KrutAIProvider {\n return new KrutAIProvider({\n model: DEFAULT_MODEL,\n ...config,\n });\n}\n\n// Package metadata\nexport const VERSION = '0.2.0';\n"]}
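The `validateOnInit` option documented in the types above skips the key-validation round-trip against the server. A minimal sketch of a test-style setup with validation disabled — the key value and server URL here are placeholders for illustration, not values taken from this package:

```typescript
import { krutAI } from '@krutai/ai-provider';

// validateOnInit: false marks the provider ready at construction time,
// so no network call is made; the basic key-format check still runs.
const ai = krutAI({
  apiKey: 'test-key', // placeholder value for illustration
  serverUrl: 'http://localhost:8000',
  validateOnInit: false,
});

await ai.initialize(); // resolves immediately; the provider is already marked ready
console.log(ai.isInitialized()); // true
```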
package/dist/index.mjs CHANGED
@@ -87,14 +87,22 @@ var KrutAIProvider = class {
87
87
  prompt,
88
88
  model,
89
89
  ...options.system !== void 0 ? { system: options.system } : {},
90
+ ...options.images !== void 0 ? { images: options.images } : {},
91
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
92
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
90
93
  ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
91
94
  ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
92
95
  })
93
96
  });
94
97
  if (!response.ok) {
95
- throw new Error(
96
- `AI server returned HTTP ${response.status} for /generate`
97
- );
98
+ let errorMessage = `AI server returned HTTP ${response.status} for /generate`;
99
+ try {
100
+ const errorData = await response.json();
101
+ if (errorData?.error) errorMessage = errorData.error;
102
+ else if (errorData?.message) errorMessage = errorData.message;
103
+ } catch {
104
+ }
105
+ throw new Error(errorMessage);
98
106
  }
99
107
  const data = await response.json();
100
108
  return data.text ?? data.content ?? data.message ?? "";
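With the reworked error path above, a failed /generate call now throws the server-supplied `error` or `message` text rather than only the HTTP status. A minimal caller-side sketch, assuming `ai` is an already-initialized `KrutAIProvider`:

```typescript
try {
  const text = await ai.generate('Summarize the latest release notes');
  console.log(text);
} catch (err) {
  // The Error message is the server's own `error`/`message` field when the JSON
  // body provides one, and falls back to "AI server returned HTTP <status> ..." otherwise.
  console.error('generate failed:', (err as Error).message);
}
```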
@@ -131,14 +139,22 @@ var KrutAIProvider = class {
131
139
  prompt,
132
140
  model,
133
141
  ...options.system !== void 0 ? { system: options.system } : {},
142
+ ...options.images !== void 0 ? { images: options.images } : {},
143
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
144
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
134
145
  ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
135
146
  ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
136
147
  })
137
148
  });
138
149
  if (!response.ok) {
139
- throw new Error(
140
- `AI server returned HTTP ${response.status} for /stream`
141
- );
150
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
151
+ try {
152
+ const errorData = await response.json();
153
+ if (errorData?.error) errorMessage = errorData.error;
154
+ else if (errorData?.message) errorMessage = errorData.message;
155
+ } catch {
156
+ }
157
+ throw new Error(errorMessage);
142
158
  }
143
159
  if (!response.body) {
144
160
  throw new Error("AI server returned no response body for /stream");
@@ -171,6 +187,163 @@ var KrutAIProvider = class {
171
187
  reader.releaseLock();
172
188
  }
173
189
  }
190
+ /**
191
+ * Similar to stream() but returns the raw fetch Response object.
192
+ * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
193
+ * (e.g., returning this directly from a Next.js API route).
194
+ *
195
+ * @param prompt - The user prompt string
196
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
197
+ * @returns A Promise resolving to the native fetch Response
198
+ */
199
+ async streamResponse(prompt, options = {}) {
200
+ this.assertInitialized();
201
+ const model = options.model ?? this.resolvedModel;
202
+ const response = await fetch(`${this.serverUrl}/stream`, {
203
+ method: "POST",
204
+ headers: {
205
+ ...this.authHeaders(),
206
+ Accept: "text/event-stream"
207
+ },
208
+ body: JSON.stringify({
209
+ prompt,
210
+ model,
211
+ ...options.system !== void 0 ? { system: options.system } : {},
212
+ ...options.images !== void 0 ? { images: options.images } : {},
213
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
214
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
215
+ ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
216
+ ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
217
+ })
218
+ });
219
+ if (!response.ok) {
220
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
221
+ try {
222
+ const errorData = await response.json();
223
+ if (errorData?.error) errorMessage = errorData.error;
224
+ else if (errorData?.message) errorMessage = errorData.message;
225
+ } catch {
226
+ }
227
+ throw new Error(errorMessage);
228
+ }
229
+ console.log(response);
230
+ return response;
231
+ }
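Because `streamResponse` resolves to the upstream fetch `Response` without reading its body, a server route can forward the SSE stream as-is. A minimal Next.js App Router sketch — the route path, request shape, and response headers are illustrative assumptions, not part of this package:

```typescript
// app/api/generate/route.ts (hypothetical route)
import { krutAI } from '@krutai/ai-provider';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const ai = krutAI({ apiKey: process.env.KRUTAI_API_KEY! });
  await ai.initialize();

  // Pass the upstream SSE body straight through to the browser.
  const upstream = await ai.streamResponse(prompt);
  return new Response(upstream.body, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      Connection: 'keep-alive',
    },
  });
}
```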
232
+ /**
233
+ * Multi-turn conversation streaming: pass a full message history.
234
+ * Calls POST /stream with the full { messages } payload.
235
+ *
236
+ * @param messages - Full conversation history
237
+ * @param options - Optional overrides (model, maxTokens, temperature)
238
+ * @returns An async generator yielding string chunks from the server
239
+ */
240
+ async *streamChat(messages, options = {}) {
241
+ this.assertInitialized();
242
+ if (!messages.length) {
243
+ throw new Error("Messages array cannot be empty for streamChat");
244
+ }
245
+ const model = options.model ?? this.resolvedModel;
246
+ const response = await fetch(`${this.serverUrl}/stream`, {
247
+ method: "POST",
248
+ headers: {
249
+ ...this.authHeaders(),
250
+ Accept: "text/event-stream"
251
+ },
252
+ body: JSON.stringify({
253
+ messages,
254
+ model,
255
+ ...options.system !== void 0 ? { system: options.system } : {},
256
+ ...options.images !== void 0 ? { images: options.images } : {},
257
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
258
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
259
+ ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
260
+ ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
261
+ })
262
+ });
263
+ if (!response.ok) {
264
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
265
+ try {
266
+ const errorData = await response.json();
267
+ if (errorData?.error) errorMessage = errorData.error;
268
+ else if (errorData?.message) errorMessage = errorData.message;
269
+ } catch {
270
+ }
271
+ throw new Error(errorMessage);
272
+ }
273
+ if (!response.body) {
274
+ throw new Error("AI server returned no response body for /stream");
275
+ }
276
+ const reader = response.body.getReader();
277
+ const decoder = new TextDecoder();
278
+ let buffer = "";
279
+ try {
280
+ while (true) {
281
+ const { done, value } = await reader.read();
282
+ if (done) break;
283
+ buffer += decoder.decode(value, { stream: true });
284
+ const lines = buffer.split("\n");
285
+ buffer = lines.pop() ?? "";
286
+ for (const line of lines) {
287
+ if (line.startsWith("data: ")) {
288
+ const raw = line.slice(6).trim();
289
+ if (raw === "[DONE]") return;
290
+ try {
291
+ const parsed = JSON.parse(raw);
292
+ const chunk = parsed.text ?? parsed.content ?? parsed.delta?.content ?? "";
293
+ if (chunk) yield chunk;
294
+ } catch {
295
+ if (raw) yield raw;
296
+ }
297
+ }
298
+ }
299
+ }
300
+ } finally {
301
+ reader.releaseLock();
302
+ }
303
+ }
304
+ /**
305
+ * Similar to streamChat() but returns the raw fetch Response object.
306
+ * Useful for proxying the Server-Sent Events stream directly to a frontend client.
307
+ *
308
+ * @param messages - Full conversation history
309
+ * @param options - Optional overrides (model, maxTokens, temperature)
310
+ * @returns A Promise resolving to the native fetch Response
311
+ */
312
+ async streamChatResponse(messages, options = {}) {
313
+ this.assertInitialized();
314
+ if (!messages.length) {
315
+ throw new Error("Messages array cannot be empty for streamChatResponse");
316
+ }
317
+ const model = options.model ?? this.resolvedModel;
318
+ const response = await fetch(`${this.serverUrl}/stream`, {
319
+ method: "POST",
320
+ headers: {
321
+ ...this.authHeaders(),
322
+ Accept: "text/event-stream"
323
+ },
324
+ body: JSON.stringify({
325
+ messages,
326
+ model,
327
+ ...options.system !== void 0 ? { system: options.system } : {},
328
+ ...options.images !== void 0 ? { images: options.images } : {},
329
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
330
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
331
+ ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
332
+ ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
333
+ })
334
+ });
335
+ if (!response.ok) {
336
+ let errorMessage = `AI server returned HTTP ${response.status} for /stream`;
337
+ try {
338
+ const errorData = await response.json();
339
+ if (errorData?.error) errorMessage = errorData.error;
340
+ else if (errorData?.message) errorMessage = errorData.message;
341
+ } catch {
342
+ }
343
+ throw new Error(errorMessage);
344
+ }
345
+ return response;
346
+ }
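`streamChatResponse` returns the same raw `Response`, but for a full message history. On the consuming side, the proxied `text/event-stream` body can be read incrementally; this sketch assumes a browser `fetch` against a proxy route like the one above and simply logs each decoded chunk:

```typescript
// Browser-side consumption of the proxied SSE stream (the route path is an assumption).
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Tell me a story' }] }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Each chunk may contain one or more `data: ...` lines; append them to the UI as they arrive.
  console.log(decoder.decode(value, { stream: true }));
}
```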
174
347
  /**
175
348
  * Multi-turn conversation: pass a full message history.
176
349
  *
@@ -191,14 +364,22 @@ var KrutAIProvider = class {
191
364
  body: JSON.stringify({
192
365
  messages,
193
366
  model,
367
+ ...options.images !== void 0 ? { images: options.images } : {},
368
+ ...options.documents !== void 0 ? { documents: options.documents } : {},
369
+ ...options.pdf !== void 0 ? { pdf: options.pdf } : {},
194
370
  ...options.maxTokens !== void 0 ? { maxTokens: options.maxTokens } : {},
195
371
  ...options.temperature !== void 0 ? { temperature: options.temperature } : {}
196
372
  })
197
373
  });
198
374
  if (!response.ok) {
199
- throw new Error(
200
- `AI server returned HTTP ${response.status} for /chat`
201
- );
375
+ let errorMessage = `AI server returned HTTP ${response.status} for /chat`;
376
+ try {
377
+ const errorData = await response.json();
378
+ if (errorData?.error) errorMessage = errorData.error;
379
+ else if (errorData?.message) errorMessage = errorData.message;
380
+ } catch {
381
+ }
382
+ throw new Error(errorMessage);
202
383
  }
203
384
  const data = await response.json();
204
385
  return data.text ?? data.content ?? data.message ?? "";
@@ -1 +1 @@
1
- {"version":3,"sources":["../src/types.ts","../src/client.ts","../src/index.ts"],"names":["validateApiKey"],"mappings":";;;;AAQO,IAAM,aAAA,GAAgB;AAMtB,IAAM,kBAAA,GAAqB,uBAAA;ACuB3B,IAAM,iBAAN,MAAqB;AAAA,EACP,MAAA;AAAA,EACA,SAAA;AAAA,EACA,aAAA;AAAA,EACA,MAAA;AAAA,EAET,WAAA,GAAc,KAAA;AAAA,EAEtB,YAAY,MAAA,EAA8B;AACtC,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA,CAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,cAAA,IAAkB,EAAA;AAC7D,IAAA,IAAA,CAAK,aAAa,MAAA,CAAO,SAAA,IAAa,kBAAA,EAAoB,OAAA,CAAQ,OAAO,EAAE,CAAA;AAC3E,IAAA,IAAA,CAAK,aAAA,GAAgB,OAAO,KAAA,IAAS,aAAA;AAGrC,IAAA,oBAAA,CAAqB,KAAK,MAAM,CAAA;AAGhC,IAAA,IAAI,MAAA,CAAO,mBAAmB,KAAA,EAAO;AACjC,MAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,UAAA,GAA4B;AAC9B,IAAA,IAAI,KAAK,WAAA,EAAa;AAEtB,IAAA,IAAI,IAAA,CAAK,MAAA,CAAO,cAAA,KAAmB,KAAA,EAAO;AACtC,MAAA,MAAMA,yBAAA,CAAe,IAAA,CAAK,MAAA,EAAQ,IAAA,CAAK,SAAS,CAAA;AAAA,IACpD;AAEA,IAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA,EAKA,QAAA,GAAmB;AACf,IAAA,OAAO,IAAA,CAAK,aAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,aAAA,GAAyB;AACrB,IAAA,OAAO,IAAA,CAAK,WAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAA,GAA0B;AAC9B,IAAA,IAAI,CAAC,KAAK,WAAA,EAAa;AACnB,MAAA,MAAM,IAAI,KAAA;AAAA,QACN;AAAA,OACJ;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA,EAGQ,WAAA,GAAsC;AAC1C,IAAA,OAAO;AAAA,MACH,cAAA,EAAgB,kBAAA;AAAA,MAChB,aAAA,EAAe,CAAA,OAAA,EAAU,IAAA,CAAK,MAAM,CAAA,CAAA;AAAA,MACpC,aAAa,IAAA,CAAK;AAAA,KACtB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,QAAA,CAAS,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAAoB;AAC3E,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,SAAA,CAAA,EAAa;AAAA,MACvD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,IAAI,KAAA;AAAA,QACN,CAAA,wBAAA,EAA2B,SAAS,MAAM,CAAA,cAAA;AAAA,OAC9C;AAAA,IACJ;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,OAAO,MAAA,CAAO,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAA2B;AACjF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,IAAI,KAAA;AAAA,QACN,CAAA,wBAAA,EAA2B,SAAS,MAAM,CAAA,YAAA;AAAA,OAC9C;AAAA,IACJ;AAEA,IAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAChB,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACrE;AAEA,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,IAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,IAAA,IAAI,MAAA,GAAS,EAAA;AAEb,IAAA,IAAI;AAC
A,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAE/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,GAAA,GAAM,IAAA,CAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK;AAC/B,YAAA,IAAI,QAAQ,QAAA,EAAU;AACtB,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAG,CAAA;AAK7B,cAAA,MAAM,QACF,MAAA,CAAO,IAAA,IACP,OAAO,OAAA,IACP,MAAA,CAAO,OAAO,OAAA,IACd,EAAA;AACJ,cAAA,IAAI,OAAO,MAAM,KAAA;AAAA,YACrB,CAAA,CAAA,MAAQ;AAEJ,cAAA,IAAI,KAAK,MAAM,GAAA;AAAA,YACnB;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA,SAAE;AACE,MAAA,MAAA,CAAO,WAAA,EAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAAoB;AAChF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,KAAA,CAAA,EAAS;AAAA,MACnD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,MAAM,IAAI,KAAA;AAAA,QACN,CAAA,wBAAA,EAA2B,SAAS,MAAM,CAAA,UAAA;AAAA,OAC9C;AAAA,IACJ;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AACJ;AC5MO,SAAS,OACZ,MAAA,EACc;AACd,EAAA,OAAO,IAAI,cAAA,CAAe;AAAA,IACtB,KAAA,EAAO,aAAA;AAAA,IACP,GAAG;AAAA,GACN,CAAA;AACL;AAGO,IAAM,OAAA,GAAU","file":"index.mjs","sourcesContent":["/**\n * Types for @krutai/ai-provider\n */\n\n/**\n * Default model identifier sent to the LangChain server when no model is specified.\n * Your server can use this value to route to its own default model.\n */\nexport const DEFAULT_MODEL = 'default' as const;\n\n/**\n * Default base URL for the LangChain backend server.\n * Used when no serverUrl is provided in the config.\n */\nexport const DEFAULT_SERVER_URL = 'http://localhost:8000' as const;\n\n/**\n * Configuration options for KrutAIProvider\n */\nexport interface KrutAIProviderConfig {\n /**\n * KrutAI API key.\n * Validated against the LangChain server before use.\n * Optional: defaults to process.env.KRUTAI_API_KEY\n */\n apiKey?: string;\n\n /**\n * Base URL of your deployed LangChain backend server.\n * @default \"http://localhost:8000\"\n * @example \"https://ai.krut.ai\"\n */\n serverUrl?: string;\n\n /**\n * The AI model to use (passed to the server).\n * The server decides what to do with this value.\n * @default \"default\"\n */\n model?: string;\n\n /**\n * Whether to validate the API key against the server on initialization.\n * Set to false to skip the validation round-trip (e.g. 
in tests).\n * @default true\n */\n validateOnInit?: boolean;\n}\n\n/**\n * A single chat message\n */\nexport interface ChatMessage {\n role: 'user' | 'assistant' | 'system';\n content: string;\n}\n\n/**\n * Options for a single generate / stream / chat call\n */\nexport interface GenerateOptions {\n /**\n * Override the model for this specific call.\n */\n model?: string;\n\n /**\n * System prompt (prepended as a system message).\n */\n system?: string;\n\n /**\n * Maximum tokens to generate.\n */\n maxTokens?: number;\n\n /**\n * Temperature (0–2).\n */\n temperature?: number;\n}\n","import type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nimport { DEFAULT_MODEL, DEFAULT_SERVER_URL } from './types';\nimport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n ApiKeyValidationError as KrutAIKeyValidationError,\n} from 'krutai';\n\nexport { KrutAIKeyValidationError };\n\n/**\n * KrutAIProvider — fetch-based AI provider for KrutAI\n *\n * Calls your deployed LangChain backend server for all AI operations.\n * The API key is validated against the server before use.\n *\n * @example\n * ```typescript\n * import { KrutAIProvider } from '@krutai/ai-provider';\n *\n * // Using local dev server (http://localhost:8000 by default)\n * const ai = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * });\n *\n * // Or point to a production server\n * const aiProd = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://ai.krut.ai',\n * });\n *\n * await ai.initialize(); // validates key against server\n *\n * const text = await ai.generate('Tell me a joke');\n * console.log(text);\n * ```\n */\nexport class KrutAIProvider {\n private readonly apiKey: string;\n private readonly serverUrl: string;\n private readonly resolvedModel: string;\n private readonly config: KrutAIProviderConfig;\n\n private initialized = false;\n\n constructor(config: KrutAIProviderConfig) {\n this.config = config;\n this.apiKey = config.apiKey || process.env.KRUTAI_API_KEY || '';\n this.serverUrl = (config.serverUrl ?? DEFAULT_SERVER_URL).replace(/\\/$/, ''); // strip trailing slash\n this.resolvedModel = config.model ?? DEFAULT_MODEL;\n\n // Basic format check immediately on construction\n validateApiKeyFormat(this.apiKey);\n\n // If validation is disabled, mark as ready immediately\n if (config.validateOnInit === false) {\n this.initialized = true;\n }\n }\n\n /**\n * Initialize the provider.\n * Validates the API key against the LangChain server, then marks provider as ready.\n *\n * @throws {KrutAIKeyValidationError} if the key is rejected or the server is unreachable\n */\n async initialize(): Promise<void> {\n if (this.initialized) return;\n\n if (this.config.validateOnInit !== false) {\n await validateApiKey(this.apiKey, this.serverUrl);\n }\n\n this.initialized = true;\n }\n\n /**\n * Returns the currently configured default model.\n */\n getModel(): string {\n return this.resolvedModel;\n }\n\n /**\n * Returns whether the provider has been initialized.\n */\n isInitialized(): boolean {\n return this.initialized;\n }\n\n // ---------------------------------------------------------------------------\n // Private helpers\n // ---------------------------------------------------------------------------\n\n private assertInitialized(): void {\n if (!this.initialized) {\n throw new Error(\n 'KrutAIProvider not initialized. 
Call initialize() first or set validateOnInit to false.'\n );\n }\n }\n\n /** Common request headers sent to the server on every AI call. */\n private authHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this.apiKey}`,\n 'x-api-key': this.apiKey,\n };\n }\n\n // ---------------------------------------------------------------------------\n // Public AI Methods\n // ---------------------------------------------------------------------------\n\n /**\n * Generate a response for a prompt (non-streaming).\n *\n * Calls: POST {serverUrl}/generate\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async generate(prompt: string, options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/generate`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n throw new Error(\n `AI server returned HTTP ${response.status} for /generate`\n );\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? '';\n }\n\n /**\n * Generate a streaming response for a prompt via Server-Sent Events (SSE).\n *\n * Calls: POST {serverUrl}/stream\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: `text/event-stream` with `data: <chunk>` lines.\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns An async generator yielding string chunks from the server\n *\n * @example\n * ```typescript\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n */\n async *stream(prompt: string, options: GenerateOptions = {}): AsyncGenerator<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? 
{ temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n throw new Error(\n `AI server returned HTTP ${response.status} for /stream`\n );\n }\n\n if (!response.body) {\n throw new Error('AI server returned no response body for /stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n // Keep the last (potentially incomplete) line in the buffer\n buffer = lines.pop() ?? '';\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const raw = line.slice(6).trim();\n if (raw === '[DONE]') return;\n try {\n const parsed = JSON.parse(raw) as {\n text?: string;\n content?: string;\n delta?: { content?: string };\n };\n const chunk =\n parsed.text ??\n parsed.content ??\n parsed.delta?.content ??\n '';\n if (chunk) yield chunk;\n } catch {\n // raw string chunk (non-JSON SSE)\n if (raw) yield raw;\n }\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Multi-turn conversation: pass a full message history.\n *\n * Calls: POST {serverUrl}/chat\n * Body: { messages, model, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async chat(messages: ChatMessage[], options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/chat`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n messages,\n model,\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n throw new Error(\n `AI server returned HTTP ${response.status} for /chat`\n );\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? 
'';\n }\n}\n","/**\n * @krutai/ai-provider — AI Provider package for KrutAI\n *\n * A fetch-based wrapper that calls your deployed LangChain backend server.\n * The user's API key is validated against the server before any AI call is made.\n *\n * @example Basic usage\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize(); // validates key with server\n *\n * const text = await ai.generate('Write a poem about TypeScript');\n * console.log(text);\n * ```\n *\n * @example With custom model\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * model: 'gpt-4o',\n * });\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n *\n * @example Streaming\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n * await ai.initialize();\n *\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n *\n * @packageDocumentation\n */\n\nimport type { KrutAIProviderConfig } from './types';\nimport { DEFAULT_MODEL } from './types';\nimport { KrutAIProvider } from './client';\n\nexport { KrutAIProvider } from './client';\nexport { KrutAIKeyValidationError } from './client';\nexport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n} from 'krutai';\nexport type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nexport { DEFAULT_MODEL } from './types';\n\n/**\n * krutAI — convenience factory (mirrors `krutAuth` in @krutai/auth).\n *\n * Creates a `KrutAIProvider` instance configured to call your LangChain server.\n *\n * @param config - Provider configuration (`apiKey` and `serverUrl` are required)\n * @returns A `KrutAIProvider` instance — call `.initialize()` before use\n *\n * @example\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n */\nexport function krutAI(\n config: KrutAIProviderConfig & { model?: string }\n): KrutAIProvider {\n return new KrutAIProvider({\n model: DEFAULT_MODEL,\n ...config,\n });\n}\n\n// Package metadata\nexport const VERSION = '0.2.0';\n"]}
1
+ {"version":3,"sources":["../src/types.ts","../src/client.ts","../src/index.ts"],"names":["validateApiKey"],"mappings":";;;;AAQO,IAAM,aAAA,GAAgB;AAMtB,IAAM,kBAAA,GAAqB,uBAAA;ACuB3B,IAAM,iBAAN,MAAqB;AAAA,EACP,MAAA;AAAA,EACA,SAAA;AAAA,EACA,aAAA;AAAA,EACA,MAAA;AAAA,EAET,WAAA,GAAc,KAAA;AAAA,EAEtB,YAAY,MAAA,EAA8B;AACtC,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA;AACd,IAAA,IAAA,CAAK,MAAA,GAAS,MAAA,CAAO,MAAA,IAAU,OAAA,CAAQ,IAAI,cAAA,IAAkB,EAAA;AAC7D,IAAA,IAAA,CAAK,aAAa,MAAA,CAAO,SAAA,IAAa,kBAAA,EAAoB,OAAA,CAAQ,OAAO,EAAE,CAAA;AAC3E,IAAA,IAAA,CAAK,aAAA,GAAgB,OAAO,KAAA,IAAS,aAAA;AAGrC,IAAA,oBAAA,CAAqB,KAAK,MAAM,CAAA;AAGhC,IAAA,IAAI,MAAA,CAAO,mBAAmB,KAAA,EAAO;AACjC,MAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,UAAA,GAA4B;AAC9B,IAAA,IAAI,KAAK,WAAA,EAAa;AAEtB,IAAA,IAAI,IAAA,CAAK,MAAA,CAAO,cAAA,KAAmB,KAAA,EAAO;AACtC,MAAA,MAAMA,yBAAA,CAAe,IAAA,CAAK,MAAA,EAAQ,IAAA,CAAK,SAAS,CAAA;AAAA,IACpD;AAEA,IAAA,IAAA,CAAK,WAAA,GAAc,IAAA;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA,EAKA,QAAA,GAAmB;AACf,IAAA,OAAO,IAAA,CAAK,aAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,aAAA,GAAyB;AACrB,IAAA,OAAO,IAAA,CAAK,WAAA;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAA,GAA0B;AAC9B,IAAA,IAAI,CAAC,KAAK,WAAA,EAAa;AACnB,MAAA,MAAM,IAAI,KAAA;AAAA,QACN;AAAA,OACJ;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA,EAGQ,WAAA,GAAsC;AAC1C,IAAA,OAAO;AAAA,MACH,cAAA,EAAgB,kBAAA;AAAA,MAChB,aAAA,EAAe,CAAA,OAAA,EAAU,IAAA,CAAK,MAAM,CAAA,CAAA;AAAA,MACpC,aAAa,IAAA,CAAK;AAAA,KACtB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,MAAM,QAAA,CAAS,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAAoB;AAC3E,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,SAAA,CAAA,EAAa;AAAA,MACvD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,cAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,OAAO,MAAA,CAAO,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAA2B;AACjF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAA
E,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAChB,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACrE;AAEA,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,IAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,IAAA,IAAI,MAAA,GAAS,EAAA;AAEb,IAAA,IAAI;AACA,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAE/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,GAAA,GAAM,IAAA,CAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK;AAC/B,YAAA,IAAI,QAAQ,QAAA,EAAU;AACtB,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAG,CAAA;AAK7B,cAAA,MAAM,QACF,MAAA,CAAO,IAAA,IACP,OAAO,OAAA,IACP,MAAA,CAAO,OAAO,OAAA,IACd,EAAA;AACJ,cAAA,IAAI,OAAO,MAAM,KAAA;AAAA,YACrB,CAAA,CAAA,MAAQ;AAEJ,cAAA,IAAI,KAAK,MAAM,GAAA;AAAA,YACnB;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA,SAAE;AACE,MAAA,MAAA,CAAO,WAAA,EAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,cAAA,CAAe,MAAA,EAAgB,OAAA,GAA2B,EAAC,EAAsB;AACnF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,MAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AACA,IAAA,OAAA,CAAQ,IAAI,QAAQ,CAAA;AACpB,IAAA,OAAO,QAAA;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,OAAO,UAAA,CAAW,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAA2B;AAC9F,IAAA,IAAA,CAAK,iBAAA,EAAkB;AAEvB,IAAA,IAAI,CAAC,SAAS,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,+CAA+C,CAAA;AAAA,IACnE;AAEA,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA
,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,IAAI,CAAC,SAAS,IAAA,EAAM;AAChB,MAAA,MAAM,IAAI,MAAM,iDAAiD,CAAA;AAAA,IACrE;AAEA,IAAA,MAAM,MAAA,GAAS,QAAA,CAAS,IAAA,CAAK,SAAA,EAAU;AACvC,IAAA,MAAM,OAAA,GAAU,IAAI,WAAA,EAAY;AAChC,IAAA,IAAI,MAAA,GAAS,EAAA;AAEb,IAAA,IAAI;AACA,MAAA,OAAO,IAAA,EAAM;AACT,QAAA,MAAM,EAAE,IAAA,EAAM,KAAA,EAAM,GAAI,MAAM,OAAO,IAAA,EAAK;AAC1C,QAAA,IAAI,IAAA,EAAM;AAEV,QAAA,MAAA,IAAU,QAAQ,MAAA,CAAO,KAAA,EAAO,EAAE,MAAA,EAAQ,MAAM,CAAA;AAChD,QAAA,MAAM,KAAA,GAAQ,MAAA,CAAO,KAAA,CAAM,IAAI,CAAA;AAC/B,QAAA,MAAA,GAAS,KAAA,CAAM,KAAI,IAAK,EAAA;AAExB,QAAA,KAAA,MAAW,QAAQ,KAAA,EAAO;AACtB,UAAA,IAAI,IAAA,CAAK,UAAA,CAAW,QAAQ,CAAA,EAAG;AAC3B,YAAA,MAAM,GAAA,GAAM,IAAA,CAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK;AAC/B,YAAA,IAAI,QAAQ,QAAA,EAAU;AACtB,YAAA,IAAI;AACA,cAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAG,CAAA;AAK7B,cAAA,MAAM,QACF,MAAA,CAAO,IAAA,IACP,OAAO,OAAA,IACP,MAAA,CAAO,OAAO,OAAA,IACd,EAAA;AACJ,cAAA,IAAI,OAAO,MAAM,KAAA;AAAA,YACrB,CAAA,CAAA,MAAQ;AACJ,cAAA,IAAI,KAAK,MAAM,GAAA;AAAA,YACnB;AAAA,UACJ;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,CAAA,SAAE;AACE,MAAA,MAAA,CAAO,WAAA,EAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,kBAAA,CAAmB,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAAsB;AAChG,IAAA,IAAA,CAAK,iBAAA,EAAkB;AAEvB,IAAA,IAAI,CAAC,SAAS,MAAA,EAAQ;AAClB,MAAA,MAAM,IAAI,MAAM,uDAAuD,CAAA;AAAA,IAC3E;AAEA,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,OAAA,CAAA,EAAW;AAAA,MACrD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS;AAAA,QACL,GAAG,KAAK,WAAA,EAAY;AAAA,QACpB,MAAA,EAAQ;AAAA,OACZ;AAAA,MACA,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,YAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,
GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,OAAO,QAAA;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,IAAA,CAAK,QAAA,EAAyB,OAAA,GAA2B,EAAC,EAAoB;AAChF,IAAA,IAAA,CAAK,iBAAA,EAAkB;AACvB,IAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,IAAS,IAAA,CAAK,aAAA;AAEpC,IAAA,MAAM,WAAW,MAAM,KAAA,CAAM,CAAA,EAAG,IAAA,CAAK,SAAS,CAAA,KAAA,CAAA,EAAS;AAAA,MACnD,MAAA,EAAQ,MAAA;AAAA,MACR,OAAA,EAAS,KAAK,WAAA,EAAY;AAAA,MAC1B,IAAA,EAAM,KAAK,SAAA,CAAU;AAAA,QACjB,QAAA;AAAA,QACA,KAAA;AAAA,QACA,GAAI,QAAQ,MAAA,KAAW,MAAA,GAAY,EAAE,MAAA,EAAQ,OAAA,CAAQ,MAAA,EAAO,GAAI,EAAC;AAAA,QACjE,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,GAAA,KAAQ,MAAA,GAAY,EAAE,GAAA,EAAK,OAAA,CAAQ,GAAA,EAAI,GAAI,EAAC;AAAA,QACxD,GAAI,QAAQ,SAAA,KAAc,MAAA,GAAY,EAAE,SAAA,EAAW,OAAA,CAAQ,SAAA,EAAU,GAAI,EAAC;AAAA,QAC1E,GAAI,QAAQ,WAAA,KAAgB,MAAA,GAAY,EAAE,WAAA,EAAa,OAAA,CAAQ,WAAA,EAAY,GAAI;AAAC,OACnF;AAAA,KACJ,CAAA;AAED,IAAA,IAAI,CAAC,SAAS,EAAA,EAAI;AACd,MAAA,IAAI,YAAA,GAAe,CAAA,wBAAA,EAA2B,QAAA,CAAS,MAAM,CAAA,UAAA,CAAA;AAC7D,MAAA,IAAI;AACA,QAAA,MAAM,SAAA,GAAa,MAAM,QAAA,CAAS,IAAA,EAAK;AACvC,QAAA,IAAI,SAAA,EAAW,KAAA,EAAO,YAAA,GAAe,SAAA,CAAU,KAAA;AAAA,aAAA,IACtC,SAAA,EAAW,OAAA,EAAS,YAAA,GAAe,SAAA,CAAU,OAAA;AAAA,MAC1D,CAAA,CAAA,MAAQ;AAAA,MAAE;AACV,MAAA,MAAM,IAAI,MAAM,YAAY,CAAA;AAAA,IAChC;AAEA,IAAA,MAAM,IAAA,GAAQ,MAAM,QAAA,CAAS,IAAA,EAAK;AAMlC,IAAA,OAAO,IAAA,CAAK,IAAA,IAAQ,IAAA,CAAK,OAAA,IAAW,KAAK,OAAA,IAAW,EAAA;AAAA,EACxD;AACJ;ACtZO,SAAS,OACZ,MAAA,EACc;AACd,EAAA,OAAO,IAAI,cAAA,CAAe;AAAA,IACtB,KAAA,EAAO,aAAA;AAAA,IACP,GAAG;AAAA,GACN,CAAA;AACL;AAGO,IAAM,OAAA,GAAU","file":"index.mjs","sourcesContent":["/**\n * Types for @krutai/ai-provider\n */\n\n/**\n * Default model identifier sent to the LangChain server when no model is specified.\n * Your server can use this value to route to its own default model.\n */\nexport const DEFAULT_MODEL = 'default' as const;\n\n/**\n * Default base URL for the LangChain backend server.\n * Used when no serverUrl is provided in the config.\n */\nexport const DEFAULT_SERVER_URL = 'http://localhost:8000' as const;\n\n/**\n * Configuration options for KrutAIProvider\n */\nexport interface KrutAIProviderConfig {\n /**\n * KrutAI API key.\n * Validated against the LangChain server before use.\n * Optional: defaults to process.env.KRUTAI_API_KEY\n */\n apiKey?: string;\n\n /**\n * Base URL of your deployed LangChain backend server.\n * @default \"http://localhost:8000\"\n * @example \"https://ai.krut.ai\"\n */\n serverUrl?: string;\n\n /**\n * The AI model to use (passed to the server).\n * The server decides what to do with this value.\n * @default \"default\"\n */\n model?: string;\n\n /**\n * Whether to validate the API key against the server on initialization.\n * Set to false to skip the validation round-trip (e.g. 
in tests).\n * @default true\n */\n validateOnInit?: boolean;\n}\n\n/**\n * A part of a multimodal message\n */\nexport interface TextContentPart {\n type: 'text';\n text: string;\n}\n\nexport interface ImageContentPart {\n type: 'image_url';\n image_url: {\n url: string; // Base64 data URI or HTTP(S) URL\n detail?: 'low' | 'high' | 'auto';\n };\n}\n\nexport type ContentPart = TextContentPart | ImageContentPart;\nexport type MessageContent = string | ContentPart[];\n\n/**\n * A single chat message\n */\nexport interface ChatMessage {\n role: 'user' | 'assistant' | 'system';\n content: MessageContent;\n}\n\n/**\n * Options for a single generate / stream / chat call\n */\nexport interface GenerateOptions {\n /**\n * Override the model for this specific call.\n */\n model?: string;\n\n /**\n * System prompt (prepended as a system message).\n */\n system?: string;\n\n /**\n * Maximum tokens to generate.\n */\n maxTokens?: number;\n\n /**\n * Temperature (0–2).\n */\n temperature?: number;\n\n /**\n * Array of image URLs or base64 data URIs to include with the request.\n */\n images?: string[];\n\n /**\n * Array of document URLs or base64 data URIs (e.g. PDFs) to include with the request.\n */\n documents?: string[];\n\n /**\n * Array of PDF URLs or base64 data URIs to include with the request.\n */\n pdf?: string[];\n}\n","import type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nimport { DEFAULT_MODEL, DEFAULT_SERVER_URL } from './types';\nimport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n ApiKeyValidationError as KrutAIKeyValidationError,\n} from 'krutai';\n\nexport { KrutAIKeyValidationError };\n\n/**\n * KrutAIProvider — fetch-based AI provider for KrutAI\n *\n * Calls your deployed LangChain backend server for all AI operations.\n * The API key is validated against the server before use.\n *\n * @example\n * ```typescript\n * import { KrutAIProvider } from '@krutai/ai-provider';\n *\n * // Using local dev server (http://localhost:8000 by default)\n * const ai = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * });\n *\n * // Or point to a production server\n * const aiProd = new KrutAIProvider({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://ai.krut.ai',\n * });\n *\n * await ai.initialize(); // validates key against server\n *\n * const text = await ai.generate('Tell me a joke');\n * console.log(text);\n * ```\n */\nexport class KrutAIProvider {\n private readonly apiKey: string;\n private readonly serverUrl: string;\n private readonly resolvedModel: string;\n private readonly config: KrutAIProviderConfig;\n\n private initialized = false;\n\n constructor(config: KrutAIProviderConfig) {\n this.config = config;\n this.apiKey = config.apiKey || process.env.KRUTAI_API_KEY || '';\n this.serverUrl = (config.serverUrl ?? DEFAULT_SERVER_URL).replace(/\\/$/, ''); // strip trailing slash\n this.resolvedModel = config.model ?? 
DEFAULT_MODEL;\n\n // Basic format check immediately on construction\n validateApiKeyFormat(this.apiKey);\n\n // If validation is disabled, mark as ready immediately\n if (config.validateOnInit === false) {\n this.initialized = true;\n }\n }\n\n /**\n * Initialize the provider.\n * Validates the API key against the LangChain server, then marks provider as ready.\n *\n * @throws {KrutAIKeyValidationError} if the key is rejected or the server is unreachable\n */\n async initialize(): Promise<void> {\n if (this.initialized) return;\n\n if (this.config.validateOnInit !== false) {\n await validateApiKey(this.apiKey, this.serverUrl);\n }\n\n this.initialized = true;\n }\n\n /**\n * Returns the currently configured default model.\n */\n getModel(): string {\n return this.resolvedModel;\n }\n\n /**\n * Returns whether the provider has been initialized.\n */\n isInitialized(): boolean {\n return this.initialized;\n }\n\n // ---------------------------------------------------------------------------\n // Private helpers\n // ---------------------------------------------------------------------------\n\n private assertInitialized(): void {\n if (!this.initialized) {\n throw new Error(\n 'KrutAIProvider not initialized. Call initialize() first or set validateOnInit to false.'\n );\n }\n }\n\n /** Common request headers sent to the server on every AI call. */\n private authHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this.apiKey}`,\n 'x-api-key': this.apiKey,\n };\n }\n\n // ---------------------------------------------------------------------------\n // Public AI Methods\n // ---------------------------------------------------------------------------\n\n /**\n * Generate a response for a prompt (non-streaming).\n *\n * Calls: POST {serverUrl}/generate\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async generate(prompt: string, options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/generate`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /generate`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? 
'';\n }\n\n /**\n * Generate a streaming response for a prompt via Server-Sent Events (SSE).\n *\n * Calls: POST {serverUrl}/stream\n * Body: { prompt, model, system?, maxTokens?, temperature? }\n * Expected response: `text/event-stream` with `data: <chunk>` lines.\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns An async generator yielding string chunks from the server\n *\n * @example\n * ```typescript\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n */\n async *stream(prompt: string, options: GenerateOptions = {}): AsyncGenerator<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n if (!response.body) {\n throw new Error('AI server returned no response body for /stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n // Keep the last (potentially incomplete) line in the buffer\n buffer = lines.pop() ?? '';\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const raw = line.slice(6).trim();\n if (raw === '[DONE]') return;\n try {\n const parsed = JSON.parse(raw) as {\n text?: string;\n content?: string;\n delta?: { content?: string };\n };\n const chunk =\n parsed.text ??\n parsed.content ??\n parsed.delta?.content ??\n '';\n if (chunk) yield chunk;\n } catch {\n // raw string chunk (non-JSON SSE)\n if (raw) yield raw;\n }\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Similar to stream() but returns the raw fetch Response object.\n * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client\n * (e.g., returning this directly from a Next.js API route).\n *\n * @param prompt - The user prompt string\n * @param options - Optional overrides (model, system, maxTokens, temperature)\n * @returns A Promise resolving to the native fetch Response\n */\n async streamResponse(prompt: string, options: GenerateOptions = {}): Promise<Response> {\n this.assertInitialized();\n const model = options.model ?? 
this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n prompt,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n console.log(response)\n return response;\n }\n\n /**\n * Multi-turn conversation streaming: pass a full message history.\n * Calls POST /stream with the full { messages } payload.\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns An async generator yielding string chunks from the server\n */\n async *streamChat(messages: ChatMessage[], options: GenerateOptions = {}): AsyncGenerator<string> {\n this.assertInitialized();\n\n if (!messages.length) {\n throw new Error('Messages array cannot be empty for streamChat');\n }\n\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n messages,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n if (!response.body) {\n throw new Error('AI server returned no response body for /stream');\n }\n\n const reader = response.body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() ?? 
'';\n\n for (const line of lines) {\n if (line.startsWith('data: ')) {\n const raw = line.slice(6).trim();\n if (raw === '[DONE]') return;\n try {\n const parsed = JSON.parse(raw) as {\n text?: string;\n content?: string;\n delta?: { content?: string };\n };\n const chunk =\n parsed.text ??\n parsed.content ??\n parsed.delta?.content ??\n '';\n if (chunk) yield chunk;\n } catch {\n if (raw) yield raw;\n }\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Similar to streamChat() but returns the raw fetch Response object.\n * Useful for proxying the Server-Sent Events stream directly to a frontend client.\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns A Promise resolving to the native fetch Response\n */\n async streamChatResponse(messages: ChatMessage[], options: GenerateOptions = {}): Promise<Response> {\n this.assertInitialized();\n\n if (!messages.length) {\n throw new Error('Messages array cannot be empty for streamChatResponse');\n }\n\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/stream`, {\n method: 'POST',\n headers: {\n ...this.authHeaders(),\n Accept: 'text/event-stream',\n },\n body: JSON.stringify({\n messages,\n model,\n ...(options.system !== undefined ? { system: options.system } : {}),\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? { temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /stream`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n return response;\n }\n\n /**\n * Multi-turn conversation: pass a full message history.\n *\n * Calls: POST {serverUrl}/chat\n * Body: { messages, model, maxTokens?, temperature? }\n * Expected response: { text: string } or { content: string } or { message: string }\n *\n * @param messages - Full conversation history\n * @param options - Optional overrides (model, maxTokens, temperature)\n * @returns The assistant's response text\n */\n async chat(messages: ChatMessage[], options: GenerateOptions = {}): Promise<string> {\n this.assertInitialized();\n const model = options.model ?? this.resolvedModel;\n\n const response = await fetch(`${this.serverUrl}/chat`, {\n method: 'POST',\n headers: this.authHeaders(),\n body: JSON.stringify({\n messages,\n model,\n ...(options.images !== undefined ? { images: options.images } : {}),\n ...(options.documents !== undefined ? { documents: options.documents } : {}),\n ...(options.pdf !== undefined ? { pdf: options.pdf } : {}),\n ...(options.maxTokens !== undefined ? { maxTokens: options.maxTokens } : {}),\n ...(options.temperature !== undefined ? 
{ temperature: options.temperature } : {}),\n }),\n });\n\n if (!response.ok) {\n let errorMessage = `AI server returned HTTP ${response.status} for /chat`;\n try {\n const errorData = (await response.json()) as { message?: string; error?: string };\n if (errorData?.error) errorMessage = errorData.error;\n else if (errorData?.message) errorMessage = errorData.message;\n } catch { }\n throw new Error(errorMessage);\n }\n\n const data = (await response.json()) as {\n text?: string;\n content?: string;\n message?: string;\n };\n\n return data.text ?? data.content ?? data.message ?? '';\n }\n}\n","/**\n * @krutai/ai-provider — AI Provider package for KrutAI\n *\n * A fetch-based wrapper that calls your deployed LangChain backend server.\n * The user's API key is validated against the server before any AI call is made.\n *\n * @example Basic usage\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize(); // validates key with server\n *\n * const text = await ai.generate('Write a poem about TypeScript');\n * console.log(text);\n * ```\n *\n * @example With custom model\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * model: 'gpt-4o',\n * });\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n *\n * @example Streaming\n * ```typescript\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n * await ai.initialize();\n *\n * const stream = ai.stream('Tell me a story');\n * for await (const chunk of stream) {\n * process.stdout.write(chunk);\n * }\n * ```\n *\n * @packageDocumentation\n */\n\nimport type { KrutAIProviderConfig } from './types';\nimport { DEFAULT_MODEL } from './types';\nimport { KrutAIProvider } from './client';\n\nexport { KrutAIProvider } from './client';\nexport { KrutAIKeyValidationError } from './client';\nexport {\n validateApiKeyWithService as validateApiKey,\n validateApiKeyFormat,\n} from 'krutai';\nexport type { KrutAIProviderConfig, GenerateOptions, ChatMessage } from './types';\nexport { DEFAULT_MODEL } from './types';\n\n/**\n * krutAI — convenience factory (mirrors `krutAuth` in @krutai/auth).\n *\n * Creates a `KrutAIProvider` instance configured to call your LangChain server.\n *\n * @param config - Provider configuration (`apiKey` and `serverUrl` are required)\n * @returns A `KrutAIProvider` instance — call `.initialize()` before use\n *\n * @example\n * ```typescript\n * import { krutAI } from '@krutai/ai-provider';\n *\n * const ai = krutAI({\n * apiKey: process.env.KRUTAI_API_KEY!,\n * serverUrl: 'https://krut.ai',\n * });\n *\n * await ai.initialize();\n * const text = await ai.generate('Hello!');\n * ```\n */\nexport function krutAI(\n config: KrutAIProviderConfig & { model?: string }\n): KrutAIProvider {\n return new KrutAIProvider({\n model: DEFAULT_MODEL,\n ...config,\n });\n}\n\n// Package metadata\nexport const VERSION = '0.2.0';\n"]}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@krutai/ai-provider",
3
- "version": "0.2.4",
3
+ "version": "0.2.12",
4
4
  "description": "AI provider package for KrutAI — fetch-based client for your deployed LangChain server with API key validation",
5
5
  "main": "./dist/index.js",
6
6
  "module": "./dist/index.mjs",
@@ -18,6 +18,7 @@
18
18
  "AI_REFERENCE.md"
19
19
  ],
20
20
  "scripts": {
21
+ "prepack": "npm run build",
21
22
  "build": "tsup",
22
23
  "dev": "tsup --watch",
23
24
  "clean": "rm -rf dist *.tsbuildinfo",
@@ -34,10 +35,10 @@
34
35
  "author": "",
35
36
  "license": "MIT",
36
37
  "dependencies": {
37
- "krutai": ">=0.1.4"
38
+ "krutai": ">=0.1.5"
38
39
  },
39
40
  "peerDependencies": {
40
- "krutai": ">=0.1.4"
41
+ "krutai": ">=0.1.5"
41
42
  },
42
43
  "devDependencies": {
43
44
  "@types/node": "^20.11.0",