@krutai/ai-provider 0.2.11 → 0.2.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AI_REFERENCE.md CHANGED
@@ -19,7 +19,6 @@ AI Flow:
19
19
  → POST {serverUrl}/validate (key validation)
20
20
  → POST {serverUrl}/generate (single response)
21
21
  → POST {serverUrl}/stream (SSE streaming)
22
- → POST {serverUrl}/chat (multi-turn)
23
22
  → Your deployed LangChain server
24
23
  ```
25
24
 
@@ -43,8 +42,7 @@ packages/ai-provider/
43
42
  |---|---|---|---|
44
43
  | `/validate` | POST | `{ apiKey }` | `{ valid: true/false, message? }` |
45
44
  | `/generate` | POST | `{ prompt, model, system?, maxTokens?, temperature? }` | `{ text/content/message: string }` |
46
- | `/stream` | POST | `{ prompt, model, system?, maxTokens?, temperature? }` | SSE stream `data: <chunk>` |
47
- | `/chat` | POST | `{ messages, model, maxTokens?, temperature? }` | `{ text/content/message: string }` |
45
+ | `/stream` | POST | `{ messages, model, system?, maxTokens?, temperature? }` | SSE stream `data: <chunk>` |
48
46
 
49
47
  All AI endpoints receive `Authorization: Bearer <apiKey>` and `x-api-key: <apiKey>` headers.
50
48
 
@@ -62,8 +60,38 @@ const ai = krutAI({
62
60
  });
63
61
 
64
62
  await ai.initialize(); // validates key with server
63
+ ```
64
+
65
+ ### 1. `chat(prompt: string)` — Simple String Prompts
66
+ Used to get a single, non-streaming text response from a string prompt.
67
+
68
+ ```typescript
69
+ const text = await ai.chat('Write a poem about TypeScript');
70
+ console.log(text);
71
+ ```
72
+
73
+ ### 2. `streamChatResponse(messages: ChatMessage[])` — Multi-Turn & Streaming
74
+ Used for multi-turn conversations and streaming responses. It takes an array of `ChatMessage` objects instead of a single string. It returns a raw fetch `Response` containing the `text/event-stream` body.
75
+
76
+ Ideal for proxying streams (e.g., Next.js API routes) down to your frontend component or manually reading the `ReadableStream`.
77
+
78
+ ```typescript
79
+ // Example: Proxying in a Next.js route
80
+ export async function POST(req: Request) {
81
+ const { messages } = await req.json();
82
+
83
+ // ai.streamChatResponse accepts an array of messages:
84
+ // [{ role: 'user', content: '...' }, ...]
85
+ return await ai.streamChatResponse(messages);
86
+ }
65
87
 
66
- const text = await ai.generate('Hello!');
88
+ // Example: Manual Node environment stream reading
89
+ const response = await ai.streamChatResponse([
90
+ { role: 'system', content: 'You are a helpful assistant.' },
91
+ { role: 'user', content: 'Tell me a story' }
92
+ ]);
93
+ const reader = response.body?.getReader();
94
+ // Use a TextDecoder to parse value chunks...
67
95
  ```
68
96
 
69
97
  ### `KrutAIProvider` class ← FULL CLASS API
@@ -83,12 +111,8 @@ await ai.initialize();
83
111
 
84
112
  **Methods:**
85
113
  - `initialize(): Promise<void>` — validates key against server, marks provider ready
86
- - `generate(prompt, opts?): Promise<string>` — single response (non-streaming)
87
- - `stream(prompt, opts?)` — `AsyncGenerator<string>` — SSE-based streaming
88
- - `streamResponse(prompt, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying
89
- - `streamChat(messages, opts?)` — `AsyncGenerator<string>` — SSE multi-turn streaming
90
- - `streamChatResponse(messages, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying
91
- - `chat(messages, opts?): Promise<string>` — multi-turn conversation
114
+ - `chat(prompt, opts?): Promise<string>` — single response (non-streaming)
115
+ - `streamChatResponse(messages, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying (SSE multi-turn streaming)
92
116
  - `getModel(): string` — active model name
93
117
  - `isInitialized(): boolean`
94
118
 
@@ -124,6 +148,9 @@ interface GenerateOptions {
124
148
  system?: string; // system prompt
125
149
  maxTokens?: number;
126
150
  temperature?: number;
151
+ images?: string[]; // Array of image URLs or base64 data URIs
152
+ documents?: string[]; // Array of document URLs or base64 data URIs
153
+ pdf?: string[]; // Array of PDF URLs or base64 data URIs
127
154
  }
128
155
  ```
129
156
 
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # @krutai/ai-provider
2
2
 
3
- AI provider package for KrutAI — fetch-based client for your deployed LangChain server.
3
+ AI provider package for KrutAI — fetch-based client for our deployed server.
4
4
 
5
5
  ## Features
6
6
 
@@ -30,13 +30,13 @@ const ai = krutAI({
30
30
  await ai.initialize(); // validates key with your server
31
31
 
32
32
  // Single response
33
- const text = await ai.generate('Write a poem about TypeScript');
33
+ const text = await ai.chat('Write a poem about TypeScript');
34
34
  console.log(text);
35
35
  ```
36
36
 
37
37
  ## Usage
38
38
 
39
- ### Generate (single response)
39
+ ### Chat (single response)
40
40
 
41
41
  ```typescript
42
42
  const ai = krutAI({
@@ -47,7 +47,7 @@ const ai = krutAI({
47
47
 
48
48
  await ai.initialize();
49
49
 
50
- const text = await ai.generate('Explain async/await in JavaScript', {
50
+ const text = await ai.chat('Explain async/await in JavaScript', {
51
51
  system: 'You are a helpful coding tutor.',
52
52
  maxTokens: 500,
53
53
  temperature: 0.7,
@@ -56,21 +56,6 @@ const text = await ai.generate('Explain async/await in JavaScript', {
56
56
  console.log(text);
57
57
  ```
58
58
 
59
- ### Streaming
60
-
61
- ```typescript
62
- const ai = krutAI({
63
- apiKey: process.env.KRUTAI_API_KEY!,
64
- // uses http://localhost:8000 by default
65
- });
66
-
67
- await ai.initialize();
68
-
69
- // stream() is an async generator
70
- for await (const chunk of ai.stream('Tell me a short story')) {
71
- process.stdout.write(chunk);
72
- }
73
- ```
74
59
 
75
60
  ### Multi-turn Chat
76
61
 
@@ -107,32 +92,20 @@ const response = await ai.chat([
107
92
  }
108
93
  ]
109
94
  }
110
- ], { model: 'gpt-4o' });
111
- ```
112
-
113
- ### Streaming Multi-turn Chat
114
-
115
- ```typescript
116
- const ai = krutAI({
117
- apiKey: process.env.KRUTAI_API_KEY!,
95
+ ], {
96
+ model: 'gpt-4o',
97
+ // You can also pass images, documents, or pdfs via GenerateOptions
98
+ images: ['https://example.com/photo.jpg'],
99
+ documents: ['https://example.com/doc.docx'],
100
+ pdf: ['https://example.com/report.pdf']
118
101
  });
119
-
120
- await ai.initialize();
121
-
122
- const stream = ai.streamChat([
123
- { role: 'user', content: 'What is the capital of France?' },
124
- { role: 'assistant', content: 'Paris.' },
125
- { role: 'user', content: 'What is it famous for?' },
126
- ]);
127
-
128
- for await (const chunk of stream) {
129
- process.stdout.write(chunk);
130
- }
131
102
  ```
132
103
 
133
- ### Proxying Streams to the Frontend (Next.js / API Routes)
104
+ ### Streaming (Proxying SSE Streams)
134
105
 
135
- If you are building an API route (e.g., in Next.js) and want to pipe the true Server-Sent Events (SSE) stream down to your frontend component, use the `Response` variants:
106
+ If you are building an API route (e.g., in Next.js) and want to pipe the true Server-Sent Events (SSE) stream down to your frontend component, use `streamChatResponse`.
107
+
108
+ `streamChatResponse` returns the raw fetch `Response` object containing the `text/event-stream` body from the deployed LangChain server.
136
109
 
137
110
  ```typescript
138
111
  // app/api/chat/route.ts
@@ -142,11 +115,30 @@ export async function POST(req: Request) {
142
115
  // Returns the native fetch Response (with text/event-stream headers and body)
143
116
  const response = await ai.streamChatResponse(messages);
144
117
 
145
- // Proxy it directly to the frontend!
118
+ // Proxy it directly to the frontend!
146
119
  return response;
147
120
  }
148
121
  ```
149
122
 
123
+ If you need to consume the stream in a Node environment rather than proxying it, you can read from the response body directly:
124
+
125
+ ```typescript
126
+ const response = await ai.streamChatResponse([
127
+ { role: 'user', content: 'Tell me a short story' }
128
+ ]);
129
+
130
+ const reader = response.body?.getReader();
131
+ const decoder = new TextDecoder();
132
+
133
+ if (reader) {
134
+ while (true) {
135
+ const { done, value } = await reader.read();
136
+ if (done) break;
137
+ process.stdout.write(decoder.decode(value, { stream: true }));
138
+ }
139
+ }
140
+ ```
141
+
150
142
  ### Skip validation (useful for tests)
151
143
 
152
144
  ```typescript
@@ -157,7 +149,7 @@ const ai = krutAI({
157
149
  });
158
150
 
159
151
  // No need to call initialize() when validateOnInit is false
160
- const text = await ai.generate('Hello!');
152
+ const text = await ai.chat('Hello!');
161
153
  ```
162
154
 
163
155
  ## Server API Contract
@@ -168,8 +160,7 @@ Your LangChain server must expose these endpoints:
168
160
  |---|---|---|---|
169
161
  | `/validate` | POST | `x-api-key` header | `{ "apiKey": "..." }` |
170
162
  | `/generate` | POST | `Authorization: Bearer <key>` | `{ "prompt": "...", "model": "...", ... }` |
171
- | `/stream` | POST | `Authorization: Bearer <key>` | `{ "prompt": "...", "model": "...", ... }` |
172
- | `/chat` | POST | `Authorization: Bearer <key>` | `{ "messages": [...], "model": "...", ... }` |
163
+ | `/stream` | POST | `Authorization: Bearer <key>` | `{ "messages": [...], "model": "...", ... }` |
173
164
 
174
165
  **Validation response:** `{ "valid": true }` or `{ "valid": false, "message": "reason" }`
175
166
 
package/dist/index.d.mts CHANGED
@@ -84,6 +84,14 @@ interface GenerateOptions {
84
84
  * Array of image URLs or base64 data URIs to include with the request.
85
85
  */
86
86
  images?: string[];
87
+ /**
88
+ * Array of document URLs or base64 data URIs (e.g. PDFs) to include with the request.
89
+ */
90
+ documents?: string[];
91
+ /**
92
+ * Array of PDF URLs or base64 data URIs to include with the request.
93
+ */
94
+ pdf?: string[];
87
95
  }
88
96
 
89
97
  /**
@@ -138,57 +146,6 @@ declare class KrutAIProvider {
138
146
  private assertInitialized;
139
147
  /** Common request headers sent to the server on every AI call. */
140
148
  private authHeaders;
141
- /**
142
- * Generate a response for a prompt (non-streaming).
143
- *
144
- * Calls: POST {serverUrl}/generate
145
- * Body: { prompt, model, system?, maxTokens?, temperature? }
146
- * Expected response: { text: string } or { content: string } or { message: string }
147
- *
148
- * @param prompt - The user prompt string
149
- * @param options - Optional overrides (model, system, maxTokens, temperature)
150
- * @returns The assistant's response text
151
- */
152
- generate(prompt: string, options?: GenerateOptions): Promise<string>;
153
- /**
154
- * Generate a streaming response for a prompt via Server-Sent Events (SSE).
155
- *
156
- * Calls: POST {serverUrl}/stream
157
- * Body: { prompt, model, system?, maxTokens?, temperature? }
158
- * Expected response: `text/event-stream` with `data: <chunk>` lines.
159
- *
160
- * @param prompt - The user prompt string
161
- * @param options - Optional overrides (model, system, maxTokens, temperature)
162
- * @returns An async generator yielding string chunks from the server
163
- *
164
- * @example
165
- * ```typescript
166
- * const stream = ai.stream('Tell me a story');
167
- * for await (const chunk of stream) {
168
- * process.stdout.write(chunk);
169
- * }
170
- * ```
171
- */
172
- stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
173
- /**
174
- * Similar to stream() but returns the raw fetch Response object.
175
- * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
176
- * (e.g., returning this directly from a Next.js API route).
177
- *
178
- * @param prompt - The user prompt string
179
- * @param options - Optional overrides (model, system, maxTokens, temperature)
180
- * @returns A Promise resolving to the native fetch Response
181
- */
182
- streamResponse(prompt: string, options?: GenerateOptions): Promise<Response>;
183
- /**
184
- * Multi-turn conversation streaming: pass a full message history.
185
- * Calls POST /stream with the full { messages } payload.
186
- *
187
- * @param messages - Full conversation history
188
- * @param options - Optional overrides (model, maxTokens, temperature)
189
- * @returns An async generator yielding string chunks from the server
190
- */
191
- streamChat(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string>;
192
149
  /**
193
150
  * Similar to streamChat() but returns the raw fetch Response object.
194
151
  * Useful for proxying the Server-Sent Events stream directly to a frontend client.
@@ -199,17 +156,17 @@ declare class KrutAIProvider {
199
156
  */
200
157
  streamChatResponse(messages: ChatMessage[], options?: GenerateOptions): Promise<Response>;
201
158
  /**
202
- * Multi-turn conversation: pass a full message history.
159
+ * Generate a response for a prompt (non-streaming).
203
160
  *
204
- * Calls: POST {serverUrl}/chat
205
- * Body: { messages, model, maxTokens?, temperature? }
161
+ * Calls: POST {serverUrl}/generate
162
+ * Body: { prompt, model, system?, maxTokens?, temperature? }
206
163
  * Expected response: { text: string } or { content: string } or { message: string }
207
164
  *
208
- * @param messages - Full conversation history
209
- * @param options - Optional overrides (model, maxTokens, temperature)
165
+ * @param prompt - The user prompt string
166
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
210
167
  * @returns The assistant's response text
211
168
  */
212
- chat(messages: ChatMessage[], options?: GenerateOptions): Promise<string>;
169
+ chat(prompt: string, options?: GenerateOptions): Promise<string>;
213
170
  }
214
171
 
215
172
  /**
@@ -229,7 +186,7 @@ declare class KrutAIProvider {
229
186
  *
230
187
  * await ai.initialize(); // validates key with server
231
188
  *
232
- * const text = await ai.generate('Write a poem about TypeScript');
189
+ * const text = await ai.chat('Write a poem about TypeScript');
233
190
  * console.log(text);
234
191
  * ```
235
192
  *
@@ -241,7 +198,7 @@ declare class KrutAIProvider {
241
198
  * model: 'gpt-4o',
242
199
  * });
243
200
  * await ai.initialize();
244
- * const text = await ai.generate('Hello!');
201
+ * const text = await ai.chat('Hello!');
245
202
  * ```
246
203
  *
247
204
  * @example Streaming
@@ -252,10 +209,8 @@ declare class KrutAIProvider {
252
209
  * });
253
210
  * await ai.initialize();
254
211
  *
255
- * const stream = ai.stream('Tell me a story');
256
- * for await (const chunk of stream) {
257
- * process.stdout.write(chunk);
258
- * }
212
+ * const response = await ai.streamChatResponse([{ role: 'user', content: 'Tell me a story' }]);
213
+ * // Example assumes you handle the SSE stream from the response body
259
214
  * ```
260
215
  *
261
216
  * @packageDocumentation
@@ -279,7 +234,7 @@ declare class KrutAIProvider {
279
234
  * });
280
235
  *
281
236
  * await ai.initialize();
282
- * const text = await ai.generate('Hello!');
237
+ * const text = await ai.chat('Hello!');
283
238
  * ```
284
239
  */
285
240
  declare function krutAI(config: KrutAIProviderConfig & {
package/dist/index.d.ts CHANGED
@@ -84,6 +84,14 @@ interface GenerateOptions {
84
84
  * Array of image URLs or base64 data URIs to include with the request.
85
85
  */
86
86
  images?: string[];
87
+ /**
88
+ * Array of document URLs or base64 data URIs (e.g. PDFs) to include with the request.
89
+ */
90
+ documents?: string[];
91
+ /**
92
+ * Array of PDF URLs or base64 data URIs to include with the request.
93
+ */
94
+ pdf?: string[];
87
95
  }
88
96
 
89
97
  /**
@@ -138,57 +146,6 @@ declare class KrutAIProvider {
138
146
  private assertInitialized;
139
147
  /** Common request headers sent to the server on every AI call. */
140
148
  private authHeaders;
141
- /**
142
- * Generate a response for a prompt (non-streaming).
143
- *
144
- * Calls: POST {serverUrl}/generate
145
- * Body: { prompt, model, system?, maxTokens?, temperature? }
146
- * Expected response: { text: string } or { content: string } or { message: string }
147
- *
148
- * @param prompt - The user prompt string
149
- * @param options - Optional overrides (model, system, maxTokens, temperature)
150
- * @returns The assistant's response text
151
- */
152
- generate(prompt: string, options?: GenerateOptions): Promise<string>;
153
- /**
154
- * Generate a streaming response for a prompt via Server-Sent Events (SSE).
155
- *
156
- * Calls: POST {serverUrl}/stream
157
- * Body: { prompt, model, system?, maxTokens?, temperature? }
158
- * Expected response: `text/event-stream` with `data: <chunk>` lines.
159
- *
160
- * @param prompt - The user prompt string
161
- * @param options - Optional overrides (model, system, maxTokens, temperature)
162
- * @returns An async generator yielding string chunks from the server
163
- *
164
- * @example
165
- * ```typescript
166
- * const stream = ai.stream('Tell me a story');
167
- * for await (const chunk of stream) {
168
- * process.stdout.write(chunk);
169
- * }
170
- * ```
171
- */
172
- stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
173
- /**
174
- * Similar to stream() but returns the raw fetch Response object.
175
- * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
176
- * (e.g., returning this directly from a Next.js API route).
177
- *
178
- * @param prompt - The user prompt string
179
- * @param options - Optional overrides (model, system, maxTokens, temperature)
180
- * @returns A Promise resolving to the native fetch Response
181
- */
182
- streamResponse(prompt: string, options?: GenerateOptions): Promise<Response>;
183
- /**
184
- * Multi-turn conversation streaming: pass a full message history.
185
- * Calls POST /stream with the full { messages } payload.
186
- *
187
- * @param messages - Full conversation history
188
- * @param options - Optional overrides (model, maxTokens, temperature)
189
- * @returns An async generator yielding string chunks from the server
190
- */
191
- streamChat(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string>;
192
149
  /**
193
150
  * Similar to streamChat() but returns the raw fetch Response object.
194
151
  * Useful for proxying the Server-Sent Events stream directly to a frontend client.
@@ -199,17 +156,17 @@ declare class KrutAIProvider {
199
156
  */
200
157
  streamChatResponse(messages: ChatMessage[], options?: GenerateOptions): Promise<Response>;
201
158
  /**
202
- * Multi-turn conversation: pass a full message history.
159
+ * Generate a response for a prompt (non-streaming).
203
160
  *
204
- * Calls: POST {serverUrl}/chat
205
- * Body: { messages, model, maxTokens?, temperature? }
161
+ * Calls: POST {serverUrl}/generate
162
+ * Body: { prompt, model, system?, maxTokens?, temperature? }
206
163
  * Expected response: { text: string } or { content: string } or { message: string }
207
164
  *
208
- * @param messages - Full conversation history
209
- * @param options - Optional overrides (model, maxTokens, temperature)
165
+ * @param prompt - The user prompt string
166
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
210
167
  * @returns The assistant's response text
211
168
  */
212
- chat(messages: ChatMessage[], options?: GenerateOptions): Promise<string>;
169
+ chat(prompt: string, options?: GenerateOptions): Promise<string>;
213
170
  }
214
171
 
215
172
  /**
@@ -229,7 +186,7 @@ declare class KrutAIProvider {
229
186
  *
230
187
  * await ai.initialize(); // validates key with server
231
188
  *
232
- * const text = await ai.generate('Write a poem about TypeScript');
189
+ * const text = await ai.chat('Write a poem about TypeScript');
233
190
  * console.log(text);
234
191
  * ```
235
192
  *
@@ -241,7 +198,7 @@ declare class KrutAIProvider {
241
198
  * model: 'gpt-4o',
242
199
  * });
243
200
  * await ai.initialize();
244
- * const text = await ai.generate('Hello!');
201
+ * const text = await ai.chat('Hello!');
245
202
  * ```
246
203
  *
247
204
  * @example Streaming
@@ -252,10 +209,8 @@ declare class KrutAIProvider {
252
209
  * });
253
210
  * await ai.initialize();
254
211
  *
255
- * const stream = ai.stream('Tell me a story');
256
- * for await (const chunk of stream) {
257
- * process.stdout.write(chunk);
258
- * }
212
+ * const response = await ai.streamChatResponse([{ role: 'user', content: 'Tell me a story' }]);
213
+ * // Example assumes you handle the SSE stream from the response body
259
214
  * ```
260
215
  *
261
216
  * @packageDocumentation
@@ -279,7 +234,7 @@ declare class KrutAIProvider {
279
234
  * });
280
235
  *
281
236
  * await ai.initialize();
282
- * const text = await ai.generate('Hello!');
237
+ * const text = await ai.chat('Hello!');
283
238
  * ```
284
239
  */
285
240
  declare function krutAI(config: KrutAIProviderConfig & {