@krutai/ai-provider 0.2.12 → 0.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AI_REFERENCE.md CHANGED
@@ -3,7 +3,7 @@
3
3
  ## Package Overview
4
4
 
5
5
  - **Name**: `@krutai/ai-provider`
6
- - **Version**: `0.2.2`
6
+ - **Version**: `0.2.15`
7
7
  - **Purpose**: AI provider for KrutAI — fetch-based client for your deployed LangChain server with API key validation
8
8
  - **Entry**: `src/index.ts` → `dist/index.{js,mjs,d.ts}`
9
9
  - **Build**: `tsup` (CJS + ESM, no external SDK deps)
@@ -11,7 +11,7 @@
11
11
  ## Architecture
12
12
 
13
13
  ```
14
- @krutai/ai-provider@0.2.2
14
+ @krutai/ai-provider@0.2.15
15
15
  └── peerDep: krutai (core utilities)
16
16
 
17
17
  AI Flow:
@@ -19,7 +19,6 @@ AI Flow:
19
19
  → POST {serverUrl}/validate (key validation)
20
20
  → POST {serverUrl}/generate (single response)
21
21
  → POST {serverUrl}/stream (SSE streaming)
22
- → POST {serverUrl}/chat (multi-turn)
23
22
  → Your deployed LangChain server
24
23
  ```
25
24
 
@@ -42,9 +41,8 @@ packages/ai-provider/
42
41
  | Endpoint | Method | Body | Response |
43
42
  |---|---|---|---|
44
43
  | `/validate` | POST | `{ apiKey }` | `{ valid: true/false, message? }` |
45
- | `/generate` | POST | `{ prompt, model, system?, maxTokens?, temperature? }` | `{ text/content/message: string }` |
46
- | `/stream` | POST | `{ prompt, model, system?, maxTokens?, temperature? }` | SSE stream `data: <chunk>` |
47
- | `/chat` | POST | `{ messages, model, maxTokens?, temperature? }` | `{ text/content/message: string }` |
44
+ | `/generate` | POST | `{ prompt, model, system?, maxTokens?, temperature?, isStructure?, output_structure?, history?, attachments? }` | `{ text/content/message: string }` or `any` (if structured) |
45
+ | `/stream` | POST | `{ messages, model, system?, maxTokens?, temperature? }` | SSE stream `data: <chunk>` |
48
46
 
49
47
  All AI endpoints receive `Authorization: Bearer <apiKey>` and `x-api-key: <apiKey>` headers.
50
48
 
@@ -62,8 +60,49 @@ const ai = krutAI({
62
60
  });
63
61
 
64
62
  await ai.initialize(); // validates key with server
63
+ ```
64
+
65
+ ### 1. `chat(prompt: string)` — Simple String Prompts
66
+ Used to get a single, non-streaming text response from a string prompt.
67
+
68
+ ```typescript
69
+ const text = await ai.chat('Write a poem about TypeScript');
70
+ console.log(text);
71
+
72
+ // Example: Structured Output
73
+ interface UserProfile {
74
+ name: string;
75
+ age: number;
76
+ }
77
+ const profile = await ai.chat<UserProfile>('Generate a profile for John Doe', {
78
+ isStructure: true,
79
+ output_structure: ['name', 'age'] // or a JSON Schema
80
+ });
81
+ console.log(profile.name, profile.age);
82
+ ```
83
+
84
+ ### 2. `streamChatResponse(messages: ChatMessage[])` — Multi-Turn & Streaming
85
+ Used for multi-turn conversations and streaming responses. It takes an array of `ChatMessage` objects instead of a single string. It returns a raw fetch `Response` containing the `text/event-stream` body.
86
+
87
+ Ideal for proxying streams (e.g., Next.js API routes) down to your frontend component or manually reading the `ReadableStream`.
88
+
89
+ ```typescript
90
+ // Example: Proxying in a Next.js route
91
+ export async function POST(req: Request) {
92
+ const { messages } = await req.json();
93
+
94
+ // ai.streamChatResponse accepts an array of messages:
95
+ // [{ role: 'user', content: '...' }, ...]
96
+ return await ai.streamChatResponse(messages);
97
+ }
65
98
 
66
- const text = await ai.generate('Hello!');
99
+ // Example: Manual Node environment stream reading
100
+ const response = await ai.streamChatResponse([
101
+ { role: 'system', content: 'You are a helpful assistant.' },
102
+ { role: 'user', content: 'Tell me a story' }
103
+ ]);
104
+ const reader = response.body?.getReader();
105
+ // Use a TextDecoder to parse value chunks...
67
106
  ```
68
107
 
69
108
  ### `KrutAIProvider` class ← FULL CLASS API
@@ -74,7 +113,7 @@ import { KrutAIProvider } from '@krutai/ai-provider';
74
113
  const ai = new KrutAIProvider({
75
114
  apiKey: process.env.KRUTAI_API_KEY!,
76
115
  // serverUrl: 'https://krut.ai', // Optional: defaults to localhost:8000
77
- model: 'gpt-4o', // optional, default: 'default'
116
+ model: 'gemini-3.1-pro-preview', // optional, default: 'default'
78
117
  validateOnInit: true, // default: true
79
118
  });
80
119
 
@@ -83,12 +122,8 @@ await ai.initialize();
83
122
 
84
123
  **Methods:**
85
124
  - `initialize(): Promise<void>` — validates key against server, marks provider ready
86
- - `generate(prompt, opts?): Promise<string>` — single response (non-streaming)
87
- - `stream(prompt, opts?)` — `AsyncGenerator<string>` — SSE-based streaming
88
- - `streamResponse(prompt, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying
89
- - `streamChat(messages, opts?)` — `AsyncGenerator<string>` — SSE multi-turn streaming
90
- - `streamChatResponse(messages, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying
91
- - `chat(messages, opts?): Promise<string>` — multi-turn conversation
125
+ - `chat(prompt, opts?): Promise<string>` — single response (non-streaming)
126
+ - `streamChatResponse(messages, opts?)` — `Promise<Response>` — returns the raw fetch Response for proxying (SSE multi-turn streaming)
92
127
  - `getModel(): string` — active model name
93
128
  - `isInitialized(): boolean`
94
129
 
@@ -127,6 +162,10 @@ interface GenerateOptions {
127
162
  images?: string[]; // Array of image URLs or base64 data URIs
128
163
  documents?: string[]; // Array of document URLs or base64 data URIs
129
164
  pdf?: string[]; // Array of PDF URLs or base64 data URIs
165
+ history?: ChatMessage[]; // Optional: conversation history
166
+ attachments?: any[]; // Optional: multimodal attachments
167
+ isStructure?: boolean; // Whether to return structured output
168
+ output_structure?: any; // The schema (JSON Schema or field array) for structured output
130
169
  }
131
170
  ```
132
171
 
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # @krutai/ai-provider
2
2
 
3
- AI provider package for KrutAI — fetch-based client for your deployed LangChain server.
3
+ AI provider package for KrutAI — fetch-based client for our deployed server.
4
4
 
5
5
  ## Features
6
6
 
@@ -30,24 +30,24 @@ const ai = krutAI({
30
30
  await ai.initialize(); // validates key with your server
31
31
 
32
32
  // Single response
33
- const text = await ai.generate('Write a poem about TypeScript');
33
+ const text = await ai.chat('Write a poem about TypeScript');
34
34
  console.log(text);
35
35
  ```
36
36
 
37
37
  ## Usage
38
38
 
39
- ### Generate (single response)
39
+ ### Chat (single response)
40
40
 
41
41
  ```typescript
42
42
  const ai = krutAI({
43
43
  apiKey: process.env.KRUTAI_API_KEY!,
44
44
  serverUrl: 'https://krut.ai', // Override default for production
45
- model: 'gpt-4o', // optional — server's default is used if omitted
45
+ model: 'gemini-3.1-pro-preview', // optional — server's default is used if omitted
46
46
  });
47
47
 
48
48
  await ai.initialize();
49
49
 
50
- const text = await ai.generate('Explain async/await in JavaScript', {
50
+ const text = await ai.chat('Explain async/await in JavaScript', {
51
51
  system: 'You are a helpful coding tutor.',
52
52
  maxTokens: 500,
53
53
  temperature: 0.7,
@@ -56,21 +56,6 @@ const text = await ai.generate('Explain async/await in JavaScript', {
56
56
  console.log(text);
57
57
  ```
58
58
 
59
- ### Streaming
60
-
61
- ```typescript
62
- const ai = krutAI({
63
- apiKey: process.env.KRUTAI_API_KEY!,
64
- // uses http://localhost:8000 by default
65
- });
66
-
67
- await ai.initialize();
68
-
69
- // stream() is an async generator
70
- for await (const chunk of ai.stream('Tell me a short story')) {
71
- process.stdout.write(chunk);
72
- }
73
- ```
74
59
 
75
60
  ### Multi-turn Chat
76
61
 
@@ -108,7 +93,7 @@ const response = await ai.chat([
108
93
  ]
109
94
  }
110
95
  ], {
111
- model: 'gpt-4o',
96
+ model: 'gemini-3.1-pro-preview',
112
97
  // You can also pass images, documents, or pdfs via GenerateOptions
113
98
  images: ['https://example.com/photo.jpg'],
114
99
  documents: ['https://example.com/doc.docx'],
@@ -116,29 +101,11 @@ const response = await ai.chat([
116
101
  });
117
102
  ```
118
103
 
119
- ### Streaming Multi-turn Chat
120
-
121
- ```typescript
122
- const ai = krutAI({
123
- apiKey: process.env.KRUTAI_API_KEY!,
124
- });
104
+ ### Streaming (Proxying SSE Streams)
125
105
 
126
- await ai.initialize();
106
+ If you are building an API route (e.g., in Next.js) and want to pipe the true Server-Sent Events (SSE) stream down to your frontend component, use `streamChatResponse`.
127
107
 
128
- const stream = ai.streamChat([
129
- { role: 'user', content: 'What is the capital of France?' },
130
- { role: 'assistant', content: 'Paris.' },
131
- { role: 'user', content: 'What is it famous for?' },
132
- ]);
133
-
134
- for await (const chunk of stream) {
135
- process.stdout.write(chunk);
136
- }
137
- ```
138
-
139
- ### Proxying Streams to the Frontend (Next.js / API Routes)
140
-
141
- If you are building an API route (e.g., in Next.js) and want to pipe the true Server-Sent Events (SSE) stream down to your frontend component, use the `Response` variants:
108
+ `streamChatResponse` returns the raw fetch `Response` object containing the `text/event-stream` body from the deployed LangChain server.
142
109
 
143
110
  ```typescript
144
111
  // app/api/chat/route.ts
@@ -148,11 +115,50 @@ export async function POST(req: Request) {
148
115
  // Returns the native fetch Response (with text/event-stream headers and body)
149
116
  const response = await ai.streamChatResponse(messages);
150
117
 
151
- // Proxy it directly to the frontend!
118
+ // Proxy it directly to the frontend!
152
119
  return response;
153
120
  }
154
121
  ```
155
122
 
123
+ If you need to consume the stream in a Node environment rather than proxying it, you can read from the response body directly:
124
+
125
+ ```typescript
126
+ const response = await ai.streamChatResponse([
127
+ { role: 'user', content: 'Tell me a short story' }
128
+ ]);
129
+
130
+ const reader = response.body?.getReader();
131
+ const decoder = new TextDecoder();
132
+
133
+ if (reader) {
134
+ while (true) {
135
+ const { done, value } = await reader.read();
136
+ if (done) break;
137
+ process.stdout.write(decoder.decode(value, { stream: true }));
138
+ }
139
+ }
140
+ ```
141
+
142
+ ### Structured Output
143
+
144
+ You can request the AI to return data in a specific JSON structure (e.g. for generating models, summaries, or profiles).
145
+
146
+ ```typescript
147
+ interface Profile {
148
+ name: string;
149
+ age: number;
150
+ }
151
+
152
+ const profile = await ai.chat<Profile>('Generate a profile for John Doe', {
153
+ isStructure: true,
154
+ // Pass an array of field names for simple string objects...
155
+ output_structure: ['name', 'age'],
156
+ // ...or pass a full JSON Schema for complex objects
157
+ });
158
+
159
+ console.log(profile.name, profile.age);
160
+ ```
161
+
156
162
  ### Skip validation (useful for tests)
157
163
 
158
164
  ```typescript
@@ -163,7 +169,7 @@ const ai = krutAI({
163
169
  });
164
170
 
165
171
  // No need to call initialize() when validateOnInit is false
166
- const text = await ai.generate('Hello!');
172
+ const text = await ai.chat('Hello!');
167
173
  ```
168
174
 
169
175
  ## Server API Contract
@@ -173,9 +179,8 @@ Your LangChain server must expose these endpoints:
173
179
  | Endpoint | Method | Auth | Body |
174
180
  |---|---|---|---|
175
181
  | `/validate` | POST | `x-api-key` header | `{ "apiKey": "..." }` |
176
- | `/generate` | POST | `Authorization: Bearer <key>` | `{ "prompt": "...", "model": "...", ... }` |
177
- | `/stream` | POST | `Authorization: Bearer <key>` | `{ "prompt": "...", "model": "...", ... }` |
178
- | `/chat` | POST | `Authorization: Bearer <key>` | `{ "messages": [...], "model": "...", ... }` |
182
+ | `/generate` | POST | `Authorization: Bearer <key>` | `{ "prompt": "...", "isStructure": boolean, "output_structure": any, ... }` |
183
+ | `/stream` | POST | `Authorization: Bearer <key>` | `{ "messages": [...], "model": "...", ... }` |
179
184
 
180
185
  **Validation response:** `{ "valid": true }` or `{ "valid": false, "message": "reason" }`
181
186
 
package/dist/index.d.mts CHANGED
@@ -7,7 +7,7 @@ export { ApiKeyValidationError as KrutAIKeyValidationError, validateApiKeyWithSe
7
7
  * Default model identifier sent to the LangChain server when no model is specified.
8
8
  * Your server can use this value to route to its own default model.
9
9
  */
10
- declare const DEFAULT_MODEL: "default";
10
+ declare const DEFAULT_MODEL: "gemini-3.1-pro-preview";
11
11
  /**
12
12
  * Configuration options for KrutAIProvider
13
13
  */
@@ -92,6 +92,23 @@ interface GenerateOptions {
92
92
  * Array of PDF URLs or base64 data URIs to include with the request.
93
93
  */
94
94
  pdf?: string[];
95
+ /**
96
+ * Optional conversation history.
97
+ */
98
+ history?: ChatMessage[];
99
+ /**
100
+ * Optional attachments.
101
+ */
102
+ attachments?: any[];
103
+ /**
104
+ * Whether to return structured output.
105
+ */
106
+ isStructure?: boolean;
107
+ /**
108
+ * The schema for structured output.
109
+ * Can be a JSON Schema object or an array of field names.
110
+ */
111
+ output_structure?: any;
95
112
  }
96
113
 
97
114
  /**
@@ -146,57 +163,6 @@ declare class KrutAIProvider {
146
163
  private assertInitialized;
147
164
  /** Common request headers sent to the server on every AI call. */
148
165
  private authHeaders;
149
- /**
150
- * Generate a response for a prompt (non-streaming).
151
- *
152
- * Calls: POST {serverUrl}/generate
153
- * Body: { prompt, model, system?, maxTokens?, temperature? }
154
- * Expected response: { text: string } or { content: string } or { message: string }
155
- *
156
- * @param prompt - The user prompt string
157
- * @param options - Optional overrides (model, system, maxTokens, temperature)
158
- * @returns The assistant's response text
159
- */
160
- generate(prompt: string, options?: GenerateOptions): Promise<string>;
161
- /**
162
- * Generate a streaming response for a prompt via Server-Sent Events (SSE).
163
- *
164
- * Calls: POST {serverUrl}/stream
165
- * Body: { prompt, model, system?, maxTokens?, temperature? }
166
- * Expected response: `text/event-stream` with `data: <chunk>` lines.
167
- *
168
- * @param prompt - The user prompt string
169
- * @param options - Optional overrides (model, system, maxTokens, temperature)
170
- * @returns An async generator yielding string chunks from the server
171
- *
172
- * @example
173
- * ```typescript
174
- * const stream = ai.stream('Tell me a story');
175
- * for await (const chunk of stream) {
176
- * process.stdout.write(chunk);
177
- * }
178
- * ```
179
- */
180
- stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
181
- /**
182
- * Similar to stream() but returns the raw fetch Response object.
183
- * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
184
- * (e.g., returning this directly from a Next.js API route).
185
- *
186
- * @param prompt - The user prompt string
187
- * @param options - Optional overrides (model, system, maxTokens, temperature)
188
- * @returns A Promise resolving to the native fetch Response
189
- */
190
- streamResponse(prompt: string, options?: GenerateOptions): Promise<Response>;
191
- /**
192
- * Multi-turn conversation streaming: pass a full message history.
193
- * Calls POST /stream with the full { messages } payload.
194
- *
195
- * @param messages - Full conversation history
196
- * @param options - Optional overrides (model, maxTokens, temperature)
197
- * @returns An async generator yielding string chunks from the server
198
- */
199
- streamChat(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string>;
200
166
  /**
201
167
  * Similar to streamChat() but returns the raw fetch Response object.
202
168
  * Useful for proxying the Server-Sent Events stream directly to a frontend client.
@@ -207,17 +173,17 @@ declare class KrutAIProvider {
207
173
  */
208
174
  streamChatResponse(messages: ChatMessage[], options?: GenerateOptions): Promise<Response>;
209
175
  /**
210
- * Multi-turn conversation: pass a full message history.
176
+ * Generate a response for a prompt (non-streaming).
211
177
  *
212
- * Calls: POST {serverUrl}/chat
213
- * Body: { messages, model, maxTokens?, temperature? }
178
+ * Calls: POST {serverUrl}/generate
179
+ * Body: { prompt, model, system?, maxTokens?, temperature? }
214
180
  * Expected response: { text: string } or { content: string } or { message: string }
215
181
  *
216
- * @param messages - Full conversation history
217
- * @param options - Optional overrides (model, maxTokens, temperature)
218
- * @returns The assistant's response text
182
+ * @param prompt - The user prompt string
183
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
184
+ * @returns The assistant's response text (or an object if structured)
219
185
  */
220
- chat(messages: ChatMessage[], options?: GenerateOptions): Promise<string>;
186
+ chat<T = any>(prompt: string, options?: GenerateOptions): Promise<T>;
221
187
  }
222
188
 
223
189
  /**
@@ -237,7 +203,7 @@ declare class KrutAIProvider {
237
203
  *
238
204
  * await ai.initialize(); // validates key with server
239
205
  *
240
- * const text = await ai.generate('Write a poem about TypeScript');
206
+ * const text = await ai.chat('Write a poem about TypeScript');
241
207
  * console.log(text);
242
208
  * ```
243
209
  *
@@ -246,10 +212,10 @@ declare class KrutAIProvider {
246
212
  * const ai = krutAI({
247
213
  * apiKey: process.env.KRUTAI_API_KEY!,
248
214
  * serverUrl: 'https://krut.ai',
249
- * model: 'gpt-4o',
215
+ * model: 'gemini-3.1-pro-preview',
250
216
  * });
251
217
  * await ai.initialize();
252
- * const text = await ai.generate('Hello!');
218
+ * const text = await ai.chat('Hello!');
253
219
  * ```
254
220
  *
255
221
  * @example Streaming
@@ -260,10 +226,8 @@ declare class KrutAIProvider {
260
226
  * });
261
227
  * await ai.initialize();
262
228
  *
263
- * const stream = ai.stream('Tell me a story');
264
- * for await (const chunk of stream) {
265
- * process.stdout.write(chunk);
266
- * }
229
+ * const response = await ai.streamChatResponse([{ role: 'user', content: 'Tell me a story' }]);
230
+ * // Example assumes you handle the SSE stream from the response body
267
231
  * ```
268
232
  *
269
233
  * @packageDocumentation
@@ -287,12 +251,12 @@ declare class KrutAIProvider {
287
251
  * });
288
252
  *
289
253
  * await ai.initialize();
290
- * const text = await ai.generate('Hello!');
254
+ * const text = await ai.chat('Hello!');
291
255
  * ```
292
256
  */
293
257
  declare function krutAI(config: KrutAIProviderConfig & {
294
258
  model?: string;
295
259
  }): KrutAIProvider;
296
- declare const VERSION = "0.2.0";
260
+ declare const VERSION = "0.2.15";
297
261
 
298
262
  export { type ChatMessage, DEFAULT_MODEL, type GenerateOptions, KrutAIProvider, type KrutAIProviderConfig, VERSION, krutAI };
package/dist/index.d.ts CHANGED
@@ -7,7 +7,7 @@ export { ApiKeyValidationError as KrutAIKeyValidationError, validateApiKeyWithSe
7
7
  * Default model identifier sent to the LangChain server when no model is specified.
8
8
  * Your server can use this value to route to its own default model.
9
9
  */
10
- declare const DEFAULT_MODEL: "default";
10
+ declare const DEFAULT_MODEL: "gemini-3.1-pro-preview";
11
11
  /**
12
12
  * Configuration options for KrutAIProvider
13
13
  */
@@ -92,6 +92,23 @@ interface GenerateOptions {
92
92
  * Array of PDF URLs or base64 data URIs to include with the request.
93
93
  */
94
94
  pdf?: string[];
95
+ /**
96
+ * Optional conversation history.
97
+ */
98
+ history?: ChatMessage[];
99
+ /**
100
+ * Optional attachments.
101
+ */
102
+ attachments?: any[];
103
+ /**
104
+ * Whether to return structured output.
105
+ */
106
+ isStructure?: boolean;
107
+ /**
108
+ * The schema for structured output.
109
+ * Can be a JSON Schema object or an array of field names.
110
+ */
111
+ output_structure?: any;
95
112
  }
96
113
 
97
114
  /**
@@ -146,57 +163,6 @@ declare class KrutAIProvider {
146
163
  private assertInitialized;
147
164
  /** Common request headers sent to the server on every AI call. */
148
165
  private authHeaders;
149
- /**
150
- * Generate a response for a prompt (non-streaming).
151
- *
152
- * Calls: POST {serverUrl}/generate
153
- * Body: { prompt, model, system?, maxTokens?, temperature? }
154
- * Expected response: { text: string } or { content: string } or { message: string }
155
- *
156
- * @param prompt - The user prompt string
157
- * @param options - Optional overrides (model, system, maxTokens, temperature)
158
- * @returns The assistant's response text
159
- */
160
- generate(prompt: string, options?: GenerateOptions): Promise<string>;
161
- /**
162
- * Generate a streaming response for a prompt via Server-Sent Events (SSE).
163
- *
164
- * Calls: POST {serverUrl}/stream
165
- * Body: { prompt, model, system?, maxTokens?, temperature? }
166
- * Expected response: `text/event-stream` with `data: <chunk>` lines.
167
- *
168
- * @param prompt - The user prompt string
169
- * @param options - Optional overrides (model, system, maxTokens, temperature)
170
- * @returns An async generator yielding string chunks from the server
171
- *
172
- * @example
173
- * ```typescript
174
- * const stream = ai.stream('Tell me a story');
175
- * for await (const chunk of stream) {
176
- * process.stdout.write(chunk);
177
- * }
178
- * ```
179
- */
180
- stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
181
- /**
182
- * Similar to stream() but returns the raw fetch Response object.
183
- * Useful when you want to proxy the Server-Sent Events stream directly to a frontend client
184
- * (e.g., returning this directly from a Next.js API route).
185
- *
186
- * @param prompt - The user prompt string
187
- * @param options - Optional overrides (model, system, maxTokens, temperature)
188
- * @returns A Promise resolving to the native fetch Response
189
- */
190
- streamResponse(prompt: string, options?: GenerateOptions): Promise<Response>;
191
- /**
192
- * Multi-turn conversation streaming: pass a full message history.
193
- * Calls POST /stream with the full { messages } payload.
194
- *
195
- * @param messages - Full conversation history
196
- * @param options - Optional overrides (model, maxTokens, temperature)
197
- * @returns An async generator yielding string chunks from the server
198
- */
199
- streamChat(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string>;
200
166
  /**
201
167
  * Similar to streamChat() but returns the raw fetch Response object.
202
168
  * Useful for proxying the Server-Sent Events stream directly to a frontend client.
@@ -207,17 +173,17 @@ declare class KrutAIProvider {
207
173
  */
208
174
  streamChatResponse(messages: ChatMessage[], options?: GenerateOptions): Promise<Response>;
209
175
  /**
210
- * Multi-turn conversation: pass a full message history.
176
+ * Generate a response for a prompt (non-streaming).
211
177
  *
212
- * Calls: POST {serverUrl}/chat
213
- * Body: { messages, model, maxTokens?, temperature? }
178
+ * Calls: POST {serverUrl}/generate
179
+ * Body: { prompt, model, system?, maxTokens?, temperature? }
214
180
  * Expected response: { text: string } or { content: string } or { message: string }
215
181
  *
216
- * @param messages - Full conversation history
217
- * @param options - Optional overrides (model, maxTokens, temperature)
218
- * @returns The assistant's response text
182
+ * @param prompt - The user prompt string
183
+ * @param options - Optional overrides (model, system, maxTokens, temperature)
184
+ * @returns The assistant's response text (or an object if structured)
219
185
  */
220
- chat(messages: ChatMessage[], options?: GenerateOptions): Promise<string>;
186
+ chat<T = any>(prompt: string, options?: GenerateOptions): Promise<T>;
221
187
  }
222
188
 
223
189
  /**
@@ -237,7 +203,7 @@ declare class KrutAIProvider {
237
203
  *
238
204
  * await ai.initialize(); // validates key with server
239
205
  *
240
- * const text = await ai.generate('Write a poem about TypeScript');
206
+ * const text = await ai.chat('Write a poem about TypeScript');
241
207
  * console.log(text);
242
208
  * ```
243
209
  *
@@ -246,10 +212,10 @@ declare class KrutAIProvider {
246
212
  * const ai = krutAI({
247
213
  * apiKey: process.env.KRUTAI_API_KEY!,
248
214
  * serverUrl: 'https://krut.ai',
249
- * model: 'gpt-4o',
215
+ * model: 'gemini-3.1-pro-preview',
250
216
  * });
251
217
  * await ai.initialize();
252
- * const text = await ai.generate('Hello!');
218
+ * const text = await ai.chat('Hello!');
253
219
  * ```
254
220
  *
255
221
  * @example Streaming
@@ -260,10 +226,8 @@ declare class KrutAIProvider {
260
226
  * });
261
227
  * await ai.initialize();
262
228
  *
263
- * const stream = ai.stream('Tell me a story');
264
- * for await (const chunk of stream) {
265
- * process.stdout.write(chunk);
266
- * }
229
+ * const response = await ai.streamChatResponse([{ role: 'user', content: 'Tell me a story' }]);
230
+ * // Example assumes you handle the SSE stream from the response body
267
231
  * ```
268
232
  *
269
233
  * @packageDocumentation
@@ -287,12 +251,12 @@ declare class KrutAIProvider {
287
251
  * });
288
252
  *
289
253
  * await ai.initialize();
290
- * const text = await ai.generate('Hello!');
254
+ * const text = await ai.chat('Hello!');
291
255
  * ```
292
256
  */
293
257
  declare function krutAI(config: KrutAIProviderConfig & {
294
258
  model?: string;
295
259
  }): KrutAIProvider;
296
- declare const VERSION = "0.2.0";
260
+ declare const VERSION = "0.2.15";
297
261
 
298
262
  export { type ChatMessage, DEFAULT_MODEL, type GenerateOptions, KrutAIProvider, type KrutAIProviderConfig, VERSION, krutAI };