@krutai/ai-provider 0.1.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,54 +1,49 @@
1
- import * as _openrouter_sdk_lib_event_streams_js from '@openrouter/sdk/lib/event-streams.js';
2
- import * as _openrouter_sdk_models from '@openrouter/sdk/models';
3
- import { OpenRouter } from '@openrouter/sdk';
4
-
5
1
  /**
6
2
  * Types for @krutai/ai-provider
7
3
  */
8
4
  /**
9
- * Default model used when no model is specified
5
+ * Default model identifier sent to the LangChain server when no model is specified.
6
+ * Your server can use this value to route to its own default model.
10
7
  */
11
- declare const DEFAULT_MODEL: "qwen/qwen3-235b-a22b-thinking-2507";
8
+ declare const DEFAULT_MODEL: "default";
12
9
  /**
13
10
  * Configuration options for KrutAIProvider
14
11
  */
15
12
  interface KrutAIProviderConfig {
16
13
  /**
17
- * KrutAI API key for service validation.
14
+ * KrutAI API key.
15
+ * Validated against the LangChain server before use.
18
16
  * @required
19
17
  */
20
18
  apiKey: string;
21
19
  /**
22
- * OpenRouter API key.
23
- * Falls back to process.env.OPENROUTER_API_KEY if not provided.
20
+ * Base URL of your deployed LangChain backend server.
21
+ * @default "http://localhost:8000"
22
+ * @example "https://ai.krut.ai"
24
23
  */
25
- openRouterApiKey?: string;
24
+ serverUrl?: string;
26
25
  /**
27
- * The AI model to use.
28
- * @default "qwen/qwen3-235b-a22b-thinking-2507"
29
- * @see https://openrouter.ai/models
26
+ * The AI model to use (passed to the server).
27
+ * The server decides what to do with this value.
28
+ * @default "default"
30
29
  */
31
30
  model?: string;
32
31
  /**
33
- * Whether to validate the OpenRouter API key on initialization.
32
+ * Whether to validate the API key against the server on initialization.
33
+ * Set to false to skip the validation round-trip (e.g. in tests).
34
34
  * @default true
35
35
  */
36
36
  validateOnInit?: boolean;
37
- /**
38
- * Custom POST endpoint for OpenRouter API key validation.
39
- * Will be wired in once you deploy the route.
40
- */
41
- validationEndpoint?: string;
42
37
  }
43
38
  /**
44
- * A single chat message (OpenRouter format)
39
+ * A single chat message
45
40
  */
46
41
  interface ChatMessage {
47
42
  role: 'user' | 'assistant' | 'system';
48
43
  content: string;
49
44
  }
50
45
  /**
51
- * Options for a single generate / stream call
46
+ * Options for a single generate / stream / chat call
52
47
  */
53
48
  interface GenerateOptions {
54
49
  /**
@@ -70,115 +65,133 @@ interface GenerateOptions {
70
65
  }
71
66
 
72
67
  /**
73
- * OpenRouter API Key Validator
68
+ * API Key Validator for @krutai/ai-provider
74
69
  *
75
- * Validates the OpenRouter API key format and (optionally) its validity
76
- * via a configurable POST endpoint.
70
+ * Validates the KrutAI API key by calling the deployed LangChain server's
71
+ * validation endpoint. The server is expected to respond with { valid: true }
72
+ * for a valid key and { valid: false } (or a non-2xx status) for an invalid one.
77
73
  *
78
- * The user will later provide a live POST route; until then the
79
- * service-level check is a placeholder that accepts any well-formed key.
74
+ * Validation endpoint called:
75
+ * POST {serverUrl}/validate
76
+ * Headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }
77
+ * Body: { "apiKey": "<key>" }
78
+ * Expected Response: { "valid": true }
80
79
  */
81
- declare class OpenRouterKeyValidationError extends Error {
80
+ declare class KrutAIKeyValidationError extends Error {
82
81
  constructor(message: string);
83
82
  }
84
83
  /**
85
- * Validates the format of an OpenRouter API key.
86
- * @throws {OpenRouterKeyValidationError}
84
+ * Basic sanity check — ensures the key is a non-empty string.
85
+ * @throws {KrutAIKeyValidationError}
87
86
  */
88
- declare function validateOpenRouterKeyFormat(apiKey: string): void;
87
+ declare function validateApiKeyFormat(apiKey: string): void;
89
88
  /**
90
- * Validates the OpenRouter API key with the KrutAI validation service.
89
+ * Validates the API key against the deployed LangChain server.
91
90
  *
92
- * The user will provide a live POST route later. Until then this is a
93
- * placeholder that accepts any key that passes format validation.
91
+ * Sends a POST request to `{serverUrl}/validate` and expects:
92
+ * - HTTP 2xx status
93
+ * - JSON body: `{ "valid": true }`
94
94
  *
95
- * @param apiKey - OpenRouter API key to validate
96
- * @param validationEndpoint - Optional POST URL to validate against
95
+ * @param apiKey - The API key to validate
96
+ * @param serverUrl - Base URL of the LangChain backend (e.g. "https://ai.yourapp.com")
97
+ * @throws {KrutAIKeyValidationError}
97
98
  */
98
- declare function validateOpenRouterKeyWithService(apiKey: string, validationEndpoint?: string): Promise<boolean>;
99
+ declare function validateApiKey(apiKey: string, serverUrl: string): Promise<boolean>;
99
100
 
100
101
  /**
101
- * KrutAIProvider — AI provider for KrutAI
102
+ * KrutAIProvider — fetch-based AI provider for KrutAI
102
103
  *
103
- * Wraps `@openrouter/sdk` and adds:
104
- * - OpenRouter API key format validation
105
- * - Configurable default model (defaults to qwen/qwen3-235b-a22b-thinking-2507)
106
- * - Optional pluggable validation endpoint
104
+ * Calls your deployed LangChain backend server for all AI operations.
105
+ * The API key is validated against the server before use.
107
106
  *
108
107
  * @example
109
108
  * ```typescript
110
109
  * import { KrutAIProvider } from '@krutai/ai-provider';
111
110
  *
111
+ * // Using local dev server (http://localhost:8000 by default)
112
112
  * const ai = new KrutAIProvider({
113
113
  * apiKey: process.env.KRUTAI_API_KEY!,
114
- * openRouterApiKey: process.env.OPENROUTER_API_KEY!, // or set in env
115
114
  * });
116
115
  *
117
- * await ai.initialize();
116
+ * // Or point to a production server
117
+ * const aiProd = new KrutAIProvider({
118
+ * apiKey: process.env.KRUTAI_API_KEY!,
119
+ * serverUrl: 'https://ai.krut.ai',
120
+ * });
121
+ *
122
+ * await ai.initialize(); // validates key against server
118
123
  *
119
124
  * const text = await ai.generate('Tell me a joke');
120
125
  * console.log(text);
121
126
  * ```
122
127
  */
123
128
  declare class KrutAIProvider {
124
- private readonly resolvedOpenRouterKey;
129
+ private readonly apiKey;
130
+ private readonly serverUrl;
125
131
  private readonly resolvedModel;
126
132
  private readonly config;
127
- private openRouterClient;
128
133
  private initialized;
129
134
  constructor(config: KrutAIProviderConfig);
130
135
  /**
131
136
  * Initialize the provider.
132
- * Validates the OpenRouter API key (optionally against a service endpoint)
133
- * and sets up the underlying OpenRouter client.
137
+ * Validates the API key against the LangChain server, then marks provider as ready.
134
138
  *
135
- * @throws {OpenRouterKeyValidationError}
139
+ * @throws {KrutAIKeyValidationError} if the key is rejected or the server is unreachable
136
140
  */
137
141
  initialize(): Promise<void>;
138
- /** @private */
139
- private setupClient;
140
142
  /**
141
- * Get the raw OpenRouter SDK client instance.
142
- * @throws {Error} If not initialized
143
- */
144
- getClient(): OpenRouter;
145
- /**
146
- * Get the currently configured default model.
143
+ * Returns the currently configured default model.
147
144
  */
148
145
  getModel(): string;
149
146
  /**
150
- * Check whether the provider has been initialized.
147
+ * Returns whether the provider has been initialized.
151
148
  */
152
149
  isInitialized(): boolean;
150
+ private assertInitialized;
151
+ /** Common request headers sent to the server on every AI call. */
152
+ private authHeaders;
153
153
  /**
154
154
  * Generate a response for a prompt (non-streaming).
155
155
  *
156
+ * Calls: POST {serverUrl}/generate
157
+ * Body: { prompt, model, system?, maxTokens?, temperature? }
158
+ * Expected response: { text: string } or { content: string } or { message: string }
159
+ *
156
160
  * @param prompt - The user prompt string
157
161
  * @param options - Optional overrides (model, system, maxTokens, temperature)
158
162
  * @returns The assistant's response text
159
163
  */
160
164
  generate(prompt: string, options?: GenerateOptions): Promise<string>;
161
165
  /**
162
- * Generate a streaming response for a prompt.
166
+ * Generate a streaming response for a prompt via Server-Sent Events (SSE).
167
+ *
168
+ * Calls: POST {serverUrl}/stream
169
+ * Body: { prompt, model, system?, maxTokens?, temperature? }
170
+ * Expected response: `text/event-stream` with `data: <chunk>` lines.
163
171
  *
164
172
  * @param prompt - The user prompt string
165
173
  * @param options - Optional overrides (model, system, maxTokens, temperature)
166
- * @returns An async iterable of server-sent event chunks
174
+ * @returns An async generator yielding string chunks from the server
167
175
  *
168
176
  * @example
169
177
  * ```typescript
170
- * const stream = await ai.stream('Tell me a story');
178
+ * const stream = ai.stream('Tell me a story');
171
179
  * for await (const chunk of stream) {
172
- * process.stdout.write(chunk.choices?.[0]?.delta?.content ?? '');
180
+ * process.stdout.write(chunk);
173
181
  * }
174
182
  * ```
175
183
  */
176
- stream(prompt: string, options?: GenerateOptions): Promise<_openrouter_sdk_lib_event_streams_js.EventStream<_openrouter_sdk_models.ChatStreamingResponseChunkData>>;
184
+ stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string>;
177
185
  /**
178
186
  * Multi-turn conversation: pass a full message history.
179
187
  *
188
+ * Calls: POST {serverUrl}/chat
189
+ * Body: { messages, model, maxTokens?, temperature? }
190
+ * Expected response: { text: string } or { content: string } or { message: string }
191
+ *
180
192
  * @param messages - Full conversation history
181
193
  * @param options - Optional overrides (model, maxTokens, temperature)
194
+ * @returns The assistant's response text
182
195
  */
183
196
  chat(messages: ChatMessage[], options?: GenerateOptions): Promise<string>;
184
197
  }
@@ -186,16 +199,19 @@ declare class KrutAIProvider {
186
199
  /**
187
200
  * @krutai/ai-provider — AI Provider package for KrutAI
188
201
  *
189
- * A thin wrapper around `@openrouter/sdk`, mirroring the patterns from `@krutai/auth`.
190
- *
191
- * Default model: `qwen/qwen3-235b-a22b-thinking-2507`
202
+ * A fetch-based wrapper that calls your deployed LangChain backend server.
203
+ * The user's API key is validated against the server before any AI call is made.
192
204
  *
193
205
  * @example Basic usage
194
206
  * ```typescript
195
207
  * import { krutAI } from '@krutai/ai-provider';
196
208
  *
197
- * const ai = krutAI(); // uses OPENROUTER_API_KEY env var
198
- * await ai.initialize();
209
+ * const ai = krutAI({
210
+ * apiKey: process.env.KRUTAI_API_KEY!,
211
+ * serverUrl: 'https://ai.yourapp.com',
212
+ * });
213
+ *
214
+ * await ai.initialize(); // validates key with server
199
215
  *
200
216
  * const text = await ai.generate('Write a poem about TypeScript');
201
217
  * console.log(text);
@@ -203,19 +219,26 @@ declare class KrutAIProvider {
203
219
  *
204
220
  * @example With custom model
205
221
  * ```typescript
206
- * const ai = krutAI({ model: 'openai/gpt-4o' });
222
+ * const ai = krutAI({
223
+ * apiKey: process.env.KRUTAI_API_KEY!,
224
+ * serverUrl: 'https://ai.yourapp.com',
225
+ * model: 'gpt-4o',
226
+ * });
207
227
  * await ai.initialize();
208
228
  * const text = await ai.generate('Hello!');
209
229
  * ```
210
230
  *
211
231
  * @example Streaming
212
232
  * ```typescript
213
- * const ai = krutAI();
233
+ * const ai = krutAI({
234
+ * apiKey: process.env.KRUTAI_API_KEY!,
235
+ * serverUrl: 'https://ai.yourapp.com',
236
+ * });
214
237
  * await ai.initialize();
215
238
  *
216
- * const stream = await ai.stream('Tell me a story');
239
+ * const stream = ai.stream('Tell me a story');
217
240
  * for await (const chunk of stream) {
218
- * process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
241
+ * process.stdout.write(chunk);
219
242
  * }
220
243
  * ```
221
244
  *
@@ -225,24 +248,27 @@ declare class KrutAIProvider {
225
248
  /**
226
249
  * krutAI — convenience factory (mirrors `krutAuth` in @krutai/auth).
227
250
  *
228
- * Creates a `KrutAIProvider` instance. OpenRouter API key is read from
229
- * `config.openRouterApiKey` or falls back to `process.env.OPENROUTER_API_KEY`.
251
+ * Creates a `KrutAIProvider` instance configured to call your LangChain server.
230
252
  *
231
- * @param config - Provider configuration (all fields optional except apiKey)
232
- * @returns A `KrutAIProvider` instance (call `.initialize()` before use)
253
+ * @param config - Provider configuration (`apiKey` is required; `serverUrl` is optional and defaults to `http://localhost:8000`)
254
+ * @returns A `KrutAIProvider` instance — call `.initialize()` before use
233
255
  *
234
256
  * @example
235
257
  * ```typescript
236
258
  * import { krutAI } from '@krutai/ai-provider';
237
259
  *
238
- * const ai = krutAI(); // env OPENROUTER_API_KEY + default model
260
+ * const ai = krutAI({
261
+ * apiKey: process.env.KRUTAI_API_KEY!,
262
+ * serverUrl: 'https://ai.yourapp.com',
263
+ * });
264
+ *
239
265
  * await ai.initialize();
240
266
  * const text = await ai.generate('Hello!');
241
267
  * ```
242
268
  */
243
- declare function krutAI(config?: Partial<KrutAIProviderConfig> & {
244
- apiKey?: string;
269
+ declare function krutAI(config: KrutAIProviderConfig & {
270
+ model?: string;
245
271
  }): KrutAIProvider;
246
- declare const VERSION = "0.1.0";
272
+ declare const VERSION = "0.2.0";
247
273
 
248
- export { type ChatMessage, DEFAULT_MODEL, type GenerateOptions, KrutAIProvider, type KrutAIProviderConfig, OpenRouterKeyValidationError, VERSION, krutAI, validateOpenRouterKeyFormat, validateOpenRouterKeyWithService };
274
+ export { type ChatMessage, DEFAULT_MODEL, type GenerateOptions, KrutAIKeyValidationError, KrutAIProvider, type KrutAIProviderConfig, VERSION, krutAI, validateApiKey, validateApiKeyFormat };