miragedev-sdk 0.1.0 → 0.3.0

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (40)
  1. package/README.md +154 -44
  2. package/dist/ai/index.cjs +383 -0
  3. package/dist/ai/index.d.cts +14 -0
  4. package/dist/ai/index.d.ts +14 -0
  5. package/dist/ai/index.js +366 -0
  6. package/dist/ai-DTlWq4Xf.d.cts +72 -0
  7. package/dist/ai-DTlWq4Xf.d.ts +72 -0
  8. package/dist/auth/index.cjs +4 -4
  9. package/dist/auth/index.js +2 -2
  10. package/dist/auth/middleware.cjs +3 -3
  11. package/dist/auth/middleware.js +2 -2
  12. package/dist/billing/index.cjs +2 -2
  13. package/dist/billing/index.js +1 -1
  14. package/dist/billing/mobile.cjs +3 -3
  15. package/dist/billing/mobile.js +1 -1
  16. package/dist/billing/webhook.cjs +2 -2
  17. package/dist/billing/webhook.js +1 -1
  18. package/dist/chunk-6YFK3S4F.js +21 -0
  19. package/dist/chunk-7ZDFU6CA.cjs +4095 -0
  20. package/dist/{chunk-PHTUPKEM.cjs → chunk-BKLMAYJY.cjs} +2 -2
  21. package/dist/chunk-J7DLQ6NM.js +4092 -0
  22. package/dist/chunk-OIWPIRJY.cjs +591 -0
  23. package/dist/{chunk-M3DPIKWT.js → chunk-ONCJKP65.js} +1 -1
  24. package/dist/chunk-PDNLOOI2.js +581 -0
  25. package/dist/chunk-TALL2IL5.cjs +24 -0
  26. package/dist/cli/commands/create.cjs +11 -0
  27. package/dist/cli/commands/create.d.cts +3 -0
  28. package/dist/cli/commands/create.d.ts +3 -0
  29. package/dist/cli/commands/create.js +2 -0
  30. package/dist/cli/index.cjs +3 -1
  31. package/dist/cli/index.js +3 -1
  32. package/dist/email/index.cjs +2 -2
  33. package/dist/email/index.js +1 -1
  34. package/dist/index.cjs +6 -2
  35. package/dist/index.d.cts +5 -2
  36. package/dist/index.d.ts +5 -2
  37. package/dist/index.js +5 -1
  38. package/package.json +8 -2
  39. package/dist/chunk-5YXI4Q2K.js +0 -13813
  40. package/dist/chunk-E5YC2MHX.cjs +0 -13816
package/README.md CHANGED
@@ -16,75 +16,61 @@ AI-first SDK for building SAAS applications with Next.js. Build production-ready
 - 📦 **Modular** - Import only what you need
 - 🔒 **Type-Safe** - Full TypeScript support with Zod validation

-## Quick Start (5 minutes)
+## Quick Start (2 minutes)

-### 1. Installation
+### Option 1: Create New Project (Recommended)

 ```bash
-npm install miragedev-sdk
-# or
-pnpm add miragedev-sdk
+npx miragedev create my-saas
+cd my-saas
+npm run dev
 ```

-### 2. Initialize with CLI
+That's it! You now have a full-featured SAAS with:
+- ✅ Next.js 14+ with App Router
+- ✅ Authentication (login page included)
+- ✅ Protected dashboard
+- ✅ Stripe billing setup
+- ✅ Email templates
+- ✅ Pricing page
+- ✅ PWA ready
+
+### Option 2: Add to Existing Project

 ```bash
+# In your existing Next.js project
+npm install miragedev-sdk
 npx miragedev init
 ```

-The CLI will guide you through selecting providers and generating configuration files.
-
 **What gets created:**
-- `miragedev.config.ts` - SDK configuration
+- `lib/miragedev.config.ts` - SDK configuration
 - `.env.local.example` - Environment variables template
-- `miragedev.init.ts` - Initialization file
+- `lib/miragedev.init.ts` - Initialization file
 - `AGENTS.md` - Instructions for AI assistants (Copilot, Cursor, etc.)

-### 3. Configure Environment
+### Configure Environment
+
+Copy `.env.local.example` to `.env.local` and fill in your API keys:

-Create `.env.local`:
+```bash
+cp .env.local.example .env.local
+```

 ```env
 # Auth
-AUTH_SECRET=your-secret-key-here
+AUTH_SECRET=your-secret-key-here # Generate with: openssl rand -base64 32

 # Billing (Stripe)
-STRIPE_SECRET_KEY=sk_test_xxx
-STRIPE_WEBHOOK_SECRET=whsec_xxx
+STRIPE_SECRET_KEY=sk_test_xxx # From https://dashboard.stripe.com/test/apikeys
+STRIPE_WEBHOOK_SECRET=whsec_xxx # From https://dashboard.stripe.com/test/webhooks

 # Email (Resend)
-RESEND_API_KEY=re_xxx
+RESEND_API_KEY=re_xxx # From https://resend.com/api-keys
 EMAIL_FROM=noreply@yourapp.com
 ```

-### 4. Initialize SDK
-
-Create `lib/miragedev.ts`:
-
-```typescript
-import { initMirageDev } from 'miragedev-sdk'
-
-initMirageDev({
-  auth: {
-    provider: 'nextauth',
-    sessionSecret: process.env.AUTH_SECRET!,
-  },
-  billing: {
-    provider: 'stripe',
-    stripeSecretKey: process.env.STRIPE_SECRET_KEY!,
-    webhookSecret: process.env.STRIPE_WEBHOOK_SECRET!,
-  },
-  email: {
-    provider: 'resend',
-    apiKey: process.env.RESEND_API_KEY!,
-    from: process.env.EMAIL_FROM!,
-  },
-})
-```
-
-Import this in your root layout (`app/layout.tsx`).
-
-### 5. Start Building
+### Start Building

 ```typescript
 // Server Component - Protected Page
@@ -281,6 +267,130 @@ function MyComponent() {
 }
 ```

+### AI Module
+
+```typescript
+import { chat, chatStream, useChat, setAIConfig } from 'miragedev-sdk/ai'
+```
+
+**Setup:**
+
+```typescript
+// lib/miragedev.init.ts or app setup
+import { initMirageDev } from 'miragedev-sdk'
+
+initMirageDev({
+  ai: {
+    provider: 'openai',
+    apiKey: process.env.OPENAI_API_KEY,
+    defaultModel: 'gpt-4-turbo-preview',
+    temperature: 0.7,
+  }
+})
+```
+
+**Server-Side Chat:**
+
+```typescript
+import { chat } from 'miragedev-sdk/ai'
+
+export async function POST(req: Request) {
+  const { message } = await req.json()
+
+  const response = await chat({
+    messages: [
+      { role: 'system', content: 'You are a helpful assistant' },
+      { role: 'user', content: message }
+    ],
+    temperature: 0.7,
+  })
+
+  return Response.json({ reply: response.content })
+}
+```
+
+**Streaming (Edge Runtime):**
+
+```typescript
+import { chatStream, createEdgeStreamResponse } from 'miragedev-sdk/ai'
+
+export const runtime = 'edge'
+
+export async function POST(req: Request) {
+  const { messages } = await req.json()
+
+  return createEdgeStreamResponse(async (write) => {
+    await chatStream({ messages }, (chunk) => {
+      write(chunk.delta)
+    })
+  })
+}
+```
+
+**Client-Side Hook:**
+
+```typescript
+'use client'
+
+import { useChat } from 'miragedev-sdk/ai'
+
+export function ChatComponent() {
+  const { messages, sendMessage, isLoading, streamingMessage } = useChat({
+    stream: true,
+    initialMessages: [
+      { role: 'system', content: 'You are a helpful assistant' }
+    ],
+    onError: (error) => console.error(error)
+  })
+
+  return (
+    <div>
+      {messages.map((msg, i) => (
+        <div key={i}>
+          <strong>{msg.role}:</strong> {msg.content}
+        </div>
+      ))}
+
+      {streamingMessage && (
+        <div>
+          <strong>assistant:</strong> {streamingMessage}
+        </div>
+      )}
+
+      <button
+        onClick={() => sendMessage('Hello!')}
+        disabled={isLoading}
+      >
+        Send Message
+      </button>
+    </div>
+  )
+}
+```
+
+**Embeddings:**
+
+```typescript
+import { createEmbeddings } from 'miragedev-sdk/ai'
+
+const response = await createEmbeddings({
+  input: 'Machine learning is fascinating',
+})
+
+console.log(response.embeddings[0]) // [0.123, -0.456, ...]
+```
+
+**Environment Variables:**
+
+```env
+OPENAI_API_KEY=sk-...
+```
+
+**Supported Providers:**
+- ✅ OpenAI (GPT-4, GPT-3.5, embeddings)
+- 🔜 Anthropic (Claude) - Coming soon
+
 ## API Reference

 Full API documentation with examples for every function is available in the code via JSDoc. Your AI assistant can read these to help you build faster.
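The 0.1.0 Quick Start removed above was the only place the full `initMirageDev` call appeared; the 0.3.0 README now points at a generated `lib/miragedev.init.ts` without showing its contents. For orientation, a sketch of what that file plausibly contains, assembled from the removed example plus the new AI setup block (option names are taken from those two snippets; this is not verified against the generator's actual output):

```typescript
// lib/miragedev.init.ts — a sketch combining the removed 0.1.0 Quick Start
// example with the new 0.3.0 AI setup block; unverified against the CLI output.
import { initMirageDev } from 'miragedev-sdk'

initMirageDev({
  auth: {
    provider: 'nextauth',
    sessionSecret: process.env.AUTH_SECRET!,
  },
  billing: {
    provider: 'stripe',
    stripeSecretKey: process.env.STRIPE_SECRET_KEY!,
    webhookSecret: process.env.STRIPE_WEBHOOK_SECRET!,
  },
  email: {
    provider: 'resend',
    apiKey: process.env.RESEND_API_KEY!,
    from: process.env.EMAIL_FROM!,
  },
  ai: {
    provider: 'openai',
    apiKey: process.env.OPENAI_API_KEY,
    defaultModel: 'gpt-4-turbo-preview',
    temperature: 0.7,
  },
})
```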
package/dist/ai/index.cjs ADDED
@@ -0,0 +1,383 @@
+'use strict';
+
+var chunkTALL2IL5_cjs = require('../chunk-TALL2IL5.cjs');
+var chunkBW4BLEIM_cjs = require('../chunk-BW4BLEIM.cjs');
+require('../chunk-75ZPJI57.cjs');
+var OpenAI = require('openai');
+
+function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
+
+var OpenAI__default = /*#__PURE__*/_interopDefault(OpenAI);
+
+var OpenAIProvider = class {
+  client;
+  config;
+  constructor(config) {
+    if (!config.apiKey) {
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_CONFIG_INVALID",
+        "OpenAI API key is required"
+      );
+    }
+    this.config = config;
+    this.client = new OpenAI__default.default({
+      apiKey: config.apiKey,
+      baseURL: config.baseURL,
+      organization: config.organization,
+      timeout: config.timeout,
+      dangerouslyAllowBrowser: true
+      // Allow in tests and edge runtime
+    });
+  }
+  /**
+   * Convert our message format to OpenAI message format
+   */
+  convertMessages(messages) {
+    return messages.map((msg) => {
+      if (msg.role === "system") {
+        const systemMessage = {
+          role: "system",
+          content: msg.content
+        };
+        if (msg.name) {
+          systemMessage.name = msg.name;
+        }
+        return systemMessage;
+      }
+      if (msg.role === "user") {
+        const userMessage = {
+          role: "user",
+          content: msg.content
+        };
+        if (msg.name) {
+          userMessage.name = msg.name;
+        }
+        return userMessage;
+      }
+      if (msg.role === "assistant") {
+        const assistantMessage = {
+          role: "assistant",
+          content: msg.content
+        };
+        if (msg.name) {
+          assistantMessage.name = msg.name;
+        }
+        if (msg.functionCall) {
+          assistantMessage.function_call = {
+            name: msg.functionCall.name,
+            arguments: msg.functionCall.arguments
+          };
+        }
+        return assistantMessage;
+      }
+      const functionMessage = {
+        role: "function",
+        content: msg.content,
+        name: msg.name || ""
+      };
+      return functionMessage;
+    });
+  }
+  /**
+   * Send a chat completion request
+   */
+  async chat(options) {
+    try {
+      if (!options.messages || options.messages.length === 0) {
+        throw new chunkBW4BLEIM_cjs.MirageDevError(
+          "AI_CONFIG_INVALID",
+          "Messages array cannot be empty"
+        );
+      }
+      const model = options.model || this.config.defaultModel || "gpt-4o-mini";
+      const temperature = options.temperature ?? this.config.temperature;
+      const maxTokens = options.maxTokens ?? this.config.maxTokens;
+      const params = {
+        model,
+        messages: this.convertMessages(options.messages)
+      };
+      if (temperature !== void 0) {
+        params.temperature = temperature;
+      }
+      if (maxTokens !== void 0) {
+        params.max_tokens = maxTokens;
+      }
+      if (options.functions) {
+        params.functions = options.functions.map((fn) => ({
+          name: fn.name,
+          description: fn.description,
+          parameters: fn.parameters
+        }));
+      }
+      const response = await this.client.chat.completions.create(params);
+      const choice = response.choices[0];
+      if (!choice) {
+        throw new chunkBW4BLEIM_cjs.MirageDevError(
+          "AI_API_ERROR",
+          "No response choices returned from OpenAI"
+        );
+      }
+      const result = {
+        content: choice.message.content || "",
+        role: choice.message.role,
+        finishReason: this.mapFinishReason(choice.finish_reason)
+      };
+      if (choice.message.function_call) {
+        try {
+          result.functionCall = {
+            name: choice.message.function_call.name,
+            arguments: JSON.parse(choice.message.function_call.arguments || "{}")
+          };
+        } catch (error) {
+          throw new chunkBW4BLEIM_cjs.MirageDevError(
+            "AI_API_ERROR",
+            `Failed to parse function call arguments: ${error instanceof Error ? error.message : "Invalid JSON"}`,
+            error
+          );
+        }
+      }
+      if (response.usage) {
+        result.usage = {
+          promptTokens: response.usage.prompt_tokens,
+          completionTokens: response.usage.completion_tokens,
+          totalTokens: response.usage.total_tokens
+        };
+      }
+      return result;
+    } catch (error) {
+      if (error instanceof chunkBW4BLEIM_cjs.MirageDevError) {
+        throw error;
+      }
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_API_ERROR",
+        error instanceof Error ? error.message : "Unknown error occurred",
+        error
+      );
+    }
+  }
+  /**
+   * Send a streaming chat completion request
+   */
+  async chatStream(options, onChunk) {
+    try {
+      const model = options.model || this.config.defaultModel || "gpt-4o-mini";
+      const temperature = options.temperature ?? this.config.temperature;
+      const maxTokens = options.maxTokens ?? this.config.maxTokens;
+      const params = {
+        model,
+        messages: this.convertMessages(options.messages),
+        stream: true
+      };
+      if (temperature !== void 0) {
+        params.temperature = temperature;
+      }
+      if (maxTokens !== void 0) {
+        params.max_tokens = maxTokens;
+      }
+      if (options.functions) {
+        params.functions = options.functions.map((fn) => ({
+          name: fn.name,
+          description: fn.description,
+          parameters: fn.parameters
+        }));
+      }
+      const stream = await this.client.chat.completions.create(params);
+      let content = "";
+      for await (const chunk of stream) {
+        const delta = chunk.choices[0]?.delta?.content || "";
+        const finishReason = chunk.choices[0]?.finish_reason;
+        if (delta) {
+          content += delta;
+        }
+        onChunk({
+          content,
+          delta,
+          isComplete: finishReason !== null
+        });
+      }
+    } catch (error) {
+      if (error instanceof chunkBW4BLEIM_cjs.MirageDevError) {
+        throw error;
+      }
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_API_ERROR",
+        error instanceof Error ? error.message : "Unknown error occurred",
+        error
+      );
+    }
+  }
+  /**
+   * Create embeddings for text input(s)
+   */
+  async createEmbeddings(options) {
+    try {
+      const model = options.model || "text-embedding-ada-002";
+      const input = Array.isArray(options.input) ? options.input : [options.input];
+      const response = await this.client.embeddings.create({
+        model,
+        input
+      });
+      const result = {
+        embeddings: response.data.map((item) => item.embedding)
+      };
+      if (response.usage) {
+        result.usage = {
+          promptTokens: response.usage.prompt_tokens,
+          totalTokens: response.usage.total_tokens
+        };
+      }
+      return result;
+    } catch (error) {
+      if (error instanceof chunkBW4BLEIM_cjs.MirageDevError) {
+        throw error;
+      }
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_API_ERROR",
+        error instanceof Error ? error.message : "Unknown error occurred",
+        error
+      );
+    }
+  }
+  /**
+   * Map OpenAI finish reason to our format
+   */
+  mapFinishReason(reason) {
+    switch (reason) {
+      case "stop":
+        return "stop";
+      case "length":
+        return "length";
+      case "function_call":
+        return "function_call";
+      case "content_filter":
+        return "content_filter";
+      default:
+        return "stop";
+    }
+  }
+};
+
+// src/ai/factory.ts
+var providerInstance = null;
+function getAIProvider() {
+  const config = chunkTALL2IL5_cjs.getAIConfig();
+  if (providerInstance) {
+    return providerInstance;
+  }
+  if (!config.apiKey) {
+    throw new chunkBW4BLEIM_cjs.MirageDevError(
+      "AI_CONFIG_INVALID",
+      "API key required for AI provider"
+    );
+  }
+  switch (config.provider) {
+    case "openai":
+      providerInstance = new OpenAIProvider({
+        apiKey: config.apiKey,
+        baseURL: config.baseURL,
+        defaultModel: config.defaultModel,
+        maxTokens: config.maxTokens,
+        temperature: config.temperature
+      });
+      break;
+    case "anthropic":
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_PROVIDER_ERROR",
+        "Anthropic provider not yet supported. Coming soon!"
+      );
+    case "custom":
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_PROVIDER_ERROR",
+        "Custom provider requires implementation"
+      );
+    default:
+      throw new chunkBW4BLEIM_cjs.MirageDevError(
+        "AI_PROVIDER_ERROR",
+        `Unknown AI provider: ${config.provider}`
+      );
+  }
+  return providerInstance;
+}
+
+// src/ai/edge.ts
+function createEdgeStreamResponse(callback) {
+  const encoder = new TextEncoder();
+  const stream = new ReadableStream({
+    async start(controller) {
+      const write = async (data) => {
+        controller.enqueue(encoder.encode(data));
+      };
+      try {
+        await callback(write);
+      } catch (error) {
+        controller.error(error);
+      } finally {
+        controller.close();
+      }
+    }
+  });
+  return new Response(stream, {
+    headers: {
+      "Content-Type": "text/event-stream",
+      "Cache-Control": "no-cache",
+      "Connection": "keep-alive"
+    }
+  });
+}
+function createSSEStream(callback) {
+  const encoder = new TextEncoder();
+  const stream = new ReadableStream({
+    async start(controller) {
+      const send = async (event, data) => {
+        const message = `event: ${event}
+data: ${data}
+
+`;
+        controller.enqueue(encoder.encode(message));
+      };
+      try {
+        await callback(send);
+      } catch (error) {
+        await send("error", JSON.stringify({ error: String(error) }));
+        controller.error(error);
+      } finally {
+        controller.close();
+      }
+    }
+  });
+  return new Response(stream, {
+    headers: {
+      "Content-Type": "text/event-stream",
+      "Cache-Control": "no-cache",
+      "Connection": "keep-alive"
+    }
+  });
+}
+
+// src/ai/index.ts
+async function chat(options) {
+  const provider = getAIProvider();
+  return provider.chat(options);
+}
+async function chatStream(options, onChunk) {
+  const provider = getAIProvider();
+  return provider.chatStream(options, onChunk);
+}
+async function createEmbeddings(options) {
+  const provider = getAIProvider();
+  return provider.createEmbeddings(options);
+}
+
+Object.defineProperty(exports, "getAIConfig", {
+  enumerable: true,
+  get: function () { return chunkTALL2IL5_cjs.getAIConfig; }
+});
+Object.defineProperty(exports, "setAIConfig", {
+  enumerable: true,
+  get: function () { return chunkTALL2IL5_cjs.setAIConfig; }
+});
+exports.chat = chat;
+exports.chatStream = chatStream;
+exports.createEdgeStreamResponse = createEdgeStreamResponse;
+exports.createEmbeddings = createEmbeddings;
+exports.createSSEStream = createSSEStream;
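Note that `createEdgeStreamResponse` above enqueues raw text chunks (the `text/event-stream` header notwithstanding), so a browser can consume such a route with a plain `fetch` reader rather than `EventSource`. A minimal consumer sketch; the `/api/chat` endpoint and request shape are hypothetical:

```typescript
// Hypothetical client for a route built on createEdgeStreamResponse.
// Reads the raw streamed text chunks and accumulates them into one string.
async function readChatStream(messages: unknown[]): Promise<string> {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  })
  if (!res.ok || !res.body) throw new Error(`Stream failed: ${res.status}`)

  const reader = res.body.getReader()
  const decoder = new TextDecoder()
  let text = ''
  for (;;) {
    const { done, value } = await reader.read()
    if (done) break
    text += decoder.decode(value, { stream: true }) // append each delta as it arrives
  }
  return text
}
```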
package/dist/ai/index.d.cts ADDED
@@ -0,0 +1,14 @@
+import { A as AIConfig, C as ChatOptions, e as ChatResponse, S as StreamChunk, E as EmbeddingOptions, f as EmbeddingResponse } from '../ai-DTlWq4Xf.cjs';
+export { d as AIFunction, c as AIMessage, a as AIProvider, b as AIRole } from '../ai-DTlWq4Xf.cjs';
+
+declare function setAIConfig(config: Partial<AIConfig>): void;
+declare function getAIConfig(): AIConfig;
+
+declare function createEdgeStreamResponse(callback: (write: (data: string) => Promise<void>) => Promise<void>): Response;
+declare function createSSEStream(callback: (send: (event: string, data: string) => Promise<void>) => Promise<void>): Response;
+
+declare function chat(options: ChatOptions): Promise<ChatResponse>;
+declare function chatStream(options: ChatOptions, onChunk: (chunk: StreamChunk) => void): Promise<void>;
+declare function createEmbeddings(options: EmbeddingOptions): Promise<EmbeddingResponse>;
+
+export { AIConfig, ChatOptions, ChatResponse, EmbeddingOptions, EmbeddingResponse, StreamChunk, chat, chatStream, createEdgeStreamResponse, createEmbeddings, createSSEStream, getAIConfig, setAIConfig };
package/dist/ai/index.d.ts ADDED
@@ -0,0 +1,14 @@
+import { A as AIConfig, C as ChatOptions, e as ChatResponse, S as StreamChunk, E as EmbeddingOptions, f as EmbeddingResponse } from '../ai-DTlWq4Xf.js';
+export { d as AIFunction, c as AIMessage, a as AIProvider, b as AIRole } from '../ai-DTlWq4Xf.js';
+
+declare function setAIConfig(config: Partial<AIConfig>): void;
+declare function getAIConfig(): AIConfig;
+
+declare function createEdgeStreamResponse(callback: (write: (data: string) => Promise<void>) => Promise<void>): Response;
+declare function createSSEStream(callback: (send: (event: string, data: string) => Promise<void>) => Promise<void>): Response;
+
+declare function chat(options: ChatOptions): Promise<ChatResponse>;
+declare function chatStream(options: ChatOptions, onChunk: (chunk: StreamChunk) => void): Promise<void>;
+declare function createEmbeddings(options: EmbeddingOptions): Promise<EmbeddingResponse>;
+
+export { AIConfig, ChatOptions, ChatResponse, EmbeddingOptions, EmbeddingResponse, StreamChunk, chat, chatStream, createEdgeStreamResponse, createEmbeddings, createSSEStream, getAIConfig, setAIConfig };
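These declarations are what a typed call site sees. A minimal sketch exercising them, assuming `setAIConfig` alone is enough to satisfy the factory's `getAIConfig()` lookup (which the CJS bundle above suggests, but which is not documented):

```typescript
import { chat, createEmbeddings, setAIConfig } from 'miragedev-sdk/ai'

// Point the AI module at OpenAI; initMirageDev({ ai: ... }) is the README's
// route to what appears to be the same configuration state.
setAIConfig({ provider: 'openai', apiKey: process.env.OPENAI_API_KEY })

async function demo(): Promise<void> {
  const reply = await chat({
    messages: [{ role: 'user', content: 'Say hello' }],
  })
  console.log(reply.content, reply.usage?.totalTokens)

  const { embeddings } = await createEmbeddings({ input: ['hello', 'world'] })
  console.log(embeddings.length) // one vector per input string
}

demo().catch(console.error)
```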