miragedev-sdk 0.2.0 → 0.3.0

This diff compares the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -267,6 +267,130 @@ function MyComponent() {
  }
  ```

+ ### AI Module
+
+ ```typescript
+ import { chat, chatStream, useChat, setAIConfig } from 'miragedev-sdk/ai'
+ ```
+
+ **Setup:**
+
+ ```typescript
+ // lib/miragedev.init.ts or app setup
+ import { initMirageDev } from 'miragedev-sdk'
+
+ initMirageDev({
+   ai: {
+     provider: 'openai',
+     apiKey: process.env.OPENAI_API_KEY,
+     defaultModel: 'gpt-4-turbo-preview',
+     temperature: 0.7,
+   }
+ })
+ ```
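+
+ `setAIConfig` (imported above) adjusts the AI settings at runtime. A minimal sketch based on its `Partial<AIConfig>` signature; whether overrides merge with or replace the existing config is defined by the SDK and not shown in this diff:
+
+ ```typescript
+ import { setAIConfig } from 'miragedev-sdk/ai'
+
+ // Override selected fields after initialization (assumed to merge,
+ // per the Partial<AIConfig> parameter type).
+ setAIConfig({
+   defaultModel: 'gpt-4o-mini',
+   temperature: 0.2,
+ })
+ ```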
+
+ **Server-Side Chat:**
+
+ ```typescript
+ import { chat } from 'miragedev-sdk/ai'
+
+ export async function POST(req: Request) {
+   const { message } = await req.json()
+
+   const response = await chat({
+     messages: [
+       { role: 'system', content: 'You are a helpful assistant' },
+       { role: 'user', content: message }
+     ],
+     temperature: 0.7,
+   })
+
+   return Response.json({ reply: response.content })
+ }
+ ```
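+
+ Calling that route from the browser is plain `fetch`. A sketch assuming the handler above is mounted at `/api/chat`; the actual path depends on your app's routing:
+
+ ```typescript
+ // '/api/chat' is a hypothetical path; point this at your own route.
+ const res = await fetch('/api/chat', {
+   method: 'POST',
+   headers: { 'Content-Type': 'application/json' },
+   body: JSON.stringify({ message: 'Hello!' }),
+ })
+ const { reply } = await res.json()
+ console.log(reply)
+ ```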
+
+ **Streaming (Edge Runtime):**
+
+ ```typescript
+ import { chatStream, createEdgeStreamResponse } from 'miragedev-sdk/ai'
+
+ export const runtime = 'edge'
+
+ export async function POST(req: Request) {
+   const { messages } = await req.json()
+
+   return createEdgeStreamResponse(async (write) => {
+     await chatStream({ messages }, (chunk) => {
+       write(chunk.delta)
+     })
+   })
+ }
+ ```
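+
+ The module also exports `createSSEStream`, which frames output as named server-sent events (`event:`/`data:` pairs) instead of a raw text stream. A sketch based on its type signature; the `'message'` event name is illustrative:
+
+ ```typescript
+ import { chatStream, createSSEStream } from 'miragedev-sdk/ai'
+
+ export async function POST(req: Request) {
+   const { messages } = await req.json()
+
+   return createSSEStream(async (send) => {
+     await chatStream({ messages }, (chunk) => {
+       // Each delta goes out as an SSE event named 'message'.
+       send('message', chunk.delta)
+     })
+   })
+ }
+ ```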
+
+ **Client-Side Hook:**
+
+ ```tsx
+ 'use client'
+
+ import { useChat } from 'miragedev-sdk/ai'
+
+ export function ChatComponent() {
+   const { messages, sendMessage, isLoading, streamingMessage } = useChat({
+     stream: true,
+     initialMessages: [
+       { role: 'system', content: 'You are a helpful assistant' }
+     ],
+     onError: (error) => console.error(error)
+   })
+
+   return (
+     <div>
+       {messages.map((msg, i) => (
+         <div key={i}>
+           <strong>{msg.role}:</strong> {msg.content}
+         </div>
+       ))}
+
+       {streamingMessage && (
+         <div>
+           <strong>assistant:</strong> {streamingMessage}
+         </div>
+       )}
+
+       <button
+         onClick={() => sendMessage('Hello!')}
+         disabled={isLoading}
+       >
+         Send Message
+       </button>
+     </div>
+   )
+ }
+ ```
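+
+ `sendMessage` takes the user's text, so it wires naturally to a controlled input. A sketch using plain React state on top of the hook options shown above:
+
+ ```tsx
+ 'use client'
+
+ import { useState } from 'react'
+ import { useChat } from 'miragedev-sdk/ai'
+
+ export function ChatInput() {
+   const [draft, setDraft] = useState('')
+   const { sendMessage, isLoading } = useChat({ stream: true })
+
+   return (
+     <form
+       onSubmit={(e) => {
+         e.preventDefault()
+         sendMessage(draft)
+         setDraft('')
+       }}
+     >
+       <input value={draft} onChange={(e) => setDraft(e.target.value)} />
+       <button type="submit" disabled={isLoading}>Send</button>
+     </form>
+   )
+ }
+ ```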
+
+ **Embeddings:**
+
+ ```typescript
+ import { createEmbeddings } from 'miragedev-sdk/ai'
+
+ const response = await createEmbeddings({
+   input: 'Machine learning is fascinating',
+ })
+
+ console.log(response.embeddings[0]) // [0.123, -0.456, ...]
+ ```
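+
+ `input` also accepts an array of strings, returning one vector per input (this release defaults to the `text-embedding-ada-002` model). A sketch comparing two inputs by cosine similarity, a common use of the returned vectors:
+
+ ```typescript
+ import { createEmbeddings } from 'miragedev-sdk/ai'
+
+ const { embeddings } = await createEmbeddings({
+   input: ['Machine learning is fascinating', 'Deep learning models learn representations'],
+ })
+
+ // Cosine similarity between the two returned vectors.
+ const dot = embeddings[0].reduce((sum, x, i) => sum + x * embeddings[1][i], 0)
+ const norm = (v: number[]) => Math.sqrt(v.reduce((s, x) => s + x * x, 0))
+ console.log(dot / (norm(embeddings[0]) * norm(embeddings[1])))
+ ```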
+
+ **Environment Variables:**
+
+ ```env
+ OPENAI_API_KEY=sk-...
+ ```
+
+ **Supported Providers:**
+ - ✅ OpenAI (GPT-4, GPT-3.5, embeddings)
+ - 🔜 Anthropic (Claude) - Coming soon
+
  ## API Reference

  Full API documentation, with examples for every function, is available in the code via JSDoc. Your AI assistant can read them to help you build faster.
@@ -0,0 +1,383 @@
+ 'use strict';
+
+ var chunkTALL2IL5_cjs = require('../chunk-TALL2IL5.cjs');
+ var chunkBW4BLEIM_cjs = require('../chunk-BW4BLEIM.cjs');
+ require('../chunk-75ZPJI57.cjs');
+ var OpenAI = require('openai');
+
+ function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
+
+ var OpenAI__default = /*#__PURE__*/_interopDefault(OpenAI);
+
+ var OpenAIProvider = class {
+   client;
+   config;
+   constructor(config) {
+     if (!config.apiKey) {
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_CONFIG_INVALID",
+         "OpenAI API key is required"
+       );
+     }
+     this.config = config;
+     this.client = new OpenAI__default.default({
+       apiKey: config.apiKey,
+       baseURL: config.baseURL,
+       organization: config.organization,
+       timeout: config.timeout,
+       dangerouslyAllowBrowser: true
+       // Allow in tests and edge runtime
+     });
+   }
+   /**
+    * Convert our message format to OpenAI message format
+    */
+   convertMessages(messages) {
+     return messages.map((msg) => {
+       if (msg.role === "system") {
+         const systemMessage = {
+           role: "system",
+           content: msg.content
+         };
+         if (msg.name) {
+           systemMessage.name = msg.name;
+         }
+         return systemMessage;
+       }
+       if (msg.role === "user") {
+         const userMessage = {
+           role: "user",
+           content: msg.content
+         };
+         if (msg.name) {
+           userMessage.name = msg.name;
+         }
+         return userMessage;
+       }
+       if (msg.role === "assistant") {
+         const assistantMessage = {
+           role: "assistant",
+           content: msg.content
+         };
+         if (msg.name) {
+           assistantMessage.name = msg.name;
+         }
+         if (msg.functionCall) {
+           assistantMessage.function_call = {
+             name: msg.functionCall.name,
+             arguments: msg.functionCall.arguments
+           };
+         }
+         return assistantMessage;
+       }
+       const functionMessage = {
+         role: "function",
+         content: msg.content,
+         name: msg.name || ""
+       };
+       return functionMessage;
+     });
+   }
+   /**
+    * Send a chat completion request
+    */
+   async chat(options) {
+     try {
+       if (!options.messages || options.messages.length === 0) {
+         throw new chunkBW4BLEIM_cjs.MirageDevError(
+           "AI_CONFIG_INVALID",
+           "Messages array cannot be empty"
+         );
+       }
+       const model = options.model || this.config.defaultModel || "gpt-4o-mini";
+       const temperature = options.temperature ?? this.config.temperature;
+       const maxTokens = options.maxTokens ?? this.config.maxTokens;
+       const params = {
+         model,
+         messages: this.convertMessages(options.messages)
+       };
+       if (temperature !== void 0) {
+         params.temperature = temperature;
+       }
+       if (maxTokens !== void 0) {
+         params.max_tokens = maxTokens;
+       }
+       if (options.functions) {
+         params.functions = options.functions.map((fn) => ({
+           name: fn.name,
+           description: fn.description,
+           parameters: fn.parameters
+         }));
+       }
+       const response = await this.client.chat.completions.create(params);
+       const choice = response.choices[0];
+       if (!choice) {
+         throw new chunkBW4BLEIM_cjs.MirageDevError(
+           "AI_API_ERROR",
+           "No response choices returned from OpenAI"
+         );
+       }
+       const result = {
+         content: choice.message.content || "",
+         role: choice.message.role,
+         finishReason: this.mapFinishReason(choice.finish_reason)
+       };
+       if (choice.message.function_call) {
+         try {
+           result.functionCall = {
+             name: choice.message.function_call.name,
+             arguments: JSON.parse(choice.message.function_call.arguments || "{}")
+           };
+         } catch (error) {
+           throw new chunkBW4BLEIM_cjs.MirageDevError(
+             "AI_API_ERROR",
+             `Failed to parse function call arguments: ${error instanceof Error ? error.message : "Invalid JSON"}`,
+             error
+           );
+         }
+       }
+       if (response.usage) {
+         result.usage = {
+           promptTokens: response.usage.prompt_tokens,
+           completionTokens: response.usage.completion_tokens,
+           totalTokens: response.usage.total_tokens
+         };
+       }
+       return result;
+     } catch (error) {
+       if (error instanceof chunkBW4BLEIM_cjs.MirageDevError) {
+         throw error;
+       }
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_API_ERROR",
+         error instanceof Error ? error.message : "Unknown error occurred",
+         error
+       );
+     }
+   }
+   /**
+    * Send a streaming chat completion request
+    */
+   async chatStream(options, onChunk) {
+     try {
+       const model = options.model || this.config.defaultModel || "gpt-4o-mini";
+       const temperature = options.temperature ?? this.config.temperature;
+       const maxTokens = options.maxTokens ?? this.config.maxTokens;
+       const params = {
+         model,
+         messages: this.convertMessages(options.messages),
+         stream: true
+       };
+       if (temperature !== void 0) {
+         params.temperature = temperature;
+       }
+       if (maxTokens !== void 0) {
+         params.max_tokens = maxTokens;
+       }
+       if (options.functions) {
+         params.functions = options.functions.map((fn) => ({
+           name: fn.name,
+           description: fn.description,
+           parameters: fn.parameters
+         }));
+       }
+       const stream = await this.client.chat.completions.create(params);
+       let content = "";
+       for await (const chunk of stream) {
+         const delta = chunk.choices[0]?.delta?.content || "";
+         const finishReason = chunk.choices[0]?.finish_reason;
+         if (delta) {
+           content += delta;
+         }
+         onChunk({
+           content,
+           delta,
+           isComplete: finishReason !== null
+         });
+       }
+     } catch (error) {
+       if (error instanceof chunkBW4BLEIM_cjs.MirageDevError) {
+         throw error;
+       }
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_API_ERROR",
+         error instanceof Error ? error.message : "Unknown error occurred",
+         error
+       );
+     }
+   }
+   /**
+    * Create embeddings for text input(s)
+    */
+   async createEmbeddings(options) {
+     try {
+       const model = options.model || "text-embedding-ada-002";
+       const input = Array.isArray(options.input) ? options.input : [options.input];
+       const response = await this.client.embeddings.create({
+         model,
+         input
+       });
+       const result = {
+         embeddings: response.data.map((item) => item.embedding)
+       };
+       if (response.usage) {
+         result.usage = {
+           promptTokens: response.usage.prompt_tokens,
+           totalTokens: response.usage.total_tokens
+         };
+       }
+       return result;
+     } catch (error) {
+       if (error instanceof chunkBW4BLEIM_cjs.MirageDevError) {
+         throw error;
+       }
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_API_ERROR",
+         error instanceof Error ? error.message : "Unknown error occurred",
+         error
+       );
+     }
+   }
+   /**
+    * Map OpenAI finish reason to our format
+    */
+   mapFinishReason(reason) {
+     switch (reason) {
+       case "stop":
+         return "stop";
+       case "length":
+         return "length";
+       case "function_call":
+         return "function_call";
+       case "content_filter":
+         return "content_filter";
+       default:
+         return "stop";
+     }
+   }
+ };
+
+ // src/ai/factory.ts
+ var providerInstance = null;
+ function getAIProvider() {
+   const config = chunkTALL2IL5_cjs.getAIConfig();
+   if (providerInstance) {
+     return providerInstance;
+   }
+   if (!config.apiKey) {
+     throw new chunkBW4BLEIM_cjs.MirageDevError(
+       "AI_CONFIG_INVALID",
+       "API key required for AI provider"
+     );
+   }
+   switch (config.provider) {
+     case "openai":
+       providerInstance = new OpenAIProvider({
+         apiKey: config.apiKey,
+         baseURL: config.baseURL,
+         defaultModel: config.defaultModel,
+         maxTokens: config.maxTokens,
+         temperature: config.temperature
+       });
+       break;
+     case "anthropic":
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_PROVIDER_ERROR",
+         "Anthropic provider not yet supported. Coming soon!"
+       );
+     case "custom":
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_PROVIDER_ERROR",
+         "Custom provider requires implementation"
+       );
+     default:
+       throw new chunkBW4BLEIM_cjs.MirageDevError(
+         "AI_PROVIDER_ERROR",
+         `Unknown AI provider: ${config.provider}`
+       );
+   }
+   return providerInstance;
+ }
+
+ // src/ai/edge.ts
+ function createEdgeStreamResponse(callback) {
+   const encoder = new TextEncoder();
+   const stream = new ReadableStream({
+     async start(controller) {
+       const write = async (data) => {
+         controller.enqueue(encoder.encode(data));
+       };
+       try {
+         await callback(write);
+       } catch (error) {
+         controller.error(error);
+       } finally {
+         controller.close();
+       }
+     }
+   });
+   return new Response(stream, {
+     headers: {
+       "Content-Type": "text/event-stream",
+       "Cache-Control": "no-cache",
+       "Connection": "keep-alive"
+     }
+   });
+ }
+ function createSSEStream(callback) {
+   const encoder = new TextEncoder();
+   const stream = new ReadableStream({
+     async start(controller) {
+       const send = async (event, data) => {
+         const message = `event: ${event}
+ data: ${data}
+
+ `;
+         controller.enqueue(encoder.encode(message));
+       };
+       try {
+         await callback(send);
+       } catch (error) {
+         await send("error", JSON.stringify({ error: String(error) }));
+         controller.error(error);
+       } finally {
+         controller.close();
+       }
+     }
+   });
+   return new Response(stream, {
+     headers: {
+       "Content-Type": "text/event-stream",
+       "Cache-Control": "no-cache",
+       "Connection": "keep-alive"
+     }
+   });
+ }
+
+ // src/ai/index.ts
+ async function chat(options) {
+   const provider = getAIProvider();
+   return provider.chat(options);
+ }
+ async function chatStream(options, onChunk) {
+   const provider = getAIProvider();
+   return provider.chatStream(options, onChunk);
+ }
+ async function createEmbeddings(options) {
+   const provider = getAIProvider();
+   return provider.createEmbeddings(options);
+ }
+
+ Object.defineProperty(exports, "getAIConfig", {
+   enumerable: true,
+   get: function () { return chunkTALL2IL5_cjs.getAIConfig; }
+ });
+ Object.defineProperty(exports, "setAIConfig", {
+   enumerable: true,
+   get: function () { return chunkTALL2IL5_cjs.setAIConfig; }
+ });
+ exports.chat = chat;
+ exports.chatStream = chatStream;
+ exports.createEdgeStreamResponse = createEdgeStreamResponse;
+ exports.createEmbeddings = createEmbeddings;
+ exports.createSSEStream = createSSEStream;
@@ -0,0 +1,14 @@
+ import { A as AIConfig, C as ChatOptions, e as ChatResponse, S as StreamChunk, E as EmbeddingOptions, f as EmbeddingResponse } from '../ai-DTlWq4Xf.cjs';
+ export { d as AIFunction, c as AIMessage, a as AIProvider, b as AIRole } from '../ai-DTlWq4Xf.cjs';
+
+ declare function setAIConfig(config: Partial<AIConfig>): void;
+ declare function getAIConfig(): AIConfig;
+
+ declare function createEdgeStreamResponse(callback: (write: (data: string) => Promise<void>) => Promise<void>): Response;
+ declare function createSSEStream(callback: (send: (event: string, data: string) => Promise<void>) => Promise<void>): Response;
+
+ declare function chat(options: ChatOptions): Promise<ChatResponse>;
+ declare function chatStream(options: ChatOptions, onChunk: (chunk: StreamChunk) => void): Promise<void>;
+ declare function createEmbeddings(options: EmbeddingOptions): Promise<EmbeddingResponse>;
+
+ export { AIConfig, ChatOptions, ChatResponse, EmbeddingOptions, EmbeddingResponse, StreamChunk, chat, chatStream, createEdgeStreamResponse, createEmbeddings, createSSEStream, getAIConfig, setAIConfig };
@@ -0,0 +1,14 @@
+ import { A as AIConfig, C as ChatOptions, e as ChatResponse, S as StreamChunk, E as EmbeddingOptions, f as EmbeddingResponse } from '../ai-DTlWq4Xf.js';
+ export { d as AIFunction, c as AIMessage, a as AIProvider, b as AIRole } from '../ai-DTlWq4Xf.js';
+
+ declare function setAIConfig(config: Partial<AIConfig>): void;
+ declare function getAIConfig(): AIConfig;
+
+ declare function createEdgeStreamResponse(callback: (write: (data: string) => Promise<void>) => Promise<void>): Response;
+ declare function createSSEStream(callback: (send: (event: string, data: string) => Promise<void>) => Promise<void>): Response;
+
+ declare function chat(options: ChatOptions): Promise<ChatResponse>;
+ declare function chatStream(options: ChatOptions, onChunk: (chunk: StreamChunk) => void): Promise<void>;
+ declare function createEmbeddings(options: EmbeddingOptions): Promise<EmbeddingResponse>;
+
+ export { AIConfig, ChatOptions, ChatResponse, EmbeddingOptions, EmbeddingResponse, StreamChunk, chat, chatStream, createEdgeStreamResponse, createEmbeddings, createSSEStream, getAIConfig, setAIConfig };