@blinkdotnew/sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,563 @@
1
+ import { BlinkClientConfig, BlinkUser, AuthState, HttpClient, TableOperations, CreateOptions, UpsertOptions, QueryOptions, ListResponse, UpdateOptions, FilterCondition, BlinkStorage, BlinkAI, StorageUploadOptions, StorageUploadResponse, TextGenerationRequest, TextGenerationResponse, ObjectGenerationRequest, ObjectGenerationResponse, ImageGenerationRequest, ImageGenerationResponse, SpeechGenerationRequest, SpeechGenerationResponse, TranscriptionRequest, TranscriptionResponse } from '@blink/core';
2
+ export { AuthState, AuthTokens, BlinkAI, BlinkClientConfig, BlinkStorage, BlinkUser, CreateOptions, FileObject, FilterCondition, ImageGenerationRequest, ImageGenerationResponse, ListResponse, Message, ObjectGenerationRequest, ObjectGenerationResponse, QueryOptions, SpeechGenerationRequest, SpeechGenerationResponse, StorageUploadOptions, StorageUploadResponse, TableOperations, TextGenerationRequest, TextGenerationResponse, TokenUsage, TranscriptionRequest, TranscriptionResponse, UpdateOptions, UpsertOptions } from '@blink/core';
3
+
4
+ /**
5
+ * Blink Auth Module - Client-side authentication management
6
+ * Handles token storage, user state, and authentication flows
7
+ */
8
+
9
/** Callback invoked with the new AuthState whenever authentication state changes. */
type AuthStateChangeCallback = (state: AuthState) => void;
10
declare class BlinkAuth {
    /** Client configuration supplied at construction time. */
    private config;
    /** Current authentication state delivered to change listeners. */
    private authState;
    /** Registered auth-state-change callbacks (see onAuthStateChanged). */
    private listeners;
    /** Auth endpoint URL used for login/logout redirects. */
    private readonly authUrl;
    constructor(config: BlinkClientConfig);
    /**
     * Initialize authentication from stored tokens or URL fragments
     */
    initialize(): Promise<void>;
    /**
     * Redirect to Blink auth page
     * @param nextUrl - URL to return to after authentication — NOTE(review): exact redirect behavior lives in the implementation; confirm there
     */
    login(nextUrl?: string): void;
    /**
     * Logout and clear stored tokens
     * @param redirectUrl - URL to navigate to after logout
     */
    logout(redirectUrl?: string): void;
    /**
     * Check if user is authenticated
     */
    isAuthenticated(): boolean;
    /**
     * Get current user (sync)
     */
    currentUser(): BlinkUser | null;
    /**
     * Get current access token
     */
    getToken(): string | null;
    /**
     * Check if access token is expired based on timestamp
     */
    private isAccessTokenExpired;
    /**
     * Check if refresh token is expired based on timestamp
     */
    private isRefreshTokenExpired;
    /**
     * Get a valid access token, refreshing if necessary
     */
    getValidToken(): Promise<string | null>;
    /**
     * Fetch current user profile from API
     */
    me(): Promise<BlinkUser>;
    /**
     * Update user profile
     */
    updateMe(updates: Partial<BlinkUser>): Promise<BlinkUser>;
    /**
     * Manually set tokens (for server-side usage)
     */
    setToken(jwt: string, persist?: boolean): Promise<void>;
    /**
     * Refresh access token using refresh token
     */
    refreshToken(): Promise<boolean>;
    /**
     * Add auth state change listener
     * @returns An unsubscribe function that removes the listener when called
     */
    onAuthStateChanged(callback: AuthStateChangeCallback): () => void;
    /**
     * Private helper methods
     */
    private validateStoredTokens;
    private setTokens;
    private clearTokens;
    private getStoredTokens;
    private extractTokensFromUrl;
    private clearUrlTokens;
    private redirectToAuth;
    private setLoading;
    private updateAuthState;
}
85
+
86
+ /**
87
+ * Blink Database Module - Table operations and query interface
88
+ * Provides CRUD operations with PostgREST-compatible API
89
+ */
90
+
91
declare class BlinkTable<T = any> implements TableOperations<T> {
    /** Name of the table every operation on this instance is scoped to. */
    private tableName;
    /** HTTP client used to issue database API requests. */
    private httpClient;
    constructor(tableName: string, httpClient: HttpClient);
    /**
     * Create a single record
     */
    create(data: Partial<T>, options?: CreateOptions): Promise<T>;
    /**
     * Create multiple records
     */
    createMany(data: Partial<T>[], options?: CreateOptions): Promise<T[]>;
    /**
     * Upsert a single record (insert or update on conflict)
     */
    upsert(data: Partial<T>, options?: UpsertOptions): Promise<T>;
    /**
     * Upsert multiple records
     */
    upsertMany(data: Partial<T>[], options?: UpsertOptions): Promise<T[]>;
    /**
     * Get a single record by ID
     */
    get(id: string): Promise<T | null>;
    /**
     * List records with filtering, sorting, and pagination
     */
    list(options?: QueryOptions): Promise<ListResponse<T>>;
    /**
     * Update a single record by ID
     */
    update(id: string, data: Partial<T>, options?: UpdateOptions): Promise<T>;
    /**
     * Update multiple records
     */
    updateMany(updates: Array<{
        id: string;
    } & Partial<T>>, options?: UpdateOptions): Promise<T[]>;
    /**
     * Delete a single record by ID
     */
    delete(id: string): Promise<void>;
    /**
     * Delete multiple records based on filter
     */
    deleteMany(options: {
        where: FilterCondition;
    }): Promise<void>;
    /**
     * Count records matching filter
     */
    count(options?: {
        where?: FilterCondition;
    }): Promise<number>;
    /**
     * Check if any records exist matching filter
     */
    exists(options: {
        where: FilterCondition;
    }): Promise<boolean>;
    /**
     * Raw SQL query on this table (for advanced use cases)
     */
    sql<R = any>(query: string, params?: any[]): Promise<{
        rows: R[];
        columns: string[];
        rowCount: number;
        executionTime: number;
    }>;
    /**
     * Private helper methods
     */
    // NOTE(review): presumably derives the pagination cursor from a list response —
    // exact semantics are not visible in this declaration file; confirm in the implementation.
    private extractCursor;
}
165
declare class BlinkDatabase {
    /** HTTP client shared with every BlinkTable this instance hands out. */
    private httpClient;
    // NOTE(review): presumably a cache of BlinkTable instances keyed by table name —
    // confirm against the implementation; only the field name is visible here.
    private tables;
    constructor(httpClient: HttpClient);
    /**
     * Get a table instance for any table name
     */
    table<T = any>(tableName: string): BlinkTable<T>;
    /**
     * Execute raw SQL query
     */
    sql<T = any>(query: string, params?: any[]): Promise<{
        rows: T[];
        columns: string[];
        rowCount: number;
        executionTime: number;
    }>;
    /**
     * Execute batch SQL operations
     */
    batch<T = any>(statements: Array<{
        sql: string;
        args?: any[];
    }>, mode?: 'read' | 'write'): Promise<{
        results: Array<{
            rows: T[];
            columns: string[];
            rowCount: number;
        }>;
        executionTime: number;
        success: boolean;
    }>;
}
198
+
199
+ /**
200
+ * Blink Client - Main SDK entry point
201
+ * Factory function and client class for the Blink SDK
202
+ */
203
+
204
interface BlinkClient {
    /** Authentication module: login/logout, token management, user state. */
    auth: BlinkAuth;
    /** Database module: table CRUD operations and raw SQL access. */
    db: BlinkDatabase;
    /** Storage module: file upload and removal. */
    storage: BlinkStorage;
    /** AI module: text/object/image/speech generation and audio transcription. */
    ai: BlinkAI;
}
210
/**
 * Create a new Blink client instance
 * @param config - Client configuration (see BlinkClientConfig)
 * @returns A client exposing the auth, db, storage, and ai modules
 */
declare function createClient(config: BlinkClientConfig): BlinkClient;
214
+
215
+ /**
216
+ * Blink Storage Module
217
+ * Handles file upload and file removal
218
+ */
219
+
220
declare class BlinkStorageImpl implements BlinkStorage {
    /** HTTP client used to issue storage API requests. */
    private httpClient;
    constructor(httpClient: HttpClient);
    /**
     * Upload a file to project storage
     *
     * @param file - File, Blob, or Buffer to upload
     * @param path - Destination path within project storage
     * @param options - Upload options including upsert and progress callback
     * @returns Promise resolving to upload response with public URL
     *
     * @example
     * ```ts
     * const { publicUrl } = await blink.storage.upload(
     *   fileInput.files[0],
     *   `avatars/${user.id}.png`,
     *   {
     *     upsert: true,
     *     onProgress: pct => console.log(`${pct}%`)
     *   }
     * );
     * ```
     */
    upload(file: File | Blob | Buffer, path: string, options?: StorageUploadOptions): Promise<StorageUploadResponse>;
    /**
     * Remove one or more files from project storage
     *
     * @param paths - File paths to remove
     * @returns Promise that resolves when files are removed
     *
     * @example
     * ```ts
     * await blink.storage.remove('avatars/user1.png');
     * await blink.storage.remove('file1.pdf', 'file2.pdf', 'file3.pdf');
     * ```
     */
    remove(...paths: string[]): Promise<void>;
}
258
+
259
+ /**
260
+ * Blink AI Module
261
+ * Provides AI generation capabilities with Vercel AI SDK compatibility
262
+ */
263
+
264
declare class BlinkAIImpl implements BlinkAI {
    /** HTTP client used to issue AI API requests. */
    private httpClient;
    constructor(httpClient: HttpClient);
    /**
     * Get MIME type for audio format
     */
    private getMimeTypeForFormat;
    /**
     * Generates a text response using the Blink AI engine.
     *
     * @param options - An object containing either:
     *   - `prompt`: a simple string prompt
     *   - OR `messages`: an array of chat messages for conversation
     *   - Plus optional model, maxTokens, temperature, signal parameters
     *
     * @example
     * ```ts
     * // Simple prompt
     * const { text } = await blink.ai.generateText({
     *   prompt: "Write a poem about coding"
     * });
     *
     * // Chat messages
     * const { text } = await blink.ai.generateText({
     *   messages: [
     *     { role: "system", content: "You are a helpful assistant" },
     *     { role: "user", content: "Explain quantum computing" }
     *   ]
     * });
     *
     * // With options
     * const { text, usage } = await blink.ai.generateText({
     *   prompt: "Summarize this article",
     *   model: "gpt-4o-mini",
     *   maxTokens: 150,
     *   temperature: 0.7
     * });
     * ```
     *
     * @returns Promise<TextGenerationResponse> - Object containing:
     *   - `text`: Generated text string
     *   - `usage`: Token usage information
     *   - `finishReason`: Why generation stopped ("stop", "length", etc.)
     */
    generateText(options: TextGenerationRequest): Promise<TextGenerationResponse>;
    /**
     * Streams text generation with real-time updates as the AI generates content.
     *
     * @param options - Same as generateText: either `prompt` or `messages` with optional parameters
     * @param onChunk - Callback function that receives each text chunk as it's generated
     *
     * @example
     * ```ts
     * // Stream with prompt
     * await blink.ai.streamText(
     *   { prompt: "Write a short story about space exploration" },
     *   (chunk) => {
     *     process.stdout.write(chunk); // Real-time output
     *   }
     * );
     *
     * // Stream with messages
     * await blink.ai.streamText(
     *   {
     *     messages: [
     *       { role: "system", content: "You are a creative writer" },
     *       { role: "user", content: "Write a haiku about programming" }
     *     ]
     *   },
     *   (chunk) => updateUI(chunk)
     * );
     * ```
     *
     * @returns Promise<TextGenerationResponse> - Final complete response with full text and metadata
     */
    streamText(options: TextGenerationRequest, onChunk: (chunk: string) => void): Promise<TextGenerationResponse>;
    /**
     * Generates structured JSON objects using AI with schema validation.
     *
     * @param options - Object containing:
     *   - `prompt`: Description of what object to generate (required)
     *   - `schema`: JSON Schema to validate the generated object
     *   - `output`: Type of output ("object", "array", "enum")
     *   - `enum`: Array of allowed values for enum output
     *   - Plus optional model, signal parameters
     *
     * @example
     * ```ts
     * // Generate user profile
     * const { object } = await blink.ai.generateObject({
     *   prompt: "Generate a user profile for a software developer",
     *   schema: {
     *     type: "object",
     *     properties: {
     *       name: { type: "string" },
     *       age: { type: "number" },
     *       skills: { type: "array", items: { type: "string" } },
     *       experience: { type: "number" }
     *     },
     *     required: ["name", "skills"]
     *   }
     * });
     *
     * // Generate array of items
     * const { object } = await blink.ai.generateObject({
     *   prompt: "List 5 programming languages",
     *   output: "array",
     *   schema: {
     *     type: "array",
     *     items: { type: "string" }
     *   }
     * });
     *
     * // Generate enum value
     * const { object } = await blink.ai.generateObject({
     *   prompt: "Choose the best programming language for web development",
     *   output: "enum",
     *   enum: ["JavaScript", "Python", "TypeScript", "Go"]
     * });
     * ```
     *
     * @returns Promise<ObjectGenerationResponse> - Object containing:
     *   - `object`: The generated and validated JSON object/array/enum
     *   - `usage`: Token usage information
     *   - `finishReason`: Why generation stopped
     */
    generateObject(options: ObjectGenerationRequest): Promise<ObjectGenerationResponse>;
    /**
     * Streams structured object generation with real-time partial updates as the AI builds the object.
     *
     * @param options - Same as generateObject: prompt, schema, output type, etc.
     * @param onPartial - Callback function that receives partial object updates as they're generated
     *
     * @example
     * ```ts
     * // Stream object generation with schema
     * await blink.ai.streamObject(
     *   {
     *     prompt: "Generate a detailed product catalog entry",
     *     schema: {
     *       type: "object",
     *       properties: {
     *         name: { type: "string" },
     *         price: { type: "number" },
     *         description: { type: "string" },
     *         features: { type: "array", items: { type: "string" } }
     *       }
     *     }
     *   },
     *   (partial) => {
     *     console.log("Partial update:", partial);
     *     updateProductForm(partial); // Update UI in real-time
     *   }
     * );
     * ```
     *
     * @returns Promise<ObjectGenerationResponse> - Final complete object with metadata
     */
    streamObject(options: ObjectGenerationRequest, onPartial: (partial: any) => void): Promise<ObjectGenerationResponse>;
    /**
     * Generates images from text descriptions using AI image models.
     *
     * @param options - Object containing:
     *   - `prompt`: Text description of the image to generate (required)
     *   - `size`: Image dimensions (e.g., "1024x1024", "512x512") - varies by model
     *   - `quality`: Image quality ("standard" or "hd")
     *   - `n`: Number of images to generate (default: 1)
     *   - `response_format`: Output format ("url" or "b64_json")
     *   - Plus optional model, signal parameters
     *
     * @example
     * ```ts
     * // Basic image generation
     * const { data } = await blink.ai.generateImage({
     *   prompt: "A serene landscape with mountains and a lake at sunset"
     * });
     * console.log("Image URL:", data[0].url);
     *
     * // High-quality image with specific size
     * const { data } = await blink.ai.generateImage({
     *   prompt: "A futuristic city skyline with flying cars",
     *   size: "1792x1024",
     *   quality: "hd",
     *   model: "dall-e-3"
     * });
     *
     * // Multiple images
     * const { data } = await blink.ai.generateImage({
     *   prompt: "A cute robot mascot for a tech company",
     *   n: 3,
     *   size: "1024x1024"
     * });
     * data.forEach((img, i) => console.log(`Image ${i+1}:`, img.url));
     *
     * // Base64 format for direct embedding
     * const { data } = await blink.ai.generateImage({
     *   prompt: "A minimalist logo design",
     *   response_format: "b64_json"
     * });
     * console.log("Base64 data:", data[0].b64_json);
     * ```
     *
     * @returns Promise<ImageGenerationResponse> - Object containing:
     *   - `data`: Array of generated images with url or b64_json
     */
    generateImage(options: ImageGenerationRequest): Promise<ImageGenerationResponse>;
    /**
     * Converts text to speech using AI voice synthesis models.
     *
     * @param options - Object containing:
     *   - `text`: Text content to convert to speech (required)
     *   - `voice`: Voice to use ("alloy", "echo", "fable", "onyx", "nova", "shimmer")
     *   - `response_format`: Audio format ("mp3", "opus", "aac", "flac", "wav", "pcm")
     *   - `speed`: Speech speed (0.25 to 4.0, default: 1.0)
     *   - Plus optional model, signal parameters
     *
     * @example
     * ```ts
     * // Basic text-to-speech
     * const { url } = await blink.ai.generateSpeech({
     *   text: "Hello, welcome to our AI-powered application!"
     * });
     * console.log("Audio URL:", url);
     *
     * // Custom voice and format
     * const { url, voice, format } = await blink.ai.generateSpeech({
     *   text: "This is a demonstration of our speech synthesis capabilities.",
     *   voice: "nova",
     *   response_format: "wav",
     *   speed: 1.2
     * });
     * console.log(`Generated ${format} audio with ${voice} voice:`, url);
     *
     * // Slow, clear speech for accessibility
     * const { url } = await blink.ai.generateSpeech({
     *   text: "Please listen carefully to these important instructions.",
     *   voice: "echo",
     *   speed: 0.8
     * });
     * ```
     *
     * @returns Promise<SpeechGenerationResponse> - Object containing:
     *   - `url`: URL to the generated audio file
     *   - `voice`: Voice used for generation
     *   - `format`: Audio format
     *   - `mimeType`: MIME type of the audio
     */
    generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
    /**
     * Transcribes audio content to text using AI speech recognition models.
     *
     * @param options - Object containing:
     *   - `audio`: Audio input as URL string, base64 string, or number array buffer (required)
     *   - `language`: Language code for transcription (e.g., "en", "es", "fr")
     *   - `response_format`: Output format ("json", "text", "srt", "verbose_json", "vtt")
     *   - Plus optional model, signal parameters
     *
     * @example
     * ```ts
     * // Transcribe from URL
     * const { text } = await blink.ai.transcribeAudio({
     *   audio: "https://example.com/meeting-recording.mp3"
     * });
     * console.log("Transcription:", text);
     *
     * // Transcribe with language hint
     * const { text, language } = await blink.ai.transcribeAudio({
     *   audio: "https://example.com/spanish-audio.wav",
     *   language: "es"
     * });
     * console.log(`Transcribed ${language}:`, text);
     *
     * // Transcribe with timestamps (verbose format)
     * const result = await blink.ai.transcribeAudio({
     *   audio: audioFileUrl,
     *   response_format: "verbose_json"
     * });
     * result.segments?.forEach(segment => {
     *   console.log(`${segment.start}s - ${segment.end}s: ${segment.text}`);
     * });
     *
     * // Transcribe from audio buffer
     * const audioBuffer = new Array(1024).fill(0); // Your audio data
     * const { text } = await blink.ai.transcribeAudio({
     *   audio: audioBuffer,
     *   language: "en"
     * });
     * ```
     *
     * @returns Promise<TranscriptionResponse> - Object containing:
     *   - `text`: Transcribed text content
     *   - `transcript`: Alias for text
     *   - `segments`: Array of timestamped segments (if verbose format)
     *   - `language`: Detected language
     *   - `duration`: Audio duration in seconds
     */
    transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
}
562
+
563
+ export { type AuthStateChangeCallback, BlinkAIImpl, type BlinkClient, BlinkDatabase, BlinkStorageImpl, BlinkTable, createClient };