@proveanything/smartlinks 1.3.16 → 1.3.18

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -11,7 +11,13 @@ Build Smartlinks-powered apps in Node.js or the browser: list collections and pr
  • Optional iframe proxy mode for embedded apps
 
  For the full list of functions and types, see the API summary:
- → API Summary (API_SUMMARY.md)
+ [API Summary](API_SUMMARY.md)
+
+ **Documentation:**
+ - [AI & Chat Completions](docs/ai.md) - Chat completions, RAG, voice integration
+ - [Widgets](docs/widgets.md) - Embeddable React components
+ - [Realtime](docs/realtime.md) - Realtime data updates
+ - [iframe Responder](docs/iframe-responder.md) - iframe integration
 
  ## Install
 
package/dist/api/ai.d.ts CHANGED
@@ -1,3 +1,334 @@
+ /** Content part for multimodal messages */
+ export interface ContentPart {
+     type: 'text' | 'image_url';
+     text?: string;
+     image_url?: {
+         url: string;
+         detail?: 'auto' | 'low' | 'high';
+     };
+ }
+ /** Function call representation */
+ export interface FunctionCall {
+     name: string;
+     arguments: string;
+ }
+ /** Tool call representation */
+ export interface ToolCall {
+     id: string;
+     type: 'function';
+     function: {
+         name: string;
+         arguments: string;
+     };
+ }
+ /** Chat message with role and content */
+ export interface ChatMessage {
+     role: 'system' | 'user' | 'assistant' | 'function' | 'tool';
+     content: string | ContentPart[];
+     name?: string;
+     function_call?: FunctionCall;
+     tool_calls?: ToolCall[];
+     tool_call_id?: string;
+ }
+ /** Tool/Function definition */
+ export interface ToolDefinition {
+     type: 'function';
+     function: {
+         name: string;
+         description: string;
+         parameters: {
+             type: 'object';
+             properties: Record<string, {
+                 type: string;
+                 description?: string;
+                 enum?: string[];
+             }>;
+             required?: string[];
+         };
+     };
+ }
+ /** Chat completion request */
+ export interface ChatCompletionRequest {
+     messages: ChatMessage[];
+     model?: string;
+     stream?: boolean;
+     tools?: ToolDefinition[];
+     tool_choice?: 'none' | 'auto' | 'required' | {
+         type: 'function';
+         function: {
+             name: string;
+         };
+     };
+     temperature?: number;
+     max_tokens?: number;
+     top_p?: number;
+     frequency_penalty?: number;
+     presence_penalty?: number;
+     response_format?: {
+         type: 'text' | 'json_object';
+     };
+     user?: string;
+ }
+ /** Chat completion choice */
+ export interface ChatCompletionChoice {
+     index: number;
+     message: ChatMessage;
+     finish_reason: 'stop' | 'length' | 'function_call' | 'tool_calls' | 'content_filter' | null;
+ }
+ /** Chat completion response */
+ export interface ChatCompletionResponse {
+     id: string;
+     object: 'chat.completion';
+     created: number;
+     model: string;
+     choices: ChatCompletionChoice[];
+     usage: {
+         prompt_tokens: number;
+         completion_tokens: number;
+         total_tokens: number;
+     };
+ }
+ /** Streaming chunk */
+ export interface ChatCompletionChunk {
+     id: string;
+     object: 'chat.completion.chunk';
+     created: number;
+     model: string;
+     choices: Array<{
+         index: number;
+         delta: Partial<ChatMessage>;
+         finish_reason: string | null;
+     }>;
+ }
+ /** AI Model information */
+ export interface AIModel {
+     id: string;
+     provider: 'gemini' | 'openai';
+     modelId: string;
+     name: string;
+     description: string;
+     capabilities: Array<'text' | 'vision' | 'audio' | 'code'>;
+     contextWindow: number;
+     pricing: {
+         input: number;
+         output: number;
+         cached?: number;
+     };
+     features: string[];
+     recommended?: string;
+ }
+ /** List of models */
+ export interface ModelList {
+     object: 'list';
+     data: AIModel[];
+ }
+ /** Document chunk with embedding */
+ export interface DocumentChunk {
+     text: string;
+     embedding: number[];
+     metadata: {
+         chunkIndex: number;
+         documentId: string;
+         [key: string]: any;
+     };
+ }
+ /** Index document request */
+ export interface IndexDocumentRequest {
+     productId: string;
+     text?: string;
+     documentUrl?: string;
+     metadata?: Record<string, any>;
+     chunkSize?: number;
+     overlap?: number;
+     provider?: 'openai' | 'gemini';
+ }
+ /** Index document response */
+ export interface IndexDocumentResponse {
+     success: boolean;
+     productId: string;
+     documentId: string;
+     chunks: number;
+     metadata: {
+         textLength: number;
+         chunkSize: number;
+         overlap: number;
+         embeddingDimensions: number;
+     };
+     sample?: {
+         text: string;
+         chunkIndex: number;
+     };
+ }
+ /** Configure assistant request */
+ export interface ConfigureAssistantRequest {
+     productId: string;
+     systemPrompt?: string;
+     model?: string;
+     maxTokensPerResponse?: number;
+     temperature?: number;
+     rateLimitPerUser?: number;
+     allowedTopics?: string[];
+     customInstructions?: {
+         tone?: string;
+         additionalRules?: string;
+         [key: string]: any;
+     };
+ }
+ /** Configure assistant response */
+ export interface ConfigureAssistantResponse {
+     success: boolean;
+     configuration: {
+         productId: string;
+         systemPrompt: string;
+         model: string;
+         maxTokensPerResponse: number;
+         temperature: number;
+         rateLimitPerUser: number;
+         allowedTopics: string[];
+         customInstructions?: Record<string, any>;
+         updatedAt: string;
+     };
+ }
+ /** Public chat request */
+ export interface PublicChatRequest {
+     productId: string;
+     userId: string;
+     message: string;
+     sessionId?: string;
+     stream?: boolean;
+ }
+ /** Public chat response */
+ export interface PublicChatResponse {
+     message: string;
+     sessionId: string;
+     usage: {
+         prompt_tokens: number;
+         completion_tokens: number;
+         total_tokens: number;
+     };
+     context?: {
+         chunksUsed: number;
+         topSimilarity: number;
+     };
+ }
+ /** Session information */
+ export interface Session {
+     sessionId: string;
+     productId: string;
+     userId: string;
+     messageCount: number;
+     createdAt: string;
+     lastActivityAt: string;
+     messages: ChatMessage[];
+ }
+ /** Rate limit status */
+ export interface RateLimitStatus {
+     used: number;
+     remaining: number;
+     resetAt: string;
+ }
+ /** Session statistics */
+ export interface SessionStatistics {
+     totalSessions: number;
+     activeSessions: number;
+     totalMessages: number;
+     rateLimitedUsers: number;
+ }
+ /** Voice session request */
+ export interface VoiceSessionRequest {
+     productId: string;
+     userId: string;
+     collectionId: string;
+     settings?: {
+         voice?: string;
+         language?: string;
+         model?: string;
+     };
+ }
+ /** Voice session response */
+ export interface VoiceSessionResponse {
+     token: string;
+     systemInstruction: string;
+     expiresAt: string;
+     productName: string;
+ }
+ /** Ephemeral token request */
+ export interface EphemeralTokenRequest {
+     settings?: {
+         ttl?: number;
+         voice?: string;
+         language?: string;
+         model?: string;
+     };
+ }
+ /** Ephemeral token response */
+ export interface EphemeralTokenResponse {
+     token: string;
+     expiresAt: string;
+ }
+ /** Transcription response */
+ export interface TranscriptionResponse {
+     text: string;
+ }
+ /** TTS request */
+ export interface TTSRequest {
+     text: string;
+     voice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
+     speed?: number;
+     format?: 'mp3' | 'opus' | 'aac' | 'flac';
+ }
+ /** Podcast generation request */
+ export interface GeneratePodcastRequest {
+     productId: string;
+     documentText?: string;
+     duration?: number;
+     style?: 'casual' | 'professional' | 'educational' | 'entertaining';
+     voices?: {
+         host1?: string;
+         host2?: string;
+     };
+     includeAudio?: boolean;
+     language?: string;
+     customInstructions?: string;
+ }
+ /** Podcast script segment */
+ export interface PodcastSegment {
+     speaker: 'host1' | 'host2';
+     text: string;
+     timestamp?: number;
+     duration?: number;
+ }
+ /** Podcast script */
+ export interface PodcastScript {
+     title: string;
+     description: string;
+     segments: PodcastSegment[];
+ }
+ /** Podcast generation response */
+ export interface GeneratePodcastResponse {
+     success: boolean;
+     podcastId: string;
+     script: PodcastScript;
+     audio?: {
+         host1Url?: string;
+         host2Url?: string;
+         mixedUrl?: string;
+     };
+     metadata: {
+         duration: number;
+         wordCount: number;
+         generatedAt: string;
+     };
+ }
+ /** Podcast status */
+ export interface PodcastStatus {
+     podcastId: string;
+     status: 'generating_script' | 'generating_audio' | 'mixing' | 'completed' | 'failed';
+     progress: number;
+     estimatedTimeRemaining?: number;
+     error?: string;
+     result?: GeneratePodcastResponse;
+ }
  export interface AIGenerateContentRequest {
      /** The prompt or message contents sent to the AI */
      contents: string | any;
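
The hunk above introduces OpenAI-style chat completion types (ChatMessage, ToolDefinition, ChatCompletionRequest, and related response shapes). For orientation, here is a minimal sketch of a request object typed against those declarations; the import from the package root is an assumption (only dist/api/ai.d.ts is shown here), and lookup_inventory is a hypothetical tool name used purely for illustration.

```ts
// Sketch only: assumes the types above are re-exported from the package root.
import type { ChatCompletionRequest, ChatMessage, ToolDefinition } from '@proveanything/smartlinks';

const messages: ChatMessage[] = [
  { role: 'system', content: 'You are a concise product assistant.' },
  { role: 'user', content: 'What sizes does this product come in?' },
];

// Optional tool definition following the ToolDefinition shape declared above.
const tools: ToolDefinition[] = [
  {
    type: 'function',
    function: {
      name: 'lookup_inventory', // hypothetical function name
      description: 'Look up stock levels for a product variant',
      parameters: {
        type: 'object',
        properties: {
          sku: { type: 'string', description: 'Product SKU' },
        },
        required: ['sku'],
      },
    },
  },
];

const request: ChatCompletionRequest = {
  messages,
  tools,
  tool_choice: 'auto',
  temperature: 0.2,
  max_tokens: 512,
  stream: false,
};
```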
@@ -47,8 +378,112 @@ export interface AISearchPhotosPhoto {
      [key: string]: any;
  }
  export declare namespace ai {
+     namespace chat {
+         namespace completions {
+             /**
+              * Create a chat completion (streaming or non-streaming)
+              * @param collectionId - Collection identifier
+              * @param request - Chat completion request
+              * @returns Chat completion response or async iterable for streaming
+              */
+             function create(collectionId: string, request: ChatCompletionRequest): Promise<ChatCompletionResponse | AsyncIterable<ChatCompletionChunk>>;
+         }
+     }
+     namespace models {
+         /**
+          * List available AI models
+          */
+         function list(collectionId: string): Promise<ModelList>;
+         /**
+          * Get specific model information
+          */
+         function get(collectionId: string, modelId: string): Promise<AIModel>;
+     }
+     namespace rag {
+         /**
+          * Index a document for RAG
+          */
+         function indexDocument(collectionId: string, request: IndexDocumentRequest): Promise<IndexDocumentResponse>;
+         /**
+          * Configure AI assistant behavior
+          */
+         function configureAssistant(collectionId: string, request: ConfigureAssistantRequest): Promise<ConfigureAssistantResponse>;
+     }
+     namespace sessions {
+         /**
+          * Get session statistics
+          */
+         function stats(collectionId: string): Promise<SessionStatistics>;
+     }
+     namespace rateLimit {
+         /**
+          * Reset rate limit for a user
+          */
+         function reset(collectionId: string, userId: string): Promise<{
+             success: boolean;
+             userId: string;
+         }>;
+     }
+     namespace podcast {
+         /**
+          * Generate a NotebookLM-style conversational podcast from product documents
+          */
+         function generate(collectionId: string, request: GeneratePodcastRequest): Promise<GeneratePodcastResponse>;
+         /**
+          * Get podcast generation status
+          */
+         function getStatus(collectionId: string, podcastId: string): Promise<PodcastStatus>;
+     }
+     namespace tts {
+         /**
+          * Generate text-to-speech audio
+          */
+         function generate(collectionId: string, request: TTSRequest): Promise<Blob>;
+     }
+     namespace publicApi {
+         /**
+          * Chat with product assistant (RAG)
+          */
+         function chat(collectionId: string, request: PublicChatRequest): Promise<PublicChatResponse>;
+         /**
+          * Get session history
+          */
+         function getSession(collectionId: string, sessionId: string): Promise<Session>;
+         /**
+          * Clear session history
+          */
+         function clearSession(collectionId: string, sessionId: string): Promise<{
+             success: boolean;
+         }>;
+         /**
+          * Check rate limit status
+          */
+         function getRateLimit(collectionId: string, userId: string): Promise<RateLimitStatus>;
+         /**
+          * Generate ephemeral token for Gemini Live
+          */
+         function getToken(collectionId: string, request: EphemeralTokenRequest): Promise<EphemeralTokenResponse>;
+     }
+     namespace voice {
+         /**
+          * Check if voice is supported in browser
+          */
+         function isSupported(): boolean;
+         /**
+          * Listen for voice input
+          */
+         function listen(language?: string): Promise<string>;
+         /**
+          * Speak text
+          */
+         function speak(text: string, options?: {
+             voice?: string;
+             rate?: number;
+         }): Promise<void>;
+     }
      /**
       * Generate text/content via AI (admin)
+      * @deprecated Use ai.chat.completions.create() instead
       */
      function generateContent(collectionId: string, params: AIGenerateContentRequest, admin?: boolean): Promise<any>;
      /**
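
The second hunk exposes the new nested ai.* namespaces and marks generateContent as deprecated in favour of ai.chat.completions.create. The following is a hedged sketch of how a caller might consume that declaration, branching on the streaming and non-streaming return shapes; the package-root import path and the runtime behaviour of the stream are assumptions based solely on the types above.

```ts
// Sketch only: assumes `ai` and the chat types are re-exported from the package root.
import { ai } from '@proveanything/smartlinks';
import type { ChatCompletionChunk, ChatCompletionResponse } from '@proveanything/smartlinks';

async function ask(collectionId: string, prompt: string): Promise<string> {
  const result = await ai.chat.completions.create(collectionId, {
    messages: [{ role: 'user', content: prompt }],
    stream: true,
  });

  // The declaration returns either a full response or an async iterable of chunks,
  // so branch on the presence of Symbol.asyncIterator.
  if (Symbol.asyncIterator in (result as object)) {
    let text = '';
    for await (const chunk of result as AsyncIterable<ChatCompletionChunk>) {
      const delta = chunk.choices[0]?.delta;
      if (typeof delta?.content === 'string') text += delta.content;
    }
    return text;
  }

  const response = result as ChatCompletionResponse;
  const content = response.choices[0]?.message.content;
  return typeof content === 'string' ? content : '';
}
```

For RAG-backed product chat, the declarations also suggest ai.publicApi.chat(collectionId, { productId, userId, message }), which resolves to a PublicChatResponse carrying token usage and optional retrieval context.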