@realtimex/sdk 1.0.8 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -24,8 +24,9 @@ Before using this SDK, ensure your Supabase database is set up:
24
24
  ```typescript
25
25
  import { RealtimeXSDK } from '@realtimex/sdk';
26
26
 
27
- // No config needed - RTX_APP_ID is auto-detected from environment
28
- const sdk = new RealtimeXSDK();
27
+ const sdk = new RealtimeXSDK({
28
+ permissions: ['activities.read', 'activities.write', 'webhook.trigger']
29
+ });
29
30
 
30
31
  // Insert activity
31
32
  const activity = await sdk.activities.insert({
@@ -119,6 +120,158 @@ const threads = await sdk.api.getThreads('sales');
119
120
  const task = await sdk.api.getTask('task-uuid');
120
121
  ```
121
122
 
123
+ ### LLM Module
124
+
125
+ Access AI capabilities through the RealtimeX proxy:
126
+
127
+ ```typescript
128
+ const sdk = new RealtimeXSDK({
129
+ permissions: ['llm.chat', 'llm.embed', 'llm.providers', 'vectors.write', 'vectors.read']
130
+ });
131
+ ```
132
+
133
+ #### List Providers & Models
134
+
135
+ ```typescript
136
+ const { llm, embedding } = await sdk.llm.getProviders();
137
+ // llm[]: Array of LLM providers with models
138
+ // embedding[]: Array of embedding providers with models
139
+ ```
140
+
141
+ #### Chat Completion
142
+
143
+ ```typescript
144
+ // Sync Chat
145
+ const response = await sdk.llm.chat(
146
+ [
147
+ { role: 'system', content: 'You are a helpful assistant.' },
148
+ { role: 'user', content: 'What is RealtimeX?' }
149
+ ],
150
+ {
151
+ model: 'gpt-4o', // Optional: specific model
152
+ provider: 'openai', // Optional: specific provider
153
+ temperature: 0.7, // Optional: 0.0-2.0
154
+ max_tokens: 1000 // Optional: max response tokens
155
+ }
156
+ );
157
+ console.log(response.response?.content);
158
+
159
+ // Streaming Chat
160
+ for await (const chunk of sdk.llm.chatStream(messages, options)) {
161
+ process.stdout.write(chunk.textResponse || '');
162
+ }
163
+ ```
164
+
165
+ #### Generate Embeddings
166
+
167
+ ```typescript
168
+ const { embeddings, dimensions, provider, model } = await sdk.llm.embed(
169
+ ['Hello world', 'Goodbye'],
170
+ { provider: 'openai', model: 'text-embedding-3-small' } // Optional
171
+ );
172
+ // embeddings: number[][] - vector arrays
173
+ // dimensions: number - vector dimension (e.g., 1536)
174
+ ```
175
+
176
+ #### Vector Store Operations
177
+
178
+ ```typescript
179
+ // Upsert vectors with metadata
180
+ await sdk.llm.vectors.upsert([
181
+ {
182
+ id: 'chunk-1',
183
+ vector: embeddings[0],
184
+ metadata: {
185
+ text: 'Hello world', // Original text (for retrieval)
186
+ documentId: 'doc-1', // Logical grouping
187
+ customField: 'any value' // Any custom metadata
188
+ }
189
+ }
190
+ ], {
191
+ workspaceId: 'ws-123' // Optional: physical namespace isolation
192
+ });
193
+
194
+ // Query similar vectors
195
+ const results = await sdk.llm.vectors.query(queryVector, {
196
+ topK: 5, // Number of results
197
+ workspaceId: 'ws-123', // Optional: search in specific workspace
198
+ filter: { documentId: 'doc-1' } // Optional: filter by document
199
+ });
200
+ // returns: { success, results: [{ id, score, metadata }] }
201
+
202
+ // List all workspaces for this app
203
+ const { workspaces } = await sdk.llm.vectors.listWorkspaces();
204
+ // returns: { success, workspaces: ['ws-123', 'default', ...] }
205
+
206
+ // Delete all vectors in a workspace
207
+ await sdk.llm.vectors.delete({
208
+ deleteAll: true,
209
+ workspaceId: 'ws-123'
210
+ });
211
+ ```
212
+
213
+ #### High-Level Helpers
214
+
215
+ These combine multiple operations for common RAG patterns:
216
+
217
+ ```typescript
218
+ // embedAndStore: Text → Embed → Store (one call)
219
+ await sdk.llm.embedAndStore(
220
+ {
221
+ texts: ['Document text 1', 'Document text 2'], // texts to embed
222
+ documentId: 'doc-123', // Optional: logical grouping
223
+ workspaceId: 'ws-456', // Optional: physical isolation
224
+ provider: 'openai', // Optional: embedding provider
225
+ model: 'text-embedding-3-small' // Optional: embedding model
226
+ }
227
+ );
228
+
229
+ // search: Query → Embed → Search (one call)
230
+ const searchResults = await sdk.llm.search(
231
+ 'What is RealtimeX?', // search query (text, not vector)
232
+ {
233
+ topK: 5, // Number of results
234
+ workspaceId: 'ws-123', // Optional: search in workspace
235
+ filter: { documentId: 'doc-1' }, // Optional: filter by document
236
+ provider: 'openai', // Optional: embedding provider
237
+ model: 'text-embedding-3-small' // Optional: embedding model
238
+ }
239
+ );
240
+ // returns: [{ id, score, metadata: { text, documentId, ... } }]
241
+ ```
242
+
243
+ > **Note on Isolation:**
244
+ > - `workspaceId`: Creates **physical namespace** (`sdk_{appId}_{wsId}`) - data completely isolated
245
+ > - `documentId`: Stored as **metadata**, filtered after search (post-filter)
246
+
247
+ ### Error Handling
248
+
249
+ The SDK provides specific error classes for handling LLM-related issues:
250
+
251
+ ```typescript
252
+ import { LLMPermissionError, LLMProviderError } from '@realtimex/sdk';
253
+
254
+ try {
255
+ for await (const chunk of sdk.llm.chatStream(messages)) {
256
+ process.stdout.write(chunk.textResponse || '');
257
+ }
258
+ } catch (error) {
259
+ if (error instanceof LLMPermissionError) {
260
+ // Permission not granted: 'llm.chat' etc.
261
+ console.error(`Permission required: ${error.permission}`);
262
+ } else if (error instanceof LLMProviderError) {
263
+ // Provider errors: rate limit, timeout, model unavailable, etc.
264
+ console.error(`Provider error: ${error.message} (code: ${error.code})`);
265
+ // Common codes: LLM_STREAM_ERROR, RATE_LIMIT, PROVIDER_UNAVAILABLE
266
+ }
267
+ }
268
+ ```
269
+
270
+ | Error Class | Common Codes | Description |
271
+ |-------------|--------------|-------------|
272
+ | `LLMPermissionError` | `PERMISSION_REQUIRED` | Missing or denied permission |
273
+ | `LLMProviderError` | `LLM_STREAM_ERROR`, `RATE_LIMIT`, `PROVIDER_UNAVAILABLE` | AI provider issues |
274
+
122
275
  ## Environment Variables
123
276
 
124
277
  | Variable | Description |
package/dist/index.d.mts CHANGED
@@ -8,6 +8,7 @@ interface SDKConfig {
8
8
  appName?: string;
9
9
  };
10
10
  defaultPort?: number;
11
+ permissions?: string[];
11
12
  }
12
13
  interface Activity {
13
14
  id: string;
@@ -86,7 +87,12 @@ interface Task {
86
87
  declare class ActivitiesModule {
87
88
  private baseUrl;
88
89
  private appId;
89
- constructor(realtimexUrl: string, appId: string);
90
+ private appName;
91
+ constructor(realtimexUrl: string, appId: string, appName?: string);
92
+ /**
93
+ * Request a single permission from Electron via internal API
94
+ */
95
+ private requestPermission;
90
96
  private request;
91
97
  /**
92
98
  * Insert a new activity
@@ -114,15 +120,16 @@ declare class ActivitiesModule {
114
120
  }): Promise<Activity[]>;
115
121
  }
116
122
 
117
- /**
118
- * Webhook Module - Call RealtimeX webhook
119
- */
120
-
121
123
  declare class WebhookModule {
122
124
  private realtimexUrl;
123
125
  private appName?;
124
126
  private appId?;
125
127
  constructor(realtimexUrl: string, appName?: string, appId?: string);
128
+ /**
129
+ * Request a single permission from Electron via internal API
130
+ */
131
+ private requestPermission;
132
+ private request;
126
133
  triggerAgent(payload: TriggerAgentPayload): Promise<TriggerAgentResponse>;
127
134
  ping(): Promise<{
128
135
  success: boolean;
@@ -135,11 +142,34 @@ declare class WebhookModule {
135
142
  * API Module - Call RealtimeX public APIs
136
143
  */
137
144
 
145
+ /**
146
+ * Error thrown when a permission is permanently denied
147
+ */
148
+ declare class PermissionDeniedError extends Error {
149
+ readonly permission: string;
150
+ constructor(permission: string, message?: string);
151
+ }
152
+ /**
153
+ * Error thrown when a permission needs to be granted
154
+ */
155
+ declare class PermissionRequiredError extends Error {
156
+ readonly permission: string;
157
+ constructor(permission: string, message?: string);
158
+ }
138
159
  declare class ApiModule {
139
160
  private realtimexUrl;
140
161
  private appId;
141
- constructor(realtimexUrl: string, appId: string);
162
+ private appName;
163
+ constructor(realtimexUrl: string, appId: string, appName?: string);
142
164
  private getHeaders;
165
+ /**
166
+ * Request a single permission from Electron via internal API
167
+ */
168
+ private requestPermission;
169
+ /**
170
+ * Make an API call with automatic permission handling
171
+ */
172
+ private apiCall;
143
173
  getAgents(): Promise<Agent[]>;
144
174
  getWorkspaces(): Promise<Workspace[]>;
145
175
  getThreads(workspaceSlug: string): Promise<Thread[]>;
@@ -219,6 +249,297 @@ declare class PortModule {
219
249
  getPort(): Promise<number>;
220
250
  }
221
251
 
252
+ /**
253
+ * LLM Module for RealtimeX SDK
254
+ *
255
+ * Provides access to LLM capabilities:
256
+ * - Chat completion (sync and streaming)
257
+ * - Embedding generation
258
+ * - Provider/model listing
259
+ * - Vector storage (upsert, query, delete)
260
+ */
261
+ interface ChatMessage {
262
+ role: 'system' | 'user' | 'assistant';
263
+ content: string;
264
+ }
265
+ interface ChatOptions {
266
+ model?: string;
267
+ provider?: string;
268
+ temperature?: number;
269
+ max_tokens?: number;
270
+ }
271
+ interface ChatResponse {
272
+ success: boolean;
273
+ response?: {
274
+ content: string;
275
+ model: string;
276
+ provider?: string;
277
+ metrics?: {
278
+ prompt_tokens: number;
279
+ completion_tokens: number;
280
+ total_tokens: number;
281
+ duration?: number;
282
+ outputTps?: number;
283
+ };
284
+ };
285
+ error?: string;
286
+ code?: string;
287
+ }
288
+ interface StreamChunk {
289
+ uuid?: string;
290
+ type?: string;
291
+ textResponse?: string;
292
+ close?: boolean;
293
+ error?: boolean;
294
+ }
295
+ interface EmbedOptions {
296
+ provider?: string;
297
+ model?: string;
298
+ }
299
+ interface EmbedResponse {
300
+ success: boolean;
301
+ embeddings?: number[][];
302
+ provider?: string;
303
+ model?: string;
304
+ dimensions?: number;
305
+ error?: string;
306
+ code?: string;
307
+ errors?: string[];
308
+ }
309
+ interface Provider {
310
+ provider: string;
311
+ models: Array<{
312
+ id: string;
313
+ name: string;
314
+ }>;
315
+ }
316
+ interface ProvidersResponse {
317
+ success: boolean;
318
+ llm?: Provider[];
319
+ embedding?: Provider[];
320
+ error?: string;
321
+ code?: string;
322
+ }
323
+ interface VectorRecord {
324
+ id: string;
325
+ vector: number[];
326
+ metadata?: {
327
+ text?: string;
328
+ documentId?: string;
329
+ workspaceId?: string;
330
+ [key: string]: unknown;
331
+ };
332
+ }
333
+ interface VectorUpsertOptions {
334
+ workspaceId?: string;
335
+ }
336
+ interface VectorUpsertResponse {
337
+ success: boolean;
338
+ upserted?: number;
339
+ namespace?: string;
340
+ error?: string;
341
+ code?: string;
342
+ errors?: string[];
343
+ }
344
+ interface VectorQueryOptions {
345
+ topK?: number;
346
+ filter?: {
347
+ workspaceId?: string;
348
+ documentId?: string;
349
+ };
350
+ workspaceId?: string;
351
+ provider?: string;
352
+ model?: string;
353
+ }
354
+ interface VectorQueryResult {
355
+ id: string;
356
+ score: number;
357
+ metadata?: {
358
+ text?: string;
359
+ documentId?: string;
360
+ workspaceId?: string;
361
+ [key: string]: unknown;
362
+ };
363
+ }
364
+ interface VectorQueryResponse {
365
+ success: boolean;
366
+ results?: VectorQueryResult[];
367
+ error?: string;
368
+ code?: string;
369
+ }
370
+ interface VectorDeleteOptions {
371
+ workspaceId?: string;
372
+ deleteAll: true;
373
+ }
374
+ interface VectorDeleteResponse {
375
+ success: boolean;
376
+ deleted?: number;
377
+ message?: string;
378
+ error?: string;
379
+ code?: string;
380
+ errors?: string[];
381
+ }
382
+ interface VectorListWorkspacesResponse {
383
+ success: boolean;
384
+ workspaces?: string[];
385
+ error?: string;
386
+ code?: string;
387
+ }
388
+ declare class LLMPermissionError extends Error {
389
+ permission: string;
390
+ code: string;
391
+ constructor(permission: string, code?: string);
392
+ }
393
+ declare class LLMProviderError extends Error {
394
+ code: string;
395
+ constructor(message: string, code?: string);
396
+ }
397
+ declare class VectorStore {
398
+ private baseUrl;
399
+ private appId;
400
+ constructor(baseUrl: string, appId: string);
401
+ private get headers();
402
+ /**
403
+ * Upsert (insert or update) vectors into storage
404
+ *
405
+ * @example
406
+ * ```ts
407
+ * await sdk.llm.vectors.upsert([
408
+ * { id: 'chunk-1', vector: embeddings[0], metadata: { text: 'Hello', documentId: 'doc-1' } }
409
+ * ], { workspaceId: 'ws-123' });
410
+ * ```
411
+ */
412
+ upsert(vectors: VectorRecord[], options?: VectorUpsertOptions): Promise<VectorUpsertResponse>;
413
+ /**
414
+ * Query similar vectors by embedding
415
+ *
416
+ * @example
417
+ * ```ts
418
+ * const results = await sdk.llm.vectors.query(queryVector, {
419
+ * topK: 5,
420
+ * filter: { documentId: 'doc-1' },
421
+ * workspaceId: 'ws-123'
422
+ * });
423
+ * ```
424
+ */
425
+ query(vector: number[], options?: VectorQueryOptions): Promise<VectorQueryResponse>;
426
+ /**
427
+ * Delete vectors from storage
428
+ *
429
+ * Note: Currently only supports deleteAll: true
430
+ * Use workspaceId to scope deletion to a specific workspace
431
+ *
432
+ * @example
433
+ * ```ts
434
+ * await sdk.llm.vectors.delete({ deleteAll: true, workspaceId: 'ws-123' });
435
+ * ```
436
+ */
437
+ delete(options: VectorDeleteOptions): Promise<VectorDeleteResponse>;
438
+ /**
439
+ * List all available workspaces (namespaces) for this app
440
+ *
441
+ * @example
442
+ * ```ts
443
+ * const { workspaces } = await sdk.llm.vectors.listWorkspaces();
444
+ * console.log('Workspaces:', workspaces);
445
+ * ```
446
+ */
447
+ listWorkspaces(): Promise<VectorListWorkspacesResponse>;
448
+ }
449
+ declare class LLMModule {
450
+ private baseUrl;
451
+ private appId;
452
+ vectors: VectorStore;
453
+ constructor(baseUrl: string, appId: string);
454
+ private get headers();
455
+ /**
456
+ * Get available LLM and embedding providers/models
457
+ *
458
+ * @example
459
+ * ```ts
460
+ * const { llm, embedding } = await sdk.llm.getProviders();
461
+ * console.log('Available LLM models:', llm[0].models);
462
+ * ```
463
+ */
464
+ getProviders(): Promise<ProvidersResponse>;
465
+ /**
466
+ * Send a chat completion request (synchronous)
467
+ *
468
+ * @example
469
+ * ```ts
470
+ * const response = await sdk.llm.chat([
471
+ * { role: 'system', content: 'You are a helpful assistant.' },
472
+ * { role: 'user', content: 'Hello!' }
473
+ * ], { model: 'gpt-4o', temperature: 0.7 });
474
+ *
475
+ * console.log(response.response?.content);
476
+ * ```
477
+ */
478
+ chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResponse>;
479
+ /**
480
+ * Send a streaming chat completion request (SSE)
481
+ *
482
+ * @example
483
+ * ```ts
484
+ * for await (const chunk of sdk.llm.chatStream([
485
+ * { role: 'user', content: 'Tell me a story' }
486
+ * ])) {
487
+ * process.stdout.write(chunk.textResponse || '');
488
+ * }
489
+ * ```
490
+ */
491
+ chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;
492
+ /**
493
+ * Generate vector embeddings from text
494
+ *
495
+ * @example
496
+ * ```ts
497
+ * // Single text
498
+ * const { embeddings } = await sdk.llm.embed('Hello world');
499
+ *
500
+ * // Multiple texts
501
+ * const { embeddings } = await sdk.llm.embed(['Hello', 'World']);
502
+ * ```
503
+ */
504
+ embed(input: string | string[], options?: EmbedOptions): Promise<EmbedResponse>;
505
+ /**
506
+ * Helper: Embed text and store as vectors in one call
507
+ *
508
+ * @example
509
+ * ```ts
510
+ * await sdk.llm.embedAndStore({
511
+ * texts: ['Hello world', 'Goodbye world'],
512
+ * documentId: 'doc-123',
513
+ * workspaceId: 'ws-456'
514
+ * });
515
+ * ```
516
+ */
517
+ embedAndStore(params: {
518
+ texts: string[];
519
+ documentId?: string;
520
+ workspaceId?: string;
521
+ idPrefix?: string;
522
+ provider?: string;
523
+ model?: string;
524
+ }): Promise<VectorUpsertResponse>;
525
+ /**
526
+ * Helper: Search similar documents by text query
527
+ *
528
+ * @example
529
+ * ```ts
530
+ * const results = await sdk.llm.search('What is RealtimeX?', {
531
+ * topK: 5,
532
+ * workspaceId: 'ws-123'
533
+ * });
534
+ *
535
+ * for (const result of results) {
536
+ * console.log(result.metadata?.text, result.score);
537
+ * }
538
+ * ```
539
+ */
540
+ search(query: string, options?: VectorQueryOptions): Promise<VectorQueryResult[]>;
541
+ }
542
+
222
543
  /**
223
544
  * RealtimeX Local App SDK
224
545
  *
@@ -232,14 +553,22 @@ declare class RealtimeXSDK {
232
553
  api: ApiModule;
233
554
  task: TaskModule;
234
555
  port: PortModule;
556
+ llm: LLMModule;
235
557
  readonly appId: string;
236
558
  readonly appName: string | undefined;
559
+ private readonly realtimexUrl;
560
+ private readonly permissions;
237
561
  private static DEFAULT_REALTIMEX_URL;
238
562
  constructor(config?: SDKConfig);
563
+ /**
564
+ * Register app with RealtimeX hub and request declared permissions upfront.
565
+ * This is called automatically if permissions are provided in constructor.
566
+ */
567
+ register(permissions?: string[]): Promise<void>;
239
568
  /**
240
569
  * Get environment variable (works in Node.js and browser)
241
570
  */
242
571
  private getEnvVar;
243
572
  }
244
573
 
245
- export { ActivitiesModule, type Activity, type Agent, ApiModule, PortModule, RealtimeXSDK, type SDKConfig, type Task, TaskModule, type TaskRun, type Thread, type TriggerAgentPayload, type TriggerAgentResponse, WebhookModule, type Workspace };
574
+ export { ActivitiesModule, type Activity, type Agent, ApiModule, type ChatMessage, type ChatOptions, type ChatResponse, type EmbedOptions, type EmbedResponse, LLMModule, LLMPermissionError, LLMProviderError, PermissionDeniedError, PermissionRequiredError, PortModule, type Provider, type ProvidersResponse, RealtimeXSDK, type SDKConfig, type StreamChunk, type Task, TaskModule, type TaskRun, type Thread, type TriggerAgentPayload, type TriggerAgentResponse, type VectorDeleteOptions, type VectorDeleteResponse, type VectorQueryOptions, type VectorQueryResponse, type VectorQueryResult, type VectorRecord, VectorStore, type VectorUpsertOptions, type VectorUpsertResponse, WebhookModule, type Workspace };