@realtimex/sdk 1.0.9 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -25,6 +25,9 @@ Before using this SDK, ensure your Supabase database is set up:
25
25
  import { RealtimeXSDK } from '@realtimex/sdk';
26
26
 
27
27
  const sdk = new RealtimeXSDK({
28
+ // Development Mode: Use API key for full access
29
+ realtimex: { apiKey: 'sk-abc123...' },
30
+ // OR Production Mode: Declare permissions
28
31
  permissions: ['activities.read', 'activities.write', 'webhook.trigger']
29
32
  });
30
33
 
@@ -59,8 +62,9 @@ When you start your Local App from the RealtimeX Main App:
59
62
  const sdk = new RealtimeXSDK({
60
63
  realtimex: {
61
64
  url: 'http://custom-host:3001', // Default: localhost:3001
62
- appId: 'custom-id', // Override auto-detected
63
- appName: 'My App', // Override auto-detected
65
+ apiKey: 'sk-abc123...', // Development mode
66
+ appId: 'custom-id', // Production mode (override)
67
+ appName: 'My App', // Optional
64
68
  }
65
69
  });
66
70
  ```
@@ -120,6 +124,165 @@ const threads = await sdk.api.getThreads('sales');
120
124
  const task = await sdk.api.getTask('task-uuid');
121
125
  ```
122
126
 
127
+ ### LLM Module
128
+
129
+ Access AI capabilities through the RealtimeX proxy:
130
+
131
+ ```typescript
132
+ const sdk = new RealtimeXSDK({
133
+ permissions: ['llm.chat', 'llm.embed', 'llm.providers', 'vectors.write', 'vectors.read']
134
+ });
135
+ ```
136
+
137
+ #### List Providers & Models
138
+
139
+ ```typescript
140
+
141
+
142
+ // Get only configured Chat providers (recommended)
143
+ const chatRes = await sdk.llm.chatProviders();
144
+ // chatRes.providers: Array of chat providers with models
145
+
146
+ // Get only configured Embedding providers (recommended)
147
+ const embedRes = await sdk.llm.embedProviders();
148
+ // embedRes.providers: Array of embedding providers with models
149
+ ```
150
+
151
+
152
+ #### Chat Completion
153
+
154
+ ```typescript
155
+ // Sync Chat
156
+ const response = await sdk.llm.chat(
157
+ [
158
+ { role: 'system', content: 'You are a helpful assistant.' },
159
+ { role: 'user', content: 'What is RealtimeX?' }
160
+ ],
161
+ {
162
+ model: 'gpt-4o', // Optional: specific model
163
+ provider: 'openai', // Optional: specific provider
164
+ temperature: 0.7, // Optional: 0.0-2.0
165
+ max_tokens: 1000 // Optional: max response tokens
166
+ }
167
+ );
168
+ console.log(response.response?.content);
169
+
170
+ // Streaming Chat
171
+ for await (const chunk of sdk.llm.chatStream(messages, options)) {
172
+ process.stdout.write(chunk.textResponse || '');
173
+ }
174
+ ```
175
+
176
+ #### Generate Embeddings
177
+
178
+ ```typescript
179
+ const { embeddings, dimensions, provider, model } = await sdk.llm.embed(
180
+ ['Hello world', 'Goodbye'],
181
+ { provider: 'openai', model: 'text-embedding-3-small' } // Optional
182
+ );
183
+ // embeddings: number[][] - vector arrays
184
+ // dimensions: number - vector dimension (e.g., 1536)
185
+ ```
186
+
187
+ #### Vector Store Operations
188
+
189
+ ```typescript
190
+ // Upsert vectors with metadata
191
+ await sdk.llm.vectors.upsert([
192
+ {
193
+ id: 'chunk-1',
194
+ vector: embeddings[0],
195
+ metadata: {
196
+ text: 'Hello world', // Original text (for retrieval)
197
+ documentId: 'doc-1', // Logical grouping
198
+ customField: 'any value' // Any custom metadata
199
+ }
200
+ }
201
+ ], {
202
+ workspaceId: 'ws-123' // Optional: physical namespace isolation
203
+ });
204
+
205
+ // Query similar vectors
206
+ const results = await sdk.llm.vectors.query(queryVector, {
207
+ topK: 5, // Number of results
208
+ workspaceId: 'ws-123', // Optional: search in specific workspace
209
+ filter: { documentId: 'doc-1' } // Optional: filter by document
210
+ });
211
+ // returns: { success, results: [{ id, score, metadata }] }
212
+
213
+ // List all workspaces for this app
214
+ const { workspaces } = await sdk.llm.vectors.listWorkspaces();
215
+ // returns: { success, workspaces: ['ws-123', 'default', ...] }
216
+
217
+ // Delete all vectors in a workspace
218
+ await sdk.llm.vectors.delete({
219
+ deleteAll: true,
220
+ workspaceId: 'ws-123'
221
+ });
222
+ ```
223
+
224
+ #### High-Level Helpers
225
+
226
+ These combine multiple operations for common RAG patterns:
227
+
228
+ ```typescript
229
+ // embedAndStore: Text → Embed → Store (one call)
230
+ await sdk.llm.embedAndStore(
231
+ ['Document text 1', 'Document text 2'], // texts to embed
232
+ {
233
+ documentId: 'doc-123', // Optional: logical grouping
234
+ workspaceId: 'ws-456', // Optional: physical isolation
235
+ provider: 'openai', // Optional: embedding provider
236
+ model: 'text-embedding-3-small' // Optional: embedding model
237
+ }
238
+ );
239
+
240
+ // search: Query → Embed → Search (one call)
241
+ const searchResults = await sdk.llm.search(
242
+ 'What is RealtimeX?', // search query (text, not vector)
243
+ {
244
+ topK: 5, // Number of results
245
+ workspaceId: 'ws-123', // Optional: search in workspace
246
+ filter: { documentId: 'doc-1' }, // Optional: filter by document
247
+ provider: 'openai', // Optional: embedding provider
248
+ model: 'text-embedding-3-small' // Optional: embedding model
249
+ }
250
+ );
251
+ // returns: [{ id, score, metadata: { text, documentId, ... } }]
252
+ ```
253
+
254
+ > **Note on Isolation:**
255
+ > - `workspaceId`: Creates **physical namespace** (`sdk_{appId}_{wsId}`) - data completely isolated
256
+ > - `documentId`: Stored as **metadata**, filtered after search (post-filter)
257
+
258
+ ### Error Handling
259
+
260
+ The SDK provides specific error classes for handling LLM-related issues:
261
+
262
+ ```typescript
263
+ import { LLMPermissionError, LLMProviderError } from '@realtimex/sdk';
264
+
265
+ try {
266
+ for await (const chunk of sdk.llm.chatStream(messages)) {
267
+ process.stdout.write(chunk.textResponse || '');
268
+ }
269
+ } catch (error) {
270
+ if (error instanceof LLMPermissionError) {
271
+ // Permission not granted: 'llm.chat' etc.
272
+ console.error(`Permission required: ${error.permission}`);
273
+ } else if (error instanceof LLMProviderError) {
274
+ // Provider errors: rate limit, timeout, model unavailable, etc.
275
+ console.error(`Provider error: ${error.message} (code: ${error.code})`);
276
+ // Common codes: LLM_STREAM_ERROR, RATE_LIMIT, PROVIDER_UNAVAILABLE
277
+ }
278
+ }
279
+ ```
280
+
281
+ | Error Class | Common Codes | Description |
282
+ |-------------|--------------|-------------|
283
+ | `LLMPermissionError` | `PERMISSION_REQUIRED` | Missing or denied permission |
284
+ | `LLMProviderError` | `LLM_STREAM_ERROR`, `RATE_LIMIT`, `PROVIDER_UNAVAILABLE` | AI provider issues |
285
+
123
286
  ## Environment Variables
124
287
 
125
288
  | Variable | Description |
package/dist/index.d.mts CHANGED
@@ -6,6 +6,7 @@ interface SDKConfig {
6
6
  url?: string;
7
7
  appId?: string;
8
8
  appName?: string;
9
+ apiKey?: string;
9
10
  };
10
11
  defaultPort?: number;
11
12
  permissions?: string[];
@@ -88,7 +89,8 @@ declare class ActivitiesModule {
88
89
  private baseUrl;
89
90
  private appId;
90
91
  private appName;
91
- constructor(realtimexUrl: string, appId: string, appName?: string);
92
+ private apiKey?;
93
+ constructor(realtimexUrl: string, appId: string, appName?: string, apiKey?: string);
92
94
  /**
93
95
  * Request a single permission from Electron via internal API
94
96
  */
@@ -124,7 +126,8 @@ declare class WebhookModule {
124
126
  private realtimexUrl;
125
127
  private appName?;
126
128
  private appId?;
127
- constructor(realtimexUrl: string, appName?: string, appId?: string);
129
+ private apiKey?;
130
+ constructor(realtimexUrl: string, appName?: string, appId?: string, apiKey?: string);
128
131
  /**
129
132
  * Request a single permission from Electron via internal API
130
133
  */
@@ -160,7 +163,8 @@ declare class ApiModule {
160
163
  private realtimexUrl;
161
164
  private appId;
162
165
  private appName;
163
- constructor(realtimexUrl: string, appId: string, appName?: string);
166
+ private apiKey?;
167
+ constructor(realtimexUrl: string, appId: string, appName?: string, apiKey?: string);
164
168
  private getHeaders;
165
169
  /**
166
170
  * Request a single permission from Electron via internal API
@@ -190,7 +194,8 @@ declare class TaskModule {
190
194
  private realtimexUrl;
191
195
  private appName?;
192
196
  private appId?;
193
- constructor(realtimexUrl: string, appName?: string, appId?: string);
197
+ private apiKey?;
198
+ constructor(realtimexUrl: string, appName?: string, appId?: string, apiKey?: string);
194
199
  /**
195
200
  * Mark task as processing
196
201
  */
@@ -249,6 +254,310 @@ declare class PortModule {
249
254
  getPort(): Promise<number>;
250
255
  }
251
256
 
257
+ /**
258
+ * LLM Module for RealtimeX SDK
259
+ *
260
+ * Provides access to LLM capabilities:
261
+ * - Chat completion (sync and streaming)
262
+ * - Embedding generation
263
+ * - Provider/model listing
264
+ * - Vector storage (upsert, query, delete)
265
+ */
266
+ interface ChatMessage {
267
+ role: 'system' | 'user' | 'assistant';
268
+ content: string;
269
+ }
270
+ interface ChatOptions {
271
+ model?: string;
272
+ provider?: string;
273
+ temperature?: number;
274
+ max_tokens?: number;
275
+ }
276
+ interface ChatResponse {
277
+ success: boolean;
278
+ response?: {
279
+ content: string;
280
+ model: string;
281
+ provider?: string;
282
+ metrics?: {
283
+ prompt_tokens: number;
284
+ completion_tokens: number;
285
+ total_tokens: number;
286
+ duration?: number;
287
+ outputTps?: number;
288
+ };
289
+ };
290
+ error?: string;
291
+ code?: string;
292
+ }
293
+ interface StreamChunk {
294
+ uuid?: string;
295
+ type?: string;
296
+ textResponse?: string;
297
+ close?: boolean;
298
+ error?: boolean;
299
+ }
300
+ interface EmbedOptions {
301
+ provider?: string;
302
+ model?: string;
303
+ }
304
+ interface EmbedResponse {
305
+ success: boolean;
306
+ embeddings?: number[][];
307
+ provider?: string;
308
+ model?: string;
309
+ dimensions?: number;
310
+ error?: string;
311
+ code?: string;
312
+ errors?: string[];
313
+ }
314
+ interface Provider {
315
+ provider: string;
316
+ models: Array<{
317
+ id: string;
318
+ name: string;
319
+ }>;
320
+ }
321
+ interface ProvidersResponse {
322
+ success: boolean;
323
+ llm?: Provider[];
324
+ embedding?: Provider[];
325
+ providers?: Provider[];
326
+ error?: string;
327
+ code?: string;
328
+ }
329
+ interface VectorRecord {
330
+ id: string;
331
+ vector: number[];
332
+ metadata?: {
333
+ text?: string;
334
+ documentId?: string;
335
+ workspaceId?: string;
336
+ [key: string]: unknown;
337
+ };
338
+ }
339
+ interface VectorUpsertOptions {
340
+ workspaceId?: string;
341
+ }
342
+ interface VectorUpsertResponse {
343
+ success: boolean;
344
+ upserted?: number;
345
+ namespace?: string;
346
+ error?: string;
347
+ code?: string;
348
+ errors?: string[];
349
+ }
350
+ interface VectorQueryOptions {
351
+ topK?: number;
352
+ filter?: {
353
+ workspaceId?: string;
354
+ documentId?: string;
355
+ };
356
+ workspaceId?: string;
357
+ provider?: string;
358
+ model?: string;
359
+ }
360
+ interface VectorQueryResult {
361
+ id: string;
362
+ score: number;
363
+ metadata?: {
364
+ text?: string;
365
+ documentId?: string;
366
+ workspaceId?: string;
367
+ [key: string]: unknown;
368
+ };
369
+ }
370
+ interface VectorQueryResponse {
371
+ success: boolean;
372
+ results?: VectorQueryResult[];
373
+ error?: string;
374
+ code?: string;
375
+ }
376
+ interface VectorDeleteOptions {
377
+ workspaceId?: string;
378
+ deleteAll: true;
379
+ }
380
+ interface VectorDeleteResponse {
381
+ success: boolean;
382
+ deleted?: number;
383
+ message?: string;
384
+ error?: string;
385
+ code?: string;
386
+ errors?: string[];
387
+ }
388
+ interface VectorListWorkspacesResponse {
389
+ success: boolean;
390
+ workspaces?: string[];
391
+ error?: string;
392
+ code?: string;
393
+ }
394
+ declare class LLMPermissionError extends Error {
395
+ permission: string;
396
+ code: string;
397
+ constructor(permission: string, code?: string);
398
+ }
399
+ declare class LLMProviderError extends Error {
400
+ code: string;
401
+ constructor(message: string, code?: string);
402
+ }
403
+ declare class VectorStore {
404
+ private baseUrl;
405
+ private appId;
406
+ private apiKey?;
407
+ constructor(baseUrl: string, appId: string, apiKey?: string | undefined);
408
+ private get headers();
409
+ /**
410
+ * Upsert (insert or update) vectors into storage
411
+ *
412
+ * @example
413
+ * ```ts
414
+ * await sdk.llm.vectors.upsert([
415
+ * { id: 'chunk-1', vector: embeddings[0], metadata: { text: 'Hello', documentId: 'doc-1' } }
416
+ * ], { workspaceId: 'ws-123' });
417
+ * ```
418
+ */
419
+ upsert(vectors: VectorRecord[], options?: VectorUpsertOptions): Promise<VectorUpsertResponse>;
420
+ /**
421
+ * Query similar vectors by embedding
422
+ *
423
+ * @example
424
+ * ```ts
425
+ * const results = await sdk.llm.vectors.query(queryVector, {
426
+ * topK: 5,
427
+ * filter: { documentId: 'doc-1' },
428
+ * workspaceId: 'ws-123'
429
+ * });
430
+ * ```
431
+ */
432
+ query(vector: number[], options?: VectorQueryOptions): Promise<VectorQueryResponse>;
433
+ /**
434
+ * Delete vectors from storage
435
+ *
436
+ * Note: Currently only supports deleteAll: true
437
+ * Use workspaceId to scope deletion to a specific workspace
438
+ *
439
+ * @example
440
+ * ```ts
441
+ * await sdk.llm.vectors.delete({ deleteAll: true, workspaceId: 'ws-123' });
442
+ * ```
443
+ */
444
+ delete(options: VectorDeleteOptions): Promise<VectorDeleteResponse>;
445
+ /**
446
+ * List all available workspaces (namespaces) for this app
447
+ *
448
+ * @example
449
+ * ```ts
450
+ * const { workspaces } = await sdk.llm.vectors.listWorkspaces();
451
+ * console.log('Workspaces:', workspaces);
452
+ * ```
453
+ */
454
+ listWorkspaces(): Promise<VectorListWorkspacesResponse>;
455
+ }
456
+ declare class LLMModule {
457
+ private baseUrl;
458
+ private appId;
459
+ private apiKey?;
460
+ vectors: VectorStore;
461
+ constructor(baseUrl: string, appId: string, apiKey?: string | undefined);
462
+ private get headers();
463
+ /**
464
+ * Get only configured chat (LLM) providers
465
+ *
466
+ * @example
467
+ * ```ts
468
+ * const { providers } = await sdk.llm.chatProviders();
469
+ * console.log('Available chat models:', providers[0].models);
470
+ * ```
471
+ */
472
+ chatProviders(): Promise<ProvidersResponse>;
473
+ /**
474
+ * Get only configured embedding providers
475
+ *
476
+ * @example
477
+ * ```ts
478
+ * const { providers } = await sdk.llm.embedProviders();
479
+ * console.log('Available embedding models:', providers[0].models);
480
+ * ```
481
+ */
482
+ embedProviders(): Promise<ProvidersResponse>;
483
+ /**
484
+ * Send a chat completion request (synchronous)
485
+ *
486
+ * @example
487
+ * ```ts
488
+ * const response = await sdk.llm.chat([
489
+ * { role: 'system', content: 'You are a helpful assistant.' },
490
+ * { role: 'user', content: 'Hello!' }
491
+ * ], { model: 'gpt-4o', temperature: 0.7 });
492
+ *
493
+ * console.log(response.response?.content);
494
+ * ```
495
+ */
496
+ chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResponse>;
497
+ /**
498
+ * Send a streaming chat completion request (SSE)
499
+ *
500
+ * @example
501
+ * ```ts
502
+ * for await (const chunk of sdk.llm.chatStream([
503
+ * { role: 'user', content: 'Tell me a story' }
504
+ * ])) {
505
+ * process.stdout.write(chunk.textResponse || '');
506
+ * }
507
+ * ```
508
+ */
509
+ chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator<StreamChunk, void, unknown>;
510
+ /**
511
+ * Generate vector embeddings from text
512
+ *
513
+ * @example
514
+ * ```ts
515
+ * // Single text
516
+ * const { embeddings } = await sdk.llm.embed('Hello world');
517
+ *
518
+ * // Multiple texts
519
+ * const { embeddings } = await sdk.llm.embed(['Hello', 'World']);
520
+ * ```
521
+ */
522
+ embed(input: string | string[], options?: EmbedOptions): Promise<EmbedResponse>;
523
+ /**
524
+ * Helper: Embed text and store as vectors in one call
525
+ *
526
+ * @example
527
+ * ```ts
528
+ * await sdk.llm.embedAndStore({
529
+ * texts: ['Hello world', 'Goodbye world'],
530
+ * documentId: 'doc-123',
531
+ * workspaceId: 'ws-456'
532
+ * });
533
+ * ```
534
+ */
535
+ embedAndStore(params: {
536
+ texts: string[];
537
+ documentId?: string;
538
+ workspaceId?: string;
539
+ idPrefix?: string;
540
+ provider?: string;
541
+ model?: string;
542
+ }): Promise<VectorUpsertResponse>;
543
+ /**
544
+ * Helper: Search similar documents by text query
545
+ *
546
+ * @example
547
+ * ```ts
548
+ * const results = await sdk.llm.search('What is RealtimeX?', {
549
+ * topK: 5,
550
+ * workspaceId: 'ws-123'
551
+ * });
552
+ *
553
+ * for (const result of results) {
554
+ * console.log(result.metadata?.text, result.score);
555
+ * }
556
+ * ```
557
+ */
558
+ search(query: string, options?: VectorQueryOptions): Promise<VectorQueryResult[]>;
559
+ }
560
+
252
561
  /**
253
562
  * RealtimeX Local App SDK
254
563
  *
@@ -262,8 +571,10 @@ declare class RealtimeXSDK {
262
571
  api: ApiModule;
263
572
  task: TaskModule;
264
573
  port: PortModule;
574
+ llm: LLMModule;
265
575
  readonly appId: string;
266
576
  readonly appName: string | undefined;
577
+ readonly apiKey: string | undefined;
267
578
  private readonly realtimexUrl;
268
579
  private readonly permissions;
269
580
  private static DEFAULT_REALTIMEX_URL;
@@ -277,6 +588,16 @@ declare class RealtimeXSDK {
277
588
  * Get environment variable (works in Node.js and browser)
278
589
  */
279
590
  private getEnvVar;
591
+ /**
592
+ * Ping RealtimeX server to verify connection and authentication.
593
+ * Works in both development (API Key) and production (App ID) modes.
594
+ */
595
+ ping(): Promise<{
596
+ success: boolean;
597
+ mode: 'development' | 'production';
598
+ appId?: string;
599
+ timestamp: string;
600
+ }>;
280
601
  }
281
602
 
282
- export { ActivitiesModule, type Activity, type Agent, ApiModule, PermissionDeniedError, PermissionRequiredError, PortModule, RealtimeXSDK, type SDKConfig, type Task, TaskModule, type TaskRun, type Thread, type TriggerAgentPayload, type TriggerAgentResponse, WebhookModule, type Workspace };
603
+ export { ActivitiesModule, type Activity, type Agent, ApiModule, type ChatMessage, type ChatOptions, type ChatResponse, type EmbedOptions, type EmbedResponse, LLMModule, LLMPermissionError, LLMProviderError, PermissionDeniedError, PermissionRequiredError, PortModule, type Provider, type ProvidersResponse, RealtimeXSDK, type SDKConfig, type StreamChunk, type Task, TaskModule, type TaskRun, type Thread, type TriggerAgentPayload, type TriggerAgentResponse, type VectorDeleteOptions, type VectorDeleteResponse, type VectorQueryOptions, type VectorQueryResponse, type VectorQueryResult, type VectorRecord, VectorStore, type VectorUpsertOptions, type VectorUpsertResponse, WebhookModule, type Workspace };