@modelriver/client 1.1.3 → 1.1.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -6,7 +6,7 @@ Official ModelRiver client SDK for real-time AI response streaming via WebSocket
 
  - **WebSocket streaming** - Receive AI responses in real-time via Phoenix Channels
  - **Auto-reconnection** - Automatically reconnects on connection loss
- - **Persistence** - Survives page refreshes with localStorage persistence
+ - **Persistence + reconnect** - Survives page refreshes with localStorage + backend reconnect
  - **Framework adapters** - First-class support for React, Vue, Angular, and Svelte
  - **CDN ready** - Use via script tag without a build step
  - **TypeScript** - Full type definitions included
@@ -27,16 +27,16 @@ pnpm add @modelriver/client
  ### CDN
 
  ```html
- <script src="https://cdn.modelriver.com/client/v1.0.0/modelriver.min.js"></script>
+ <script src="https://cdn.modelriver.com/client/v1.1.35/modelriver.min.js"></script>
  <!-- or latest -->
  <script src="https://cdn.modelriver.com/client/latest/modelriver.min.js"></script>
  ```
 
  ## Quick Start
 
- ### 1. Get channel ID from your backend
+ ### 1. Get async connection details from your backend
 
- Your backend calls the ModelRiver `/api/ai/async` endpoint and receives connection details:
+ Your backend calls the ModelRiver `/api/v1/ai/async` endpoint and receives connection details:
 
  ```javascript
  // Your backend endpoint proxies to ModelRiver
@@ -45,18 +45,19 @@ const response = await fetch('/api/ai/request', {
    body: JSON.stringify({ message: 'Hello AI' }),
  });
 
- // Response from /api/ai/async:
+ // Response from /api/v1/ai/async:
  // {
  //   "message": "success",
  //   "status": "pending",
  //   "channel_id": "a1b2c3d4-...",
+ //   "ws_token": "one-time-websocket-token",
  //   "websocket_url": "wss://api.modelriver.com/socket",
- //   "websocket_channel": "ai_response:a1b2c3d4-..."
+ //   "websocket_channel": "ai_response:PROJECT_ID:a1b2c3d4-..."
  // }
- const { channel_id, websocket_url, websocket_channel } = await response.json();
+ const { channel_id, ws_token, websocket_url, websocket_channel } = await response.json();
  ```
 
- ### 2. Connect to ModelRiver
+ ### 2. Connect to ModelRiver WebSocket
 
  ```javascript
  import { ModelRiverClient } from '@modelriver/client';
@@ -73,7 +74,12 @@ client.on('error', (error) => {
    console.error('Error:', error);
  });
 
- client.connect({ channelId: channel_id, websocketUrl: websocket_url });
+ client.connect({
+   channelId: channel_id,
+   wsToken: ws_token,
+   websocketUrl: websocket_url,
+   websocketChannel: websocket_channel,
+ });
  ```
 
  ## Framework Usage
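In every framework example below, `yourBackendAPI.createRequest(...)` (or the `/api/ai/request` fetch) stands for your own backend proxy in front of ModelRiver. A minimal sketch of such a proxy, assuming an Express server, Node 18's global `fetch`, a Bearer-token auth header, a `{ message }` request body, and `https://api.modelriver.com` as the HTTP base URL (none of these details are documented in this package, so treat them as placeholders):

```typescript
// Hypothetical backend proxy (Express + Node 18 global fetch).
// Header name, body shape, base URL, and env var are assumptions;
// check the ModelRiver API docs for the real authentication scheme.
import express from 'express';

const app = express();
app.use(express.json());

app.post('/api/ai/request', async (req, res) => {
  // Call ModelRiver server-side so the API key never reaches the browser.
  const upstream = await fetch('https://api.modelriver.com/api/v1/ai/async', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.MODELRIVER_API_KEY}`, // assumed header
    },
    body: JSON.stringify({ message: req.body.message }), // assumed body shape
  });

  // Forward only the fields the frontend SDK needs.
  const { channel_id, ws_token, websocket_url, websocket_channel } = await upstream.json();
  res.json({ channel_id, ws_token, websocket_url, websocket_channel });
});

app.listen(3000);
```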
@@ -97,8 +103,19 @@ function ChatComponent() {
    });
 
    const handleSend = async () => {
-     const { channel_id, websocket_url } = await yourBackendAPI.createRequest(message);
-     connect({ channelId: channel_id, websocketUrl: websocket_url });
+     const {
+       channel_id,
+       ws_token,
+       websocket_url,
+       websocket_channel,
+     } = await yourBackendAPI.createRequest(message); // calls /api/v1/ai/async
+
+     connect({
+       channelId: channel_id,
+       wsToken: ws_token,
+       websocketUrl: websocket_url,
+       websocketChannel: websocket_channel,
+     });
    };
 
    return (
@@ -144,8 +161,19 @@ const {
  });
 
  async function handleSend() {
-   const { channel_id, websocket_url } = await yourBackendAPI.createRequest(message);
-   connect({ channelId: channel_id, websocketUrl: websocket_url });
+   const {
+     channel_id,
+     ws_token,
+     websocket_url,
+     websocket_channel,
+   } = await yourBackendAPI.createRequest(message); // calls /api/v1/ai/async
+
+   connect({
+     channelId: channel_id,
+     wsToken: ws_token,
+     websocketUrl: websocket_url,
+     websocketChannel: websocket_channel,
+   });
  }
  </script>
 
@@ -198,8 +226,19 @@ export class ChatComponent implements OnDestroy {
    }
 
    async send() {
-     const { channel_id, websocket_url } = await this.backendService.createRequest(message);
-     this.modelRiver.connect({ channelId: channel_id, websocketUrl: websocket_url });
+     const {
+       channel_id,
+       ws_token,
+       websocket_url,
+       websocket_channel,
+     } = await this.backendService.createRequest(message); // calls /api/v1/ai/async
+
+     this.modelRiver.connect({
+       channelId: channel_id,
+       wsToken: ws_token,
+       websocketUrl: websocket_url,
+       websocketChannel: websocket_channel,
+     });
    }
 
    ngOnDestroy() {
@@ -222,8 +261,19 @@ export class ChatComponent implements OnDestroy {
  const { response, error, isConnected, steps, connect, disconnect } = modelRiver;
 
  async function send() {
-   const { channel_id, websocket_url } = await backendAPI.createRequest(message);
-   connect({ channelId: channel_id, websocketUrl: websocket_url });
+   const {
+     channel_id,
+     ws_token,
+     websocket_url,
+     websocket_channel,
+   } = await backendAPI.createRequest(message); // calls /api/v1/ai/async
+
+   connect({
+     channelId: channel_id,
+     wsToken: ws_token,
+     websocketUrl: websocket_url,
+     websocketChannel: websocket_channel,
+   });
  }
 
  onDestroy(() => disconnect());
@@ -271,11 +321,21 @@ export class ChatComponent implements OnDestroy {
  });
 
  document.getElementById('send').addEventListener('click', async () => {
-   // Get channel ID from your backend
+   // Get async connection info from your backend
    const res = await fetch('/api/ai/request', { method: 'POST' });
-   const { channel_id, websocket_url } = await res.json();
+   const {
+     channel_id,
+     ws_token,
+     websocket_url,
+     websocket_channel,
+   } = await res.json(); // your backend calls /api/v1/ai/async
 
-   client.connect({ channelId: channel_id, websocketUrl: websocket_url });
+   client.connect({
+     channelId: channel_id,
+     wsToken: ws_token,
+     websocketUrl: websocket_url,
+     websocketChannel: websocket_channel,
+   });
  });
  </script>
  </body>
@@ -291,6 +351,7 @@ export class ChatComponent implements OnDestroy {
  ```typescript
  interface ModelRiverClientOptions {
    baseUrl?: string; // WebSocket URL (default: 'wss://api.modelriver.com/socket')
+   apiBaseUrl?: string; // Optional HTTP base URL for backend reconnect (/api/v1/ai/reconnect)
    debug?: boolean; // Enable debug logging (default: false)
    persist?: boolean; // Enable localStorage persistence (default: true)
    storageKeyPrefix?: string; // Storage key prefix (default: 'modelriver_')
@@ -307,6 +368,7 @@ interface ModelRiverClientOptions {
  | `disconnect()` | Disconnect from WebSocket |
  | `reset()` | Reset state and clear stored data |
  | `reconnect()` | Reconnect using stored channel ID |
+ | `reconnectWithBackend()` | Call your backend `/api/v1/ai/reconnect` to get a fresh `ws_token` and reconnect |
  | `getState()` | Get current client state |
  | `hasPendingRequest()` | Check if there's a pending request |
  | `on(event, callback)` | Add event listener (returns unsubscribe function) |
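Taken together with the `persist` and `apiBaseUrl` options above, `hasPendingRequest()` and `reconnectWithBackend()` cover page-refresh recovery. A sketch of how that might look at app startup, assuming the options are passed to the `ModelRiverClient` constructor and that `reconnectWithBackend()` needs no arguments (both assumptions; only the option and method names come from this README):

```typescript
import { ModelRiverClient } from '@modelriver/client';

// Assumed constructor shape: an options object matching ModelRiverClientOptions.
// '/api/ai' is a placeholder for wherever your backend mounts its proxy routes.
const client = new ModelRiverClient({
  apiBaseUrl: '/api/ai', // backend reconnect proxy base (assumed usage)
  persist: true,         // keep the active request in localStorage
  debug: true,
});

client.on('error', (error) => console.error('ModelRiver error:', error));

// On page load: if a request was in flight before the refresh, resume it.
if (client.hasPendingRequest()) {
  // Assumed to call your backend's /api/v1/ai/reconnect proxy for a fresh
  // ws_token, then rejoin the stored channel.
  client.reconnectWithBackend();
}
```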
@@ -334,6 +396,7 @@ interface AsyncResponse {
    message: string; // "success"
    status: 'pending'; // Always "pending" for async
    channel_id: string; // Unique channel ID
+   ws_token: string; // One-time WebSocket token for authentication
    websocket_url: string; // WebSocket URL to connect to
    websocket_channel: string; // Full channel name (e.g., "ai_response:uuid")
    instructions?: {
@@ -377,12 +440,13 @@ interface WorkflowStep {
 
  ## How It Works
 
- 1. **Your backend** calls ModelRiver's `/api/ai/async` endpoint
- 2. **ModelRiver** returns `channel_id`, `websocket_url`, and `websocket_channel`
- 3. **Your backend** returns these fields to the frontend
- 4. **Your frontend** uses this SDK to connect via WebSocket using `channel_id`
+ 1. **Your backend** calls ModelRiver's `/api/v1/ai/async` endpoint
+ 2. **ModelRiver** returns `channel_id`, `ws_token`, `websocket_url`, and `websocket_channel`
+ 3. **Your backend** returns these fields to the frontend (never the API key)
+ 4. **Your frontend** uses this SDK to connect via WebSocket using `channel_id` + `ws_token`
  5. **AI responses** are delivered in real-time to your frontend
- 6. **The SDK** handles reconnection, heartbeats, and error recovery
+ 6. **The SDK** handles heartbeats, channel joins, and automatic reconnection for transient network issues.
+ 7. For **page refresh recovery**, use the persistence + reconnect helpers (`persist`, `hasPendingRequest`, `reconnect`, `reconnectWithBackend`) together with your backend `/api/v1/ai/reconnect` endpoint.
 
  ```
  ┌──────────────┐ ┌──────────────┐ ┌──────────────┐
@@ -409,14 +473,27 @@ interface WorkflowStep {
 
  ## Security
 
- The `/api/ai/async` response contains:
+ The `/api/v1/ai/async` response contains:
  - `channel_id` - Unique identifier for this request
+ - `ws_token` - Short-lived, one-time WebSocket token (per user + project)
  - `websocket_url` - WebSocket endpoint URL
  - `websocket_channel` - Channel name to join
 
- The client SDK uses `channel_id` directly to connect to the WebSocket. The `channel_id` is unique per request and is used to join the appropriate channel for receiving responses.
+ The client SDK uses `channel_id` and `ws_token` to connect to the WebSocket.
+ The `ws_token` is:
+
+ - Short-lived (≈5 minutes)
+ - Single-use (consumed on first successful WebSocket authentication)
+
+ For page refresh recovery:
+
+ - The SDK persists the active request (by default) to `localStorage`
+ - On reload, you can:
+   - either call `client.reconnect()` to reuse the stored `ws_token` (if still valid)
+   - or call `client.reconnectWithBackend()` to have your backend issue a **fresh** `ws_token` via `/api/v1/ai/reconnect`
 
- **Important**: Always obtain `channel_id` from your backend. Never expose your ModelRiver API key in frontend code.
+ **Important**: Always obtain `channel_id` and `ws_token` from your backend.
+ Never expose your ModelRiver API key in frontend code. Your backend should be the only component that talks to ModelRiver's HTTP API (`/api/v1/ai/async`, `/api/v1/ai/reconnect`, etc.).
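To make the backend-assisted reconnect path concrete, here is a sketch of the companion proxy route, under the same assumptions as the earlier Express example; the auth header and the request/response shapes are guesses, and only the `/api/v1/ai/reconnect` path comes from this README:

```typescript
import express from 'express';

const app = express();
app.use(express.json());

// Hypothetical reconnect proxy: exchanges a stored channel_id for a fresh
// ws_token. The upstream body and response shapes are assumptions.
app.post('/api/ai/reconnect', async (req, res) => {
  const upstream = await fetch('https://api.modelriver.com/api/v1/ai/reconnect', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.MODELRIVER_API_KEY}`, // assumed header
    },
    body: JSON.stringify({ channel_id: req.body.channel_id }), // assumed body
  });

  // Expected to include a fresh ws_token plus the same connection fields
  // returned by /api/v1/ai/async.
  res.json(await upstream.json());
});

app.listen(3000);
```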
 
  ## Browser Support
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@modelriver/client",
-   "version": "1.1.3",
+   "version": "1.1.35",
    "description": "Official ModelRiver client SDK for real-time AI response streaming via WebSockets",
    "author": "ModelRiver",
    "license": "MIT",
File without changes