@push.rocks/smartrust 1.1.1 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/changelog.md +20 -0
- package/dist_ts/00_commitinfo_data.js +1 -1
- package/dist_ts/classes.rustbinarylocator.js +3 -2
- package/dist_ts/classes.rustbridge.d.ts +20 -2
- package/dist_ts/classes.rustbridge.js +139 -26
- package/dist_ts/classes.streamingresponse.d.ts +36 -0
- package/dist_ts/classes.streamingresponse.js +102 -0
- package/dist_ts/index.d.ts +1 -0
- package/dist_ts/index.js +2 -1
- package/dist_ts/interfaces/config.d.ts +5 -0
- package/dist_ts/interfaces/ipc.d.ts +23 -0
- package/dist_ts/plugins.d.ts +2 -2
- package/dist_ts/plugins.js +3 -3
- package/package.json +1 -1
- package/readme.md +167 -33
- package/test/helpers/mock-rust-binary.mjs +63 -21
- package/test/test.rustbridge.node.ts +249 -0
- package/ts/00_commitinfo_data.ts +1 -1
- package/ts/classes.rustbinarylocator.ts +2 -1
- package/ts/classes.rustbridge.ts +166 -27
- package/ts/classes.streamingresponse.ts +110 -0
- package/ts/index.ts +1 -0
- package/ts/interfaces/config.ts +5 -0
- package/ts/interfaces/ipc.ts +22 -0
- package/ts/plugins.ts +2 -2
package/dist_ts/plugins.js
CHANGED
@@ -2,10 +2,10 @@
  import * as path from 'path';
  import * as fs from 'fs';
  import * as childProcess from 'child_process';
- import * as readline from 'readline';
  import * as events from 'events';
-
+ import * as url from 'url';
+ export { path, fs, childProcess, events, url };
  // @push.rocks scope
  import * as smartpath from '@push.rocks/smartpath';
  export { smartpath };
- //# sourceMappingURL=data:application/json;base64,
+ //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoicGx1Z2lucy5qcyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uL3RzL3BsdWdpbnMudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IkFBQUEsZUFBZTtBQUNmLE9BQU8sS0FBSyxJQUFJLE1BQU0sTUFBTSxDQUFDO0FBQzdCLE9BQU8sS0FBSyxFQUFFLE1BQU0sSUFBSSxDQUFDO0FBQ3pCLE9BQU8sS0FBSyxZQUFZLE1BQU0sZUFBZSxDQUFDO0FBQzlDLE9BQU8sS0FBSyxNQUFNLE1BQU0sUUFBUSxDQUFDO0FBQ2pDLE9BQU8sS0FBSyxHQUFHLE1BQU0sS0FBSyxDQUFDO0FBRTNCLE9BQU8sRUFBRSxJQUFJLEVBQUUsRUFBRSxFQUFFLFlBQVksRUFBRSxNQUFNLEVBQUUsR0FBRyxFQUFFLENBQUM7QUFFL0Msb0JBQW9CO0FBQ3BCLE9BQU8sS0FBSyxTQUFTLE1BQU0sdUJBQXVCLENBQUM7QUFFbkQsT0FBTyxFQUFFLFNBQVMsRUFBRSxDQUFDIn0=
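The dropped `readline` import above lines up with the Buffer-based newline scanner that the updated mock helper further down in this diff says it mirrors. Purely as an illustration of that technique (the names below are hypothetical, not smartrust exports), splitting a byte stream into JSON lines looks roughly like this:

```typescript
// Illustrative sketch only: accumulate bytes and emit complete '\n'-terminated lines.
// Names (pending, feedLines, handleLine) are hypothetical, not from the package.
let pending = Buffer.alloc(0);

function feedLines(chunk: Buffer, handleLine: (line: string) => void): void {
  pending = Buffer.concat([pending, chunk]);
  let newlineIndex: number;
  // 0x0a is '\n'; everything before it is one complete JSON line.
  while ((newlineIndex = pending.indexOf(0x0a)) !== -1) {
    const line = pending.subarray(0, newlineIndex).toString('utf8').trim();
    pending = pending.subarray(newlineIndex + 1);
    if (line) handleLine(line);
  }
}

// Example wiring: child.stdout.on('data', (chunk) => feedLines(chunk, (l) => console.log(l)));
```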
package/package.json
CHANGED
package/readme.md
CHANGED
@@ -1,6 +1,6 @@
  # @push.rocks/smartrust

- A type-safe,
+ A type-safe, production-ready bridge between TypeScript and Rust binaries via JSON-over-stdin/stdout IPC — with support for request/response, streaming, and event patterns.

  ## Issue Reporting and Security

@@ -16,18 +16,19 @@ pnpm install @push.rocks/smartrust

  ## Overview 🔭

- `@push.rocks/smartrust` provides a
+ `@push.rocks/smartrust` provides a complete bridge for TypeScript applications that need to communicate with Rust binaries. It handles the entire lifecycle — binary discovery, process spawning, request/response correlation, **streaming responses**, event pub/sub, and graceful shutdown — so you can focus on your command definitions instead of IPC plumbing.

- ### Why?
+ ### Why? 🤔

  If you're integrating Rust into a Node.js project, you'll inevitably need:
  - A way to **find** the compiled Rust binary across different environments (dev, CI, production, platform packages)
  - A way to **spawn** it and establish reliable two-way communication
  - **Type-safe** request/response patterns with proper error handling
+ - **Streaming responses** for progressive data processing, log tailing, or chunked transfers
  - **Event streaming** from Rust to TypeScript
  - **Graceful lifecycle management** (ready detection, clean shutdown, force kill)

- `smartrust` wraps all of this into
+ `smartrust` wraps all of this into three classes: `RustBridge`, `RustBinaryLocator`, and `StreamingResponse`.

  ## Usage 🚀

@@ -38,8 +39,9 @@ If you're integrating Rust into a Node.js project, you'll inevitably need:
  | Direction | Format | Description |
  |-----------|--------|-------------|
  | **TS → Rust** (Request) | `{"id": "req_1", "method": "start", "params": {...}}` | Command with unique ID |
- | **Rust → TS** (Response) | `{"id": "req_1", "success": true, "result": {...}}` |
+ | **Rust → TS** (Response) | `{"id": "req_1", "success": true, "result": {...}}` | Final response correlated by ID |
  | **Rust → TS** (Error) | `{"id": "req_1", "success": false, "error": "msg"}` | Error correlated by ID |
+ | **Rust → TS** (Stream Chunk) | `{"id": "req_1", "stream": true, "data": {...}}` | Intermediate chunk (zero or more) |
  | **Rust → TS** (Event) | `{"event": "ready", "data": {...}}` | Unsolicited event (no ID) |

  Your Rust binary reads JSON lines from stdin and writes JSON lines to stdout. That's it. Stderr is free for logging.

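As a side note for readers of the protocol table above: the three Rust → TS message kinds can be told apart purely by their fields. The type and function names in this sketch are hypothetical, not part of the package's exports:

```typescript
// Hypothetical sketch based on the message shapes in the table above.
type TInboundMessage =
  | { id: string; success: boolean; result?: unknown; error?: string } // final response or error
  | { id: string; stream: true; data: unknown }                        // intermediate stream chunk
  | { event: string; data: unknown };                                  // unsolicited event (no id)

function classifyMessage(msg: TInboundMessage): 'event' | 'chunk' | 'response' {
  if ('event' in msg) return 'event';
  if ('stream' in msg && msg.stream === true) return 'chunk';
  return 'response';
}
```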
@@ -49,7 +51,7 @@ Your Rust binary reads JSON lines from stdin and writes JSON lines to stdout. Th
  Start by defining a type map of commands your Rust binary supports:

  ```typescript
- import { RustBridge
+ import { RustBridge } from '@push.rocks/smartrust';

  // Define your command types
  type TMyCommands = {
@@ -92,7 +94,91 @@ bridge.on('management:configChanged', (data) => {
  bridge.kill();
  ```

- ###
+ ### Streaming Commands 🌊
+
+ For commands where the Rust binary sends a series of chunks before a final result, use `sendCommandStreaming`. This is perfect for progressive data processing, log tailing, search results, or any scenario where you want incremental output.
+
+ #### Defining Streaming Commands
+
+ Add a `chunk` field to your command type definition to mark it as streamable:
+
+ ```typescript
+ type TMyCommands = {
+   // Regular command (request → response)
+   ping: { params: {}; result: { pong: boolean } };
+
+   // Streaming command (request → chunks... → final result)
+   processData: { params: { count: number }; chunk: { index: number; progress: number }; result: { totalProcessed: number } };
+   tailLogs: { params: { lines: number }; chunk: string; result: { linesRead: number } };
+ };
+ ```
+
+ #### Consuming Streams
+
+ ```typescript
+ // Returns a StreamingResponse immediately (does NOT block)
+ const stream = bridge.sendCommandStreaming('processData', { count: 1000 });
+
+ // Consume chunks with for-await-of
+ for await (const chunk of stream) {
+   console.log(`Processing item ${chunk.index}, progress: ${chunk.progress}%`);
+ }
+
+ // Get the final result after all chunks are consumed
+ const result = await stream.result;
+ console.log(`Done! Processed ${result.totalProcessed} items`);
+ ```
+
+ #### Error Handling in Streams
+
+ Errors propagate to both the iterator and the `.result` promise:
+
+ ```typescript
+ const stream = bridge.sendCommandStreaming('processData', { count: 100 });
+
+ try {
+   for await (const chunk of stream) {
+     console.log(chunk);
+   }
+ } catch (err) {
+   console.error('Stream failed:', err.message);
+ }
+
+ // .result also rejects on error
+ try {
+   await stream.result;
+ } catch (err) {
+   console.error('Same error here:', err.message);
+ }
+ ```
+
+ #### Stream Timeout
+
+ By default, streaming commands use the same timeout as regular commands (`requestTimeoutMs`). The timeout **resets on each chunk received**, so it acts as an inactivity timeout rather than an absolute timeout. You can configure it separately:
+
+ ```typescript
+ const bridge = new RustBridge<TMyCommands>({
+   binaryName: 'my-server',
+   requestTimeoutMs: 30000, // regular command timeout: 30s
+   streamTimeoutMs: 60000,  // streaming inactivity timeout: 60s
+ });
+ ```
+
+ #### Implementing Streaming on the Rust Side
+
+ Your Rust binary sends stream chunks by writing lines with `"stream": true` before the final response:
+
+ ```rust
+ // For each chunk:
+ println!(r#"{{"id":"{}","stream":true,"data":{{"index":{},"progress":{}}}}}"#, req.id, i, pct);
+ io::stdout().flush().unwrap();
+
+ // When done, send the final response (same as non-streaming):
+ println!(r#"{{"id":"{}","success":true,"result":{{"totalProcessed":{}}}}}"#, req.id, total);
+ io::stdout().flush().unwrap();
+ ```
+
+ ### Binary Locator 🔍

  The `RustBinaryLocator` searches for your binary using a priority-ordered strategy:

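The streaming section above relies on an object that is both async-iterable and exposes a `result` promise. The package's actual `StreamingResponse` implementation is not part of this diff; the following is only a rough sketch of that general pattern, with error propagation omitted for brevity:

```typescript
// Rough, simplified sketch of an async-iterable with a separate final-result promise.
// This is NOT the package's StreamingResponse; names and details are illustrative.
class StreamingResultSketch<TChunk, TResult> {
  private queued: TChunk[] = [];
  private waiters: Array<(r: IteratorResult<TChunk>) => void> = [];
  private done = false;
  private resolveResult!: (value: TResult) => void;

  public readonly result = new Promise<TResult>((resolve) => {
    this.resolveResult = resolve;
  });

  // A bridge would call this for every {"stream": true, ...} line it receives.
  public pushChunk(chunk: TChunk): void {
    const waiter = this.waiters.shift();
    if (waiter) waiter({ value: chunk, done: false });
    else this.queued.push(chunk);
  }

  // A bridge would call this when the final {"success": true, ...} response arrives.
  public finish(result: TResult): void {
    this.done = true;
    this.resolveResult(result);
    for (const waiter of this.waiters.splice(0)) waiter({ value: undefined, done: true });
  }

  public async *[Symbol.asyncIterator](): AsyncGenerator<TChunk> {
    while (true) {
      if (this.queued.length > 0) {
        yield this.queued.shift()!;
      } else if (this.done) {
        return;
      } else {
        const next = await new Promise<IteratorResult<TChunk>>((resolve) => this.waiters.push(resolve));
        if (next.done) return;
        yield next.value;
      }
    }
  }
}
```

A real implementation would also need to reject both the iterator and the `result` promise when an error response arrives or the inactivity timeout fires.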
@@ -119,7 +205,7 @@ const binaryPath = await locator.findBinary();
  // Result is cached — call clearCache() to force re-search
  ```

- ### Configuration Reference
+ ### Configuration Reference ⚙️

  The `RustBridge` constructor accepts an `IRustBridgeOptions` object:

@@ -136,14 +222,16 @@ const bridge = new RustBridge<TMyCommands>({
    // --- Bridge Options ---
    cliArgs: ['--management'], // optional: args passed to binary (default: ['--management'])
    requestTimeoutMs: 30000, // optional: per-request timeout (default: 30000)
+   streamTimeoutMs: 30000, // optional: streaming inactivity timeout (default: requestTimeoutMs)
    readyTimeoutMs: 10000, // optional: ready event timeout (default: 10000)
+   maxPayloadSize: 50 * 1024 * 1024, // optional: max message size in bytes (default: 50MB)
    env: { RUST_LOG: 'debug' }, // optional: extra env vars for the child process
    readyEventName: 'ready', // optional: name of the ready event (default: 'ready')
    logger: myLogger, // optional: logger implementing IRustBridgeLogger
  });
  ```

- ### Events
+ ### Events 📡

  `RustBridge` extends `EventEmitter` and emits the following events:

@@ -154,7 +242,7 @@ const bridge = new RustBridge<TMyCommands>({
  | `stderr` | `string` | A line from the binary's stderr |
  | `management:<name>` | `any` | Custom event from Rust (e.g. `management:configChanged`) |

- ### Custom Logger
+ ### Custom Logger 📝

  Plug in your own logger by implementing the `IRustBridgeLogger` interface:

@@ -173,7 +261,7 @@ const bridge = new RustBridge<TMyCommands>({
  });
  ```

- ### Writing the Rust Side
+ ### Writing the Rust Side 🦀

  Your Rust binary needs to implement a simple protocol:

@@ -186,9 +274,11 @@ Your Rust binary needs to implement a simple protocol:

  3. **Write JSON responses to stdout**, each as `{"id": "...", "success": true, "result": {...}}\n`

- 4. **
+ 4. **For streaming commands**, write zero or more `{"id": "...", "stream": true, "data": {...}}\n` chunks before the final response

- 5. **
+ 5. **Emit events** anytime by writing `{"event": "name", "data": {...}}\n` to stdout
+
+ 6. **Use stderr** for logging — it won't interfere with the IPC protocol

  Here's a minimal Rust skeleton:

@@ -213,6 +303,13 @@ struct Response {
      error: Option<String>,
  }

+ #[derive(Serialize)]
+ struct StreamChunk {
+     id: String,
+     stream: bool,
+     data: serde_json::Value,
+ }
+
  fn main() {
      // Signal ready
      println!(r#"{{"event":"ready","data":{{"version":"1.0.0"}}}}"#);
@@ -223,24 +320,50 @@ fn main() {
          let line = line.unwrap();
          let req: Request = serde_json::from_str(&line).unwrap();

-
- "ping" =>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+         match req.method.as_str() {
+             "ping" => {
+                 let resp = Response {
+                     id: req.id,
+                     success: true,
+                     result: Some(serde_json::json!({"pong": true})),
+                     error: None,
+                 };
+                 println!("{}", serde_json::to_string(&resp).unwrap());
+                 io::stdout().flush().unwrap();
+             }
+             "processData" => {
+                 let count = req.params["count"].as_u64().unwrap_or(0);
+                 // Send stream chunks
+                 for i in 0..count {
+                     let chunk = StreamChunk {
+                         id: req.id.clone(),
+                         stream: true,
+                         data: serde_json::json!({"index": i, "progress": ((i+1) * 100 / count)}),
+                     };
+                     println!("{}", serde_json::to_string(&chunk).unwrap());
+                     io::stdout().flush().unwrap();
+                 }
+                 // Send final response
+                 let resp = Response {
+                     id: req.id,
+                     success: true,
+                     result: Some(serde_json::json!({"totalProcessed": count})),
+                     error: None,
+                 };
+                 println!("{}", serde_json::to_string(&resp).unwrap());
+                 io::stdout().flush().unwrap();
+             }
+             _ => {
+                 let resp = Response {
+                     id: req.id,
+                     success: false,
+                     result: None,
+                     error: Some(format!("Unknown method: {}", req.method)),
+                 };
+                 println!("{}", serde_json::to_string(&resp).unwrap());
+                 io::stdout().flush().unwrap();
+             }
+         }
      }
  }
  ```
@@ -254,9 +377,17 @@ fn main() {
  | `constructor` | `new RustBridge<T>(options: IRustBridgeOptions)` | Create a new bridge instance |
  | `spawn()` | `Promise<boolean>` | Spawn the binary and wait for ready; returns `false` on failure |
  | `sendCommand(method, params)` | `Promise<TCommands[K]['result']>` | Send a typed command and await the response |
+ | `sendCommandStreaming(method, params)` | `StreamingResponse<TChunk, TResult>` | Send a streaming command; returns immediately |
  | `kill()` | `void` | SIGTERM the process, reject pending requests, force SIGKILL after 5s |
  | `running` | `boolean` | Whether the bridge is currently connected |

+ ### `StreamingResponse<TChunk, TResult>`
+
+ | Method / Property | Type | Description |
+ |---|---|---|
+ | `[Symbol.asyncIterator]()` | `AsyncIterator<TChunk>` | Enables `for await...of` consumption of chunks |
+ | `result` | `Promise<TResult>` | Resolves with the final result after stream ends |
+
  ### `RustBinaryLocator`

  | Method / Property | Signature | Description |
@@ -265,9 +396,9 @@ fn main() {
  | `findBinary()` | `Promise<string \| null>` | Find the binary using the priority search; result is cached |
  | `clearCache()` | `void` | Clear the cached path to force a fresh search |

- ### Exported Interfaces
+ ### Exported Interfaces & Types

- | Interface | Description |
+ | Interface / Type | Description |
  |---|---|
  | `IRustBridgeOptions` | Full configuration for `RustBridge` |
  | `IBinaryLocatorOptions` | Configuration for `RustBinaryLocator` |
@@ -275,8 +406,11 @@
  | `IManagementRequest` | IPC request shape: `{ id, method, params }` |
  | `IManagementResponse` | IPC response shape: `{ id, success, result?, error? }` |
  | `IManagementEvent` | IPC event shape: `{ event, data }` |
+ | `IManagementStreamChunk` | IPC stream chunk shape: `{ id, stream: true, data }` |
  | `ICommandDefinition` | Single command definition: `{ params, result }` |
  | `TCommandMap` | `Record<string, ICommandDefinition>` |
+ | `TStreamingCommandKeys<T>` | Extracts keys from a command map that have a `chunk` field |
+ | `TExtractChunk<T>` | Extracts the chunk type from a streaming command definition |

  ## License and Legal Information

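The `TStreamingCommandKeys<T>` and `TExtractChunk<T>` helpers are only described in the table above, not shown in this diff. As a hypothetical sketch of conditional types with the described behavior (the package's actual definitions may differ):

```typescript
// Hypothetical sketches matching the descriptions above, not the package's source.
type TCommandDefSketch = { params: unknown; result: unknown; chunk?: unknown };
type TCommandMapSketch = Record<string, TCommandDefSketch>;

// Keys of commands that declare a `chunk` field, i.e. the streamable ones.
type TStreamingCommandKeysSketch<T extends TCommandMapSketch> = {
  [K in keyof T]: T[K] extends { chunk: unknown } ? K : never;
}[keyof T];

// The chunk type carried by a single streaming command definition.
type TExtractChunkSketch<TDef> = TDef extends { chunk: infer C } ? C : never;

// Checked against the readme's example command map:
type TDemoCommands = {
  ping: { params: {}; result: { pong: boolean } };
  processData: { params: { count: number }; chunk: { index: number; progress: number }; result: { totalProcessed: number } };
};
type TDemoStreamingKeys = TStreamingCommandKeysSketch<TDemoCommands>; // "processData"
type TDemoChunk = TExtractChunkSketch<TDemoCommands['processData']>;  // { index: number; progress: number }
```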
package/test/helpers/mock-rust-binary.mjs
CHANGED
@@ -2,22 +2,46 @@

  /**
   * Mock "Rust binary" for testing the RustBridge IPC protocol.
-  * Reads JSON lines from stdin, writes JSON lines to stdout.
+  * Reads JSON lines from stdin via Buffer-based scanner, writes JSON lines to stdout.
   * Emits a ready event on startup.
   */

- import { createInterface } from 'readline';
-
  // Emit ready event
  const readyEvent = JSON.stringify({ event: 'ready', data: { version: '1.0.0' } });
  process.stdout.write(readyEvent + '\n');

-
+ // Buffer-based newline scanner for stdin (mirrors the RustBridge approach)
+ let stdinBuffer = Buffer.alloc(0);
+
+ process.stdin.on('data', (chunk) => {
+   stdinBuffer = Buffer.concat([stdinBuffer, chunk]);
+
+   let newlineIndex;
+   while ((newlineIndex = stdinBuffer.indexOf(0x0A)) !== -1) {
+     const lineBuffer = stdinBuffer.subarray(0, newlineIndex);
+     stdinBuffer = stdinBuffer.subarray(newlineIndex + 1);
+     const line = lineBuffer.toString('utf8').trim();
+     if (line) {
+       handleLine(line);
+     }
+   }
+ });

-
+ /**
+  * Backpressure-aware write to stdout.
+  */
+ function writeResponse(data) {
+   const json = JSON.stringify(data) + '\n';
+   if (!process.stdout.write(json)) {
+     // Wait for drain before continuing
+     process.stdout.once('drain', () => {});
+   }
+ }
+
+ function handleLine(line) {
    let request;
    try {
-     request = JSON.parse(line
+     request = JSON.parse(line);
    } catch {
      return;
    }
@@ -26,35 +50,53 @@ rl.on('line', (line) => {

    if (method === 'echo') {
      // Echo back the params as result
-
-
+     writeResponse({ id, success: true, result: params });
+   } else if (method === 'largeEcho') {
+     // Echo back params (same as echo, named distinctly for large payload tests)
+     writeResponse({ id, success: true, result: params });
    } else if (method === 'error') {
      // Return an error
-
-     process.stdout.write(response + '\n');
+     writeResponse({ id, success: false, error: 'Test error message' });
    } else if (method === 'emitEvent') {
      // Emit a custom event, then respond with success
-
-
-     const response = JSON.stringify({ id, success: true, result: null });
-     process.stdout.write(response + '\n');
+     writeResponse({ event: params.eventName, data: params.eventData });
+     writeResponse({ id, success: true, result: null });
    } else if (method === 'slow') {
      // Respond after a delay
      setTimeout(() => {
-
-       process.stdout.write(response + '\n');
+       writeResponse({ id, success: true, result: { delayed: true } });
      }, 100);
+   } else if (method === 'streamEcho') {
+     // Send params.count stream chunks, then final response
+     const count = params.count || 0;
+     let sent = 0;
+     const interval = setInterval(() => {
+       if (sent < count) {
+         writeResponse({ id, stream: true, data: { index: sent, value: `chunk_${sent}` } });
+         sent++;
+       } else {
+         clearInterval(interval);
+         writeResponse({ id, success: true, result: { totalChunks: count } });
+       }
+     }, 10);
+   } else if (method === 'streamError') {
+     // Send 1 chunk, then error
+     writeResponse({ id, stream: true, data: { index: 0, value: 'before_error' } });
+     setTimeout(() => {
+       writeResponse({ id, success: false, error: 'Stream error after chunk' });
+     }, 20);
+   } else if (method === 'streamEmpty') {
+     // Zero chunks, immediate final response
+     writeResponse({ id, success: true, result: { totalChunks: 0 } });
    } else if (method === 'exit') {
      // Graceful exit
-
-     process.stdout.write(response + '\n');
+     writeResponse({ id, success: true, result: null });
      process.exit(0);
    } else {
      // Unknown command
-
-     process.stdout.write(response + '\n');
+     writeResponse({ id, success: false, error: `Unknown method: ${method}` });
    }
- }
+ }

  // Handle SIGTERM gracefully
  process.on('SIGTERM', () => {