@soederpop/luca 0.0.26 → 0.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  // Auto-generated bootstrap content
2
- // Generated at: 2026-03-22T20:57:24.866Z
2
+ // Generated at: 2026-03-23T07:45:58.711Z
3
3
  // Source: docs/bootstrap/*.md, docs/bootstrap/templates/*, docs/examples/*.md, docs/tutorials/*.md
4
4
  //
5
5
  // Do not edit manually. Run: luca build-bootstrap
@@ -1167,6 +1167,91 @@ console.log('Running after killAll:', remaining.length)
1167
1167
  ## Summary
1168
1168
 
1169
1169
  This demo covered the \`processManager\` feature: spawning processes that return handles immediately, tracking them by ID or tag, listing all tracked processes, and killing them individually or all at once. It is the right tool for orchestrating background services, dev servers, and any scenario where you need non-blocking process management with lifecycle events.
1170
+ `,
1171
+ "assistant-with-process-manager.md": `---
1172
+ title: "Assistant with ProcessManager Tools"
1173
+ tags: [assistant, processManager, tools, runtime, use]
1174
+ lastTested: null
1175
+ lastTestPassed: null
1176
+ ---
1177
+
1178
+ # Assistant with ProcessManager Tools
1179
+
1180
+ Create an assistant at runtime, give it processManager tools, and watch it orchestrate long-running processes — spawning ping and top, checking their output over time, running a quick command in between, then coming back to report.
1181
+
1182
+ ## The Demo
1183
+
1184
+ \`\`\`ts
1185
+ const pm = container.feature('processManager', { enable: true, autoCleanup: true })
1186
+ const ui = container.feature('ui')
1187
+
1188
+ const assistant = container.feature('assistant', {
1189
+ systemPrompt: [
1190
+ 'You are a process management assistant with tools to spawn, monitor, inspect, and kill background processes.',
1191
+ 'When asked to check on processes, use getProcessOutput to read their latest output and summarize what you see.',
1192
+ 'For ping output, parse the lines and calculate the average response time yourself.',
1193
+ 'For top output, summarize CPU and memory usage from the header lines.',
1194
+ 'Always be concise — give the data, not a lecture.',
1195
+ ].join('\\n'),
1196
+ model: 'gpt-4.1-mini',
1197
+ })
1198
+
1199
+ assistant.use(pm)
1200
+ await assistant.start()
1201
+
1202
+ const tools = Object.keys(assistant.tools)
1203
+ console.log(ui.colors.cyan('Tools registered:'), tools.join(', '))
1204
+ console.log()
1205
+
1206
+ // ── Helper to print assistant responses ──────────────────────────────
1207
+ const ask = async (label, question) => {
1208
+ console.log(ui.colors.dim(\`── \${label} ──\`))
1209
+ console.log(ui.colors.yellow('→'), question.split('\\n')[0])
1210
+ const response = await assistant.ask(question)
1211
+ console.log(ui.markdown(response))
1212
+ console.log()
1213
+ return response
1214
+ }
1215
+
1216
+ // Step 1: Spawn long-running processes
1217
+ await ask('SPAWN',
1218
+ 'Spawn two background processes:\\n' +
1219
+ '1. Ping google.com with tag "ping-google" (use: ping -c 20 google.com)\\n' +
1220
+ '2. Run top in batch mode with tag "top-monitor" (use: top -l 5 -s 2)\\n' +
1221
+ 'Confirm both are running.'
1222
+ )
1223
+
1224
+ // Step 2: Wait, then check in on their output
1225
+ await new Promise(r => setTimeout(r, 4000))
1226
+ await ask('CHECK-IN #1',
1227
+ 'Check on both processes. For ping-google, read the stdout and tell me how many replies so far and the average response time. For top-monitor, read the stdout and tell me the current CPU usage summary.'
1228
+ )
1229
+
1230
+ // Step 3: Quick one-shot command while the others keep going
1231
+ await ask('QUICK COMMAND',
1232
+ 'Run a quick command: "uptime" — tell me the system load averages.'
1233
+ )
1234
+
1235
+ // Step 4: Second check-in — more data should have accumulated
1236
+ await new Promise(r => setTimeout(r, 4000))
1237
+ await ask('CHECK-IN #2',
1238
+ 'Check on ping-google again. How many replies now vs last time? What is the average response time? Also list all tracked processes and their status.'
1239
+ )
1240
+
1241
+ // Step 5: Kill everything
1242
+ await ask('CLEANUP',
1243
+ 'Kill all running processes and confirm they are stopped.'
1244
+ )
1245
+
1246
+ // Belt and suspenders
1247
+ pm.killAll()
1248
+ const remaining = pm.list().filter(h => h.status === 'running')
1249
+ console.log(ui.colors.green('Running after cleanup:'), remaining.length)
1250
+ \`\`\`
1251
+
1252
+ ## Summary
1253
+
1254
+ This example showed a runtime assistant orchestrating real background processes over multiple conversation turns — spawning long-running \`ping\` and \`top\` commands, checking in on their output as it accumulates, running a quick \`uptime\` in between, then coming back for a second check-in before cleaning everything up. The assistant parsed ping times, summarized CPU usage, and managed the full lifecycle without any hardcoded logic — just natural language and processManager tools.
1170
1255
  `,
1171
1256
  "postgres.md": `---
1172
1257
  title: "PostgreSQL"
@@ -2054,6 +2139,135 @@ Attach network volumes to pods via the \`networkVolumeId\` option in \`createPod
2054
2139
  ## Summary
2055
2140
 
2056
2141
  The \`runpod\` feature provides complete GPU cloud management. Create pods from templates, manage lifecycle (start/stop/remove), SSH into running pods, and manage network storage volumes. Supports polling for readiness and file transfer operations. Key methods: \`createPod()\`, \`getPods()\`, \`waitForPod()\`, \`getShell()\`, \`listVolumes()\`, \`createVolume()\`.
2142
+ `,
2143
+ "websocket-ask-and-reply-example.md": `---
2144
+ title: "websocket-ask-and-reply"
2145
+ tags: [websocket, client, server, ask, reply, rpc]
2146
+ lastTested: null
2147
+ lastTestPassed: null
2148
+ ---
2149
+
2150
+ # websocket-ask-and-reply
2151
+
2152
+ Request/response conversations over WebSocket using \`ask()\` and \`reply()\`.
2153
+
2154
+ ## Overview
2155
+
2156
+ The WebSocket client and server both support a request/response protocol on top of the normal fire-and-forget message stream. The client can \`ask()\` the server a question and await the answer. The server can \`ask()\` a connected client the same way. Under the hood it works with correlation IDs — \`requestId\` on the request, \`replyTo\` on the response — but you never have to touch those directly.
2157
+
2158
+ ## Setup
2159
+
2160
+ Declare the shared references that all blocks will use, and wire up the server's message handler. This block is synchronous so the variables persist across subsequent blocks.
2161
+
2162
+ \`\`\`ts
2163
+ var port = 0
2164
+ var server = container.server('websocket', { json: true })
2165
+ var client = null
2166
+
2167
+ server.on('message', (data, ws) => {
2168
+ if (data.type === 'add') {
2169
+ data.reply({ sum: data.data.a + data.data.b })
2170
+ } else if (data.type === 'divide') {
2171
+ if (data.data.b === 0) {
2172
+ data.replyError('division by zero')
2173
+ } else {
2174
+ data.reply({ result: data.data.a / data.data.b })
2175
+ }
2176
+ }
2177
+ })
2178
+ console.log('Server and handlers configured')
2179
+ \`\`\`
2180
+
2181
+ ## Start Server and Connect Client
2182
+
2183
+ \`\`\`ts
2184
+ port = await networking.findOpenPort(19900)
2185
+ await server.start({ port })
2186
+ console.log('Server listening on port', port)
2187
+
2188
+ client = container.client('websocket', { baseURL: \`ws://localhost:\${port}\` })
2189
+ await client.connect()
2190
+ console.log('Client connected')
2191
+ \`\`\`
2192
+
2193
+ ## Client Asks the Server
2194
+
2195
+ \`ask(type, data, timeout?)\` sends a message and returns a promise that resolves with the response payload.
2196
+
2197
+ \`\`\`ts
2198
+ var sum = await client.ask('add', { a: 3, b: 4 })
2199
+ console.log('3 + 4 =', sum.sum)
2200
+
2201
+ var quotient = await client.ask('divide', { a: 10, b: 3 })
2202
+ console.log('10 / 3 =', quotient.result.toFixed(2))
2203
+ \`\`\`
2204
+
2205
+ ## Handling Errors
2206
+
2207
+ When the server calls \`replyError(message)\`, the client's \`ask()\` promise rejects with that message.
2208
+
2209
+ \`\`\`ts
2210
+ try {
2211
+ await client.ask('divide', { a: 1, b: 0 })
2212
+ } catch (err) {
2213
+ console.log('Caught error:', err.message)
2214
+ }
2215
+ \`\`\`
2216
+
2217
+ ## Server Asks the Client
2218
+
2219
+ The server can also ask a connected client. The client handles incoming requests by listening for messages with a \`requestId\` and sending back a \`replyTo\` response.
2220
+
2221
+ \`\`\`ts
2222
+ client.on('message', (data) => {
2223
+ if (data.requestId && data.type === 'whoAreYou') {
2224
+ client.send({ replyTo: data.requestId, data: { name: 'luca-client', version: '1.0' } })
2225
+ }
2226
+ })
2227
+
2228
+ var firstClient = [...server.connections][0]
2229
+ var identity = await server.ask(firstClient, 'whoAreYou')
2230
+ console.log('Client identified as:', identity.name, identity.version)
2231
+ \`\`\`
2232
+
2233
+ ## Timeouts
2234
+
2235
+ If nobody replies, \`ask()\` rejects after the timeout (default 10s, configurable as the third argument).
2236
+
2237
+ \`\`\`ts
2238
+ try {
2239
+ await client.ask('noop', {}, 500)
2240
+ } catch (err) {
2241
+ console.log('Timed out as expected:', err.message)
2242
+ }
2243
+ \`\`\`
2244
+
2245
+ ## Regular Messages Still Work
2246
+
2247
+ Messages without \`requestId\` flow through the normal \`message\` event as always. The ask/reply protocol is purely additive.
2248
+
2249
+ \`\`\`ts
2250
+ var received = null
2251
+ server.on('message', (data) => {
2252
+ if (data.type === 'ping') received = data
2253
+ })
2254
+
2255
+ await client.send({ type: 'ping', ts: Date.now() })
2256
+ await new Promise(r => setTimeout(r, 50))
2257
+ console.log('Regular message received:', received.type, '— no requestId:', received.requestId === undefined)
2258
+ \`\`\`
2259
+
2260
+ ## Cleanup
2261
+
2262
+ \`\`\`ts
2263
+ await client.disconnect()
2264
+ await server.stop()
2265
+ console.log('Done')
2266
+ \`\`\`
2267
+
2268
+ ## Summary
2269
+
2270
+ The ask/reply protocol gives you awaitable request/response over WebSocket without leaving the Luca helper API. The client calls \`ask(type, data)\` and gets back a promise. The server's message handler gets \`reply()\` and \`replyError()\` injected on any message that carries a \`requestId\`. The server can also \`ask()\` a specific client. Timeouts, error propagation, and cleanup of pending requests on disconnect are all handled automatically.
2057
2271
  `,
2058
2272
  "port-exposer.md": `---
2059
2273
  title: "Port Exposer"
@@ -1,4 +1,4 @@
1
1
  // Generated at compile time — do not edit manually
2
- export const BUILD_SHA = '4f7677c'
2
+ export const BUILD_SHA = 'c1e466d'
3
3
  export const BUILD_BRANCH = 'main'
4
- export const BUILD_DATE = '2026-03-22T20:57:24Z'
4
+ export const BUILD_DATE = '2026-03-23T07:45:58Z'
@@ -19,6 +19,12 @@ declare module '../client' {
19
19
  * providing a clean interface for sending/receiving messages, tracking connection
20
20
  * state, and optional auto-reconnection with exponential backoff.
21
21
  *
22
+ * Supports ask/reply semantics when paired with the Luca WebSocket server.
23
+ * The client can `ask(type, data)` the server and await a typed response.
24
+ * Incoming messages with a `requestId` are treated as asks from the server
25
+ * and can be answered with `send({ replyTo: requestId, data })`. Requests
26
+ * time out if no reply arrives within the configurable window.
27
+ *
22
28
  * Events emitted:
23
29
  * - `open` — connection established
24
30
  * - `message` — message received (JSON-parsed when possible)
@@ -36,14 +42,24 @@ declare module '../client' {
36
42
  * ws.on('message', (data) => console.log('Received:', data))
37
43
  * await ws.connect()
38
44
  * await ws.send({ type: 'hello' })
45
+ *
46
+ * // ask/reply: request data from the server
47
+ * const result = await ws.ask('getUser', { id: 42 })
39
48
  * ```
40
49
  */
50
+ export interface PendingRequest<T = any> {
51
+ resolve: (value: T) => void
52
+ reject: (reason: any) => void
53
+ timer: ReturnType<typeof setTimeout>
54
+ }
55
+
41
56
  export class WebSocketClient<
42
57
  T extends WebSocketClientState = WebSocketClientState,
43
58
  K extends WebSocketClientOptions = WebSocketClientOptions
44
59
  > extends Client<T, K> {
45
60
  ws!: WebSocket
46
61
  _intentionalClose: boolean
62
+ _pending = new Map<string, PendingRequest>()
47
63
 
48
64
  static override shortcut = "clients.websocket" as const
49
65
  static override stateSchema = WebSocketClientStateSchema
@@ -97,7 +113,9 @@ export class WebSocketClient<
97
113
  try {
98
114
  data = JSON.parse(data)
99
115
  } catch {}
100
- this.emit('message', data)
116
+ if (!this._handleReply(data)) {
117
+ this.emit('message', data)
118
+ }
101
119
  }
102
120
 
103
121
  ws.onclose = (event: any) => {
@@ -130,12 +148,69 @@ export class WebSocketClient<
130
148
  this.ws.send(JSON.stringify(data))
131
149
  }
132
150
 
151
+ /**
152
+ * Send a request and wait for a correlated response. The message is sent
153
+ * with a unique `requestId`; the remote side is expected to reply with a
154
+ * message containing `replyTo` set to that same ID.
155
+ *
156
+ * @param type - A string identifying the request type
157
+ * @param data - Optional payload to include with the request
158
+ * @param timeout - How long to wait for a response (default 10 000 ms)
159
+ * @returns The `data` field of the response message
160
+ *
161
+ * @example
162
+ * ```typescript
163
+ * const result = await ws.ask('getUser', { id: 42 })
164
+ * ```
165
+ */
166
+ async ask<R = any>(type: string, data?: any, timeout = 10000): Promise<R> {
167
+ const requestId = this.container.utils.uuid()
168
+
169
+ return new Promise<R>((resolve, reject) => {
170
+ const timer = setTimeout(() => {
171
+ this._pending.delete(requestId)
172
+ reject(new Error(`ask("${type}") timed out after ${timeout}ms`))
173
+ }, timeout)
174
+
175
+ this._pending.set(requestId, { resolve, reject, timer })
176
+ this.send({ type, data, requestId })
177
+ })
178
+ }
179
+
180
+ /** @internal Resolve a pending ask() if the incoming message has a replyTo field. Returns true if handled. */
181
+ _handleReply(message: any): boolean {
182
+ if (!message || !message.replyTo) return false
183
+
184
+ const pending = this._pending.get(message.replyTo)
185
+ if (!pending) return false
186
+
187
+ this._pending.delete(message.replyTo)
188
+ clearTimeout(pending.timer)
189
+
190
+ if (message.error) {
191
+ pending.reject(new Error(message.error))
192
+ } else {
193
+ pending.resolve(message.data)
194
+ }
195
+ return true
196
+ }
197
+
198
+ /** @internal Reject all pending ask() calls — used on disconnect. */
199
+ _rejectAllPending(reason: string) {
200
+ for (const [id, pending] of this._pending) {
201
+ clearTimeout(pending.timer)
202
+ pending.reject(new Error(reason))
203
+ }
204
+ this._pending.clear()
205
+ }
206
+
133
207
  /**
134
208
  * Gracefully close the WebSocket connection. Suppresses auto-reconnect
135
209
  * and updates connection state to disconnected.
136
210
  */
137
211
  async disconnect(): Promise<this> {
138
212
  this._intentionalClose = true
213
+ this._rejectAllPending('WebSocket disconnected')
139
214
  if (this.ws) {
140
215
  this.ws.close()
141
216
  }
package/src/helper.ts CHANGED
@@ -138,9 +138,16 @@ export abstract class Helper<T extends HelperState = HelperState, K extends Help
138
138
  this.container.emit('helperInitialized', this)
139
139
  }
140
140
 
141
- /**
141
+ /**
142
+ * The static shortcut identifier for this helper type, e.g. "features.assistant".
143
+ */
144
+ get shortcut(): string {
145
+ return (this.constructor as any).shortcut || ''
146
+ }
147
+
148
+ /**
142
149
  * Every helper has a cache key which is computed at the time it is created through the container.
143
- *
150
+ *
144
151
  * This ensures only a single instance of the helper exists for the requested options.
145
152
  */
146
153
  get cacheKey() {
@@ -189,6 +196,26 @@ export abstract class Helper<T extends HelperState = HelperState, K extends Help
189
196
  return this
190
197
  }
191
198
 
199
+ /**
200
+ * Called when another helper (e.g. an assistant) consumes this helper's
201
+ * tools via `use()`. Override this to detect the consumer type and react —
202
+ * for example, adding system prompt extensions to an assistant.
203
+ *
204
+ * Use `consumer.shortcut` to identify the consumer type:
205
+ * ```typescript
206
+ * override setupToolsConsumer(consumer: Helper) {
207
+ * if (consumer.shortcut === 'features.assistant') {
208
+ * (consumer as any).addSystemPromptExtension('myFeature', 'usage hints here')
209
+ * }
210
+ * }
211
+ * ```
212
+ *
213
+ * The default implementation is a no-op.
214
+ *
215
+ * @param consumer - The helper instance that is consuming this helper's tools
216
+ */
217
+ setupToolsConsumer(consumer: Helper): void {}
218
+
192
219
  /**
193
220
  * Collect all tools from the inheritance chain and instance, returning
194
221
  * { schemas, handlers } with matching keys. Walks the prototype chain