@soederpop/luca 0.0.26 → 0.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,84 @@
1
+ ---
2
+ title: "Assistant with ProcessManager Tools"
3
+ tags: [assistant, processManager, tools, runtime, use]
4
+ lastTested: null
5
+ lastTestPassed: null
6
+ ---
7
+
8
+ # Assistant with ProcessManager Tools
9
+
10
+ Create an assistant at runtime, give it processManager tools, and watch it orchestrate long-running processes — spawning ping and top, checking their output over time, running a quick command in between, then coming back to report.
11
+
12
+ ## The Demo
13
+
14
+ ```ts
15
+ const pm = container.feature('processManager', { enable: true, autoCleanup: true })
16
+ const ui = container.feature('ui')
17
+
18
+ const assistant = container.feature('assistant', {
19
+ systemPrompt: [
20
+ 'You are a process management assistant with tools to spawn, monitor, inspect, and kill background processes.',
21
+ 'When asked to check on processes, use getProcessOutput to read their latest output and summarize what you see.',
22
+ 'For ping output, parse the lines and calculate the average response time yourself.',
23
+ 'For top output, summarize CPU and memory usage from the header lines.',
24
+ 'Always be concise — give the data, not a lecture.',
25
+ ].join('\n'),
26
+ model: 'gpt-4.1-mini',
27
+ })
28
+
29
+ assistant.use(pm)
30
+ await assistant.start()
31
+
32
+ const tools = Object.keys(assistant.tools)
33
+ console.log(ui.colors.cyan('Tools registered:'), tools.join(', '))
34
+ console.log()
35
+
36
+ // ── Helper to print assistant responses ──────────────────────────────
37
+ const ask = async (label, question) => {
38
+ console.log(ui.colors.dim(`── ${label} ──`))
39
+ console.log(ui.colors.yellow('→'), question.split('\n')[0])
40
+ const response = await assistant.ask(question)
41
+ console.log(ui.markdown(response))
42
+ console.log()
43
+ return response
44
+ }
45
+
46
+ // Step 1: Spawn long-running processes
47
+ await ask('SPAWN',
48
+ 'Spawn two background processes:\n' +
49
+ '1. Ping google.com with tag "ping-google" (use: ping -c 20 google.com)\n' +
50
+ '2. Run top in batch mode with tag "top-monitor" (use: top -l 5 -s 2)\n' +
51
+ 'Confirm both are running.'
52
+ )
53
+
54
+ // Step 2: Wait, then check in on their output
55
+ await new Promise(r => setTimeout(r, 4000))
56
+ await ask('CHECK-IN #1',
57
+ 'Check on both processes. For ping-google, read the stdout and tell me how many replies so far and the average response time. For top-monitor, read the stdout and tell me the current CPU usage summary.'
58
+ )
59
+
60
+ // Step 3: Quick one-shot command while the others keep going
61
+ await ask('QUICK COMMAND',
62
+ 'Run a quick command: "uptime" — tell me the system load averages.'
63
+ )
64
+
65
+ // Step 4: Second check-in — more data should have accumulated
66
+ await new Promise(r => setTimeout(r, 4000))
67
+ await ask('CHECK-IN #2',
68
+ 'Check on ping-google again. How many replies now vs last time? What is the average response time? Also list all tracked processes and their status.'
69
+ )
70
+
71
+ // Step 5: Kill everything
72
+ await ask('CLEANUP',
73
+ 'Kill all running processes and confirm they are stopped.'
74
+ )
75
+
76
+ // Belt and suspenders
77
+ pm.killAll()
78
+ const remaining = pm.list().filter(h => h.status === 'running')
79
+ console.log(ui.colors.green('Running after cleanup:'), remaining.length)
80
+ ```
81
+
82
+ ## Summary
83
+
84
+ This example showed a runtime assistant orchestrating real background processes over multiple conversation turns — spawning long-running `ping` and `top` commands, checking in on their output as it accumulates, running a quick `uptime` in between, then coming back for a second check-in before cleaning everything up. The assistant parsed ping times, summarized CPU usage, and managed the full lifecycle without any hardcoded logic — just natural language and processManager tools.
@@ -0,0 +1,128 @@
1
+ ---
2
+ title: "websocket-ask-and-reply"
3
+ tags: [websocket, client, server, ask, reply, rpc]
4
+ lastTested: null
5
+ lastTestPassed: null
6
+ ---
7
+
8
+ # websocket-ask-and-reply
9
+
10
+ Request/response conversations over WebSocket using `ask()` and `reply()`.
11
+
12
+ ## Overview
13
+
14
+ The WebSocket client and server both support a request/response protocol on top of the normal fire-and-forget message stream. The client can `ask()` the server a question and await the answer. The server can `ask()` a connected client the same way. Under the hood it works with correlation IDs — `requestId` on the request, `replyTo` on the response — but you never have to touch those directly.
15
+
16
+ ## Setup
17
+
18
+ Declare the shared references that all blocks will use, and wire up the server's message handler. This block is synchronous, and its `var` declarations persist, so later blocks can reuse the same `server`, `client`, and `port`.
19
+
20
+ ```ts
21
+ var port = 0
22
+ var server = container.server('websocket', { json: true })
23
+ var client = null
24
+
25
+ server.on('message', (data, ws) => {
26
+ if (data.type === 'add') {
27
+ data.reply({ sum: data.data.a + data.data.b })
28
+ } else if (data.type === 'divide') {
29
+ if (data.data.b === 0) {
30
+ data.replyError('division by zero')
31
+ } else {
32
+ data.reply({ result: data.data.a / data.data.b })
33
+ }
34
+ }
35
+ })
36
+ console.log('Server and handlers configured')
37
+ ```
38
+
39
+ ## Start Server and Connect Client
40
+
41
+ ```ts
42
+ port = await networking.findOpenPort(19900)
43
+ await server.start({ port })
44
+ console.log('Server listening on port', port)
45
+
46
+ client = container.client('websocket', { baseURL: `ws://localhost:${port}` })
47
+ await client.connect()
48
+ console.log('Client connected')
49
+ ```
50
+
51
+ ## Client Asks the Server
52
+
53
+ `ask(type, data, timeout?)` sends a message and returns a promise that resolves with the response payload.
54
+
55
+ ```ts
56
+ var sum = await client.ask('add', { a: 3, b: 4 })
57
+ console.log('3 + 4 =', sum.sum)
58
+
59
+ var quotient = await client.ask('divide', { a: 10, b: 3 })
60
+ console.log('10 / 3 =', quotient.result.toFixed(2))
61
+ ```
62
+
63
+ ## Handling Errors
64
+
65
+ When the server calls `replyError(message)`, the client's `ask()` promise rejects with that message.
66
+
67
+ ```ts
68
+ try {
69
+ await client.ask('divide', { a: 1, b: 0 })
70
+ } catch (err) {
71
+ console.log('Caught error:', err.message)
72
+ }
73
+ ```
74
+
75
+ ## Server Asks the Client
76
+
77
+ The server can also ask a connected client. The client handles incoming requests by listening for messages that carry a `requestId` and replying with a message whose `replyTo` field echoes that `requestId`.
78
+
79
+ ```ts
80
+ client.on('message', (data) => {
81
+ if (data.requestId && data.type === 'whoAreYou') {
82
+ client.send({ replyTo: data.requestId, data: { name: 'luca-client', version: '1.0' } })
83
+ }
84
+ })
85
+
86
+ var firstClient = [...server.connections][0]
87
+ var identity = await server.ask(firstClient, 'whoAreYou')
88
+ console.log('Client identified as:', identity.name, identity.version)
89
+ ```
90
+
91
+ ## Timeouts
92
+
93
+ If nobody replies, `ask()` rejects after the timeout (default 10s, configurable as the third argument).
94
+
95
+ ```ts
96
+ try {
97
+ await client.ask('noop', {}, 500)
98
+ } catch (err) {
99
+ console.log('Timed out as expected:', err.message)
100
+ }
101
+ ```
102
+
103
+ ## Regular Messages Still Work
104
+
105
+ Messages without `requestId` flow through the normal `message` event as always. The ask/reply protocol is purely additive.
106
+
107
+ ```ts
108
+ var received = null
109
+ server.on('message', (data) => {
110
+ if (data.type === 'ping') received = data
111
+ })
112
+
113
+ await client.send({ type: 'ping', ts: Date.now() })
114
+ await new Promise(r => setTimeout(r, 50))
115
+ console.log('Regular message received:', received.type, '— no requestId:', received.requestId === undefined)
116
+ ```
117
+
118
+ ## Cleanup
119
+
120
+ ```ts
121
+ await client.disconnect()
122
+ await server.stop()
123
+ console.log('Done')
124
+ ```
125
+
126
+ ## Summary
127
+
128
+ The ask/reply protocol gives you awaitable request/response over WebSocket without leaving the Luca helper API. The client calls `ask(type, data)` and gets back a promise. The server's message handler gets `reply()` and `replyError()` injected on any message that carries a `requestId`. The server can also `ask()` a specific client. Timeouts, error propagation, and cleanup of pending requests on disconnect are all handled automatically.
@@ -0,0 +1,249 @@
1
+ # Window Manager Fix
2
+
3
+ ## Problem
4
+
5
+ The current `windowManager` design allows any Luca process to call `listen()` on the same well-known Unix socket:
6
+
7
+ - `~/Library/Application Support/LucaVoiceLauncher/ipc-window.sock`
8
+
9
+ That means unrelated commands can compete for ownership of the app-facing socket. The current implementation makes this worse by doing the following on startup:
10
+
11
+ 1. If the socket path exists, `unlinkSync(socketPath)`.
12
+ 2. Bind a new server at the same path.
13
+
14
+ This creates a race where one Luca process can steal the socket from another. The native `LucaVoiceLauncher` app then disconnects from the old server and reconnects to whichever process currently owns the path. If that process exits, the app falls into reconnect loops.
15
+
16
+ This is the root cause of the observed behavior where:
17
+
18
+ - the launcher sometimes connects successfully
19
+ - the connection then drops unexpectedly
20
+ - repeated `ipc connect failed` messages appear in the launcher log
21
+
22
+ ## Design Goal
23
+
24
+ We want:
25
+
26
+ - one stable owner of the app-facing socket
27
+ - many independent Luca commands able to trigger window actions
28
+ - optional failover if the main owner dies
29
+ - support for multiple launcher app clients over time, and optionally at once
30
+
31
+ The key design rule is:
32
+
33
+ > Many clients is fine. Many servers competing for the same well-known socket is not.
34
+
35
+ ## Recommended Architecture
36
+
37
+ ### 1. Single broker for the app socket
38
+
39
+ Only one broker process may own:
40
+
41
+ - `ipc-window.sock`
42
+
43
+ The broker is responsible for:
44
+
45
+ - accepting native launcher app connections
46
+ - tracking connected app clients
47
+ - routing window commands to the selected app client
48
+ - receiving `windowAck`, `windowClosed`, and `terminalExited`
49
+ - routing responses and lifecycle events back to the original requester
50
+
51
+ ### 2. Separate control channel for Luca commands
52
+
53
+ Luca commands should not bind the app-facing socket directly.
54
+
55
+ Instead, they should talk to the broker over a separate channel, for example:
56
+
57
+ - `~/Library/Application Support/LucaVoiceLauncher/ipc-window-control.sock`
58
+
59
+ This control channel is for producers:
60
+
61
+ - `luca main`
62
+ - `luca workflow run ...`
63
+ - `luca present`
64
+ - scripts
65
+ - background jobs
66
+
67
+ These producers send requests to the broker, and the broker fans them out to the connected app client.
68
+
69
+ ### 3. Broker supports multiple app clients
70
+
71
+ The broker should replace the current single `_client` field with a registry:
72
+
73
+ ```ts
74
+ Map<string, ClientConnection>
75
+ ```
76
+
77
+ Each client should have:
78
+
79
+ - `clientId`
80
+ - `socket`
81
+ - `buffer`
82
+ - metadata if useful later, such as display, role, labels, or lastSeenAt
83
+
84
+ This allows:
85
+
86
+ - multiple launcher app instances
87
+ - reconnect without confusing request ownership
88
+ - future routing by target client
89
+
90
+ ## Routing Model
91
+
92
+ ### Producer -> broker
93
+
94
+ Producer sends a request like:
95
+
96
+ ```json
97
+ {
98
+ "type": "windowRequest",
99
+ "requestId": "uuid",
100
+ "originId": "uuid",
101
+ "targetClientId": "optional",
102
+ "window": {
103
+ "action": "open",
104
+ "url": "https://example.com"
105
+ }
106
+ }
107
+ ```
108
+
109
+ ### Broker -> app client
110
+
111
+ Broker forwards the request to the chosen app client, preserving `requestId`.
112
+
113
+ ### App client -> broker
114
+
115
+ App replies with:
116
+
117
+ - `windowAck`
118
+ - `windowClosed`
119
+ - `terminalExited`
120
+
121
+ ### Broker -> producer
122
+
123
+ Broker routes:
124
+
125
+ - the `windowAck` back to the producer that originated the request
126
+ - lifecycle events either to the originating producer, or to any subscribed producer
127
+
128
+ ## Client Selection Policy
129
+
130
+ The simplest policy is:
131
+
132
+ - use the most recently connected healthy app client
133
+
134
+ Later policies can support:
135
+
136
+ - explicit `targetClientId`
137
+ - labels like `role=presenter`
138
+ - display-aware routing
139
+ - sticky routing based on `windowId -> clientId`
140
+
141
+ ## Leader Election / Failover
142
+
143
+ If we want multiple `windowManager` instances to exist, they must not all behave as brokers.
144
+
145
+ Instead:
146
+
147
+ 1. Try connecting to the broker control socket.
148
+ 2. If broker exists, act as a producer client.
149
+ 3. If broker does not exist, try to acquire a broker lock.
150
+ 4. If lock succeeds, become broker and bind both sockets.
151
+ 5. If lock fails, retry broker connection and act as producer.
152
+
153
+ Possible lock mechanisms:
154
+
155
+ - lock file with `flock`
156
+ - lock directory with atomic `mkdir`
157
+ - local TCP/Unix registration endpoint
158
+
159
+ The important constraint is:
160
+
161
+ - only the elected broker binds `ipc-window.sock`
162
+
163
+ All other instances must route through it.
164
+
165
+ ## Why not let many processes bind the same socket?
166
+
167
+ Because a Unix domain socket path can be bound by only one listening process at a time, the path is a singular ownership point — not a shared bus.
168
+
169
+ If multiple processes all call `listen()` against the same path and delete stale files optimistically, they will:
170
+
171
+ - steal the path from each other
172
+ - disconnect the app unexpectedly
173
+ - lose in-flight requests
174
+ - create non-deterministic routing
175
+
176
+ This is fundamentally the wrong abstraction.
177
+
178
+ ## Backward-Compatible Migration
179
+
180
+ We can migrate without breaking the public `windowManager.spawn()` API.
181
+
182
+ ### Phase 1
183
+
184
+ - Introduce a broker mode internally.
185
+ - Add `ipc-window-control.sock`.
186
+ - Keep the existing app protocol unchanged.
187
+ - Make `windowManager.spawn()` talk to the broker when possible.
188
+
189
+ ### Phase 2
190
+
191
+ - Prevent non-broker processes from binding `ipc-window.sock`.
192
+ - Replace blind `unlinkSync(socketPath)` with active listener detection.
193
+ - Add broker election and failover.
194
+
195
+ ### Phase 3
196
+
197
+ - Add multi-client routing.
198
+ - Add subscriptions for lifecycle events.
199
+ - Add explicit target selection if needed.
200
+
201
+ ## Minimal Fix if We Need Something Fast
202
+
203
+ If we do not implement the full broker immediately, we should at least stop destroying active listeners.
204
+
205
+ `listen()` should:
206
+
207
+ 1. Attempt to connect to the existing socket.
208
+ 2. If a listener is alive, do not unlink or rebind.
209
+ 3. If the socket is dead, clean it up and bind.
210
+
211
+ This does not solve multi-producer routing, but it prevents random Luca commands from stealing the app socket from a healthy broker.
212
+
213
+ ## Proposed Internal Refactor
214
+
215
+ Current state:
216
+
217
+ - one process tries to be both broker and producer
218
+ - one `_client`
219
+ - one app-facing socket
220
+
221
+ Target state:
222
+
223
+ - broker owns app-facing socket
224
+ - producers use control socket
225
+ - broker stores:
226
+ - `clients: Map<clientId, ClientConnection>`
227
+ - `pendingRequests: Map<requestId, PendingRequest>`
228
+ - `requestOrigins: Map<requestId, originConnection>`
229
+ - `windowOwners: Map<windowId, clientId>`
230
+
231
+ That separation gives us:
232
+
233
+ - stable app connectivity
234
+ - multi-command triggering
235
+ - failover
236
+ - room for multi-client routing
237
+
238
+ ## Summary
239
+
240
+ The right fix is not “allow many `listen()` calls on the same socket.”
241
+
242
+ The right fix is:
243
+
244
+ - one elected broker owns the app socket
245
+ - many Luca processes talk to the broker
246
+ - many app clients may connect to the broker
247
+ - failover is implemented through broker election, not socket contention
248
+
249
+ That preserves a stable connection for the launcher app while still allowing multiple people, commands, or workflows to trigger window operations.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@soederpop/luca",
3
- "version": "0.0.26",
3
+ "version": "0.0.28",
4
4
  "website": "https://luca.soederpop.com",
5
5
  "description": "lightweight universal conversational architecture AKA Le Ultimate Component Architecture AKA Last Universal Common Ancestor, part AI part Human",
6
6
  "author": "jon soeder aka the people's champ <jon@soederpop.com>",
@@ -28,6 +28,7 @@ export const AssistantEventsSchema = FeatureEventsSchema.extend({
28
28
  toolResult: z.tuple([z.string().describe('Tool name'), z.any().describe('Result value')]).describe('Emitted when a tool returns a result'),
29
29
  toolError: z.tuple([z.string().describe('Tool name'), z.any().describe('Error')]).describe('Emitted when a tool call fails'),
30
30
  hookFired: z.tuple([z.string().describe('Hook/event name')]).describe('Emitted when a hook function is called'),
31
+ systemPromptExtensionsChanged: z.tuple([]).describe('Emitted when system prompt extensions are added or removed'),
31
32
  })
32
33
 
33
34
  export const AssistantStateSchema = FeatureStateSchema.extend({
@@ -39,6 +40,7 @@ export const AssistantStateSchema = FeatureStateSchema.extend({
39
40
  conversationId: z.string().optional().describe('The active conversation persistence ID'),
40
41
  threadId: z.string().optional().describe('The active thread ID'),
41
42
  systemPrompt: z.string().describe('The loaded system prompt text'),
43
+ systemPromptExtensions: z.record(z.string(), z.string()).describe('Named extensions appended to the system prompt'),
42
44
  meta: z.record(z.string(), z.any()).describe('Parsed YAML frontmatter from CORE.md'),
43
45
  tools: z.record(z.string(), z.any()).describe('Registered tool implementations'),
44
46
  hooks: z.record(z.string(), z.any()).describe('Loaded event hook functions'),
@@ -120,6 +122,7 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
120
122
  lastResponse: '',
121
123
  folder: this.resolvedFolder,
122
124
  systemPrompt: '',
125
+ systemPromptExtensions: {},
123
126
  meta: {},
124
127
  tools: {},
125
128
  hooks: {},
@@ -228,7 +231,7 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
228
231
  api: 'chat',
229
232
  ...(this.options.maxTokens ? { maxTokens: this.options.maxTokens } : {}),
230
233
  history: [
231
- { role: 'system', content: this.systemPrompt || this.loadSystemPrompt() },
234
+ { role: 'system', content: this.effectiveSystemPrompt },
232
235
  ],
233
236
  })
234
237
  this.state.set('conversation', conv)
@@ -254,6 +257,60 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
254
257
  return this.state.get('systemPrompt') || ''
255
258
  }
256
259
 
260
+ /** The named extensions appended to the system prompt. */
261
+ get systemPromptExtensions(): Record<string, string> {
262
+ return (this.state.get('systemPromptExtensions') || {}) as Record<string, string>
263
+ }
264
+
265
+ /** The system prompt with all extensions appended. This is the value passed to the conversation. */
266
+ get effectiveSystemPrompt(): string {
267
+ const base = this.systemPrompt
268
+ const extensions = Object.values(this.systemPromptExtensions)
269
+ if (!extensions.length) return base
270
+ return [base, ...extensions].join('\n\n')
271
+ }
272
+
273
+ /**
274
+ * Add or update a named system prompt extension. The value is appended
275
+ * to the base system prompt when passed to the conversation.
276
+ *
277
+ * @param key - A unique identifier for this extension
278
+ * @param value - The text to append
279
+ * @returns this, for chaining
280
+ */
281
+ addSystemPromptExtension(key: string, value: string): this {
282
+ this.state.set('systemPromptExtensions', { ...this.systemPromptExtensions, [key]: value })
283
+ this.syncSystemPromptToConversation()
284
+ this.emit('systemPromptExtensionsChanged')
285
+ return this
286
+ }
287
+
288
+ /**
289
+ * Remove a named system prompt extension.
290
+ *
291
+ * @param key - The identifier of the extension to remove
292
+ * @returns this, for chaining
293
+ */
294
+ removeSystemPromptExtension(key: string): this {
295
+ const current = { ...this.systemPromptExtensions }
296
+ delete current[key]
297
+ this.state.set('systemPromptExtensions', current)
298
+ this.syncSystemPromptToConversation()
299
+ this.emit('systemPromptExtensionsChanged')
300
+ return this
301
+ }
302
+
303
+ /** Update the conversation's system message to reflect the current effective prompt. */
304
+ private syncSystemPromptToConversation() {
305
+ const conv = this.state.get('conversation') as Conversation | null
306
+ if (!conv) return
307
+ const messages = [...conv.messages]
308
+ if (messages.length > 0 && (messages[0]!.role === 'system' || messages[0]!.role === 'developer')) {
309
+ messages[0] = { ...messages[0]!, content: this.effectiveSystemPrompt }
310
+ conv.state.set('messages', messages)
311
+ }
312
+ }
313
+
257
314
  /** The tools registered with this assistant. */
258
315
  get tools(): Record<string, ConversationTool> {
259
316
  return (this.state.get('tools') || {}) as Record<string, ConversationTool>
@@ -287,6 +344,9 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
287
344
  }
288
345
  } else if (fnOrHelper && typeof (fnOrHelper as any).toTools === 'function') {
289
346
  this._registerTools((fnOrHelper as any).toTools())
347
+ if (typeof (fnOrHelper as any).setupToolsConsumer === 'function') {
348
+ (fnOrHelper as any).setupToolsConsumer(this)
349
+ }
290
350
  } else if (fnOrHelper && 'schemas' in fnOrHelper && 'handlers' in fnOrHelper) {
291
351
  this._registerTools(fnOrHelper as { schemas: Record<string, z.ZodType>, handlers: Record<string, Function> })
292
352
  }
@@ -768,7 +828,7 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
768
828
 
769
829
  // Swap in fresh system prompt if it changed
770
830
  if (messages.length > 0 && (messages[0]!.role === 'system' || messages[0]!.role === 'developer')) {
771
- messages[0] = { role: messages[0]!.role, content: this.systemPrompt }
831
+ messages[0] = { role: messages[0]!.role, content: this.effectiveSystemPrompt }
772
832
  }
773
833
 
774
834
  this.conversation.state.set('id', existing.id)