@tyvm/knowhow 0.0.88 → 0.0.90
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONFIG.md +52 -0
- package/README.md +344 -29
- package/WORKER.md +169 -334
- package/autodoc/chat-guide.md +540 -0
- package/autodoc/cli-reference.md +765 -0
- package/autodoc/config-reference.md +541 -0
- package/autodoc/embeddings-guide.md +566 -0
- package/autodoc/generate-guide.md +477 -0
- package/autodoc/language-plugin-guide.md +443 -0
- package/autodoc/modules-guide.md +352 -0
- package/autodoc/plugins-guide.md +720 -0
- package/autodoc/quickstart-guide.md +129 -0
- package/autodoc/skills-guide.md +468 -0
- package/autodoc/worker-guide.md +526 -0
- package/package.json +2 -2
- package/src/ai.ts +33 -2
- package/src/config.ts +28 -4
- package/src/index.ts +22 -2
- package/src/processors/TokenCompressor.ts +2 -2
- package/src/processors/ToolResponseCache.ts +3 -3
- package/src/processors/tools/grepToolResponse.ts +9 -4
- package/src/processors/tools/jqToolResponse.ts +11 -6
- package/src/processors/tools/listStoredToolResponses.ts +1 -1
- package/src/processors/tools/tailToolResponse.ts +9 -4
- package/src/worker.ts +9 -7
- package/ts_build/package.json +2 -2
- package/ts_build/src/ai.js +18 -1
- package/ts_build/src/ai.js.map +1 -1
- package/ts_build/src/config.js +17 -2
- package/ts_build/src/config.js.map +1 -1
- package/ts_build/src/index.js +12 -2
- package/ts_build/src/index.js.map +1 -1
- package/ts_build/src/processors/TokenCompressor.js +2 -2
- package/ts_build/src/processors/TokenCompressor.js.map +1 -1
- package/ts_build/src/processors/ToolResponseCache.js +3 -3
- package/ts_build/src/processors/ToolResponseCache.js.map +1 -1
- package/ts_build/src/processors/tools/grepToolResponse.d.ts +3 -1
- package/ts_build/src/processors/tools/grepToolResponse.js +8 -2
- package/ts_build/src/processors/tools/grepToolResponse.js.map +1 -1
- package/ts_build/src/processors/tools/jqToolResponse.d.ts +3 -1
- package/ts_build/src/processors/tools/jqToolResponse.js +10 -4
- package/ts_build/src/processors/tools/jqToolResponse.js.map +1 -1
- package/ts_build/src/processors/tools/listStoredToolResponses.js +1 -1
- package/ts_build/src/processors/tools/listStoredToolResponses.js.map +1 -1
- package/ts_build/src/processors/tools/tailToolResponse.d.ts +3 -1
- package/ts_build/src/processors/tools/tailToolResponse.js +8 -2
- package/ts_build/src/processors/tools/tailToolResponse.js.map +1 -1
- package/ts_build/src/worker.js +5 -3
- package/ts_build/src/worker.js.map +1 -1
- package/autodoc/chat.mdx +0 -20
- package/autodoc/cli.mdx +0 -11
- package/autodoc/plugins/asana.mdx +0 -47
- package/autodoc/plugins/downloader/downloader.mdx +0 -38
- package/autodoc/plugins/downloader/plugin.mdx +0 -37
- package/autodoc/plugins/downloader/types.mdx +0 -42
- package/autodoc/plugins/embedding.mdx +0 -41
- package/autodoc/plugins/figma.mdx +0 -45
- package/autodoc/plugins/github.mdx +0 -40
- package/autodoc/plugins/jira.mdx +0 -46
- package/autodoc/plugins/language.mdx +0 -37
- package/autodoc/plugins/linear.mdx +0 -35
- package/autodoc/plugins/notion.mdx +0 -38
- package/autodoc/plugins/plugins.mdx +0 -59
- package/autodoc/plugins/types.mdx +0 -51
- package/autodoc/plugins/vim.mdx +0 -39
- package/autodoc/tools/addInternalTools.mdx +0 -1
- package/autodoc/tools/agentCall.mdx +0 -1
- package/autodoc/tools/asana/definitions.mdx +0 -10
- package/autodoc/tools/asana/index.mdx +0 -12
- package/autodoc/tools/askHuman.mdx +0 -1
- package/autodoc/tools/callPlugin.mdx +0 -1
- package/autodoc/tools/embeddingSearch.mdx +0 -1
- package/autodoc/tools/execCommand.mdx +0 -1
- package/autodoc/tools/fileSearch.mdx +0 -1
- package/autodoc/tools/finalAnswer.mdx +0 -1
- package/autodoc/tools/github/definitions.mdx +0 -6
- package/autodoc/tools/github/index.mdx +0 -8
- package/autodoc/tools/index.mdx +0 -14
- package/autodoc/tools/lintFile.mdx +0 -7
- package/autodoc/tools/list.mdx +0 -16
- package/autodoc/tools/modifyFile.mdx +0 -7
- package/autodoc/tools/patch.mdx +0 -9
- package/autodoc/tools/readBlocks.mdx +0 -1
- package/autodoc/tools/readFile.mdx +0 -1
- package/autodoc/tools/scanFile.mdx +0 -1
- package/autodoc/tools/textSearch.mdx +0 -6
- package/autodoc/tools/types/fileblock.mdx +0 -1
- package/autodoc/tools/visionTool.mdx +0 -1
- package/autodoc/tools/writeFile.mdx +0 -1
- package/test-comprehensive.ts +0 -31
|
@@ -0,0 +1,526 @@
|
|
|
1
|
+
# Worker System Guide (Knowhow CLI)
|
|
2
|
+
|
|
3
|
+
The **Knowhow worker** is how you expose your **local machine** to the Knowhow cloud so **AI agents running on `knowhow.tyvm.ai`** can call your tools and access your workspace.
|
|
4
|
+
|
|
5
|
+
A worker runs a local **MCP server** and keeps a persistent **WebSocket connection** to the Knowhow cloud. The cloud can then invoke the MCP tools that you explicitly allow.
|
|
6
|
+
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
## 1) What the worker is
|
|
10
|
+
|
|
11
|
+
A **worker** is a process started by `knowhow worker` that:
|
|
12
|
+
|
|
13
|
+
- Loads the CLI’s tool registry (agent tools + worker tools).
|
|
14
|
+
- Starts a local **MCP server** over WebSockets.
|
|
15
|
+
- Connects to **Knowhow cloud** at `knowhow.tyvm.ai` (via a configured API URL).
|
|
16
|
+
- Advertises only the tools allowed by your `knowhow.json` configuration.
|
|
17
|
+
- Optionally enables:
|
|
18
|
+
- **Sharing/visibility controls**
|
|
19
|
+
- **Tunnel-based port forwarding**
|
|
20
|
+
- **Docker sandbox mode**
|
|
21
|
+
- **Passkey-based locking/unlocking**
|
|
22
|
+
|
|
23
|
+
In `src/worker.ts`, this is implemented by:
|
|
24
|
+
|
|
25
|
+
- Creating an MCP server: `mcpServer.createServer(...).withTools(toolsToUse)`
|
|
26
|
+
- Connecting to the cloud WebSocket endpoint: `new WebSocket(`${API_URL}/ws/worker`, { headers })`
|
|
27
|
+
- Running the MCP-over-WebSocket transport: `mcpServer.runWsServer(ws)`
|
|
28
|
+
|
|
29
|
+
---
|
|
30
|
+
|
|
31
|
+
## 2) `knowhow worker` — starting a worker
|
|
32
|
+
|
|
33
|
+
Command:
|
|
34
|
+
|
|
35
|
+
```bash
|
|
36
|
+
knowhow worker
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
At runtime, the worker does the following (high level):
|
|
40
|
+
|
|
41
|
+
1. **Loads config** from `./.knowhow/knowhow.json` (`getConfig()`).
|
|
42
|
+
2. Handles special flags:
|
|
43
|
+
- `--passkey-reset` clears passkey config and exits.
|
|
44
|
+
- `--passkey` starts a browser-based registration flow and exits.
|
|
45
|
+
3. Decides whether to run in **Docker sandbox mode**:
|
|
46
|
+
- If already inside Docker (`process.env.KNOWHOW_DOCKER === "true"`), it disables sandbox to avoid nested Docker.
|
|
47
|
+
- Otherwise, sandbox selection priority is:
|
|
48
|
+
1. CLI flag `--sandbox` / `--no-sandbox`
|
|
49
|
+
2. `config.worker.sandbox`
|
|
50
|
+
3. default: `false` (host mode)
|
|
51
|
+
4. If in **host mode**:
|
|
52
|
+
- Registers the MCP tools locally by:
|
|
53
|
+
- `Tools.defineTools(includedTools, combinedTools)`
|
|
54
|
+
- `Tools.defineTools(workerTools.definitions, workerTools.tools)`
|
|
55
|
+
- `await Mcp.addTools(Tools)`
|
|
56
|
+
- Ensures `worker.allowedTools` exists:
|
|
57
|
+
- If `config.worker?.allowedTools` is missing, it auto-generates:
|
|
58
|
+
- `allowedTools: Tools.getToolNames()`
|
|
59
|
+
- saves it to config
|
|
60
|
+
- prints a message and **returns early** (so you can edit allowed tools before running again)
|
|
61
|
+
5. If **registration** is enabled (`--register`):
|
|
62
|
+
- Calls `registerWorkerPath(process.cwd())` and exits.
|
|
63
|
+
6. If **passkey auth** is enabled in config:
|
|
64
|
+
- Starts in a **locked** state.
|
|
65
|
+
- Wraps each allowed tool to block calls while locked, returning:
|
|
66
|
+
- `error: "WORKER_LOCKED"`
|
|
67
|
+
- a message instructing the caller to use `unlock`.
|
|
68
|
+
- Registers special auth tools:
|
|
69
|
+
- `unlock` (two-step flow)
|
|
70
|
+
- `lock`
|
|
71
|
+
7. Connects to the cloud via WebSockets:
|
|
72
|
+
- `API_URL/ws/worker` for the MCP tool channel
|
|
73
|
+
- Optional `API_URL/ws/tunnel` for the tunnel system
|
|
74
|
+
8. Loops forever, pinging every ~5 seconds, and reconnecting on disconnect.
|
|
75
|
+
|
|
76
|
+
---
|
|
77
|
+
|
|
78
|
+
## 3) CLI flags
|
|
79
|
+
|
|
80
|
+
These flags are defined under the `worker` command in `src/cli.ts`.
|
|
81
|
+
|
|
82
|
+
### `--share` / `--unshare` (visibility control)
|
|
83
|
+
|
|
84
|
+
- `--share` makes the worker accessible to your organization.
|
|
85
|
+
- `--unshare` makes it private to you.
|
|
86
|
+
|
|
87
|
+
Implementation detail (`src/worker.ts`): the worker sets a WebSocket header:
|
|
88
|
+
|
|
89
|
+
- `headers.Shared = "true"` when `--share` is used
|
|
90
|
+
- `headers.Shared = "false"` when `--unshare` is used
|
|
91
|
+
- otherwise: “Worker is private (only you can use it)”
|
|
92
|
+
|
|
93
|
+
### `--sandbox` / `--no-sandbox` (Docker sandbox mode)
|
|
94
|
+
|
|
95
|
+
- `--sandbox` runs the worker inside Docker for isolation.
|
|
96
|
+
- `--no-sandbox` runs it on the host.
|
|
97
|
+
|
|
98
|
+
Sandbox selection priority is:
|
|
99
|
+
|
|
100
|
+
1. CLI flags
|
|
101
|
+
2. `config.worker.sandbox`
|
|
102
|
+
3. default: `false`
|
|
103
|
+
|
|
104
|
+
Implementation detail:
|
|
105
|
+
- When `shouldUseSandbox` is true, the worker calls `runWorkerInSandbox(...)`.
|
|
106
|
+
- If Docker isn’t available, sandbox mode exits with an error.
|
|
107
|
+
- Sandbox always rebuilds the worker image:
|
|
108
|
+
- `Docker.buildWorkerImage()`
|
|
109
|
+
|
|
110
|
+
### `--register` (register worker path)
|
|
111
|
+
|
|
112
|
+
Registers the current directory as a worker in the local worker registry:
|
|
113
|
+
|
|
114
|
+
```bash
|
|
115
|
+
knowhow worker --register
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
Implementation detail: `registerWorkerPath(process.cwd())`.
|
|
119
|
+
|
|
120
|
+
### `--passkey` / `--passkey-reset` (passkey security setup)
|
|
121
|
+
|
|
122
|
+
- `--passkey` starts the passkey registration flow (requires you to be logged in).
|
|
123
|
+
- `--passkey-reset` removes passkey requirement from config.
|
|
124
|
+
|
|
125
|
+
Implementation detail:
|
|
126
|
+
- `--passkey` uses `PasskeySetupService.setup(jwt)`
|
|
127
|
+
- `--passkey-reset` uses `PasskeySetupService.reset()`
|
|
128
|
+
- If you’re not logged in, `--passkey` errors and tells you to run `knowhow login`.
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## 4) `worker.allowedTools` — configuring which tools to expose
|
|
133
|
+
|
|
134
|
+
### How the initial list is created (first run)
|
|
135
|
+
|
|
136
|
+
When running in host mode:
|
|
137
|
+
|
|
138
|
+
- If `config.worker.allowedTools` is **missing**, the worker:
|
|
139
|
+
- auto-generates it as:
|
|
140
|
+
- `allowedTools: Tools.getToolNames()`
|
|
141
|
+
- writes it to `.knowhow/knowhow.json`
|
|
142
|
+
- prints:
|
|
143
|
+
> “Worker tools configured! Update knowhow.json to adjust which tools are allowed by the worker.”
|
|
144
|
+
- then **exits early** (so you can edit the list before actually serving tools)
|
|
145
|
+
|
|
146
|
+
So the typical workflow is:
|
|
147
|
+
|
|
148
|
+
1. Start worker once
|
|
149
|
+
2. Edit `worker.allowedTools`
|
|
150
|
+
3. Start worker again
|
|
151
|
+
|
|
152
|
+
### Tool naming (including MCP tools)
|
|
153
|
+
|
|
154
|
+
The guide expects the following naming convention for MCP tool exposure:
|
|
155
|
+
|
|
156
|
+
- **MCP tools** appear as:
|
|
157
|
+
- `mcp_0_<server>_<toolname>`
|
|
158
|
+
|
|
159
|
+
The worker’s tool registry can include both:
|
|
160
|
+
- built-in worker tools
|
|
161
|
+
- agent tools
|
|
162
|
+
- configured MCP tools (for example browser automation)
|
|
163
|
+
|
|
164
|
+
### Example `allowedTools` list
|
|
165
|
+
|
|
166
|
+
Example (illustrative):
|
|
167
|
+
|
|
168
|
+
```json
|
|
169
|
+
{
|
|
170
|
+
"worker": {
|
|
171
|
+
"allowedTools": [
|
|
172
|
+
"readFile",
|
|
173
|
+
"writeFile",
|
|
174
|
+
"searchFiles",
|
|
175
|
+
"exec",
|
|
176
|
+
"mcp_0_browser_navigate",
|
|
177
|
+
"mcp_0_browser_click"
|
|
178
|
+
]
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
> Tip: Keep this list tight. Tools are gated by your explicit configuration, and (optionally) by passkey locking.
|
|
184
|
+
|
|
185
|
+
---
|
|
186
|
+
|
|
187
|
+
## 5) Connecting to the cloud
|
|
188
|
+
|
|
189
|
+
After you run:
|
|
190
|
+
|
|
191
|
+
```bash
|
|
192
|
+
knowhow login
|
|
193
|
+
```
|
|
194
|
+
|
|
195
|
+
the worker retrieves your JWT token (`loadJwt()`) and connects to Knowhow cloud using WebSockets:
|
|
196
|
+
|
|
197
|
+
- **MCP/tool channel**:
|
|
198
|
+
- `ws://${API_URL}/ws/worker` (API URL is derived from `KNOWHOW_API_URL`)
|
|
199
|
+
- Optional **tunnel channel**:
|
|
200
|
+
- `ws://${API_URL}/ws/tunnel`
|
|
201
|
+
|
|
202
|
+
Headers sent with the WebSocket connection include:
|
|
203
|
+
|
|
204
|
+
- `Authorization: Bearer <jwt>`
|
|
205
|
+
- `User-Agent: knowhow-worker/1.1.1/<hostname>`
|
|
206
|
+
- `Root: <workspace root path representation>`
|
|
207
|
+
- `Shared: "true"` or `"false"` if share/unshare flags are used
|
|
208
|
+
|
|
209
|
+
Reconnect behavior:
|
|
210
|
+
- If the worker WebSocket closes, it logs and reconnects.
|
|
211
|
+
- The worker also periodically pings (`await connection.ws.ping()`), and will reconnect if ping fails.
|
|
212
|
+
|
|
213
|
+
---
|
|
214
|
+
|
|
215
|
+
## 6) Sharing the worker
|
|
216
|
+
|
|
217
|
+
- By default (no `--share` / `--unshare`):
|
|
218
|
+
- the worker is treated as **private**.
|
|
219
|
+
- With `--share`:
|
|
220
|
+
- the worker advertises `Shared: "true"` and is accessible to others in your organization.
|
|
221
|
+
- With `--unshare`:
|
|
222
|
+
- the worker advertises `Shared: "false"` (explicitly private).
|
|
223
|
+
|
|
224
|
+
---
|
|
225
|
+
|
|
226
|
+
## 7) Tunnel system (`worker.tunnel`)
|
|
227
|
+
|
|
228
|
+
The worker can also forward inbound requests to **your local ports** through the Knowhow cloud using a tunnel.
|
|
229
|
+
|
|
230
|
+
### Enable it
|
|
231
|
+
|
|
232
|
+
In `knowhow.json`:
|
|
233
|
+
|
|
234
|
+
```json
|
|
235
|
+
{
|
|
236
|
+
"worker": {
|
|
237
|
+
"tunnel": {
|
|
238
|
+
"enabled": true
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
### `allowedPorts`
|
|
245
|
+
|
|
246
|
+
When tunnel is enabled, you must configure which ports the tunnel will be allowed to forward:
|
|
247
|
+
|
|
248
|
+
```json
|
|
249
|
+
{
|
|
250
|
+
"worker": {
|
|
251
|
+
"tunnel": {
|
|
252
|
+
"enabled": true,
|
|
253
|
+
"allowedPorts": [3000, 5432]
|
|
254
|
+
}
|
|
255
|
+
}
|
|
256
|
+
}
|
|
257
|
+
```
|
|
258
|
+
|
|
259
|
+
If tunnel is enabled but `allowedPorts` is empty, the worker warns:
|
|
260
|
+
|
|
261
|
+
> “Tunnel enabled but no allowedPorts configured. Add tunnel.allowedPorts to knowhow.json”
|
|
262
|
+
|
|
263
|
+
### Other tunnel config (from code)
|
|
264
|
+
|
|
265
|
+
The worker also reads (optional) tunnel settings:
|
|
266
|
+
|
|
267
|
+
- `worker.tunnel.localHost`
|
|
268
|
+
- If not set:
|
|
269
|
+
- inside Docker: uses `host.docker.internal`
|
|
270
|
+
- otherwise: uses `127.0.0.1`
|
|
271
|
+
- `worker.tunnel.portMapping`
|
|
272
|
+
- Logged as “Container port → Host port”
|
|
273
|
+
- `worker.tunnel.maxConcurrentStreams` (default 50)
|
|
274
|
+
- `worker.tunnel.enableUrlRewriting` (default enabled)
|
|
275
|
+
- `worker.tunnel.enableUrlRewriting !== false` enables URL rewriting
|
|
276
|
+
- Tunnel URL rewriting is based on either a `secret` or `workerId` in tunnel metadata
|
|
277
|
+
|
|
278
|
+
---
|
|
279
|
+
|
|
280
|
+
## 8) Docker sandbox mode
|
|
281
|
+
|
|
282
|
+
Sandbox mode runs the worker in Docker for isolation.
|
|
283
|
+
|
|
284
|
+
### Enable it
|
|
285
|
+
|
|
286
|
+
Either:
|
|
287
|
+
|
|
288
|
+
```bash
|
|
289
|
+
knowhow worker --sandbox
|
|
290
|
+
```
|
|
291
|
+
|
|
292
|
+
or in config:
|
|
293
|
+
|
|
294
|
+
```json
|
|
295
|
+
{
|
|
296
|
+
"worker": {
|
|
297
|
+
"sandbox": true
|
|
298
|
+
}
|
|
299
|
+
}
|
|
300
|
+
```
|
|
301
|
+
|
|
302
|
+
### Configuration: `worker.volumes`
|
|
303
|
+
|
|
304
|
+
When sandboxing, you typically need to mount your workspace and any other resources into the container.
|
|
305
|
+
|
|
306
|
+
This guide documents the expected config keys passed into the Docker runner:
|
|
307
|
+
|
|
308
|
+
```json
|
|
309
|
+
{
|
|
310
|
+
"worker": {
|
|
311
|
+
"sandbox": true,
|
|
312
|
+
"volumes": [
|
|
313
|
+
{ "host": ".", "container": "/workspace" }
|
|
314
|
+
]
|
|
315
|
+
}
|
|
316
|
+
}
|
|
317
|
+
```
|
|
318
|
+
|
|
319
|
+
> The worker code passes the entire `config` into `Docker.runWorkerContainer(...)`, so `worker.volumes` is expected to be consumed by the Docker layer.
|
|
320
|
+
|
|
321
|
+
### Configuration: `worker.envFile`
|
|
322
|
+
|
|
323
|
+
Similarly, you can pass environment variables into the sandboxed container using a file path:
|
|
324
|
+
|
|
325
|
+
```json
|
|
326
|
+
{
|
|
327
|
+
"worker": {
|
|
328
|
+
"sandbox": true,
|
|
329
|
+
"envFile": ".knowhow/worker.env"
|
|
330
|
+
}
|
|
331
|
+
}
|
|
332
|
+
```
|
|
333
|
+
|
|
334
|
+
> As above, the worker passes `config` through to the Docker runner.
|
|
335
|
+
|
|
336
|
+
### Notes specific to nested containers
|
|
337
|
+
|
|
338
|
+
If you run the worker inside an environment where:
|
|
339
|
+
|
|
340
|
+
- `KNOWHOW_DOCKER=true`
|
|
341
|
+
|
|
342
|
+
then the worker automatically disables sandbox mode (prevents “nested Docker”).
|
|
343
|
+
|
|
344
|
+
---
|
|
345
|
+
|
|
346
|
+
## 9) Passkey security
|
|
347
|
+
|
|
348
|
+
Passkey auth protects your worker by requiring a **hardware passkey** to unlock tool access.
|
|
349
|
+
|
|
350
|
+
### Setup and reset
|
|
351
|
+
|
|
352
|
+
- Register/enable passkey auth:
|
|
353
|
+
|
|
354
|
+
```bash
|
|
355
|
+
knowhow worker --passkey
|
|
356
|
+
```
|
|
357
|
+
|
|
358
|
+
- Remove passkey requirement:
|
|
359
|
+
|
|
360
|
+
```bash
|
|
361
|
+
knowhow worker --passkey-reset
|
|
362
|
+
```
|
|
363
|
+
|
|
364
|
+
### What happens at startup
|
|
365
|
+
|
|
366
|
+
If config contains passkey credentials:
|
|
367
|
+
|
|
368
|
+
- `config.worker.auth.passkey.publicKey`
|
|
369
|
+
- `config.worker.auth.passkey.credentialId`
|
|
370
|
+
|
|
371
|
+
then the worker:
|
|
372
|
+
|
|
373
|
+
- enables passkey auth
|
|
374
|
+
- starts **locked**
|
|
375
|
+
- wraps each configured allowed tool so that when locked it returns:
|
|
376
|
+
|
|
377
|
+
```json
|
|
378
|
+
{
|
|
379
|
+
"error": "WORKER_LOCKED",
|
|
380
|
+
"message": "Worker is locked. Call the `unlock` tool with your passkey assertion to unlock it first."
|
|
381
|
+
}
|
|
382
|
+
```
|
|
383
|
+
|
|
384
|
+
### How unlocking works (tools)
|
|
385
|
+
|
|
386
|
+
When passkey auth is enabled, the worker registers these tools:
|
|
387
|
+
|
|
388
|
+
- `getChallenge` (returns a challenge string)
|
|
389
|
+
- `unlock` (two-step tool)
|
|
390
|
+
- **Call without assertion fields** → returns a challenge
|
|
391
|
+
- **Call with assertion fields** → verifies assertion and unlocks
|
|
392
|
+
- `lock` (re-locks the worker)
|
|
393
|
+
|
|
394
|
+
**Important behavior:** the wrapper gating applies to your *configured allowed tools*, while the auth tools (`unlock`, `lock`, and the unlock flow challenge) are added so callers can regain access.
|
|
395
|
+
|
|
396
|
+
---
|
|
397
|
+
|
|
398
|
+
## 10) Worker in production (systemd / background)
|
|
399
|
+
|
|
400
|
+
The worker runs an infinite loop that reconnects automatically, so it’s well-suited for a supervisor.
|
|
401
|
+
|
|
402
|
+
### systemd example
|
|
403
|
+
|
|
404
|
+
Create `/etc/systemd/system/knowhow-worker.service`:
|
|
405
|
+
|
|
406
|
+
```ini
|
|
407
|
+
[Unit]
|
|
408
|
+
Description=Knowhow Worker
|
|
409
|
+
After=network-online.target
|
|
410
|
+
Wants=network-online.target
|
|
411
|
+
|
|
412
|
+
[Service]
|
|
413
|
+
Type=simple
|
|
414
|
+
WorkingDirectory=/path/to/your/worker-directory
|
|
415
|
+
ExecStart=/usr/local/bin/knowhow worker --register --share --sandbox
|
|
416
|
+
Restart=always
|
|
417
|
+
RestartSec=5
|
|
418
|
+
Environment=NODE_ENV=production
|
|
419
|
+
|
|
420
|
+
# Optional: load environment variables
|
|
421
|
+
# EnvironmentFile=/path/to/your/envfile
|
|
422
|
+
|
|
423
|
+
[Install]
|
|
424
|
+
WantedBy=multi-user.target
|
|
425
|
+
```
|
|
426
|
+
|
|
427
|
+
Then:
|
|
428
|
+
|
|
429
|
+
```bash
|
|
430
|
+
sudo systemctl daemon-reload
|
|
431
|
+
sudo systemctl enable --now knowhow-worker
|
|
432
|
+
sudo journalctl -u knowhow-worker -f
|
|
433
|
+
```
|
|
434
|
+
|
|
435
|
+
### Background process example
|
|
436
|
+
|
|
437
|
+
```bash
|
|
438
|
+
nohup knowhow worker --share > /var/log/knowhow-worker.log 2>&1 &
|
|
439
|
+
```
|
|
440
|
+
|
|
441
|
+
---
|
|
442
|
+
|
|
443
|
+
## Example `knowhow.json` worker configuration
|
|
444
|
+
|
|
445
|
+
Place this in `./.knowhow/knowhow.json` (the worker edits/reads it).
|
|
446
|
+
|
|
447
|
+
```json
|
|
448
|
+
{
|
|
449
|
+
"worker": {
|
|
450
|
+
"allowedTools": [
|
|
451
|
+
"exec",
|
|
452
|
+
"readFile",
|
|
453
|
+
"writeFile",
|
|
454
|
+
"mcp_0_browser_navigate",
|
|
455
|
+
"mcp_0_browser_click"
|
|
456
|
+
],
|
|
457
|
+
"sandbox": false,
|
|
458
|
+
"tunnel": {
|
|
459
|
+
"enabled": true,
|
|
460
|
+
"allowedPorts": [3000, 5432]
|
|
461
|
+
},
|
|
462
|
+
"auth": {
|
|
463
|
+
"passkey": {
|
|
464
|
+
"publicKey": "-----BEGIN PUBLIC KEY-----...",
|
|
465
|
+
"credentialId": "base64url-credential-id"
|
|
466
|
+
},
|
|
467
|
+
"sessionDurationHours": 3
|
|
468
|
+
},
|
|
469
|
+
"volumes": [],
|
|
470
|
+
"envFile": ".knowhow/worker.env"
|
|
471
|
+
}
|
|
472
|
+
}
|
|
473
|
+
```
|
|
474
|
+
|
|
475
|
+
---
|
|
476
|
+
|
|
477
|
+
## Example workflows
|
|
478
|
+
|
|
479
|
+
### Workflow A: Configure allowed tools (safe first run)
|
|
480
|
+
|
|
481
|
+
1. Run once to auto-generate `worker.allowedTools`:
|
|
482
|
+
```bash
|
|
483
|
+
knowhow worker
|
|
484
|
+
```
|
|
485
|
+
2. Edit `.knowhow/knowhow.json` and narrow `worker.allowedTools`.
|
|
486
|
+
3. Run again:
|
|
487
|
+
```bash
|
|
488
|
+
knowhow worker --share
|
|
489
|
+
```
|
|
490
|
+
|
|
491
|
+
### Workflow B: Expose a local web app through the tunnel
|
|
492
|
+
|
|
493
|
+
1. Enable tunnel and allow the port:
|
|
494
|
+
```json
|
|
495
|
+
{
|
|
496
|
+
"worker": {
|
|
497
|
+
"tunnel": { "enabled": true, "allowedPorts": [3000] }
|
|
498
|
+
}
|
|
499
|
+
}
|
|
500
|
+
```
|
|
501
|
+
2. Start the worker:
|
|
502
|
+
```bash
|
|
503
|
+
knowhow worker --share
|
|
504
|
+
```
|
|
505
|
+
3. Your cloud agent can then reach forwarded services via tunnel-generated subdomains (URL rewriting enabled by default).
|
|
506
|
+
|
|
507
|
+
### Workflow C: Secure the worker with passkey locking
|
|
508
|
+
|
|
509
|
+
1. Log in:
|
|
510
|
+
```bash
|
|
511
|
+
knowhow login
|
|
512
|
+
```
|
|
513
|
+
2. Register the passkey:
|
|
514
|
+
```bash
|
|
515
|
+
knowhow worker --passkey
|
|
516
|
+
```
|
|
517
|
+
3. Edit `worker.allowedTools` to include only what you want agents to do.
|
|
518
|
+
4. Start the worker normally (it starts locked):
|
|
519
|
+
```bash
|
|
520
|
+
knowhow worker
|
|
521
|
+
```
|
|
522
|
+
5. The agent must call `unlock` using the challenge + WebAuthn assertion to use the other tools.
|
|
523
|
+
|
|
524
|
+
---
|
|
525
|
+
|
|
526
|
+
If you want, paste your current `./.knowhow/knowhow.json` worker block and I can suggest a minimal `allowedTools` list and a safe tunnel configuration for your use case.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@tyvm/knowhow",
|
|
3
|
-
"version": "0.0.
|
|
3
|
+
"version": "0.0.90",
|
|
4
4
|
"description": "ai cli with plugins and agents",
|
|
5
5
|
"main": "ts_build/src/index.js",
|
|
6
6
|
"bin": {
|
|
@@ -48,7 +48,7 @@
|
|
|
48
48
|
"@octokit/rest": "^20.0.2",
|
|
49
49
|
"@simplewebauthn/server": "^13.3.0",
|
|
50
50
|
"@types/react": "^19.1.8",
|
|
51
|
-
"@tyvm/knowhow-tunnel": "0.0.
|
|
51
|
+
"@tyvm/knowhow-tunnel": "0.0.4",
|
|
52
52
|
"asana": "^3.0.16",
|
|
53
53
|
"axios": "^1.5.0",
|
|
54
54
|
"cheerio": "^1.0.0",
|
package/src/ai.ts
CHANGED
|
@@ -6,6 +6,7 @@ import { Assistant } from "./types";
|
|
|
6
6
|
import { convertToText } from "./conversion";
|
|
7
7
|
import { getConfigSync } from "./config";
|
|
8
8
|
import { Clients } from "./clients";
|
|
9
|
+
import { getModelContextLimit } from "./clients/contextLimits";
|
|
9
10
|
|
|
10
11
|
const config = getConfigSync();
|
|
11
12
|
const OPENAI_KEY = process.env.OPENAI_KEY;
|
|
@@ -59,7 +60,7 @@ export async function singlePrompt(userPrompt: string, model = "", agent = "") {
|
|
|
59
60
|
}
|
|
60
61
|
|
|
61
62
|
if (!model) {
|
|
62
|
-
model = Models.openai.
|
|
63
|
+
model = Models.openai.GPT_54_Nano;
|
|
63
64
|
}
|
|
64
65
|
|
|
65
66
|
// Assume we're using provider/model format of model
|
|
@@ -71,12 +72,43 @@ export async function singlePrompt(userPrompt: string, model = "", agent = "") {
|
|
|
71
72
|
return resp?.choices?.[0]?.message?.content;
|
|
72
73
|
}
|
|
73
74
|
|
|
75
|
+
/**
|
|
76
|
+
* Rough token estimate: ~4 characters per token (common heuristic).
|
|
77
|
+
*/
|
|
78
|
+
function estimateTokens(text: string): number {
|
|
79
|
+
return Math.ceil(text.length / 4);
|
|
80
|
+
}
|
|
81
|
+
|
|
74
82
|
export async function summarizeTexts(
|
|
75
83
|
texts: string[],
|
|
76
84
|
template: string,
|
|
77
85
|
model = "",
|
|
78
86
|
agent = ""
|
|
79
87
|
) {
|
|
88
|
+
const effectiveModel = model || Models.openai.GPT_54_Nano;
|
|
89
|
+
|
|
90
|
+
// Estimate total tokens if we were to combine all texts into one prompt
|
|
91
|
+
const combinedText = texts.join("\n\n");
|
|
92
|
+
const combinedContent = template.replaceAll("{text}", combinedText);
|
|
93
|
+
const estimatedTokens = estimateTokens(combinedContent);
|
|
94
|
+
const contextLimit = getModelContextLimit(effectiveModel);
|
|
95
|
+
|
|
96
|
+
console.log(
|
|
97
|
+
`summarizeTexts: ${texts.length} text(s), ~${estimatedTokens} estimated tokens, context limit: ${contextLimit}`
|
|
98
|
+
);
|
|
99
|
+
|
|
100
|
+
// If everything fits in one context window, do a single prompt
|
|
101
|
+
if (estimatedTokens < contextLimit) {
|
|
102
|
+
console.log("summarizeTexts: fits in context window, using single prompt");
|
|
103
|
+
return singlePrompt(combinedContent, model, agent).catch((err) => {
|
|
104
|
+
return `Texts of combined length ${combinedText.length} could not be summarized due to error: ${err.message}`;
|
|
105
|
+
});
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
// Otherwise summarize each text individually, then combine
|
|
109
|
+
console.log(
|
|
110
|
+
"summarizeTexts: exceeds context window, summarizing texts individually"
|
|
111
|
+
);
|
|
80
112
|
const summaries = [];
|
|
81
113
|
for (const text of texts) {
|
|
82
114
|
const content = template.replaceAll("{text}", text);
|
|
@@ -94,7 +126,6 @@ export async function summarizeTexts(
|
|
|
94
126
|
}
|
|
95
127
|
|
|
96
128
|
// Otherwise form a final summary of the pieces
|
|
97
|
-
|
|
98
129
|
const finalPrompt =
|
|
99
130
|
`Generate a final output for this prompt ${template} with these incremental summaries: ` +
|
|
100
131
|
summaries.join("\n\n");
|
package/src/config.ts
CHANGED
|
@@ -316,10 +316,34 @@ export async function loadPrompt(promptName: string) {
|
|
|
316
316
|
return "";
|
|
317
317
|
}
|
|
318
318
|
|
|
319
|
-
const
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
);
|
|
319
|
+
const promptsDir = config.promptsDir || ".knowhow/prompts";
|
|
320
|
+
|
|
321
|
+
// Try to find the prompt as a file in promptsDir (with .mdx extension)
|
|
322
|
+
const promptFilePath = path.join(promptsDir, `${promptName}.mdx`);
|
|
323
|
+
if (fs.existsSync(promptFilePath)) {
|
|
324
|
+
const prompt = await readFile(promptFilePath, "utf8");
|
|
325
|
+
return ensureTextPlaceholder(prompt);
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
// Try as a direct file path (in case promptName is a path or has its own extension)
|
|
329
|
+
if (fs.existsSync(promptName)) {
|
|
330
|
+
const prompt = await readFile(promptName, "utf8");
|
|
331
|
+
return ensureTextPlaceholder(prompt);
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
// Otherwise treat promptName itself as the prompt string
|
|
335
|
+
return ensureTextPlaceholder(promptName);
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
/**
|
|
339
|
+
* Ensures that the prompt contains a {text} placeholder.
|
|
340
|
+
* If it doesn't, appends \n\n{text} to the end so that
|
|
341
|
+
* the input text is included when the prompt is rendered.
|
|
342
|
+
*/
|
|
343
|
+
function ensureTextPlaceholder(prompt: string): string {
|
|
344
|
+
if (!prompt.includes("{text}")) {
|
|
345
|
+
return `${prompt}\n\n{text}`;
|
|
346
|
+
}
|
|
323
347
|
return prompt;
|
|
324
348
|
}
|
|
325
349
|
|
package/src/index.ts
CHANGED
|
@@ -154,12 +154,32 @@ export async function upload() {
|
|
|
154
154
|
}
|
|
155
155
|
}
|
|
156
156
|
|
|
157
|
+
/**
|
|
158
|
+
* Normalizes an input pattern to a valid glob pattern.
|
|
159
|
+
* Supports:
|
|
160
|
+
* - Standard glob patterns (e.g. "src/**\/*.ts")
|
|
161
|
+
* - Brace expansion (e.g. "{src/a.ts,src/b.ts}")
|
|
162
|
+
* - Comma-separated file paths (e.g. "src/a.ts,src/b.ts") — auto-converted to brace expansion
|
|
163
|
+
*/
|
|
164
|
+
function normalizeInputPattern(input: string): string {
|
|
165
|
+
// If it already has braces or glob chars other than comma, use as-is
|
|
166
|
+
if (input.includes("{") || input.includes("*") || input.includes("?")) {
|
|
167
|
+
return input;
|
|
168
|
+
}
|
|
169
|
+
// If it contains commas, treat as comma-separated list and wrap in braces
|
|
170
|
+
if (input.includes(",")) {
|
|
171
|
+
const parts = input.split(",").map((p) => p.trim());
|
|
172
|
+
return `{${parts.join(",")}}`;
|
|
173
|
+
}
|
|
174
|
+
return input;
|
|
175
|
+
}
|
|
176
|
+
|
|
157
177
|
export async function generate(): Promise<void> {
|
|
158
178
|
const config = await getConfig();
|
|
159
179
|
for (const source of config.sources) {
|
|
160
180
|
console.log("Generating", source.input, "to", source.output);
|
|
161
181
|
if (source.kind === "file" || !source.kind) {
|
|
162
|
-
const files = globSync(source.input);
|
|
182
|
+
const files = globSync(normalizeInputPattern(source.input));
|
|
163
183
|
const prompt = await loadPrompt(source.prompt);
|
|
164
184
|
|
|
165
185
|
if (source.output.endsWith("/")) {
|
|
@@ -205,7 +225,7 @@ async function handleAllKindsGeneration(source: GenerationSource) {
|
|
|
205
225
|
|
|
206
226
|
async function handleFileKindGeneration(source: GenerationSource) {
|
|
207
227
|
const prompt = await loadPrompt(source.prompt);
|
|
208
|
-
const files = globSync(source.input);
|
|
228
|
+
const files = globSync(normalizeInputPattern(source.input));
|
|
209
229
|
console.log("Analyzing files: ", files);
|
|
210
230
|
|
|
211
231
|
if (source.output.endsWith("/")) {
|
|
@@ -131,7 +131,7 @@ export class TokenCompressor implements JsonCompressorStorage {
|
|
|
131
131
|
200
|
|
132
132
|
)}...\n[Use ${
|
|
133
133
|
this.toolName
|
|
134
|
-
} tool with key "${firstKey}" to retrieve content. Follow NEXT_CHUNK_KEY references for complete content]`;
|
|
134
|
+
} tool with key "${firstKey}" to retrieve content. Follow NEXT_CHUNK_KEY references for complete content]\n[TIP: If this is JSON data, prefer using the jqToolResponse tool with the toolCallId instead — it parses ._data automatically and lets you filter/search without repeated expandTokens calls]`;
|
|
135
135
|
}
|
|
136
136
|
|
|
137
137
|
/**
|
|
@@ -413,7 +413,7 @@ export const expandTokensDefinition: Tool = {
|
|
|
413
413
|
function: {
|
|
414
414
|
name: "expandTokens",
|
|
415
415
|
description:
|
|
416
|
-
"Retrieve a chunk of compressed data that was stored during message processing. The returned content may contain a `NEXT_CHUNK_KEY` to retrieve subsequent chunks.",
|
|
416
|
+
"Retrieve a chunk of compressed data that was stored during message processing. The returned content may contain a `NEXT_CHUNK_KEY` to retrieve subsequent chunks. NOTE: If the compressed data is JSON (e.g. a tool response), prefer using jqToolResponse instead — it lets you query the data directly using JQ without repeatedly calling expandTokens. Use expandTokens only for plain text/string data or when you need the raw content.",
|
|
417
417
|
parameters: {
|
|
418
418
|
type: "object",
|
|
419
419
|
positional: true,
|