screenpipe-mcp 0.10.0 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.js +499 -59
  2. package/package.json +1 -1
  3. package/src/index.ts +513 -58
package/dist/index.js CHANGED
@@ -64,63 +64,67 @@ const server = new index_js_1.Server({
  },
  });
  // ---------------------------------------------------------------------------
- // Tools — minimal descriptions, no behavioral guidance (that belongs in resources)
+ // Tools
  // ---------------------------------------------------------------------------
  const TOOLS = [
  {
  name: "search-content",
  description: "Search screen text, audio transcriptions, input events, and memories. " +
- "Returns timestamped results with app context. Call with no params for recent activity.",
- annotations: { title: "Search Content", readOnlyHint: true },
+ "Returns timestamped results with app context. " +
+ "IMPORTANT: prefer activity-summary for broad questions ('what was I doing?'). " +
+ "Use search-content only when you need specific text/content. " +
+ "Start with limit=5, increase only if needed. Results can be large — use max_content_length=500 to truncate.",
+ annotations: { title: "Search Content", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
  q: {
  type: "string",
- description: "Full-text search query. Omit to return all content in time range.",
+ description: "Full-text search query. Omit to return all content in time range. Avoid for audio — transcriptions are noisy, q filters too aggressively.",
  },
  content_type: {
  type: "string",
  enum: ["all", "ocr", "audio", "input", "accessibility", "memory"],
- description: "Filter by content type. Default: 'all'.",
+ description: "Filter by content type. 'accessibility' is preferred for screen text (OS-native). 'ocr' is fallback for apps without accessibility support. Default: 'all'.",
  default: "all",
  },
- limit: { type: "integer", description: "Max results (default 10)", default: 10 },
- offset: { type: "integer", description: "Pagination offset", default: 0 },
+ limit: { type: "integer", description: "Max results (default 10, max 20). Start with 5 for exploration.", default: 10 },
+ offset: { type: "integer", description: "Pagination offset. Use when results say 'use offset=N for more'.", default: 0 },
  start_time: {
  type: "string",
- description: "ISO 8601 UTC or relative (e.g. '2h ago')",
+ description: "ISO 8601 UTC or relative (e.g. '2h ago', '1d ago'). Always provide to avoid scanning entire history.",
  },
  end_time: {
  type: "string",
- description: "ISO 8601 UTC or relative (e.g. 'now')",
+ description: "ISO 8601 UTC or relative (e.g. 'now'). Defaults to now.",
  },
- app_name: { type: "string", description: "Filter by app name" },
- window_name: { type: "string", description: "Filter by window title" },
- min_length: { type: "integer", description: "Min content length" },
- max_length: { type: "integer", description: "Max content length" },
+ app_name: { type: "string", description: "Filter by app name (e.g. 'Google Chrome', 'Slack', 'zoom.us'). Case-sensitive." },
+ window_name: { type: "string", description: "Filter by window title substring" },
+ min_length: { type: "integer", description: "Min content length in characters" },
+ max_length: { type: "integer", description: "Max content length in characters" },
  include_frames: {
  type: "boolean",
- description: "Include base64 screenshots (OCR only)",
+ description: "Include base64 screenshots (OCR only). Warning: large response.",
  default: false,
  },
- speaker_ids: { type: "string", description: "Comma-separated speaker IDs" },
- speaker_name: { type: "string", description: "Filter audio by speaker name" },
+ speaker_ids: { type: "string", description: "Comma-separated speaker IDs to filter audio" },
+ speaker_name: { type: "string", description: "Filter audio by speaker name (case-insensitive partial match)" },
  max_content_length: {
  type: "integer",
- description: "Truncate each result via middle-truncation",
+ description: "Truncate each result's text via middle-truncation. Use 200-500 to keep responses compact.",
  },
  },
  },
  },
  {
  name: "list-meetings",
- description: "List detected meetings (Zoom, Teams, Meet, etc.) with duration, app, and attendees.",
- annotations: { title: "List Meetings", readOnlyHint: true },
+ description: "List detected meetings (Zoom, Teams, Meet, etc.) with duration, app, and attendees. " +
+ "Only available when screenpipe runs in smart transcription mode.",
+ annotations: { title: "List Meetings", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- start_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ start_time: { type: "string", description: "ISO 8601 UTC or relative (e.g. '1d ago')" },
  end_time: { type: "string", description: "ISO 8601 UTC or relative" },
  limit: { type: "integer", description: "Max results (default 20)", default: 20 },
  offset: { type: "integer", description: "Pagination offset", default: 0 },
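The tightened schema above is easiest to see from the caller's side. A minimal sketch using the TypeScript MCP SDK client; the launch command and query values are illustrative, not part of the package:

```ts
// Hypothetical client calling search-content with the compact defaults
// the 0.12.0 descriptions recommend. Assumes @modelcontextprotocol/sdk.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const client = new Client({ name: "example-client", version: "0.0.1" }, { capabilities: {} });
await client.connect(new StdioClientTransport({ command: "node", args: ["dist/index.js"] }));

const result = await client.callTool({
  name: "search-content",
  arguments: {
    q: "invoice",                  // omit for audio; transcriptions are noisy
    content_type: "accessibility", // preferred over ocr where supported
    start_time: "2h ago",          // always bound the scan
    limit: 5,                      // start small, paginate with offset
    max_content_length: 500,       // middle-truncate long results
  },
});
```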
@@ -130,14 +134,15 @@ const TOOLS = [
  {
  name: "activity-summary",
  description: "Lightweight activity overview (~200-500 tokens): app usage with active minutes, audio speakers, recent texts. " +
- "Use for 'how long on X?', 'which apps?', 'what was I doing?' questions.",
- annotations: { title: "Activity Summary", readOnlyHint: true },
+ "USE THIS FIRST for broad questions: 'what was I doing?', 'how long on X?', 'which apps?'. " +
+ "Only escalate to search-content if you need specific text content.",
+ annotations: { title: "Activity Summary", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- start_time: { type: "string", description: "ISO 8601 UTC or relative" },
- end_time: { type: "string", description: "ISO 8601 UTC or relative" },
- app_name: { type: "string", description: "Optional app name filter" },
+ start_time: { type: "string", description: "ISO 8601 UTC or relative (e.g. '3h ago')" },
+ end_time: { type: "string", description: "ISO 8601 UTC or relative (e.g. 'now')" },
+ app_name: { type: "string", description: "Optional app name filter to focus on one app" },
  },
  required: ["start_time", "end_time"],
  },
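activity-summary is now positioned as the entry point, so a broad question becomes one cheap call before any search. Continuing the hypothetical client from above:

```ts
// Step 1 of the progressive-disclosure flow: a ~200-500 token overview.
const summary = await client.callTool({
  name: "activity-summary",
  arguments: { start_time: "3h ago", end_time: "now" },
});
// Escalate to search-content only if specific text is needed.
```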
@@ -145,85 +150,89 @@ const TOOLS = [
  {
  name: "search-elements",
  description: "Search UI elements (buttons, links, text fields) from the accessibility tree. " +
- "Lighter than search-content for targeted UI lookups.",
- annotations: { title: "Search Elements", readOnlyHint: true },
+ "Lighter than search-content for targeted UI lookups. " +
+ "Use when you need to find specific UI controls or page structure, not general content.",
+ annotations: { title: "Search Elements", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
  q: { type: "string", description: "Full-text search on element text" },
- frame_id: { type: "integer", description: "Filter to specific frame" },
+ frame_id: { type: "integer", description: "Filter to specific frame ID from search results" },
  source: {
  type: "string",
  enum: ["accessibility", "ocr"],
- description: "Element source filter",
+ description: "Element source. 'accessibility' is preferred (OS-native tree). 'ocr' for apps without a11y.",
  },
- role: { type: "string", description: "Element role (e.g. AXButton, AXLink)" },
+ role: { type: "string", description: "Element role filter (e.g. 'AXButton', 'AXLink', 'AXTextField')" },
  start_time: { type: "string", description: "ISO 8601 UTC or relative" },
  end_time: { type: "string", description: "ISO 8601 UTC or relative" },
  app_name: { type: "string", description: "Filter by app name" },
- limit: { type: "integer", description: "Max results (default 50)", default: 50 },
+ limit: { type: "integer", description: "Max results (default 50). Start with 10-20.", default: 50 },
  offset: { type: "integer", description: "Pagination offset", default: 0 },
  },
  },
  },
  {
  name: "frame-context",
- description: "Get accessibility text, parsed tree nodes, and URLs for a specific frame ID.",
- annotations: { title: "Frame Context", readOnlyHint: true },
+ description: "Get full accessibility text, parsed tree nodes, and URLs for a specific frame ID. " +
+ "Use after search-content to get detailed context for a specific moment.",
+ annotations: { title: "Frame Context", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- frame_id: { type: "integer", description: "Frame ID from search results" },
+ frame_id: { type: "integer", description: "Frame ID from search-content results (content.frame_id field)" },
  },
  required: ["frame_id"],
  },
  },
  {
  name: "export-video",
- description: "Export an MP4 video of screen recordings for a time range.",
- annotations: { title: "Export Video", destructiveHint: true },
+ description: "Export an MP4 video of screen recordings for a time range. " +
+ "Returns the file path. Can take a few minutes for long ranges.",
+ annotations: { title: "Export Video", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
  inputSchema: {
  type: "object",
  properties: {
  start_time: { type: "string", description: "ISO 8601 UTC or relative" },
  end_time: { type: "string", description: "ISO 8601 UTC or relative" },
- fps: { type: "number", description: "Output FPS (default 1.0)", default: 1.0 },
+ fps: { type: "number", description: "Output FPS (default 1.0). Higher = smoother but larger file.", default: 1.0 },
  },
  required: ["start_time", "end_time"],
  },
  },
  {
  name: "update-memory",
- description: "Create, update, or delete a persistent memory (facts, preferences, decisions). " +
- "Retrieve memories via search-content with content_type='memory'.",
- annotations: { title: "Update Memory", destructiveHint: false },
+ description: "Create, update, or delete a persistent memory (facts, preferences, decisions the user wants to remember). " +
+ "To retrieve memories, use search-content with content_type='memory'. " +
+ "To create: provide content + tags. To update: provide id + fields to change. To delete: provide id + delete=true.",
+ annotations: { title: "Update Memory", readOnlyHint: false, destructiveHint: false, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- id: { type: "integer", description: "Memory ID (omit to create new)" },
- content: { type: "string", description: "Memory text" },
- tags: { type: "array", items: { type: "string" }, description: "Categorization tags" },
- importance: { type: "number", description: "0.0-1.0 (default 0.5)" },
- source_context: { type: "object", description: "Optional source data links" },
- delete: { type: "boolean", description: "Delete the memory identified by id" },
+ id: { type: "integer", description: "Memory ID omit to create new, provide to update/delete" },
+ content: { type: "string", description: "Memory text (required for creation)" },
+ tags: { type: "array", items: { type: "string" }, description: "Categorization tags (e.g. ['work', 'project-x'])" },
+ importance: { type: "number", description: "0.0 (trivial) to 1.0 (critical). Default 0.5." },
+ source_context: { type: "object", description: "Optional metadata linking to source (app, timestamp, etc.)" },
+ delete: { type: "boolean", description: "Set true to delete the memory identified by id" },
  },
  },
  },
  {
  name: "send-notification",
- description: "Send a notification to the screenpipe desktop UI with optional action buttons. " +
- "Actions can re-run pipes with context, call API endpoints, or open deep links.",
- annotations: { title: "Send Notification", destructiveHint: false },
+ description: "Send a notification to the screenpipe desktop UI. " +
+ "Use to alert the user about findings, completed tasks, or actions needing attention.",
+ annotations: { title: "Send Notification", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
  inputSchema: {
  type: "object",
  properties: {
- title: { type: "string", description: "Notification title" },
+ title: { type: "string", description: "Notification title (short, descriptive)" },
  body: { type: "string", description: "Notification body (markdown supported)" },
- pipe_name: { type: "string", description: "Name of the pipe sending this notification" },
- timeout_secs: { type: "integer", description: "Auto-dismiss seconds (default 20)", default: 20 },
+ pipe_name: { type: "string", description: "Name of the pipe/tool sending this notification" },
+ timeout_secs: { type: "integer", description: "Auto-dismiss after N seconds (default 20). Use 0 for persistent.", default: 20 },
  actions: {
  type: "array",
- description: "Up to 5 action buttons",
+ description: "Up to 5 action buttons. Each needs id, label, type ('pipe'|'api'|'deeplink'|'dismiss').",
  items: {
  type: "object",
  properties: {
@@ -232,6 +241,7 @@ const TOOLS = [
  type: { type: "string", enum: ["pipe", "api", "deeplink", "dismiss"], description: "Action type" },
  pipe: { type: "string", description: "Pipe name to run (type=pipe)" },
  context: { type: "object", description: "Context passed to pipe (type=pipe)" },
+ open_in_chat: { type: "boolean", description: "Open pipe run in chat UI instead of background (type=pipe)" },
  url: { type: "string", description: "URL for api/deeplink actions" },
  },
  required: ["id", "label", "type"],
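At call time the items schema above is just an array of plain objects. A hypothetical actions payload exercising the new open_in_chat flag (the pipe name and URL are illustrative):

```ts
// Up to 5 buttons; each needs id, label, and type.
const actions = [
  { id: "review", label: "Review in chat", type: "pipe", pipe: "meeting-notes",
    context: { meeting_id: 42 }, open_in_chat: true },
  { id: "timeline", label: "Open timeline", type: "deeplink", url: "screenpipe://timeline" },
  { id: "later", label: "Dismiss", type: "dismiss" },
];
```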
@@ -241,6 +251,162 @@ const TOOLS = [
  required: ["title", "pipe_name"],
  },
  },
+ {
+ name: "health-check",
+ description: "Check if screenpipe is running and healthy. Returns recording status, frame/audio stats, timestamps.",
+ annotations: { title: "Health Check", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: { type: "object", properties: {} },
+ },
+ {
+ name: "list-audio-devices",
+ description: "List available audio input/output devices for recording.",
+ annotations: { title: "List Audio Devices", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: { type: "object", properties: {} },
+ },
+ {
+ name: "list-monitors",
+ description: "List available monitors/screens for capture.",
+ annotations: { title: "List Monitors", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: { type: "object", properties: {} },
+ },
+ {
+ name: "add-tags",
+ description: "Add tags to a content item (vision frame or audio chunk) for organization and retrieval.",
+ annotations: { title: "Add Tags", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
+ inputSchema: {
+ type: "object",
+ properties: {
+ content_type: { type: "string", enum: ["vision", "audio"], description: "Type of content to tag" },
+ id: { type: "integer", description: "Content item ID" },
+ tags: { type: "array", items: { type: "string" }, description: "Tags to add" },
+ },
+ required: ["content_type", "id", "tags"],
+ },
+ },
+ {
+ name: "search-speakers",
+ description: "Search for speakers by name prefix. Returns speaker ID, name, and metadata.",
+ annotations: { title: "Search Speakers", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ name: { type: "string", description: "Speaker name prefix to search for (case-insensitive)" },
+ },
+ },
+ },
+ {
+ name: "list-unnamed-speakers",
+ description: "List speakers that haven't been named yet. Useful for speaker identification workflow.",
+ annotations: { title: "List Unnamed Speakers", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ limit: { type: "integer", description: "Max results (default 10)", default: 10 },
+ offset: { type: "integer", description: "Pagination offset", default: 0 },
+ },
+ },
+ },
+ {
+ name: "update-speaker",
+ description: "Rename a speaker or update their metadata.",
+ annotations: { title: "Update Speaker", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
+ inputSchema: {
+ type: "object",
+ properties: {
+ id: { type: "integer", description: "Speaker ID" },
+ name: { type: "string", description: "New speaker name" },
+ metadata: { type: "string", description: "JSON metadata string" },
+ },
+ required: ["id"],
+ },
+ },
+ {
+ name: "merge-speakers",
+ description: "Merge two speakers into one (e.g. when the same person was detected as different speakers).",
+ annotations: { title: "Merge Speakers", readOnlyHint: false, destructiveHint: true, openWorldHint: false },
+ inputSchema: {
+ type: "object",
+ properties: {
+ speaker_to_keep: { type: "integer", description: "Speaker ID to keep" },
+ speaker_to_merge: { type: "integer", description: "Speaker ID to merge into the kept one" },
+ },
+ required: ["speaker_to_keep", "speaker_to_merge"],
+ },
+ },
+ {
+ name: "start-meeting",
+ description: "Manually start a meeting recording session.",
+ annotations: { title: "Start Meeting", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
+ inputSchema: {
+ type: "object",
+ properties: {
+ app: { type: "string", description: "App name (default 'manual')", default: "manual" },
+ title: { type: "string", description: "Meeting title" },
+ attendees: { type: "string", description: "Comma-separated attendee names" },
+ },
+ },
+ },
+ {
+ name: "stop-meeting",
+ description: "Stop the current manual meeting recording session.",
+ annotations: { title: "Stop Meeting", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
+ inputSchema: { type: "object", properties: {} },
+ },
+ {
+ name: "get-meeting",
+ description: "Get details of a specific meeting by ID, including transcription and attendees.",
+ annotations: { title: "Get Meeting", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ id: { type: "integer", description: "Meeting ID" },
+ },
+ required: ["id"],
+ },
+ },
+ {
+ name: "keyword-search",
+ description: "Fast keyword search using FTS index. Faster than search-content for exact keyword matching. " +
+ "Returns frame IDs and matched text.",
+ annotations: { title: "Keyword Search", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ q: { type: "string", description: "Keyword search query" },
+ content_type: { type: "string", enum: ["ocr", "audio", "all"], description: "Content type filter", default: "all" },
+ start_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ end_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ app_name: { type: "string", description: "Filter by app name" },
+ limit: { type: "integer", description: "Max results (default 20)", default: 20 },
+ offset: { type: "integer", description: "Pagination offset", default: 0 },
+ },
+ required: ["q"],
+ },
+ },
+ {
+ name: "get-frame-elements",
+ description: "Get all UI elements for a specific frame. More targeted than search-elements when you already have a frame_id.",
+ annotations: { title: "Get Frame Elements", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ frame_id: { type: "integer", description: "Frame ID" },
+ },
+ required: ["frame_id"],
+ },
+ },
+ {
+ name: "control-recording",
+ description: "Start or stop audio/screen recording. Use to pause/resume capture.",
+ annotations: { title: "Control Recording", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
+ inputSchema: {
+ type: "object",
+ properties: {
+ action: { type: "string", enum: ["start-audio", "stop-audio"], description: "Recording action" },
+ },
+ required: ["action"],
+ },
+ },
  ];
  server.setRequestHandler(types_js_1.ListToolsRequestSchema, async () => {
  return { tools: TOOLS };
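Most of the tools added in 0.12.0 are thin wrappers over existing HTTP endpoints, so multi-step workflows reduce to chains of tool calls. A sketch of the speaker-identification flow the descriptions hint at, with illustrative IDs:

```ts
// Confirm capture is running, then name and de-duplicate speakers.
await client.callTool({ name: "health-check", arguments: {} });
const unnamed = await client.callTool({ name: "list-unnamed-speakers", arguments: { limit: 5 } });
// ...inspect transcripts for each unnamed speaker, then:
await client.callTool({ name: "update-speaker", arguments: { id: 7, name: "Ada" } });
await client.callTool({ name: "merge-speakers", arguments: { speaker_to_keep: 7, speaker_to_merge: 12 } });
```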
@@ -255,6 +421,12 @@ const RESOURCES = [
  description: "Current date/time, timezone, and pre-computed timestamps for common time ranges",
  mimeType: "application/json",
  },
+ {
+ uri: "screenpipe://guide",
+ name: "Usage Guide",
+ description: "How to use screenpipe tools effectively — search strategy, progressive disclosure, and common patterns",
+ mimeType: "text/markdown",
+ },
  ];
  server.setRequestHandler(types_js_1.ListResourcesRequestSchema, async () => {
  return { resources: RESOURCES };
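Clients pick up the new guide like any other resource. A sketch, again assuming the SDK client from earlier:

```ts
// The guide is plain markdown; read it once before the first tool call.
const guide = await client.readResource({ uri: "screenpipe://guide" });
console.log(guide.contents[0].text);
```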
@@ -291,6 +463,51 @@ server.setRequestHandler(types_js_1.ReadResourceRequestSchema, async (request) =
  ],
  };
  }
+ if (uri === "screenpipe://guide") {
+ return {
+ contents: [
+ {
+ uri,
+ mimeType: "text/markdown",
+ text: `# Screenpipe Usage Guide
+
+ ## Progressive Disclosure — start light, escalate only when needed
+
+ | Step | Tool | When to use |
+ |------|------|-------------|
+ | 1 | activity-summary | Broad questions: "what was I doing?", "which apps?", "how long on X?" |
+ | 2 | search-content | Need specific text, transcriptions, or content |
+ | 3 | search-elements | Need UI structure — buttons, links, form fields |
+ | 4 | frame-context | Need full detail for a specific moment (use frame_id from step 2) |
+
+ ## Search Strategy
+
+ - **Always provide start_time** — without it, search scans the entire history
+ - **Start with limit=5** — increase only if you need more results
+ - **Use max_content_length=500** to keep responses compact
+ - **Don't use q for audio** — transcriptions are noisy, q filters too aggressively. Search audio by time range and speaker instead
+ - **app_name is case-sensitive** — use exact names: "Google Chrome" not "chrome"
+ - **content_type=accessibility is preferred** for screen text (OS-native). ocr is fallback for apps without accessibility support
+
+ ## Common Patterns
+
+ - "What was I doing for the last 2 hours?" → activity-summary with start_time='2h ago'
+ - "What did I discuss in my meeting?" → list-meetings to find it, then search-content with audio + that time range
+ - "Find when I was on Twitter" → search-content with app_name='Arc' (or the browser name), q='twitter'
+ - "Remember that I prefer X" → update-memory with content describing the preference
+ - "What do you remember about X?" → search-content with content_type='memory', q='X'
+
+ ## Deep Links
+
+ When referencing specific moments in results, create clickable links:
+ - Frame: [10:30 AM — Chrome](screenpipe://frame/{frame_id}) — use frame_id from search results
+ - Timeline: [meeting at 3pm](screenpipe://timeline?timestamp=2024-01-15T15:00:00Z) — use exact timestamp from results
+ Never fabricate IDs or timestamps — only use values from actual results.
+ `,
+ },
+ ],
+ };
+ }
  throw new Error(`Unknown resource: ${uri}`);
  });
  // ---------------------------------------------------------------------------
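The guide's meeting pattern maps onto two chained calls: find the meeting, then pull its audio for that window. Time values here are illustrative:

```ts
// "What did I discuss in my meeting?" per the guide above.
const meetings = await client.callTool({
  name: "list-meetings",
  arguments: { start_time: "1d ago", end_time: "now", limit: 5 },
});
// ...take the chosen meeting's start/end from the result, then:
const transcript = await client.callTool({
  name: "search-content",
  arguments: {
    content_type: "audio",
    start_time: "2024-01-15T15:00:00Z", // from the list-meetings result
    end_time: "2024-01-15T16:00:00Z",
    limit: 20,
  },
});
```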
@@ -701,23 +918,246 @@ server.setRequestHandler(types_js_1.CallToolRequestSchema, async (request) => {
  case "send-notification": {
  const notifBody = {
  title: args.title,
- pipe_name: args.pipe_name,
+ body: args.body || "",
+ type: "pipe",
  };
- if (args.body)
- notifBody.body = args.body;
  if (args.timeout_secs)
- notifBody.timeout_secs = args.timeout_secs;
+ notifBody.timeout = Number(args.timeout_secs) * 1000;
  if (args.actions)
  notifBody.actions = args.actions;
- const notifResponse = await fetchAPI("/notify", {
+ const notifResponse = await fetch("http://localhost:11435/notify", {
  method: "POST",
+ headers: { "Content-Type": "application/json" },
  body: JSON.stringify(notifBody),
  });
  if (!notifResponse.ok)
  throw new Error(`HTTP error: ${notifResponse.status}`);
  const notifResult = await notifResponse.json();
  return {
- content: [{ type: "text", text: `Notification sent (id: ${notifResult.id})` }],
+ content: [{ type: "text", text: `Notification sent: ${notifResult.message}` }],
+ };
+ }
+ case "health-check": {
+ const response = await fetchAPI("/health");
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const data = await response.json();
+ return {
+ content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
+ };
+ }
+ case "list-audio-devices": {
+ const response = await fetchAPI("/audio/list");
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const devices = await response.json();
+ if (!Array.isArray(devices) || devices.length === 0) {
+ return { content: [{ type: "text", text: "No audio devices found." }] };
+ }
+ const formatted = devices.map((d) => `${d.is_default ? "* " : " "}${d.name}${d.device_type ? ` (${d.device_type})` : ""}`);
+ return {
+ content: [{ type: "text", text: `Audio devices:\n${formatted.join("\n")}` }],
+ };
+ }
+ case "list-monitors": {
+ const response = await fetchAPI("/vision/list");
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const monitors = await response.json();
+ if (!Array.isArray(monitors) || monitors.length === 0) {
+ return { content: [{ type: "text", text: "No monitors found." }] };
+ }
+ const formatted = monitors.map((m) => `${m.is_default ? "* " : " "}Monitor ${m.id}${m.name ? `: ${m.name}` : ""}${m.width ? ` (${m.width}x${m.height})` : ""}`);
+ return {
+ content: [{ type: "text", text: `Monitors:\n${formatted.join("\n")}` }],
+ };
+ }
+ case "add-tags": {
+ const contentType = args.content_type;
+ const id = args.id;
+ const tags = args.tags;
+ if (!contentType || !id || !tags) {
+ return { content: [{ type: "text", text: "Error: content_type, id, and tags are required" }] };
+ }
+ const response = await fetchAPI(`/tags/${contentType}/${id}`, {
+ method: "POST",
+ body: JSON.stringify({ tags }),
+ });
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ return {
+ content: [{ type: "text", text: `Tags added to ${contentType}/${id}: ${tags.join(", ")}` }],
+ };
+ }
+ case "search-speakers": {
+ const nameQuery = args.name;
+ if (!nameQuery) {
+ return { content: [{ type: "text", text: "Error: name is required" }] };
+ }
+ const response = await fetchAPI(`/speakers/search?name=${encodeURIComponent(nameQuery)}`);
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const speakers = await response.json();
+ if (!Array.isArray(speakers) || speakers.length === 0) {
+ return { content: [{ type: "text", text: "No speakers found." }] };
+ }
+ const formatted = speakers.map((s) => `#${s.id} ${s.name}${s.metadata ? ` — ${s.metadata}` : ""}`);
+ return {
+ content: [{ type: "text", text: `Speakers:\n${formatted.join("\n")}` }],
+ };
+ }
+ case "list-unnamed-speakers": {
+ const limit = args.limit || 10;
+ const offset = args.offset || 0;
+ const response = await fetchAPI(`/speakers/unnamed?limit=${limit}&offset=${offset}`);
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const speakers = await response.json();
+ if (!Array.isArray(speakers) || speakers.length === 0) {
+ return { content: [{ type: "text", text: "No unnamed speakers found." }] };
+ }
+ const formatted = speakers.map((s) => `#${s.id} ${s.name}`);
+ return {
+ content: [{ type: "text", text: `Unnamed speakers:\n${formatted.join("\n")}` }],
+ };
+ }
+ case "update-speaker": {
+ const speakerId = args.id;
+ if (!speakerId) {
+ return { content: [{ type: "text", text: "Error: id is required" }] };
+ }
+ const body = { id: speakerId };
+ if (args.name !== undefined)
+ body.name = args.name;
+ if (args.metadata !== undefined)
+ body.metadata = args.metadata;
+ const response = await fetchAPI("/speakers/update", {
+ method: "POST",
+ body: JSON.stringify(body),
+ });
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ return {
+ content: [{ type: "text", text: `Speaker ${speakerId} updated.` }],
+ };
+ }
+ case "merge-speakers": {
+ const keepId = args.speaker_to_keep;
+ const mergeId = args.speaker_to_merge;
+ if (!keepId || !mergeId) {
+ return { content: [{ type: "text", text: "Error: speaker_to_keep and speaker_to_merge are required" }] };
+ }
+ const response = await fetchAPI("/speakers/merge", {
+ method: "POST",
+ body: JSON.stringify({ speaker_to_keep: keepId, speaker_to_merge: mergeId }),
+ });
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ return {
+ content: [{ type: "text", text: `Merged speaker ${mergeId} into ${keepId}.` }],
+ };
+ }
+ case "start-meeting": {
+ const body = {};
+ if (args.app)
+ body.app = args.app;
+ if (args.title)
+ body.title = args.title;
+ if (args.attendees)
+ body.attendees = args.attendees;
+ const response = await fetchAPI("/meetings/start", {
+ method: "POST",
+ body: JSON.stringify(body),
+ });
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const meeting = await response.json();
+ return {
+ content: [{ type: "text", text: `Meeting started (id: ${meeting.id || "ok"}).` }],
+ };
+ }
+ case "stop-meeting": {
+ const response = await fetchAPI("/meetings/stop", { method: "POST" });
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ return {
+ content: [{ type: "text", text: "Meeting stopped." }],
+ };
+ }
+ case "get-meeting": {
+ const meetingId = args.id;
+ if (!meetingId) {
+ return { content: [{ type: "text", text: "Error: id is required" }] };
+ }
+ const response = await fetchAPI(`/meetings/${meetingId}`);
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const meeting = await response.json();
+ return {
+ content: [{ type: "text", text: JSON.stringify(meeting, null, 2) }],
+ };
+ }
+ case "keyword-search": {
+ const params = new URLSearchParams();
+ for (const [key, value] of Object.entries(args)) {
+ if (value !== null && value !== undefined) {
+ params.append(key, String(value));
+ }
+ }
+ const response = await fetchAPI(`/search/keyword?${params.toString()}`);
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const data = await response.json();
+ const results = data.data || [];
+ if (results.length === 0) {
+ return { content: [{ type: "text", text: "No keyword search results found." }] };
+ }
+ const formatted = results.map((r) => {
+ const content = r.content;
+ return `[${r.type}] ${content?.app_name || "?"} | ${content?.timestamp || ""}\n${content?.text || content?.transcription || ""}`;
+ });
+ return {
+ content: [{ type: "text", text: `Results: ${results.length}\n\n${formatted.join("\n---\n")}` }],
+ };
+ }
+ case "get-frame-elements": {
+ const frameId = args.frame_id;
+ if (!frameId) {
+ return { content: [{ type: "text", text: "Error: frame_id is required" }] };
+ }
+ const response = await fetchAPI(`/frames/${frameId}/elements`);
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ const elements = await response.json();
+ if (!Array.isArray(elements) || elements.length === 0) {
+ return { content: [{ type: "text", text: `No elements found for frame ${frameId}.` }] };
+ }
+ const formatted = elements.map((e) => {
+ const indent = " ".repeat(Math.min(e.depth, 5));
+ return `${indent}[${e.source}:${e.role}] ${e.text || "(no text)"}`;
+ });
+ return {
+ content: [{ type: "text", text: `Frame ${frameId} elements (${elements.length}):\n${formatted.join("\n")}` }],
+ };
+ }
+ case "control-recording": {
+ const action = args.action;
+ if (!action) {
+ return { content: [{ type: "text", text: "Error: action is required" }] };
+ }
+ let endpoint;
+ if (action === "start-audio")
+ endpoint = "/audio/start";
+ else if (action === "stop-audio")
+ endpoint = "/audio/stop";
+ else {
+ return { content: [{ type: "text", text: `Error: unknown action '${action}'` }] };
+ }
+ const response = await fetchAPI(endpoint, { method: "POST" });
+ if (!response.ok)
+ throw new Error(`HTTP error: ${response.status}`);
+ return {
+ content: [{ type: "text", text: `Recording action '${action}' executed.` }],
  };
  }
  default:
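For reference, the payload the rewritten send-notification handler now POSTs to the desktop UI on port 11435 looks like this. Field names are taken from the handler above; the values (and the actions array sketched earlier) are illustrative:

```ts
// body and type are always set; timeout_secs becomes a millisecond timeout.
const notifBody = {
  title: "Summary ready",
  body: "Found 3 action items in your last meeting", // markdown supported
  type: "pipe",
  timeout: 20 * 1000, // from timeout_secs: 20
  actions,            // optional, up to 5 buttons
};
```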