screenpipe-mcp 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  2. package/package.json +1 -1
  3. package/src/index.ts +514 -1413
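
Before the line-by-line diff: the src/index.ts rewrite drops the MCP prompts capability and the embedded reference-doc resources, renames BASE_TOOLS to TOOLS, and adds update-memory and send-notification alongside reworked search tools. As a quick orientation, here is a minimal client sketch (not part of the package) for inspecting the new tool surface; it assumes the @modelcontextprotocol/sdk client API matching the server imports visible below, and the npx launch command is likewise an assumption:

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function main() {
  // Launch the server over stdio; the exact command is an assumption.
  const transport = new StdioClientTransport({
    command: "npx",
    args: ["-y", "screenpipe-mcp"],
  });
  const client = new Client({ name: "diff-inspector", version: "0.0.0" });
  await client.connect(transport);

  // 0.11.0 advertises tools and resources only; the prompts capability is gone.
  const { tools } = await client.listTools();
  console.log(tools.map((t) => t.name).join(", "));
  // Expected from this diff: search-content, list-meetings, activity-summary,
  // search-elements, frame-context, export-video, update-memory, send-notification.

  await client.close();
}

main().catch(console.error);
```
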
package/src/index.ts CHANGED
@@ -8,8 +8,6 @@ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"
  import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
- ListPromptsRequestSchema,
- GetPromptRequestSchema,
  ListResourcesRequestSchema,
  ReadResourceRequestSchema,
  Tool,
@@ -19,20 +17,6 @@ import * as fs from "fs";
  import * as path from "path";
  import * as os from "os";

- // Helper to get current date in ISO format
- function getCurrentDateInfo(): { isoDate: string; localDate: string } {
- const now = new Date();
- return {
- isoDate: now.toISOString(),
- localDate: now.toLocaleDateString("en-US", {
- weekday: "long",
- year: "numeric",
- month: "long",
- day: "numeric",
- }),
- };
- }
-
  // Parse command line arguments
  const args = process.argv.slice(2);
  let port = 3030;
@@ -48,1234 +32,330 @@ const SCREENPIPE_API = `http://localhost:${port}`;
  const server = new Server(
  {
  name: "screenpipe",
- version: "0.8.5",
+ version: "0.9.0",
  },
  {
  capabilities: {
  tools: {},
- prompts: {},
  resources: {},
  },
  }
  );

- // Tool definitions
- const BASE_TOOLS: Tool[] = [
+ // ---------------------------------------------------------------------------
+ // Tools
+ // ---------------------------------------------------------------------------
+ const TOOLS: Tool[] = [
  {
  name: "search-content",
  description:
- "Search screenpipe's recorded content: screen text (accessibility APIs, with OCR fallback), audio transcriptions, and UI elements. " +
+ "Search screen text, audio transcriptions, input events, and memories. " +
  "Returns timestamped results with app context. " +
- "Call with no parameters to get recent activity. " +
- "Use the 'screenpipe://context' resource for current time when building time-based queries.\n\n" +
- "WHEN TO USE WHICH content_type:\n" +
- "- For meetings/calls/conversations: content_type='audio', do NOT use q param (transcriptions are noisy, q filters too aggressively)\n" +
- "- For screen text/reading: content_type='all' or 'accessibility'\n" +
- "- For time spent/app usage questions: use activity-summary tool instead (this tool returns content, not time stats)\n\n" +
- "SEARCH STRATEGY: First search with ONLY time params (start_time/end_time) — no q, no app_name, no content_type. " +
- "This gives ground truth of what's recorded. Scan results to find correct app_name values, then narrow with filters using exact observed values. " +
- "App names are case-sensitive (e.g. 'Discord' vs 'Discord.exe'). " +
- "The q param searches captured text, NOT app names. NEVER report 'no data' after one filtered search — verify with unfiltered time-only search first.\n\n" +
- "DEEP LINKS: When referencing specific moments, create clickable links using IDs from search results:\n" +
- "- OCR results (PREFERRED): [10:30 AM — Chrome](screenpipe://frame/12345) — use content.frame_id from the result\n" +
- "- Audio results: [meeting at 3pm](screenpipe://timeline?timestamp=2024-01-15T15:00:00Z) — use exact timestamp from result\n" +
- "NEVER fabricate frame IDs or timestamps — only use values from actual search results.",
- annotations: {
- title: "Search Content",
- readOnlyHint: true,
- },
+ "IMPORTANT: prefer activity-summary for broad questions ('what was I doing?'). " +
+ "Use search-content only when you need specific text/content. " +
+ "Start with limit=5, increase only if needed. Results can be large — use max_content_length=500 to truncate.",
+ annotations: { title: "Search Content", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
  q: {
  type: "string",
- description: "Search query (full-text search on captured text). Optional - omit to return all content in time range. IMPORTANT: Do NOT use q for audio/meeting searches — transcriptions are noisy and q filters too aggressively. Only use q when searching for specific text the user saw on screen.",
+ description: "Full-text search query. Omit to return all content in time range. Avoid for audio — transcriptions are noisy, q filters too aggressively.",
  },
  content_type: {
  type: "string",
- enum: ["all", "ocr", "audio", "input", "accessibility"],
- description: "Content type filter: 'audio' (transcriptions — use for meetings/calls/conversations), 'accessibility' (accessibility tree text, preferred for screen content), 'ocr' (screen text via OCR, legacy fallback), 'input' (clicks, keystrokes, clipboard, app switches), 'all'. Default: 'all'. For meeting/call queries, ALWAYS use 'audio'.",
+ enum: ["all", "ocr", "audio", "input", "accessibility", "memory"],
+ description: "Filter by content type. 'accessibility' is preferred for screen text (OS-native). 'ocr' is fallback for apps without accessibility support. Default: 'all'.",
  default: "all",
  },
- limit: {
- type: "integer",
- description: "Max results. Default: 10",
- default: 10,
- },
- offset: {
- type: "integer",
- description: "Skip N results for pagination. Default: 0",
- default: 0,
- },
+ limit: { type: "integer", description: "Max results (default 10, max 20). Start with 5 for exploration.", default: 10 },
+ offset: { type: "integer", description: "Pagination offset. Use when results say 'use offset=N for more'.", default: 0 },
  start_time: {
  type: "string",
- format: "date-time",
- description: "Start time: ISO 8601 UTC (e.g., 2024-01-15T10:00:00Z) or relative (e.g., '16h ago', '2d ago', 'now')",
+ description: "ISO 8601 UTC or relative (e.g. '2h ago', '1d ago'). Always provide to avoid scanning entire history.",
  },
  end_time: {
  type: "string",
- format: "date-time",
- description: "End time: ISO 8601 UTC (e.g., 2024-01-15T18:00:00Z) or relative (e.g., 'now', '1h ago')",
- },
- app_name: {
- type: "string",
- description: "Filter by app (e.g., 'Google Chrome', 'Slack', 'zoom.us')",
- },
- window_name: {
- type: "string",
- description: "Filter by window title",
- },
- min_length: {
- type: "integer",
- description: "Minimum content length in characters",
- },
- max_length: {
- type: "integer",
- description: "Maximum content length in characters",
+ description: "ISO 8601 UTC or relative (e.g. 'now'). Defaults to now.",
  },
+ app_name: { type: "string", description: "Filter by app name (e.g. 'Google Chrome', 'Slack', 'zoom.us'). Case-sensitive." },
+ window_name: { type: "string", description: "Filter by window title substring" },
+ min_length: { type: "integer", description: "Min content length in characters" },
+ max_length: { type: "integer", description: "Max content length in characters" },
  include_frames: {
  type: "boolean",
- description: "Include base64 screenshots (OCR only). Default: false",
+ description: "Include base64 screenshots (OCR only). Warning: large response.",
  default: false,
  },
- speaker_ids: {
- type: "string",
- description: "Comma-separated speaker IDs to filter audio results (e.g., '1,2,3')",
- },
- speaker_name: {
- type: "string",
- description: "Filter audio by speaker name (case-insensitive partial match)",
- },
+ speaker_ids: { type: "string", description: "Comma-separated speaker IDs to filter audio" },
+ speaker_name: { type: "string", description: "Filter audio by speaker name (case-insensitive partial match)" },
  max_content_length: {
  type: "integer",
- description: "Truncate each result's text/transcription to this many characters using middle-truncation (keeps first half + last half). Useful for limiting token usage with small-context models.",
+ description: "Truncate each result's text via middle-truncation. Use 200-500 to keep responses compact.",
  },
  },
  },
  },
  {
- name: "export-video",
+ name: "list-meetings",
  description:
- "Export a video of screen recordings for a specific time range. " +
- "Creates an MP4 video from the recorded frames between the start and end times.\n\n" +
- "IMPORTANT: Use ISO 8601 UTC timestamps (e.g., 2024-01-15T10:00:00Z) or relative times (e.g., '16h ago', 'now')\n\n" +
- "EXAMPLES:\n" +
- "- Last 30 minutes: Calculate timestamps from current time\n" +
- "- Specific meeting: Use the meeting's start and end times in UTC",
- annotations: {
- title: "Export Video",
- destructiveHint: true,
+ "List detected meetings (Zoom, Teams, Meet, etc.) with duration, app, and attendees. " +
+ "Only available when screenpipe runs in smart transcription mode.",
+ annotations: { title: "List Meetings", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ start_time: { type: "string", description: "ISO 8601 UTC or relative (e.g. '1d ago')" },
+ end_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ limit: { type: "integer", description: "Max results (default 20)", default: 20 },
+ offset: { type: "integer", description: "Pagination offset", default: 0 },
+ },
  },
+ },
+ {
+ name: "activity-summary",
+ description:
+ "Lightweight activity overview (~200-500 tokens): app usage with active minutes, audio speakers, recent texts. " +
+ "USE THIS FIRST for broad questions: 'what was I doing?', 'how long on X?', 'which apps?'. " +
+ "Only escalate to search-content if you need specific text content.",
+ annotations: { title: "Activity Summary", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- start_time: {
- type: "string",
- format: "date-time",
- description:
- "Start time: ISO 8601 UTC (e.g., '2024-01-15T10:00:00Z') or relative (e.g., '16h ago', 'now')",
- },
- end_time: {
- type: "string",
- format: "date-time",
- description:
- "End time: ISO 8601 UTC (e.g., '2024-01-15T10:30:00Z') or relative (e.g., 'now', '1h ago')",
- },
- fps: {
- type: "number",
- description:
- "Frames per second for the output video. Lower values (0.5-1.0) create smaller files, higher values (5-10) create smoother playback. Default: 1.0",
- default: 1.0,
- },
+ start_time: { type: "string", description: "ISO 8601 UTC or relative (e.g. '3h ago')" },
+ end_time: { type: "string", description: "ISO 8601 UTC or relative (e.g. 'now')" },
+ app_name: { type: "string", description: "Optional app name filter to focus on one app" },
  },
  required: ["start_time", "end_time"],
  },
  },
  {
- name: "list-meetings",
+ name: "search-elements",
  description:
- "List detected meetings with duration, app, and attendees. " +
- "Returns meetings detected via app focus (Zoom, Meet, Teams) and audio. " +
- "Only available when screenpipe runs in smart transcription mode.",
- annotations: {
- title: "List Meetings",
- readOnlyHint: true,
- },
+ "Search UI elements (buttons, links, text fields) from the accessibility tree. " +
+ "Lighter than search-content for targeted UI lookups. " +
+ "Use when you need to find specific UI controls or page structure, not general content.",
+ annotations: { title: "Search Elements", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- start_time: {
- type: "string",
- format: "date-time",
- description: "Start filter: ISO 8601 UTC (e.g., 2024-01-15T10:00:00Z) or relative (e.g., '16h ago', 'now')",
- },
- end_time: {
+ q: { type: "string", description: "Full-text search on element text" },
+ frame_id: { type: "integer", description: "Filter to specific frame ID from search results" },
+ source: {
  type: "string",
- format: "date-time",
- description: "End filter: ISO 8601 UTC (e.g., 2024-01-15T18:00:00Z) or relative (e.g., 'now', '1h ago')",
- },
- limit: {
- type: "integer",
- description: "Max results. Default: 20",
- default: 20,
- },
- offset: {
- type: "integer",
- description: "Skip N results for pagination. Default: 0",
- default: 0,
- },
+ enum: ["accessibility", "ocr"],
+ description: "Element source. 'accessibility' is preferred (OS-native tree). 'ocr' for apps without a11y.",
+ },
+ role: { type: "string", description: "Element role filter (e.g. 'AXButton', 'AXLink', 'AXTextField')" },
+ start_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ end_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ app_name: { type: "string", description: "Filter by app name" },
+ limit: { type: "integer", description: "Max results (default 50). Start with 10-20.", default: 50 },
+ offset: { type: "integer", description: "Pagination offset", default: 0 },
  },
  },
  },
  {
- name: "activity-summary",
+ name: "frame-context",
  description:
- "Get a lightweight compressed activity overview for a time range (~200-500 tokens). " +
- "Returns app usage (name, frame count, active minutes, first/last seen), recent accessibility texts, and audio speaker summary. " +
- "Minutes are based on active session time (consecutive frames with gaps < 5min count as active). " +
- "first_seen/last_seen show the wall-clock span per app.\n\n" +
- "USE THIS TOOL (not search-content or raw SQL) for:\n" +
- "- 'how long did I spend on X?' → active_minutes per app\n" +
- "- 'which apps did I use today?' → app list sorted by active_minutes\n" +
- "- 'what was I doing?' → broad overview before drilling deeper\n" +
- "- Any time-spent or app-usage question\n\n" +
- "WARNING: Do NOT estimate time from raw frame counts or SQL queries — those are inaccurate. " +
- "This endpoint calculates actual active session time correctly.",
- annotations: {
- title: "Activity Summary",
- readOnlyHint: true,
+ "Get full accessibility text, parsed tree nodes, and URLs for a specific frame ID. " +
+ "Use after search-content to get detailed context for a specific moment.",
+ annotations: { title: "Frame Context", readOnlyHint: true, openWorldHint: false, idempotentHint: true },
+ inputSchema: {
+ type: "object",
+ properties: {
+ frame_id: { type: "integer", description: "Frame ID from search-content results (content.frame_id field)" },
+ },
+ required: ["frame_id"],
  },
+ },
+ {
+ name: "export-video",
+ description:
+ "Export an MP4 video of screen recordings for a time range. " +
+ "Returns the file path. Can take a few minutes for long ranges.",
+ annotations: { title: "Export Video", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
  inputSchema: {
  type: "object",
  properties: {
- start_time: {
- type: "string",
- format: "date-time",
- description: "Start of time range: ISO 8601 UTC (e.g., 2024-01-15T10:00:00Z) or relative (e.g., '16h ago', 'now')",
- },
- end_time: {
- type: "string",
- format: "date-time",
- description: "End of time range: ISO 8601 UTC (e.g., 2024-01-15T18:00:00Z) or relative (e.g., 'now', '1h ago')",
- },
- app_name: {
- type: "string",
- description: "Optional app name filter (e.g., 'Google Chrome', 'VS Code')",
- },
+ start_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ end_time: { type: "string", description: "ISO 8601 UTC or relative" },
+ fps: { type: "number", description: "Output FPS (default 1.0). Higher = smoother but larger file.", default: 1.0 },
  },
  required: ["start_time", "end_time"],
  },
  },
  {
- name: "search-elements",
+ name: "update-memory",
  description:
- "Search structured UI elements (accessibility tree nodes and OCR text blocks). " +
- "Returns ~100-500 bytes per element — much lighter than search-content for targeted lookups. " +
- "Each element has: id, frame_id, source (accessibility/ocr), role (AXButton, AXStaticText, AXLink, etc.), text, bounds, depth.\n\n" +
- "Use for: finding specific buttons, links, text fields, or UI components. " +
- "Prefer this over search-content when you need structural UI detail rather than full screen text.",
- annotations: {
- title: "Search Elements",
- readOnlyHint: true,
- },
+ "Create, update, or delete a persistent memory (facts, preferences, decisions the user wants to remember). " +
+ "To retrieve memories, use search-content with content_type='memory'. " +
+ "To create: provide content + tags. To update: provide id + fields to change. To delete: provide id + delete=true.",
+ annotations: { title: "Update Memory", readOnlyHint: false, destructiveHint: false, openWorldHint: false, idempotentHint: true },
  inputSchema: {
  type: "object",
  properties: {
- q: {
- type: "string",
- description: "Full-text search query across element text. Optional.",
- },
- frame_id: {
- type: "integer",
- description: "Filter to elements from a specific frame",
- },
- source: {
- type: "string",
- enum: ["accessibility", "ocr"],
- description: "Filter by element source: 'accessibility' (structured tree) or 'ocr' (text blocks)",
- },
- role: {
- type: "string",
- description: "Filter by element role (e.g., 'AXButton', 'AXStaticText', 'AXLink', 'AXTextField', 'line')",
- },
- start_time: {
- type: "string",
- format: "date-time",
- description: "Start time: ISO 8601 UTC or relative (e.g., '16h ago', 'now')",
- },
- end_time: {
- type: "string",
- format: "date-time",
- description: "End time: ISO 8601 UTC or relative (e.g., 'now', '1h ago')",
- },
- app_name: {
- type: "string",
- description: "Filter by app name",
- },
- limit: {
- type: "integer",
- description: "Max results. Default: 50",
- default: 50,
- },
- offset: {
- type: "integer",
- description: "Skip N results for pagination. Default: 0",
- default: 0,
- },
+ id: { type: "integer", description: "Memory ID — omit to create new, provide to update/delete" },
+ content: { type: "string", description: "Memory text (required for creation)" },
+ tags: { type: "array", items: { type: "string" }, description: "Categorization tags (e.g. ['work', 'project-x'])" },
+ importance: { type: "number", description: "0.0 (trivial) to 1.0 (critical). Default 0.5." },
+ source_context: { type: "object", description: "Optional metadata linking to source (app, timestamp, etc.)" },
+ delete: { type: "boolean", description: "Set true to delete the memory identified by id" },
  },
  },
  },
  {
- name: "frame-context",
+ name: "send-notification",
  description:
- "Get accessibility text, parsed tree nodes, and extracted URLs for a specific frame. " +
- "Falls back to OCR data for legacy frames without accessibility data. " +
- "Use after finding a frame_id from search-content or search-elements to get full structural detail and URLs.",
- annotations: {
- title: "Frame Context",
- readOnlyHint: true,
- },
+ "Send a notification to the screenpipe desktop UI. " +
+ "Use to alert the user about findings, completed tasks, or actions needing attention.",
+ annotations: { title: "Send Notification", readOnlyHint: false, destructiveHint: false, openWorldHint: false },
  inputSchema: {
  type: "object",
  properties: {
- frame_id: {
- type: "integer",
- description: "The frame ID to get context for (from search results)",
+ title: { type: "string", description: "Notification title (short, descriptive)" },
+ body: { type: "string", description: "Notification body (markdown supported)" },
+ pipe_name: { type: "string", description: "Name of the pipe/tool sending this notification" },
+ timeout_secs: { type: "integer", description: "Auto-dismiss after N seconds (default 20). Use 0 for persistent.", default: 20 },
+ actions: {
+ type: "array",
+ description: "Up to 5 action buttons. Each needs id, label, type ('pipe'|'api'|'deeplink'|'dismiss').",
+ items: {
+ type: "object",
+ properties: {
+ id: { type: "string", description: "Unique action ID" },
+ label: { type: "string", description: "Button label" },
+ type: { type: "string", enum: ["pipe", "api", "deeplink", "dismiss"], description: "Action type" },
+ pipe: { type: "string", description: "Pipe name to run (type=pipe)" },
+ context: { type: "object", description: "Context passed to pipe (type=pipe)" },
+ open_in_chat: { type: "boolean", description: "Open pipe run in chat UI instead of background (type=pipe)" },
+ url: { type: "string", description: "URL for api/deeplink actions" },
+ },
+ required: ["id", "label", "type"],
+ },
  },
  },
- required: ["frame_id"],
+ required: ["title", "pipe_name"],
  },
  },
  ];

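The TOOLS array above splits the new memory feature across two surfaces: writes go through update-memory, reads through search-content with content_type="memory". A hedged sketch of that round trip, reusing the connected client from the sketch near the top of this page (argument shapes come from the inputSchema definitions above; the example content is invented):

```typescript
import type { Client } from "@modelcontextprotocol/sdk/client/index.js";

async function rememberAndRecall(client: Client) {
  // Create: content plus tags, per the update-memory schema above.
  await client.callTool({
    name: "update-memory",
    arguments: {
      content: "Prefers summaries under five bullet points",
      tags: ["preferences"],
      importance: 0.7,
    },
  });

  // Retrieve: there is no separate read tool; search-content with
  // content_type="memory" is the documented read path.
  const found = await client.callTool({
    name: "search-content",
    arguments: { content_type: "memory", q: "summaries", limit: 5 },
  });
  console.log(found.content);
}
```
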
- // List tools handler
  server.setRequestHandler(ListToolsRequestSchema, async () => {
- return { tools: BASE_TOOLS };
+ return { tools: TOOLS };
  });

- // MCP Resources - provide dynamic context data
+ // ---------------------------------------------------------------------------
+ // Resources — dynamic context only (no duplicated reference docs)
+ // ---------------------------------------------------------------------------
  const RESOURCES = [
  {
  uri: "screenpipe://context",
  name: "Current Context",
- description: "Current date/time and pre-computed timestamps for common time ranges",
+ description: "Current date/time, timezone, and pre-computed timestamps for common time ranges",
  mimeType: "application/json",
  },
  {
  uri: "screenpipe://guide",
  name: "Usage Guide",
- description: "How to use screenpipe search effectively",
- mimeType: "text/markdown",
- },
- {
- uri: "ui://search",
- name: "Search Dashboard",
- description: "Interactive search UI for exploring screen recordings and audio transcriptions",
- mimeType: "text/html",
- },
- {
- uri: "screenpipe://pipe-creation-guide",
- name: "Pipe Creation Guide",
- description: "How to create screenpipe pipes (scheduled AI automations): format, YAML frontmatter, schedule syntax, API parameters, and example templates",
- mimeType: "text/markdown",
- },
- {
- uri: "screenpipe://api-reference",
- name: "REST API Reference",
- description: "Full screenpipe REST API reference: search, activity-summary, elements, frames, export, retranscribe, raw SQL, connections, speakers (60+ endpoints)",
- mimeType: "text/markdown",
- },
- {
- uri: "screenpipe://cli-reference",
- name: "CLI Reference",
- description: "Screenpipe CLI commands: pipe management (list, enable, run, install, delete) and connection management (Telegram, Slack, Discord, etc.)",
+ description: "How to use screenpipe tools effectively — search strategy, progressive disclosure, and common patterns",
  mimeType: "text/markdown",
  },
  ];

- // List resources handler
  server.setRequestHandler(ListResourcesRequestSchema, async () => {
  return { resources: RESOURCES };
  });

- // Read resource handler
  server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
  const { uri } = request.params;
- const dateInfo = getCurrentDateInfo();
- const now = Date.now();
-
- switch (uri) {
- case "screenpipe://context":
- return {
- contents: [
- {
- uri,
- mimeType: "application/json",
- text: JSON.stringify({
- current_time: dateInfo.isoDate,
- current_date_local: dateInfo.localDate,
+
+ if (uri === "screenpipe://context") {
+ const now = new Date();
+ const ms = now.getTime();
+ return {
+ contents: [
+ {
+ uri,
+ mimeType: "application/json",
+ text: JSON.stringify(
+ {
+ current_time: now.toISOString(),
+ current_date_local: now.toLocaleDateString("en-US", {
+ weekday: "long",
+ year: "numeric",
+ month: "long",
+ day: "numeric",
+ }),
  timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
  timestamps: {
- now: dateInfo.isoDate,
- one_hour_ago: new Date(now - 60 * 60 * 1000).toISOString(),
- three_hours_ago: new Date(now - 3 * 60 * 60 * 1000).toISOString(),
- today_start: `${new Date().toISOString().split("T")[0]}T00:00:00Z`,
- yesterday_start: `${new Date(now - 24 * 60 * 60 * 1000).toISOString().split("T")[0]}T00:00:00Z`,
- one_week_ago: new Date(now - 7 * 24 * 60 * 60 * 1000).toISOString(),
+ now: now.toISOString(),
+ one_hour_ago: new Date(ms - 60 * 60 * 1000).toISOString(),
+ three_hours_ago: new Date(ms - 3 * 60 * 60 * 1000).toISOString(),
+ today_start: `${now.toISOString().split("T")[0]}T00:00:00Z`,
+ yesterday_start: `${new Date(ms - 24 * 60 * 60 * 1000).toISOString().split("T")[0]}T00:00:00Z`,
+ one_week_ago: new Date(ms - 7 * 24 * 60 * 60 * 1000).toISOString(),
  },
- common_apps: ["Google Chrome", "Safari", "Slack", "zoom.us", "Microsoft Teams", "Code", "Terminal"],
- }, null, 2),
- },
- ],
- };
-
- case "screenpipe://guide":
- return {
- contents: [
- {
- uri,
- mimeType: "text/markdown",
- text: `# Screenpipe Search Guide
-
- ## Data Modalities
-
- Screenpipe captures four types of data:
- 1. **Accessibility** - Screen text via accessibility APIs (primary, preferred for screen content)
- 2. **OCR** - Screen text from screenshots (legacy fallback for apps without accessibility support)
- 3. **Audio** - Transcribed speech from microphone/system audio
- 4. **Input** - Keyboard input, mouse clicks, app switches, clipboard (macOS)
-
- ## Quick Start
- - **Get recent activity**: Call search-content with no parameters
- - **Search screen text**: \`{"q": "search term", "content_type": "all"}\`
- - **Get keyboard input**: \`{"content_type": "input"}\`
- - **Get audio only**: \`{"content_type": "audio"}\`
-
- ## Common User Requests → Correct Tool Choice
- | User says | Use this tool | Key params |
- |-----------|--------------|------------|
- | "summarize my meeting/call" | search-content | content_type:"audio", NO q param, start_time |
- | "what did they/I say about X" | search-content | content_type:"audio", NO q param (scan results manually) |
- | "how long on X" / "which apps" / "time spent" | activity-summary | start_time, end_time |
- | "what was I doing" | activity-summary | start_time, end_time (then drill into search-content) |
- | "what was I reading/looking at" | search-content | content_type:"all", start_time |
-
- ## Behavior Rules
- - Act immediately on clear requests. NEVER ask "what time range?" or "which content type?" when the intent is obvious.
- - If search returns empty, silently retry with wider time range or fewer filters. Do NOT ask the user what to change.
- - For meetings: ALWAYS use content_type:"audio" and do NOT use the q param. Transcriptions are noisy — q filters too aggressively and misses relevant content.
-
- ## search-content
- | Parameter | Description | Default |
- |-----------|-------------|---------|
- | q | Search query | (none - returns all) |
- | content_type | all/ocr/audio/input/accessibility | all |
- | limit | Max results | 10 |
- | start_time | ISO 8601 UTC or relative (e.g. '16h ago') | (no filter) |
- | end_time | ISO 8601 UTC or relative (e.g. 'now') | (no filter) |
- | app_name | Filter by app | (no filter) |
- | include_frames | Include screenshots | false |
-
- ## Search Strategy (MANDATORY)
- 1. First search: ONLY use time params (start_time/end_time). No q, no app_name, no content_type. This gives ground truth of what's recorded.
- 2. Scan results to find correct app_name values and content patterns.
- 3. Only THEN narrow with filters using exact observed values. App names are case-sensitive and may differ from user input (e.g. "Discord" vs "Discord.exe").
- 4. The q param searches captured text (accessibility/OCR), NOT app names — an app can be visible without its name in the captured text.
- 5. NEVER report "no data found" after one filtered search. Verify with unfiltered time-only search first.
-
- ## Progressive Disclosure (Token-Efficient Strategy)
- 1. **Start with activity-summary** (~200 tokens) for broad questions ("what was I doing?")
- 2. **Narrow with search-content** (~500-1000 tokens) using filters from step 1
- 3. **Drill into search-elements** (~200 tokens each) for structural UI detail (buttons, links)
- 4. **Fetch frame-context** for URLs and accessibility tree of specific frames
- 5. **Screenshots** (include_frames=true) only when text isn't enough
-
- ## Chat History
- Previous screenpipe chat conversations are stored as individual JSON files in ~/.screenpipe/chats/{conversation-id}.json
- Each file contains: id, title, messages[], createdAt, updatedAt. You can read these files to reference or search previous conversations.
-
- ## Speaker Management
- screenpipe auto-identifies speakers in audio. API endpoints for managing them:
- - \`GET /speakers/unnamed?limit=10\` — list unnamed speakers
- - \`GET /speakers/search?name=John\` — search by name
- - \`POST /speakers/update\` with \`{"id": 5, "name": "John"}\` — rename a speaker
- - \`POST /speakers/merge\` with \`{"speaker_to_keep_id": 1, "speaker_to_merge_id": 2}\` — merge duplicates
- - \`GET /speakers/similar?speaker_id=5\` — find similar speakers for merging
- - \`POST /speakers/reassign\` — reassign audio chunk to different speaker
-
- ## Tips
- 1. Read screenpipe://context first to get current timestamps
- 2. Use activity-summary before search-content for broad overview questions
- 3. Use search-elements instead of search-content for targeted UI lookups (10x lighter)
- 4. Use content_type=input for "what did I type?" queries
- 5. Use content_type=accessibility for accessibility tree text
- 6. For large aggregations (e.g. "what apps did I use today?"), paginate with offset or suggest the user run raw SQL via \`curl -X POST http://localhost:3030/raw_sql\` for efficient GROUP BY queries
-
- ## Deep Links (Clickable References)
- When showing search results to users, create clickable links so they can jump to that exact moment.
-
- **ALWAYS prefer frame-based links for OCR results** (frame IDs are exact DB keys):
- - \`[10:30 AM — Chrome](screenpipe://frame/12345)\` — use \`content.frame_id\` from OCR results
-
- **Use timestamp links only for audio results** (which have no frame_id):
- - \`[meeting at 3pm](screenpipe://timeline?timestamp=2024-01-15T15:00:00Z)\` — use exact \`timestamp\` from audio results
-
- **NEVER fabricate frame IDs or timestamps.** Only use values copied from actual search results.`,
- },
- ],
- };
-
- case "ui://search": {
- // MCP App UI - Interactive search dashboard
- const uiHtmlPath = path.join(__dirname, "..", "ui", "search.html");
- let htmlContent: string;
- try {
- htmlContent = fs.readFileSync(uiHtmlPath, "utf-8");
- } catch {
- // Fallback: serve embedded minimal UI if file not found
- htmlContent = `<!DOCTYPE html>
- <html>
- <head>
- <style>
- body { font-family: system-ui; background: #0a0a0a; color: #fff; padding: 20px; }
- input { width: 100%; padding: 10px; margin-bottom: 10px; background: #1a1a1a; border: 1px solid #333; color: #fff; border-radius: 6px; }
- button { padding: 10px 20px; background: #fff; color: #000; border: none; border-radius: 6px; cursor: pointer; }
- #results { margin-top: 20px; }
- .result { background: #1a1a1a; padding: 12px; margin: 8px 0; border-radius: 8px; border: 1px solid #333; }
- </style>
- </head>
- <body>
- <h2>screenpipe search</h2>
- <input id="q" placeholder="search..." onkeydown="if(event.key==='Enter')search()"/>
- <button onclick="search()">search</button>
- <div id="results"></div>
- <script>
- function search() {
- window.parent.postMessage({jsonrpc:'2.0',method:'tools/call',params:{name:'search-content',arguments:{q:document.getElementById('q').value,limit:20}}},'*');
- }
- window.addEventListener('message',e=>{
- if(e.data?.result||e.data?.method==='tool/result'){
- const r=e.data.result||e.data.params?.result;
- const d=r?.data||r||[];
- document.getElementById('results').innerHTML=d.map(x=>'<div class="result"><b>'+((x.type||'')+'</b> '+(x.content?.app_name||'')+': '+(x.content?.text||x.content?.transcription||'').substring(0,200))+'</div>').join('');
- }
- });
- </script>
- </body>
- </html>`;
- }
- return {
- contents: [
- {
- uri,
- mimeType: "text/html",
- text: htmlContent,
- },
- ],
- };
- }
-
- case "screenpipe://pipe-creation-guide":
- return {
- contents: [
- {
- uri,
- mimeType: "text/markdown",
- text: `# Screenpipe Pipe Creation Guide
-
- ## What is a pipe?
-
- A pipe is a scheduled AI agent defined as a single markdown file: \`~/.screenpipe/pipes/{name}/pipe.md\`
- Every N minutes, screenpipe runs a coding agent (like pi or claude-code) with the pipe's prompt.
- The agent can query your screen data, write files, call external APIs, send notifications, etc.
-
- ## pipe.md format
-
- The file starts with YAML frontmatter on the very first line (no blank lines before it), then the prompt body:
-
- \`\`\`markdown
- ---
- schedule: every 30m
- enabled: true
- ---
-
- Your prompt instructions here...
- \`\`\`
-
- ### Config fields
-
- | Field | Values | Description |
- |-------|--------|-------------|
- | \`schedule\` | \`every 30m\`, \`every 1h\`, \`every day at 9am\`, \`every monday at 9am\`, \`manual\`, or cron: \`*/30 * * * *\` | When to run |
- | \`enabled\` | \`true\` / \`false\` | Whether the pipe is active |
- | \`preset\` | AI preset name (e.g. \`Oai\`) | Which AI model to use |
- | \`history\` | \`true\` / \`false\` | Include previous output as context |
- | \`connections\` | list of connection IDs | Required integrations (e.g. \`obsidian\`, \`telegram\`) |
-
- ## Context header
-
- Before execution, screenpipe prepends a context header to the prompt with:
- - Time range (start/end timestamps based on the schedule interval)
- - Current date and user's timezone
- - Screenpipe API base URL (http://localhost:3030)
- - Output directory
-
- The AI agent uses this context to query the right time range. No template variables needed in the prompt.
-
- ## Screenpipe search API
-
- The agent queries screen data via the local REST API:
-
- \`\`\`
- curl "http://localhost:3030/search?limit=20&content_type=all&start_time=<ISO8601>&end_time=<ISO8601>"
- \`\`\`
-
- ### Query parameters
-
- | Parameter | Description |
- |-----------|-------------|
- | \`q\` | Text search query (optional — skip for audio, transcriptions are noisy) |
- | \`content_type\` | \`all\`, \`ocr\`, \`audio\`, \`input\`, \`accessibility\` (prefer \`all\` or \`accessibility\`) |
- | \`limit\` | Max results (default 20) |
- | \`offset\` | Pagination offset |
- | \`start_time\` / \`end_time\` | ISO 8601 timestamps or relative (\`1h ago\`, \`now\`) |
- | \`app_name\` | Filter by app (e.g. \`Google Chrome\`, \`Slack\`) |
- | \`window_name\` | Filter by window title |
- | \`browser_url\` | Filter by URL |
- | \`min_length\` / \`max_length\` | Filter by text length |
- | \`speaker_name\` | Filter audio by speaker |
-
- Other useful endpoints:
- - \`GET /activity-summary?start_time=...&end_time=...\` — lightweight overview (~200 tokens)
- - \`GET /elements?q=...&role=AXButton&start_time=...\` — UI elements
- - \`GET /connections/{id}\` — get integration credentials (telegram, slack, obsidian, etc.)
- - \`POST /raw_sql\` — run SQL queries (always include LIMIT)
-
- Full API reference: read the \`screenpipe://api-reference\` resource.
-
- ## Installing and running
-
- After creating the pipe.md file:
-
- \`\`\`bash
- bunx screenpipe@latest pipe install ~/.screenpipe/pipes/my-pipe
- bunx screenpipe@latest pipe enable my-pipe
- bunx screenpipe@latest pipe run my-pipe # test immediately
- \`\`\`
-
- ## Example pipes
-
- ### Daily recap (manual trigger)
- \`\`\`markdown
- ---
- schedule: manual
- enabled: true
- ---
-
- Analyze my screen and audio recordings from today (last 16 hours). Use limit=10 per search, max 5 searches total.
-
- ## Summary
- One sentence: what I mainly did today.
-
- ## Accomplishments
- - Top 3 things I finished, with timestamps
-
- ## Key Moments
- - Important things I saw, said, or heard
-
- ## Unfinished Work
- - What I should continue tomorrow
- \`\`\`
-
- ### Obsidian sync (every hour)
- \`\`\`markdown
- ---
- schedule: every 1h
- enabled: true
- connections:
- - obsidian
- ---
-
- Sync screenpipe activity to Obsidian vault as a daily note.
-
- 1. Get vault path from GET http://localhost:3030/connections/obsidian
- 2. Read existing daily note (merge into it)
- 3. Query search API in 30-minute chunks with min_length=50
- 4. Synthesize activities, extract action items, write note
- \`\`\`
-
- ### Slack standup (every weekday at 9am)
- \`\`\`markdown
- ---
- schedule: every weekday at 9am
- enabled: true
- connections:
- - slack
- ---
-
- Generate standup update from yesterday's activity and post to Slack.
-
- 1. Query activity-summary for yesterday
- 2. Search for key accomplishments and blockers
- 3. Format as: Done / Doing / Blocked
- 4. POST to Slack webhook from GET http://localhost:3030/connections/slack
- \`\`\`
-
- ## Optimization tips
-
- - Be specific about expected output format
- - Give step-by-step instructions
- - Add error handling: "if API returns empty, try content_type=accessibility instead of ocr"
- - Add validation: "before writing, verify you have at least 3 entries"
- - Specify exact file paths, API parameters, output structure
- - Keep search limit low (10-20) and use time ranges from the context header
- - Use \`min_length=50\` to skip noisy OCR fragments`,
- },
- ],
- };
-
- case "screenpipe://api-reference":
- return {
- contents: [
- {
- uri,
- mimeType: "text/markdown",
- text: `# Screenpipe REST API Reference
-
- Local REST API at \`http://localhost:3030\`. Full reference (60+ endpoints): https://docs.screenpi.pe/llms-full.txt
-
- ## Shell
-
- - **macOS/Linux** → \`bash\`, \`curl\`
- - **Windows** → \`powershell\`, \`curl.exe\` (not the alias)
-
- ## Context Window Protection
-
- API responses can be large. Always write curl output to a file first (\`curl ... -o /tmp/sp_result.json\`), check size (\`wc -c\`), and if over 5KB read only the first 50-100 lines. Extract what you need with \`jq\`. NEVER dump full large responses into context.
-
- ---
-
- ## 1. Search — \`GET /search\`
-
- \`\`\`bash
- curl "http://localhost:3030/search?q=QUERY&content_type=all&limit=10&start_time=1h%20ago"
- \`\`\`
-
- ### Parameters
-
- | Parameter | Type | Required | Description |
- |-----------|------|----------|-------------|
- | \`q\` | string | No | Keywords. Do NOT use for audio — transcriptions are noisy. |
- | \`content_type\` | string | No | \`all\` (default), \`ocr\`, \`audio\`, \`input\`, \`accessibility\` |
- | \`limit\` | integer | No | Max 1-20. Default: 10 |
- | \`offset\` | integer | No | Pagination. Default: 0 |
- | \`start_time\` | ISO 8601 or relative | **Yes** | \`2024-01-15T10:00:00Z\` or \`16h ago\`, \`2d ago\`, \`30m ago\` |
- | \`end_time\` | ISO 8601 or relative | No | Defaults to now. \`now\`, \`1h ago\` |
- | \`app_name\` | string | No | e.g. "Google Chrome", "Slack", "zoom.us" |
- | \`window_name\` | string | No | Window title substring |
- | \`speaker_name\` | string | No | Filter audio by speaker (case-insensitive partial) |
- | \`focused\` | boolean | No | Only focused windows |
-
- ### Critical Rules
-
- 1. **ALWAYS include \`start_time\`** — queries without time bounds WILL timeout
- 2. **Start with 1-2 hour ranges** — expand only if no results
- 3. **Use \`app_name\`** when user mentions a specific app
- 4. **"recent"** = 30 min. **"today"** = since midnight. **"yesterday"** = yesterday's range
-
- ### Response Format
-
- \`\`\`json
- {
- "data": [
- {"type": "OCR", "content": {"frame_id": 12345, "text": "...", "timestamp": "...", "app_name": "Chrome"}},
- {"type": "Audio", "content": {"chunk_id": 678, "transcription": "...", "timestamp": "...", "speaker": {"name": "John"}}},
- {"type": "UI", "content": {"id": 999, "text": "Clicked Submit", "timestamp": "...", "app_name": "Safari"}}
- ],
- "pagination": {"limit": 10, "offset": 0, "total": 42}
- }
- \`\`\`
-
- ---
-
- ## 2. Activity Summary — \`GET /activity-summary\`
-
- \`\`\`bash
- curl "http://localhost:3030/activity-summary?start_time=1h%20ago&end_time=now"
- \`\`\`
-
- Returns app usage with \`active_minutes\`, first/last seen, recent texts, audio summary. ~200-500 tokens. Best starting point.
-
- ---
-
- ## 3. Elements — \`GET /elements\`
-
- Lightweight FTS search across UI elements (~100-500 bytes each).
-
- \`\`\`bash
- curl "http://localhost:3030/elements?q=Submit&role=AXButton&start_time=1h%20ago&limit=10"
- \`\`\`
-
- Parameters: \`q\`, \`frame_id\`, \`source\` (\`accessibility\`|\`ocr\`), \`role\`, \`start_time\`, \`end_time\`, \`app_name\`, \`limit\`, \`offset\`.
-
- ### Frame Context — \`GET /frames/{id}/context\`
-
- Returns accessibility text, parsed nodes, and extracted URLs for a frame.
-
- Common roles: \`AXButton\`, \`AXStaticText\`, \`AXLink\`, \`AXTextField\`, \`AXTextArea\`, \`AXMenuItem\`, \`AXCheckBox\`
-
- ---
-
- ## 4. Frames — \`GET /frames/{frame_id}\`
-
- Returns raw PNG screenshot. Never fetch more than 2-3 per query.
-
- ---
-
- ## 5. Media Export — \`POST /frames/export\`
-
- \`\`\`bash
- curl -X POST http://localhost:3030/frames/export \\
- -H "Content-Type: application/json" \\
- -d '{"start_time": "5m ago", "end_time": "now", "fps": 1.0}'
- \`\`\`
-
- FPS guidelines: 5min→1.0, 30min→0.5, 1h→0.2, 2h+→0.1. Max 10,000 frames.
-
- ---
-
- ## 6. Retranscribe — \`POST /audio/retranscribe\`
-
- \`\`\`bash
- curl -X POST http://localhost:3030/audio/retranscribe \\
- -H "Content-Type: application/json" \\
- -d '{"start": "1h ago", "end": "now"}'
- \`\`\`
-
- Optional: \`engine\`, \`vocabulary\` (array of \`{"word": "...", "replacement": "..."}\`), \`prompt\` (topic context).
-
- ---
-
- ## 7. Raw SQL — \`POST /raw_sql\`
-
- \`\`\`bash
- curl -X POST http://localhost:3030/raw_sql \\
- -H "Content-Type: application/json" \\
- -d '{"query": "SELECT ... LIMIT 100"}'
- \`\`\`
-
- Every SELECT needs LIMIT. Always filter by time. Read-only.
-
- ### Schema
-
- | Table | Key Columns | Time Column |
- |-------|-------------|-------------|
- | \`frames\` | \`app_name\`, \`window_name\`, \`browser_url\`, \`focused\` | \`timestamp\` |
- | \`ocr_text\` | \`text\`, \`app_name\`, \`window_name\` | join via \`frame_id\` |
- | \`elements\` | \`source\`, \`role\`, \`text\` | join via \`frame_id\` |
- | \`audio_transcriptions\` | \`transcription\`, \`device\`, \`speaker_id\`, \`is_input_device\` | \`timestamp\` |
- | \`speakers\` | \`name\`, \`metadata\` | — |
- | \`ui_events\` | \`event_type\`, \`app_name\`, \`window_title\`, \`browser_url\` | \`timestamp\` |
- | \`accessibility\` | \`app_name\`, \`window_name\`, \`text_content\` | \`timestamp\` |
-
- ### Example Queries
-
- \`\`\`sql
- -- Most used apps (last 24h)
- SELECT app_name, COUNT(*) as frames FROM frames
- WHERE timestamp > datetime('now', '-24 hours') AND app_name IS NOT NULL
- GROUP BY app_name ORDER BY frames DESC LIMIT 20
-
- -- Speaker stats
- SELECT COALESCE(NULLIF(s.name, ''), 'Unknown') as speaker, COUNT(*) as segments
- FROM audio_transcriptions at LEFT JOIN speakers s ON at.speaker_id = s.id
- WHERE at.timestamp > datetime('now', '-24 hours')
- GROUP BY at.speaker_id ORDER BY segments DESC LIMIT 20
- \`\`\`
-
- ---
-
- ## 8. Connections — \`GET /connections\`
-
- \`\`\`bash
- curl http://localhost:3030/connections # List all
- curl http://localhost:3030/connections/telegram # Get credentials
- \`\`\`
-
- Services: Telegram (\`bot_token\` + \`chat_id\`), Slack (\`webhook_url\`), Discord (\`webhook_url\`), Todoist (\`api_token\`), Teams (\`webhook_url\`), Email (SMTP config).
-
- ---
+ },
+ null,
+ 2
+ ),
+ },
+ ],
+ };
+ }

- ## 9. Speakers
+ if (uri === "screenpipe://guide") {
+ return {
+ contents: [
+ {
+ uri,
+ mimeType: "text/markdown",
+ text: `# Screenpipe Usage Guide

- \`\`\`bash
- curl "http://localhost:3030/speakers/search?name=John"
- curl "http://localhost:3030/speakers/unnamed?limit=10"
- curl -X POST http://localhost:3030/speakers/update -H "Content-Type: application/json" -d '{"id": 5, "name": "John"}'
- curl -X POST http://localhost:3030/speakers/merge -H "Content-Type: application/json" -d '{"speaker_to_keep_id": 1, "speaker_to_merge_id": 2}'
- \`\`\`
+ ## Progressive Disclosure — start light, escalate only when needed

- ---
+ | Step | Tool | When to use |
+ |------|------|-------------|
+ | 1 | activity-summary | Broad questions: "what was I doing?", "which apps?", "how long on X?" |
+ | 2 | search-content | Need specific text, transcriptions, or content |
+ | 3 | search-elements | Need UI structure — buttons, links, form fields |
+ | 4 | frame-context | Need full detail for a specific moment (use frame_id from step 2) |

- ## 10. Other Endpoints
+ ## Search Strategy

- \`\`\`bash
- curl http://localhost:3030/health # Health check
- curl http://localhost:3030/audio/list # Audio devices
- curl http://localhost:3030/vision/list # Monitors
- \`\`\`
+ - **Always provide start_time** — without it, search scans the entire history
+ - **Start with limit=5** — increase only if you need more results
+ - **Use max_content_length=500** to keep responses compact
+ - **Don't use q for audio** — transcriptions are noisy, q filters too aggressively. Search audio by time range and speaker instead
+ - **app_name is case-sensitive** — use exact names: "Google Chrome" not "chrome"
+ - **content_type=accessibility is preferred** for screen text (OS-native). ocr is fallback for apps without accessibility support

- ## Pipes API
+ ## Common Patterns

- \`\`\`bash
- curl http://localhost:3030/pipes/list # List all pipes
- curl -X POST http://localhost:3030/pipes/enable -d '{"name":"..."}' # Enable
- curl -X POST http://localhost:3030/pipes/disable -d '{"name":"..."}' # Disable
- curl -X POST http://localhost:3030/pipes/run -d '{"name":"..."}' # Run once
- curl "http://localhost:3030/pipes/{name}/executions?limit=5" # Execution history
- \`\`\`
+ - "What was I doing for the last 2 hours?" → activity-summary with start_time='2h ago'
+ - "What did I discuss in my meeting?" → list-meetings to find it, then search-content with audio + that time range
+ - "Find when I was on Twitter" → search-content with app_name='Arc' (or the browser name), q='twitter'
+ - "Remember that I prefer X" → update-memory with content describing the preference
+ - "What do you remember about X?" → search-content with content_type='memory', q='X'

  ## Deep Links

- \`\`\`markdown
- [10:30 AM — Chrome](screenpipe://frame/12345) # OCR results (use frame_id)
- [meeting at 3pm](screenpipe://timeline?timestamp=ISO8601) # Audio results (use timestamp)
- \`\`\`
-
- Only use IDs/timestamps from actual search results. Never fabricate.`,
- },
- ],
- };
-
- case "screenpipe://cli-reference":
- return {
- contents: [
- {
- uri,
- mimeType: "text/markdown",
- text: `# Screenpipe CLI Reference
-
- Use \`bunx screenpipe@latest\` to run CLI commands (or \`npx screenpipe@latest\`). No separate install needed.
-
- ## Shell
-
- - **macOS/Linux** → \`bash\`
- - **Windows** → \`powershell\`
-
- ---
-
- ## Pipe Management
-
- Pipes are markdown-based AI automations. Each pipe lives at \`~/.screenpipe/pipes/<name>/pipe.md\`.
-
- ### Commands
-
- \`\`\`bash
- bunx screenpipe@latest pipe list # List all pipes (compact table)
- bunx screenpipe@latest pipe enable <name> # Enable a pipe
- bunx screenpipe@latest pipe disable <name> # Disable a pipe
- bunx screenpipe@latest pipe run <name> # Run once immediately (for testing)
- bunx screenpipe@latest pipe logs <name> # View execution logs
- bunx screenpipe@latest pipe install <url-or-path> # Install from GitHub or local path
- bunx screenpipe@latest pipe delete <name> # Delete a pipe
- bunx screenpipe@latest pipe models list # View AI model presets
- \`\`\`
-
- ### Creating a Pipe
-
- Create \`~/.screenpipe/pipes/<name>/pipe.md\` with YAML frontmatter + prompt:
-
- \`\`\`markdown
- ---
- schedule: every 30m
- enabled: true
- preset: Oai
- ---
-
- Your prompt instructions here. The AI agent executes this on schedule.
- \`\`\`
-
- **Schedule syntax**: \`every 30m\`, \`every 1h\`, \`every day at 9am\`, \`every monday at 9am\`, \`manual\`, or cron: \`*/30 * * * *\`
-
- **Config fields**: \`schedule\`, \`enabled\` (bool), \`preset\` (AI preset name), \`history\` (bool — include previous output), \`connections\` (list of required integrations)
-
- After creating:
- \`\`\`bash
- bunx screenpipe@latest pipe install ~/.screenpipe/pipes/my-pipe
- bunx screenpipe@latest pipe enable my-pipe
- bunx screenpipe@latest pipe run my-pipe # test immediately
- \`\`\`
-
- ### Editing Config
-
- Edit frontmatter in the pipe.md file directly, or via API:
-
- \`\`\`bash
- curl -X POST http://localhost:3030/pipes/<name>/config \\
- -H "Content-Type: application/json" \\
- -d '{"config": {"schedule": "every 1h", "enabled": true}}'
- \`\`\`
-
- ### Rules
-
- 1. Use \`pipe list\` (not \`--json\`) — table output is compact
- 2. Never dump full pipe JSON — can be 15MB+
- 3. Check logs first when debugging: \`pipe logs <name>\`
- 4. Use \`pipe run <name>\` to test before waiting for schedule
-
- ---
-
- ## Connection Management
-
- Manage integrations (Telegram, Slack, Discord, Email, Todoist, Teams) from the CLI.
-
- ### Commands
-
- \`\`\`bash
- bunx screenpipe@latest connection list # List all connections + status
- bunx screenpipe@latest connection list --json # JSON output
- bunx screenpipe@latest connection get <id> # Show saved credentials
- bunx screenpipe@latest connection set <id> key=val # Save credentials
- bunx screenpipe@latest connection test <id> # Test a connection
- bunx screenpipe@latest connection remove <id> # Remove credentials
- \`\`\`
-
- ### Examples
-
- \`\`\`bash
- # Set up Telegram
- bunx screenpipe@latest connection set telegram bot_token=123456:ABC-DEF chat_id=5776185278
-
- # Set up Slack webhook
- bunx screenpipe@latest connection set slack webhook_url=https://hooks.slack.com/services/...
-
- # Verify it works
- bunx screenpipe@latest connection test telegram
- \`\`\`
-
- Connection IDs: \`telegram\`, \`slack\`, \`discord\`, \`email\`, \`todoist\`, \`teams\`, \`google-calendar\`, \`apple-intelligence\`, \`openclaw\`, \`obsidian\`
-
- Credentials are stored locally at \`~/.screenpipe/connections.json\`.`,
- },
- ],
- };
-
- default:
- throw new Error(`Unknown resource: ${uri}`);
+ When referencing specific moments in results, create clickable links:
+ - Frame: [10:30 AM — Chrome](screenpipe://frame/{frame_id}) — use frame_id from search results
+ - Timeline: [meeting at 3pm](screenpipe://timeline?timestamp=2024-01-15T15:00:00Z) — use exact timestamp from results
+ Never fabricate IDs or timestamps — only use values from actual results.
+ `,
+ },
+ ],
+ };
  }
- });

- // MCP Prompts - static interaction templates
- const PROMPTS = [
- {
- name: "search-recent",
- description: "Search recent screen activity",
- arguments: [
- { name: "query", description: "Optional search term", required: false },
- { name: "hours", description: "Hours to look back (default: 1)", required: false },
- ],
- },
- {
- name: "find-in-app",
- description: "Find content from a specific application",
- arguments: [
- { name: "app", description: "App name (e.g., Chrome, Slack)", required: true },
- { name: "query", description: "Optional search term", required: false },
- ],
- },
- {
- name: "meeting-notes",
- description: "Get audio transcriptions from meetings",
- arguments: [
- { name: "hours", description: "Hours to look back (default: 3)", required: false },
- ],
- },
- {
- name: "create-pipe",
- description: "Create a new screenpipe pipe (scheduled AI automation)",
- arguments: [
- { name: "description", description: "What the pipe should do", required: true },
- { name: "schedule", description: "Schedule (e.g., 'every 30m', 'every day at 9am', 'manual')", required: false },
- ],
- },
- ];
-
- // List prompts handler
- server.setRequestHandler(ListPromptsRequestSchema, async () => {
- return { prompts: PROMPTS };
+ throw new Error(`Unknown resource: ${uri}`);
  });

1108
- // Get prompt handler
1109
- server.setRequestHandler(GetPromptRequestSchema, async (request) => {
1110
- const { name, arguments: promptArgs } = request.params;
1111
- const dateInfo = getCurrentDateInfo();
1112
- const now = Date.now();
1113
-
1114
- switch (name) {
1115
- case "search-recent": {
1116
- const query = promptArgs?.query || "";
1117
- const hours = parseInt(promptArgs?.hours || "1", 10);
1118
- const startTime = new Date(now - hours * 60 * 60 * 1000).toISOString();
1119
-
1120
- return {
1121
- description: `Search recent activity (last ${hours} hour${hours > 1 ? "s" : ""})`,
1122
- messages: [
1123
- {
1124
- role: "user" as const,
1125
- content: {
1126
- type: "text" as const,
1127
- text: `Search screenpipe for recent activity.
1128
-
1129
- Current time: ${dateInfo.isoDate}
1130
-
1131
- Use search-content with:
1132
- ${query ? `- q: "${query}"` : "- No query filter (get all content)"}
1133
- - start_time: "${startTime}"
1134
- - limit: 50`,
1135
- },
1136
- },
1137
- ],
1138
- };
1139
- }
1140
-
1141
- case "find-in-app": {
1142
- const app = promptArgs?.app || "Google Chrome";
1143
- const query = promptArgs?.query || "";
1144
-
1145
- return {
1146
- description: `Find content from ${app}`,
1147
- messages: [
1148
- {
1149
- role: "user" as const,
1150
- content: {
1151
- type: "text" as const,
1152
- text: `Search screenpipe for content from ${app}.
1153
-
1154
- Current time: ${dateInfo.isoDate}
1155
-
1156
- Use search-content with:
1157
- - app_name: "${app}"
1158
- ${query ? `- q: "${query}"` : "- No query filter"}
1159
- - content_type: "all"
1160
- - limit: 50`,
1161
- },
1162
- },
1163
- ],
1164
- };
1165
- }
1166
-
1167
- case "meeting-notes": {
1168
- const hours = parseInt(promptArgs?.hours || "3", 10);
1169
- const startTime = new Date(now - hours * 60 * 60 * 1000).toISOString();
1170
-
1171
- return {
1172
- description: `Get meeting transcriptions (last ${hours} hours)`,
1173
- messages: [
1174
- {
1175
- role: "user" as const,
1176
- content: {
1177
- type: "text" as const,
1178
- text: `Get audio transcriptions from recent meetings.
1179
-
1180
- Current time: ${dateInfo.isoDate}
1181
-
1182
- Use search-content with:
1183
- - content_type: "audio"
1184
- - start_time: "${startTime}"
1185
- - limit: 100
1186
-
1187
- Common meeting apps: zoom.us, Microsoft Teams, Google Meet, Slack`,
1188
- },
1189
- },
1190
- ],
1191
- };
1192
- }
1193
-
1194
- case "create-pipe": {
1195
- const description = promptArgs?.description || "a useful automation";
1196
- const schedule = promptArgs?.schedule || "every 30m";
1197
-
1198
- return {
1199
- description: `Create a new screenpipe pipe: ${description}`,
1200
- messages: [
1201
- {
1202
- role: "user" as const,
1203
- content: {
1204
- type: "text" as const,
1205
- text: `Create a new screenpipe pipe based on this description: "${description}"
1206
- Schedule: ${schedule}
1207
-
1208
- ## How to create a pipe
1209
-
1210
- A pipe is a TypeScript file that runs on a schedule or manually. It uses the screenpipe API to access screen/audio data and can send notifications, call AI, etc.
1211
-
1212
- ### Pipe structure
1213
- \`\`\`typescript
1214
- const pipe = () => import("https://raw.githubusercontent.com/nichochar/screenpipe/refs/heads/main/pipes/pipe-modules/pipe-core/index.ts");
1215
-
1216
- async function main() {
1217
- const sp = await pipe();
1218
-
1219
- // Query recent screen/audio data
1220
- const results = await sp.queryScreenpipe({
1221
- q: "search term",
1222
- contentType: "all", // "ocr" | "audio" | "all" | "ui"
1223
- limit: 50,
1224
- startTime: new Date(Date.now() - 30 * 60 * 1000).toISOString(),
1225
- endTime: new Date().toISOString(),
1226
- });
1227
-
1228
- // Send notification
1229
- await sp.sendDesktopNotification({ title: "Title", body: "Body" });
1230
-
1231
- // Call AI (uses user's configured AI provider)
1232
- const response = await sp.generateText({
1233
- messages: [{ role: "user", content: "Analyze this data..." }],
1234
- });
1235
- }
1236
-
1237
- main();
1238
- \`\`\`
1239
-
1240
- ### Key APIs available in pipes
1241
- - \`queryScreenpipe(params)\` - Search screen text (OCR/UI), audio transcriptions
1242
- - \`sendDesktopNotification({ title, body })\` - System notifications
1243
- - \`generateText({ messages, model? })\` - AI text generation
1244
- - \`generateObject({ messages, schema, model? })\` - AI structured output
1245
- - \`loadPipeConfig()\` - Load pipe configuration
1246
- - \`fetch()\` - HTTP requests to external services
1247
-
1248
- ### pipe.json config
1249
- \`\`\`json
1250
- {
1251
- "cron": "${schedule === "manual" ? "" : schedule.replace("every ", "*/").replace("m", " * * * *").replace("h", " * * *")}",
1252
- "is_nextjs": false,
1253
- "fields": [
1254
- { "name": "setting_name", "type": "string", "default": "value", "description": "Setting description" }
1255
- ]
1256
- }
1257
- \`\`\`
1258
-
1259
- ### Important notes
1260
- - Use \`contentType: "ui"\` for accessibility/structured text, \`"ocr"\` for raw screen text
1261
- - Always handle empty results gracefully
1262
- - Use \`startTime\`/\`endTime\` to scope queries
1263
- - Pipes run in Bun runtime with full TypeScript support
1264
- - For scheduled pipes, keep execution fast (< 30s)
1265
-
1266
- Create the pipe with the necessary files (pipe.ts and pipe.json). Follow the patterns above exactly.`,
1267
- },
1268
- },
1269
- ],
1270
- };
1271
- }
1272
-
1273
- default:
1274
- throw new Error(`Unknown prompt: ${name}`);
1275
- }
1276
- });
1277
-
1278
- // Helper function to make HTTP requests
356
+ // ---------------------------------------------------------------------------
357
+ // Helper
358
+ // ---------------------------------------------------------------------------
1279
359
  async function fetchAPI(
1280
360
  endpoint: string,
1281
361
  options: RequestInit = {}
@@ -1290,7 +370,9 @@ async function fetchAPI(
1290
370
  });
1291
371
  }
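
The body of `fetchAPI` falls between hunks and is elided here; a plausible reconstruction, assuming it only prefixes the local screenpipe base URL and defaults to JSON headers (an assumption, since the diff does not show the implementation):

```typescript
// Sketch of the elided helper: prefix the local API base URL and
// merge in a JSON content-type header. The real body may differ.
const SCREENPIPE_API = "http://localhost:3030"; // mirrors the --port flag default

async function fetchAPI(endpoint: string, options: RequestInit = {}): Promise<Response> {
  return fetch(`${SCREENPIPE_API}${endpoint}`, {
    ...options,
    headers: { "Content-Type": "application/json", ...(options.headers || {}) },
  });
}
```
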
1292
372
 
1293
- // Call tool handler
373
+ // ---------------------------------------------------------------------------
374
+ // Tool handlers
375
+ // ---------------------------------------------------------------------------
1294
376
  server.setRequestHandler(CallToolRequestSchema, async (request) => {
1295
377
  const { name, arguments: args } = request.params;
1296
378
 
@@ -1302,265 +384,100 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1302
384
  switch (name) {
1303
385
  case "search-content": {
1304
386
  const includeFrames = args.include_frames === true;
1305
- const params = new URLSearchParams();
1306
- for (const [key, value] of Object.entries(args)) {
1307
- if (value !== null && value !== undefined) {
1308
- params.append(key, String(value));
1309
- }
1310
- }
1311
-
1312
- const response = await fetchAPI(`/search?${params.toString()}`);
1313
- if (!response.ok) {
1314
- throw new Error(`HTTP error: ${response.status}`);
1315
- }
1316
-
1317
- const data = await response.json();
1318
- const results = data.data || [];
1319
- const pagination = data.pagination || {};
1320
-
1321
- if (results.length === 0) {
1322
- return {
1323
- content: [
1324
- {
1325
- type: "text",
1326
- text: "No results found. Try: broader search terms, different content_type, or wider time range.",
1327
- },
1328
- ],
1329
- };
1330
- }
1331
-
1332
- // Build content array with text and optional images
1333
- const contentItems: Array<
1334
- | { type: "text"; text: string }
1335
- | { type: "image"; data: string; mimeType: string }
1336
- > = [];
1337
-
1338
- const formattedResults: string[] = [];
1339
- const images: Array<{ data: string; context: string }> = [];
1340
-
1341
- for (const result of results) {
1342
- const content = result.content;
1343
- if (!content) continue;
1344
-
1345
- if (result.type === "OCR") {
1346
- const tagsStr = content.tags?.length ? `\nTags: ${content.tags.join(", ")}` : "";
1347
- formattedResults.push(
1348
- `[OCR] ${content.app_name || "?"} | ${content.window_name || "?"}\n` +
1349
- `${content.timestamp || ""}\n` +
1350
- `${content.text || ""}` +
1351
- tagsStr
1352
- );
1353
- if (includeFrames && content.frame) {
1354
- images.push({
1355
- data: content.frame,
1356
- context: `${content.app_name} at ${content.timestamp}`,
1357
- });
1358
- }
1359
- } else if (result.type === "Audio") {
1360
- const tagsStr = content.tags?.length ? `\nTags: ${content.tags.join(", ")}` : "";
1361
- formattedResults.push(
1362
- `[Audio] ${content.device_name || "?"}\n` +
1363
- `${content.timestamp || ""}\n` +
1364
- `${content.transcription || ""}` +
1365
- tagsStr
1366
- );
1367
- } else if (result.type === "UI" || result.type === "Accessibility") {
1368
- formattedResults.push(
1369
- `[Accessibility] ${content.app_name || "?"} | ${content.window_name || "?"}\n` +
1370
- `${content.timestamp || ""}\n` +
1371
- `${content.text || ""}`
1372
- );
1373
- }
1374
- }
1375
-
1376
- // Header with pagination info
1377
- const header = `Results: ${results.length}/${pagination.total || "?"}` +
1378
- (pagination.total > results.length ? ` (use offset=${(pagination.offset || 0) + results.length} for more)` : "");
1379
-
1380
- contentItems.push({
1381
- type: "text",
1382
- text: header + "\n\n" + formattedResults.join("\n---\n"),
1383
- });
1384
-
1385
- // Add images if requested
1386
- for (const img of images) {
1387
- contentItems.push({ type: "text", text: `\n📷 ${img.context}` });
1388
- contentItems.push({ type: "image", data: img.data, mimeType: "image/png" });
1389
- }
1390
-
1391
- return { content: contentItems };
1392
- }
1393
-
1394
- case "export-video": {
1395
- const startTime = args.start_time as string;
1396
- const endTime = args.end_time as string;
1397
- const fps = (args.fps as number) || 1.0;
1398
-
1399
- // Validate time inputs
1400
- if (!startTime || !endTime) {
1401
- return {
1402
- content: [
1403
- {
1404
- type: "text",
1405
- text: "Error: Both start_time and end_time are required in ISO 8601 format (e.g., '2024-01-15T10:00:00Z')",
1406
- },
1407
- ],
1408
- };
1409
- }
1410
-
1411
- // Step 1: Query the search API to get frame IDs for the time range
1412
- const searchParams = new URLSearchParams({
1413
- content_type: "ocr",
1414
- start_time: startTime,
1415
- end_time: endTime,
1416
- limit: "10000", // Get all frames in range
1417
- });
1418
-
1419
- const searchResponse = await fetchAPI(`/search?${searchParams.toString()}`);
1420
- if (!searchResponse.ok) {
1421
- throw new Error(`Failed to search for frames: HTTP ${searchResponse.status}`);
1422
- }
1423
-
1424
- const searchData = await searchResponse.json();
1425
- const results = searchData.data || [];
1426
-
1427
- if (results.length === 0) {
1428
- return {
1429
- content: [
1430
- {
1431
- type: "text",
1432
- text: `No screen recordings found between ${startTime} and ${endTime}. Make sure screenpipe was recording during this time period.`,
1433
- },
1434
- ],
1435
- };
1436
- }
1437
-
1438
- // Extract unique frame IDs from OCR results
1439
- const frameIds: number[] = [];
1440
- const seenIds = new Set<number>();
1441
- for (const result of results) {
1442
- if (result.type === "OCR" && result.content?.frame_id) {
1443
- const frameId = result.content.frame_id;
1444
- if (!seenIds.has(frameId)) {
1445
- seenIds.add(frameId);
1446
- frameIds.push(frameId);
1447
- }
1448
- }
1449
- }
1450
-
1451
- if (frameIds.length === 0) {
1452
- return {
1453
- content: [
1454
- {
1455
- type: "text",
1456
- text: `Found ${results.length} results but no valid frame IDs. The recordings may be audio-only.`,
1457
- },
1458
- ],
1459
- };
1460
- }
1461
-
1462
- // Sort frame IDs
1463
- frameIds.sort((a, b) => a - b);
1464
-
1465
- // Step 2: Connect to WebSocket and export video
1466
- // Send frame_ids in message body to avoid URL length limits
1467
- const wsUrl = `ws://localhost:${port}/frames/export?fps=${fps}`;
1468
-
1469
- const exportResult = await new Promise<{
1470
- success: boolean;
1471
- filePath?: string;
1472
- error?: string;
1473
- frameCount?: number;
1474
- }>((resolve) => {
1475
- const ws = new WebSocket(wsUrl);
1476
- let resolved = false;
1477
-
1478
- const timeout = setTimeout(() => {
1479
- if (!resolved) {
1480
- resolved = true;
1481
- ws.close();
1482
- resolve({ success: false, error: "Export timed out after 5 minutes" });
1483
- }
1484
- }, 5 * 60 * 1000); // 5 minute timeout
1485
-
1486
- ws.on("open", () => {
1487
- // Send frame_ids in message body to avoid URL length limits
1488
- ws.send(JSON.stringify({ frame_ids: frameIds }));
1489
- });
1490
-
1491
- ws.on("error", (error) => {
1492
- if (!resolved) {
1493
- resolved = true;
1494
- clearTimeout(timeout);
1495
- resolve({ success: false, error: `WebSocket error: ${error.message}` });
1496
- }
1497
- });
1498
-
1499
- ws.on("close", () => {
1500
- if (!resolved) {
1501
- resolved = true;
1502
- clearTimeout(timeout);
1503
- resolve({ success: false, error: "Connection closed unexpectedly" });
1504
- }
1505
- });
1506
-
1507
- ws.on("message", (data) => {
1508
- try {
1509
- const message = JSON.parse(data.toString());
1510
-
1511
- if (message.status === "completed" && message.video_data) {
1512
- // Save video to temp file
1513
- const tempDir = os.tmpdir();
1514
- const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
1515
- const filename = `screenpipe_export_${timestamp}.mp4`;
1516
- const filePath = path.join(tempDir, filename);
387
+ const params = new URLSearchParams();
388
+ for (const [key, value] of Object.entries(args)) {
389
+ if (value !== null && value !== undefined) {
390
+ params.append(key, String(value));
391
+ }
392
+ }
1517
393
 
1518
- fs.writeFileSync(filePath, Buffer.from(message.video_data));
394
+ const response = await fetchAPI(`/search?${params.toString()}`);
395
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
1519
396
 
1520
- resolved = true;
1521
- clearTimeout(timeout);
1522
- ws.close();
1523
- resolve({
1524
- success: true,
1525
- filePath,
1526
- frameCount: frameIds.length,
1527
- });
1528
- } else if (message.status === "error") {
1529
- resolved = true;
1530
- clearTimeout(timeout);
1531
- ws.close();
1532
- resolve({ success: false, error: message.error || "Export failed" });
1533
- }
1534
- // Ignore "extracting" and "encoding" status updates
1535
- } catch (parseError) {
1536
- // Ignore parse errors for progress messages
1537
- }
1538
- });
1539
- });
397
+ const data = await response.json();
398
+ const results = data.data || [];
399
+ const pagination = data.pagination || {};
1540
400
 
1541
- if (exportResult.success && exportResult.filePath) {
1542
- return {
1543
- content: [
1544
- {
1545
- type: "text",
1546
- text: `Successfully exported video!\n\n` +
1547
- `File: ${exportResult.filePath}\n` +
1548
- `Frames: ${exportResult.frameCount}\n` +
1549
- `Time range: ${startTime} to ${endTime}\n` +
1550
- `FPS: ${fps}`,
1551
- },
1552
- ],
1553
- };
1554
- } else {
401
+ if (results.length === 0) {
1555
402
  return {
1556
403
  content: [
1557
404
  {
1558
405
  type: "text",
1559
- text: `Failed to export video: ${exportResult.error}`,
406
+ text: "No results found. Try: broader terms, different content_type, or wider time range.",
1560
407
  },
1561
408
  ],
1562
409
  };
1563
410
  }
411
+
412
+ const contentItems: Array<
413
+ | { type: "text"; text: string }
414
+ | { type: "image"; data: string; mimeType: string }
415
+ > = [];
416
+
417
+ const formattedResults: string[] = [];
418
+ const images: Array<{ data: string; context: string }> = [];
419
+
420
+ for (const result of results) {
421
+ const content = result.content;
422
+ if (!content) continue;
423
+
424
+ if (result.type === "OCR") {
425
+ const tagsStr = content.tags?.length ? `\nTags: ${content.tags.join(", ")}` : "";
426
+ formattedResults.push(
427
+ `[OCR] ${content.app_name || "?"} | ${content.window_name || "?"}\n` +
428
+ `${content.timestamp || ""}\n` +
429
+ `${content.text || ""}` +
430
+ tagsStr
431
+ );
432
+ if (includeFrames && content.frame) {
433
+ images.push({
434
+ data: content.frame,
435
+ context: `${content.app_name} at ${content.timestamp}`,
436
+ });
437
+ }
438
+ } else if (result.type === "Audio") {
439
+ const tagsStr = content.tags?.length ? `\nTags: ${content.tags.join(", ")}` : "";
440
+ formattedResults.push(
441
+ `[Audio] ${content.device_name || "?"}\n` +
442
+ `${content.timestamp || ""}\n` +
443
+ `${content.transcription || ""}` +
444
+ tagsStr
445
+ );
446
+ } else if (result.type === "UI" || result.type === "Accessibility") {
447
+ formattedResults.push(
448
+ `[Accessibility] ${content.app_name || "?"} | ${content.window_name || "?"}\n` +
449
+ `${content.timestamp || ""}\n` +
450
+ `${content.text || ""}`
451
+ );
452
+ } else if (result.type === "Memory") {
453
+ const tagsStr = content.tags?.length ? ` [${content.tags.join(", ")}]` : "";
454
+ const importance =
455
+ content.importance != null ? ` (importance: ${content.importance})` : "";
456
+ formattedResults.push(
457
+ `[Memory #${content.id}]${tagsStr}${importance}\n` +
458
+ `${content.created_at || ""}\n` +
459
+ `${content.content || ""}`
460
+ );
461
+ }
462
+ }
463
+
464
+ const header =
465
+ `Results: ${results.length}/${pagination.total || "?"}` +
466
+ (pagination.total > results.length
467
+ ? ` (use offset=${(pagination.offset || 0) + results.length} for more)`
468
+ : "");
469
+
470
+ contentItems.push({
471
+ type: "text",
472
+ text: header + "\n\n" + formattedResults.join("\n---\n"),
473
+ });
474
+
475
+ for (const img of images) {
476
+ contentItems.push({ type: "text", text: `\n📷 ${img.context}` });
477
+ contentItems.push({ type: "image", data: img.data, mimeType: "image/png" });
478
+ }
479
+
480
+ return { content: contentItems };
1564
481
  }
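
For reference, the search-content handler above forwards its arguments as flat query parameters. A minimal example of a `tools/call` payload exercising it; the field names mirror the handler, the values are made up:

```typescript
// Illustrative MCP tool call for search-content. Values are examples only.
const exampleCall = {
  method: "tools/call",
  params: {
    name: "search-content",
    arguments: {
      content_type: "audio",              // meetings/calls, per the tool description
      start_time: "2024-01-15T09:00:00Z", // ISO 8601 range
      end_time: "2024-01-15T12:00:00Z",
      limit: 50,
      include_frames: false,              // true attaches base64 PNG frames to results
    },
  },
};
console.log(JSON.stringify(exampleCall, null, 2));
```
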
1565
482
 
1566
483
  case "list-meetings": {
@@ -1572,20 +489,13 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1572
489
  }
1573
490
 
1574
491
  const response = await fetchAPI(`/meetings?${params.toString()}`);
1575
- if (!response.ok) {
1576
- throw new Error(`HTTP error: ${response.status}`);
1577
- }
492
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
1578
493
 
1579
494
  const meetings = await response.json();
1580
495
 
1581
496
  if (!Array.isArray(meetings) || meetings.length === 0) {
1582
497
  return {
1583
- content: [
1584
- {
1585
- type: "text",
1586
- text: "No meetings found. Make sure screenpipe is running in smart transcription mode.",
1587
- },
1588
- ],
498
+ content: [{ type: "text", text: "No meetings found in the given time range." }],
1589
499
  };
1590
500
  }
1591
501
 
@@ -1600,10 +510,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1600
510
 
1601
511
  return {
1602
512
  content: [
1603
- {
1604
- type: "text",
1605
- text: `Meetings: ${meetings.length}\n\n${formatted.join("\n---\n")}`,
1606
- },
513
+ { type: "text", text: `Meetings: ${meetings.length}\n\n${formatted.join("\n---\n")}` },
1607
514
  ],
1608
515
  };
1609
516
  }
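
Outside the MCP server, the same `/meetings` endpoint can be queried directly. A minimal sketch, assuming the default port and a `start_time` query parameter in the style of the other handlers (the parameter-building lines are elided from this hunk, so that name is an assumption):

```typescript
// Direct query against the endpoint the list-meetings case wraps.
async function listMeetings(startTime?: string): Promise<unknown[]> {
  const params = new URLSearchParams();
  if (startTime) params.append("start_time", startTime); // param name assumed
  const res = await fetch(`http://localhost:3030/meetings?${params}`);
  if (!res.ok) throw new Error(`HTTP error: ${res.status}`);
  const meetings = await res.json();
  return Array.isArray(meetings) ? meetings : []; // handler expects a JSON array
}
```
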
@@ -1617,29 +524,31 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1617
524
  }
1618
525
 
1619
526
  const response = await fetchAPI(`/activity-summary?${params.toString()}`);
1620
- if (!response.ok) {
1621
- throw new Error(`HTTP error: ${response.status}`);
1622
- }
527
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
1623
528
 
1624
529
  const data = await response.json();
1625
530
 
1626
- // Format apps
1627
531
  const appsLines = (data.apps || []).map(
1628
- (a: { name: string; frame_count: number; minutes: number; first_seen?: string; last_seen?: string }) => {
1629
- const timeSpan = a.first_seen && a.last_seen
1630
- ? `, ${a.first_seen.slice(11, 16)}–${a.last_seen.slice(11, 16)} UTC`
1631
- : "";
532
+ (a: {
533
+ name: string;
534
+ frame_count: number;
535
+ minutes: number;
536
+ first_seen?: string;
537
+ last_seen?: string;
538
+ }) => {
539
+ const timeSpan =
540
+ a.first_seen && a.last_seen
541
+ ? `, ${a.first_seen.slice(11, 16)}–${a.last_seen.slice(11, 16)} UTC`
542
+ : "";
1632
543
  return ` ${a.name}: ${a.minutes} min (${a.frame_count} frames${timeSpan})`;
1633
544
  }
1634
545
  );
1635
546
 
1636
- // Format audio
1637
547
  const speakerLines = (data.audio_summary?.speakers || []).map(
1638
548
  (s: { name: string; segment_count: number }) =>
1639
549
  ` ${s.name}: ${s.segment_count} segments`
1640
550
  );
1641
551
 
1642
- // Format recent texts
1643
552
  const textLines = (data.recent_texts || []).map(
1644
553
  (t: { text: string; app_name: string; timestamp: string }) =>
1645
554
  ` [${t.app_name}] ${t.text}`
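
The inline callback types above imply the shape of the `/activity-summary` response; collected into one interface for readability (a sketch inferred from this hunk, not an official schema):

```typescript
// Response shape implied by the activity-summary formatting code above.
interface ActivitySummary {
  apps?: Array<{
    name: string;
    frame_count: number;
    minutes: number;
    first_seen?: string; // ISO timestamp; HH:MM is sliced out for display
    last_seen?: string;
  }>;
  audio_summary?: {
    speakers?: Array<{ name: string; segment_count: number }>;
  };
  recent_texts?: Array<{ text: string; app_name: string; timestamp: string }>;
}
```
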
@@ -1671,9 +580,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1671
580
  }
1672
581
 
1673
582
  const response = await fetchAPI(`/elements?${params.toString()}`);
1674
- if (!response.ok) {
1675
- throw new Error(`HTTP error: ${response.status}`);
1676
- }
583
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
1677
584
 
1678
585
  const data = await response.json();
1679
586
  const elements = data.data || [];
@@ -1721,21 +628,14 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1721
628
  case "frame-context": {
1722
629
  const frameId = args.frame_id as number;
1723
630
  if (!frameId) {
1724
- return {
1725
- content: [{ type: "text", text: "Error: frame_id is required" }],
1726
- };
631
+ return { content: [{ type: "text", text: "Error: frame_id is required" }] };
1727
632
  }
1728
633
 
1729
634
  const response = await fetchAPI(`/frames/${frameId}/context`);
1730
- if (!response.ok) {
1731
- throw new Error(`HTTP error: ${response.status}`);
1732
- }
635
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
1733
636
 
1734
637
  const data = await response.json();
1735
-
1736
- const lines = [
1737
- `Frame ${data.frame_id} (source: ${data.text_source})`,
1738
- ];
638
+ const lines = [`Frame ${data.frame_id} (source: ${data.text_source})`];
1739
639
 
1740
640
  if (data.urls?.length) {
1741
641
  lines.push("", "URLs:", ...data.urls.map((u: string) => ` ${u}`));
@@ -1753,27 +653,228 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1753
653
  }
1754
654
 
1755
655
  if (data.text) {
1756
- // Truncate to avoid massive outputs
1757
- const truncated = data.text.length > 2000 ? data.text.substring(0, 2000) + "..." : data.text;
656
+ const truncated =
657
+ data.text.length > 2000 ? data.text.substring(0, 2000) + "..." : data.text;
1758
658
  lines.push("", "Full text:", truncated);
1759
659
  }
1760
660
 
1761
661
  return { content: [{ type: "text", text: lines.join("\n") }] };
1762
662
  }
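
The fields the frame-context case reads suggest this response shape for `/frames/{id}/context` (inferred from the handler; fields handled in the elided middle of this hunk are not listed):

```typescript
// Shape inferred from the frame-context handler's reads.
interface FrameContext {
  frame_id: number;
  text_source: string; // which capture path produced the text
  urls?: string[];     // listed when present
  text?: string;       // truncated to 2000 chars for display
}
```
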
1763
663
 
664
+ case "export-video": {
665
+ const startTime = args.start_time as string;
666
+ const endTime = args.end_time as string;
667
+ const fps = (args.fps as number) || 1.0;
668
+
669
+ if (!startTime || !endTime) {
670
+ return {
671
+ content: [{ type: "text", text: "Error: start_time and end_time are required" }],
672
+ };
673
+ }
674
+
675
+ // Get frame IDs for the time range
676
+ const searchParams = new URLSearchParams({
677
+ content_type: "ocr",
678
+ start_time: startTime,
679
+ end_time: endTime,
680
+ limit: "10000",
681
+ });
682
+
683
+ const searchResponse = await fetchAPI(`/search?${searchParams.toString()}`);
684
+ if (!searchResponse.ok) {
685
+ throw new Error(`Failed to search for frames: HTTP ${searchResponse.status}`);
686
+ }
687
+
688
+ const searchData = await searchResponse.json();
689
+ const results = searchData.data || [];
690
+
691
+ if (results.length === 0) {
692
+ return {
693
+ content: [
694
+ {
695
+ type: "text",
696
+ text: `No screen recordings found between ${startTime} and ${endTime}.`,
697
+ },
698
+ ],
699
+ };
700
+ }
701
+
702
+ const frameIds: number[] = [];
703
+ const seenIds = new Set<number>();
704
+ for (const result of results) {
705
+ if (result.type === "OCR" && result.content?.frame_id) {
706
+ const frameId = result.content.frame_id;
707
+ if (!seenIds.has(frameId)) {
708
+ seenIds.add(frameId);
709
+ frameIds.push(frameId);
710
+ }
711
+ }
712
+ }
713
+
714
+ if (frameIds.length === 0) {
715
+ return {
716
+ content: [{ type: "text", text: "No valid frame IDs found (audio-only?)." }],
717
+ };
718
+ }
719
+
720
+ frameIds.sort((a, b) => a - b);
721
+
722
+ const wsUrl = `ws://localhost:${port}/frames/export?fps=${fps}`;
723
+
724
+ const exportResult = await new Promise<{
725
+ success: boolean;
726
+ filePath?: string;
727
+ error?: string;
728
+ frameCount?: number;
729
+ }>((resolve) => {
730
+ const ws = new WebSocket(wsUrl);
731
+ let resolved = false;
732
+
733
+ const timeout = setTimeout(() => {
734
+ if (!resolved) {
735
+ resolved = true;
736
+ ws.close();
737
+ resolve({ success: false, error: "Export timed out after 5 minutes" });
738
+ }
739
+ }, 5 * 60 * 1000);
740
+
741
+ ws.on("open", () => {
742
+ ws.send(JSON.stringify({ frame_ids: frameIds }));
743
+ });
744
+
745
+ ws.on("error", (error) => {
746
+ if (!resolved) {
747
+ resolved = true;
748
+ clearTimeout(timeout);
749
+ resolve({ success: false, error: `WebSocket error: ${error.message}` });
750
+ }
751
+ });
752
+
753
+ ws.on("close", () => {
754
+ if (!resolved) {
755
+ resolved = true;
756
+ clearTimeout(timeout);
757
+ resolve({ success: false, error: "Connection closed unexpectedly" });
758
+ }
759
+ });
760
+
761
+ ws.on("message", (data) => {
762
+ try {
763
+ const message = JSON.parse(data.toString());
764
+ if (message.status === "completed" && message.video_data) {
765
+ const tempDir = os.tmpdir();
766
+ const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
767
+ const filename = `screenpipe_export_${timestamp}.mp4`;
768
+ const filePath = path.join(tempDir, filename);
769
+ fs.writeFileSync(filePath, Buffer.from(message.video_data));
770
+ resolved = true;
771
+ clearTimeout(timeout);
772
+ ws.close();
773
+ resolve({ success: true, filePath, frameCount: frameIds.length });
774
+ } else if (message.status === "error") {
775
+ resolved = true;
776
+ clearTimeout(timeout);
777
+ ws.close();
778
+ resolve({ success: false, error: message.error || "Export failed" });
779
+ }
780
+ } catch {
781
+ // Ignore parse errors for progress messages
782
+ }
783
+ });
784
+ });
785
+
786
+ if (exportResult.success && exportResult.filePath) {
787
+ return {
788
+ content: [
789
+ {
790
+ type: "text",
791
+ text:
792
+ `Video exported: ${exportResult.filePath}\n` +
793
+ `Frames: ${exportResult.frameCount} | ${startTime} → ${endTime} | ${fps} fps`,
794
+ },
795
+ ],
796
+ };
797
+ } else {
798
+ return {
799
+ content: [{ type: "text", text: `Export failed: ${exportResult.error}` }],
800
+ };
801
+ }
802
+ }
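
The export flow above implies a small WebSocket protocol: the client sends `{ frame_ids }` once, then receives status messages until `completed` or `error`, ignoring progress updates in between. A condensed client sketch of just that exchange, with message fields inferred from the handler:

```typescript
import WebSocket from "ws"; // same event-style client the handler above uses

// Client side of the export protocol implied by the handler:
// send { frame_ids } once, then wait for "completed" or "error".
function exportFrames(frameIds: number[], fps = 1.0): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const ws = new WebSocket(`ws://localhost:3030/frames/export?fps=${fps}`);
    ws.on("open", () => ws.send(JSON.stringify({ frame_ids: frameIds })));
    ws.on("error", reject);
    ws.on("message", (data) => {
      const msg = JSON.parse(data.toString());
      if (msg.status === "completed" && msg.video_data) {
        ws.close();
        resolve(Buffer.from(msg.video_data));
      } else if (msg.status === "error") {
        ws.close();
        reject(new Error(msg.error || "Export failed"));
      }
      // other statuses ("extracting", "encoding") are progress; ignore them
    });
  });
}
```
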
803
+
804
+ case "update-memory": {
805
+ if (args.delete && args.id) {
806
+ const response = await fetchAPI(`/memories/${args.id}`, { method: "DELETE" });
807
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
808
+ return { content: [{ type: "text", text: `Memory ${args.id} deleted.` }] };
809
+ }
810
+ if (args.id) {
811
+ const body: Record<string, unknown> = {};
812
+ if (args.content !== undefined) body.content = args.content;
813
+ if (args.tags !== undefined) body.tags = args.tags;
814
+ if (args.importance !== undefined) body.importance = args.importance;
815
+ if (args.source_context !== undefined) body.source_context = args.source_context;
816
+ const response = await fetchAPI(`/memories/${args.id}`, {
817
+ method: "PUT",
818
+ body: JSON.stringify(body),
819
+ });
820
+ if (!response.ok) throw new Error(`HTTP error: ${response.status}`);
821
+ const memory = await response.json();
822
+ return {
823
+ content: [{ type: "text", text: `Memory ${memory.id} updated: "${memory.content}"` }],
824
+ };
825
+ }
826
+ if (!args.content) {
827
+ return {
828
+ content: [{ type: "text", text: "Error: 'content' is required to create a memory" }],
829
+ };
830
+ }
831
+ const memoryBody: Record<string, unknown> = {
832
+ content: args.content,
833
+ source: "mcp",
834
+ tags: args.tags || [],
835
+ importance: args.importance ?? 0.5,
836
+ };
837
+ if (args.source_context) memoryBody.source_context = args.source_context;
838
+ const memoryResponse = await fetchAPI("/memories", {
839
+ method: "POST",
840
+ body: JSON.stringify(memoryBody),
841
+ });
842
+ if (!memoryResponse.ok) throw new Error(`HTTP error: ${memoryResponse.status}`);
843
+ const newMemory = await memoryResponse.json();
844
+ return {
845
+ content: [
846
+ { type: "text", text: `Memory created (id: ${newMemory.id}): "${newMemory.content}"` },
847
+ ],
848
+ };
849
+ }
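
Taken together, the branches above map onto a plain REST surface: POST `/memories` to create, PUT `/memories/:id` to update, DELETE `/memories/:id` to remove. A minimal direct-HTTP sketch of the same calls, assuming the default port:

```typescript
const API = "http://localhost:3030";

// Create: mirrors the POST branch above (same source/tags/importance defaults).
async function createMemory(content: string, tags: string[] = []) {
  const res = await fetch(`${API}/memories`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ content, source: "mcp", tags, importance: 0.5 }),
  });
  if (!res.ok) throw new Error(`HTTP error: ${res.status}`);
  return res.json(); // { id, content, ... }
}

// Update or delete by id, mirroring the PUT and DELETE branches.
async function updateMemory(id: number, patch: Record<string, unknown>) {
  const res = await fetch(`${API}/memories/${id}`, {
    method: "PUT",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(patch),
  });
  if (!res.ok) throw new Error(`HTTP error: ${res.status}`);
  return res.json();
}

async function deleteMemory(id: number) {
  const res = await fetch(`${API}/memories/${id}`, { method: "DELETE" });
  if (!res.ok) throw new Error(`HTTP error: ${res.status}`);
}
```
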
850
+
851
+ case "send-notification": {
852
+ const notifBody: Record<string, unknown> = {
853
+ title: args.title,
854
+ body: args.body || "",
855
+ type: "pipe",
856
+ };
857
+ if (args.timeout_secs) notifBody.timeout = Number(args.timeout_secs) * 1000;
858
+ if (args.actions) notifBody.actions = args.actions;
859
+ const notifResponse = await fetch("http://localhost:11435/notify", {
860
+ method: "POST",
861
+ headers: { "Content-Type": "application/json" },
862
+ body: JSON.stringify(notifBody),
863
+ });
864
+ if (!notifResponse.ok) throw new Error(`HTTP error: ${notifResponse.status}`);
865
+ const notifResult = await notifResponse.json();
866
+ return {
867
+ content: [{ type: "text", text: `Notification sent: ${notifResult.message}` }],
868
+ };
869
+ }
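
Note that send-notification bypasses `fetchAPI` and posts straight to a separate notifier on port 11435. A standalone sketch of the same request, with payload fields copied from the handler (`timeout` is milliseconds there, derived from `timeout_secs`):

```typescript
// Direct call to the notifier the send-notification case targets.
async function notify(title: string, body = "", timeoutSecs?: number) {
  const payload: Record<string, unknown> = { title, body, type: "pipe" };
  if (timeoutSecs) payload.timeout = timeoutSecs * 1000; // handler converts secs -> ms
  const res = await fetch("http://localhost:11435/notify", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  if (!res.ok) throw new Error(`HTTP error: ${res.status}`);
  return res.json(); // { message: ... } per the handler's success text
}
```
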
870
+
1764
871
  default:
1765
872
  throw new Error(`Unknown tool: ${name}`);
1766
873
  }
1767
874
  } catch (error) {
1768
- const errorMessage =
1769
- error instanceof Error ? error.message : "Unknown error";
875
+ const errorMessage = error instanceof Error ? error.message : "Unknown error";
1770
876
  return {
1771
- content: [
1772
- {
1773
- type: "text",
1774
- text: `Error executing ${name}: ${errorMessage}`,
1775
- },
1776
- ],
877
+ content: [{ type: "text", text: `Error executing ${name}: ${errorMessage}` }],
1777
878
  };
1778
879
  }
1779
880
  });