screenpipe-sync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +135 -0
- package/bun.lock +97 -0
- package/dist/index.js +9961 -0
- package/package.json +37 -0
- package/src/index.ts +496 -0
- package/tsconfig.json +14 -0
package/package.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "name": "screenpipe-sync",
+  "version": "0.1.0",
+  "description": "Sync Screenpipe activity to structured daily summaries. Extract todos, goals, decisions from your screen history.",
+  "type": "module",
+  "bin": {
+    "screenpipe-sync": "dist/index.js"
+  },
+  "scripts": {
+    "build": "bun build src/index.ts --outdir dist --target node",
+    "dev": "bun run src/index.ts",
+    "prepublishOnly": "bun run build"
+  },
+  "keywords": [
+    "screenpipe",
+    "productivity",
+    "ai",
+    "screen-recording",
+    "daily-summary",
+    "todo",
+    "context-sync"
+  ],
+  "author": "Louis Beaumont <louis@screenpi.pe>",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/mediar-ai/screenpipe",
+    "directory": "packages/sync"
+  },
+  "dependencies": {
+    "@anthropic-ai/sdk": "^0.39.0"
+  },
+  "devDependencies": {
+    "@types/bun": "latest",
+    "typescript": "^5.0.0"
+  }
+}
package/src/index.ts
ADDED
@@ -0,0 +1,496 @@
+#!/usr/bin/env bun
+/**
+ * @screenpipe/sync - Daily context sync from Screenpipe
+ *
+ * Usage:
+ *   bunx @screenpipe/sync                      # Summary to stdout
+ *   bunx @screenpipe/sync --output ~/notes     # Save to folder
+ *   bunx @screenpipe/sync --hours 8            # Last 8 hours
+ *   bunx @screenpipe/sync --git                # Auto commit & push
+ *   bunx @screenpipe/sync --remote host:path   # Sync to remote
+ */
+
+import Anthropic from "@anthropic-ai/sdk";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+interface ScreenpipeResult {
+  type: "OCR" | "Audio";
+  content: {
+    text: string;
+    timestamp: string;
+    app_name?: string;
+    window_name?: string;
+  };
+}
+
+interface DailySummary {
+  date: string;
+  apps: Record<string, number>;
+  todos: string[];
+  goals: string[];
+  decisions: string[];
+  activities: string[];
+  meetings: string[];
+  blockers: string[];
+  insights: string[];
+  rawMinutes: number;
+}
+
+interface Config {
+  screenpipeUrl: string;
+  outputDir: string | null;
+  hours: number;
+  gitPush: boolean;
+  remote: string | null;
+  anthropicKey: string | null;
+  openaiKey: string | null;
+  ollamaUrl: string | null;
+  ollamaModel: string;
+  format: "markdown" | "json";
+  verbose: boolean;
+}
+
+// ============================================================================
+// CLI Parsing
+// ============================================================================
+
+function parseArgs(): Config {
+  const args = process.argv.slice(2);
+  const config: Config = {
+    screenpipeUrl: process.env.SCREENPIPE_URL || "http://localhost:3030",
+    outputDir: null,
+    hours: 12,
+    gitPush: false,
+    remote: null,
+    anthropicKey: process.env.ANTHROPIC_API_KEY || null,
+    openaiKey: process.env.OPENAI_API_KEY || null,
+    ollamaUrl: process.env.OLLAMA_URL || "http://localhost:11434",
+    ollamaModel: process.env.OLLAMA_MODEL || "llama3.2",
+    format: "markdown",
+    verbose: false,
+  };
+
+  for (let i = 0; i < args.length; i++) {
+    const arg = args[i];
+    switch (arg) {
+      case "--output":
+      case "-o":
+        config.outputDir = args[++i];
+        break;
+      case "--hours":
+      case "-h":
+        config.hours = parseInt(args[++i]) || 12;
+        break;
+      case "--git":
+      case "-g":
+        config.gitPush = true;
+        break;
+      case "--remote":
+      case "-r":
+        config.remote = args[++i];
+        break;
+      case "--json":
+        config.format = "json";
+        break;
+      case "--verbose":
+      case "-v":
+        config.verbose = true;
+        break;
+      case "--help":
+        printHelp();
+        process.exit(0);
+    }
+  }
+
+  return config;
+}
+
+function printHelp() {
+  console.log(`
+@screenpipe/sync - Extract daily context from Screenpipe
+
+USAGE:
+  bunx @screenpipe/sync [options]
+
+OPTIONS:
+  -o, --output <dir>   Save summary to directory (default: stdout)
+  -h, --hours <n>      Hours of history to analyze (default: 12)
+  -g, --git            Auto commit and push after writing
+  -r, --remote <host>  Sync to remote via SSH (user@host:path)
+  --json               Output as JSON instead of markdown
+  -v, --verbose        Show debug output
+
+ENVIRONMENT:
+  SCREENPIPE_URL       Screenpipe API URL (default: http://localhost:3030)
+  ANTHROPIC_API_KEY    Required for AI summarization
+
+EXAMPLES:
+  bunx @screenpipe/sync
+  bunx @screenpipe/sync --output ~/Documents/brain/context --git
+  bunx @screenpipe/sync --hours 24 --json
+  bunx @screenpipe/sync --remote clawdbot:~/brain/context
+
+OUTPUT:
+  Creates structured daily summaries with:
+  - Todo items extracted from screen content
+  - Goals and intentions mentioned
+  - Decisions made
+  - Key activities by app
+  - Meetings and conversations
+  - Blockers and problems
+  - AI-generated insights
+`);
+}
+
+// ============================================================================
+// Screenpipe API
+// ============================================================================
+
+async function queryScreenpipe(
+  config: Config
+): Promise<ScreenpipeResult[]> {
+  const startTime = new Date(Date.now() - config.hours * 60 * 60 * 1000);
+  const url = new URL(`${config.screenpipeUrl}/search`);
+  url.searchParams.set("content_type", "ocr");
+  url.searchParams.set("limit", "500");
+  url.searchParams.set("start_time", startTime.toISOString());
+
+  if (config.verbose) {
+    console.error(`[screenpipe] Querying: ${url}`);
+  }
+
+  try {
+    const res = await fetch(url.toString());
+    if (!res.ok) {
+      throw new Error(`Screenpipe API error: ${res.status}`);
+    }
+    const data = await res.json();
+    return data.data || [];
+  } catch (e) {
+    console.error(`[error] Failed to connect to Screenpipe at ${config.screenpipeUrl}`);
+    console.error(`        Make sure Screenpipe is running.`);
+    process.exit(1);
+  }
+}
+
+// ============================================================================
+// Data Processing
+// ============================================================================
+
+function processResults(results: ScreenpipeResult[]): {
+  byApp: Record<string, string[]>;
+  timeline: { time: string; app: string; text: string }[];
+} {
+  const byApp: Record<string, string[]> = {};
+  const timeline: { time: string; app: string; text: string }[] = [];
+  const seen = new Set<string>();
+
+  for (const r of results) {
+    if (r.type !== "OCR") continue;
+    const text = r.content.text?.trim();
+    if (!text || text.length < 20) continue;
+
+    // Dedupe similar content
+    const hash = text.slice(0, 100);
+    if (seen.has(hash)) continue;
+    seen.add(hash);
+
+    const app = r.content.app_name || "Unknown";
+    byApp[app] = byApp[app] || [];
+    byApp[app].push(text);
+
+    timeline.push({
+      time: r.content.timestamp,
+      app,
+      text: text.slice(0, 500),
+    });
+  }
+
+  return { byApp, timeline };
+}
+
+// ============================================================================
+// AI Extraction
+// ============================================================================
+
+const EXTRACTION_PROMPT = `You are analyzing screen capture data from a user's computer to create a structured daily summary.
+
+INPUT: Raw OCR text from screen captures, organized by app.
+
+OUTPUT: A structured extraction in this exact JSON format:
+{
+  "todos": ["actionable items mentioned or visible on screen"],
+  "goals": ["goals, objectives, intentions mentioned"],
+  "decisions": ["decisions made or discussed"],
+  "activities": ["key activities/tasks worked on"],
+  "meetings": ["meetings, calls, conversations"],
+  "blockers": ["problems, blockers, frustrations"],
+  "insights": ["2-3 AI observations about the day's work patterns"]
+}
+
+RULES:
+- Extract ACTUAL content seen on screen, not generic statements
+- Todos should be actionable (start with verb)
+- Be specific: "Fix auth bug in login.ts" not "worked on code"
+- Deduplicate similar items
+- Max 10 items per category
+- If no relevant content for a category, use empty array
+- Insights should note patterns (context switching, focus blocks, late nights, etc.)
+
+Analyze this screen data:
+`;
+
+async function extractWithAI(
+  byApp: Record<string, string[]>,
+  config: Config
+): Promise<Partial<DailySummary>> {
+  // Prepare condensed input (limit to avoid token explosion)
+  const condensed = Object.entries(byApp)
+    .map(([app, texts]) => {
+      const sample = texts.slice(0, 20).join("\n---\n").slice(0, 3000);
+      return `## ${app}\n${sample}`;
+    })
+    .join("\n\n")
+    .slice(0, 15000);
+
+  const prompt = EXTRACTION_PROMPT + condensed;
+
+  // Try providers in order: Anthropic > OpenAI > Ollama > fallback
+  if (config.anthropicKey) {
+    if (config.verbose) console.error(`[ai] Using Claude (${condensed.length} chars)`);
+    try {
+      const client = new Anthropic({ apiKey: config.anthropicKey });
+      const response = await client.messages.create({
+        model: "claude-sonnet-4-20250514",
+        max_tokens: 2000,
+        messages: [{ role: "user", content: prompt }],
+      });
+      const text = response.content[0].type === "text" ? response.content[0].text : "";
+      const jsonMatch = text.match(/\{[\s\S]*\}/);
+      if (jsonMatch) return JSON.parse(jsonMatch[0]);
+    } catch (e) {
+      console.error(`[error] Claude failed: ${e}`);
+    }
+  }
+
+  if (config.openaiKey) {
+    if (config.verbose) console.error(`[ai] Using OpenAI`);
+    try {
+      const res = await fetch("https://api.openai.com/v1/chat/completions", {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${config.openaiKey}`,
+        },
+        body: JSON.stringify({
+          model: "gpt-4o-mini",
+          messages: [{ role: "user", content: prompt }],
+          max_tokens: 2000,
+        }),
+      });
+      const data = await res.json();
+      const text = data.choices?.[0]?.message?.content || "";
+      const jsonMatch = text.match(/\{[\s\S]*\}/);
+      if (jsonMatch) return JSON.parse(jsonMatch[0]);
+    } catch (e) {
+      console.error(`[error] OpenAI failed: ${e}`);
+    }
+  }
+
+  // Try Ollama (local)
+  try {
+    if (config.verbose) console.error(`[ai] Trying Ollama at ${config.ollamaUrl}`);
+    const res = await fetch(`${config.ollamaUrl}/api/generate`, {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify({
+        model: config.ollamaModel,
+        prompt: prompt,
+        stream: false,
+      }),
+    });
+    if (res.ok) {
+      const data = await res.json();
+      const text = data.response || "";
+      const jsonMatch = text.match(/\{[\s\S]*\}/);
+      if (jsonMatch) return JSON.parse(jsonMatch[0]);
+    }
+  } catch (e) {
+    if (config.verbose) console.error(`[warn] Ollama not available`);
+  }
+
+  // Fallback: no AI
+  console.error("[warn] No AI provider available - returning basic summary");
+  console.error("       Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or run Ollama locally");
+  return {
+    todos: [],
+    goals: [],
+    decisions: [],
+    activities: Object.keys(byApp).map((app) => `Used ${app}`),
+    meetings: [],
+    blockers: [],
+    insights: ["No AI provider configured - set ANTHROPIC_API_KEY or OPENAI_API_KEY"],
+  };
+}
+
+// ============================================================================
+// Output Formatting
+// ============================================================================
+
+function formatMarkdown(summary: DailySummary): string {
+  const { date, apps, todos, goals, decisions, activities, meetings, blockers, insights } = summary;
+
+  const appList = Object.entries(apps)
+    .sort((a, b) => b[1] - a[1])
+    .slice(0, 10)
+    .map(([app, mins]) => `- **${app}**: ~${mins} min`)
+    .join("\n");
+
+  const formatList = (items: string[]) =>
+    items.length > 0 ? items.map((i) => `- ${i}`).join("\n") : "_None extracted_";
+
+  return `# Daily Context - ${date}
+
+> Auto-generated by @screenpipe/sync
+> Analyzed ${summary.rawMinutes} minutes of screen activity
+
+## 📱 Apps Used
+${appList}
+
+## ✅ Todos Extracted
+${formatList(todos)}
+
+## 🎯 Goals Mentioned
+${formatList(goals)}
+
+## 🔀 Decisions Made
+${formatList(decisions)}
+
+## 💻 Key Activities
+${formatList(activities)}
+
+## 👥 Meetings & Conversations
+${formatList(meetings)}
+
+## 🚧 Blockers & Problems
+${formatList(blockers)}
+
+## 💡 AI Insights
+${formatList(insights)}
+
+---
+_Generated at ${new Date().toISOString()}_
+`;
+}
+
+// ============================================================================
+// Output Handlers
+// ============================================================================
+
+async function writeOutput(content: string, config: Config, filename: string) {
+  if (!config.outputDir) {
+    console.log(content);
+    return;
+  }
+
+  const fs = await import("fs/promises");
+  const path = await import("path");
+
+  const dir = path.resolve(config.outputDir);
+  await fs.mkdir(dir, { recursive: true });
+
+  const filepath = path.join(dir, filename);
+  await fs.writeFile(filepath, content);
+  console.error(`[ok] Written to ${filepath}`);
+
+  if (config.gitPush) {
+    const { execSync } = await import("child_process");
+    try {
+      execSync(`cd "${dir}" && git add -A && git commit -m "sync: ${filename}" && git push`, {
+        stdio: config.verbose ? "inherit" : "pipe",
+      });
+      console.error(`[ok] Git pushed`);
+    } catch (e) {
+      console.error(`[warn] Git push failed - maybe no changes?`);
+    }
+  }
+
+  if (config.remote) {
+    const { execSync } = await import("child_process");
+    try {
+      execSync(`scp "${filepath}" "${config.remote}/"`, {
+        stdio: config.verbose ? "inherit" : "pipe",
+      });
+      console.error(`[ok] Synced to ${config.remote}`);
+    } catch (e) {
+      console.error(`[error] Remote sync failed: ${e}`);
+    }
+  }
+}
+
+// ============================================================================
+// Main
+// ============================================================================
+
+async function main() {
+  const config = parseArgs();
+  const today = new Date().toISOString().split("T")[0];
+
+  console.error(`[screenpipe-sync] Analyzing last ${config.hours} hours...`);
+
+  // 1. Query Screenpipe
+  const results = await queryScreenpipe(config);
+  console.error(`[ok] Retrieved ${results.length} screen captures`);
+
+  if (results.length === 0) {
+    console.error("[warn] No screen data found. Is Screenpipe running?");
+    process.exit(0);
+  }
+
+  // 2. Process results
+  const { byApp, timeline } = processResults(results);
+  console.error(`[ok] Processed ${Object.keys(byApp).length} apps`);
+
+  // 3. Calculate app usage (rough estimate: each capture ≈ 5 seconds)
+  const appMinutes: Record<string, number> = {};
+  for (const app of Object.keys(byApp)) {
+    appMinutes[app] = Math.round((byApp[app].length * 5) / 60);
+  }
+
+  // 4. AI extraction
+  console.error(`[ai] Extracting todos, goals, decisions...`);
+  const extracted = await extractWithAI(byApp, config);
+
+  // 5. Build summary
+  const summary: DailySummary = {
+    date: today,
+    apps: appMinutes,
+    todos: extracted.todos || [],
+    goals: extracted.goals || [],
+    decisions: extracted.decisions || [],
+    activities: extracted.activities || [],
+    meetings: extracted.meetings || [],
+    blockers: extracted.blockers || [],
+    insights: extracted.insights || [],
+    rawMinutes: Math.round((results.length * 5) / 60),
+  };
+
+  // 6. Output
+  const filename =
+    config.format === "json" ? `${today}.json` : `${today}.md`;
+
+  const content =
+    config.format === "json"
+      ? JSON.stringify(summary, null, 2)
+      : formatMarkdown(summary);
+
+  await writeOutput(content, config, filename);
+}
+
+main().catch((e) => {
+  console.error(`[fatal] ${e.message}`);
+  process.exit(1);
+});
package/tsconfig.json
ADDED
@@ -0,0 +1,14 @@
+{
+  "compilerOptions": {
+    "target": "ES2022",
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "esModuleInterop": true,
+    "strict": true,
+    "skipLibCheck": true,
+    "outDir": "dist",
+    "declaration": true,
+    "types": ["bun-types"]
+  },
+  "include": ["src/**/*"]
+}