escribano 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +297 -0
- package/dist/0_types.js +279 -0
- package/dist/actions/classify-session.js +77 -0
- package/dist/actions/create-contexts.js +44 -0
- package/dist/actions/create-topic-blocks.js +68 -0
- package/dist/actions/extract-metadata.js +24 -0
- package/dist/actions/generate-artifact-v3.js +296 -0
- package/dist/actions/generate-artifact.js +61 -0
- package/dist/actions/generate-summary-v3.js +260 -0
- package/dist/actions/outline-index.js +204 -0
- package/dist/actions/process-recording-v2.js +494 -0
- package/dist/actions/process-recording-v3.js +412 -0
- package/dist/actions/process-session.js +183 -0
- package/dist/actions/publish-summary-v3.js +303 -0
- package/dist/actions/sync-to-outline.js +196 -0
- package/dist/adapters/audio.silero.adapter.js +69 -0
- package/dist/adapters/cap.adapter.js +94 -0
- package/dist/adapters/capture.cap.adapter.js +107 -0
- package/dist/adapters/capture.filesystem.adapter.js +124 -0
- package/dist/adapters/embedding.ollama.adapter.js +141 -0
- package/dist/adapters/intelligence.adapter.js +202 -0
- package/dist/adapters/intelligence.mlx.adapter.js +395 -0
- package/dist/adapters/intelligence.ollama.adapter.js +741 -0
- package/dist/adapters/publishing.outline.adapter.js +75 -0
- package/dist/adapters/storage.adapter.js +81 -0
- package/dist/adapters/storage.fs.adapter.js +83 -0
- package/dist/adapters/transcription.whisper.adapter.js +206 -0
- package/dist/adapters/video.ffmpeg.adapter.js +405 -0
- package/dist/adapters/whisper.adapter.js +168 -0
- package/dist/batch-context.js +329 -0
- package/dist/db/helpers.js +50 -0
- package/dist/db/index.js +95 -0
- package/dist/db/migrate.js +80 -0
- package/dist/db/repositories/artifact.sqlite.js +77 -0
- package/dist/db/repositories/cluster.sqlite.js +92 -0
- package/dist/db/repositories/context.sqlite.js +75 -0
- package/dist/db/repositories/index.js +10 -0
- package/dist/db/repositories/observation.sqlite.js +70 -0
- package/dist/db/repositories/recording.sqlite.js +56 -0
- package/dist/db/repositories/subject.sqlite.js +64 -0
- package/dist/db/repositories/topic-block.sqlite.js +45 -0
- package/dist/db/types.js +4 -0
- package/dist/domain/classification.js +60 -0
- package/dist/domain/context.js +97 -0
- package/dist/domain/index.js +2 -0
- package/dist/domain/observation.js +17 -0
- package/dist/domain/recording.js +41 -0
- package/dist/domain/segment.js +93 -0
- package/dist/domain/session.js +93 -0
- package/dist/domain/time-range.js +38 -0
- package/dist/domain/transcript.js +79 -0
- package/dist/index.js +173 -0
- package/dist/pipeline/context.js +162 -0
- package/dist/pipeline/events.js +2 -0
- package/dist/prerequisites.js +226 -0
- package/dist/scripts/rebuild-index.js +53 -0
- package/dist/scripts/seed-fixtures.js +290 -0
- package/dist/services/activity-segmentation.js +333 -0
- package/dist/services/activity-segmentation.test.js +191 -0
- package/dist/services/app-normalization.js +212 -0
- package/dist/services/cluster-merge.js +69 -0
- package/dist/services/clustering.js +237 -0
- package/dist/services/debug.js +58 -0
- package/dist/services/frame-sampling.js +318 -0
- package/dist/services/signal-extraction.js +106 -0
- package/dist/services/subject-grouping.js +342 -0
- package/dist/services/temporal-alignment.js +99 -0
- package/dist/services/vlm-enrichment.js +84 -0
- package/dist/services/vlm-service.js +130 -0
- package/dist/stats/index.js +3 -0
- package/dist/stats/observer.js +65 -0
- package/dist/stats/repository.js +36 -0
- package/dist/stats/resource-tracker.js +86 -0
- package/dist/stats/types.js +1 -0
- package/dist/test-classification-prompts.js +181 -0
- package/dist/tests/cap.adapter.test.js +75 -0
- package/dist/tests/capture.cap.adapter.test.js +69 -0
- package/dist/tests/classify-session.test.js +140 -0
- package/dist/tests/db/repositories.test.js +243 -0
- package/dist/tests/domain/time-range.test.js +31 -0
- package/dist/tests/integration.test.js +84 -0
- package/dist/tests/intelligence.adapter.test.js +102 -0
- package/dist/tests/intelligence.ollama.adapter.test.js +178 -0
- package/dist/tests/process-v2.test.js +90 -0
- package/dist/tests/services/clustering.test.js +112 -0
- package/dist/tests/services/frame-sampling.test.js +152 -0
- package/dist/tests/utils/ocr.test.js +76 -0
- package/dist/tests/utils/parallel.test.js +57 -0
- package/dist/tests/visual-observer.test.js +175 -0
- package/dist/utils/id-normalization.js +15 -0
- package/dist/utils/index.js +9 -0
- package/dist/utils/model-detector.js +154 -0
- package/dist/utils/ocr.js +80 -0
- package/dist/utils/parallel.js +32 -0
- package/migrations/001_initial.sql +109 -0
- package/migrations/002_clusters.sql +41 -0
- package/migrations/003_observations_vlm_fields.sql +14 -0
- package/migrations/004_observations_unique.sql +18 -0
- package/migrations/005_processing_stats.sql +29 -0
- package/migrations/006_vlm_raw_response.sql +6 -0
- package/migrations/007_subjects.sql +23 -0
- package/migrations/008_artifacts_recording.sql +6 -0
- package/migrations/009_artifact_subjects.sql +10 -0
- package/package.json +82 -0
- package/prompts/action-items.md +55 -0
- package/prompts/blog-draft.md +54 -0
- package/prompts/blog-research.md +87 -0
- package/prompts/card.md +54 -0
- package/prompts/classify-segment.md +38 -0
- package/prompts/classify.md +37 -0
- package/prompts/code-snippets.md +163 -0
- package/prompts/extract-metadata.md +149 -0
- package/prompts/notes.md +83 -0
- package/prompts/runbook.md +123 -0
- package/prompts/standup.md +50 -0
- package/prompts/step-by-step.md +125 -0
- package/prompts/subject-grouping.md +31 -0
- package/prompts/summary-v3.md +89 -0
- package/prompts/summary.md +77 -0
- package/prompts/topic-classifier.md +24 -0
- package/prompts/topic-extract.md +13 -0
- package/prompts/vlm-batch.md +21 -0
- package/prompts/vlm-single.md +19 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import { pipelineEvents } from '../pipeline/events.js';
// Module-level observer state: id of the run currently in flight (if any) and
// the repository handed to setupStatsObserver (needed by cancelCurrentRun,
// which runs outside the event handlers' closure over `repo`).
let currentRunId = null;
let statsRepo = null;
/**
 * Subscribe to pipeline lifecycle events and persist run/phase statistics.
 *
 * @param {object} repo - Stats repository exposing createRun/updateRun/createStat/updateStat.
 */
export function setupStatsObserver(repo) {
    statsRepo = repo;
    pipelineEvents.on('run:start', (data) => {
        currentRunId = data.runId;
        repo.createRun({
            id: data.runId,
            recording_id: data.recordingId,
            run_type: data.runType,
            status: 'running',
            started_at: new Date(data.timestamp).toISOString(),
            // Repository stores metadata as a JSON string (or absent).
            metadata: data.metadata ? JSON.stringify(data.metadata) : undefined,
        });
    });
    pipelineEvents.on('run:end', (data) => {
        if (!currentRunId)
            return;
        repo.updateRun(currentRunId, {
            status: data.status,
            completed_at: new Date(data.timestamp).toISOString(),
            total_duration_ms: data.durationMs,
            error_message: data.error,
        });
        // NOTE(review): currentRunId is deliberately kept when the run ends as
        // 'cancelled' (presumably so cancelCurrentRun can still finalize it) —
        // confirm this is the intended flow.
        if (data.status !== 'cancelled') {
            currentRunId = null;
        }
    });
    pipelineEvents.on('phase:start', (data) => {
        if (!data.runId)
            return;
        repo.createStat({
            id: data.phaseId,
            run_id: data.runId,
            phase: data.phase,
            status: 'running',
            started_at: new Date(data.timestamp).toISOString(),
            items_total: data.itemsTotal,
        });
    });
    pipelineEvents.on('phase:end', (data) => {
        repo.updateStat(data.phaseId, {
            status: data.status,
            completed_at: new Date(data.timestamp).toISOString(),
            duration_ms: data.durationMs,
            items_processed: data.itemsProcessed,
            metadata: data.metadata ? JSON.stringify(data.metadata) : undefined,
        });
    });
}
/**
 * Mark the in-flight run (if any) as cancelled and clear the tracked run id.
 * No-op when no run is active or setupStatsObserver was never called.
 */
export function cancelCurrentRun() {
    if (currentRunId && statsRepo) {
        statsRepo.updateRun(currentRunId, {
            status: 'cancelled',
            completed_at: new Date().toISOString(),
            total_duration_ms: 0,
        });
        currentRunId = null;
    }
}
/**
 * @returns {string|null} Id of the run currently in flight, or null.
 */
export function getCurrentRunId() {
    return currentRunId;
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
 * Create a repository for persisting pipeline run and phase statistics.
 *
 * Statements are prepared once up front and reused by every call.
 *
 * @param {object} db - better-sqlite3 style handle exposing prepare().
 * @returns {{createRun: Function, updateRun: Function, createStat: Function, updateStat: Function}}
 */
export function createStatsRepository(db) {
    const stmtInsertRun = db.prepare(`
    INSERT INTO processing_runs (id, recording_id, run_type, status, started_at, metadata)
    VALUES (?, ?, ?, ?, ?, ?)
  `);
    const stmtUpdateRun = db.prepare(`
    UPDATE processing_runs
    SET status = ?, completed_at = ?, total_duration_ms = ?, error_message = ?
    WHERE id = ?
  `);
    const stmtInsertStat = db.prepare(`
    INSERT INTO processing_stats (id, run_id, phase, status, started_at, items_total)
    VALUES (?, ?, ?, ?, ?, ?)
  `);
    const stmtUpdateStat = db.prepare(`
    UPDATE processing_stats
    SET status = ?, completed_at = ?, duration_ms = ?, items_processed = ?, metadata = ?
    WHERE id = ?
  `);
    return {
        // Insert a new processing_runs row; optional metadata becomes NULL.
        createRun(run) {
            stmtInsertRun.run(run.id, run.recording_id, run.run_type, run.status, run.started_at, run.metadata ?? null);
        },
        // Finalize a run: status, completion time, total duration, optional error.
        updateRun(id, updates) {
            stmtUpdateRun.run(updates.status, updates.completed_at, updates.total_duration_ms, updates.error_message ?? null, id);
        },
        // Insert a new per-phase processing_stats row; optional total becomes NULL.
        createStat(stat) {
            stmtInsertStat.run(stat.id, stat.run_id, stat.phase, stat.status, stat.started_at, stat.items_total ?? null);
        },
        // Finalize a phase stat; optional counters/metadata become NULL.
        updateStat(id, updates) {
            stmtUpdateStat.run(updates.status, updates.completed_at, updates.duration_ms, updates.items_processed ?? null, updates.metadata ?? null, id);
        },
    };
}
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import os from 'node:os';
|
|
2
|
+
import pidusage from 'pidusage';
|
|
3
|
+
/**
 * Periodically samples memory/CPU usage of registered processes via pidusage
 * and summarizes the samples (peak/avg) when stopped.
 */
export class ResourceTracker {
    resources = new Map();
    samples = new Map();
    interval = null;
    running = false;
    /**
     * Track a resource (anything exposing getResourceName()/getPid()).
     * Registering the same name twice is a no-op.
     */
    register(resource) {
        const key = resource.getResourceName();
        if (this.resources.has(key))
            return;
        this.resources.set(key, resource);
        this.samples.set(key, { memories: [], cpus: [] });
    }
    /**
     * Begin sampling every intervalMs milliseconds (default 1s).
     * The current node process is always tracked under the name 'nodejs'.
     */
    async start(intervalMs = 1000) {
        if (this.running)
            return;
        this.running = true;
        // Ensure the host process itself is tracked.
        if (!this.resources.has('nodejs')) {
            this.register({
                getResourceName: () => 'nodejs',
                getPid: () => process.pid,
            });
        }
        // Take one sample immediately, then keep sampling on a timer.
        await this.sample();
        this.interval = setInterval(() => {
            // Best effort: a failed sample never crashes the tracker.
            this.sample().catch(() => { });
        }, intervalMs);
    }
    /**
     * Collect one memory (MB) / CPU (%) reading per registered resource.
     * Resources with no pid, or whose process has exited, are skipped.
     */
    async sample() {
        for (const [key, resource] of this.resources) {
            const pid = resource.getPid();
            if (!pid)
                continue;
            try {
                const usage = await pidusage(pid);
                const bucket = this.samples.get(key);
                if (!bucket)
                    continue;
                bucket.memories.push(usage.memory / 1024 / 1024);
                bucket.cpus.push(usage.cpu);
            }
            catch {
                // Process exited or not found - skip
            }
        }
    }
    /**
     * Stop the sampling timer and return per-resource peak/avg summaries.
     * Sample buffers are cleared afterwards so the tracker can be reused
     * for the next phase. Resources with zero samples are omitted.
     */
    stop() {
        if (this.interval) {
            clearInterval(this.interval);
            this.interval = null;
        }
        this.running = false;
        const mean = (xs) => xs.reduce((total, x) => total + x, 0) / xs.length;
        const summary = {};
        for (const [key, bucket] of this.samples) {
            const { memories, cpus } = bucket;
            if (memories.length === 0)
                continue;
            summary[key] = {
                peakMemoryMB: Math.round(Math.max(...memories)),
                avgMemoryMB: Math.round(mean(memories)),
                peakCpuPercent: Math.round(Math.max(...cpus) * 10) / 10,
                avgCpuPercent: Math.round(mean(cpus) * 10) / 10,
                sampleCount: memories.length,
            };
        }
        // Reset buffers for the next phase.
        for (const key of this.samples.keys()) {
            this.samples.set(key, { memories: [], cpus: [] });
        }
        return summary;
    }
    /** Static host facts for annotating stats output. */
    getSystemInfo() {
        return {
            totalMemoryGB: Math.round(os.totalmem() / 1024 / 1024 / 1024),
            cpuCores: os.cpus().length,
            platform: process.platform,
        };
    }
    /** @returns {boolean} Whether the sampling timer is active. */
    isRunning() {
        return this.running;
    }
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Intentionally empty at runtime — presumably compiled from a type-only
// TypeScript source; the bare export keeps this file an ES module. TODO confirm.
export {};
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
#!/usr/bin/env npx tsx
|
|
2
|
+
/**
|
|
3
|
+
* Test different classification prompts to find the best approach
|
|
4
|
+
* for handling mixed-type sessions
|
|
5
|
+
*/
|
|
6
|
+
import { createStorageService } from './adapters/storage.adapter.js';
|
|
7
|
+
// Test Prompts: candidate classification prompt templates. testPrompt()
// substitutes {{TRANSCRIPT}} with the session transcript before sending.
const PROMPTS = {
    // Approach A: Current style but clearer — one type plus a confidence score.
    strictSingle: `You are a session classifier. Analyze this transcript and output ONLY valid JSON.

Session types:
- meeting: Conversations, interviews, discussions between people
- debugging: Fixing issues, troubleshooting, error analysis
- tutorial: Teaching, explaining, demonstrating how to do something
- learning: Researching, studying, exploring new concepts

Output exactly this JSON structure:
{ "type": "meeting|debugging|tutorial|learning", "confidence": 0.0-1.0 }

Transcript: {{TRANSCRIPT}}`,
    // Approach B: Multi-label scoring — every type gets a 0-100 score.
    multiLabel: `Rate how much this transcript matches each session type (0-100):

Session types:
- meeting: Conversations, interviews, discussions between people
- debugging: Fixing issues, troubleshooting, error analysis
- tutorial: Teaching, explaining, demonstrating how to do something
- learning: Researching, studying, exploring new concepts

Output JSON with all types scored:
{ "classifications": { "meeting": 85, "debugging": 10, "tutorial": 20, "learning": 45 } }

Transcript: {{TRANSCRIPT}}`,
    // Approach C: Primary + Secondary — one dominant type plus extras.
    primarySecondary: `Identify the PRIMARY type and any SECONDARY types that apply.

Session types:
- meeting: Conversations, interviews, discussions between people
- debugging: Fixing issues, troubleshooting, error analysis
- tutorial: Teaching, explaining, demonstrating how to do something
- learning: Researching, studying, exploring new concepts

Output JSON with structure shown:
{ "primary": { "type": "meeting|debugging|tutorial|learning", "confidence": 0.0-1.0 }, "secondary": ["type1", "type2"] }

Transcript: {{TRANSCRIPT}}`,
    // Approach D: Array of applicable types — all matches above a threshold.
    arrayTypes: `List ALL session types that apply with confidence scores.

Session types:
- meeting: Conversations, interviews, discussions between people
- debugging: Fixing issues, troubleshooting, error analysis
- tutorial: Teaching, explaining, demonstrating how to do something
- learning: Researching, studying, exploring new concepts

Output JSON with array of applicable types (include if confidence > 0.3):
{ "types": [ {"type": "meeting", "confidence": 0.85} ] }

Transcript: {{TRANSCRIPT}}`,
};
|
|
62
|
+
/**
 * Send one classification prompt (with {{TRANSCRIPT}} substituted) to the
 * chat-completions endpoint and return the parsed JSON result.
 *
 * Fix: the abort timeout was hard-coded to 3000000 ms while the AbortError
 * message claimed "timed out after 30s". The timeout now comes from
 * intelligenceConfig.timeout (falling back to the previous 3000000 ms) and
 * the message reports the actual value.
 *
 * @param {string} name - Label for console output.
 * @param {string} prompt - Template containing a {{TRANSCRIPT}} placeholder.
 * @param {{fullText: string}} transcript - Transcript to classify.
 * @param {{endpoint: string, model: string, timeout?: number}} intelligenceConfig
 * @returns {Promise<object|null>} Parsed response JSON, or null on error/timeout.
 */
async function testPrompt(name, prompt, transcript, intelligenceConfig) {
    console.log(`\n${'='.repeat(60)}`);
    console.log(`Testing: ${name}`);
    console.log(`${'='.repeat(60)}`);
    const finalPrompt = prompt.replace('{{TRANSCRIPT}}', transcript.fullText);
    const timeoutMs = intelligenceConfig.timeout ?? 3000000;
    try {
        const controller = new AbortController();
        const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
        const response = await fetch(intelligenceConfig.endpoint, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                model: intelligenceConfig.model,
                messages: [{ role: 'system', content: finalPrompt }],
                stream: false,
                format: 'json',
                temperature: 0.3,
            }),
            signal: controller.signal,
        });
        clearTimeout(timeoutId);
        if (!response.ok) {
            throw new Error(`API error: ${response.status} ${response.statusText}`);
        }
        const result = await response.json();
        console.log('Result:');
        console.log(JSON.stringify(result, null, 2));
        return result;
    }
    catch (error) {
        if (error instanceof Error && error.name === 'AbortError') {
            // Report the timeout actually used (the old message said "30s"
            // regardless of the real limit).
            console.error(`Error: Request timed out after ${timeoutMs / 1000}s`);
        }
        else {
            console.error('Error:', error);
        }
        return null;
    }
}
|
|
101
|
+
/**
 * CLI entry point: load a session's first transcript and run every prompt in
 * PROMPTS against it sequentially, then print a comparison summary.
 *
 * Usage: pnpm run test-prompts <session-id>
 * Exits with code 1 when no session id is given or the session has no transcripts.
 */
async function main() {
    const sessionId = process.argv[2];
    if (!sessionId) {
        console.error('Usage: pnpm run test-prompts <session-id>');
        console.error('Example: pnpm run test-prompts "Your Recording.cap"');
        process.exit(1);
    }
    console.log(`\n🔍 Testing classification prompts for: ${sessionId}`);
    const storage = createStorageService();
    const session = await storage.loadSession(sessionId);
    if (!session || !session.transcripts || session.transcripts.length === 0) {
        console.error(`\n❌ Session not found or has no transcripts: ${sessionId}`);
        console.error('\nTo find sessions, run:');
        console.error('  pnpm run list');
        process.exit(1);
    }
    const transcript = session.transcripts[0].transcript;
    console.log(`✓ Transcript length: ${transcript.fullText.length} chars`);
    console.log(`✓ Number of segments: ${transcript.segments.length}`);
    console.log(`✓ Audio source: ${session.transcripts[0].source}`);
    // Derive the test count from PROMPTS instead of hard-coding "4".
    const promptEntries = Object.entries(PROMPTS);
    console.log(`\n⏱️ Running ${promptEntries.length} classification tests...\n`);
    // Intelligence config (local Ollama endpoint; generous timeout for large models)
    const intelligenceConfig = {
        provider: 'ollama',
        endpoint: 'http://localhost:11434/v1/chat/completions',
        model: 'qwen3:32b',
        maxRetries: 1,
        timeout: 3000000,
    };
    // Run each prompt sequentially, pausing briefly between calls to give the
    // local model server a breather.
    const results = {};
    let testNum = 1;
    for (const [name, prompt] of promptEntries) {
        console.log(`\n[${testNum}/${promptEntries.length}] Running test...`);
        results[name] = await testPrompt(name, prompt, transcript, intelligenceConfig);
        testNum++;
        await new Promise((resolve) => setTimeout(resolve, 1000));
    }
    // Summary
    console.log(`\n${'='.repeat(60)}`);
    console.log('📊 FINAL SUMMARY');
    console.log(`${'='.repeat(60)}`);
    for (const [name, result] of Object.entries(results)) {
        // Fix: split camelCase BEFORE upper-casing. The previous order
        // (.toUpperCase() first) made every letter match /([A-Z])/, printing
        // e.g. "strictSingle" as "S T R I C T S I N G L E" instead of
        // "STRICT SINGLE".
        console.log(`\n${name
            .replace(/([A-Z])/g, ' $1')
            .trim()
            .toUpperCase()}:`);
        if (result) {
            // Try to extract key info for quick comparison
            if (result.type) {
                console.log(`  Primary Type: ${result.type}`);
                if (result.confidence)
                    console.log(`  Confidence: ${(result.confidence * 100).toFixed(0)}%`);
            }
            else if (result.classifications) {
                console.log('  Scores:', JSON.stringify(result.classifications));
            }
            else if (result.primary) {
                console.log(`  Primary: ${result.primary.type} (${(result.primary.confidence * 100).toFixed(0)}%)`);
                if (result.secondary?.length)
                    console.log(`  Secondary: [${result.secondary.join(', ')}]`);
            }
            else if (result.types) {
                console.log(`  Types: ${result.types.map((t) => `${t.type} (${(t.confidence * 100).toFixed(0)}%)`).join(', ')}`);
            }
            else {
                console.log(`  Raw:`, JSON.stringify(result));
            }
        }
        else {
            console.log('  ❌ Failed or timed out');
        }
    }
    console.log(`\n${'='.repeat(60)}`);
    console.log('✅ All tests complete!');
    console.log(`${'='.repeat(60)}\n`);
}
main().catch((error) => {
    console.error('Fatal error:', error);
    process.exit(1);
});
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cap Adapter Tests
|
|
3
|
+
*/
|
|
4
|
+
import { describe, expect, it } from 'vitest';
|
|
5
|
+
import { createCapSource } from '../adapters/cap.adapter';
|
|
6
|
+
describe('Cap Adapter', () => {
    // Fixture mirroring a single segment from Cap's recording-meta.json
    // for a capture that includes a mic track.
    const segmentWithMic = {
        display: {
            path: 'content/segments/segment-0/display.mp4',
            fps: 37,
        },
        mic: {
            path: 'content/segments/segment-0/audio-input.ogg',
            start_time: -0.032719958,
        },
        cursor: 'content/segments/segment-0/cursor.json',
    };
    it('should create a CapSource', () => {
        const source = createCapSource({ recordingsPath: '~/tmp/recordings' });
        expect(source).toBeDefined();
        expect(source.getLatestRecording).toBeInstanceOf(Function);
    });
    it('should handle nonexistent directory gracefully', async () => {
        const source = createCapSource({ recordingsPath: '/nonexistent/path' });
        const latest = await source.getLatestRecording();
        expect(latest).toBeNull();
    });
    it('should validate Cap recording metadata structure', async () => {
        const meta = {
            platform: 'MacOS',
            pretty_name: 'Cap 2026-01-08 at 16.46.37',
            segments: [segmentWithMic],
        };
        expect(meta.segments).toBeDefined();
        expect(meta.segments[0]).toBeDefined();
        expect(meta.segments[0].display?.path).toBe('content/segments/segment-0/display.mp4');
        expect(meta.segments[0].mic?.path).toBe('content/segments/segment-0/audio-input.ogg');
    });
    it('should identify recordings without mic/audio field', async () => {
        // Same fixture minus the mic entry (video-only capture).
        const { mic: _mic, ...miclessSegment } = segmentWithMic;
        const meta = {
            platform: 'MacOS',
            pretty_name: 'Cap 2026-01-08 at 16.46.37',
            segments: [miclessSegment],
        };
        expect(meta.segments[0]?.mic).toBeUndefined();
    });
    it('should identify missing segments array', async () => {
        const invalidMeta = {
            platform: 'MacOS',
            pretty_name: 'Cap 2026-01-08 at 16.46.37',
        };
        expect(invalidMeta.segments).toBeUndefined();
    });
    it('should identify empty segments array', async () => {
        const invalidMeta = {
            platform: 'MacOS',
            pretty_name: 'Cap 2026-01-08 at 16.46.37',
            segments: [],
        };
        expect(invalidMeta.segments?.length).toBe(0);
    });
});
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cap Adapter Tests
|
|
3
|
+
*/
|
|
4
|
+
import { describe, expect, it, vi } from 'vitest';
|
|
5
|
+
import { createCapCaptureSource } from '../adapters/capture.cap.adapter.js';
|
|
6
|
+
// Mock VideoService: stubs every method the capture adapter may call;
// only getMetadata resolves with a value (fake 5-minute 1080p/30fps clip).
const mockVideoService = {
    extractFramesAtInterval: vi.fn(),
    extractFramesAtTimestamps: vi.fn(),
    extractFramesAtTimestampsBatch: vi.fn(),
    getMetadata: vi
        .fn()
        .mockResolvedValue({ duration: 300, width: 1920, height: 1080, fps: 30 }),
    detectSceneChanges: vi.fn(),
    runVisualIndexing: vi.fn(),
};
describe('Cap Adapter', () => {
    // Construction: factory returns an object exposing getLatestRecording().
    it('should create a CapSource', () => {
        const capSource = createCapCaptureSource({ recordingsPath: '~/tmp/recordings' }, mockVideoService);
        expect(capSource).toBeDefined();
        expect(capSource.getLatestRecording).toBeInstanceOf(Function);
    });
    // A missing recordings directory should yield null, not throw.
    it('should handle nonexistent directory gracefully', async () => {
        const capSource = createCapCaptureSource({ recordingsPath: '/nonexistent/path' }, mockVideoService);
        const latest = await capSource.getLatestRecording();
        expect(latest).toBeNull();
    });
    // Shape check against a realistic recording-meta.json payload.
    it('should validate Cap recording metadata structure', async () => {
        const mockMeta = {
            platform: 'MacOS',
            pretty_name: 'Cap 2026-01-08 at 16.46.37',
            segments: [
                {
                    display: {
                        path: 'content/segments/segment-0/display.mp4',
                        fps: 37,
                    },
                    mic: {
                        path: 'content/segments/segment-0/audio-input.ogg',
                        start_time: -0.032719958,
                    },
                    cursor: 'content/segments/segment-0/cursor.json',
                },
            ],
        };
        expect(mockMeta.segments).toBeDefined();
        expect(mockMeta.segments[0]).toBeDefined();
        expect(mockMeta.segments[0].display?.path).toBe('content/segments/segment-0/display.mp4');
        expect(mockMeta.segments[0].mic?.path).toBe('content/segments/segment-0/audio-input.ogg');
    });
    // Segments may omit the mic entry entirely (video-only capture).
    it('should identify recordings without mic/audio field', async () => {
        const mockMeta = {
            segments: [{}],
        };
        expect(mockMeta.segments?.[0]?.mic).toBeUndefined();
    });
    // Metadata with no segments key at all.
    it('should identify missing segments array', async () => {
        const invalidMeta = {};
        expect(invalidMeta.segments).toBeUndefined();
    });
    // Metadata with a present-but-empty segments array.
    it('should identify empty segments array', async () => {
        const invalidMeta = {
            platform: 'MacOS',
            pretty_name: 'Cap 2026-01-08 at 16.46.37',
            segments: [],
        };
        expect(invalidMeta.segments?.length).toBe(0);
    });
});
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Classify Session Action Tests
|
|
3
|
+
*/
|
|
4
|
+
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
|
5
|
+
import { classifySession } from '../actions/classify-session.js';
|
|
6
|
+
// Two-segment mic transcript fixture used by every test below.
const mockTranscript = {
    fullText: 'This is a debugging session about authentication errors.',
    segments: [
        {
            id: 'seg-0',
            start: 0,
            end: 5,
            text: 'I fixed the authentication bug.',
        },
        {
            id: 'seg-1',
            start: 5,
            end: 10,
            text: 'Used JWT tokens for security.',
        },
    ],
    language: 'en',
    duration: 10,
};
// A transcribed-but-unclassified session wrapping mockTranscript.
const mockSession = {
    id: 'session-123',
    recording: {
        id: 'rec-123',
        source: {
            type: 'cap',
            originalPath: '/test/path',
        },
        videoPath: '/test/video.mp4',
        audioMicPath: '/test/audio.mp3',
        audioSystemPath: null,
        duration: 10,
        capturedAt: new Date('2026-01-08'),
    },
    transcripts: [
        {
            source: 'mic',
            transcript: mockTranscript,
        },
    ],
    visualLogs: [],
    segments: [],
    status: 'transcribed',
    classification: null,
    metadata: null,
    artifacts: [],
    createdAt: new Date('2026-01-08'),
    updatedAt: new Date('2026-01-08'),
};
// Per-type scores the mocked classifier returns (debugging dominates).
const mockClassificationResult = {
    meeting: 10,
    debugging: 90,
    tutorial: 15,
    learning: 20,
    working: 5,
};
const mockClassify = vi.fn();
const mockExtractMetadata = vi.fn().mockResolvedValue({
    speakers: [],
    keyMoments: [],
    actionItems: [],
    technicalTerms: [],
    codeSnippets: [],
});
const mockGenerate = vi.fn();
// Full intelligence-service double; only classify/extractMetadata/generateText
// carry canned behavior, the rest are bare spies.
const mockIntelligence = {
    classify: mockClassify,
    classifySegment: vi.fn(),
    extractMetadata: mockExtractMetadata,
    generate: mockGenerate,
    describeImages: vi.fn(),
    embedText: vi.fn(),
    extractTopics: vi.fn(),
    generateText: vi.fn().mockResolvedValue('Mock generated text'),
};
describe('classifySession', () => {
    beforeEach(() => {
        // resetAllMocks clears mockResolvedValue too, so re-arm classify here.
        vi.resetAllMocks();
        mockClassify.mockResolvedValue(mockClassificationResult);
    });
    it('should throw error without transcripts', async () => {
        const sessionWithoutTranscripts = { ...mockSession, transcripts: [] };
        await expect(classifySession(sessionWithoutTranscripts, mockIntelligence)).rejects.toThrow('Cannot classify session without transcripts');
    });
    it('should classify session and update status', async () => {
        const result = await classifySession(mockSession, mockIntelligence);
        expect(result.status).toBe('classified');
        expect(result.classification).toBeDefined();
        if (result.classification) {
            expect(result.classification.debugging).toBe(90);
            expect(result.classification.meeting).toBe(10);
        }
    });
    // classifySession should return a new session object, leaving its input untouched.
    it('should not mutate original session', async () => {
        const originalStatus = mockSession.status;
        const originalUpdatedAt = mockSession.updatedAt;
        await classifySession(mockSession, mockIntelligence);
        expect(mockSession.status).toBe(originalStatus);
        expect(mockSession.updatedAt).toBe(originalUpdatedAt);
    });
    it('should call intelligence.classify with transcript', async () => {
        await classifySession(mockSession, mockIntelligence);
        expect(mockClassify).toHaveBeenCalled();
        // Since we only have one transcript, it should be called with the original
        expect(mockClassify).toHaveBeenCalledWith(mockTranscript, []);
    });
    // With mic + system transcripts, segments are merged into one timeline
    // whose fullText tags each line with a timestamped source label.
    it('should interleave multiple transcripts', async () => {
        const sessionWithMultipleTranscripts = {
            ...mockSession,
            transcripts: [
                { source: 'mic', transcript: mockTranscript },
                {
                    source: 'system',
                    transcript: {
                        ...mockTranscript,
                        segments: [
                            {
                                id: 'seg-sys-0',
                                start: 3,
                                end: 4,
                                text: 'Error notification sound',
                            },
                        ],
                        fullText: 'Error notification sound',
                    },
                },
            ],
        };
        await classifySession(sessionWithMultipleTranscripts, mockIntelligence);
        expect(mockClassify).toHaveBeenCalled();
        const calledWith = mockClassify.mock.calls[0][0];
        // Should have interleaved the transcripts
        expect(calledWith.fullText).toContain('[00:00 MIC]');
        expect(calledWith.fullText).toContain('[00:03 SYSTEM]');
    });
});
|