eprec 1.5.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/app/assets/styles.css +18 -28
- package/package.json +8 -11
- package/process-course/chapter-processor.ts +40 -3
- package/process-course/cli.ts +1 -1
- package/process-course/edits/cli.ts +14 -5
- package/process-course/edits/combined-video-editor.ts +2 -2
- package/process-course/edits/edit-workspace.ts +1 -1
- package/process-course/edits/regenerate-transcript.ts +2 -2
- package/process-course/edits/remove-ranges.ts +1 -1
- package/process-course/edits/timestamp-refinement.ts +1 -1
- package/process-course/edits/transcript-output.test.ts +1 -1
- package/process-course/edits/transcript-output.ts +1 -1
- package/process-course/ffmpeg.ts +1 -1
- package/process-course/jarvis-commands/parser.test.ts +1 -1
- package/process-course/jarvis-commands/parser.ts +1 -1
- package/process-course/jarvis-commands/windows.ts +3 -3
- package/process-course/logging.ts +1 -1
- package/process-course/summary.ts +1 -1
- package/process-course/utils/filename.ts +1 -1
- package/process-course/utils/transcript.test.ts +1 -1
- package/process-course/utils/transcript.ts +1 -1
- package/{app-server.ts → src/app-server.ts} +8 -6
- package/{cli.ts → src/cli.ts} +30 -14
- package/{process-course-video.ts → src/process-course-video.ts} +162 -11
- package/{speech-detection.ts → src/speech-detection.ts} +4 -4
- package/src/utils.test.ts +71 -0
- package/{utils.ts → src/utils.ts} +0 -0
- package/{whispercpp-transcribe.ts → src/whispercpp-transcribe.ts} +0 -0
package/README.md
CHANGED

@@ -40,7 +40,7 @@ bun install
 ## Quick Start
 
 ```bash
-bun process-course-video.ts "/path/to/input.mp4" "/path/to/output" \
+bun src/process-course-video.ts "/path/to/input.mp4" "/path/to/output" \
   --enable-transcription \
   --keep-intermediates \
   --write-logs
package/app/assets/styles.css
CHANGED

@@ -33,12 +33,7 @@
   --color-danger-text: #b91c1c;
   --color-danger-border: #fecaca;
   --color-danger-border-strong: #fca5a5;
-  --font-family:
-    'Inter',
-    'Segoe UI',
-    system-ui,
-    -apple-system,
-    sans-serif;
+  --font-family: 'Inter', 'Segoe UI', system-ui, -apple-system, sans-serif;
   --font-size-xs: 12px;
   --font-size-sm: 14px;
   --font-size-base: 16px;

@@ -63,10 +58,8 @@
   --radius-lg: 12px;
   --radius-xl: 16px;
   --radius-pill: 999px;
-  --shadow-sm: 0 1px 2px
-    color-mix(in srgb, var(--color-text) 12%, transparent);
-  --shadow-md: 0 6px 16px
-    color-mix(in srgb, var(--color-text) 14%, transparent);
+  --shadow-sm: 0 1px 2px color-mix(in srgb, var(--color-text) 12%, transparent);
+  --shadow-md: 0 6px 16px color-mix(in srgb, var(--color-text) 14%, transparent);
   --shadow-lg: 0 18px 38px
     color-mix(in srgb, var(--color-text) 16%, transparent);
   --transition-fast: 150ms ease;

@@ -82,18 +75,18 @@
 @media (prefers-color-scheme: dark) {
   :root {
     color-scheme: dark;
-    … (previous dark-theme values not captured)
+    --color-primary: #38bdf8;
+    --color-primary-hover: #0ea5e9;
+    --color-primary-active: #0284c7;
+    --color-on-primary: #0f172a;
+    --color-background: #0b1120;
+    --color-surface: #111827;
+    --color-surface-muted: #0f172a;
+    --color-surface-inverse: #f8fafc;
+    --color-text: #f8fafc;
+    --color-text-muted: #94a3b8;
+    --color-text-subtle: #cbd5e1;
+    --color-text-secondary: #e2e8f0;
     --color-text-faint: #64748b;
     --color-text-inverse: #0f172a;
     --color-border: #334155;

@@ -110,12 +103,9 @@
     --color-danger-text: #fecaca;
     --color-danger-border: #ef4444;
     --color-danger-border-strong: #fca5a5;
-    --shadow-sm: 0 1px 2px
-      color-mix(in srgb, #0f172a 12%, transparent);
-    --shadow-md: 0 6px 16px
-      color-mix(in srgb, #0f172a 14%, transparent);
-    --shadow-lg: 0 18px 38px
-      color-mix(in srgb, #0f172a 16%, transparent);
+    --shadow-sm: 0 1px 2px color-mix(in srgb, #0f172a 12%, transparent);
+    --shadow-md: 0 6px 16px color-mix(in srgb, #0f172a 14%, transparent);
+    --shadow-lg: 0 18px 38px color-mix(in srgb, #0f172a 16%, transparent);
   }
 }
 
package/package.json
CHANGED

@@ -1,35 +1,30 @@
 {
   "name": "eprec",
   "type": "module",
-  "version": "1.5.0",
+  "version": "1.7.0",
   "license": "MIT",
   "repository": {
     "type": "git",
     "url": "https://github.com/epicweb-dev/eprec"
   },
   "scripts": {
-    "app:start": "bun --watch ./cli.ts app start",
+    "app:start": "bun --watch ./src/cli.ts app start",
     "format": "prettier --write .",
-    "test": "bun test process-course utils.test.ts",
+    "test": "bun test process-course src/utils.test.ts",
     "test:e2e": "bun test ./e2e",
-    "test:smoke": "bunx playwright test -c playwright-smoke-config.ts",
+    "test:smoke": "bunx playwright test -c playwright/playwright-smoke-config.ts",
     "test:all": "bun test '**/*.test.ts'",
     "validate": "bun run test"
   },
   "bin": {
-    "eprec": "./cli.ts"
+    "eprec": "./src/cli.ts"
   },
   "files": [
     "app/**",
-    "app-server.ts",
-    "cli.ts",
     "process-course/**",
-    "process-course-video.ts",
     "public/**",
     "server/**",
-    "…
-    "utils.ts",
-    "whispercpp-transcribe.ts"
+    "src/**"
   ],
   "prettier": "@epic-web/config/prettier",
   "devDependencies": {

@@ -43,8 +38,10 @@
     "typescript": "^5"
   },
   "dependencies": {
+    "@inquirer/search": "^4.1.0",
     "get-port": "^7.1.0",
     "inquirer": "^13.2.1",
+    "match-sorter": "^8.2.0",
     "onnxruntime-node": "^1.23.2",
     "ora": "^9.1.0",
     "remix": "3.0.0-alpha.0",
package/process-course/chapter-processor.ts
CHANGED

@@ -1,7 +1,10 @@
 import path from 'node:path'
-import {
-  …
-  …
+import {
+  detectSpeechBounds,
+  checkSegmentHasSpeech,
+} from '../src/speech-detection'
+import { transcribeAudio } from '../src/whispercpp-transcribe'
+import { clamp, formatSeconds } from '../src/utils'
 import {
   COMMAND_CLOSE_WORD,
   COMMAND_WAKE_WORD,

@@ -51,6 +54,14 @@ import type {
 } from './types'
 import { createEditWorkspace } from './edits'
 
+export type ChapterProgressReporter = {
+  start: (options: { stepCount: number; label?: string }) => void
+  step: (label: string) => void
+  setLabel: (label: string) => void
+  finish: (label?: string) => void
+  skip: (label: string) => void
+}
+
 export interface ChapterProcessingOptions {
   inputPath: string
   outputDir: string

@@ -64,6 +75,7 @@ export interface ChapterProcessingOptions {
   writeLogs: boolean
   dryRun: boolean
   previousProcessedChapter?: ProcessedChapterInfo | null
+  progress?: ChapterProgressReporter
 }
 
 export interface ChapterProcessingResult {

@@ -97,11 +109,16 @@ export async function processChapter(
     )
   }
 
+  const progress = options.progress
+  const stepCount = options.dryRun ? 1 : options.enableTranscription ? 8 : 7
+
   const outputBasePath = path.join(
     options.outputDir,
     `${formatChapterFilename(chapter)}${path.extname(options.inputPath)}`,
   )
 
+  progress?.start({ stepCount, label: 'Starting' })
+
   // Check minimum duration before processing
   if (duration < options.minChapterDurationSeconds) {
     logInfo(

@@ -118,6 +135,7 @@ export async function processChapter(
       ])
       logWritten = true
     }
+    progress?.skip('Skipped (short)')
     return { status: 'skipped', skipReason: 'short-initial', logWritten }
   }
 

@@ -126,6 +144,7 @@ export async function processChapter(
     logInfo(
       `[dry-run] Would process chapter ${chapter.index + 1}: ${chapter.title}`,
     )
+    progress?.finish('Dry run')
     return { status: 'processed', skipReason: 'dry-run', logWritten: false }
   }
 

@@ -136,6 +155,7 @@ export async function processChapter(
 
   try {
     // Step 1: Extract raw segment with padding trimmed
+    progress?.step('Extracting segment')
    const rawTrimStart = chapter.start + CONFIG.rawTrimPaddingSeconds
    const rawTrimEnd = chapter.end - CONFIG.rawTrimPaddingSeconds
    const rawDuration = rawTrimEnd - rawTrimStart

@@ -153,6 +173,7 @@ export async function processChapter(
     })
 
     // Step 2: Normalize audio
+    progress?.step('Normalizing audio')
     const analysis = await analyzeLoudness(paths.rawPath, 0, rawDuration)
     await renderChapter({
       inputPath: paths.rawPath,

@@ -167,8 +188,10 @@ export async function processChapter(
     let commandFilenameOverride: string | null = null
     let hasEditCommand = false
     let commandNotes: Array<{ value: string; window: TimeRange }> = []
+    let usedSpliceStep = false
 
     if (options.enableTranscription) {
+      progress?.step('Transcribing audio')
       const transcriptionResult = await transcribeAndAnalyze({
         normalizedPath: paths.normalizedPath,
         transcriptionAudioPath: paths.transcriptionAudioPath,

@@ -189,6 +212,7 @@ export async function processChapter(
         logWritten = true
       }
       await safeUnlink(outputBasePath)
+      progress?.skip('Skipped (transcript)')
       return {
         status: 'skipped',
         skipReason: transcriptionResult.hasBadTake

@@ -210,6 +234,8 @@ export async function processChapter(
           `Combine previous command detected for chapter ${chapter.index + 1}, but no previous chapter available. Processing normally.`,
         )
       } else {
+        progress?.step('Combining previous')
+        usedSpliceStep = true
         const combineResult = await handleCombinePrevious({
           chapter,
           previousProcessedChapter: options.previousProcessedChapter,

@@ -224,8 +250,10 @@ export async function processChapter(
         })
         // If combine failed (returned null), continue with normal processing
         if (combineResult !== null) {
+          progress?.finish('Combined')
           return combineResult
         }
+        progress?.setLabel('Splicing commands')
         // Otherwise, fall through to normal processing
       }
     }

@@ -239,6 +267,9 @@ export async function processChapter(
     )
 
     // Step 5: Handle command splicing
+    if (!usedSpliceStep) {
+      progress?.step('Splicing commands')
+    }
     const spliceResult = await handleCommandSplicing({
       commandWindows,
       normalizedPath: paths.normalizedPath,

@@ -249,6 +280,7 @@ export async function processChapter(
     })
 
     // Step 6: Detect speech bounds
+    progress?.step('Detecting speech')
     const speechBounds = await detectSpeechBounds(
       spliceResult.sourcePath,
       0,

@@ -272,6 +304,7 @@ export async function processChapter(
     }
 
     // Step 7: Apply speech padding
+    progress?.step('Trimming')
     const paddedStart = clamp(
       speechBounds.start - CONFIG.preSpeechPaddingSeconds,
       0,

@@ -311,10 +344,12 @@ export async function processChapter(
        logWritten = true
      }
      await safeUnlink(outputBasePath)
+      progress?.skip('Skipped (trimmed)')
      return { status: 'skipped', skipReason: 'short-trimmed', logWritten }
    }
 
    // Step 9: Write final output
+    progress?.step('Writing output')
    await extractChapterSegment({
      inputPath: spliceResult.sourcePath,
      outputPath: finalOutputPath,

@@ -323,6 +358,7 @@ export async function processChapter(
     })
 
     // Step 10: Verify no jarvis in final output
+    progress?.step('Verifying output')
     let jarvisWarning: JarvisWarning | undefined
     await extractTranscriptionAudio({
       inputPath: finalOutputPath,

@@ -409,6 +445,7 @@ export async function processChapter(
       processedDuration: trimmedDuration,
     }
 
+    progress?.finish('Complete')
     return {
       status: 'processed',
       jarvisWarning,
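The new `ChapterProgressReporter` is called by `processChapter` only through optional chaining, so any object implementing these five methods can observe per-chapter progress. The reporter eprec wires in drives its ora spinner (see `src/process-course-video.ts` below); a minimal sketch of an alternative reporter that simply logs to the console is shown here, with the import path assumed to match the new `src/` layout:

```ts
import type { ChapterProgressReporter } from '../process-course/chapter-processor'

// Minimal sketch (not part of this release): a console-based reporter that
// satisfies the ChapterProgressReporter contract introduced in this diff.
export function createConsoleProgressReporter(): ChapterProgressReporter {
  let stepIndex = 0
  let stepCount = 1
  const log = (label: string) =>
    console.log(`[${stepIndex}/${stepCount}] ${label}`)
  return {
    start({ stepCount: count, label }) {
      stepCount = Math.max(1, count)
      stepIndex = 0
      log(label ?? 'Starting')
    },
    step(label) {
      stepIndex = Math.min(stepIndex + 1, stepCount)
      log(label)
    },
    setLabel(label) {
      log(label)
    },
    finish(label) {
      stepIndex = stepCount
      log(label ?? 'Complete')
    },
    skip(label) {
      stepIndex = stepCount
      log(label)
    },
  }
}
```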
package/process-course/cli.ts
CHANGED

@@ -2,7 +2,7 @@ import path from 'node:path'
 import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import type { Argv, Arguments } from 'yargs'
-import { getDefaultWhisperModelPath } from '../whispercpp-transcribe'
+import { getDefaultWhisperModelPath } from '../src/whispercpp-transcribe'
 import { DEFAULT_MIN_CHAPTER_SECONDS, TRANSCRIPTION_PHRASES } from './config'
 import { normalizeSkipPhrases } from './utils/transcript'
 import { parseChapterSelection } from './utils/chapter-selection'

package/process-course/edits/cli.ts
CHANGED

@@ -357,17 +357,26 @@ if (import.meta.main) {
 async function promptForEditsCommand(
   prompter: Prompter,
 ): Promise<string[] | null> {
-  const selection = await prompter.…
+  const selection = await prompter.search('Choose a command (type to filter)', [
     {
-      name: 'Edit a single video using transcript text edits',
+      name: 'edit-video - Edit a single video using transcript text edits',
       value: 'edit-video',
+      description:
+        'edit-video --input <file> --transcript <json> --edited <txt>',
+      keywords: ['transcript', 'cuts', 'remove', 'trim'],
     },
     {
-      name: 'Combine two videos with speech-aligned padding',
+      name: 'combine-videos - Combine two videos with speech-aligned padding',
       value: 'combine-videos',
+      description: 'combine-videos --video1 <file> --video2 <file>',
+      keywords: ['merge', 'join', 'splice', 'padding'],
     },
-    {
-      …
+    {
+      name: '--help - Show help',
+      value: 'help',
+      keywords: ['usage', '--help'],
+    },
+    { name: 'exit - Exit', value: 'exit', keywords: ['quit', 'cancel'] },
   ])
   if (selection === 'exit') {
     return null
package/process-course/edits/combined-video-editor.ts
CHANGED

@@ -4,9 +4,9 @@ import { copyFile, mkdir, mkdtemp, rename, rm } from 'node:fs/promises'
 import {
   detectSpeechBounds,
   checkSegmentHasSpeech,
-} from '../../speech-detection'
+} from '../../src/speech-detection'
 import { extractChapterSegmentAccurate, concatSegments } from '../ffmpeg'
-import { clamp, getMediaDurationSeconds } from '../../utils'
+import { clamp, getMediaDurationSeconds } from '../../src/utils'
 import { EDIT_CONFIG } from '../config'
 import { editVideo } from './video-editor'
 import {

package/process-course/edits/edit-workspace.ts
CHANGED

@@ -1,6 +1,6 @@
 import path from 'node:path'
 import { copyFile, mkdir } from 'node:fs/promises'
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import {
   buildTranscriptWordsWithIndices,
   generateTranscriptJson,

package/process-course/edits/regenerate-transcript.ts
CHANGED

@@ -5,7 +5,7 @@ import { mkdtemp, readdir, rm } from 'node:fs/promises'
 import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import { extractTranscriptionAudio } from '../ffmpeg'
-import { transcribeAudio } from '../../whispercpp-transcribe'
+import { transcribeAudio } from '../../src/whispercpp-transcribe'
 import { scaleTranscriptSegments } from '../jarvis-commands/parser'
 import { EDIT_CONFIG } from '../config'
 import {

@@ -13,7 +13,7 @@ import {
   generateTranscriptJson,
   generateTranscriptText,
 } from './transcript-output'
-import { getMediaDurationSeconds } from '../../utils'
+import { getMediaDurationSeconds } from '../../src/utils'
 
 async function main() {
   const argv = yargs(hideBin(process.argv))

package/process-course/edits/remove-ranges.ts
CHANGED

@@ -6,7 +6,7 @@ import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import { extractChapterSegmentAccurate, concatSegments } from '../ffmpeg'
 import { buildKeepRanges, mergeTimeRanges } from '../utils/time-ranges'
-import { clamp, getMediaDurationSeconds } from '../../utils'
+import { clamp, getMediaDurationSeconds } from '../../src/utils'
 import type { TimeRange } from '../types'
 
 export type RemoveRangesOptions = {

package/process-course/edits/timestamp-refinement.ts
CHANGED

@@ -1,6 +1,6 @@
 import { readAudioSamples } from '../ffmpeg'
 import { CONFIG, EDIT_CONFIG } from '../config'
-import { clamp } from '../../utils'
+import { clamp } from '../../src/utils'
 import { mergeTimeRanges } from '../utils/time-ranges'
 import { findLowestAmplitudeBoundaryProgressive } from '../utils/audio-analysis'
 import type { TimeRange } from '../types'
package/process-course/edits/transcript-output.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import { buildTranscriptWords } from '../jarvis-commands/parser'
 import type { TranscriptJson, TranscriptWordWithIndex } from './types'
 
package/process-course/ffmpeg.ts
CHANGED

@@ -2,7 +2,7 @@ import {
   runCommand as runCommandBase,
   runCommandBinary as runCommandBinaryBase,
   formatSeconds,
-} from '../utils'
+} from '../src/utils'
 import { CONFIG, TRANSCRIPTION_SAMPLE_RATE } from './config'
 import { logCommand, logInfo, logWarn } from './logging'
 import type { Chapter, LoudnormAnalysis } from './types'

package/process-course/jarvis-commands/parser.test.ts
CHANGED

@@ -1,6 +1,6 @@
 import { test, expect } from 'bun:test'
 import { scaleTranscriptSegments, extractTranscriptCommands } from './parser'
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 
 // Factory functions for test data
 function createSegment(

package/process-course/jarvis-commands/parser.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import { CONFIG } from '../config'
 import type { TimeRange } from '../types'
 import { normalizeWords } from '../utils/transcript'

package/process-course/jarvis-commands/windows.ts
CHANGED

@@ -1,9 +1,9 @@
-import { clamp } from '../../utils'
-import { detectSpeechSegmentsWithVad } from '../../speech-detection'
+import { clamp } from '../../src/utils'
+import { detectSpeechSegmentsWithVad } from '../../src/speech-detection'
 import { readAudioSamples } from '../ffmpeg'
 import { CONFIG } from '../config'
 import { logInfo } from '../logging'
-import { formatSeconds } from '../../utils'
+import { formatSeconds } from '../../src/utils'
 import { mergeTimeRanges } from '../utils/time-ranges'
 import {
   buildSilenceGapsFromSpeech,

package/process-course/utils/transcript.test.ts
CHANGED

@@ -7,7 +7,7 @@ import {
   normalizeWords,
 } from './transcript'
 import { TRANSCRIPTION_PHRASES } from '../config'
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 
 function createPhrases(...phrases: string[]): string[] {
   return phrases

package/process-course/utils/transcript.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import type { TimeRange } from '../types'
 import { TRANSCRIPTION_PHRASES } from '../config'
 import { buildTranscriptWords } from '../jarvis-commands/parser'

package/{app-server.ts → src/app-server.ts}
RENAMED

@@ -1,9 +1,10 @@
-import '…
+import path from 'node:path'
+import '../app/config/init-env.ts'
 
 import getPort from 'get-port'
-import { getEnv } from '…
-import { createAppRouter } from '…
-import { createBundlingRoutes } from '…
+import { getEnv } from '../app/config/env.ts'
+import { createAppRouter } from '../app/router.tsx'
+import { createBundlingRoutes } from '../server/bundling.ts'
 
 type AppServerOptions = {
   host?: string

@@ -21,6 +22,7 @@ const SHORTCUT_COLORS: Record<string, string> = {
   h: '\u001b[35m',
 }
 const ANSI_RESET = '\u001b[0m'
+const APP_ROOT = path.resolve(import.meta.dirname, '..')
 
 function colorizeShortcut(key: string) {
   if (!COLOR_ENABLED) {

@@ -145,12 +147,12 @@ function setupShortcutHandling(options: {
 }
 
 function startServer(port: number, hostname: string) {
-  const router = createAppRouter(…
+  const router = createAppRouter(APP_ROOT)
   return Bun.serve({
     port,
     hostname,
     idleTimeout: 30,
-    routes: createBundlingRoutes(…
+    routes: createBundlingRoutes(APP_ROOT),
     async fetch(request) {
       try {
         return await router.fetch(request)
package/{cli.ts → src/cli.ts}
RENAMED

@@ -4,20 +4,20 @@ import type { Arguments, CommandBuilder, CommandHandler } from 'yargs'
 import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import { startAppServer } from './app-server'
-import { setLogHooks } from '…
-import { ensureFfmpegAvailable } from '…
+import { setLogHooks } from '../process-course/logging'
+import { ensureFfmpegAvailable } from '../process-course/ffmpeg'
 import {
   VIDEO_EXTENSIONS,
   normalizeProcessArgs,
   configureProcessCommand,
-} from '…
+} from '../process-course/cli'
 import { runProcessCourse } from './process-course-video'
 import {
   configureEditVideoCommand,
   configureCombineVideosCommand,
   createCombineVideosHandler,
   createEditVideoHandler,
-} from '…
+} from '../process-course/edits/cli'
 import { detectSpeechSegmentsForFile } from './speech-detection'
 import {
   getDefaultWhisperModelPath,

@@ -34,7 +34,7 @@ import {
   type PathPicker,
   type Prompter,
   withSpinner,
-} from '…
+} from '../cli-ux'
 
 type CliUxContext = {
   interactive: boolean

@@ -235,33 +235,49 @@ function createCliUxContext(): CliUxContext {
 }
 
 async function promptForCommand(prompter: Prompter): Promise<string[] | null> {
-  const selection = await prompter.…
+  const selection = await prompter.search('Choose a command (type to filter)', [
     {
-      name: 'Process chapters into separate files',
+      name: 'process - Process chapters into separate files',
       value: 'process',
+      description: 'process [input...]',
+      keywords: ['chapters', 'course', 'split', 'export'],
     },
     {
-      name: 'Edit a single video using transcript text edits',
+      name: 'edit - Edit a single video using transcript text edits',
       value: 'edit',
+      description: 'edit --input <file> --transcript <json> --edited <txt>',
+      keywords: ['transcript', 'cuts', 'remove', 'trim'],
    },
    {
-      name: 'Combine two videos with speech-aligned padding',
+      name: 'combine - Combine two videos with speech-aligned padding',
      value: 'combine',
+      description: 'combine --video1 <file> --video2 <file>',
+      keywords: ['merge', 'join', 'splice', 'padding'],
    },
    {
-      name: 'Start the web UI server',
+      name: 'app start - Start the web UI server',
      value: 'app-start',
+      description: 'app start --port <number> --host <host>',
+      keywords: ['app', 'ui', 'server', 'web', 'dashboard'],
    },
    {
-      name: 'Transcribe a single audio/video file',
+      name: 'transcribe - Transcribe a single audio/video file',
      value: 'transcribe',
+      description: 'transcribe [input]',
+      keywords: ['whisper', 'speech', 'audio', 'subtitles'],
    },
    {
-      name: 'Show detected speech segments for a file',
+      name: 'detect-speech - Show detected speech segments for a file',
      value: 'detect-speech',
+      description: 'detect-speech [input]',
+      keywords: ['speech', 'vad', 'silence', 'segments'],
    },
-    {
-      …
+    {
+      name: '--help - Show help',
+      value: 'help',
+      keywords: ['usage', '--help'],
+    },
+    { name: 'exit - Exit', value: 'exit', keywords: ['quit', 'cancel'] },
   ])
   switch (selection) {
     case 'exit':
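Both interactive menus now call `prompter.search(...)` with a `description` and `keywords` on each choice, and package.json adds `@inquirer/search` plus `match-sorter` to back it. The `cli-ux` module that implements `prompter.search` is not shown in this diff; the sketch below is only a plausible shape for such a helper, with the helper name and choice type as assumptions:

```ts
import search from '@inquirer/search'
import { matchSorter } from 'match-sorter'

// Assumed choice shape, mirroring the objects passed to prompter.search above.
type SearchChoice = {
  name: string
  value: string
  description?: string
  keywords?: string[]
}

// Hypothetical helper in the spirit of prompter.search: filters choices by
// name and keywords as the user types, then resolves to the selected value.
export async function searchPrompt(message: string, choices: SearchChoice[]) {
  return search({
    message,
    source: (input) => {
      const filtered = input
        ? matchSorter(choices, input, { keys: ['name', 'keywords'] })
        : choices
      return filtered.map((choice) => ({
        name: choice.name,
        value: choice.value,
        description: choice.description,
      }))
    },
  })
}
```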
package/{process-course-video.ts → src/process-course-video.ts}
RENAMED

@@ -1,25 +1,27 @@
 #!/usr/bin/env bun
 import path from 'node:path'
 import { mkdir } from 'node:fs/promises'
-import { ensureFfmpegAvailable, getChapters } from '…
-import { logInfo } from '…
-import { parseCliArgs, type CliArgs } from '…
-import { resolveChapterSelection } from '…
-import { removeDirIfEmpty } from '…
-import { writeJarvisLogs, writeSummaryLogs } from '…
+import { ensureFfmpegAvailable, getChapters } from '../process-course/ffmpeg'
+import { logInfo } from '../process-course/logging'
+import { parseCliArgs, type CliArgs } from '../process-course/cli'
+import { resolveChapterSelection } from '../process-course/utils/chapter-selection'
+import { removeDirIfEmpty } from '../process-course/utils/file-utils'
+import { writeJarvisLogs, writeSummaryLogs } from '../process-course/summary'
 import {
   processChapter,
   type ChapterProcessingOptions,
-  …
+  type ChapterProgressReporter,
+} from '../process-course/chapter-processor'
 import type {
   JarvisEdit,
   JarvisNote,
   JarvisWarning,
   ProcessedChapterInfo,
   EditWorkspaceInfo,
-} from '…
+} from '../process-course/types'
 import { formatSeconds } from './utils'
 import { checkSegmentHasSpeech } from './speech-detection'
+import { setActiveSpinnerText } from '../cli-ux'
 
 interface ProcessingSummary {
   totalSelected: number

@@ -35,6 +37,137 @@
 
 export type ProcessCourseOptions = Omit<CliArgs, 'shouldExit'>
 
+const PROGRESS_BAR_WIDTH = 12
+
+type SpinnerProgressContext = {
+  fileIndex: number
+  fileCount: number
+  fileName: string
+  chapterCount: number
+}
+
+type ChapterProgressContext = {
+  chapterIndex: number
+  chapterTitle: string
+}
+
+function clampProgress(value: number) {
+  return Math.max(0, Math.min(1, value))
+}
+
+function formatPercent(value: number) {
+  return `${Math.round(clampProgress(value) * 100)}%`
+}
+
+function formatProgressBar(value: number, width = PROGRESS_BAR_WIDTH) {
+  const clamped = clampProgress(value)
+  const filled = Math.round(clamped * width)
+  return `[${'#'.repeat(filled)}${'-'.repeat(width - filled)}]`
+}
+
+function truncateLabel(value: string, maxLength: number) {
+  const trimmed = value.trim()
+  if (trimmed.length <= maxLength) {
+    return trimmed
+  }
+  return `${trimmed.slice(0, Math.max(0, maxLength - 3))}...`
+}
+
+function buildProgressText(params: {
+  fileIndex: number
+  fileCount: number
+  fileName: string
+  chapterIndex: number
+  chapterCount: number
+  chapterTitle: string
+  stepIndex: number
+  stepCount: number
+  stepLabel: string
+}) {
+  const chapterProgress =
+    params.stepCount > 0 ? params.stepIndex / params.stepCount : 0
+  const fileProgress =
+    params.chapterCount > 0
+      ? (params.chapterIndex - 1 + chapterProgress) / params.chapterCount
+      : 1
+  const fileLabel =
+    params.fileCount > 1
+      ? `File ${params.fileIndex}/${params.fileCount}`
+      : 'File'
+  const fileName = truncateLabel(params.fileName, 22)
+  const fileSegment = fileName ? `${fileLabel} ${fileName}` : fileLabel
+  const chapterLabel = `Chapter ${params.chapterIndex}/${params.chapterCount}`
+  const chapterTitle = truncateLabel(params.chapterTitle, 26)
+  const chapterSegment = chapterTitle
+    ? `${chapterLabel} ${chapterTitle}`
+    : chapterLabel
+  const stepSegment = truncateLabel(params.stepLabel, 28) || 'Working'
+  return `Processing course | ${fileSegment} ${formatPercent(fileProgress)} ${formatProgressBar(fileProgress)} | ${chapterSegment} ${formatPercent(chapterProgress)} ${formatProgressBar(chapterProgress)} | ${stepSegment}`
+}
+
+function createSpinnerProgressReporter(context: SpinnerProgressContext) {
+  const chapterCount = Math.max(1, context.chapterCount)
+  return {
+    createChapterProgress({ chapterIndex, chapterTitle }: ChapterProgressContext) {
+      let stepIndex = 0
+      let stepCount = 1
+      let stepLabel = 'Starting'
+
+      const normalizeStepCount = (value: number) =>
+        Math.max(1, Math.round(value))
+
+      const update = () => {
+        setActiveSpinnerText(
+          buildProgressText({
+            fileIndex: context.fileIndex,
+            fileCount: context.fileCount,
+            fileName: context.fileName,
+            chapterIndex,
+            chapterCount,
+            chapterTitle,
+            stepIndex,
+            stepCount,
+            stepLabel,
+          }),
+        )
+      }
+
+      const progress: ChapterProgressReporter = {
+        start({ stepCount: initialCount, label }) {
+          stepCount = normalizeStepCount(initialCount)
+          stepIndex = 0
+          stepLabel = label ?? 'Starting'
+          update()
+        },
+        step(label) {
+          stepCount = normalizeStepCount(stepCount)
+          stepIndex = Math.min(stepIndex + 1, stepCount)
+          stepLabel = label
+          update()
+        },
+        setLabel(label) {
+          stepLabel = label
+          update()
+        },
+        finish(label) {
+          stepCount = normalizeStepCount(stepCount)
+          stepIndex = stepCount
+          stepLabel = label ?? 'Complete'
+          update()
+        },
+        skip(label) {
+          stepCount = normalizeStepCount(stepCount)
+          stepIndex = stepCount
+          stepLabel = label
+          update()
        },
+      }
+
+      return progress
+    },
+  }
+}
+
 export async function runProcessCourse(options: ProcessCourseOptions) {
   const {
     inputPaths,

@@ -53,7 +186,7 @@ export async function runProcessCourse(options: ProcessCourseOptions) {
   await ensureFfmpegAvailable()
 
   // Process each input file in turn
-  for (const inputPath of inputPaths) {
+  for (const [fileIndex, inputPath] of inputPaths.entries()) {
     // Determine output directory for this file
     let fileOutputDir: string
     if (outputDir) {

@@ -72,6 +205,8 @@ export async function runProcessCourse(options: ProcessCourseOptions) {
     }
 
     await processInputFile({
+      fileIndex: fileIndex + 1,
+      fileCount: inputPaths.length,
       inputPath,
       outputDir: fileOutputDir,
       minChapterDurationSeconds,

@@ -97,19 +232,23 @@ export async function runProcessCourseCli(rawArgs?: string[]) {
 }
 
 async function processInputFile(options: {
+  fileIndex: number
+  fileCount: number
   inputPath: string
   outputDir: string
   minChapterDurationSeconds: number
   dryRun: boolean
   keepIntermediates: boolean
   writeLogs: boolean
-  chapterSelection: import('…
+  chapterSelection: import('../process-course/types').ChapterSelection | null
   enableTranscription: boolean
   whisperModelPath: string
   whisperLanguage: string
   whisperBinaryPath: string | undefined
 }) {
   const {
+    fileIndex,
+    fileCount,
     inputPath,
     outputDir,
     minChapterDurationSeconds,

@@ -168,6 +307,13 @@ async function processInputFile(options: {
     ? chapters.filter((chapter) => chapterIndexes.includes(chapter.index))
     : chapters
 
+  const progressReporter = createSpinnerProgressReporter({
+    fileIndex,
+    fileCount,
+    fileName: path.basename(inputPath),
+    chapterCount: selectedChapters.length,
+  })
+
   const summary: ProcessingSummary = {
     totalSelected: selectedChapters.length,
     processed: 0,

@@ -203,7 +349,11 @@ async function processInputFile(options: {
   const processedChaptersWithSpeech: ProcessedChapterInfo[] = []
   let previousProcessedChapter: ProcessedChapterInfo | null = null
 
-  for (const chapter of selectedChapters) {
+  for (const [chapterOffset, chapter] of selectedChapters.entries()) {
+    const chapterProgress = progressReporter.createChapterProgress({
+      chapterIndex: chapterOffset + 1,
+      chapterTitle: chapter.title,
+    })
     // Determine which chapter to combine with
     // Always use the most recent processed chapter with speech (if any)
     const chapterToCombineWith: ProcessedChapterInfo | null =

@@ -222,6 +372,7 @@ async function processInputFile(options: {
     const result = await processChapter(chapter, {
       ...processingOptions,
       previousProcessedChapter: chapterToCombineWith,
+      progress: chapterProgress,
     })
 
     // Update summary based on result
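To make the spinner text concrete: each chapter's bar advances by one step per `step()` call, and the file-level bar interpolates across chapters. The snippet below simply restates the bar math from `formatProgressBar` above, outside the module, so the rendered output can be seen standalone (illustration only):

```ts
const PROGRESS_BAR_WIDTH = 12

function formatProgressBar(value: number, width = PROGRESS_BAR_WIDTH) {
  // Same math as the helper added above: clamp to [0, 1], then fill a
  // fixed-width bar with '#' characters.
  const clamped = Math.max(0, Math.min(1, value))
  const filled = Math.round(clamped * width)
  return `[${'#'.repeat(filled)}${'-'.repeat(width - filled)}]`
}

// A chapter on step 4 of 8 renders at 50%:
console.log(`${Math.round((4 / 8) * 100)}% ${formatProgressBar(4 / 8)}`)
// => 50% [######------]
```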
package/{speech-detection.ts → src/speech-detection.ts}
RENAMED

@@ -1,11 +1,11 @@
 import path from 'node:path'
 import { mkdir } from 'node:fs/promises'
 import * as ort from 'onnxruntime-node'
-import { readAudioSamples } from '…
-import { CONFIG } from '…
+import { readAudioSamples } from '../process-course/ffmpeg'
+import { CONFIG } from '../process-course/config'
 import { formatSeconds, getMediaDurationSeconds } from './utils'
-import { speechFallback } from '…
-import type { SpeechBounds } from '…
+import { speechFallback } from '../process-course/utils/audio-analysis'
+import type { SpeechBounds } from '../process-course/types'
 
 export type VadConfig = {
   vadWindowSamples: number
package/src/utils.test.ts
ADDED

@@ -0,0 +1,71 @@
+import { test, expect } from 'bun:test'
+import {
+  clamp,
+  formatCommand,
+  formatSeconds,
+  normalizeFilename,
+  runCommand,
+  toKebabCase,
+} from './utils'
+
+test('formatCommand quotes parts that include spaces', () => {
+  expect(formatCommand(['ffmpeg', '-i', 'my file.mp4'])).toBe(
+    'ffmpeg -i "my file.mp4"',
+  )
+})
+
+test('formatCommand keeps parts without spaces unchanged', () => {
+  expect(formatCommand(['echo', 'hello'])).toBe('echo hello')
+})
+
+test('formatSeconds formats to two decimals with suffix', () => {
+  expect(formatSeconds(1)).toBe('1.00s')
+  expect(formatSeconds(1.234)).toBe('1.23s')
+})
+
+test('clamp keeps values within range', () => {
+  expect(clamp(5, 0, 10)).toBe(5)
+})
+
+test('clamp enforces minimum bound', () => {
+  expect(clamp(-2, 0, 10)).toBe(0)
+})
+
+test('clamp enforces maximum bound', () => {
+  expect(clamp(12, 0, 10)).toBe(10)
+})
+
+test('toKebabCase trims, lowercases, and removes punctuation', () => {
+  expect(toKebabCase('Hello, World!')).toBe('hello-world')
+})
+
+test('toKebabCase collapses repeated separators', () => {
+  expect(toKebabCase(' React Hooks ')).toBe('react-hooks')
+})
+
+test('toKebabCase returns untitled for empty input', () => {
+  expect(toKebabCase(' ')).toBe('untitled')
+})
+
+test('normalizeFilename converts number words and dots', () => {
+  expect(normalizeFilename('Lesson One point Five')).toBe('lesson 01.05')
+})
+
+test('normalizeFilename trims and lowercases', () => {
+  expect(normalizeFilename(' Intro ')).toBe('intro')
+})
+
+test('runCommand captures stdout for successful command', async () => {
+  const result = await runCommand(['echo', 'hello'])
+  expect(result.exitCode).toBe(0)
+  expect(result.stdout.trim()).toBe('hello')
+})
+
+test('runCommand throws on non-zero exit without allowFailure', async () => {
+  await expect(runCommand(['false'])).rejects.toThrow('Command failed')
+})
+
+test('runCommand returns exit code when allowFailure is true', async () => {
+  const result = await runCommand(['false'], { allowFailure: true })
+  expect(result.exitCode).toBe(1)
+})
package/{utils.ts → src/utils.ts}
RENAMED (file without changes)

package/{whispercpp-transcribe.ts → src/whispercpp-transcribe.ts}
RENAMED (file without changes)