eprec 1.5.0 → 1.6.0
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/README.md +1 -1
- package/app/assets/styles.css +18 -28
- package/package.json +8 -11
- package/process-course/chapter-processor.ts +6 -3
- package/process-course/cli.ts +1 -1
- package/process-course/edits/cli.ts +13 -5
- package/process-course/edits/combined-video-editor.ts +2 -2
- package/process-course/edits/edit-workspace.ts +1 -1
- package/process-course/edits/regenerate-transcript.ts +2 -2
- package/process-course/edits/remove-ranges.ts +1 -1
- package/process-course/edits/timestamp-refinement.ts +1 -1
- package/process-course/edits/transcript-output.test.ts +1 -1
- package/process-course/edits/transcript-output.ts +1 -1
- package/process-course/ffmpeg.ts +1 -1
- package/process-course/jarvis-commands/parser.test.ts +1 -1
- package/process-course/jarvis-commands/parser.ts +1 -1
- package/process-course/jarvis-commands/windows.ts +3 -3
- package/process-course/logging.ts +1 -1
- package/process-course/summary.ts +1 -1
- package/process-course/utils/filename.ts +1 -1
- package/process-course/utils/transcript.test.ts +1 -1
- package/process-course/utils/transcript.ts +1 -1
- package/{app-server.ts → src/app-server.ts} +8 -6
- package/{cli.ts → src/cli.ts} +30 -14
- package/{process-course-video.ts → src/process-course-video.ts} +9 -9
- package/{speech-detection.ts → src/speech-detection.ts} +4 -4
- package/src/utils.test.ts +71 -0
- package/{utils.ts → src/utils.ts} +0 -0
- package/{whispercpp-transcribe.ts → src/whispercpp-transcribe.ts} +0 -0
package/README.md
CHANGED

@@ -40,7 +40,7 @@ bun install
 ## Quick Start
 
 ```bash
-bun process-course-video.ts "/path/to/input.mp4" "/path/to/output" \
+bun src/process-course-video.ts "/path/to/input.mp4" "/path/to/output" \
   --enable-transcription \
   --keep-intermediates \
   --write-logs

package/app/assets/styles.css
CHANGED

@@ -33,12 +33,7 @@
   --color-danger-text: #b91c1c;
   --color-danger-border: #fecaca;
   --color-danger-border-strong: #fca5a5;
-  --font-family:
-    'Inter',
-    'Segoe UI',
-    system-ui,
-    -apple-system,
-    sans-serif;
+  --font-family: 'Inter', 'Segoe UI', system-ui, -apple-system, sans-serif;
   --font-size-xs: 12px;
   --font-size-sm: 14px;
   --font-size-base: 16px;

@@ -63,10 +58,8 @@
   --radius-lg: 12px;
   --radius-xl: 16px;
   --radius-pill: 999px;
-  --shadow-sm: 0 1px 2px
-    color-mix(in srgb, var(--color-text) 12%, transparent);
-  --shadow-md: 0 6px 16px
-    color-mix(in srgb, var(--color-text) 14%, transparent);
+  --shadow-sm: 0 1px 2px color-mix(in srgb, var(--color-text) 12%, transparent);
+  --shadow-md: 0 6px 16px color-mix(in srgb, var(--color-text) 14%, transparent);
   --shadow-lg: 0 18px 38px
     color-mix(in srgb, var(--color-text) 16%, transparent);
   --transition-fast: 150ms ease;

@@ -82,18 +75,18 @@
 @media (prefers-color-scheme: dark) {
   :root {
     color-scheme: dark;
-    …
+    --color-primary: #38bdf8;
+    --color-primary-hover: #0ea5e9;
+    --color-primary-active: #0284c7;
+    --color-on-primary: #0f172a;
+    --color-background: #0b1120;
+    --color-surface: #111827;
+    --color-surface-muted: #0f172a;
+    --color-surface-inverse: #f8fafc;
+    --color-text: #f8fafc;
+    --color-text-muted: #94a3b8;
+    --color-text-subtle: #cbd5e1;
+    --color-text-secondary: #e2e8f0;
     --color-text-faint: #64748b;
     --color-text-inverse: #0f172a;
     --color-border: #334155;

@@ -110,12 +103,9 @@
     --color-danger-text: #fecaca;
     --color-danger-border: #ef4444;
     --color-danger-border-strong: #fca5a5;
-    --shadow-sm: 0 1px 2px
-      color-mix(in srgb, #0f172a 12%, transparent);
-    --shadow-md: 0 6px 16px
-      color-mix(in srgb, #0f172a 14%, transparent);
-    --shadow-lg: 0 18px 38px
-      color-mix(in srgb, #0f172a 16%, transparent);
+    --shadow-sm: 0 1px 2px color-mix(in srgb, #0f172a 12%, transparent);
+    --shadow-md: 0 6px 16px color-mix(in srgb, #0f172a 14%, transparent);
+    --shadow-lg: 0 18px 38px color-mix(in srgb, #0f172a 16%, transparent);
   }
 }
 

package/package.json
CHANGED

@@ -1,35 +1,30 @@
 {
   "name": "eprec",
   "type": "module",
-  "version": "1.5.0",
+  "version": "1.6.0",
   "license": "MIT",
   "repository": {
     "type": "git",
     "url": "https://github.com/epicweb-dev/eprec"
   },
   "scripts": {
-    "app:start": "bun --watch ./cli.ts app start",
+    "app:start": "bun --watch ./src/cli.ts app start",
     "format": "prettier --write .",
-    "test": "bun test process-course utils.test.ts",
+    "test": "bun test process-course src/utils.test.ts",
     "test:e2e": "bun test ./e2e",
-    "test:smoke": "bunx playwright test -c playwright-smoke-config.ts",
+    "test:smoke": "bunx playwright test -c playwright/playwright-smoke-config.ts",
     "test:all": "bun test '**/*.test.ts'",
     "validate": "bun run test"
   },
   "bin": {
-    "eprec": "./cli.ts"
+    "eprec": "./src/cli.ts"
   },
   "files": [
     "app/**",
-    "app-server.ts",
-    "cli.ts",
     "process-course/**",
-    "process-course-video.ts",
     "public/**",
     "server/**",
-    "speech-detection.ts",
-    "utils.ts",
-    "whispercpp-transcribe.ts"
+    "src/**"
   ],
   "prettier": "@epic-web/config/prettier",
   "devDependencies": {

@@ -43,8 +38,10 @@
     "typescript": "^5"
   },
   "dependencies": {
+    "@inquirer/search": "^4.1.0",
     "get-port": "^7.1.0",
     "inquirer": "^13.2.1",
+    "match-sorter": "^8.2.0",
     "onnxruntime-node": "^1.23.2",
     "ora": "^9.1.0",
     "remix": "3.0.0-alpha.0",

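The two new dependencies line up with the searchable command prompts added in src/cli.ts and process-course/edits/cli.ts further down: @inquirer/search renders a type-to-filter list prompt, and match-sorter is a natural fit for ranking choices as the user types. The package's own Prompter.search wrapper is not shown in this diff, so the sketch below only illustrates how the two libraries are commonly wired together; the searchPrompt helper name and the filtering behavior are assumptions, while the Choice fields mirror the choice objects visible in the diffs.

```ts
import search from '@inquirer/search'
import { matchSorter } from 'match-sorter'

type Choice = {
  name: string
  value: string
  description?: string
  keywords?: string[]
}

// Hypothetical helper: a list prompt whose entries are filtered and ranked
// against both the label and the extra keywords as the user types.
async function searchPrompt(message: string, choices: Choice[]) {
  return search({
    message,
    source: async (term) => {
      if (!term) return choices
      return matchSorter(choices, term, { keys: ['name', 'keywords'] })
    },
  })
}

// Usage shaped like the command picker in the diffs below (illustrative only):
// const selection = await searchPrompt('Choose a command (type to filter)', [
//   { name: 'process - Process chapters into separate files', value: 'process' },
// ])
```
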
package/process-course/chapter-processor.ts
CHANGED

@@ -1,7 +1,10 @@
 import path from 'node:path'
-import {
-…
+import {
+  detectSpeechBounds,
+  checkSegmentHasSpeech,
+} from '../src/speech-detection'
+import { transcribeAudio } from '../src/whispercpp-transcribe'
+import { clamp, formatSeconds } from '../src/utils'
 import {
   COMMAND_CLOSE_WORD,
   COMMAND_WAKE_WORD,

package/process-course/cli.ts
CHANGED

@@ -2,7 +2,7 @@ import path from 'node:path'
 import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import type { Argv, Arguments } from 'yargs'
-import { getDefaultWhisperModelPath } from '../whispercpp-transcribe'
+import { getDefaultWhisperModelPath } from '../src/whispercpp-transcribe'
 import { DEFAULT_MIN_CHAPTER_SECONDS, TRANSCRIPTION_PHRASES } from './config'
 import { normalizeSkipPhrases } from './utils/transcript'
 import { parseChapterSelection } from './utils/chapter-selection'

package/process-course/edits/cli.ts
CHANGED

@@ -357,17 +357,25 @@ if (import.meta.main) {
 async function promptForEditsCommand(
   prompter: Prompter,
 ): Promise<string[] | null> {
-  const selection = await prompter.
+  const selection = await prompter.search('Choose a command (type to filter)', [
     {
-      name: 'Edit a single video using transcript text edits',
+      name: 'edit-video - Edit a single video using transcript text edits',
       value: 'edit-video',
+      description: 'edit-video --input <file> --transcript <json> --edited <txt>',
+      keywords: ['transcript', 'cuts', 'remove', 'trim'],
     },
     {
-      name: 'Combine two videos with speech-aligned padding',
+      name: 'combine-videos - Combine two videos with speech-aligned padding',
       value: 'combine-videos',
+      description: 'combine-videos --video1 <file> --video2 <file>',
+      keywords: ['merge', 'join', 'splice', 'padding'],
     },
-    {
-…
+    {
+      name: '--help - Show help',
+      value: 'help',
+      keywords: ['usage', '--help'],
+    },
+    { name: 'exit - Exit', value: 'exit', keywords: ['quit', 'cancel'] },
   ])
   if (selection === 'exit') {
     return null

package/process-course/edits/combined-video-editor.ts
CHANGED

@@ -4,9 +4,9 @@ import { copyFile, mkdir, mkdtemp, rename, rm } from 'node:fs/promises'
 import {
   detectSpeechBounds,
   checkSegmentHasSpeech,
-} from '../../speech-detection'
+} from '../../src/speech-detection'
 import { extractChapterSegmentAccurate, concatSegments } from '../ffmpeg'
-import { clamp, getMediaDurationSeconds } from '../../utils'
+import { clamp, getMediaDurationSeconds } from '../../src/utils'
 import { EDIT_CONFIG } from '../config'
 import { editVideo } from './video-editor'
 import {

package/process-course/edits/edit-workspace.ts
CHANGED

@@ -1,6 +1,6 @@
 import path from 'node:path'
 import { copyFile, mkdir } from 'node:fs/promises'
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import {
   buildTranscriptWordsWithIndices,
   generateTranscriptJson,

package/process-course/edits/regenerate-transcript.ts
CHANGED

@@ -5,7 +5,7 @@ import { mkdtemp, readdir, rm } from 'node:fs/promises'
 import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import { extractTranscriptionAudio } from '../ffmpeg'
-import { transcribeAudio } from '../../whispercpp-transcribe'
+import { transcribeAudio } from '../../src/whispercpp-transcribe'
 import { scaleTranscriptSegments } from '../jarvis-commands/parser'
 import { EDIT_CONFIG } from '../config'
 import {

@@ -13,7 +13,7 @@ import {
   generateTranscriptJson,
   generateTranscriptText,
 } from './transcript-output'
-import { getMediaDurationSeconds } from '../../utils'
+import { getMediaDurationSeconds } from '../../src/utils'
 
 async function main() {
   const argv = yargs(hideBin(process.argv))

package/process-course/edits/remove-ranges.ts
CHANGED

@@ -6,7 +6,7 @@ import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import { extractChapterSegmentAccurate, concatSegments } from '../ffmpeg'
 import { buildKeepRanges, mergeTimeRanges } from '../utils/time-ranges'
-import { clamp, getMediaDurationSeconds } from '../../utils'
+import { clamp, getMediaDurationSeconds } from '../../src/utils'
 import type { TimeRange } from '../types'
 
 export type RemoveRangesOptions = {

package/process-course/edits/timestamp-refinement.ts
CHANGED

@@ -1,6 +1,6 @@
 import { readAudioSamples } from '../ffmpeg'
 import { CONFIG, EDIT_CONFIG } from '../config'
-import { clamp } from '../../utils'
+import { clamp } from '../../src/utils'
 import { mergeTimeRanges } from '../utils/time-ranges'
 import { findLowestAmplitudeBoundaryProgressive } from '../utils/audio-analysis'
 import type { TimeRange } from '../types'

package/process-course/edits/transcript-output.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import { buildTranscriptWords } from '../jarvis-commands/parser'
 import type { TranscriptJson, TranscriptWordWithIndex } from './types'
 

package/process-course/ffmpeg.ts
CHANGED

@@ -2,7 +2,7 @@ import {
   runCommand as runCommandBase,
   runCommandBinary as runCommandBinaryBase,
   formatSeconds,
-} from '../utils'
+} from '../src/utils'
 import { CONFIG, TRANSCRIPTION_SAMPLE_RATE } from './config'
 import { logCommand, logInfo, logWarn } from './logging'
 import type { Chapter, LoudnormAnalysis } from './types'

package/process-course/jarvis-commands/parser.test.ts
CHANGED

@@ -1,6 +1,6 @@
 import { test, expect } from 'bun:test'
 import { scaleTranscriptSegments, extractTranscriptCommands } from './parser'
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 
 // Factory functions for test data
 function createSegment(

package/process-course/jarvis-commands/parser.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import { CONFIG } from '../config'
 import type { TimeRange } from '../types'
 import { normalizeWords } from '../utils/transcript'

package/process-course/jarvis-commands/windows.ts
CHANGED

@@ -1,9 +1,9 @@
-import { clamp } from '../../utils'
-import { detectSpeechSegmentsWithVad } from '../../speech-detection'
+import { clamp } from '../../src/utils'
+import { detectSpeechSegmentsWithVad } from '../../src/speech-detection'
 import { readAudioSamples } from '../ffmpeg'
 import { CONFIG } from '../config'
 import { logInfo } from '../logging'
-import { formatSeconds } from '../../utils'
+import { formatSeconds } from '../../src/utils'
 import { mergeTimeRanges } from '../utils/time-ranges'
 import {
   buildSilenceGapsFromSpeech,

package/process-course/utils/transcript.test.ts
CHANGED

@@ -7,7 +7,7 @@ import {
   normalizeWords,
 } from './transcript'
 import { TRANSCRIPTION_PHRASES } from '../config'
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 
 function createPhrases(...phrases: string[]): string[] {
   return phrases

package/process-course/utils/transcript.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { TranscriptSegment } from '../../whispercpp-transcribe'
+import type { TranscriptSegment } from '../../src/whispercpp-transcribe'
 import type { TimeRange } from '../types'
 import { TRANSCRIPTION_PHRASES } from '../config'
 import { buildTranscriptWords } from '../jarvis-commands/parser'

package/{app-server.ts → src/app-server.ts}
RENAMED

@@ -1,9 +1,10 @@
-import '
+import path from 'node:path'
+import '../app/config/init-env.ts'
 
 import getPort from 'get-port'
-import { getEnv } from '
-import { createAppRouter } from '
-import { createBundlingRoutes } from '
+import { getEnv } from '../app/config/env.ts'
+import { createAppRouter } from '../app/router.tsx'
+import { createBundlingRoutes } from '../server/bundling.ts'
 
 type AppServerOptions = {
   host?: string

@@ -21,6 +22,7 @@ const SHORTCUT_COLORS: Record<string, string> = {
   h: '\u001b[35m',
 }
 const ANSI_RESET = '\u001b[0m'
+const APP_ROOT = path.resolve(import.meta.dirname, '..')
 
 function colorizeShortcut(key: string) {
   if (!COLOR_ENABLED) {

@@ -145,12 +147,12 @@ function setupShortcutHandling(options: {
 }
 
 function startServer(port: number, hostname: string) {
-  const router = createAppRouter(
+  const router = createAppRouter(APP_ROOT)
   return Bun.serve({
     port,
     hostname,
     idleTimeout: 30,
-    routes: createBundlingRoutes(
+    routes: createBundlingRoutes(APP_ROOT),
     async fetch(request) {
       try {
         return await router.fetch(request)

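One consequence of moving app-server.ts into src/ is visible in the hunks above: the server can no longer treat its own directory as the package root, so it resolves APP_ROOT one level up from import.meta.dirname and hands that to createAppRouter and createBundlingRoutes. A minimal sketch of the same idea follows; resolveAsset is an invented example, not part of the package.

```ts
import path from 'node:path'

// From src/, the package root (where app/, server/, and public/ live) is one level up.
const APP_ROOT = path.resolve(import.meta.dirname, '..')

// Hypothetical usage: resolve files relative to the package root rather than
// relative to src/, so moving the module does not break lookups.
function resolveAsset(relativePath: string): string {
  return path.join(APP_ROOT, relativePath) // e.g. app/assets/styles.css
}
```
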
package/{cli.ts → src/cli.ts}
RENAMED

@@ -4,20 +4,20 @@ import type { Arguments, CommandBuilder, CommandHandler } from 'yargs'
 import yargs from 'yargs/yargs'
 import { hideBin } from 'yargs/helpers'
 import { startAppServer } from './app-server'
-import { setLogHooks } from '
-import { ensureFfmpegAvailable } from '
+import { setLogHooks } from '../process-course/logging'
+import { ensureFfmpegAvailable } from '../process-course/ffmpeg'
 import {
   VIDEO_EXTENSIONS,
   normalizeProcessArgs,
   configureProcessCommand,
-} from '
+} from '../process-course/cli'
 import { runProcessCourse } from './process-course-video'
 import {
   configureEditVideoCommand,
   configureCombineVideosCommand,
   createCombineVideosHandler,
   createEditVideoHandler,
-} from '
+} from '../process-course/edits/cli'
 import { detectSpeechSegmentsForFile } from './speech-detection'
 import {
   getDefaultWhisperModelPath,

@@ -34,7 +34,7 @@ import {
   type PathPicker,
   type Prompter,
   withSpinner,
-} from '
+} from '../cli-ux'
 
 type CliUxContext = {
   interactive: boolean

@@ -235,33 +235,49 @@ function createCliUxContext(): CliUxContext {
 }
 
 async function promptForCommand(prompter: Prompter): Promise<string[] | null> {
-  const selection = await prompter.
+  const selection = await prompter.search('Choose a command (type to filter)', [
     {
-      name: 'Process chapters into separate files',
+      name: 'process - Process chapters into separate files',
       value: 'process',
+      description: 'process [input...]',
+      keywords: ['chapters', 'course', 'split', 'export'],
     },
     {
-      name: 'Edit a single video using transcript text edits',
+      name: 'edit - Edit a single video using transcript text edits',
       value: 'edit',
+      description: 'edit --input <file> --transcript <json> --edited <txt>',
+      keywords: ['transcript', 'cuts', 'remove', 'trim'],
     },
     {
-      name: 'Combine two videos with speech-aligned padding',
+      name: 'combine - Combine two videos with speech-aligned padding',
       value: 'combine',
+      description: 'combine --video1 <file> --video2 <file>',
+      keywords: ['merge', 'join', 'splice', 'padding'],
     },
     {
-      name: 'Start the web UI server',
+      name: 'app start - Start the web UI server',
       value: 'app-start',
+      description: 'app start --port <number> --host <host>',
+      keywords: ['app', 'ui', 'server', 'web', 'dashboard'],
     },
     {
-      name: 'Transcribe a single audio/video file',
+      name: 'transcribe - Transcribe a single audio/video file',
      value: 'transcribe',
+      description: 'transcribe [input]',
+      keywords: ['whisper', 'speech', 'audio', 'subtitles'],
     },
     {
-      name: 'Show detected speech segments for a file',
+      name: 'detect-speech - Show detected speech segments for a file',
       value: 'detect-speech',
+      description: 'detect-speech [input]',
+      keywords: ['speech', 'vad', 'silence', 'segments'],
     },
-    {
-…
+    {
+      name: '--help - Show help',
+      value: 'help',
+      keywords: ['usage', '--help'],
+    },
+    { name: 'exit - Exit', value: 'exit', keywords: ['quit', 'cancel'] },
   ])
   switch (selection) {
     case 'exit':

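For reference, the choice objects passed to prompter.search in both command pickers share one shape. The field list below is inferred from the diffs; the real definition is not part of this diff and may differ.

```ts
// Inferred shape of the entries handed to prompter.search; not the package's own type.
type CommandChoice = {
  name: string // label shown in the list, e.g. 'process - Process chapters into separate files'
  value: string // token returned when the entry is picked, e.g. 'process'
  description?: string // usage hint, e.g. 'process [input...]'
  keywords?: string[] // extra terms the type-to-filter search can match on
}
```
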
package/{process-course-video.ts → src/process-course-video.ts}
RENAMED

@@ -1,23 +1,23 @@
 #!/usr/bin/env bun
 import path from 'node:path'
 import { mkdir } from 'node:fs/promises'
-import { ensureFfmpegAvailable, getChapters } from '
-import { logInfo } from '
-import { parseCliArgs, type CliArgs } from '
-import { resolveChapterSelection } from '
-import { removeDirIfEmpty } from '
-import { writeJarvisLogs, writeSummaryLogs } from '
+import { ensureFfmpegAvailable, getChapters } from '../process-course/ffmpeg'
+import { logInfo } from '../process-course/logging'
+import { parseCliArgs, type CliArgs } from '../process-course/cli'
+import { resolveChapterSelection } from '../process-course/utils/chapter-selection'
+import { removeDirIfEmpty } from '../process-course/utils/file-utils'
+import { writeJarvisLogs, writeSummaryLogs } from '../process-course/summary'
 import {
   processChapter,
   type ChapterProcessingOptions,
-} from '
+} from '../process-course/chapter-processor'
 import type {
   JarvisEdit,
   JarvisNote,
   JarvisWarning,
   ProcessedChapterInfo,
   EditWorkspaceInfo,
-} from '
+} from '../process-course/types'
 import { formatSeconds } from './utils'
 import { checkSegmentHasSpeech } from './speech-detection'
 

@@ -103,7 +103,7 @@ async function processInputFile(options: {
   dryRun: boolean
   keepIntermediates: boolean
   writeLogs: boolean
-  chapterSelection: import('
+  chapterSelection: import('../process-course/types').ChapterSelection | null
   enableTranscription: boolean
   whisperModelPath: string
   whisperLanguage: string

package/{speech-detection.ts → src/speech-detection.ts}
RENAMED

@@ -1,11 +1,11 @@
 import path from 'node:path'
 import { mkdir } from 'node:fs/promises'
 import * as ort from 'onnxruntime-node'
-import { readAudioSamples } from '
-import { CONFIG } from '
+import { readAudioSamples } from '../process-course/ffmpeg'
+import { CONFIG } from '../process-course/config'
 import { formatSeconds, getMediaDurationSeconds } from './utils'
-import { speechFallback } from '
-import type { SpeechBounds } from '
+import { speechFallback } from '../process-course/utils/audio-analysis'
+import type { SpeechBounds } from '../process-course/types'
 
 export type VadConfig = {
   vadWindowSamples: number

package/src/utils.test.ts
ADDED

@@ -0,0 +1,71 @@
+import { test, expect } from 'bun:test'
+import {
+  clamp,
+  formatCommand,
+  formatSeconds,
+  normalizeFilename,
+  runCommand,
+  toKebabCase,
+} from './utils'
+
+test('formatCommand quotes parts that include spaces', () => {
+  expect(formatCommand(['ffmpeg', '-i', 'my file.mp4'])).toBe(
+    'ffmpeg -i "my file.mp4"',
+  )
+})
+
+test('formatCommand keeps parts without spaces unchanged', () => {
+  expect(formatCommand(['echo', 'hello'])).toBe('echo hello')
+})
+
+test('formatSeconds formats to two decimals with suffix', () => {
+  expect(formatSeconds(1)).toBe('1.00s')
+  expect(formatSeconds(1.234)).toBe('1.23s')
+})
+
+test('clamp keeps values within range', () => {
+  expect(clamp(5, 0, 10)).toBe(5)
+})
+
+test('clamp enforces minimum bound', () => {
+  expect(clamp(-2, 0, 10)).toBe(0)
+})
+
+test('clamp enforces maximum bound', () => {
+  expect(clamp(12, 0, 10)).toBe(10)
+})
+
+test('toKebabCase trims, lowercases, and removes punctuation', () => {
+  expect(toKebabCase('Hello, World!')).toBe('hello-world')
+})
+
+test('toKebabCase collapses repeated separators', () => {
+  expect(toKebabCase(' React Hooks ')).toBe('react-hooks')
+})
+
+test('toKebabCase returns untitled for empty input', () => {
+  expect(toKebabCase(' ')).toBe('untitled')
+})
+
+test('normalizeFilename converts number words and dots', () => {
+  expect(normalizeFilename('Lesson One point Five')).toBe('lesson 01.05')
+})
+
+test('normalizeFilename trims and lowercases', () => {
+  expect(normalizeFilename(' Intro ')).toBe('intro')
+})
+
+test('runCommand captures stdout for successful command', async () => {
+  const result = await runCommand(['echo', 'hello'])
+  expect(result.exitCode).toBe(0)
+  expect(result.stdout.trim()).toBe('hello')
+})
+
+test('runCommand throws on non-zero exit without allowFailure', async () => {
+  await expect(runCommand(['false'])).rejects.toThrow('Command failed')
+})
+
+test('runCommand returns exit code when allowFailure is true', async () => {
+  const result = await runCommand(['false'], { allowFailure: true })
+  expect(result.exitCode).toBe(1)
+})

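The new test file is the only view this diff gives into the helpers that moved to src/utils.ts; the implementation itself is an unchanged rename and is not shown. Read as a contract, the tests imply roughly the signatures below, which are inferred here rather than copied from the source (in particular, runCommand may return more fields than the two the tests touch).

```ts
// Signatures inferred from src/utils.test.ts; not the actual implementation.
export declare function clamp(value: number, min: number, max: number): number
export declare function formatSeconds(seconds: number): string // 1.234 -> '1.23s'
export declare function formatCommand(parts: string[]): string // quotes parts containing spaces
export declare function toKebabCase(input: string): string // blank input -> 'untitled'
export declare function normalizeFilename(input: string): string // 'Lesson One point Five' -> 'lesson 01.05'
export declare function runCommand(
  parts: string[],
  options?: { allowFailure?: boolean },
): Promise<{ exitCode: number; stdout: string }>
```
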
package/{utils.ts → src/utils.ts}
RENAMED
File without changes

package/{whispercpp-transcribe.ts → src/whispercpp-transcribe.ts}
RENAMED
File without changes