eprec 0.0.1 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +122 -29
- package/app/assets/styles.css +129 -0
- package/app/client/app.tsx +37 -0
- package/app/client/counter.tsx +22 -0
- package/app/client/entry.tsx +8 -0
- package/app/components/layout.tsx +37 -0
- package/app/config/env.ts +31 -0
- package/app/config/import-map.ts +9 -0
- package/app/config/init-env.ts +3 -0
- package/app/config/routes.ts +5 -0
- package/app/helpers/render.ts +6 -0
- package/app/router.tsx +102 -0
- package/app/routes/index.tsx +50 -0
- package/app-server.ts +60 -0
- package/cli.ts +173 -0
- package/package.json +46 -7
- package/process-course/chapter-processor.ts +1037 -0
- package/process-course/cli.ts +236 -0
- package/process-course/config.ts +50 -0
- package/process-course/edits/cli.ts +167 -0
- package/process-course/edits/combined-video-editor.ts +316 -0
- package/process-course/edits/edit-workspace.ts +90 -0
- package/process-course/edits/index.ts +20 -0
- package/process-course/edits/regenerate-transcript.ts +84 -0
- package/process-course/edits/remove-ranges.test.ts +36 -0
- package/process-course/edits/remove-ranges.ts +287 -0
- package/process-course/edits/timestamp-refinement.test.ts +25 -0
- package/process-course/edits/timestamp-refinement.ts +172 -0
- package/process-course/edits/transcript-diff.test.ts +105 -0
- package/process-course/edits/transcript-diff.ts +214 -0
- package/process-course/edits/transcript-output.test.ts +50 -0
- package/process-course/edits/transcript-output.ts +36 -0
- package/process-course/edits/types.ts +26 -0
- package/process-course/edits/video-editor.ts +246 -0
- package/process-course/errors.test.ts +63 -0
- package/process-course/errors.ts +82 -0
- package/process-course/ffmpeg.ts +449 -0
- package/process-course/jarvis-commands/handlers.ts +71 -0
- package/process-course/jarvis-commands/index.ts +14 -0
- package/process-course/jarvis-commands/parser.test.ts +348 -0
- package/process-course/jarvis-commands/parser.ts +257 -0
- package/process-course/jarvis-commands/types.ts +46 -0
- package/process-course/jarvis-commands/windows.ts +254 -0
- package/process-course/logging.ts +24 -0
- package/process-course/paths.test.ts +59 -0
- package/process-course/paths.ts +53 -0
- package/process-course/summary.test.ts +209 -0
- package/process-course/summary.ts +210 -0
- package/process-course/types.ts +85 -0
- package/process-course/utils/audio-analysis.test.ts +348 -0
- package/process-course/utils/audio-analysis.ts +463 -0
- package/process-course/utils/chapter-selection.test.ts +307 -0
- package/process-course/utils/chapter-selection.ts +136 -0
- package/process-course/utils/file-utils.test.ts +83 -0
- package/process-course/utils/file-utils.ts +57 -0
- package/process-course/utils/filename.test.ts +27 -0
- package/process-course/utils/filename.ts +12 -0
- package/process-course/utils/time-ranges.test.ts +221 -0
- package/process-course/utils/time-ranges.ts +86 -0
- package/process-course/utils/transcript.test.ts +257 -0
- package/process-course/utils/transcript.ts +86 -0
- package/process-course/utils/video-editing.ts +44 -0
- package/process-course-video.ts +389 -0
- package/public/robots.txt +2 -0
- package/server/bundling.ts +210 -0
- package/speech-detection.ts +355 -0
- package/utils.ts +138 -0
- package/whispercpp-transcribe.ts +343 -0
package/whispercpp-transcribe.ts
@@ -0,0 +1,343 @@
+import path from 'node:path'
+import { mkdir } from 'node:fs/promises'
+import { runCommand } from './utils'
+
+const DEFAULT_MODEL_FILENAME = 'ggml-small.en.bin'
+const DEFAULT_MODEL_URL =
+  'https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-small.en.bin'
+const DEFAULT_LANGUAGE = 'en'
+const DEFAULT_BINARY = 'whisper-cli'
+
+type TranscribeOptions = {
+  modelPath?: string
+  language?: string
+  threads?: number
+  binaryPath?: string
+  outputBasePath?: string
+}
+
+export type TranscriptSegment = {
+  start: number
+  end: number
+  text: string
+}
+
+export type TranscriptionResult = {
+  text: string
+  segments: TranscriptSegment[]
+  segmentsSource: 'tokens' | 'segments' | 'transcription' | 'none'
+}
+
+export function getDefaultWhisperModelPath() {
+  return path.resolve('.cache', 'whispercpp', DEFAULT_MODEL_FILENAME)
+}
+
+export async function transcribeAudio(
+  audioPath: string,
+  options: TranscribeOptions = {},
+): Promise<TranscriptionResult> {
+  const resolvedAudioPath = path.resolve(audioPath)
+  const resolvedModelPath = path.resolve(
+    options.modelPath ?? getDefaultWhisperModelPath(),
+  )
+  const language = (options.language ?? DEFAULT_LANGUAGE).trim() || 'en'
+  const binaryPath = options.binaryPath ?? DEFAULT_BINARY
+  const outputBasePath =
+    options.outputBasePath ??
+    path.join(
+      path.dirname(resolvedAudioPath),
+      `${path.parse(resolvedAudioPath).name}-transcript`,
+    )
+
+  await ensureModelFile(resolvedModelPath)
+
+  const args = [
+    binaryPath,
+    '-m',
+    resolvedModelPath,
+    '-f',
+    resolvedAudioPath,
+    '-l',
+    language,
+    '-ojf',
+    '-otxt',
+    '-of',
+    outputBasePath,
+  ]
+
+  if (options.threads && Number.isFinite(options.threads)) {
+    args.push('-t', String(options.threads))
+  }
+
+  const result = await runCommand(args)
+  const transcriptPath = `${outputBasePath}.txt`
+  const transcript = await readTranscriptText(transcriptPath, result.stdout)
+  const { segments, source } = await readTranscriptSegments(
+    `${outputBasePath}.json`,
+  )
+  const normalized = normalizeTranscriptText(transcript)
+  return { text: normalized, segments, segmentsSource: source }
+}
+
+async function ensureModelFile(modelPath: string) {
+  const file = Bun.file(modelPath)
+  if (await file.exists()) {
+    return
+  }
+
+  const defaultPath = getDefaultWhisperModelPath()
+  if (path.resolve(modelPath) !== path.resolve(defaultPath)) {
+    throw new Error(`Whisper model not found at ${modelPath}.`)
+  }
+
+  await mkdir(path.dirname(modelPath), { recursive: true })
+  const response = await fetch(DEFAULT_MODEL_URL)
+  if (!response.ok) {
+    throw new Error(
+      `Failed to download whisper.cpp model (${response.status} ${response.statusText}).`,
+    )
+  }
+
+  const bytes = await response.arrayBuffer()
+  await Bun.write(modelPath, bytes)
+}
+
+async function readTranscriptText(transcriptPath: string, fallback: string) {
+  const transcriptFile = Bun.file(transcriptPath)
+  if (await transcriptFile.exists()) {
+    return transcriptFile.text()
+  }
+  if (fallback.trim().length > 0) {
+    return fallback
+  }
+  throw new Error('Whisper.cpp transcript output was empty.')
+}
+
+async function readTranscriptSegments(transcriptPath: string): Promise<{
+  segments: TranscriptSegment[]
+  source: TranscriptionResult['segmentsSource']
+}> {
+  const transcriptFile = Bun.file(transcriptPath)
+  if (!(await transcriptFile.exists())) {
+    return { segments: [], source: 'none' }
+  }
+  const raw = await transcriptFile.text()
+  try {
+    const payload = JSON.parse(raw)
+    return parseTranscriptSegments(payload)
+  } catch (error) {
+    throw new Error(
+      `Failed to parse whisper.cpp JSON transcript: ${error instanceof Error ? error.message : error}`,
+    )
+  }
+}
+
+function parseTranscriptSegments(payload: unknown): {
+  segments: TranscriptSegment[]
+  source: TranscriptionResult['segmentsSource']
+} {
+  if (!payload || typeof payload !== 'object') {
+    return { segments: [], source: 'none' }
+  }
+  const transcription = (payload as any).transcription
+  const tokenSegments = parseTokenSegments(transcription)
+  if (tokenSegments.length > 0) {
+    return {
+      segments: tokenSegments.sort((a, b) => a.start - b.start),
+      source: 'tokens',
+    }
+  }
+  const segments = parseSegmentsArray((payload as any).segments)
+  if (segments.length > 0) {
+    return {
+      segments: segments.sort((a, b) => a.start - b.start),
+      source: 'segments',
+    }
+  }
+  const transcriptionSegments = parseTranscriptionArray(transcription)
+  return {
+    segments: transcriptionSegments.sort((a, b) => a.start - b.start),
+    source: transcriptionSegments.length > 0 ? 'transcription' : 'none',
+  }
+}
+
+type TokenOffsets = { from: number; to: number }
+
+function parseTokenSegments(rawTranscription: unknown): TranscriptSegment[] {
+  if (!Array.isArray(rawTranscription)) {
+    return []
+  }
+  const tokens = rawTranscription.flatMap((segment: any) =>
+    Array.isArray(segment?.tokens) ? segment.tokens : [],
+  )
+  if (tokens.length === 0) {
+    return []
+  }
+
+  const segments: TranscriptSegment[] = []
+  let currentWord = ''
+  let currentStart: number | null = null
+  let currentEnd: number | null = null
+
+  const flush = () => {
+    if (currentWord.trim() && currentStart !== null && currentEnd !== null) {
+      segments.push({
+        start: currentStart,
+        end: currentEnd,
+        text: currentWord.trim(),
+      })
+    }
+    currentWord = ''
+    currentStart = null
+    currentEnd = null
+  }
+
+  for (const token of tokens) {
+    if (!token || typeof token !== 'object') {
+      continue
+    }
+    const text = typeof token.text === 'string' ? token.text : ''
+    if (!text || text.startsWith('[_')) {
+      continue
+    }
+    const offsets = getTokenOffsets(token)
+    if (!offsets) {
+      continue
+    }
+
+    const hasLeadingSpace = /^\s/.test(text)
+    const cleaned = text.replace(/^\s+/, '')
+    if (!cleaned) {
+      continue
+    }
+    const isPunctuation = !/[a-z0-9]/i.test(cleaned)
+
+    if (hasLeadingSpace && currentWord) {
+      flush()
+    }
+    if (isPunctuation) {
+      if (currentWord) {
+        currentEnd = offsets.to / 1000
+      }
+      continue
+    }
+
+    if (!currentWord) {
+      currentStart = offsets.from / 1000
+    }
+    currentWord += cleaned
+    currentEnd = offsets.to / 1000
+  }
+  flush()
+  return segments
+}
+
+function getTokenOffsets(token: any): TokenOffsets | null {
+  const offsets = token?.offsets
+  const startMs = Number(offsets?.from)
+  const endMs = Number(offsets?.to)
+  if (!Number.isFinite(startMs) || !Number.isFinite(endMs)) {
+    return null
+  }
+  if (endMs < startMs) {
+    return null
+  }
+  return { from: startMs, to: endMs }
+}
+
+function parseSegmentsArray(rawSegments: unknown): TranscriptSegment[] {
+  if (!Array.isArray(rawSegments)) {
+    return []
+  }
+  return rawSegments
+    .map((segment: any) => {
+      const times = getSegmentTimes(segment)
+      if (!times) {
+        return null
+      }
+      const text =
+        typeof segment.text === 'string'
+          ? segment.text
+          : typeof segment.transcript === 'string'
+            ? segment.transcript
+            : ''
+      if (!text.trim()) {
+        return null
+      }
+      return {
+        start: times.start,
+        end: times.end,
+        text: text.trim(),
+      } satisfies TranscriptSegment
+    })
+    .filter((segment): segment is TranscriptSegment => Boolean(segment))
+}
+
+function parseTranscriptionArray(
+  rawTranscription: unknown,
+): TranscriptSegment[] {
+  if (!Array.isArray(rawTranscription)) {
+    return []
+  }
+  return rawTranscription
+    .map((segment: any) => {
+      if (!segment || typeof segment !== 'object') {
+        return null
+      }
+      const offsets = (segment as any).offsets
+      if (!offsets || typeof offsets !== 'object') {
+        return null
+      }
+      const startMs = Number((offsets as any).from)
+      const endMs = Number((offsets as any).to)
+      if (!Number.isFinite(startMs) || !Number.isFinite(endMs)) {
+        return null
+      }
+      if (endMs <= startMs) {
+        return null
+      }
+      const text =
+        typeof (segment as any).text === 'string' ? (segment as any).text : ''
+      if (!text.trim()) {
+        return null
+      }
+      return {
+        start: startMs / 1000,
+        end: endMs / 1000,
+        text: text.trim(),
+      } satisfies TranscriptSegment
+    })
+    .filter((segment): segment is TranscriptSegment => Boolean(segment))
+}
+
+function getSegmentTimes(segment: any): { start: number; end: number } | null {
+  if (
+    segment &&
+    typeof segment.start === 'number' &&
+    typeof segment.end === 'number'
+  ) {
+    if (segment.end > segment.start) {
+      return { start: segment.start, end: segment.end }
+    }
+  }
+  if (
+    segment &&
+    typeof segment.t0 === 'number' &&
+    typeof segment.t1 === 'number'
+  ) {
+    const start = segment.t0 * 0.01
+    const end = segment.t1 * 0.01
+    if (end > start) {
+      return { start, end }
+    }
+  }
+  return null
+}
+
+function normalizeTranscriptText(text: string) {
+  return text
+    .toLowerCase()
+    .replace(/[^a-z0-9]+/g, ' ')
+    .replace(/\s+/g, ' ')
+    .trim()
+}