pyannote-cpp-node 0.3.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +292 -10
- package/dist/Pipeline.d.ts +2 -0
- package/dist/Pipeline.d.ts.map +1 -1
- package/dist/Pipeline.js +15 -0
- package/dist/Pipeline.js.map +1 -1
- package/dist/binding.d.ts +4 -0
- package/dist/binding.d.ts.map +1 -1
- package/dist/binding.js.map +1 -1
- package/package.json +3 -3
package/README.md
CHANGED
|
@@ -7,7 +7,7 @@ Node.js native bindings for integrated Whisper transcription + speaker diarizati
|
|
|
7
7
|
|
|
8
8
|
## Overview
|
|
9
9
|
|
|
10
|
-
`pyannote-cpp-node` exposes the integrated C++ pipeline that combines
|
|
10
|
+
`pyannote-cpp-node` exposes the integrated C++ pipeline that combines Whisper transcription and speaker diarization into a single API.
|
|
11
11
|
|
|
12
12
|
Given 16 kHz mono PCM audio (`Float32Array`), it produces cumulative and final transcript segments shaped as:
|
|
13
13
|
|
|
@@ -15,16 +15,21 @@ Given 16 kHz mono PCM audio (`Float32Array`), it produces cumulative and final t
|
|
|
15
15
|
- segment start/duration in seconds
|
|
16
16
|
- segment text
|
|
17
17
|
|
|
18
|
-
The API supports
|
|
18
|
+
The API supports three modes: **offline** batch processing (`transcribeOffline`), **one-shot** streaming (`transcribe`), and **incremental** streaming (`createSession` + `push`/`finalize`). All heavy operations are asynchronous and run on libuv worker threads.
|
|
19
19
|
|
|
20
20
|
## Features
|
|
21
21
|
|
|
22
22
|
- Integrated transcription + diarization in one pipeline
|
|
23
23
|
- Speaker-labeled transcript segments with sentence-level text
|
|
24
|
-
-
|
|
25
|
-
-
|
|
24
|
+
- **Offline mode**: runs Whisper on the full audio at once + offline diarization (fastest for batch)
|
|
25
|
+
- **One-shot mode**: streaming pipeline with automatic chunking
|
|
26
|
+
- **Streaming mode**: incremental push/finalize with real-time `segments` events and `audio` chunk streaming
|
|
26
27
|
- Deterministic output for the same audio/models/config
|
|
27
28
|
- CoreML-accelerated inference on macOS
|
|
29
|
+
- **Shared model cache**: all models loaded once during `Pipeline.load()`, reused across offline/streaming/session modes
|
|
30
|
+
- **Runtime backend switching**: switch Whisper between GPU-only and CoreML-accelerated without reloading the pipeline
|
|
31
|
+
- **Progress reporting**: optional `onProgress` callback for `transcribeOffline` reports Whisper, diarization, and alignment phases
|
|
32
|
+
- **Real-time segment streaming**: optional `onSegment` callback for `transcribeOffline` delivers each Whisper segment (start, end, text) as it's produced — enables live transcript preview and time-based loading bars
|
|
28
33
|
- TypeScript-first API with complete type definitions
|
|
29
34
|
|
|
30
35
|
## Requirements
|
|
@@ -68,7 +73,9 @@ const pipeline = await Pipeline.load({
|
|
|
68
73
|
});
|
|
69
74
|
|
|
70
75
|
const audio = loadAudioAsFloat32Array('./audio-16khz-mono.wav');
|
|
71
|
-
|
|
76
|
+
|
|
77
|
+
// Offline mode — fastest for batch processing
|
|
78
|
+
const result = await pipeline.transcribeOffline(audio);
|
|
72
79
|
|
|
73
80
|
for (const segment of result.segments) {
|
|
74
81
|
const end = segment.start + segment.duration;
|
|
@@ -87,8 +94,12 @@ pipeline.close();
|
|
|
87
94
|
```typescript
|
|
88
95
|
class Pipeline {
|
|
89
96
|
static async load(config: ModelConfig): Promise<Pipeline>;
|
|
97
|
+
async transcribeOffline(audio: Float32Array, onProgress?: (phase: number, progress: number) => void, onSegment?: (start: number, end: number, text: string) => void): Promise<TranscriptionResult>;
|
|
90
98
|
async transcribe(audio: Float32Array): Promise<TranscriptionResult>;
|
|
99
|
+
setLanguage(language: string): void;
|
|
100
|
+
setDecodeOptions(options: DecodeOptions): void;
|
|
91
101
|
createSession(): PipelineSession;
|
|
102
|
+
async setUseCoreml(useCoreml: boolean): Promise<void>;
|
|
92
103
|
close(): void;
|
|
93
104
|
get isClosed(): boolean;
|
|
94
105
|
}
|
|
@@ -96,12 +107,85 @@ class Pipeline {
|
|
|
96
107
|
|
|
97
108
|
#### `static async load(config: ModelConfig): Promise<Pipeline>`
|
|
98
109
|
|
|
99
|
-
Validates model paths and
|
|
110
|
+
Validates model paths and loads all models (Whisper, CoreML segmentation/embedding, PLDA, and optionally VAD) into a shared cache on a background thread. Models are loaded once and reused across all subsequent `transcribe()`, `transcribeOffline()`, and `createSession()` calls — no redundant loading occurs when switching between modes. Models are freed only when `close()` is called.
|
|
111
|
+
|
|
112
|
+
#### `async transcribeOffline(audio: Float32Array, onProgress?, onSegment?): Promise<TranscriptionResult>`
|
|
113
|
+
|
|
114
|
+
Runs Whisper on the **entire** audio buffer in a single `whisper_full()` call, then runs offline diarization and WhisperX-style speaker alignment. This is the fastest mode for batch processing — no streaming infrastructure is involved.
|
|
115
|
+
|
|
116
|
+
The optional `onProgress` callback receives `(phase, progress)` updates:
|
|
117
|
+
|
|
118
|
+
| Phase | Value | Meaning |
|
|
119
|
+
| --- | --- | --- |
|
|
120
|
+
| `0` | `0`–`100` | Whisper transcription progress (percentage) |
|
|
121
|
+
| `1` | `0` | Diarization started |
|
|
122
|
+
| `2` | `0` | Speaker alignment started |
|
|
123
|
+
|
|
124
|
+
```typescript
|
|
125
|
+
const result = await pipeline.transcribeOffline(audio, (phase, progress) => {
|
|
126
|
+
if (phase === 0) console.log(`Transcribing: ${progress}%`);
|
|
127
|
+
if (phase === 1) console.log('Running diarization...');
|
|
128
|
+
if (phase === 2) console.log('Aligning speakers...');
|
|
129
|
+
});
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
The optional `onSegment` callback receives `(start, end, text)` for each Whisper segment as it's produced during transcription. Times are in seconds. This enables live transcript preview before diarization and alignment complete.
|
|
133
|
+
|
|
134
|
+
```typescript
|
|
135
|
+
const result = await pipeline.transcribeOffline(audio, undefined, (start, end, text) => {
|
|
136
|
+
console.log(`[${start.toFixed(2)}-${end.toFixed(2)}] ${text}`);
|
|
137
|
+
});
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
Both callbacks can be used simultaneously:
|
|
141
|
+
|
|
142
|
+
```typescript
|
|
143
|
+
const result = await pipeline.transcribeOffline(
|
|
144
|
+
audio,
|
|
145
|
+
(phase, progress) => {
|
|
146
|
+
if (phase === 0) updateProgressBar(progress);
|
|
147
|
+
},
|
|
148
|
+
(start, end, text) => {
|
|
149
|
+
appendToTranscriptPreview(start, end, text);
|
|
150
|
+
},
|
|
151
|
+
);
|
|
152
|
+
```
|
|
100
153
|
|
|
101
154
|
#### `async transcribe(audio: Float32Array): Promise<TranscriptionResult>`
|
|
102
155
|
|
|
103
|
-
Runs one-shot transcription + diarization
|
|
156
|
+
Runs one-shot transcription + diarization using the streaming pipeline internally (pushes 1-second chunks then finalizes).
|
|
157
|
+
|
|
158
|
+
#### `setLanguage(language: string): void`
|
|
104
159
|
|
|
160
|
+
Updates the Whisper decode language for subsequent `transcribe()` calls. This is a convenience shorthand for `setDecodeOptions({ language })`.
|
|
161
|
+
|
|
162
|
+
#### `setDecodeOptions(options: DecodeOptions): void`
|
|
163
|
+
|
|
164
|
+
Updates one or more Whisper decode options for subsequent `transcribe()` calls. Only the fields you pass are changed; others retain their current values. See `DecodeOptions` for available fields.
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
#### `async setUseCoreml(useCoreml: boolean): Promise<void>`
|
|
168
|
+
|
|
169
|
+
Switches the Whisper inference backend between GPU-only (`false`) and GPU+CoreML (`true`) at runtime. The method reloads the Whisper context on a background thread with the new `use_coreml` setting. The promise resolves when the new context is ready.
|
|
170
|
+
|
|
171
|
+
- If the requested mode matches the current mode, returns immediately (no reload).
|
|
172
|
+
- Throws if the pipeline is closed, busy, or models are not loaded.
|
|
173
|
+
- After switching, all subsequent `transcribe()`, `transcribeOffline()`, and streaming session calls use the new backend.
|
|
174
|
+
|
|
175
|
+
```typescript
|
|
176
|
+
// Start with GPU-only Whisper
|
|
177
|
+
const pipeline = await Pipeline.load({
|
|
178
|
+
...modelPaths,
|
|
179
|
+
useCoreml: false,
|
|
180
|
+
});
|
|
181
|
+
|
|
182
|
+
// Switch to CoreML-accelerated Whisper at runtime
|
|
183
|
+
await pipeline.setUseCoreml(true);
|
|
184
|
+
const result = await pipeline.transcribeOffline(audio);
|
|
185
|
+
|
|
186
|
+
// Switch back to GPU-only
|
|
187
|
+
await pipeline.setUseCoreml(false);
|
|
188
|
+
```
|
|
105
189
|
#### `createSession(): PipelineSession`
|
|
106
190
|
|
|
107
191
|
Creates an independent streaming session for incremental processing.
|
|
@@ -120,6 +204,8 @@ Returns `true` after `close()`.
|
|
|
120
204
|
```typescript
|
|
121
205
|
class PipelineSession extends EventEmitter {
|
|
122
206
|
async push(audio: Float32Array): Promise<boolean[]>;
|
|
207
|
+
setLanguage(language: string): void;
|
|
208
|
+
setDecodeOptions(options: DecodeOptions): void;
|
|
123
209
|
async finalize(): Promise<TranscriptionResult>;
|
|
124
210
|
close(): void;
|
|
125
211
|
get isClosed(): boolean;
|
|
@@ -146,6 +232,14 @@ Pushes an arbitrary number of samples into the streaming pipeline.
|
|
|
146
232
|
- First 10 seconds return an empty array because the pipeline needs a full 10-second window
|
|
147
233
|
- Chunk size is flexible; not restricted to 16,000-sample pushes
|
|
148
234
|
|
|
235
|
+
#### `setLanguage(language: string): void`
|
|
236
|
+
|
|
237
|
+
Updates the Whisper decode language on the live streaming session. Takes effect on the next Whisper decode run. Thread-safe — the change is pushed to the C++ pipeline immediately.
|
|
238
|
+
|
|
239
|
+
#### `setDecodeOptions(options: DecodeOptions): void`
|
|
240
|
+
|
|
241
|
+
Updates one or more Whisper decode options on the live streaming session. Takes effect on the next Whisper decode run. Thread-safe — changes are pushed to the C++ pipeline immediately. Only the fields you pass are changed; others retain their current values.
|
|
242
|
+
|
|
149
243
|
#### `async finalize(): Promise<TranscriptionResult>`
|
|
150
244
|
|
|
151
245
|
Flushes all stages, runs final recluster + alignment, and returns the definitive result.
|
|
@@ -284,6 +378,41 @@ export interface ModelConfig {
|
|
|
284
378
|
suppressNst?: boolean;
|
|
285
379
|
}
|
|
286
380
|
|
|
381
|
+
export interface DecodeOptions {
|
|
382
|
+
/** Language code (e.g., 'en', 'zh'). Omit for auto-detect. */
|
|
383
|
+
language?: string;
|
|
384
|
+
/** Translate non-English speech to English */
|
|
385
|
+
translate?: boolean;
|
|
386
|
+
/** Auto-detect spoken language. Overrides 'language' when true. */
|
|
387
|
+
detectLanguage?: boolean;
|
|
388
|
+
/** Number of threads for Whisper inference */
|
|
389
|
+
nThreads?: number;
|
|
390
|
+
/** Sampling temperature. 0.0 = greedy deterministic. */
|
|
391
|
+
temperature?: number;
|
|
392
|
+
/** Temperature increment for fallback retries */
|
|
393
|
+
temperatureInc?: number;
|
|
394
|
+
/** Disable temperature fallback. If true, temperatureInc is ignored. */
|
|
395
|
+
noFallback?: boolean;
|
|
396
|
+
/** Beam search size. -1 uses greedy decoding. >1 enables beam search. */
|
|
397
|
+
beamSize?: number;
|
|
398
|
+
/** Best-of-N sampling candidates for greedy decoding */
|
|
399
|
+
bestOf?: number;
|
|
400
|
+
/** Entropy threshold for decoder fallback */
|
|
401
|
+
entropyThold?: number;
|
|
402
|
+
/** Log probability threshold for decoder fallback */
|
|
403
|
+
logprobThold?: number;
|
|
404
|
+
/** No-speech probability threshold */
|
|
405
|
+
noSpeechThold?: number;
|
|
406
|
+
/** Initial prompt text to condition the decoder */
|
|
407
|
+
prompt?: string;
|
|
408
|
+
/** Don't use previous segment as context for next segment */
|
|
409
|
+
noContext?: boolean;
|
|
410
|
+
/** Suppress blank outputs at the beginning of segments */
|
|
411
|
+
suppressBlank?: boolean;
|
|
412
|
+
/** Suppress non-speech tokens */
|
|
413
|
+
suppressNst?: boolean;
|
|
414
|
+
}
|
|
415
|
+
|
|
287
416
|
export interface AlignedSegment {
|
|
288
417
|
/** Global speaker label (e.g., SPEAKER_00). */
|
|
289
418
|
speaker: string;
|
|
@@ -306,7 +435,68 @@ export interface TranscriptionResult {
|
|
|
306
435
|
|
|
307
436
|
## Usage Examples
|
|
308
437
|
|
|
309
|
-
###
|
|
438
|
+
### Offline transcription (recommended for batch)
|
|
439
|
+
|
|
440
|
+
```typescript
|
|
441
|
+
import { Pipeline } from 'pyannote-cpp-node';
|
|
442
|
+
|
|
443
|
+
async function runOffline(audio: Float32Array) {
|
|
444
|
+
const pipeline = await Pipeline.load({
|
|
445
|
+
segModelPath: './models/segmentation.gguf',
|
|
446
|
+
embModelPath: './models/embedding.gguf',
|
|
447
|
+
pldaPath: './models/plda.gguf',
|
|
448
|
+
coremlPath: './models/embedding.mlpackage',
|
|
449
|
+
segCoremlPath: './models/segmentation.mlpackage',
|
|
450
|
+
whisperModelPath: './models/ggml-large-v3-turbo-q5_0.bin',
|
|
451
|
+
});
|
|
452
|
+
|
|
453
|
+
// Runs Whisper on full audio at once + offline diarization
|
|
454
|
+
const result = await pipeline.transcribeOffline(audio);
|
|
455
|
+
|
|
456
|
+
for (const seg of result.segments) {
|
|
457
|
+
const end = seg.start + seg.duration;
|
|
458
|
+
console.log(`[${seg.speaker}] ${seg.start.toFixed(2)}-${end.toFixed(2)} ${seg.text.trim()}`);
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
pipeline.close();
|
|
462
|
+
}
|
|
463
|
+
```
|
|
464
|
+
|
|
465
|
+
### Offline transcription with progress and live transcript preview
|
|
466
|
+
|
|
467
|
+
```typescript
|
|
468
|
+
import { Pipeline } from 'pyannote-cpp-node';
|
|
469
|
+
|
|
470
|
+
async function runOfflineWithCallbacks(audio: Float32Array) {
|
|
471
|
+
const pipeline = await Pipeline.load({
|
|
472
|
+
segModelPath: './models/segmentation.gguf',
|
|
473
|
+
embModelPath: './models/embedding.gguf',
|
|
474
|
+
pldaPath: './models/plda.gguf',
|
|
475
|
+
coremlPath: './models/embedding.mlpackage',
|
|
476
|
+
segCoremlPath: './models/segmentation.mlpackage',
|
|
477
|
+
whisperModelPath: './models/ggml-large-v3-turbo-q5_0.bin',
|
|
478
|
+
});
|
|
479
|
+
|
|
480
|
+
const result = await pipeline.transcribeOffline(
|
|
481
|
+
audio,
|
|
482
|
+
// Progress callback — phase 0 is Whisper (0-100%), phase 1 is diarization, phase 2 is alignment
|
|
483
|
+
(phase, progress) => {
|
|
484
|
+
if (phase === 0) updateProgressBar(progress);
|
|
485
|
+
if (phase === 1) showStatus('Identifying speakers...');
|
|
486
|
+
if (phase === 2) showStatus('Aligning speakers to transcript...');
|
|
487
|
+
},
|
|
488
|
+
// Segment callback — each Whisper segment as it's produced (before diarization)
|
|
489
|
+
(start, end, text) => {
|
|
490
|
+
appendToLivePreview(`[${start.toFixed(2)}-${end.toFixed(2)}] ${text}`);
|
|
491
|
+
},
|
|
492
|
+
);
|
|
493
|
+
|
|
494
|
+
console.log(`Done: ${result.segments.length} speaker-labeled segments`);
|
|
495
|
+
pipeline.close();
|
|
496
|
+
}
|
|
497
|
+
```
|
|
498
|
+
|
|
499
|
+
### One-shot transcription (streaming internals)
|
|
310
500
|
|
|
311
501
|
```typescript
|
|
312
502
|
import { Pipeline } from 'pyannote-cpp-node';
|
|
@@ -321,6 +511,7 @@ async function runOneShot(audio: Float32Array) {
|
|
|
321
511
|
whisperModelPath: './models/ggml-large-v3-turbo-q5_0.bin',
|
|
322
512
|
});
|
|
323
513
|
|
|
514
|
+
// Uses streaming pipeline internally (push 1s chunks + finalize)
|
|
324
515
|
const result = await pipeline.transcribe(audio);
|
|
325
516
|
|
|
326
517
|
for (const seg of result.segments) {
|
|
@@ -419,6 +610,88 @@ const pipeline = await Pipeline.load({
|
|
|
419
610
|
});
|
|
420
611
|
```
|
|
421
612
|
|
|
613
|
+
### Changing language at runtime
|
|
614
|
+
|
|
615
|
+
```typescript
|
|
616
|
+
import { Pipeline } from 'pyannote-cpp-node';
|
|
617
|
+
|
|
618
|
+
const pipeline = await Pipeline.load({
|
|
619
|
+
segModelPath: './models/segmentation.gguf',
|
|
620
|
+
embModelPath: './models/embedding.gguf',
|
|
621
|
+
pldaPath: './models/plda.gguf',
|
|
622
|
+
coremlPath: './models/embedding.mlpackage',
|
|
623
|
+
segCoremlPath: './models/segmentation.mlpackage',
|
|
624
|
+
whisperModelPath: './models/ggml-large-v3-turbo-q5_0.bin',
|
|
625
|
+
language: 'en',
|
|
626
|
+
});
|
|
627
|
+
|
|
628
|
+
// First transcription in English
|
|
629
|
+
const result1 = await pipeline.transcribe(englishAudio);
|
|
630
|
+
|
|
631
|
+
// Switch to Korean for the next transcription
|
|
632
|
+
pipeline.setLanguage('ko');
|
|
633
|
+
const result2 = await pipeline.transcribe(koreanAudio);
|
|
634
|
+
|
|
635
|
+
// Or update multiple decode options at once
|
|
636
|
+
pipeline.setDecodeOptions({
|
|
637
|
+
language: 'zh',
|
|
638
|
+
temperature: 0.2,
|
|
639
|
+
beamSize: 5,
|
|
640
|
+
});
|
|
641
|
+
const result3 = await pipeline.transcribe(chineseAudio);
|
|
642
|
+
|
|
643
|
+
pipeline.close();
|
|
644
|
+
```
|
|
645
|
+
|
|
646
|
+
### Switching Whisper backend at runtime
|
|
647
|
+
|
|
648
|
+
```typescript
|
|
649
|
+
import { Pipeline } from 'pyannote-cpp-node';
|
|
650
|
+
|
|
651
|
+
// Start with GPU-only Whisper (default)
|
|
652
|
+
const pipeline = await Pipeline.load({
|
|
653
|
+
segModelPath: './models/segmentation.gguf',
|
|
654
|
+
embModelPath: './models/embedding.gguf',
|
|
655
|
+
pldaPath: './models/plda.gguf',
|
|
656
|
+
coremlPath: './models/embedding.mlpackage',
|
|
657
|
+
segCoremlPath: './models/segmentation.mlpackage',
|
|
658
|
+
whisperModelPath: './models/ggml-large-v3-turbo-q5_0.bin',
|
|
659
|
+
useCoreml: false,
|
|
660
|
+
});
|
|
661
|
+
|
|
662
|
+
// Switch to CoreML-accelerated Whisper encoder at runtime
|
|
663
|
+
// (requires ggml-large-v3-turbo-q5_0-encoder.mlmodelc next to the Whisper model file)
|
|
664
|
+
await pipeline.setUseCoreml(true);
|
|
665
|
+
const result1 = await pipeline.transcribeOffline(audio);
|
|
666
|
+
|
|
667
|
+
// Switch back to GPU-only
|
|
668
|
+
await pipeline.setUseCoreml(false);
|
|
669
|
+
const result2 = await pipeline.transcribeOffline(audio);
|
|
670
|
+
|
|
671
|
+
pipeline.close();
|
|
672
|
+
```
|
|
673
|
+
|
|
674
|
+
Streaming sessions also support runtime changes:
|
|
675
|
+
|
|
676
|
+
```typescript
|
|
677
|
+
const session = pipeline.createSession();
|
|
678
|
+
|
|
679
|
+
session.on('segments', (segments) => {
|
|
680
|
+
console.log(segments);
|
|
681
|
+
});
|
|
682
|
+
|
|
683
|
+
// Push English audio
|
|
684
|
+
await session.push(englishChunk);
|
|
685
|
+
|
|
686
|
+
// Switch language mid-stream — takes effect on the next Whisper decode
|
|
687
|
+
session.setLanguage('ko');
|
|
688
|
+
await session.push(koreanChunk);
|
|
689
|
+
|
|
690
|
+
const result = await session.finalize();
|
|
691
|
+
|
|
692
|
+
session.close();
|
|
693
|
+
```
|
|
694
|
+
|
|
422
695
|
## JSON Output Format
|
|
423
696
|
|
|
424
697
|
The pipeline returns this JSON shape:
|
|
@@ -447,7 +720,15 @@ All API methods expect decoded PCM samples; file decoding/resampling is handled
|
|
|
447
720
|
|
|
448
721
|
## Architecture
|
|
449
722
|
|
|
450
|
-
|
|
723
|
+
### Offline mode (`transcribeOffline`)
|
|
724
|
+
|
|
725
|
+
1. Single `whisper_full()` call on entire audio
|
|
726
|
+
2. Offline diarization (segmentation → powerset → embeddings → PLDA → AHC → VBx)
|
|
727
|
+
3. WhisperX-style alignment (speaker assignment by maximum segment overlap)
|
|
728
|
+
|
|
729
|
+
### Streaming mode (`transcribe` / `createSession`)
|
|
730
|
+
|
|
731
|
+
The streaming pipeline runs in 7 stages:
|
|
451
732
|
|
|
452
733
|
1. VAD silence filter (optional compression of long silence)
|
|
453
734
|
2. Audio buffer (stream-safe FIFO with timestamp tracking)
|
|
@@ -459,8 +740,9 @@ The integrated pipeline runs in 7 stages:
|
|
|
459
740
|
|
|
460
741
|
## Performance
|
|
461
742
|
|
|
743
|
+
- Offline transcription + diarization: **~12x real-time** (30s audio in 2.5s)
|
|
462
744
|
- Diarization only: **39x real-time**
|
|
463
|
-
- Integrated transcription + diarization: **~14.6x real-time**
|
|
745
|
+
- Integrated streaming transcription + diarization: **~14.6x real-time**
|
|
464
746
|
- 45-minute Korean meeting test (6 speakers): **2713s audio in 186s**
|
|
465
747
|
- Each Whisper segment maps 1:1 to a speaker-labeled segment (no merging)
|
|
466
748
|
- Speaker confusion rate: **2.55%**
|
package/dist/Pipeline.d.ts
CHANGED
|
@@ -5,9 +5,11 @@ export declare class Pipeline {
|
|
|
5
5
|
private constructor();
|
|
6
6
|
static load(config: ModelConfig): Promise<Pipeline>;
|
|
7
7
|
transcribe(audio: Float32Array): Promise<TranscriptionResult>;
|
|
8
|
+
transcribeOffline(audio: Float32Array, onProgress?: (phase: number, progress: number) => void, onSegment?: (start: number, end: number, text: string) => void): Promise<TranscriptionResult>;
|
|
8
9
|
setLanguage(language: string): void;
|
|
9
10
|
setDecodeOptions(options: DecodeOptions): void;
|
|
10
11
|
createSession(): PipelineSession;
|
|
12
|
+
setUseCoreml(useCoreml: boolean): Promise<void>;
|
|
11
13
|
close(): void;
|
|
12
14
|
get isClosed(): boolean;
|
|
13
15
|
}
|
package/dist/Pipeline.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"Pipeline.d.ts","sourceRoot":"","sources":["../src/Pipeline.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AACvD,OAAO,KAAK,EAAE,WAAW,EAAE,mBAAmB,EAAE,aAAa,EAAE,MAAM,YAAY,CAAC;AAElF,qBAAa,QAAQ;IACnB,OAAO,CAAC,MAAM,CAAsB;IAEpC,OAAO;WAIM,IAAI,CAAC,MAAM,EAAE,WAAW,GAAG,OAAO,CAAC,QAAQ,CAAC;
|
|
1
|
+
{"version":3,"file":"Pipeline.d.ts","sourceRoot":"","sources":["../src/Pipeline.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AACvD,OAAO,KAAK,EAAE,WAAW,EAAE,mBAAmB,EAAE,aAAa,EAAE,MAAM,YAAY,CAAC;AAElF,qBAAa,QAAQ;IACnB,OAAO,CAAC,MAAM,CAAsB;IAEpC,OAAO;WAIM,IAAI,CAAC,MAAM,EAAE,WAAW,GAAG,OAAO,CAAC,QAAQ,CAAC;IAkBnD,UAAU,CAAC,KAAK,EAAE,YAAY,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAO7D,iBAAiB,CACrB,KAAK,EAAE,YAAY,EACnB,UAAU,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,KAAK,IAAI,EACtD,SAAS,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,KAAK,IAAI,GAC7D,OAAO,CAAC,mBAAmB,CAAC;IAO/B,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,IAAI;IAKnC,gBAAgB,CAAC,OAAO,EAAE,aAAa,GAAG,IAAI;IAK9C,aAAa,IAAI,eAAe;IAW1B,YAAY,CAAC,SAAS,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAKrD,KAAK,IAAI,IAAI;IACb,IAAI,QAAQ,IAAI,OAAO,CAAiC;CACzD"}
|
package/dist/Pipeline.js
CHANGED
|
@@ -21,6 +21,7 @@ export class Pipeline {
|
|
|
21
21
|
accessSync(config.vadModelPath);
|
|
22
22
|
const binding = getBinding();
|
|
23
23
|
const native = new binding.PipelineModel(config);
|
|
24
|
+
await native.loadModels();
|
|
24
25
|
return new Pipeline(native);
|
|
25
26
|
}
|
|
26
27
|
async transcribe(audio) {
|
|
@@ -32,6 +33,15 @@ export class Pipeline {
|
|
|
32
33
|
throw new Error('Audio must not be empty');
|
|
33
34
|
return this.native.transcribe(audio);
|
|
34
35
|
}
|
|
36
|
+
async transcribeOffline(audio, onProgress, onSegment) {
|
|
37
|
+
if (this.native.isClosed)
|
|
38
|
+
throw new Error('Pipeline is closed');
|
|
39
|
+
if (!(audio instanceof Float32Array))
|
|
40
|
+
throw new TypeError('Expected Float32Array');
|
|
41
|
+
if (audio.length === 0)
|
|
42
|
+
throw new Error('Audio must not be empty');
|
|
43
|
+
return this.native.transcribeOffline(audio, onProgress, onSegment);
|
|
44
|
+
}
|
|
35
45
|
setLanguage(language) {
|
|
36
46
|
if (this.native.isClosed)
|
|
37
47
|
throw new Error('Pipeline is closed');
|
|
@@ -50,6 +60,11 @@ export class Pipeline {
|
|
|
50
60
|
session._setNative(nativeSession);
|
|
51
61
|
return session;
|
|
52
62
|
}
|
|
63
|
+
async setUseCoreml(useCoreml) {
|
|
64
|
+
if (this.native.isClosed)
|
|
65
|
+
throw new Error('Pipeline is closed');
|
|
66
|
+
return this.native.switchWhisperMode(useCoreml);
|
|
67
|
+
}
|
|
53
68
|
close() { this.native.close(); }
|
|
54
69
|
get isClosed() { return this.native.isClosed; }
|
|
55
70
|
}
|
package/dist/Pipeline.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"Pipeline.js","sourceRoot":"","sources":["../src/Pipeline.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,MAAM,SAAS,CAAC;AACrC,OAAO,EAAE,UAAU,EAA4B,MAAM,cAAc,CAAC;AACpE,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAGvD,MAAM,OAAO,QAAQ;IACX,MAAM,CAAsB;IAEpC,YAAoB,MAA2B;QAC7C,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;IACvB,CAAC;IAED,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,MAAmB;QACnC,MAAM,aAAa,GAAG;YACpB,MAAM,CAAC,YAAY;YACnB,MAAM,CAAC,YAAY;YACnB,MAAM,CAAC,QAAQ;YACf,MAAM,CAAC,UAAU;YACjB,MAAM,CAAC,aAAa;YACpB,MAAM,CAAC,gBAAgB;SACxB,CAAC;QACF,KAAK,MAAM,IAAI,IAAI,aAAa;YAAE,UAAU,CAAC,IAAI,CAAC,CAAC;QACnD,IAAI,MAAM,CAAC,YAAY;YAAE,UAAU,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;QAEzD,MAAM,OAAO,GAAG,UAAU,EAAE,CAAC;QAC7B,MAAM,MAAM,GAAG,IAAI,OAAO,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;QACjD,OAAO,IAAI,QAAQ,CAAC,MAAM,CAAC,CAAC;IAC9B,CAAC;IAED,KAAK,CAAC,UAAU,CAAC,KAAmB;QAClC,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,CAAC,KAAK,YAAY,YAAY,CAAC;YAAE,MAAM,IAAI,SAAS,CAAC,uBAAuB,CAAC,CAAC;QACnF,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC;YAAE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;QACnE,OAAO,IAAI,CAAC,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;IACvC,CAAC;IAED,WAAW,CAAC,QAAgB;QAC1B,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,MAAM,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC;IACpC,CAAC;IAED,gBAAgB,CAAC,OAAsB;QACrC,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IAC/C,CAAC;IAED,aAAa;QACX,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,MAAM,OAAO,GAAG,IAAI,eAAe,EAAE,CAAC;QACtC,MAAM,aAAa,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,CAC7C,CAAC,QAAe,EAAE,EAAE,CAAC,OAAO,CAAC,mBAAmB,CAAC,QAAQ,CAAC,EAC1D,CAAC,KAAmB,EAAE,EAAE,CAAC,OAAO,CAAC,gBAAgB,CAAC,KAAK,CAAC,CACzD,CAAC;QACF,OAAO,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC;QAClC,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,KAAK,KAAW,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC;IACtC,IAAI,QAAQ,KAAc,OAAO,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC;CACz
D"}
|
|
1
|
+
{"version":3,"file":"Pipeline.js","sourceRoot":"","sources":["../src/Pipeline.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,MAAM,SAAS,CAAC;AACrC,OAAO,EAAE,UAAU,EAA4B,MAAM,cAAc,CAAC;AACpE,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAGvD,MAAM,OAAO,QAAQ;IACX,MAAM,CAAsB;IAEpC,YAAoB,MAA2B;QAC7C,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;IACvB,CAAC;IAED,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,MAAmB;QACnC,MAAM,aAAa,GAAG;YACpB,MAAM,CAAC,YAAY;YACnB,MAAM,CAAC,YAAY;YACnB,MAAM,CAAC,QAAQ;YACf,MAAM,CAAC,UAAU;YACjB,MAAM,CAAC,aAAa;YACpB,MAAM,CAAC,gBAAgB;SACxB,CAAC;QACF,KAAK,MAAM,IAAI,IAAI,aAAa;YAAE,UAAU,CAAC,IAAI,CAAC,CAAC;QACnD,IAAI,MAAM,CAAC,YAAY;YAAE,UAAU,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;QAEzD,MAAM,OAAO,GAAG,UAAU,EAAE,CAAC;QAC7B,MAAM,MAAM,GAAG,IAAI,OAAO,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;QACjD,MAAM,MAAM,CAAC,UAAU,EAAE,CAAC;QAC1B,OAAO,IAAI,QAAQ,CAAC,MAAM,CAAC,CAAC;IAC9B,CAAC;IAED,KAAK,CAAC,UAAU,CAAC,KAAmB;QAClC,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,CAAC,KAAK,YAAY,YAAY,CAAC;YAAE,MAAM,IAAI,SAAS,CAAC,uBAAuB,CAAC,CAAC;QACnF,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC;YAAE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;QACnE,OAAO,IAAI,CAAC,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;IACvC,CAAC;IAED,KAAK,CAAC,iBAAiB,CACrB,KAAmB,EACnB,UAAsD,EACtD,SAA8D;QAE9D,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,CAAC,KAAK,YAAY,YAAY,CAAC;YAAE,MAAM,IAAI,SAAS,CAAC,uBAAuB,CAAC,CAAC;QACnF,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC;YAAE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;QACnE,OAAO,IAAI,CAAC,MAAM,CAAC,iBAAiB,CAAC,KAAK,EAAE,UAAU,EAAE,SAAS,CAAC,CAAC;IACrE,CAAC;IAED,WAAW,CAAC,QAAgB;QAC1B,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,MAAM,CAAC,WAAW,CAAC,QAAQ,CAAC,CAAC;IACpC,CAAC;IAED,gBAAgB,CAAC,OAAsB;QACrC,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IAC/C,CAAC;IAED,aAAa;QACX,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE
,MAAM,OAAO,GAAG,IAAI,eAAe,EAAE,CAAC;QACtC,MAAM,aAAa,GAAG,IAAI,CAAC,MAAM,CAAC,aAAa,CAC7C,CAAC,QAAe,EAAE,EAAE,CAAC,OAAO,CAAC,mBAAmB,CAAC,QAAQ,CAAC,EAC1D,CAAC,KAAmB,EAAE,EAAE,CAAC,OAAO,CAAC,gBAAgB,CAAC,KAAK,CAAC,CACzD,CAAC;QACF,OAAO,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC;QAClC,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,KAAK,CAAC,YAAY,CAAC,SAAkB;QACnC,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ;YAAE,MAAM,IAAI,KAAK,CAAC,oBAAoB,CAAC,CAAC;QAChE,OAAO,IAAI,CAAC,MAAM,CAAC,iBAAiB,CAAC,SAAS,CAAC,CAAC;IAClD,CAAC;IAED,KAAK,KAAW,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC;IACtC,IAAI,QAAQ,KAAc,OAAO,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC;CACzD"}
|
package/dist/binding.d.ts
CHANGED
|
@@ -1,11 +1,15 @@
|
|
|
1
1
|
import type { ModelConfig, TranscriptionResult } from './types.js';
|
|
2
2
|
export interface NativePipelineModel {
|
|
3
3
|
transcribe(audio: Float32Array): Promise<TranscriptionResult>;
|
|
4
|
+
transcribeOffline(audio: Float32Array, onProgress?: (phase: number, progress: number) => void, onSegment?: (start: number, end: number, text: string) => void): Promise<TranscriptionResult>;
|
|
4
5
|
setLanguage(language: string): void;
|
|
5
6
|
setDecodeOptions(options: Record<string, unknown>): void;
|
|
6
7
|
createSession(segmentsCb: (segments: any[]) => void, audioCb: (audio: Float32Array) => void): NativePipelineSession;
|
|
7
8
|
close(): void;
|
|
8
9
|
isClosed: boolean;
|
|
10
|
+
loadModels(): Promise<void>;
|
|
11
|
+
isLoaded: boolean;
|
|
12
|
+
switchWhisperMode(useCoreml: boolean): Promise<void>;
|
|
9
13
|
}
|
|
10
14
|
export interface NativePipelineSession {
|
|
11
15
|
push(audio: Float32Array): Promise<boolean[]>;
|
package/dist/binding.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"binding.d.ts","sourceRoot":"","sources":["../src/binding.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,YAAY,CAAC;AAInE,MAAM,WAAW,mBAAmB;IAClC,UAAU,CAAC,KAAK,EAAE,YAAY,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAAC;IAC9D,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IACpC,gBAAgB,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,IAAI,CAAC;IACzD,aAAa,CACX,UAAU,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,KAAK,IAAI,EACrC,OAAO,EAAE,CAAC,KAAK,EAAE,YAAY,KAAK,IAAI,GACrC,qBAAqB,CAAC;IACzB,KAAK,IAAI,IAAI,CAAC;IACd,QAAQ,EAAE,OAAO,CAAC;
|
|
1
|
+
{"version":3,"file":"binding.d.ts","sourceRoot":"","sources":["../src/binding.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,YAAY,CAAC;AAInE,MAAM,WAAW,mBAAmB;IAClC,UAAU,CAAC,KAAK,EAAE,YAAY,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAAC;IAC9D,iBAAiB,CAAC,KAAK,EAAE,YAAY,EAAE,UAAU,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,KAAK,IAAI,EAAE,SAAS,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAAC;IAC7L,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IACpC,gBAAgB,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,IAAI,CAAC;IACzD,aAAa,CACX,UAAU,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,KAAK,IAAI,EACrC,OAAO,EAAE,CAAC,KAAK,EAAE,YAAY,KAAK,IAAI,GACrC,qBAAqB,CAAC;IACzB,KAAK,IAAI,IAAI,CAAC;IACd,QAAQ,EAAE,OAAO,CAAC;IAClB,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IAC5B,QAAQ,EAAE,OAAO,CAAC;IAClB,iBAAiB,CAAC,SAAS,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD;AAED,MAAM,WAAW,qBAAqB;IACpC,IAAI,CAAC,KAAK,EAAE,YAAY,GAAG,OAAO,CAAC,OAAO,EAAE,CAAC,CAAC;IAC9C,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IACpC,gBAAgB,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,IAAI,CAAC;IACzD,QAAQ,IAAI,OAAO,CAAC,mBAAmB,CAAC,CAAC;IACzC,KAAK,IAAI,IAAI,CAAC;IACd,QAAQ,EAAE,OAAO,CAAC;CACnB;AAED,MAAM,WAAW,aAAa;IAC5B,aAAa,EAAE,KAAK,MAAM,EAAE,WAAW,KAAK,mBAAmB,CAAC;IAChE,eAAe,EAAE,KAAK,GAAG,IAAI,EAAE,OAAO,EAAE,KAAK,qBAAqB,CAAC;CACpE;AAoCD,wBAAgB,UAAU,IAAI,aAAa,CAyB1C"}
|
package/dist/binding.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"binding.js","sourceRoot":"","sources":["../src/binding.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,aAAa,EAAE,MAAM,QAAQ,CAAC;AAIvC,MAAM,OAAO,GAAG,aAAa,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;
|
|
1
|
+
{"version":3,"file":"binding.js","sourceRoot":"","sources":["../src/binding.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,aAAa,EAAE,MAAM,QAAQ,CAAC;AAIvC,MAAM,OAAO,GAAG,aAAa,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;AAgC/C,IAAI,aAAa,GAAyB,IAAI,CAAC;AAE/C,SAAS,cAAc;IACrB,IAAI,OAAO,CAAC,QAAQ,KAAK,QAAQ,EAAE,CAAC;QAClC,MAAM,IAAI,KAAK,CACb,yBAAyB,OAAO,CAAC,QAAQ,oDAAoD,CAC9F,CAAC;IACJ,CAAC;IAED,IAAI,OAAO,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;QAC7B,OAAO,iCAAiC,CAAC;IAC3C,CAAC;IAED,IAAI,OAAO,CAAC,IAAI,KAAK,KAAK,EAAE,CAAC;QAC3B,OAAO,+BAA+B,CAAC;IACzC,CAAC;IAED,MAAM,IAAI,KAAK,CACb,sCAAsC,OAAO,CAAC,IAAI,8CAA8C,CACjG,CAAC;AACJ,CAAC;AAED,SAAS,eAAe,CAAC,KAAc;IACrC,IAAI,OAAO,KAAK,KAAK,QAAQ,IAAI,KAAK,KAAK,IAAI,EAAE,CAAC;QAChD,OAAO,KAAK,CAAC;IACf,CAAC;IAED,MAAM,SAAS,GAAG,KAAgC,CAAC;IACnD,OAAO,CACL,OAAO,SAAS,CAAC,aAAa,KAAK,UAAU;QAC7C,OAAO,SAAS,CAAC,eAAe,KAAK,UAAU,CAChD,CAAC;AACJ,CAAC;AAED,MAAM,UAAU,UAAU;IACxB,IAAI,aAAa,KAAK,IAAI,EAAE,CAAC;QAC3B,OAAO,aAAa,CAAC;IACvB,CAAC;IAED,MAAM,WAAW,GAAG,cAAc,EAAE,CAAC;IAErC,IAAI,MAAe,CAAC;IACpB,IAAI,CAAC;QACH,MAAM,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC;IAChC,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,MAAM,OAAO,GAAG,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QACvE,MAAM,IAAI,KAAK,CACb,iCAAiC,WAAW,gEAAgE,OAAO,EAAE,CACtH,CAAC;IACJ,CAAC;IAED,IAAI,CAAC,eAAe,CAAC,MAAM,CAAC,EAAE,CAAC;QAC7B,MAAM,IAAI,KAAK,CACb,sCAAsC,WAAW,6DAA6D,CAC/G,CAAC;IACJ,CAAC;IAED,aAAa,GAAG,MAAM,CAAC;IACvB,OAAO,aAAa,CAAC;AACvB,CAAC"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "pyannote-cpp-node",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.5.0",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"main": "./dist/index.js",
|
|
6
6
|
"types": "./dist/index.d.ts",
|
|
@@ -17,8 +17,8 @@
|
|
|
17
17
|
"access": "public"
|
|
18
18
|
},
|
|
19
19
|
"optionalDependencies": {
|
|
20
|
-
"@pyannote-cpp-node/darwin-arm64": "0.
|
|
21
|
-
"@pyannote-cpp-node/darwin-x64": "0.
|
|
20
|
+
"@pyannote-cpp-node/darwin-arm64": "0.5.0",
|
|
21
|
+
"@pyannote-cpp-node/darwin-x64": "0.5.0"
|
|
22
22
|
},
|
|
23
23
|
"devDependencies": {
|
|
24
24
|
"typescript": "^5.7.0"
|