electron-native-speech 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/backend-loader.d.ts +11 -0
- package/dist/backend-loader.d.ts.map +1 -0
- package/dist/backend-loader.js +40 -0
- package/dist/backend-loader.js.map +1 -0
- package/dist/errors.d.ts +10 -0
- package/dist/errors.d.ts.map +1 -0
- package/dist/errors.js +24 -0
- package/dist/errors.js.map +1 -0
- package/dist/index.d.ts +46 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +70 -0
- package/dist/index.js.map +1 -0
- package/dist/types.d.ts +78 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +4 -0
- package/dist/types.js.map +1 -0
- package/package.json +43 -0
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { ISpeechBackend } from "./types";
/**
 * Loads and returns the platform backend singleton.
 * The backend module is resolved at runtime so that the core package
 * does not hard-code a dependency on any platform-specific code.
 */
export declare function getBackend(): ISpeechBackend;
/** Replace the backend — useful for testing */
export declare function setBackend(backend: ISpeechBackend): void;
/** Dispose the current backend (best-effort, errors ignored) and clear the singleton. */
export declare function resetBackend(): void;
//# sourceMappingURL=backend-loader.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"backend-loader.d.ts","sourceRoot":"","sources":["../src/backend-loader.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,SAAS,CAAA;AAK7C;;;;GAIG;AACH,wBAAgB,UAAU,IAAI,cAAc,CAuB3C;AAED,+CAA+C;AAC/C,wBAAgB,UAAU,CAAC,OAAO,EAAE,cAAc,GAAG,IAAI,CAExD;AAED,wBAAgB,YAAY,IAAI,IAAI,CAKnC"}
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getBackend = getBackend;
exports.setBackend = setBackend;
exports.resetBackend = resetBackend;
const errors_1 = require("./errors");
// Lazily-created singleton; null until first getBackend() or after resetBackend().
let _backend = null;
/**
 * Loads and returns the platform backend singleton.
 * The backend module is resolved at runtime so that the core package
 * does not hard-code a dependency on any platform-specific code.
 *
 * Throws a SpeechRecognitionError with code "unavailable" when the platform
 * is unsupported or the backend package cannot be loaded.
 */
function getBackend() {
    if (_backend)
        return _backend;
    const platform = process.platform;
    if (platform === "darwin") {
        try {
            // eslint-disable-next-line @typescript-eslint/no-require-imports
            const mod = require("@electron-native-speech/backend-macos");
            _backend = new mod.MacOSSpeechBackend();
            return _backend;
        }
        catch (err) {
            // Forward the underlying error as `details`: the failure may be a
            // load-time crash inside an installed backend, not just a missing
            // install — discarding it made that impossible to diagnose.
            (0, errors_1.throwSpeechError)("unavailable", "macOS speech backend not found. Run: npm install @electron-native-speech/backend-macos", err);
        }
    }
    (0, errors_1.throwSpeechError)("unavailable", `Platform "${platform}" is not yet supported. electron-native-speech currently supports macOS.`);
}
/** Replace the backend — useful for testing */
function setBackend(backend) {
    _backend = backend;
}
/** Dispose the current backend (best-effort, dispose errors ignored) and clear the singleton. */
function resetBackend() {
    if (_backend) {
        _backend.dispose().catch(() => { });
    }
    _backend = null;
}
//# sourceMappingURL=backend-loader.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"backend-loader.js","sourceRoot":"","sources":["../src/backend-loader.ts"],"names":[],"mappings":";;AAUA,gCAuBC;AAGD,gCAEC;AAED,oCAKC;AA5CD,qCAA2C;AAE3C,IAAI,QAAQ,GAA0B,IAAI,CAAA;AAE1C;;;;GAIG;AACH,SAAgB,UAAU;IACxB,IAAI,QAAQ;QAAE,OAAO,QAAQ,CAAA;IAE7B,MAAM,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAA;IAEjC,IAAI,QAAQ,KAAK,QAAQ,EAAE,CAAC;QAC1B,IAAI,CAAC;YACH,iEAAiE;YACjE,MAAM,GAAG,GAAG,OAAO,CAAC,uCAAuC,CAAC,CAAA;YAC5D,QAAQ,GAAG,IAAI,GAAG,CAAC,kBAAkB,EAAoB,CAAA;YACzD,OAAO,QAAQ,CAAA;QACjB,CAAC;QAAC,MAAM,CAAC;YACP,IAAA,yBAAgB,EACd,aAAa,EACb,wFAAwF,CACzF,CAAA;QACH,CAAC;IACH,CAAC;IAED,IAAA,yBAAgB,EACd,aAAa,EACb,aAAa,QAAQ,0EAA0E,CAChG,CAAA;AACH,CAAC;AAED,+CAA+C;AAC/C,SAAgB,UAAU,CAAC,OAAuB;IAChD,QAAQ,GAAG,OAAO,CAAA;AACpB,CAAC;AAED,SAAgB,YAAY;IAC1B,IAAI,QAAQ,EAAE,CAAC;QACb,QAAQ,CAAC,OAAO,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE,GAAE,CAAC,CAAC,CAAA;IACpC,CAAC;IACD,QAAQ,GAAG,IAAI,CAAA;AACjB,CAAC"}
|
package/dist/errors.d.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { SpeechError, SpeechErrorCode } from "./types";
/** Error subclass that carries a structured SpeechError: machine-readable code plus optional details. */
export declare class SpeechRecognitionError extends Error {
    readonly code: SpeechErrorCode;
    readonly details?: unknown;
    constructor(error: SpeechError);
    /** Convert back to the plain SpeechError object shape. */
    toSpeechError(): SpeechError;
}
/** Build a plain SpeechError object from its parts. */
export declare function makeSpeechError(code: SpeechErrorCode, message: string, details?: unknown): SpeechError;
/** Throw a SpeechRecognitionError assembled from the given code/message/details. */
export declare function throwSpeechError(code: SpeechErrorCode, message: string, details?: unknown): never;
//# sourceMappingURL=errors.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"errors.d.ts","sourceRoot":"","sources":["../src/errors.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,eAAe,EAAE,MAAM,SAAS,CAAA;AAE3D,qBAAa,sBAAuB,SAAQ,KAAK;IAC/C,QAAQ,CAAC,IAAI,EAAE,eAAe,CAAA;IAC9B,QAAQ,CAAC,OAAO,CAAC,EAAE,OAAO,CAAA;gBAEd,KAAK,EAAE,WAAW;IAO9B,aAAa,IAAI,WAAW;CAG7B;AAED,wBAAgB,eAAe,CAC7B,IAAI,EAAE,eAAe,EACrB,OAAO,EAAE,MAAM,EACf,OAAO,CAAC,EAAE,OAAO,GAChB,WAAW,CAEb;AAED,wBAAgB,gBAAgB,CAC9B,IAAI,EAAE,eAAe,EACrB,OAAO,EAAE,MAAM,EACf,OAAO,CAAC,EAAE,OAAO,GAChB,KAAK,CAEP"}
|
package/dist/errors.js
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.SpeechRecognitionError = void 0;
|
|
4
|
+
exports.makeSpeechError = makeSpeechError;
|
|
5
|
+
exports.throwSpeechError = throwSpeechError;
|
|
6
|
+
class SpeechRecognitionError extends Error {
|
|
7
|
+
constructor(error) {
|
|
8
|
+
super(error.message);
|
|
9
|
+
this.name = "SpeechRecognitionError";
|
|
10
|
+
this.code = error.code;
|
|
11
|
+
this.details = error.details;
|
|
12
|
+
}
|
|
13
|
+
toSpeechError() {
|
|
14
|
+
return { code: this.code, message: this.message, details: this.details };
|
|
15
|
+
}
|
|
16
|
+
}
|
|
17
|
+
exports.SpeechRecognitionError = SpeechRecognitionError;
|
|
18
|
+
function makeSpeechError(code, message, details) {
|
|
19
|
+
return { code, message, details };
|
|
20
|
+
}
|
|
21
|
+
function throwSpeechError(code, message, details) {
|
|
22
|
+
throw new SpeechRecognitionError({ code, message, details });
|
|
23
|
+
}
|
|
24
|
+
//# sourceMappingURL=errors.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"errors.js","sourceRoot":"","sources":["../src/errors.ts"],"names":[],"mappings":";;;AAkBA,0CAMC;AAED,4CAMC;AA9BD,MAAa,sBAAuB,SAAQ,KAAK;IAI/C,YAAY,KAAkB;QAC5B,KAAK,CAAC,KAAK,CAAC,OAAO,CAAC,CAAA;QACpB,IAAI,CAAC,IAAI,GAAG,wBAAwB,CAAA;QACpC,IAAI,CAAC,IAAI,GAAG,KAAK,CAAC,IAAI,CAAA;QACtB,IAAI,CAAC,OAAO,GAAG,KAAK,CAAC,OAAO,CAAA;IAC9B,CAAC;IAED,aAAa;QACX,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,IAAI,EAAE,OAAO,EAAE,IAAI,CAAC,OAAO,EAAE,OAAO,EAAE,IAAI,CAAC,OAAO,EAAE,CAAA;IAC1E,CAAC;CACF;AAdD,wDAcC;AAED,SAAgB,eAAe,CAC7B,IAAqB,EACrB,OAAe,EACf,OAAiB;IAEjB,OAAO,EAAE,IAAI,EAAE,OAAO,EAAE,OAAO,EAAE,CAAA;AACnC,CAAC;AAED,SAAgB,gBAAgB,CAC9B,IAAqB,EACrB,OAAe,EACf,OAAiB;IAEjB,MAAM,IAAI,sBAAsB,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,OAAO,EAAE,CAAC,CAAA;AAC9D,CAAC"}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
// Public type surface re-exported from ./types.
export type { TranscriptSegment, FileTranscriptionOptions, FileTranscriptionResult, SpeechSessionStartOptions, LiveSpeechResult, SpeechSessionState, SpeechSession, SpeechAvailability, SpeechError, SpeechErrorCode, ISpeechBackend, } from "./types";
// Error helpers re-exported for consumers.
export { SpeechRecognitionError, makeSpeechError } from "./errors";
// Backend injection hooks (primarily for tests).
export { setBackend, resetBackend } from "./backend-loader";
import type { FileTranscriptionOptions, FileTranscriptionResult, SpeechAvailability, SpeechSession } from "./types";
/**
 * Check whether speech recognition is available in the current environment.
 *
 * Always call this before transcribing. It validates platform support,
 * backend presence, and permission pre-conditions.
 *
 * @example
 * const av = await getSpeechAvailability()
 * if (!av.available) { console.error(av.reason); return }
 */
export declare function getSpeechAvailability(): Promise<SpeechAvailability>;
/**
 * Transcribe a local audio or video file using the native OS speech engine.
 *
 * - Runs entirely on-device — no network requests.
 * - Accepts any format AVFoundation can read (m4a, mp3, wav, mp4, mov, webm…).
 * - Files in unsupported containers are automatically converted to a
 *   temporary intermediate before recognition.
 *
 * @example
 * const result = await transcribeFile({ filePath: "/path/to/recording.m4a" })
 * for (const seg of result.segments) {
 *   console.log(`[${seg.startMs}ms] ${seg.text}`)
 * }
 */
export declare function transcribeFile(options: FileTranscriptionOptions): Promise<FileTranscriptionResult>;
/**
 * Create a live microphone transcription session.
 *
 * The session object provides fine-grained control over the recognition
 * lifecycle (start / stop / abort / dispose) and emits typed events.
 *
 * @example
 * const session = await createSpeechSession()
 * session.on("result", r => console.log(r.text))
 * await session.start({ locale: "en-US", interimResults: true })
 * // later…
 * await session.stop()
 * await session.dispose()
 */
export declare function createSpeechSession(): Promise<SpeechSession>;
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,YAAY,EACV,iBAAiB,EACjB,wBAAwB,EACxB,uBAAuB,EACvB,yBAAyB,EACzB,gBAAgB,EAChB,kBAAkB,EAClB,aAAa,EACb,kBAAkB,EAClB,WAAW,EACX,eAAe,EACf,cAAc,GACf,MAAM,SAAS,CAAA;AAEhB,OAAO,EAAE,sBAAsB,EAAE,eAAe,EAAE,MAAM,UAAU,CAAA;AAClE,OAAO,EAAE,UAAU,EAAE,YAAY,EAAE,MAAM,kBAAkB,CAAA;AAG3D,OAAO,KAAK,EAAE,wBAAwB,EAAE,uBAAuB,EAAE,kBAAkB,EAAE,aAAa,EAAE,MAAM,SAAS,CAAA;AAEnH;;;;;;;;;GASG;AACH,wBAAsB,qBAAqB,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAUzE;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAsB,cAAc,CAClC,OAAO,EAAE,wBAAwB,GAChC,OAAO,CAAC,uBAAuB,CAAC,CAElC;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAsB,mBAAmB,IAAI,OAAO,CAAC,aAAa,CAAC,CAElE"}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.resetBackend = exports.setBackend = exports.makeSpeechError = exports.SpeechRecognitionError = void 0;
exports.getSpeechAvailability = getSpeechAvailability;
exports.transcribeFile = transcribeFile;
exports.createSpeechSession = createSpeechSession;
var errors_1 = require("./errors");
Object.defineProperty(exports, "SpeechRecognitionError", { enumerable: true, get: function () { return errors_1.SpeechRecognitionError; } });
Object.defineProperty(exports, "makeSpeechError", { enumerable: true, get: function () { return errors_1.makeSpeechError; } });
var backend_loader_1 = require("./backend-loader");
Object.defineProperty(exports, "setBackend", { enumerable: true, get: function () { return backend_loader_1.setBackend; } });
Object.defineProperty(exports, "resetBackend", { enumerable: true, get: function () { return backend_loader_1.resetBackend; } });
const backend_loader_2 = require("./backend-loader");
/**
 * Check whether speech recognition is available in the current environment.
 *
 * Always call this before transcribing. It validates platform support,
 * backend presence, and permission pre-conditions. Never rejects: any
 * failure is reported as `{ available: false, reason }`.
 *
 * @example
 * const av = await getSpeechAvailability()
 * if (!av.available) { console.error(av.reason); return }
 */
async function getSpeechAvailability() {
    try {
        const backend = backend_loader_2.getBackend();
        return await backend.checkAvailability();
    }
    catch (err) {
        const reason = err instanceof Error ? err.message : String(err);
        return {
            available: false,
            platform: process.platform,
            reason,
        };
    }
}
/**
 * Transcribe a local audio or video file using the native OS speech engine.
 *
 * - Runs entirely on-device — no network requests.
 * - Accepts any format AVFoundation can read (m4a, mp3, wav, mp4, mov, webm…).
 * - Files in unsupported containers are automatically converted to a
 *   temporary intermediate before recognition.
 *
 * @example
 * const result = await transcribeFile({ filePath: "/path/to/recording.m4a" })
 * for (const seg of result.segments) {
 *   console.log(`[${seg.startMs}ms] ${seg.text}`)
 * }
 */
async function transcribeFile(options) {
    const backend = backend_loader_2.getBackend();
    return backend.transcribeFile(options);
}
/**
 * Create a live microphone transcription session.
 *
 * The session object provides fine-grained control over the recognition
 * lifecycle (start / stop / abort / dispose) and emits typed events.
 *
 * @example
 * const session = await createSpeechSession()
 * session.on("result", r => console.log(r.text))
 * await session.start({ locale: "en-US", interimResults: true })
 * // later…
 * await session.stop()
 * await session.dispose()
 */
async function createSpeechSession() {
    const backend = backend_loader_2.getBackend();
    return backend.createSession();
}
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;AA8BA,sDAUC;AAgBD,wCAIC;AAgBD,kDAEC;AAhED,mCAAkE;AAAzD,gHAAA,sBAAsB,OAAA;AAAE,yGAAA,eAAe,OAAA;AAChD,mDAA2D;AAAlD,4GAAA,UAAU,OAAA;AAAE,8GAAA,YAAY,OAAA;AAEjC,qDAA6C;AAG7C;;;;;;;;;GASG;AACI,KAAK,UAAU,qBAAqB;IACzC,IAAI,CAAC;QACH,OAAO,MAAM,IAAA,2BAAU,GAAE,CAAC,iBAAiB,EAAE,CAAA;IAC/C,CAAC;IAAC,OAAO,GAAY,EAAE,CAAC;QACtB,OAAO;YACL,SAAS,EAAE,KAAK;YAChB,QAAQ,EAAE,OAAO,CAAC,QAAQ;YAC1B,MAAM,EAAE,GAAG,YAAY,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;SACzD,CAAA;IACH,CAAC;AACH,CAAC;AAED;;;;;;;;;;;;;GAaG;AACI,KAAK,UAAU,cAAc,CAClC,OAAiC;IAEjC,OAAO,IAAA,2BAAU,GAAE,CAAC,cAAc,CAAC,OAAO,CAAC,CAAA;AAC7C,CAAC;AAED;;;;;;;;;;;;;GAaG;AACI,KAAK,UAAU,mBAAmB;IACvC,OAAO,IAAA,2BAAU,GAAE,CAAC,aAAa,EAAE,CAAA;AACrC,CAAC"}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
export type TranscriptSegment = {
    /** Unique segment identifier within the result */
    id: string;
    /** Start time in milliseconds from the beginning of the audio */
    startMs: number;
    /** End time in milliseconds from the beginning of the audio */
    endMs: number;
    /** Transcribed text for this segment */
    text: string;
    /** Recognition confidence 0–1, if available from the platform */
    confidence?: number;
};
export type FileTranscriptionOptions = {
    /** Absolute path to the audio or video file */
    filePath: string;
    /**
     * BCP-47 locale string, e.g. "en-US", "fr-FR".
     * Defaults to device locale when omitted.
     */
    locale?: string;
};
export type FileTranscriptionResult = {
    /** Recognized transcript segments */
    segments: TranscriptSegment[];
    /** Total audio duration in milliseconds, if determinable */
    durationMs?: number;
    /** Locale that was used for recognition */
    locale?: string;
};
export type SpeechSessionStartOptions = {
    /** BCP-47 locale string. Defaults to device locale. */
    locale?: string;
    /** Emit interim (partial) results in addition to final results */
    interimResults?: boolean;
    /** Keep recognizing after each phrase ends */
    continuous?: boolean;
};
export type LiveSpeechResult = {
    /** Recognized text (interim when isFinal is false) */
    text: string;
    /** True for a final result, false for an interim one */
    isFinal: boolean;
    /** Recognition confidence 0–1, if available from the platform */
    confidence?: number;
    /** Timestamp of the result in milliseconds since session start */
    timestampMs?: number;
};
/** Lifecycle states a live session moves through. */
export type SpeechSessionState = "idle" | "starting" | "listening" | "stopping" | "stopped" | "error";
/** Handle for a live microphone transcription session. */
export interface SpeechSession {
    /** Begin recognition with the given options. */
    start(options?: SpeechSessionStartOptions): Promise<void>;
    /** Stop recognizing. */
    stop(): Promise<void>;
    /** Cancel recognition immediately. */
    abort(): Promise<void>;
    /** Release all resources held by the session. */
    dispose(): Promise<void>;
    /** Subscribe to an event; each overload returns an unsubscribe function. */
    on(event: "result", listener: (result: LiveSpeechResult) => void): () => void;
    on(event: "error", listener: (error: SpeechError) => void): () => void;
    on(event: "state", listener: (state: SpeechSessionState) => void): () => void;
}
export type SpeechAvailability = {
    /** Whether speech recognition can be used in this environment */
    available: boolean;
    /** Value of process.platform at check time */
    platform: string;
    /** Supported recognition mode(s), if reported by the backend */
    mode?: "file" | "live" | "both";
    /** Human-readable explanation when available is false */
    reason?: string;
    details?: unknown;
};
/** Machine-readable error categories used across the SDK. */
export type SpeechErrorCode = "unavailable" | "permission-denied" | "unsupported-locale" | "unsupported-input" | "missing-audio-track" | "no-speech-detected" | "backend-failure" | "invalid-state" | "unknown";
export type SpeechError = {
    /** Machine-readable error category */
    code: SpeechErrorCode;
    /** Human-readable description */
    message: string;
    /** Optional underlying cause or extra context */
    details?: unknown;
};
/**
 * Internal interface that platform backends must implement.
 * Not part of the public SDK surface.
 */
export interface ISpeechBackend {
    /** Report whether recognition is usable in the current environment. */
    checkAvailability(): Promise<SpeechAvailability>;
    /** Transcribe a local file. */
    transcribeFile(options: FileTranscriptionOptions): Promise<FileTranscriptionResult>;
    /** Create a live transcription session. */
    createSession(): Promise<SpeechSession>;
    /** Release backend-wide resources. */
    dispose(): Promise<void>;
}
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAEA,MAAM,MAAM,iBAAiB,GAAG;IAC9B,kDAAkD;IAClD,EAAE,EAAE,MAAM,CAAA;IACV,iEAAiE;IACjE,OAAO,EAAE,MAAM,CAAA;IACf,+DAA+D;IAC/D,KAAK,EAAE,MAAM,CAAA;IACb,wCAAwC;IACxC,IAAI,EAAE,MAAM,CAAA;IACZ,iEAAiE;IACjE,UAAU,CAAC,EAAE,MAAM,CAAA;CACpB,CAAA;AAID,MAAM,MAAM,wBAAwB,GAAG;IACrC,+CAA+C;IAC/C,QAAQ,EAAE,MAAM,CAAA;IAChB;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,CAAA;CAChB,CAAA;AAED,MAAM,MAAM,uBAAuB,GAAG;IACpC,QAAQ,EAAE,iBAAiB,EAAE,CAAA;IAC7B,4DAA4D;IAC5D,UAAU,CAAC,EAAE,MAAM,CAAA;IACnB,2CAA2C;IAC3C,MAAM,CAAC,EAAE,MAAM,CAAA;CAChB,CAAA;AAID,MAAM,MAAM,yBAAyB,GAAG;IACtC,uDAAuD;IACvD,MAAM,CAAC,EAAE,MAAM,CAAA;IACf,kEAAkE;IAClE,cAAc,CAAC,EAAE,OAAO,CAAA;IACxB,8CAA8C;IAC9C,UAAU,CAAC,EAAE,OAAO,CAAA;CACrB,CAAA;AAED,MAAM,MAAM,gBAAgB,GAAG;IAC7B,IAAI,EAAE,MAAM,CAAA;IACZ,OAAO,EAAE,OAAO,CAAA;IAChB,UAAU,CAAC,EAAE,MAAM,CAAA;IACnB,kEAAkE;IAClE,WAAW,CAAC,EAAE,MAAM,CAAA;CACrB,CAAA;AAED,MAAM,MAAM,kBAAkB,GAC1B,MAAM,GACN,UAAU,GACV,WAAW,GACX,UAAU,GACV,SAAS,GACT,OAAO,CAAA;AAEX,MAAM,WAAW,aAAa;IAC5B,KAAK,CAAC,OAAO,CAAC,EAAE,yBAAyB,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IACzD,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC,CAAA;IACrB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC,CAAA;IACtB,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC,CAAA;IAExB,EAAE,CAAC,KAAK,EAAE,QAAQ,EAAE,QAAQ,EAAE,CAAC,MAAM,EAAE,gBAAgB,KAAK,IAAI,GAAG,MAAM,IAAI,CAAA;IAC7E,EAAE,CAAC,KAAK,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,WAAW,KAAK,IAAI,GAAG,MAAM,IAAI,CAAA;IACtE,EAAE,CAAC,KAAK,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,kBAAkB,KAAK,IAAI,GAAG,MAAM,IAAI,CAAA;CAC9E;AAID,MAAM,MAAM,kBAAkB,GAAG;IAC/B,SAAS,EAAE,OAAO,CAAA;IAClB,QAAQ,EAAE,MAAM,CAAA;IAChB,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAAA;IAC/B,yDAAyD;IACzD,MAAM,CAAC,EAAE,MAAM,CAAA;IACf,OAAO,CAAC,EAAE,OAAO,CAAA;CAClB,CAAA;AAID,MAAM,MAAM,eAAe,GACvB,aAAa,GACb,mBAAmB,GACnB,oBAAoB,GACpB,mBAAmB,GACnB,qBAAqB,GACrB,oBAAoB,GACpB,iBAAiB,GACjB,eAAe,GACf,SAAS,CAAA;AAEb,MAAM,MAAM,WAAW,GAAG;IACxB,IAAI,EAAE,eAAe,CAAA;IACrB,OAAO,EAAE,MAAM,CAAA;IACf,OAAO,CAAC,EAAE,OAAO,CAAA
;CAClB,CAAA;AAID;;;GAGG;AACH,MAAM,WAAW,cAAc;IAC7B,iBAAiB,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAAA;IAChD,cAAc,CAAC,OAAO,EAAE,wBAAwB,GAAG,OAAO,CAAC,uBAAuB,CAAC,CAAA;IACnF,aAAa,IAAI,OAAO,CAAC,aAAa,CAAC,CAAA;IACvC,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC,CAAA;CACzB"}
|
package/dist/types.js
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":";AAAA,iFAAiF"}
|
package/package.json
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "electron-native-speech",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Native OS speech transcription for Electron apps — fast, local, no cloud required",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"types": "dist/index.d.ts",
|
|
7
|
+
"exports": {
|
|
8
|
+
".": {
  "types": "./dist/index.d.ts",
  "require": "./dist/index.js"
}
|
|
12
|
+
},
|
|
13
|
+
"files": [
|
|
14
|
+
"dist/"
|
|
15
|
+
],
|
|
16
|
+
"scripts": {
|
|
17
|
+
"build": "tsc -p tsconfig.json",
|
|
18
|
+
"typecheck": "tsc -p tsconfig.json --noEmit",
|
|
19
|
+
"clean": "rm -rf dist"
|
|
20
|
+
},
|
|
21
|
+
"keywords": [
|
|
22
|
+
"electron",
|
|
23
|
+
"speech",
|
|
24
|
+
"transcription",
|
|
25
|
+
"macos",
|
|
26
|
+
"native",
|
|
27
|
+
"speech-recognition",
|
|
28
|
+
"offline",
|
|
29
|
+
"local"
|
|
30
|
+
],
|
|
31
|
+
"license": "MIT",
|
|
32
|
+
"devDependencies": {
|
|
33
|
+
"typescript": "^5.4.0"
|
|
34
|
+
},
|
|
35
|
+
"peerDependencies": {
|
|
36
|
+
"electron": ">=28.0.0"
|
|
37
|
+
},
|
|
38
|
+
"peerDependenciesMeta": {
|
|
39
|
+
"electron": {
|
|
40
|
+
"optional": true
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
}
|