@openvole/paw-stt 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +45 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +144 -0
- package/dist/index.js.map +1 -0
- package/package.json +29 -0
- package/vole-paw.json +18 -0
package/README.md
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
# @openvole/paw-stt
|
|
2
|
+
|
|
3
|
+
[npm](https://www.npmjs.com/package/@openvole/paw-stt)
|
|
4
|
+
|
|
5
|
+
Speech-to-text tool Paw for OpenVole. Transcribe audio files using the OpenAI Whisper API.
|
|
6
|
+
|
|
7
|
+
## Install
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
npm install @openvole/paw-stt
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Configuration
|
|
14
|
+
|
|
15
|
+
Add to your Vole config:
|
|
16
|
+
|
|
17
|
+
```json
|
|
18
|
+
{
|
|
19
|
+
"paws": ["@openvole/paw-stt"]
|
|
20
|
+
}
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Environment Variables
|
|
24
|
+
|
|
25
|
+
| Variable | Required | Default | Description |
|
|
26
|
+
|---|---|---|---|
|
|
27
|
+
| `OPENAI_API_KEY` | Yes | — | OpenAI API key for Whisper access |
|
|
28
|
+
| `OPENAI_STT_MODEL` | No | `whisper-1` | Whisper model to use |
|
|
29
|
+
|
|
30
|
+
## Tool
|
|
31
|
+
|
|
32
|
+
### `stt_transcribe`
|
|
33
|
+
|
|
34
|
+
Transcribe an audio file to text using OpenAI Whisper.
|
|
35
|
+
|
|
36
|
+
**Parameters:**
|
|
37
|
+
|
|
38
|
+
- `file_path` (string, required) — Absolute path to the audio file.
|
|
39
|
+
- `language` (string, optional) — ISO-639-1 language code (e.g. `"en"`, `"fr"`). Whisper auto-detects if omitted.
|
|
40
|
+
|
|
41
|
+
**Supported formats:** mp3, wav, m4a, webm, mp4, mpeg, mpga, oga, ogg, flac.
|
|
42
|
+
|
|
43
|
+
## License
|
|
44
|
+
|
|
45
|
+
MIT
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
// src/index.ts
|
|
2
|
+
import { definePaw } from "@openvole/paw-sdk";
|
|
3
|
+
|
|
4
|
+
// src/paw.ts
|
|
5
|
+
import { z } from "@openvole/paw-sdk";
|
|
6
|
+
|
|
7
|
+
// src/whisper.ts
|
|
8
|
+
import { readFile, access } from "fs/promises";
|
|
9
|
+
import { basename, extname } from "path";
|
|
10
|
+
// src/whisper.ts
// Audio file extensions accepted by the OpenAI transcription endpoint.
var SUPPORTED_EXTENSIONS = /* @__PURE__ */ new Set([
  ".mp3",
  ".wav",
  ".m4a",
  ".webm",
  ".mp4",
  ".mpeg",
  ".mpga",
  ".oga",
  ".ogg",
  ".flac"
]);
// Maps a file extension to the MIME type attached to the multipart upload.
var MIME_TYPES = {
  ".mp3": "audio/mpeg",
  ".wav": "audio/wav",
  ".m4a": "audio/mp4",
  ".webm": "audio/webm",
  ".mp4": "audio/mp4",
  ".mpeg": "audio/mpeg",
  ".mpga": "audio/mpeg",
  ".oga": "audio/ogg",
  ".ogg": "audio/ogg",
  ".flac": "audio/flac"
};
/**
 * Minimal client for the OpenAI audio transcription (Whisper) REST endpoint.
 */
var WhisperClient = class {
  apiKey;
  model;
  /**
   * @param {string} apiKey - OpenAI API key, sent as a Bearer token.
   * @param {string} [model="whisper-1"] - Whisper model identifier.
   */
  constructor(apiKey, model = "whisper-1") {
    this.apiKey = apiKey;
    this.model = model;
  }
  /**
   * Transcribe an audio file to text.
   *
   * @param {string} filePath - Path to the audio file on disk.
   * @param {string} [language] - Optional ISO-639-1 hint; Whisper auto-detects when omitted.
   * @returns {Promise<string>} The transcribed text.
   * @throws {Error} `File not found: ...` when the file cannot be read,
   *   `Unsupported audio format ...` for an unrecognized extension, or
   *   `Whisper API error (...)` for a non-2xx HTTP response.
   */
  async transcribe(filePath, language) {
    try {
      await access(filePath);
    } catch {
      throw new Error(`File not found: ${filePath}`);
    }
    const ext = extname(filePath).toLowerCase();
    if (!SUPPORTED_EXTENSIONS.has(ext)) {
      throw new Error(
        `Unsupported audio format "${ext}". Supported formats: ${[...SUPPORTED_EXTENSIONS].map((e) => e.slice(1)).join(", ")}`
      );
    }
    // Guard the read as well as the access() probe: the file can disappear
    // between the two calls (TOCTOU). Without this guard the raw ENOENT
    // surfaced upstream as a misleading "transcription failed" error.
    let fileBuffer;
    try {
      fileBuffer = await readFile(filePath);
    } catch {
      throw new Error(`File not found: ${filePath}`);
    }
    const mimeType = MIME_TYPES[ext] ?? "application/octet-stream";
    const fileName = basename(filePath);
    const formData = new FormData();
    formData.append("file", new Blob([fileBuffer], { type: mimeType }), fileName);
    formData.append("model", this.model);
    if (language) {
      formData.append("language", language);
    }
    const response = await fetch(
      "https://api.openai.com/v1/audio/transcriptions",
      {
        method: "POST",
        headers: {
          Authorization: `Bearer ${this.apiKey}`
        },
        body: formData
      }
    );
    if (!response.ok) {
      const body = await response.text();
      throw new Error(
        `Whisper API error (${response.status}): ${body}`
      );
    }
    const json = await response.json();
    // Defend against a malformed success response: previously a missing
    // "text" field silently resolved to undefined.
    if (typeof json.text !== "string") {
      throw new Error("Whisper API error: response is missing the transcription text");
    }
    return json.text;
  }
};
|
|
82
|
+
|
|
83
|
+
// src/paw.ts
// Shared Whisper client; created by onLoad, torn down by onUnload.
var whisper;
// Returns the initialized client, or throws if the paw lifecycle has not run.
function getWhisper() {
  if (whisper) {
    return whisper;
  }
  throw new Error("WhisperClient not initialized \u2014 onLoad has not been called");
}
// Paw definition: a single speech-to-text tool backed by OpenAI Whisper.
var paw = {
  name: "@openvole/paw-stt",
  version: "0.1.0",
  description: "Speech-to-text tool using OpenAI Whisper",
  brain: false,
  tools: [
    {
      name: "stt_transcribe",
      description: "Transcribe an audio file to text using OpenAI Whisper",
      parameters: z.object({
        file_path: z.string().describe("Absolute path to the audio file to transcribe"),
        language: z.string().optional().describe('ISO-639-1 language code (e.g. "en", "fr", "de"). Optional \u2014 Whisper auto-detects if omitted')
      }),
      // Runs one transcription, mapping client failures onto tool-facing errors.
      async execute(params) {
        const { file_path, language } = params;
        try {
          const transcript = await getWhisper().transcribe(file_path, language);
          return { text: transcript, file_path, language: language ?? null };
        } catch (cause) {
          const reason = cause instanceof Error ? cause.message : String(cause);
          // Pass the well-known validation errors through; wrap anything else.
          if (reason.startsWith("File not found")) {
            throw new Error(`File not found: ${file_path}`);
          }
          if (reason.startsWith("Unsupported audio format")) {
            throw new Error(reason);
          }
          throw new Error(`Whisper transcription failed: ${reason}`);
        }
      }
    }
  ],
  // Validates configuration and constructs the shared client.
  async onLoad() {
    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey) {
      throw new Error("[paw-stt] OPENAI_API_KEY environment variable is required");
    }
    const model = process.env.OPENAI_STT_MODEL ?? "whisper-1";
    whisper = new WhisperClient(apiKey, model);
    console.log(`[paw-stt] loaded \u2014 model: ${model}`);
  },
  // Drops the client so a reload starts from a clean state.
  async onUnload() {
    whisper = undefined;
    console.log("[paw-stt] unloaded");
  }
};
|
|
138
|
+
|
|
139
|
+
// src/index.ts
// Package entry point: hand the paw definition to the SDK factory and
// expose the result as the module's default export.
var sttPaw = definePaw(paw);
export { sttPaw as default };
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts","../src/paw.ts","../src/whisper.ts"],"sourcesContent":["import { definePaw } from '@openvole/paw-sdk'\nimport { paw } from './paw.js'\n\nexport default definePaw(paw)\n","import { z, type PawDefinition } from '@openvole/paw-sdk'\nimport { WhisperClient } from './whisper.js'\n\nlet whisper: WhisperClient | undefined\n\nfunction getWhisper(): WhisperClient {\n\tif (!whisper) {\n\t\tthrow new Error('WhisperClient not initialized — onLoad has not been called')\n\t}\n\treturn whisper\n}\n\nexport const paw: PawDefinition = {\n\tname: '@openvole/paw-stt',\n\tversion: '0.1.0',\n\tdescription: 'Speech-to-text tool using OpenAI Whisper',\n\tbrain: false,\n\n\ttools: [\n\t\t{\n\t\t\tname: 'stt_transcribe',\n\t\t\tdescription: 'Transcribe an audio file to text using OpenAI Whisper',\n\t\t\tparameters: z.object({\n\t\t\t\tfile_path: z.string().describe('Absolute path to the audio file to transcribe'),\n\t\t\t\tlanguage: z\n\t\t\t\t\t.string()\n\t\t\t\t\t.optional()\n\t\t\t\t\t.describe('ISO-639-1 language code (e.g. \"en\", \"fr\", \"de\"). Optional — Whisper auto-detects if omitted'),\n\t\t\t}),\n\t\t\tasync execute(params: unknown) {\n\t\t\t\tconst { file_path, language } = params as {\n\t\t\t\t\tfile_path: string\n\t\t\t\t\tlanguage?: string\n\t\t\t\t}\n\n\t\t\t\ttry {\n\t\t\t\t\tconst text = await getWhisper().transcribe(file_path, language)\n\t\t\t\t\treturn { text, file_path, language: language ?? null }\n\t\t\t\t} catch (err) {\n\t\t\t\t\tconst message =\n\t\t\t\t\t\terr instanceof Error ? 
err.message : String(err)\n\n\t\t\t\t\tif (message.startsWith('File not found')) {\n\t\t\t\t\t\tthrow new Error(`File not found: ${file_path}`)\n\t\t\t\t\t}\n\t\t\t\t\tif (message.startsWith('Unsupported audio format')) {\n\t\t\t\t\t\tthrow new Error(message)\n\t\t\t\t\t}\n\t\t\t\t\tthrow new Error(`Whisper transcription failed: ${message}`)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t],\n\n\tasync onLoad() {\n\t\tconst apiKey = process.env.OPENAI_API_KEY\n\t\tif (!apiKey) {\n\t\t\tthrow new Error(\n\t\t\t\t'[paw-stt] OPENAI_API_KEY environment variable is required',\n\t\t\t)\n\t\t}\n\n\t\tconst model = process.env.OPENAI_STT_MODEL ?? 'whisper-1'\n\t\twhisper = new WhisperClient(apiKey, model)\n\t\tconsole.log(`[paw-stt] loaded — model: ${model}`)\n\t},\n\n\tasync onUnload() {\n\t\twhisper = undefined\n\t\tconsole.log('[paw-stt] unloaded')\n\t},\n}\n","import { readFile, access } from 'node:fs/promises'\nimport { basename, extname } from 'node:path'\n\nconst SUPPORTED_EXTENSIONS = new Set([\n\t'.mp3',\n\t'.wav',\n\t'.m4a',\n\t'.webm',\n\t'.mp4',\n\t'.mpeg',\n\t'.mpga',\n\t'.oga',\n\t'.ogg',\n\t'.flac',\n])\n\nconst MIME_TYPES: Record<string, string> = {\n\t'.mp3': 'audio/mpeg',\n\t'.wav': 'audio/wav',\n\t'.m4a': 'audio/mp4',\n\t'.webm': 'audio/webm',\n\t'.mp4': 'audio/mp4',\n\t'.mpeg': 'audio/mpeg',\n\t'.mpga': 'audio/mpeg',\n\t'.oga': 'audio/ogg',\n\t'.ogg': 'audio/ogg',\n\t'.flac': 'audio/flac',\n}\n\nexport class WhisperClient {\n\tprivate apiKey: string\n\tprivate model: string\n\n\tconstructor(apiKey: string, model = 'whisper-1') {\n\t\tthis.apiKey = apiKey\n\t\tthis.model = model\n\t}\n\n\tasync transcribe(filePath: string, language?: string): Promise<string> {\n\t\t// Validate file exists\n\t\ttry {\n\t\t\tawait access(filePath)\n\t\t} catch {\n\t\t\tthrow new Error(`File not found: ${filePath}`)\n\t\t}\n\n\t\t// Validate file extension\n\t\tconst ext = extname(filePath).toLowerCase()\n\t\tif (!SUPPORTED_EXTENSIONS.has(ext)) {\n\t\t\tthrow new 
Error(\n\t\t\t\t`Unsupported audio format \"${ext}\". Supported formats: ${[...SUPPORTED_EXTENSIONS].map((e) => e.slice(1)).join(', ')}`,\n\t\t\t)\n\t\t}\n\n\t\tconst fileBuffer = await readFile(filePath)\n\t\tconst mimeType = MIME_TYPES[ext] ?? 'application/octet-stream'\n\t\tconst fileName = basename(filePath)\n\n\t\tconst formData = new FormData()\n\t\tformData.append('file', new Blob([fileBuffer], { type: mimeType }), fileName)\n\t\tformData.append('model', this.model)\n\t\tif (language) {\n\t\t\tformData.append('language', language)\n\t\t}\n\n\t\tconst response = await fetch(\n\t\t\t'https://api.openai.com/v1/audio/transcriptions',\n\t\t\t{\n\t\t\t\tmethod: 'POST',\n\t\t\t\theaders: {\n\t\t\t\t\tAuthorization: `Bearer ${this.apiKey}`,\n\t\t\t\t},\n\t\t\t\tbody: formData,\n\t\t\t},\n\t\t)\n\n\t\tif (!response.ok) {\n\t\t\tconst body = await response.text()\n\t\t\tthrow new Error(\n\t\t\t\t`Whisper API error (${response.status}): ${body}`,\n\t\t\t)\n\t\t}\n\n\t\tconst json = (await response.json()) as { text: string }\n\t\treturn 
json.text\n\t}\n}\n"],"mappings":";AAAA,SAAS,iBAAiB;;;ACA1B,SAAS,SAA6B;;;ACAtC,SAAS,UAAU,cAAc;AACjC,SAAS,UAAU,eAAe;AAElC,IAAM,uBAAuB,oBAAI,IAAI;AAAA,EACpC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACD,CAAC;AAED,IAAM,aAAqC;AAAA,EAC1C,QAAQ;AAAA,EACR,QAAQ;AAAA,EACR,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,SAAS;AAAA,EACT,QAAQ;AAAA,EACR,QAAQ;AAAA,EACR,SAAS;AACV;AAEO,IAAM,gBAAN,MAAoB;AAAA,EAClB;AAAA,EACA;AAAA,EAER,YAAY,QAAgB,QAAQ,aAAa;AAChD,SAAK,SAAS;AACd,SAAK,QAAQ;AAAA,EACd;AAAA,EAEA,MAAM,WAAW,UAAkB,UAAoC;AAEtE,QAAI;AACH,YAAM,OAAO,QAAQ;AAAA,IACtB,QAAQ;AACP,YAAM,IAAI,MAAM,mBAAmB,QAAQ,EAAE;AAAA,IAC9C;AAGA,UAAM,MAAM,QAAQ,QAAQ,EAAE,YAAY;AAC1C,QAAI,CAAC,qBAAqB,IAAI,GAAG,GAAG;AACnC,YAAM,IAAI;AAAA,QACT,6BAA6B,GAAG,yBAAyB,CAAC,GAAG,oBAAoB,EAAE,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,EAAE,KAAK,IAAI,CAAC;AAAA,MACrH;AAAA,IACD;AAEA,UAAM,aAAa,MAAM,SAAS,QAAQ;AAC1C,UAAM,WAAW,WAAW,GAAG,KAAK;AACpC,UAAM,WAAW,SAAS,QAAQ;AAElC,UAAM,WAAW,IAAI,SAAS;AAC9B,aAAS,OAAO,QAAQ,IAAI,KAAK,CAAC,UAAU,GAAG,EAAE,MAAM,SAAS,CAAC,GAAG,QAAQ;AAC5E,aAAS,OAAO,SAAS,KAAK,KAAK;AACnC,QAAI,UAAU;AACb,eAAS,OAAO,YAAY,QAAQ;AAAA,IACrC;AAEA,UAAM,WAAW,MAAM;AAAA,MACtB;AAAA,MACA;AAAA,QACC,QAAQ;AAAA,QACR,SAAS;AAAA,UACR,eAAe,UAAU,KAAK,MAAM;AAAA,QACrC;AAAA,QACA,MAAM;AAAA,MACP;AAAA,IACD;AAEA,QAAI,CAAC,SAAS,IAAI;AACjB,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,YAAM,IAAI;AAAA,QACT,sBAAsB,SAAS,MAAM,MAAM,IAAI;AAAA,MAChD;AAAA,IACD;AAEA,UAAM,OAAQ,MAAM,SAAS,KAAK;AAClC,WAAO,KAAK;AAAA,EACb;AACD;;;ADnFA,IAAI;AAEJ,SAAS,aAA4B;AACpC,MAAI,CAAC,SAAS;AACb,UAAM,IAAI,MAAM,iEAA4D;AAAA,EAC7E;AACA,SAAO;AACR;AAEO,IAAM,MAAqB;AAAA,EACjC,MAAM;AAAA,EACN,SAAS;AAAA,EACT,aAAa;AAAA,EACb,OAAO;AAAA,EAEP,OAAO;AAAA,IACN;AAAA,MACC,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY,EAAE,OAAO;AAAA,QACpB,WAAW,EAAE,OAAO,EAAE,SAAS,+CAA+C;AAAA,QAC9E,UAAU,EACR,OAAO,EACP,SAAS,EACT,SAAS,kGAA6F;AAAA,MACzG,CAAC;AAAA,MACD,MAAM,QAAQ,QAAiB;AAC9B,cAAM,EAAE,WAAW,SAAS,IAAI;AAKhC,YAAI;AACH,gBAAM,OAAO,MAAM,WAAW,EAAE,WAAW,WAAW,QAAQ;AAC9D,iBAAO,EAAE,MA
AM,WAAW,UAAU,YAAY,KAAK;AAAA,QACtD,SAAS,KAAK;AACb,gBAAM,UACL,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAEhD,cAAI,QAAQ,WAAW,gBAAgB,GAAG;AACzC,kBAAM,IAAI,MAAM,mBAAmB,SAAS,EAAE;AAAA,UAC/C;AACA,cAAI,QAAQ,WAAW,0BAA0B,GAAG;AACnD,kBAAM,IAAI,MAAM,OAAO;AAAA,UACxB;AACA,gBAAM,IAAI,MAAM,iCAAiC,OAAO,EAAE;AAAA,QAC3D;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EAEA,MAAM,SAAS;AACd,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACZ,YAAM,IAAI;AAAA,QACT;AAAA,MACD;AAAA,IACD;AAEA,UAAM,QAAQ,QAAQ,IAAI,oBAAoB;AAC9C,cAAU,IAAI,cAAc,QAAQ,KAAK;AACzC,YAAQ,IAAI,kCAA6B,KAAK,EAAE;AAAA,EACjD;AAAA,EAEA,MAAM,WAAW;AAChB,cAAU;AACV,YAAQ,IAAI,oBAAoB;AAAA,EACjC;AACD;;;ADpEA,IAAO,gBAAQ,UAAU,GAAG;","names":[]}
|
package/package.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@openvole/paw-stt",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Speech-to-text tool Paw for OpenVole — transcribe audio with OpenAI Whisper",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"scripts": {
|
|
8
|
+
"build": "tsup",
|
|
9
|
+
"typecheck": "tsc --noEmit"
|
|
10
|
+
},
|
|
11
|
+
"devDependencies": {
|
|
12
|
+
"@types/node": "^22.0.0",
|
|
13
|
+
"tsup": "^8.3.0",
|
|
14
|
+
"typescript": "^5.6.0",
|
|
15
|
+
"@openvole/paw-sdk": "^0.3.0"
|
|
16
|
+
},
|
|
17
|
+
"engines": {
|
|
18
|
+
"node": ">=20.0.0"
|
|
19
|
+
},
|
|
20
|
+
"files": [
|
|
21
|
+
"dist",
|
|
22
|
+
"vole-paw.json",
|
|
23
|
+
"README.md"
|
|
24
|
+
],
|
|
25
|
+
"license": "MIT",
|
|
26
|
+
"peerDependencies": {
|
|
27
|
+
"@openvole/paw-sdk": "^0.3.0"
|
|
28
|
+
}
|
|
29
|
+
}
|
package/vole-paw.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@openvole/paw-stt",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Speech-to-text tool Paw using OpenAI Whisper",
|
|
5
|
+
"entry": "./dist/index.js",
|
|
6
|
+
"brain": false,
|
|
7
|
+
"inProcess": false,
|
|
8
|
+
"transport": "ipc",
|
|
9
|
+
"tools": [
|
|
10
|
+
{ "name": "stt_transcribe", "description": "Transcribe an audio file to text using OpenAI Whisper" }
|
|
11
|
+
],
|
|
12
|
+
"permissions": {
|
|
13
|
+
"network": ["api.openai.com"],
|
|
14
|
+
"listen": [],
|
|
15
|
+
"filesystem": [],
|
|
16
|
+
"env": ["OPENAI_API_KEY", "OPENAI_STT_MODEL"]
|
|
17
|
+
}
|
|
18
|
+
}
|