opencode-smart-voice-notify 1.2.0 → 1.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +167 -403
- package/example.config.jsonc +356 -356
- package/index.js +978 -937
- package/package.json +52 -48
- package/util/ai-messages.js +205 -207
- package/util/config.js +441 -441
package/package.json
CHANGED
|
@@ -1,48 +1,52 @@
|
|
|
1
|
-
{
|
|
2
|
-
"name": "opencode-smart-voice-notify",
|
|
3
|
-
"version": "1.2.
|
|
4
|
-
"description": "Smart voice notification plugin for OpenCode with multiple TTS engines (ElevenLabs, Edge TTS, Windows SAPI) and intelligent reminder system",
|
|
5
|
-
"main": "index.js",
|
|
6
|
-
"type": "module",
|
|
7
|
-
"author": "MasuRii",
|
|
8
|
-
"license": "MIT",
|
|
9
|
-
"keywords": [
|
|
10
|
-
"opencode",
|
|
11
|
-
"opencode-plugins",
|
|
12
|
-
"plugin",
|
|
13
|
-
"notification",
|
|
14
|
-
"tts",
|
|
15
|
-
"text-to-speech",
|
|
16
|
-
"elevenlabs",
|
|
17
|
-
"edge-tts",
|
|
18
|
-
"sapi",
|
|
19
|
-
"voice",
|
|
20
|
-
"alert",
|
|
21
|
-
"smart"
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
"
|
|
25
|
-
"
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
"
|
|
31
|
-
"
|
|
32
|
-
|
|
33
|
-
"
|
|
34
|
-
"
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
"
|
|
38
|
-
"
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
"
|
|
42
|
-
"
|
|
43
|
-
"
|
|
44
|
-
},
|
|
45
|
-
"
|
|
46
|
-
"@
|
|
47
|
-
|
|
48
|
-
}
|
|
1
|
+
{
|
|
2
|
+
"name": "opencode-smart-voice-notify",
|
|
3
|
+
"version": "1.2.3",
|
|
4
|
+
"description": "Smart voice notification plugin for OpenCode with multiple TTS engines (ElevenLabs, Edge TTS, Windows SAPI), AI-generated dynamic messages, and intelligent reminder system",
|
|
5
|
+
"main": "index.js",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"author": "MasuRii",
|
|
8
|
+
"license": "MIT",
|
|
9
|
+
"keywords": [
|
|
10
|
+
"opencode",
|
|
11
|
+
"opencode-plugins",
|
|
12
|
+
"plugin",
|
|
13
|
+
"notification",
|
|
14
|
+
"tts",
|
|
15
|
+
"text-to-speech",
|
|
16
|
+
"elevenlabs",
|
|
17
|
+
"edge-tts",
|
|
18
|
+
"sapi",
|
|
19
|
+
"voice",
|
|
20
|
+
"alert",
|
|
21
|
+
"smart",
|
|
22
|
+
"ai",
|
|
23
|
+
"ai-generated",
|
|
24
|
+
"ollama",
|
|
25
|
+
"local-ai"
|
|
26
|
+
],
|
|
27
|
+
"files": [
|
|
28
|
+
"index.js",
|
|
29
|
+
"util/",
|
|
30
|
+
"assets/",
|
|
31
|
+
"example.config.jsonc"
|
|
32
|
+
],
|
|
33
|
+
"repository": {
|
|
34
|
+
"type": "git",
|
|
35
|
+
"url": "git+https://github.com/MasuRii/opencode-smart-voice-notify.git"
|
|
36
|
+
},
|
|
37
|
+
"bugs": {
|
|
38
|
+
"url": "https://github.com/MasuRii/opencode-smart-voice-notify/issues"
|
|
39
|
+
},
|
|
40
|
+
"homepage": "https://github.com/MasuRii/opencode-smart-voice-notify#readme",
|
|
41
|
+
"engines": {
|
|
42
|
+
"node": ">=18.0.0",
|
|
43
|
+
"bun": ">=1.0.0"
|
|
44
|
+
},
|
|
45
|
+
"dependencies": {
|
|
46
|
+
"@elevenlabs/elevenlabs-js": "^2.30.0",
|
|
47
|
+
"msedge-tts": "^2.0.3"
|
|
48
|
+
},
|
|
49
|
+
"peerDependencies": {
|
|
50
|
+
"@opencode-ai/plugin": "^1.1.8"
|
|
51
|
+
}
|
|
52
|
+
}
|
package/util/ai-messages.js
CHANGED
|
@@ -1,207 +1,205 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* AI Message Generation Module
|
|
3
|
-
*
|
|
4
|
-
* Generates dynamic notification messages using OpenAI-compatible AI endpoints.
|
|
5
|
-
* Supports: Ollama, LM Studio, LocalAI, vLLM, llama.cpp, Jan.ai, etc.
|
|
6
|
-
*
|
|
7
|
-
* Uses native fetch() - no external dependencies required.
|
|
8
|
-
*/
|
|
9
|
-
|
|
10
|
-
import { getTTSConfig } from './tts.js';
|
|
11
|
-
|
|
12
|
-
/**
|
|
13
|
-
* Generate a message using an OpenAI-compatible AI endpoint
|
|
14
|
-
* @param {string} promptType - The type of prompt ('idle', 'permission', 'question', 'idleReminder', 'permissionReminder', 'questionReminder')
|
|
15
|
-
* @param {object} context - Optional context about the notification (for future use)
|
|
16
|
-
* @returns {Promise<string|null>} Generated message or null if failed
|
|
17
|
-
*/
|
|
18
|
-
export async function generateAIMessage(promptType, context = {}) {
|
|
19
|
-
const config = getTTSConfig();
|
|
20
|
-
|
|
21
|
-
// Check if AI messages are enabled
|
|
22
|
-
if (!config.enableAIMessages) {
|
|
23
|
-
return null;
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
// Get the prompt for this type
|
|
27
|
-
|
|
28
|
-
if (!prompt) {
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
//
|
|
35
|
-
|
|
36
|
-
if (
|
|
37
|
-
|
|
38
|
-
}
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
if (!
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
if (
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
if (
|
|
103
|
-
|
|
104
|
-
}
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
*
|
|
115
|
-
* @param {string
|
|
116
|
-
* @param {
|
|
117
|
-
* @
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
const
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
};
|
|
195
|
-
}
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
}
|
|
206
|
-
|
|
207
|
-
export default { generateAIMessage, getSmartMessage, testAIConnection };
|
|
1
|
+
/**
|
|
2
|
+
* AI Message Generation Module
|
|
3
|
+
*
|
|
4
|
+
* Generates dynamic notification messages using OpenAI-compatible AI endpoints.
|
|
5
|
+
* Supports: Ollama, LM Studio, LocalAI, vLLM, llama.cpp, Jan.ai, etc.
|
|
6
|
+
*
|
|
7
|
+
* Uses native fetch() - no external dependencies required.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import { getTTSConfig } from './tts.js';
|
|
11
|
+
|
|
12
|
+
/**
 * Generate a message using an OpenAI-compatible AI endpoint
 * @param {string} promptType - The type of prompt ('idle', 'permission', 'question', 'idleReminder', 'permissionReminder', 'questionReminder')
 * @param {object} context - Optional context about the notification (reads `count` and `type`)
 * @returns {Promise<string|null>} Generated message, or null when disabled, unconfigured, or on any failure
 */
export async function generateAIMessage(promptType, context = {}) {
  const config = getTTSConfig();

  // Check if AI messages are enabled
  if (!config.enableAIMessages) {
    return null;
  }

  // Get the prompt for this type
  let prompt = config.aiPrompts?.[promptType];
  if (!prompt) {
    return null;
  }

  // Inject count context if multiple items
  if (context.count && context.count > 1) {
    // Use type-specific terminology
    let itemType = 'items';
    if (context.type === 'question') {
      itemType = 'questions';
    } else if (context.type === 'permission') {
      itemType = 'permission requests';
    }
    prompt = `${prompt} Important: There are ${context.count} ${itemType} (not just one) waiting for the user's attention. Mention the count in your message.`;
  }

  try {
    // Build headers
    const headers = { 'Content-Type': 'application/json' };
    if (config.aiApiKey) {
      headers['Authorization'] = `Bearer ${config.aiApiKey}`;
    }

    // Build endpoint URL (ensure it ends with /chat/completions)
    let endpoint = config.aiEndpoint || 'http://localhost:11434/v1';
    if (!endpoint.endsWith('/chat/completions')) {
      endpoint = endpoint.replace(/\/$/, '') + '/chat/completions';
    }

    // Create abort controller for timeout
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), config.aiTimeout || 15000);

    // Make the request
    let response;
    try {
      response = await fetch(endpoint, {
        method: 'POST',
        headers,
        signal: controller.signal,
        body: JSON.stringify({
          model: config.aiModel || 'llama3',
          messages: [
            {
              role: 'system',
              content: 'You are a helpful assistant that generates short notification messages. Output only the message text, nothing else. No quotes, no explanations.'
            },
            {
              role: 'user',
              content: prompt
            }
          ],
          max_tokens: 1000, // High value to accommodate thinking models (e.g., Gemini 2.5) that use internal reasoning tokens
          temperature: 0.7
        })
      });
    } finally {
      // BUGFIX: clear the timer even when fetch() rejects; previously a failed
      // request left the setTimeout pending, keeping the Node event loop alive
      // for up to aiTimeout ms and firing a pointless abort().
      clearTimeout(timeout);
    }

    if (!response.ok) {
      return null;
    }

    const data = await response.json();

    // Extract the message content
    const message = data.choices?.[0]?.message?.content?.trim();

    if (!message) {
      return null;
    }

    // Clean up the message (remove quotes if AI added them)
    const cleanMessage = message.replace(/^["']|["']$/g, '').trim();

    // Validate message length (sanity check)
    if (cleanMessage.length < 5 || cleanMessage.length > 200) {
      return null;
    }

    return cleanMessage;

  } catch (error) {
    // Best-effort by design: callers fall back to static messages on null.
    return null;
  }
}
|
|
112
|
+
|
|
113
|
+
/**
 * Get a smart message - tries AI first, falls back to static messages
 * @param {string} eventType - 'idle', 'permission', 'question'
 * @param {boolean} isReminder - Whether this is a reminder message
 * @param {string[]} staticMessages - Array of static fallback messages
 * @param {object} context - Optional context (e.g., { count: 3 } for batched notifications)
 * @returns {Promise<string>} The message to speak
 */
export async function getSmartMessage(eventType, isReminder, staticMessages, context = {}) {
  const config = getTTSConfig();

  if (config.enableAIMessages) {
    // Reminder variants use their own prompt key (e.g. "idleReminder").
    const key = isReminder ? `${eventType}Reminder` : eventType;

    // Any AI failure is treated the same as "no message generated".
    const generated = await generateAIMessage(key, context).catch(() => null);
    if (generated) {
      return generated;
    }

    // AI produced nothing and static fallback is disabled: generic message.
    if (!config.aiFallbackToStatic) {
      return 'Notification: Please check your screen.';
    }
  }

  // Static fallback: pick a random entry, or a bare default when none given.
  const pool = Array.isArray(staticMessages) ? staticMessages : [];
  if (pool.length === 0) {
    return 'Notification';
  }
  return pool[Math.floor(Math.random() * pool.length)];
}
|
|
152
|
+
|
|
153
|
+
/**
 * Test connectivity to the AI endpoint
 * @returns {Promise<{success: boolean, message: string, models?: string[]}>}
 *   (JSDoc fixed: the success result carries `models` — an array of model ids —
 *   not a single `model` string, matching what the code actually returns.)
 */
export async function testAIConnection() {
  const config = getTTSConfig();

  if (!config.enableAIMessages) {
    return { success: false, message: 'AI messages not enabled' };
  }

  try {
    const headers = { 'Content-Type': 'application/json' };
    if (config.aiApiKey) {
      headers['Authorization'] = `Bearer ${config.aiApiKey}`;
    }

    // Try to list models (simpler endpoint to test connectivity)
    let endpoint = config.aiEndpoint || 'http://localhost:11434/v1';
    endpoint = endpoint.replace(/\/$/, '') + '/models';

    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), 5000);

    let response;
    try {
      response = await fetch(endpoint, {
        method: 'GET',
        headers,
        signal: controller.signal
      });
    } finally {
      // BUGFIX: clear the timer even when fetch() rejects; previously a failed
      // probe left a live 5s timer keeping the event loop awake.
      clearTimeout(timeout);
    }

    if (response.ok) {
      const data = await response.json();
      const models = data.data?.map(m => m.id) || [];
      return {
        success: true,
        message: `Connected! Available models: ${models.slice(0, 3).join(', ')}${models.length > 3 ? '...' : ''}`,
        models
      };
    } else {
      return { success: false, message: `HTTP ${response.status}: ${response.statusText}` };
    }

  } catch (error) {
    if (error.name === 'AbortError') {
      return { success: false, message: 'Connection timed out' };
    }
    return { success: false, message: error.message };
  }
}
|
|
204
|
+
|
|
205
|
+
// Aggregate default export; the same functions are also available as named exports.
export default { generateAIMessage, getSmartMessage, testAIConnection };
|