hyperclaw 4.0.0 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +49 -17
- package/dist/api-keys-guide-Bzig1R5W.js +149 -0
- package/dist/connector-DRv1ahC_.js +343 -0
- package/dist/delivery-B-SJqXLn.js +95 -0
- package/dist/delivery-VgFeuu2J.js +5 -0
- package/dist/hyperclawbot-DfMGowZC.js +480 -0
- package/dist/onboard-3q20ZyHj.js +9 -0
- package/dist/onboard-DnegOHMh.js +3026 -0
- package/dist/run-main.js +93 -94
- package/dist/runner-Bu--_RXw.js +810 -0
- package/dist/sdk/index.js +2 -2
- package/dist/sdk/index.mjs +2 -2
- package/dist/server-CCI1hv45.js +1047 -0
- package/dist/server-RBqwE_GN.js +4 -0
- package/dist/voice-transcription-CbQBToY0.js +138 -0
- package/package.json +1 -1
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
const require_chunk = require('./chunk-jS-bbMI5.js');
|
|
2
|
+
const fs_extra = require_chunk.__toESM(require("fs-extra"));
|
|
3
|
+
const path = require_chunk.__toESM(require("path"));
|
|
4
|
+
const os = require_chunk.__toESM(require("os"));
|
|
5
|
+
const https = require_chunk.__toESM(require("https"));
|
|
6
|
+
|
|
7
|
+
//#region src/services/voice-transcription.ts
const HC_DIR = path.default.join(os.default.homedir(), ".hyperclaw");

/**
 * Load provider settings from ~/.hyperclaw/hyperclaw.json.
 *
 * @returns {Promise<{providerId?: string, apiKey?: string}>} The configured
 *   provider id and API key (either may be undefined), or an empty object
 *   when the config file is missing or unreadable.
 */
async function getConfig() {
	const configPath = path.default.join(HC_DIR, "hyperclaw.json");
	try {
		const cfg = await fs_extra.default.readJson(configPath);
		const provider = cfg?.provider;
		return {
			providerId: provider?.providerId,
			apiKey: provider?.apiKey
		};
	} catch {
		// Missing or malformed config is not an error for callers.
		return {};
	}
}
|
|
22
|
+
/**
 * Transcribe an OGG audio buffer via the OpenAI Whisper API.
 *
 * Builds the multipart/form-data body by hand (no form-data dependency) and
 * POSTs it to /v1/audio/transcriptions with model "whisper-1".
 *
 * @param {Buffer} buffer - Raw audio bytes (uploaded as "audio.ogg").
 * @param {string} apiKey - OpenAI API key, sent as a Bearer token.
 * @returns {Promise<string>} Transcribed text, or a bracketed placeholder /
 *   error string. Rejects only on transport-level (socket) errors.
 */
async function transcribeWithWhisper(buffer, apiKey) {
	return new Promise((resolve, reject) => {
		const boundary = "----HyperClaw" + Date.now();
		// Part headers for the "file" field; the trailing empty strings yield
		// the blank line that separates part headers from the binary payload.
		const header = [
			`--${boundary}`,
			"Content-Disposition: form-data; name=\"file\"; filename=\"audio.ogg\"",
			"Content-Type: application/octet-stream",
			"",
			""
		].join("\r\n");
		// "model" field plus the closing boundary marker.
		const footer = `\r\n--${boundary}\r\nContent-Disposition: form-data; name="model"\r\n\r\nwhisper-1\r\n--${boundary}--\r\n`;
		const body = Buffer.concat([
			Buffer.from(header, "utf8"),
			buffer,
			Buffer.from(footer, "utf8")
		]);
		const req = https.default.request({
			hostname: "api.openai.com",
			port: 443,
			path: "/v1/audio/transcriptions",
			method: "POST",
			headers: {
				"Authorization": `Bearer ${apiKey}`,
				"Content-Type": `multipart/form-data; boundary=${boundary}`,
				"Content-Length": body.length
			}
		}, (res) => {
			let data = "";
			res.on("data", (c) => data += c);
			res.on("end", () => {
				// Surface HTTP-level failures (401 bad key, 429 rate limit, ...)
				// explicitly: their JSON error bodies parse fine but carry no
				// "text" field, which previously mis-reported them as
				// "[No transcription]".
				if (typeof res.statusCode === "number" && (res.statusCode < 200 || res.statusCode >= 300)) {
					resolve(`[Transcription error: HTTP ${res.statusCode} ${data.slice(0, 100)}]`);
					return;
				}
				try {
					const j = JSON.parse(data);
					resolve(j.text?.trim() || "[No transcription]");
				} catch {
					resolve(`[Transcription error: ${data.slice(0, 100)}]`);
				}
			});
		});
		req.on("error", reject);
		req.write(body);
		req.end();
	});
}
|
|
65
|
+
/**
 * Transcribe an OGG audio buffer via Google's Gemini generateContent API.
 *
 * Inlines the audio as base64 alongside a transcription prompt and POSTs it
 * to the gemini-2.0-flash model.
 *
 * @param {Buffer} buffer - Raw audio bytes (sent as audio/ogg).
 * @param {string} apiKey - Google AI API key, passed as a query parameter.
 * @returns {Promise<string>} Transcribed text, or a bracketed placeholder /
 *   error string. Rejects only on transport-level (socket) errors.
 */
async function transcribeWithGemini(buffer, apiKey) {
	const base64 = buffer.toString("base64");
	const payload = JSON.stringify({
		contents: [{ parts: [{ text: "Transcribe this audio to text. Output only the transcription, no other text." }, { inlineData: {
			mimeType: "audio/ogg",
			data: base64
		} }] }],
		generationConfig: { maxOutputTokens: 1024 }
	});
	return new Promise((resolve, reject) => {
		const req = https.default.request({
			hostname: "generativelanguage.googleapis.com",
			port: 443,
			path: "/v1beta/models/gemini-2.0-flash:generateContent?key=" + encodeURIComponent(apiKey),
			method: "POST",
			headers: {
				"Content-Type": "application/json",
				"Content-Length": Buffer.byteLength(payload)
			}
		}, (res) => {
			let data = "";
			res.on("data", (c) => data += c);
			res.on("end", () => {
				// Report HTTP-level failures explicitly: Gemini error bodies are
				// valid JSON with no "candidates", which previously surfaced as
				// the misleading "[No transcription]".
				if (typeof res.statusCode === "number" && (res.statusCode < 200 || res.statusCode >= 300)) {
					resolve(`[Transcription error: HTTP ${res.statusCode} ${data.slice(0, 100)}]`);
					return;
				}
				try {
					const j = JSON.parse(data);
					const text = j.candidates?.[0]?.content?.parts?.[0]?.text?.trim();
					resolve(text || "[No transcription]");
				} catch {
					resolve(`[Transcription error: ${data.slice(0, 100)}]`);
				}
			});
		});
		req.on("error", reject);
		req.write(payload);
		req.end();
	});
}
|
|
102
|
+
/**
 * Transcribe audio using configured provider or fallbacks.
 * Providers: OpenAI (Whisper), Google (Gemini), OpenRouter.
 * Env: OPENAI_API_KEY, WHISPER_API_KEY, GOOGLE_AI_API_KEY.
 *
 * Order: the wizard-configured provider is tried first, then whichever other
 * transport has a usable key. A transport that already failed is never
 * retried with the same key (the previous implementation re-issued an
 * identical, guaranteed-to-fail request as its "fallback").
 *
 * @param {string|Buffer} audioPathOrBuffer - Path to an audio file, or the raw bytes.
 * @param {string} [apiKey] - Optional explicit key; takes precedence for Whisper.
 * @returns {Promise<string>} Transcription, or a bracketed status/error string.
 */
async function transcribeVoiceNote(audioPathOrBuffer, apiKey) {
	const buffer = typeof audioPathOrBuffer === "string"
		? await fs_extra.default.readFile(audioPathOrBuffer)
		: audioPathOrBuffer;
	const cfg = await getConfig();
	// NOTE(review): an OpenRouter-configured key is also used against
	// api.openai.com below; that only works when the stored key is
	// OpenAI-compatible — confirm this is intended.
	const openaiKey = apiKey || process.env.OPENAI_API_KEY || process.env.WHISPER_API_KEY || (cfg.providerId === "openai" || cfg.providerId === "openrouter" ? cfg.apiKey : "");
	const googleKey = process.env.GOOGLE_AI_API_KEY || (cfg.providerId === "google" ? cfg.apiKey : "");
	let whisperTried = false;
	let geminiTried = false;
	let lastError;
	// Configured provider gets the first attempt.
	if (cfg.providerId === "google" && googleKey) {
		geminiTried = true;
		try {
			return await transcribeWithGemini(buffer, googleKey);
		} catch (e) {
			lastError = e;
		}
	}
	if ((cfg.providerId === "openrouter" || cfg.providerId === "openai") && openaiKey) {
		whisperTried = true;
		try {
			return await transcribeWithWhisper(buffer, openaiKey);
		} catch (e) {
			lastError = e;
		}
	}
	// Fallbacks — skip any transport that already failed with the same key.
	if (openaiKey && !whisperTried) {
		try {
			return await transcribeWithWhisper(buffer, openaiKey);
		} catch (e) {
			lastError = e;
		}
	}
	if (googleKey && !geminiTried) {
		try {
			return await transcribeWithGemini(buffer, googleKey);
		} catch (e) {
			lastError = e;
		}
	}
	// A key existed but every attempt threw (transport-level failure).
	if (lastError) return `[Transcription failed: ${lastError.message}]`;
	return "[Voice note — add OPENAI_API_KEY or GOOGLE_AI_API_KEY (or select OpenAI/Google provider in the wizard) for transcription]";
}
|
|
136
|
+
|
|
137
|
+
//#endregion
|
|
138
|
+
exports.transcribeVoiceNote = transcribeVoiceNote;
|