hyperclaw 5.2.6 → 5.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. package/dist/agents-routing-Cee7wrfi.js +398 -0
  2. package/dist/agents-routing-uNzqjgOr.js +6 -0
  3. package/dist/api-key-validation-DydlUth1.js +64 -0
  4. package/dist/api-keys-guide-DqRGa4x7.js +149 -0
  5. package/dist/audit-CX5poVMR.js +445 -0
  6. package/dist/bounty-tools-BoU1NN5N.js +211 -0
  7. package/dist/chat-B5Y8u68o.js +325 -0
  8. package/dist/chat-C07wF0mM.js +494 -0
  9. package/dist/claw-tasks-BLqd1a0Q.js +80 -0
  10. package/dist/config-BlLlJ6Er.js +7 -0
  11. package/dist/config-SgDxKyfV.js +261 -0
  12. package/dist/connector-DD5pq6wT.js +566 -0
  13. package/dist/cost-tracker-CgEXyEtz.js +103 -0
  14. package/dist/credentials-store-1LDtNReY.js +89 -0
  15. package/dist/credentials-store-BFxEJR00.js +7 -0
  16. package/dist/cron-tasks-1E6wKpTv.js +85 -0
  17. package/dist/daemon-B13UIqvL.js +7 -0
  18. package/dist/daemon-B4dlIs7x.js +7 -0
  19. package/dist/daemon-CzOW-1PS.js +421 -0
  20. package/dist/daemon-Dhy9qq9P.js +421 -0
  21. package/dist/delivery-B8YGmrq5.js +4 -0
  22. package/dist/delivery-D-ud3VlK.js +4 -0
  23. package/dist/delivery-DWsU6gx-.js +95 -0
  24. package/dist/delivery-DcK118_L.js +95 -0
  25. package/dist/destructive-gate-bnkroJWG.js +101 -0
  26. package/dist/engine-ByQ-0sWM.js +7 -0
  27. package/dist/engine-DgGG7aYJ.js +7 -0
  28. package/dist/engine-gnMjO743.js +327 -0
  29. package/dist/engine-w5V7Xp_6.js +327 -0
  30. package/dist/env-resolve-BdagnXhK.js +151 -0
  31. package/dist/env-resolve-NEtsGacw.js +10 -0
  32. package/dist/gmail-watch-setup-Brkj94Sy.js +42 -0
  33. package/dist/heartbeat-engine-D6oSpgxx.js +89 -0
  34. package/dist/hyperclawbot-DzP_v3iA.js +516 -0
  35. package/dist/hyperclawbot-PfthmAly.js +516 -0
  36. package/dist/inference-BHJFPEKL.js +8 -0
  37. package/dist/inference-vweXyh27.js +2854 -0
  38. package/dist/knowledge-graph-yeMPBPP8.js +134 -0
  39. package/dist/loader-BXYJl25e.js +6 -0
  40. package/dist/loader-gTdKBwiI.js +410 -0
  41. package/dist/logger-C3ZCDSkc.js +86 -0
  42. package/dist/manager-BI3izlRG.js +120 -0
  43. package/dist/manager-CQ3MWNHY.js +250 -0
  44. package/dist/mcp-CklaYr_6.js +142 -0
  45. package/dist/mcp-loader-9v8eUsNI.js +93 -0
  46. package/dist/mcp-loader-Btdmmj1A.js +93 -0
  47. package/dist/memory-auto-Bow9u_L7.js +306 -0
  48. package/dist/memory-auto-CgBAI8mm.js +5 -0
  49. package/dist/memory-integration-CveUEI3V.js +91 -0
  50. package/dist/moltbook-xP7VXKlX.js +81 -0
  51. package/dist/node-urDjajMt.js +226 -0
  52. package/dist/nodes-registry-Cd4eWw80.js +52 -0
  53. package/dist/oauth-flow-Bt-LaRHV.js +148 -0
  54. package/dist/oauth-provider-CVTKgj35.js +111 -0
  55. package/dist/observability-lwYm4aoB.js +89 -0
  56. package/dist/onboard-BFuPscDx.js +3865 -0
  57. package/dist/onboard-BV5Poocl.js +13 -0
  58. package/dist/onboard-BbJROE0s.js +13 -0
  59. package/dist/onboard-UiynbNQy.js +3909 -0
  60. package/dist/orchestrator-DMZkAVa5.js +189 -0
  61. package/dist/orchestrator-Wv1gLWH6.js +189 -0
  62. package/dist/orchestrator-gncKq3wS.js +6 -0
  63. package/dist/orchestrator-rVGu7yYr.js +6 -0
  64. package/dist/osint-chat-Hn8qpsC1.js +836 -0
  65. package/dist/osint-f9DeXT6j.js +283 -0
  66. package/dist/pending-approval-CHUZ_qT6.js +22 -0
  67. package/dist/run-main.js +68 -65
  68. package/dist/runner-B9xANekJ.js +1274 -0
  69. package/dist/runner-BccY5FGT.js +1274 -0
  70. package/dist/security--17Fxo5Z.js +4 -0
  71. package/dist/security-DP1qtJfL.js +90 -0
  72. package/dist/server-CnZ-Y6L-.js +1305 -0
  73. package/dist/server-DUDXqJSo.js +1365 -0
  74. package/dist/server-MpkM9aIZ.js +4 -0
  75. package/dist/server-ha59nA8a.js +4 -0
  76. package/dist/skill-runtime-BRp2D8Jb.js +5 -0
  77. package/dist/skill-runtime-C8bNwKwz.js +104 -0
  78. package/dist/skill-runtime-DUlp_2uW.js +104 -0
  79. package/dist/skill-runtime-RB7_EOoi.js +5 -0
  80. package/dist/src-B8LzGLSY.js +63 -0
  81. package/dist/src-BRDu1tbl.js +63 -0
  82. package/dist/src-BTIUlese.js +458 -0
  83. package/dist/src-DUoc6_kn.js +458 -0
  84. package/dist/sub-agent-tools-BgXug8yS.js +39 -0
  85. package/dist/sub-agent-tools-CldaMfd5.js +39 -0
  86. package/dist/tool-policy-DtGf3Ifq.js +189 -0
  87. package/dist/tts-elevenlabs-1zWD3QJw.js +64 -0
  88. package/dist/vision-BZG7_ybG.js +167 -0
  89. package/dist/vision-DV2ubVjr.js +167 -0
  90. package/dist/vision-tools-BZM7OifC.js +51 -0
  91. package/dist/vision-tools-COLYNbXb.js +5 -0
  92. package/dist/vision-tools-Cb86uMdh.js +5 -0
  93. package/dist/vision-tools-DlnxaZjg.js +51 -0
  94. package/dist/voice-transcription-BolTT_HG.js +170 -0
  95. package/dist/voice-transcription-j1jpOpH2.js +170 -0
  96. package/package.json +4 -1
  97. package/static/chat.html +9 -3
@@ -0,0 +1,51 @@
1
+ const require_chunk = require('./chunk-jS-bbMI5.js');
2
+
3
//#region packages/core/src/agent/vision-tools.ts
/**
 * Build the list of vision-related agent tools.
 *
 * @param {{apiKey?: string, provider?: string}} [opts] - apiKey defaults to ""
 *   (the tool then reports a configuration error at call time); provider
 *   defaults to "anthropic".
 * @returns {Array<object>} single-element list containing the analyze_image tool.
 */
function getVisionTools(opts = {}) {
  const { apiKey = "", provider = "anthropic" } = opts;
  // Invoked by the agent runtime with the tool's input payload.
  const runAnalyzeImage = async (input) => {
    if (!apiKey) return "Error: No API key configured for vision. Set provider.apiKey or run hyperclaw auth add.";
    const image = input.image?.trim();
    const prompt = input.prompt?.trim() || "Describe this image concisely.";
    if (!image) return "Error: image is required";
    try {
      // Lazily load the vision chunk so its cost is only paid when the tool runs.
      const { analyzeImage } = await Promise.resolve().then(() => require("./vision-DV2ubVjr.js"));
      return await analyzeImage(image, prompt, apiKey, provider);
    } catch (e) {
      return `Error: ${e.message}`;
    }
  };
  return [{
    name: "analyze_image",
    description: "Analyze an image using a vision model. Describe scenes, receipts, documents, photos. Supports file path, URL, or data URI.",
    input_schema: {
      type: "object",
      properties: {
        image: {
          type: "string",
          description: "Image path (~/path), URL (https://...), or data:image/...;base64,..."
        },
        prompt: {
          type: "string",
          description: "What to describe or extract (e.g. \"Describe this scene\", \"List items on this receipt\")"
        }
      },
      required: ["image"]
    },
    handler: runAnalyzeImage
  }];
}
37
// Register the chunk with the bundler's ESM shim; the init callback is empty
// because this module has no deferred module-level side effects.
var init_vision_tools = require_chunk.__esm({ "packages/core/src/agent/vision-tools.ts"() {} });

//#endregion
// Re-export through getters so consumers always observe the live bindings.
Object.defineProperty(exports, 'getVisionTools', {
  enumerable: true,
  get: function () {
    return getVisionTools;
  }
});
Object.defineProperty(exports, 'init_vision_tools', {
  enumerable: true,
  get: function () {
    return init_vision_tools;
  }
});
@@ -0,0 +1,170 @@
1
+ const require_chunk = require('./chunk-jS-bbMI5.js');
2
+ const require_paths = require('./paths-AIyBxIzm.js');
3
+ const require_paths$1 = require('./paths-DPovhojT.js');
4
+ const fs_extra = require_chunk.__toESM(require("fs-extra"));
5
+ const path = require_chunk.__toESM(require("path"));
6
+ const https = require_chunk.__toESM(require("https"));
7
+
8
//#region src/services/voice-transcription.ts
// Run the lazy paths-module initializer before any config lookups below.
require_paths$1.init_paths();
// Hard cap on accepted audio payloads: 25 MiB (presumably matches the Whisper
// upload limit — TODO confirm).
const MAX_AUDIO_BYTES = 25 * 1024 * 1024;
// File extensions we are willing to read from disk for transcription.
const ALLOWED_AUDIO_EXTENSIONS = new Set([
  ".ogg",
  ".oga",
  ".mp3",
  ".wav",
  ".m4a",
  ".mp4",
  ".mpeg",
  ".webm"
]);
21
// Flatten control whitespace and truncate so text embedded in a user-facing
// error stays single-line and short.
function sanitizeForError(value) {
  const text = value == null ? "" : String(value);
  const flattened = text.replace(/[\r\n\t]+/g, " ");
  return flattened.slice(0, 160);
}
24
/**
 * Normalize audio input into a Buffer.
 * Accepts a Buffer directly, or a filesystem path whose extension must be in
 * ALLOWED_AUDIO_EXTENSIONS. Throws on empty input and on input larger than
 * MAX_AUDIO_BYTES.
 */
async function loadAudioInput(audioPathOrBuffer) {
  // Shared size validation for both the Buffer and the file branch.
  const checkSize = (bytes) => {
    if (bytes.length === 0) throw new Error("Audio input is empty");
    if (bytes.length > MAX_AUDIO_BYTES) throw new Error(`Audio input exceeds ${MAX_AUDIO_BYTES} bytes`);
    return bytes;
  };
  if (Buffer.isBuffer(audioPathOrBuffer)) return checkSize(audioPathOrBuffer);
  const resolvedPath = path.default.resolve(audioPathOrBuffer);
  const ext = path.default.extname(resolvedPath).toLowerCase();
  // Reject unknown extensions before touching the filesystem.
  if (!ALLOWED_AUDIO_EXTENSIONS.has(ext)) throw new Error(`Unsupported audio file type: ${ext || "unknown"}`);
  return checkSize(await fs_extra.default.readFile(resolvedPath));
}
38
// Best-effort read of the provider section of the HyperClaw config file.
// Returns {} when the file is missing or unreadable.
async function getConfig() {
  try {
    const cfg = await fs_extra.default.readJson(require_paths.getConfigPath());
    const provider = cfg?.provider;
    return {
      providerId: provider?.providerId,
      apiKey: provider?.apiKey
    };
  } catch {
    return {};
  }
}
51
/**
 * Upload an audio buffer to OpenAI's Whisper transcription endpoint and return
 * the transcript. The multipart/form-data body is assembled by hand so no
 * form-data dependency is needed. Resolves with the transcript, with
 * "[No transcription]" when the response carries no text, or with an inline
 * "[Transcription error: …]" string when the response is not valid JSON;
 * rejects only on transport-level request errors.
 */
async function transcribeWithWhisper(buffer, apiKey) {
  return new Promise((resolve, reject) => {
    // Timestamp makes the boundary unlikely to collide with payload bytes.
    const boundary = "----HyperClaw" + Date.now();
    // First multipart part: the audio file. It is always labelled audio.ogg —
    // NOTE(review): presumably the server sniffs the real container format; confirm.
    const header = [
      `--${boundary}`,
      "Content-Disposition: form-data; name=\"file\"; filename=\"audio.ogg\"",
      "Content-Type: application/octet-stream",
      "",
      ""
    ].join("\r\n");
    // Second part pins the model name, followed by the closing boundary.
    const footer = `\r\n--${boundary}\r\nContent-Disposition: form-data; name="model"\r\n\r\nwhisper-1\r\n--${boundary}--\r\n`;
    const body = Buffer.concat([
      Buffer.from(header, "utf8"),
      buffer,
      Buffer.from(footer, "utf8")
    ]);
    const req = https.default.request({
      hostname: "api.openai.com",
      port: 443,
      path: "/v1/audio/transcriptions",
      method: "POST",
      headers: {
        "Authorization": `Bearer ${apiKey}`,
        "Content-Type": `multipart/form-data; boundary=${boundary}`,
        // body is a Buffer, so .length is the byte count Content-Length requires.
        "Content-Length": body.length
      }
    }, (res) => {
      let data = "";
      res.on("data", (c) => data += c);
      res.on("end", () => {
        try {
          const j = JSON.parse(data);
          resolve(j.text?.trim() || "[No transcription]");
        } catch {
          // Non-JSON response (error page, truncation): surface a snippet inline.
          resolve(`[Transcription error: ${data.slice(0, 100)}]`);
        }
      });
    });
    req.on("error", reject);
    req.write(body);
    req.end();
  });
}
94
/**
 * Transcribe an audio buffer with Google's Gemini generateContent REST API.
 * The audio is inlined as base64 with mimeType audio/ogg regardless of the
 * actual container — NOTE(review): confirm non-ogg inputs are accepted.
 * Resolves with the transcript, "[No transcription]" when the response has no
 * candidate text, or an inline "[Transcription error: …]" string when the
 * response is not valid JSON; rejects only on transport-level request errors.
 */
async function transcribeWithGemini(buffer, apiKey) {
  const base64 = buffer.toString("base64");
  const payload = JSON.stringify({
    contents: [{ parts: [{ text: "Transcribe this audio to text. Output only the transcription, no other text." }, { inlineData: {
      mimeType: "audio/ogg",
      data: base64
    } }] }],
    generationConfig: { maxOutputTokens: 1024 }
  });
  return new Promise((resolve, reject) => {
    const req = https.default.request({
      hostname: "generativelanguage.googleapis.com",
      port: 443,
      // The API key travels as a query parameter on this endpoint.
      path: "/v1beta/models/gemini-2.0-flash:generateContent?key=" + encodeURIComponent(apiKey),
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        // byteLength, not .length: Content-Length must count bytes.
        "Content-Length": Buffer.byteLength(payload)
      }
    }, (res) => {
      let data = "";
      res.on("data", (c) => data += c);
      res.on("end", () => {
        try {
          const j = JSON.parse(data);
          const text = j.candidates?.[0]?.content?.parts?.[0]?.text?.trim();
          resolve(text || "[No transcription]");
        } catch {
          resolve(`[Transcription error: ${data.slice(0, 100)}]`);
        }
      });
    });
    req.on("error", reject);
    req.write(payload);
    req.end();
  });
}
131
/**
 * Transcribe audio using the configured provider, with fallbacks.
 * Providers: OpenAI (Whisper), Google (Gemini), OpenRouter.
 * Env: OPENAI_API_KEY, WHISPER_API_KEY, GOOGLE_AI_API_KEY.
 */
async function transcribeVoiceNote(audioPathOrBuffer, apiKey) {
  const failed = (e) => `[Transcription failed: ${sanitizeForError(e?.message)}]`;
  let buffer;
  try {
    buffer = await loadAudioInput(audioPathOrBuffer);
  } catch (e) {
    return failed(e);
  }
  const cfg = await getConfig();
  const cfgIsOpenAiLike = cfg.providerId === "openai" || cfg.providerId === "openrouter";
  const openaiKey = apiKey || process.env.OPENAI_API_KEY || process.env.WHISPER_API_KEY || (cfgIsOpenAiLike ? cfg.apiKey : "");
  const googleKey = process.env.GOOGLE_AI_API_KEY || (cfg.providerId === "google" ? cfg.apiKey : "");
  // Pass 1: silently try the provider selected in config; failures fall through.
  if (cfg.providerId === "google" && googleKey) {
    try {
      return await transcribeWithGemini(buffer, googleKey);
    } catch {}
  }
  if (cfgIsOpenAiLike && openaiKey) {
    try {
      return await transcribeWithWhisper(buffer, openaiKey);
    } catch {}
  }
  // Pass 2: fall back to any available key; this pass reports failures.
  if (openaiKey) {
    try {
      return await transcribeWithWhisper(buffer, openaiKey);
    } catch (e) {
      return failed(e);
    }
  }
  if (googleKey) {
    try {
      return await transcribeWithGemini(buffer, googleKey);
    } catch (e) {
      return failed(e);
    }
  }
  return "[Voice note — add OPENAI_API_KEY or GOOGLE_AI_API_KEY (or select OpenAI/Google provider in the wizard) for transcription]";
}

//#endregion
exports.transcribeVoiceNote = transcribeVoiceNote;
@@ -0,0 +1,170 @@
1
+ const require_chunk = require('./chunk-jS-bbMI5.js');
2
+ const require_paths = require('./paths-AIyBxIzm.js');
3
+ const require_paths$1 = require('./paths-DPovhojT.js');
4
+ const fs_extra = require_chunk.__toESM(require("fs-extra"));
5
+ const path = require_chunk.__toESM(require("path"));
6
+ const https = require_chunk.__toESM(require("https"));
7
+
8
//#region src/services/voice-transcription.ts
// Run the lazy paths-module initializer before any config lookups below.
require_paths$1.init_paths();
// Hard cap on accepted audio payloads: 25 MiB (presumably matches the Whisper
// upload limit — TODO confirm).
const MAX_AUDIO_BYTES = 25 * 1024 * 1024;
// File extensions we are willing to read from disk for transcription.
const ALLOWED_AUDIO_EXTENSIONS = new Set([
  ".ogg",
  ".oga",
  ".mp3",
  ".wav",
  ".m4a",
  ".mp4",
  ".mpeg",
  ".webm"
]);
21
// Flatten control whitespace and truncate so text embedded in a user-facing
// error stays single-line and short.
function sanitizeForError(value) {
  const text = value == null ? "" : String(value);
  const flattened = text.replace(/[\r\n\t]+/g, " ");
  return flattened.slice(0, 160);
}
24
/**
 * Normalize audio input into a Buffer.
 * Accepts a Buffer directly, or a filesystem path whose extension must be in
 * ALLOWED_AUDIO_EXTENSIONS. Throws on empty input and on input larger than
 * MAX_AUDIO_BYTES.
 */
async function loadAudioInput(audioPathOrBuffer) {
  // Shared size validation for both the Buffer and the file branch.
  const checkSize = (bytes) => {
    if (bytes.length === 0) throw new Error("Audio input is empty");
    if (bytes.length > MAX_AUDIO_BYTES) throw new Error(`Audio input exceeds ${MAX_AUDIO_BYTES} bytes`);
    return bytes;
  };
  if (Buffer.isBuffer(audioPathOrBuffer)) return checkSize(audioPathOrBuffer);
  const resolvedPath = path.default.resolve(audioPathOrBuffer);
  const ext = path.default.extname(resolvedPath).toLowerCase();
  // Reject unknown extensions before touching the filesystem.
  if (!ALLOWED_AUDIO_EXTENSIONS.has(ext)) throw new Error(`Unsupported audio file type: ${ext || "unknown"}`);
  return checkSize(await fs_extra.default.readFile(resolvedPath));
}
38
// Best-effort read of the provider section of the HyperClaw config file.
// Returns {} when the file is missing or unreadable.
async function getConfig() {
  try {
    const cfg = await fs_extra.default.readJson(require_paths.getConfigPath());
    const provider = cfg?.provider;
    return {
      providerId: provider?.providerId,
      apiKey: provider?.apiKey
    };
  } catch {
    return {};
  }
}
51
/**
 * Upload an audio buffer to OpenAI's Whisper transcription endpoint and return
 * the transcript. The multipart/form-data body is assembled by hand so no
 * form-data dependency is needed. Resolves with the transcript, with
 * "[No transcription]" when the response carries no text, or with an inline
 * "[Transcription error: …]" string when the response is not valid JSON;
 * rejects only on transport-level request errors.
 */
async function transcribeWithWhisper(buffer, apiKey) {
  return new Promise((resolve, reject) => {
    // Timestamp makes the boundary unlikely to collide with payload bytes.
    const boundary = "----HyperClaw" + Date.now();
    // First multipart part: the audio file. It is always labelled audio.ogg —
    // NOTE(review): presumably the server sniffs the real container format; confirm.
    const header = [
      `--${boundary}`,
      "Content-Disposition: form-data; name=\"file\"; filename=\"audio.ogg\"",
      "Content-Type: application/octet-stream",
      "",
      ""
    ].join("\r\n");
    // Second part pins the model name, followed by the closing boundary.
    const footer = `\r\n--${boundary}\r\nContent-Disposition: form-data; name="model"\r\n\r\nwhisper-1\r\n--${boundary}--\r\n`;
    const body = Buffer.concat([
      Buffer.from(header, "utf8"),
      buffer,
      Buffer.from(footer, "utf8")
    ]);
    const req = https.default.request({
      hostname: "api.openai.com",
      port: 443,
      path: "/v1/audio/transcriptions",
      method: "POST",
      headers: {
        "Authorization": `Bearer ${apiKey}`,
        "Content-Type": `multipart/form-data; boundary=${boundary}`,
        // body is a Buffer, so .length is the byte count Content-Length requires.
        "Content-Length": body.length
      }
    }, (res) => {
      let data = "";
      res.on("data", (c) => data += c);
      res.on("end", () => {
        try {
          const j = JSON.parse(data);
          resolve(j.text?.trim() || "[No transcription]");
        } catch {
          // Non-JSON response (error page, truncation): surface a snippet inline.
          resolve(`[Transcription error: ${data.slice(0, 100)}]`);
        }
      });
    });
    req.on("error", reject);
    req.write(body);
    req.end();
  });
}
94
/**
 * Transcribe an audio buffer with Google's Gemini generateContent REST API.
 * The audio is inlined as base64 with mimeType audio/ogg regardless of the
 * actual container — NOTE(review): confirm non-ogg inputs are accepted.
 * Resolves with the transcript, "[No transcription]" when the response has no
 * candidate text, or an inline "[Transcription error: …]" string when the
 * response is not valid JSON; rejects only on transport-level request errors.
 */
async function transcribeWithGemini(buffer, apiKey) {
  const base64 = buffer.toString("base64");
  const payload = JSON.stringify({
    contents: [{ parts: [{ text: "Transcribe this audio to text. Output only the transcription, no other text." }, { inlineData: {
      mimeType: "audio/ogg",
      data: base64
    } }] }],
    generationConfig: { maxOutputTokens: 1024 }
  });
  return new Promise((resolve, reject) => {
    const req = https.default.request({
      hostname: "generativelanguage.googleapis.com",
      port: 443,
      // The API key travels as a query parameter on this endpoint.
      path: "/v1beta/models/gemini-2.0-flash:generateContent?key=" + encodeURIComponent(apiKey),
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        // byteLength, not .length: Content-Length must count bytes.
        "Content-Length": Buffer.byteLength(payload)
      }
    }, (res) => {
      let data = "";
      res.on("data", (c) => data += c);
      res.on("end", () => {
        try {
          const j = JSON.parse(data);
          const text = j.candidates?.[0]?.content?.parts?.[0]?.text?.trim();
          resolve(text || "[No transcription]");
        } catch {
          resolve(`[Transcription error: ${data.slice(0, 100)}]`);
        }
      });
    });
    req.on("error", reject);
    req.write(payload);
    req.end();
  });
}
131
/**
 * Transcribe audio using the configured provider, with fallbacks.
 * Providers: OpenAI (Whisper), Google (Gemini), OpenRouter.
 * Env: OPENAI_API_KEY, WHISPER_API_KEY, GOOGLE_AI_API_KEY.
 */
async function transcribeVoiceNote(audioPathOrBuffer, apiKey) {
  const failed = (e) => `[Transcription failed: ${sanitizeForError(e?.message)}]`;
  let buffer;
  try {
    buffer = await loadAudioInput(audioPathOrBuffer);
  } catch (e) {
    return failed(e);
  }
  const cfg = await getConfig();
  const cfgIsOpenAiLike = cfg.providerId === "openai" || cfg.providerId === "openrouter";
  const openaiKey = apiKey || process.env.OPENAI_API_KEY || process.env.WHISPER_API_KEY || (cfgIsOpenAiLike ? cfg.apiKey : "");
  const googleKey = process.env.GOOGLE_AI_API_KEY || (cfg.providerId === "google" ? cfg.apiKey : "");
  // Pass 1: silently try the provider selected in config; failures fall through.
  if (cfg.providerId === "google" && googleKey) {
    try {
      return await transcribeWithGemini(buffer, googleKey);
    } catch {}
  }
  if (cfgIsOpenAiLike && openaiKey) {
    try {
      return await transcribeWithWhisper(buffer, openaiKey);
    } catch {}
  }
  // Pass 2: fall back to any available key; this pass reports failures.
  if (openaiKey) {
    try {
      return await transcribeWithWhisper(buffer, openaiKey);
    } catch (e) {
      return failed(e);
    }
  }
  if (googleKey) {
    try {
      return await transcribeWithGemini(buffer, googleKey);
    } catch (e) {
      return failed(e);
    }
  }
  return "[Voice note — add OPENAI_API_KEY or GOOGLE_AI_API_KEY (or select OpenAI/Google provider in the wizard) for transcription]";
}

//#endregion
exports.transcribeVoiceNote = transcribeVoiceNote;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "hyperclaw",
3
- "version": "5.2.6",
3
+ "version": "5.2.8",
4
4
  "description": "⚡ HyperClaw — AI Gateway Platform. The Lobster Evolution 🦅",
5
5
  "main": "dist/run-main.js",
6
6
  "bin": {
@@ -100,6 +100,8 @@
100
100
  "fs-extra": "^11.2.0",
101
101
  "gradient-string": "^2.0.2",
102
102
  "inquirer": "^8.2.7",
103
+ "marked": "^15.0.12",
104
+ "marked-terminal": "^7.3.0",
103
105
  "node-cron": "^3.0.3",
104
106
  "ora": "^5.4.1",
105
107
  "tar": "^7.0.0",
@@ -119,6 +121,7 @@
119
121
  "@types/figlet": "^1.5.8",
120
122
  "@types/fs-extra": "^11.0.4",
121
123
  "@types/inquirer": "^8.2.10",
124
+ "@types/marked-terminal": "^6.1.1",
122
125
  "@types/node": "^22.0.0",
123
126
  "@types/ws": "^8.5.8",
124
127
  "@vitest/coverage-v8": "^1.6.0",
package/static/chat.html CHANGED
@@ -139,10 +139,10 @@
139
139
  <!-- INPUT -->
140
140
  <div class="input-wrap">
141
141
  <form class="input-row" id="form">
142
- <textarea id="input" rows="1" placeholder="Message HyperClaw… (Enter to send, Shift+Enter for new line)" autocomplete="off"></textarea>
142
+ <textarea id="input" rows="1" placeholder="Your AI assistant awaits — type a message and press Enter to send · HyperClaw" autocomplete="off"></textarea>
143
143
  <button type="submit" id="send-btn">↑</button>
144
144
  </form>
145
- <div class="input-hint">🦅 Hyper · <span id="model-hint">—</span></div>
145
+ <div class="input-hint">🦅 HyperClaw · <span id="model-hint">—</span></div>
146
146
  </div>
147
147
 
148
148
  <script>
@@ -270,17 +270,23 @@
270
270
  if (last) last.appendChild(t);
271
271
  }
272
272
 
273
+ let workingTicker = null;
273
274
  function addTyping() {
274
275
  welcomeEl.style.display = 'none';
275
276
  const row = document.createElement('div');
276
277
  row.className = 'msg-row assistant';
277
278
  row.id = 'typing-row';
278
- row.innerHTML = '<div class="msg-avatar">🦅</div><div class="msg-bubble" style="padding:14px 18px"><span class="typing-dot"></span><span class="typing-dot"></span><span class="typing-dot"></span></div>';
279
+ let sec = 0;
280
+ row.innerHTML = '<div class="msg-avatar">🦅</div><div class="msg-bubble" style="padding:14px 18px"><span class="typing-dot"></span><span class="typing-dot"></span><span class="typing-dot"></span> <span id="working-text" style="margin-left:8px;color:var(--text2);font-size:12px">Working (0s)</span></div>';
279
281
  messagesEl.appendChild(row);
280
282
  messagesEl.scrollTop = messagesEl.scrollHeight;
283
+ workingTicker = setInterval(() => {
284
+ sec++; const el = document.getElementById('working-text'); if (el) el.textContent = 'Working (' + sec + 's)';
285
+ }, 1000);
281
286
  }
282
287
 
283
288
  function removeTyping() {
289
+ if (workingTicker) { clearInterval(workingTicker); workingTicker = null; }
284
290
  document.getElementById('typing-row')?.remove();
285
291
  }
286
292