@zhihand/mcp 0.22.0 → 0.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/daemon/dispatcher.js +9 -3
- package/dist/daemon/heartbeat.js +48 -18
- package/dist/daemon/prompt-listener.d.ts +2 -0
- package/dist/daemon/prompt-listener.js +46 -28
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/package.json +3 -2
- package/scripts/pty-wrap.py +143 -0
package/dist/daemon/dispatcher.js
CHANGED
|
@@ -5,7 +5,7 @@ import os from "node:os";
|
|
|
5
5
|
import { fileURLToPath } from "node:url";
|
|
6
6
|
import { DEFAULT_MODELS } from "../core/config.js";
|
|
7
7
|
import { resolveGemini, resolveClaude, resolveCodex } from "../core/resolve-path.js";
|
|
8
|
-
const CLI_TIMEOUT =
|
|
8
|
+
const CLI_TIMEOUT = 300_000; // 300s (5min) per prompt — MCP tool chains need multiple turns
|
|
9
9
|
const SIGKILL_DELAY = 2_000; // 2s after SIGTERM
|
|
10
10
|
const MAX_OUTPUT_BYTES = 100 * 1024; // 100KB (for one-shot backends)
|
|
11
11
|
const MAX_HISTORY_TURNS = 20; // keep last N exchanges in conversation history
|
|
@@ -208,7 +208,7 @@ function pollGeminiSession(child, startTime, promptText, log, knownSessionFile,
|
|
|
208
208
|
}
|
|
209
209
|
closeChild(child);
|
|
210
210
|
settle({
|
|
211
|
-
text: "Gemini timed out after
|
|
211
|
+
text: "Gemini timed out after 5 minutes.",
|
|
212
212
|
success: false,
|
|
213
213
|
durationMs: elapsed,
|
|
214
214
|
});
|
|
@@ -539,7 +539,13 @@ async function dispatchClaudeWithHistory(prompt, startTime, log, model) {
|
|
|
539
539
|
const claudePath = resolveClaude();
|
|
540
540
|
log(`[claude] One-shot dispatch (history: ${conversationHistory.length} turns)`);
|
|
541
541
|
// Pass prompt via stdin (-p -) to avoid ARG_MAX limit with long conversation history
|
|
542
|
-
|
|
542
|
+
// --permission-mode bypassPermissions: auto-approve all tool calls (like gemini's --approval-mode yolo)
|
|
543
|
+
const child = spawn(claudePath, [
|
|
544
|
+
"-p", "-",
|
|
545
|
+
"--model", model,
|
|
546
|
+
"--output-format", "json",
|
|
547
|
+
"--permission-mode", "bypassPermissions",
|
|
548
|
+
], {
|
|
543
549
|
env: process.env,
|
|
544
550
|
stdio: ["pipe", "pipe", "pipe"],
|
|
545
551
|
detached: false,
|
package/dist/daemon/heartbeat.js
CHANGED
|
@@ -2,6 +2,7 @@ const HEARTBEAT_INTERVAL = 30_000; // 30s
|
|
|
2
2
|
const HEARTBEAT_RETRY_INTERVAL = 5_000; // 5s on failure
|
|
3
3
|
let heartbeatTimer;
|
|
4
4
|
let retryTimer;
|
|
5
|
+
let stopped = true;
|
|
5
6
|
let currentMeta = {};
|
|
6
7
|
/** Update the backend/model metadata that will be sent with the next heartbeat. */
|
|
7
8
|
export function setBrainMeta(meta) {
|
|
@@ -40,36 +41,65 @@ export async function sendBrainOffline(config) {
|
|
|
40
41
|
}
|
|
41
42
|
export function startHeartbeatLoop(config, log) {
|
|
42
43
|
let retrying = false;
|
|
44
|
+
stopped = false;
|
|
43
45
|
async function beat() {
|
|
46
|
+
// Skip main-timer beats while retry loop is active (avoids overlap & flapping)
|
|
47
|
+
if (retrying || stopped)
|
|
48
|
+
return;
|
|
44
49
|
const ok = await sendBrainOnline(config);
|
|
45
|
-
if (
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
const recovered = await sendBrainOnline(config);
|
|
51
|
-
if (recovered) {
|
|
52
|
-
retrying = false;
|
|
53
|
-
if (retryTimer) {
|
|
54
|
-
clearInterval(retryTimer);
|
|
55
|
-
retryTimer = undefined;
|
|
56
|
-
}
|
|
57
|
-
log("[heartbeat] Recovered.");
|
|
58
|
-
}
|
|
59
|
-
}, HEARTBEAT_RETRY_INTERVAL);
|
|
50
|
+
if (stopped)
|
|
51
|
+
return; // check after await — stopHeartbeatLoop() may have been called
|
|
52
|
+
if (ok) {
|
|
53
|
+
scheduleNextBeat();
|
|
54
|
+
return;
|
|
60
55
|
}
|
|
56
|
+
// Enter retry mode
|
|
57
|
+
retrying = true;
|
|
58
|
+
log("[heartbeat] Failed, retrying every 5s...");
|
|
59
|
+
scheduleRetry();
|
|
60
|
+
}
|
|
61
|
+
/** Recursive setTimeout for retry — waits for fetch to settle before scheduling next. */
|
|
62
|
+
function scheduleRetry() {
|
|
63
|
+
if (stopped)
|
|
64
|
+
return;
|
|
65
|
+
retryTimer = setTimeout(async () => {
|
|
66
|
+
if (!retrying || stopped)
|
|
67
|
+
return;
|
|
68
|
+
const recovered = await sendBrainOnline(config);
|
|
69
|
+
if (stopped)
|
|
70
|
+
return; // check after await
|
|
71
|
+
if (recovered) {
|
|
72
|
+
retrying = false;
|
|
73
|
+
retryTimer = undefined;
|
|
74
|
+
log("[heartbeat] Recovered.");
|
|
75
|
+
// Resume normal beat cycle
|
|
76
|
+
scheduleNextBeat();
|
|
77
|
+
return;
|
|
78
|
+
}
|
|
79
|
+
// Still failing — schedule another retry
|
|
80
|
+
if (retrying && !stopped)
|
|
81
|
+
scheduleRetry();
|
|
82
|
+
}, HEARTBEAT_RETRY_INTERVAL);
|
|
83
|
+
}
|
|
84
|
+
/** Schedule next normal heartbeat using setTimeout (not setInterval, to avoid overlap). */
|
|
85
|
+
function scheduleNextBeat() {
|
|
86
|
+
if (stopped)
|
|
87
|
+
return;
|
|
88
|
+
if (heartbeatTimer)
|
|
89
|
+
clearTimeout(heartbeatTimer);
|
|
90
|
+
heartbeatTimer = setTimeout(beat, HEARTBEAT_INTERVAL);
|
|
61
91
|
}
|
|
62
92
|
// Immediate first heartbeat
|
|
63
93
|
beat();
|
|
64
|
-
heartbeatTimer = setInterval(beat, HEARTBEAT_INTERVAL);
|
|
65
94
|
}
|
|
66
95
|
export function stopHeartbeatLoop() {
|
|
96
|
+
stopped = true;
|
|
67
97
|
if (heartbeatTimer) {
|
|
68
|
-
|
|
98
|
+
clearTimeout(heartbeatTimer);
|
|
69
99
|
heartbeatTimer = undefined;
|
|
70
100
|
}
|
|
71
101
|
if (retryTimer) {
|
|
72
|
-
|
|
102
|
+
clearTimeout(retryTimer);
|
|
73
103
|
retryTimer = undefined;
|
|
74
104
|
}
|
|
75
105
|
}
|
|
package/dist/daemon/prompt-listener.d.ts
CHANGED
@@ -27,6 +27,8 @@ export declare class PromptListener {
|
|
|
27
27
|
private resetWatchdog;
|
|
28
28
|
private handleSSEEvent;
|
|
29
29
|
private startPolling;
|
|
30
|
+
/** Recursive setTimeout: waits for fetch to complete before scheduling next poll. */
|
|
31
|
+
private schedulePoll;
|
|
30
32
|
private stopPolling;
|
|
31
33
|
private poll;
|
|
32
34
|
}
|
|
package/dist/daemon/prompt-listener.js
CHANGED
@@ -1,4 +1,4 @@
|
|
|
1
|
-
const SSE_WATCHDOG_TIMEOUT =
|
|
1
|
+
const SSE_WATCHDOG_TIMEOUT = 120_000; // 120s no data → reconnect (servers may not send keepalive frequently)
|
|
2
2
|
const SSE_RECONNECT_DELAY = 3_000;
|
|
3
3
|
const POLL_INTERVAL = 2_000;
|
|
4
4
|
export class PromptListener {
|
|
@@ -24,7 +24,7 @@ export class PromptListener {
|
|
|
24
24
|
this.sseAbort?.abort();
|
|
25
25
|
this.sseAbort = null;
|
|
26
26
|
if (this.pollTimer) {
|
|
27
|
-
|
|
27
|
+
clearTimeout(this.pollTimer);
|
|
28
28
|
this.pollTimer = null;
|
|
29
29
|
}
|
|
30
30
|
}
|
|
@@ -63,34 +63,39 @@ export class PromptListener {
|
|
|
63
63
|
const decoder = new TextDecoder();
|
|
64
64
|
let buffer = "";
|
|
65
65
|
let watchdog = this.resetWatchdog();
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
else if (line === "" && eventData) {
|
|
82
|
-
try {
|
|
83
|
-
const event = JSON.parse(eventData);
|
|
84
|
-
this.handleSSEEvent(event);
|
|
66
|
+
try {
|
|
67
|
+
while (!this.stopped) {
|
|
68
|
+
const { done, value } = await reader.read();
|
|
69
|
+
if (done)
|
|
70
|
+
break;
|
|
71
|
+
// Reset watchdog on any data (including keepalive comments)
|
|
72
|
+
clearTimeout(watchdog);
|
|
73
|
+
watchdog = this.resetWatchdog();
|
|
74
|
+
buffer += decoder.decode(value, { stream: true });
|
|
75
|
+
const lines = buffer.split("\n");
|
|
76
|
+
buffer = lines.pop() ?? "";
|
|
77
|
+
let eventData = "";
|
|
78
|
+
for (const line of lines) {
|
|
79
|
+
if (line.startsWith("data: ")) {
|
|
80
|
+
eventData += (eventData ? "\n" : "") + line.slice(6);
|
|
85
81
|
}
|
|
86
|
-
|
|
87
|
-
|
|
82
|
+
else if (line === "" && eventData) {
|
|
83
|
+
try {
|
|
84
|
+
const event = JSON.parse(eventData);
|
|
85
|
+
this.handleSSEEvent(event);
|
|
86
|
+
}
|
|
87
|
+
catch {
|
|
88
|
+
// Malformed event
|
|
89
|
+
}
|
|
90
|
+
eventData = "";
|
|
88
91
|
}
|
|
89
|
-
eventData = "";
|
|
90
92
|
}
|
|
91
93
|
}
|
|
92
94
|
}
|
|
93
|
-
|
|
95
|
+
finally {
|
|
96
|
+
// Always clear watchdog — prevents leaked timer from aborting next connection
|
|
97
|
+
clearTimeout(watchdog);
|
|
98
|
+
}
|
|
94
99
|
}
|
|
95
100
|
catch (err) {
|
|
96
101
|
if (this.stopped)
|
|
@@ -104,7 +109,7 @@ export class PromptListener {
|
|
|
104
109
|
}
|
|
105
110
|
resetWatchdog() {
|
|
106
111
|
return setTimeout(() => {
|
|
107
|
-
this.log("[sse] Watchdog timeout (
|
|
112
|
+
this.log("[sse] Watchdog timeout (120s no data). Reconnecting...");
|
|
108
113
|
this.sseAbort?.abort();
|
|
109
114
|
}, SSE_WATCHDOG_TIMEOUT);
|
|
110
115
|
}
|
|
@@ -123,11 +128,24 @@ export class PromptListener {
|
|
|
123
128
|
startPolling() {
|
|
124
129
|
if (this.pollTimer)
|
|
125
130
|
return;
|
|
126
|
-
this.
|
|
131
|
+
this.schedulePoll();
|
|
132
|
+
}
|
|
133
|
+
/** Recursive setTimeout: waits for fetch to complete before scheduling next poll. */
|
|
134
|
+
schedulePoll() {
|
|
135
|
+
if (this.pollTimer)
|
|
136
|
+
return;
|
|
137
|
+
this.pollTimer = setTimeout(async () => {
|
|
138
|
+
this.pollTimer = null;
|
|
139
|
+
await this.poll();
|
|
140
|
+
// Schedule next poll only if SSE is still disconnected
|
|
141
|
+
if (!this.sseConnected && !this.stopped) {
|
|
142
|
+
this.schedulePoll();
|
|
143
|
+
}
|
|
144
|
+
}, POLL_INTERVAL);
|
|
127
145
|
}
|
|
128
146
|
stopPolling() {
|
|
129
147
|
if (this.pollTimer) {
|
|
130
|
-
|
|
148
|
+
clearTimeout(this.pollTimer);
|
|
131
149
|
this.pollTimer = null;
|
|
132
150
|
}
|
|
133
151
|
}
|
package/dist/index.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
1
|
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
-
export declare const PACKAGE_VERSION = "0.
|
|
2
|
+
export declare const PACKAGE_VERSION = "0.23.0";
|
|
3
3
|
export declare function createServer(deviceName?: string): McpServer;
|
|
4
4
|
export declare function startStdioServer(deviceName?: string): Promise<void>;
|
package/dist/index.js
CHANGED
|
@@ -5,7 +5,7 @@ import { controlSchema, screenshotSchema, pairSchema } from "./tools/schemas.js"
|
|
|
5
5
|
import { executeControl } from "./tools/control.js";
|
|
6
6
|
import { handleScreenshot } from "./tools/screenshot.js";
|
|
7
7
|
import { handlePair } from "./tools/pair.js";
|
|
8
|
-
export const PACKAGE_VERSION = "0.
|
|
8
|
+
export const PACKAGE_VERSION = "0.23.0";
|
|
9
9
|
export function createServer(deviceName) {
|
|
10
10
|
const server = new McpServer({
|
|
11
11
|
name: "zhihand",
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@zhihand/mcp",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.23.0",
|
|
4
4
|
"private": false,
|
|
5
5
|
"type": "module",
|
|
6
6
|
"description": "ZhiHand MCP Server — phone control tools for Claude Code, Codex, Gemini CLI, and OpenClaw",
|
|
@@ -22,7 +22,8 @@
|
|
|
22
22
|
"files": [
|
|
23
23
|
"README.md",
|
|
24
24
|
"bin/",
|
|
25
|
-
"dist/"
|
|
25
|
+
"dist/",
|
|
26
|
+
"scripts/"
|
|
26
27
|
],
|
|
27
28
|
"publishConfig": {
|
|
28
29
|
"access": "public"
|
|
package/scripts/pty-wrap.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Thin PTY wrapper — runs argv[1:] inside a pseudo-terminal so that tools
|
|
3
|
+
requiring isatty(stdin)==True (e.g. ``gemini -i``) work from a daemon.
|
|
4
|
+
|
|
5
|
+
Output is forwarded to this process's stdout in real time.
|
|
6
|
+
Stdin from the parent process is forwarded to the child's PTY input,
|
|
7
|
+
enabling persistent interactive sessions (send new prompts after startup).
|
|
8
|
+
Exit code matches the child's exit code.
|
|
9
|
+
|
|
10
|
+
Signals (SIGTERM, SIGINT) are forwarded to the child process group so that
|
|
11
|
+
killing this wrapper also kills the tool underneath — no orphaned processes.
|
|
12
|
+
|
|
13
|
+
Usage: python3 pty-wrap.py gemini --approval-mode yolo --model flash -i "prompt"
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import os
|
|
17
|
+
import pty
|
|
18
|
+
import select
|
|
19
|
+
import signal
|
|
20
|
+
import subprocess
|
|
21
|
+
import sys
|
|
22
|
+
import time
|
|
23
|
+
|
|
24
|
+
SHUTDOWN_GRACE_SECONDS = 3
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def main() -> int:
|
|
28
|
+
if len(sys.argv) < 2:
|
|
29
|
+
sys.stderr.write("usage: pty-wrap.py COMMAND [ARGS...]\n")
|
|
30
|
+
return 1
|
|
31
|
+
|
|
32
|
+
master_fd, slave_fd = pty.openpty()
|
|
33
|
+
|
|
34
|
+
try:
|
|
35
|
+
proc = subprocess.Popen(
|
|
36
|
+
sys.argv[1:],
|
|
37
|
+
stdin=slave_fd,
|
|
38
|
+
stdout=slave_fd,
|
|
39
|
+
stderr=slave_fd,
|
|
40
|
+
start_new_session=True,
|
|
41
|
+
close_fds=True,
|
|
42
|
+
)
|
|
43
|
+
except OSError as exc:
|
|
44
|
+
os.close(master_fd)
|
|
45
|
+
os.close(slave_fd)
|
|
46
|
+
sys.stderr.write(f"pty-wrap: exec failed: {exc}\n")
|
|
47
|
+
return 127
|
|
48
|
+
|
|
49
|
+
os.close(slave_fd)
|
|
50
|
+
os.set_blocking(master_fd, False)
|
|
51
|
+
|
|
52
|
+
# Set up stdin forwarding (parent → PTY master → child stdin)
|
|
53
|
+
stdin_fd = sys.stdin.fileno()
|
|
54
|
+
stdin_open = True
|
|
55
|
+
try:
|
|
56
|
+
os.set_blocking(stdin_fd, False)
|
|
57
|
+
except OSError:
|
|
58
|
+
stdin_open = False
|
|
59
|
+
|
|
60
|
+
# Forward SIGTERM/SIGINT to the child's process group
|
|
61
|
+
def _forward_signal(signum: int, _frame: object) -> None:
|
|
62
|
+
try:
|
|
63
|
+
os.killpg(proc.pid, signum)
|
|
64
|
+
except OSError:
|
|
65
|
+
pass
|
|
66
|
+
|
|
67
|
+
signal.signal(signal.SIGTERM, _forward_signal)
|
|
68
|
+
signal.signal(signal.SIGINT, _forward_signal)
|
|
69
|
+
|
|
70
|
+
# Drain PTY master while child is alive, forward stdin to child
|
|
71
|
+
while proc.poll() is None:
|
|
72
|
+
fds = [master_fd]
|
|
73
|
+
if stdin_open:
|
|
74
|
+
fds.append(stdin_fd)
|
|
75
|
+
try:
|
|
76
|
+
ready, _, _ = select.select(fds, [], [], 1.0)
|
|
77
|
+
except (OSError, InterruptedError):
|
|
78
|
+
break
|
|
79
|
+
|
|
80
|
+
if master_fd in ready:
|
|
81
|
+
try:
|
|
82
|
+
data = os.read(master_fd, 8192)
|
|
83
|
+
if data:
|
|
84
|
+
sys.stdout.buffer.write(data)
|
|
85
|
+
sys.stdout.buffer.flush()
|
|
86
|
+
except OSError:
|
|
87
|
+
break
|
|
88
|
+
|
|
89
|
+
if stdin_open and stdin_fd in ready:
|
|
90
|
+
try:
|
|
91
|
+
data = os.read(stdin_fd, 8192)
|
|
92
|
+
if data:
|
|
93
|
+
# Write all bytes, handling partial writes
|
|
94
|
+
offset = 0
|
|
95
|
+
while offset < len(data):
|
|
96
|
+
try:
|
|
97
|
+
written = os.write(master_fd, data[offset:])
|
|
98
|
+
offset += written
|
|
99
|
+
except BlockingIOError:
|
|
100
|
+
# PTY buffer full — wait briefly and retry
|
|
101
|
+
time.sleep(0.01)
|
|
102
|
+
else:
|
|
103
|
+
stdin_open = False # EOF on stdin
|
|
104
|
+
except BlockingIOError:
|
|
105
|
+
# read() got EAGAIN — no data yet, not an error
|
|
106
|
+
pass
|
|
107
|
+
except OSError:
|
|
108
|
+
stdin_open = False
|
|
109
|
+
|
|
110
|
+
# Final drain after child exits
|
|
111
|
+
try:
|
|
112
|
+
while True:
|
|
113
|
+
data = os.read(master_fd, 8192)
|
|
114
|
+
if not data:
|
|
115
|
+
break
|
|
116
|
+
sys.stdout.buffer.write(data)
|
|
117
|
+
sys.stdout.buffer.flush()
|
|
118
|
+
except OSError:
|
|
119
|
+
pass
|
|
120
|
+
|
|
121
|
+
os.close(master_fd)
|
|
122
|
+
|
|
123
|
+
# Ensure the entire process group is dead
|
|
124
|
+
if proc.poll() is None:
|
|
125
|
+
try:
|
|
126
|
+
os.killpg(proc.pid, signal.SIGTERM)
|
|
127
|
+
except OSError:
|
|
128
|
+
pass
|
|
129
|
+
deadline = time.monotonic() + SHUTDOWN_GRACE_SECONDS
|
|
130
|
+
while proc.poll() is None and time.monotonic() < deadline:
|
|
131
|
+
time.sleep(0.1)
|
|
132
|
+
if proc.poll() is None:
|
|
133
|
+
try:
|
|
134
|
+
os.killpg(proc.pid, signal.SIGKILL)
|
|
135
|
+
except OSError:
|
|
136
|
+
pass
|
|
137
|
+
proc.wait(timeout=2)
|
|
138
|
+
|
|
139
|
+
return proc.returncode or 0
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
if __name__ == "__main__":
|
|
143
|
+
sys.exit(main())
|