episoda 0.2.16 → 0.2.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/daemon/daemon-process.js +2319 -185
- package/dist/daemon/daemon-process.js.map +1 -1
- package/dist/hooks/post-commit +137 -0
- package/dist/index.js +337 -715
- package/dist/index.js.map +1 -1
- package/package.json +4 -1
@@ -6,9 +6,16 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __esm = (fn, res) => function __init() {
+return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
+};
 var __commonJS = (cb, mod) => function __require() {
 return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
 };
+var __export = (target, all) => {
+for (var name in all)
+__defProp(target, name, { get: all[name], enumerable: true });
+};
 var __copyProps = (to, from, except, desc) => {
 if (from && typeof from === "object" || typeof from === "function") {
 for (let key of __getOwnPropNames(from))
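Editor's note: `__esm` and `__export`, added in the hunk above, are standard esbuild-style module shims: `__esm` wraps a module body so it executes only once, on the first call to its generated `init_*()` function, and `__export` installs getters for the module's named exports. A minimal sketch of how the bundler uses them (hypothetical module and names, not taken from the package):

// Hypothetical lazily-initialized module in the same style as the bundle output.
var demo_exports = {};
__export(demo_exports, { answer: () => answer });
var answer;
var init_demo = __esm({
  "src/demo.ts"() {
    answer = 42; // body runs only on the first init_demo() call
  }
});
init_demo(); // later calls are no-ops; the cached result is returned
console.log(demo_exports.answer); // 42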
@@ -339,6 +346,9 @@ var require_git_executor = __commonJS({
 return await this.executeBranchExists(command, cwd, options);
 case "branch_has_commits":
 return await this.executeBranchHasCommits(command, cwd, options);
+// EP831: Find branch by prefix pattern
+case "find_branch_by_prefix":
+return await this.executeFindBranchByPrefix(command, cwd, options);
 // EP598: Main branch check for production
 case "main_branch_check":
 return await this.executeMainBranchCheck(cwd, options);
@@ -684,6 +694,32 @@ var require_git_executor = __commonJS({
 /**
 * EP598: Execute main branch check - returns current branch, uncommitted files, and unpushed commits
 */
+/**
+* EP831: Find branch by prefix pattern
+* Searches local and remote branches for one matching the prefix
+*/
+async executeFindBranchByPrefix(command, cwd, options) {
+try {
+const { stdout } = await execAsync("git branch -a", { cwd, timeout: options?.timeout || 1e4 });
+const prefix = command.prefix;
+const branches = stdout.split("\n").map((line) => line.replace(/^[\s*]*/, "").replace("remotes/origin/", "").trim()).filter((branch) => branch && !branch.includes("->"));
+const matchingBranch = branches.find((branch) => branch.startsWith(prefix));
+return {
+success: true,
+output: matchingBranch || "",
+details: {
+branchName: matchingBranch || void 0,
+branchExists: !!matchingBranch
+}
+};
+} catch (error) {
+return {
+success: false,
+error: "UNKNOWN_ERROR",
+output: error.message || "Failed to find branch"
+};
+}
+}
 async executeMainBranchCheck(cwd, options) {
 try {
 let currentBranch = "";
@@ -1489,15 +1525,15 @@ var require_git_executor = __commonJS({
 try {
 const { stdout: gitDir } = await execAsync("git rev-parse --git-dir", { cwd, timeout: 5e3 });
 const gitDirPath = gitDir.trim();
-const
+const fs12 = await Promise.resolve().then(() => __importStar(require("fs"))).then((m) => m.promises);
 const rebaseMergePath = `${gitDirPath}/rebase-merge`;
 const rebaseApplyPath = `${gitDirPath}/rebase-apply`;
 try {
-await
+await fs12.access(rebaseMergePath);
 inRebase = true;
 } catch {
 try {
-await
+await fs12.access(rebaseApplyPath);
 inRebase = true;
 } catch {
 inRebase = false;
@@ -1675,14 +1711,9 @@ var require_websocket_client = __commonJS({
 var https_1 = __importDefault(require("https"));
 var version_1 = require_version();
 var ipv4Agent = new https_1.default.Agent({ family: 4 });
-var INITIAL_RECONNECT_DELAY = 1e3;
-var MAX_RECONNECT_DELAY = 6e4;
-var IDLE_RECONNECT_DELAY = 6e5;
 var MAX_RETRY_DURATION = 6 * 60 * 60 * 1e3;
 var IDLE_THRESHOLD = 60 * 60 * 1e3;
-var
-var RAPID_CLOSE_BACKOFF = 3e4;
-var CLIENT_HEARTBEAT_INTERVAL = 45e3;
+var CLIENT_HEARTBEAT_INTERVAL = 2e4;
 var CLIENT_HEARTBEAT_TIMEOUT = 15e3;
 var CONNECTION_TIMEOUT = 15e3;
 var EpisodaClient2 = class {
@@ -1941,17 +1972,18 @@ var require_websocket_client = __commonJS({
 });
 }
 /**
-* Schedule reconnection with
+* Schedule reconnection with simplified retry logic
+*
+* EP843: Simplified from complex exponential backoff to fast-fail approach
 *
-*
-* -
-* -
-* -
-* -
+* Strategy:
+* - For graceful shutdown (server restart): Quick retry (500ms, 1s, 2s) up to 3 attempts
+* - For other disconnects: 1 retry after 1 second, then stop
+* - Always respect rate limits from server
+* - Surface errors quickly so user can take action
 *
-*
-*
-* - Rapid close detection: if connection closes within 2s, apply longer backoff
+* This replaces the previous 6-hour retry with exponential backoff,
+* which masked problems and delayed error visibility.
 */
 scheduleReconnect() {
 if (this.isIntentionalDisconnect) {
@@ -1970,18 +2002,6 @@ var require_websocket_client = __commonJS({
 clearTimeout(this.heartbeatTimeoutTimer);
 this.heartbeatTimeoutTimer = void 0;
 }
-if (!this.firstDisconnectTime) {
-this.firstDisconnectTime = Date.now();
-}
-const retryDuration = Date.now() - this.firstDisconnectTime;
-if (retryDuration >= MAX_RETRY_DURATION) {
-console.error(`[EpisodaClient] Maximum retry duration (6 hours) exceeded, giving up. Please restart the CLI.`);
-return;
-}
-if (this.reconnectAttempts > 0 && this.reconnectAttempts % 10 === 0) {
-const hoursRemaining = ((MAX_RETRY_DURATION - retryDuration) / (60 * 60 * 1e3)).toFixed(1);
-console.log(`[EpisodaClient] Still attempting to reconnect (attempt ${this.reconnectAttempts}, ${hoursRemaining}h remaining)...`);
-}
 if (this.rateLimitBackoffUntil && Date.now() < this.rateLimitBackoffUntil) {
 const waitTime = this.rateLimitBackoffUntil - Date.now();
 console.log(`[EpisodaClient] Rate limited, waiting ${Math.round(waitTime / 1e3)}s before retry`);
@@ -1992,32 +2012,35 @@ var require_websocket_client = __commonJS({
 }, waitTime);
 return;
 }
-
-
-
-
-
-
-
-
-
-
-} else if (isIdle) {
-baseDelay = IDLE_RECONNECT_DELAY;
+let delay;
+let shouldRetry = true;
+if (this.isGracefulShutdown) {
+if (this.reconnectAttempts >= 7) {
+console.error('[EpisodaClient] Server restart reconnection failed after 7 attempts. Run "episoda dev" to reconnect.');
+shouldRetry = false;
+} else {
+delay = Math.min(500 * Math.pow(2, this.reconnectAttempts), 5e3);
+console.log(`[EpisodaClient] Server restarting, reconnecting in ${delay}ms (attempt ${this.reconnectAttempts + 1}/7)`);
+}
 } else {
-
-
+if (this.reconnectAttempts >= 1) {
+console.error('[EpisodaClient] Connection lost. Retry failed. Check server status or restart with "episoda dev".');
+shouldRetry = false;
+} else {
+delay = 1e3;
+console.log("[EpisodaClient] Connection lost, retrying in 1 second...");
+}
 }
-
-
-
-
-
-
-
-
-console.log(`[EpisodaClient] Reconnecting in ${Math.round(delay / 1e3)}s (attempt ${this.reconnectAttempts}, ${shutdownType}${idleStatus}${rapidStatus})`);
+if (!shouldRetry) {
+this.emit({
+type: "disconnected",
+code: 1006,
+reason: "Reconnection attempts exhausted",
+willReconnect: false
+});
+return;
 }
+this.reconnectAttempts++;
 this.reconnectTimeout = setTimeout(() => {
 console.log("[EpisodaClient] Attempting reconnection...");
 this.connect(this.url, this.token, this.machineId, {
@@ -2026,13 +2049,13 @@ var require_websocket_client = __commonJS({
 osArch: this.osArch,
 daemonPid: this.daemonPid
 }).then(() => {
-console.log("[EpisodaClient] Reconnection successful
+console.log("[EpisodaClient] Reconnection successful");
 this.reconnectAttempts = 0;
 this.isGracefulShutdown = false;
 this.firstDisconnectTime = void 0;
 this.rateLimitBackoffUntil = void 0;
 }).catch((error) => {
-console.error("[EpisodaClient] Reconnection failed:", error);
+console.error("[EpisodaClient] Reconnection failed:", error.message);
 });
 }, delay);
 }
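Editor's note: taken together, the hunks above replace the old six-hour exponential backoff with the fast-fail policy described in the EP843 doc comment. A condensed sketch of the decision logic, lifted out of scheduleReconnect and written standalone with hypothetical names (illustrative only, not code from the package):

// Returns { retry, delayMs } for a given disconnect, mirroring the EP843 policy above.
function nextReconnectDelay(isGracefulShutdown, attempts) {
  if (isGracefulShutdown) {
    // Server restart: 500ms, 1s, 2s, 4s, then capped at 5s, for up to 7 attempts.
    if (attempts >= 7) return { retry: false };
    return { retry: true, delayMs: Math.min(500 * Math.pow(2, attempts), 5000) };
  }
  // Any other disconnect: a single 1-second retry, then surface the error.
  if (attempts >= 1) return { retry: false };
  return { retry: true, delayMs: 1000 };
}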
@@ -2113,36 +2136,36 @@ var require_auth = __commonJS({
 };
 })();
 Object.defineProperty(exports2, "__esModule", { value: true });
-exports2.getConfigDir =
+exports2.getConfigDir = getConfigDir6;
 exports2.getConfigPath = getConfigPath;
-exports2.loadConfig =
+exports2.loadConfig = loadConfig3;
 exports2.saveConfig = saveConfig2;
 exports2.validateToken = validateToken;
-var
-var
-var
+var fs12 = __importStar(require("fs"));
+var path13 = __importStar(require("path"));
+var os5 = __importStar(require("os"));
 var child_process_1 = require("child_process");
 var DEFAULT_CONFIG_FILE = "config.json";
-function
-return process.env.EPISODA_CONFIG_DIR ||
+function getConfigDir6() {
+return process.env.EPISODA_CONFIG_DIR || path13.join(os5.homedir(), ".episoda");
 }
 function getConfigPath(configPath) {
 if (configPath) {
 return configPath;
 }
-return
+return path13.join(getConfigDir6(), DEFAULT_CONFIG_FILE);
 }
 function ensureConfigDir(configPath) {
-const dir =
-const isNew = !
+const dir = path13.dirname(configPath);
+const isNew = !fs12.existsSync(dir);
 if (isNew) {
-
+fs12.mkdirSync(dir, { recursive: true, mode: 448 });
 }
 if (process.platform === "darwin") {
-const nosyncPath =
-if (isNew || !
+const nosyncPath = path13.join(dir, ".nosync");
+if (isNew || !fs12.existsSync(nosyncPath)) {
 try {
-
+fs12.writeFileSync(nosyncPath, "", { mode: 384 });
 (0, child_process_1.execSync)(`xattr -w com.apple.fileprovider.ignore 1 "${dir}"`, {
 stdio: "ignore",
 timeout: 5e3
@@ -2152,13 +2175,13 @@ var require_auth = __commonJS({
 }
 }
 }
-async function
+async function loadConfig3(configPath) {
 const fullPath = getConfigPath(configPath);
-if (!
+if (!fs12.existsSync(fullPath)) {
 return null;
 }
 try {
-const content =
+const content = fs12.readFileSync(fullPath, "utf8");
 const config = JSON.parse(content);
 return config;
 } catch (error) {
@@ -2171,7 +2194,7 @@ var require_auth = __commonJS({
 ensureConfigDir(fullPath);
 try {
 const content = JSON.stringify(config, null, 2);
-
+fs12.writeFileSync(fullPath, content, { mode: 384 });
 } catch (error) {
 throw new Error(`Failed to save config: ${error instanceof Error ? error.message : String(error)}`);
 }
@@ -2275,12 +2298,49 @@ var require_dist = __commonJS({
 }
 });

+// src/utils/port-check.ts
+var port_check_exports = {};
+__export(port_check_exports, {
+getServerPort: () => getServerPort,
+isPortInUse: () => isPortInUse
+});
+async function isPortInUse(port) {
+return new Promise((resolve2) => {
+const server = net2.createServer();
+server.once("error", (err) => {
+if (err.code === "EADDRINUSE") {
+resolve2(true);
+} else {
+resolve2(false);
+}
+});
+server.once("listening", () => {
+server.close();
+resolve2(false);
+});
+server.listen(port);
+});
+}
+function getServerPort() {
+if (process.env.PORT) {
+return parseInt(process.env.PORT, 10);
+}
+return 3e3;
+}
+var net2;
+var init_port_check = __esm({
+"src/utils/port-check.ts"() {
+"use strict";
+net2 = __toESM(require("net"));
+}
+});
+
 // package.json
 var require_package = __commonJS({
 "package.json"(exports2, module2) {
 module2.exports = {
 name: "episoda",
-version: "0.2.
+version: "0.2.18",
 description: "CLI tool for Episoda local development workflow orchestration",
 main: "dist/index.js",
 types: "dist/index.d.ts",
@@ -2311,6 +2371,9 @@ var require_package = __commonJS({
 ws: "^8.18.0",
 zod: "^4.0.10"
 },
+optionalDependencies: {
+"@anthropic-ai/claude-code": "^1.0.0"
+},
 devDependencies: {
 "@episoda/core": "*",
 "@types/node": "^20.11.24",
@@ -2648,7 +2711,7 @@ var IPCServer = class {
 };

 // src/daemon/daemon-process.ts
-var
+var import_core7 = __toESM(require_dist());

 // src/utils/update-checker.ts
 var import_child_process2 = require("child_process");
@@ -3070,6 +3133,160 @@ async function grepDirectoryRecursive(basePath, currentPath, searchPattern, file
 }
 }
 }
+async function handleFileEdit(command, projectPath) {
+const { path: filePath, oldString, newString, replaceAll = false } = command;
+const validPath = validatePath(filePath, projectPath);
+if (!validPath) {
+return {
+success: false,
+error: "Invalid path: directory traversal not allowed"
+};
+}
+try {
+if (!fs4.existsSync(validPath)) {
+return {
+success: false,
+error: "File not found"
+};
+}
+const stats = fs4.statSync(validPath);
+if (stats.isDirectory()) {
+return {
+success: false,
+error: "Path is a directory, not a file"
+};
+}
+const MAX_EDIT_SIZE = 10 * 1024 * 1024;
+if (stats.size > MAX_EDIT_SIZE) {
+return {
+success: false,
+error: `File too large for edit operation: ${stats.size} bytes exceeds limit of ${MAX_EDIT_SIZE} bytes (10MB). Use write-file for large files.`
+};
+}
+const content = fs4.readFileSync(validPath, "utf8");
+const occurrences = content.split(oldString).length - 1;
+if (occurrences === 0) {
+return {
+success: false,
+error: "old_string not found in file. Make sure it matches exactly, including whitespace."
+};
+}
+if (occurrences > 1 && !replaceAll) {
+return {
+success: false,
+error: `old_string found ${occurrences} times. Use replaceAll=true to replace all, or provide more context.`
+};
+}
+const newContent = replaceAll ? content.split(oldString).join(newString) : content.replace(oldString, newString);
+const replacements = replaceAll ? occurrences : 1;
+const tempPath = `${validPath}.tmp.${Date.now()}`;
+fs4.writeFileSync(tempPath, newContent, "utf8");
+fs4.renameSync(tempPath, validPath);
+const newSize = Buffer.byteLength(newContent, "utf8");
+return {
+success: true,
+replacements,
+newSize
+};
+} catch (error) {
+const errMsg = error instanceof Error ? error.message : String(error);
+const isPermissionError = errMsg.includes("EACCES") || errMsg.includes("permission");
+return {
+success: false,
+error: isPermissionError ? "Permission denied" : "Failed to edit file"
+};
+}
+}
+async function handleFileDelete(command, projectPath) {
+const { path: filePath, recursive = false } = command;
+const validPath = validatePath(filePath, projectPath);
+if (!validPath) {
+return {
+success: false,
+error: "Invalid path: directory traversal not allowed"
+};
+}
+const normalizedProjectPath = path5.resolve(projectPath);
+if (validPath === normalizedProjectPath) {
+return {
+success: false,
+error: "Cannot delete project root directory"
+};
+}
+try {
+if (!fs4.existsSync(validPath)) {
+return {
+success: false,
+error: "Path not found"
+};
+}
+const stats = fs4.statSync(validPath);
+const isDirectory = stats.isDirectory();
+if (isDirectory && !recursive) {
+return {
+success: false,
+error: "Cannot delete directory without recursive=true"
+};
+}
+if (isDirectory) {
+fs4.rmSync(validPath, { recursive: true, force: true });
+} else {
+fs4.unlinkSync(validPath);
+}
+return {
+success: true,
+deleted: true,
+pathType: isDirectory ? "directory" : "file"
+};
+} catch (error) {
+const errMsg = error instanceof Error ? error.message : String(error);
+const isPermissionError = errMsg.includes("EACCES") || errMsg.includes("permission");
+return {
+success: false,
+error: isPermissionError ? "Permission denied" : "Failed to delete"
+};
+}
+}
+async function handleFileMkdir(command, projectPath) {
+const { path: dirPath, mode = "0755" } = command;
+const validPath = validatePath(dirPath, projectPath);
+if (!validPath) {
+return {
+success: false,
+error: "Invalid path: directory traversal not allowed"
+};
+}
+try {
+if (fs4.existsSync(validPath)) {
+const stats = fs4.statSync(validPath);
+if (stats.isDirectory()) {
+return {
+success: true,
+created: false
+// Already exists
+};
+} else {
+return {
+success: false,
+error: "Path exists but is a file, not a directory"
+};
+}
+}
+const modeNum = parseInt(mode, 8);
+fs4.mkdirSync(validPath, { recursive: true, mode: modeNum });
+return {
+success: true,
+created: true
+};
+} catch (error) {
+const errMsg = error instanceof Error ? error.message : String(error);
+const isPermissionError = errMsg.includes("EACCES") || errMsg.includes("permission");
+return {
+success: false,
+error: isPermissionError ? "Permission denied" : "Failed to create directory"
+};
+}
+}

 // src/daemon/handlers/exec-handler.ts
 var import_child_process3 = require("child_process");
@@ -3312,6 +3529,10 @@ async function ensureCloudflared() {
 // src/tunnel/tunnel-manager.ts
 var import_child_process5 = require("child_process");
 var import_events = require("events");
+var fs6 = __toESM(require("fs"));
+var path7 = __toESM(require("path"));
+var os2 = __toESM(require("os"));
+var TUNNEL_PID_DIR = path7.join(os2.homedir(), ".episoda", "tunnels");
 var TUNNEL_URL_REGEX = /https:\/\/[a-z0-9-]+\.trycloudflare\.com/i;
 var DEFAULT_RECONNECT_CONFIG = {
 maxRetries: 5,
@@ -3324,8 +3545,204 @@ var TunnelManager = class extends import_events.EventEmitter {
 super();
 this.tunnelStates = /* @__PURE__ */ new Map();
 this.cloudflaredPath = null;
+/**
+* EP877: Mutex locks to prevent concurrent tunnel starts for the same module
+*/
+this.startLocks = /* @__PURE__ */ new Map();
 this.reconnectConfig = { ...DEFAULT_RECONNECT_CONFIG, ...config };
 }
+/**
+* EP877: Ensure PID directory exists
+* EP904: Added proper error handling and logging
+*/
+ensurePidDir() {
+try {
+if (!fs6.existsSync(TUNNEL_PID_DIR)) {
+console.log(`[Tunnel] EP904: Creating PID directory: ${TUNNEL_PID_DIR}`);
+fs6.mkdirSync(TUNNEL_PID_DIR, { recursive: true });
+console.log(`[Tunnel] EP904: PID directory created successfully`);
+}
+} catch (error) {
+console.error(`[Tunnel] EP904: Failed to create PID directory ${TUNNEL_PID_DIR}:`, error);
+throw error;
+}
+}
+/**
+* EP877: Get PID file path for a module
+*/
+getPidFilePath(moduleUid) {
+return path7.join(TUNNEL_PID_DIR, `${moduleUid}.pid`);
+}
+/**
+* EP877: Write PID to file for tracking across restarts
+* EP904: Enhanced logging and error visibility
+*/
+writePidFile(moduleUid, pid) {
+try {
+this.ensurePidDir();
+const pidPath = this.getPidFilePath(moduleUid);
+fs6.writeFileSync(pidPath, pid.toString(), "utf8");
+console.log(`[Tunnel] EP904: Wrote PID ${pid} for ${moduleUid} to ${pidPath}`);
+} catch (error) {
+console.error(`[Tunnel] EP904: Failed to write PID file for ${moduleUid}:`, error);
+}
+}
+/**
+* EP877: Read PID from file
+*/
+readPidFile(moduleUid) {
+try {
+const pidPath = this.getPidFilePath(moduleUid);
+if (!fs6.existsSync(pidPath)) {
+return null;
+}
+const pid = parseInt(fs6.readFileSync(pidPath, "utf8").trim(), 10);
+return isNaN(pid) ? null : pid;
+} catch (error) {
+return null;
+}
+}
+/**
+* EP877: Remove PID file
+*/
+removePidFile(moduleUid) {
+try {
+const pidPath = this.getPidFilePath(moduleUid);
+if (fs6.existsSync(pidPath)) {
+fs6.unlinkSync(pidPath);
+console.log(`[Tunnel] EP877: Removed PID file for ${moduleUid}`);
+}
+} catch (error) {
+console.error(`[Tunnel] EP877: Failed to remove PID file for ${moduleUid}:`, error);
+}
+}
+/**
+* EP877: Check if a process is running by PID
+*/
+isProcessRunning(pid) {
+try {
+process.kill(pid, 0);
+return true;
+} catch {
+return false;
+}
+}
+/**
+* EP877: Kill a process by PID
+*/
+killByPid(pid, signal = "SIGTERM") {
+try {
+process.kill(pid, signal);
+console.log(`[Tunnel] EP877: Sent ${signal} to PID ${pid}`);
+return true;
+} catch (error) {
+return false;
+}
+}
+/**
+* EP877: Find all cloudflared processes using pgrep
+*/
+findCloudflaredProcesses() {
+try {
+const output = (0, import_child_process5.execSync)("pgrep -f cloudflared", { encoding: "utf8" });
+return output.trim().split("\n").map((pid) => parseInt(pid, 10)).filter((pid) => !isNaN(pid));
+} catch {
+return [];
+}
+}
+/**
+* EP904: Get the port a cloudflared process is tunneling to
+* Extracts port from process command line arguments
+*/
+getProcessPort(pid) {
+try {
+const output = (0, import_child_process5.execSync)(`ps -p ${pid} -o args=`, { encoding: "utf8" }).trim();
+const portMatch = output.match(/--url\s+https?:\/\/localhost:(\d+)/);
+if (portMatch) {
+return parseInt(portMatch[1], 10);
+}
+return null;
+} catch {
+return null;
+}
+}
+/**
+* EP904: Find cloudflared processes on a specific port
+*/
+findCloudflaredOnPort(port) {
+const allProcesses = this.findCloudflaredProcesses();
+return allProcesses.filter((pid) => this.getProcessPort(pid) === port);
+}
+/**
+* EP904: Kill all cloudflared processes on a specific port
+* Returns the PIDs that were killed
+*/
+async killCloudflaredOnPort(port) {
+const pidsOnPort = this.findCloudflaredOnPort(port);
+const killed = [];
+for (const pid of pidsOnPort) {
+const isTracked = Array.from(this.tunnelStates.values()).some((s) => s.info.pid === pid);
+console.log(`[Tunnel] EP904: Found cloudflared PID ${pid} on port ${port} (tracked: ${isTracked})`);
+this.killByPid(pid, "SIGTERM");
+await new Promise((resolve2) => setTimeout(resolve2, 500));
+if (this.isProcessRunning(pid)) {
+this.killByPid(pid, "SIGKILL");
+await new Promise((resolve2) => setTimeout(resolve2, 200));
+}
+killed.push(pid);
+}
+if (killed.length > 0) {
+console.log(`[Tunnel] EP904: Killed ${killed.length} cloudflared process(es) on port ${port}: ${killed.join(", ")}`);
+}
+return killed;
+}
+/**
+* EP877: Cleanup orphaned cloudflared processes on startup
+* Kills any cloudflared processes that have PID files but aren't tracked in memory,
+* and any cloudflared processes that don't have corresponding PID files.
+*/
+async cleanupOrphanedProcesses() {
+const cleaned = [];
+try {
+this.ensurePidDir();
+const pidFiles = fs6.readdirSync(TUNNEL_PID_DIR).filter((f) => f.endsWith(".pid"));
+for (const pidFile of pidFiles) {
+const moduleUid = pidFile.replace(".pid", "");
+const pid = this.readPidFile(moduleUid);
+if (pid && this.isProcessRunning(pid)) {
+if (!this.tunnelStates.has(moduleUid)) {
+console.log(`[Tunnel] EP877: Found orphaned process PID ${pid} for ${moduleUid}, killing...`);
+this.killByPid(pid, "SIGTERM");
+await new Promise((resolve2) => setTimeout(resolve2, 1e3));
+if (this.isProcessRunning(pid)) {
+this.killByPid(pid, "SIGKILL");
+}
+cleaned.push(pid);
+}
+}
+this.removePidFile(moduleUid);
+}
+const runningPids = this.findCloudflaredProcesses();
+const trackedPids = Array.from(this.tunnelStates.values()).map((s) => s.info.pid).filter((pid) => pid !== void 0);
+for (const pid of runningPids) {
+if (!trackedPids.includes(pid) && !cleaned.includes(pid)) {
+console.log(`[Tunnel] EP877: Found untracked cloudflared process PID ${pid}, killing...`);
+this.killByPid(pid, "SIGTERM");
+await new Promise((resolve2) => setTimeout(resolve2, 500));
+if (this.isProcessRunning(pid)) {
+this.killByPid(pid, "SIGKILL");
+}
+cleaned.push(pid);
+}
+}
+if (cleaned.length > 0) {
+console.log(`[Tunnel] EP877: Cleaned up ${cleaned.length} orphaned cloudflared processes: ${cleaned.join(", ")}`);
+}
+} catch (error) {
+console.error("[Tunnel] EP877: Error during orphan cleanup:", error);
+}
+return { cleaned: cleaned.length, pids: cleaned };
+}
 /**
 * Emit typed tunnel events
 */
@@ -3334,10 +3751,16 @@ var TunnelManager = class extends import_events.EventEmitter {
 }
 /**
 * Initialize the tunnel manager
-* Ensures cloudflared is available
+* Ensures cloudflared is available and cleans up orphaned processes
+*
+* EP877: Now includes orphaned process cleanup on startup
 */
 async initialize() {
 this.cloudflaredPath = await ensureCloudflared();
+const cleanup = await this.cleanupOrphanedProcesses();
+if (cleanup.cleaned > 0) {
+console.log(`[Tunnel] EP877: Initialization cleaned up ${cleanup.cleaned} orphaned processes`);
+}
 }
 /**
 * EP672-9: Calculate delay for exponential backoff
@@ -3411,6 +3834,9 @@ var TunnelManager = class extends import_events.EventEmitter {
 });
 tunnelInfo.process = process2;
 tunnelInfo.pid = process2.pid;
+if (process2.pid) {
+this.writePidFile(moduleUid, process2.pid);
+}
 const state = existingState || {
 info: tunnelInfo,
 options,
@@ -3512,9 +3938,31 @@ var TunnelManager = class extends import_events.EventEmitter {
 }
 /**
 * Start a tunnel for a module
+*
+* EP877: Uses mutex lock to prevent concurrent starts for the same module
 */
 async startTunnel(options) {
 const { moduleUid } = options;
+const existingLock = this.startLocks.get(moduleUid);
+if (existingLock) {
+console.log(`[Tunnel] EP877: Waiting for existing start operation for ${moduleUid}`);
+return existingLock;
+}
+const startPromise = this.startTunnelWithLock(options);
+this.startLocks.set(moduleUid, startPromise);
+try {
+return await startPromise;
+} finally {
+this.startLocks.delete(moduleUid);
+}
+}
+/**
+* EP877: Internal start implementation with lock already held
+* EP901: Enhanced to clean up ALL orphaned cloudflared processes before starting
+* EP904: Added port-based deduplication to prevent multiple tunnels on same port
+*/
+async startTunnelWithLock(options) {
+const { moduleUid, port = 3e3 } = options;
 const existingState = this.tunnelStates.get(moduleUid);
 if (existingState) {
 if (existingState.info.status === "connected") {
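Editor's note: the EP877 lock in the hunk above is a promise-map mutex: callers that arrive while a start is already in flight await the stored promise instead of spawning a second cloudflared process, and the entry is removed once the operation settles. The general shape of the pattern, reduced to its essentials with hypothetical names (a sketch, not code from the package):

const locks = new Map();
function withLock(key, task) {
  const pending = locks.get(key);
  if (pending) return pending; // join the in-flight operation
  const p = Promise.resolve()
    .then(task)
    .finally(() => locks.delete(key)); // release once settled
  locks.set(key, p);
  return p;
}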
@@ -3522,14 +3970,45 @@ var TunnelManager = class extends import_events.EventEmitter {
 }
 await this.stopTunnel(moduleUid);
 }
+const orphanPid = this.readPidFile(moduleUid);
+if (orphanPid && this.isProcessRunning(orphanPid)) {
+console.log(`[Tunnel] EP877: Killing orphaned process ${orphanPid} for ${moduleUid} before starting new tunnel`);
+this.killByPid(orphanPid, "SIGTERM");
+await new Promise((resolve2) => setTimeout(resolve2, 500));
+if (this.isProcessRunning(orphanPid)) {
+this.killByPid(orphanPid, "SIGKILL");
+}
+this.removePidFile(moduleUid);
+}
+const killedOnPort = await this.killCloudflaredOnPort(port);
+if (killedOnPort.length > 0) {
+console.log(`[Tunnel] EP904: Pre-start port cleanup killed ${killedOnPort.length} process(es) on port ${port}`);
+await new Promise((resolve2) => setTimeout(resolve2, 300));
+}
+const cleanup = await this.cleanupOrphanedProcesses();
+if (cleanup.cleaned > 0) {
+console.log(`[Tunnel] EP901: Pre-start cleanup removed ${cleanup.cleaned} orphaned processes`);
+}
 return this.startTunnelProcess(options);
 }
 /**
 * Stop a tunnel for a module
+*
+* EP877: Enhanced to handle cleanup via PID file when in-memory state is missing
 */
 async stopTunnel(moduleUid) {
 const state = this.tunnelStates.get(moduleUid);
 if (!state) {
+const orphanPid = this.readPidFile(moduleUid);
+if (orphanPid && this.isProcessRunning(orphanPid)) {
+console.log(`[Tunnel] EP877: Stopping orphaned process ${orphanPid} for ${moduleUid} via PID file`);
+this.killByPid(orphanPid, "SIGTERM");
+await new Promise((resolve2) => setTimeout(resolve2, 1e3));
+if (this.isProcessRunning(orphanPid)) {
+this.killByPid(orphanPid, "SIGKILL");
+}
+}
+this.removePidFile(moduleUid);
 return;
 }
 state.intentionallyStopped = true;
@@ -3553,6 +4032,7 @@ var TunnelManager = class extends import_events.EventEmitter {
 });
 });
 }
+this.removePidFile(moduleUid);
 this.tunnelStates.delete(moduleUid);
 this.emitEvent({ type: "stopped", moduleUid });
 }
@@ -3602,31 +4082,599 @@ function getTunnelManager() {
|
|
|
3602
4082
|
return tunnelManagerInstance;
|
|
3603
4083
|
}
|
|
3604
4084
|
|
|
3605
|
-
// src/
|
|
4085
|
+
// src/tunnel/tunnel-api.ts
|
|
4086
|
+
var import_core5 = __toESM(require_dist());
|
|
4087
|
+
async function clearTunnelUrl(moduleUid) {
|
|
4088
|
+
if (!moduleUid || moduleUid === "LOCAL") {
|
|
4089
|
+
return;
|
|
4090
|
+
}
|
|
4091
|
+
const config = await (0, import_core5.loadConfig)();
|
|
4092
|
+
if (!config?.access_token) {
|
|
4093
|
+
return;
|
|
4094
|
+
}
|
|
4095
|
+
try {
|
|
4096
|
+
const apiUrl = config.api_url || "https://episoda.dev";
|
|
4097
|
+
await fetch(`${apiUrl}/api/modules/${moduleUid}/tunnel`, {
|
|
4098
|
+
method: "DELETE",
|
|
4099
|
+
headers: {
|
|
4100
|
+
"Authorization": `Bearer ${config.access_token}`
|
|
4101
|
+
}
|
|
4102
|
+
});
|
|
4103
|
+
} catch {
|
|
4104
|
+
}
|
|
4105
|
+
}
|
|
4106
|
+
|
|
4107
|
+
// src/agent/claude-binary.ts
|
|
3606
4108
|
var import_child_process6 = require("child_process");
|
|
4109
|
+
var path8 = __toESM(require("path"));
|
|
4110
|
+
var fs7 = __toESM(require("fs"));
|
|
4111
|
+
var cachedBinaryPath = null;
|
|
4112
|
+
function isValidClaudeBinary(binaryPath) {
|
|
4113
|
+
try {
|
|
4114
|
+
fs7.accessSync(binaryPath, fs7.constants.X_OK);
|
|
4115
|
+
const version = (0, import_child_process6.execSync)(`"${binaryPath}" --version`, {
|
|
4116
|
+
encoding: "utf-8",
|
|
4117
|
+
timeout: 5e3,
|
|
4118
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
4119
|
+
}).trim();
|
|
4120
|
+
if (version && /\d+\.\d+/.test(version)) {
|
|
4121
|
+
console.log(`[AgentManager] Found Claude Code at ${binaryPath}: v${version}`);
|
|
4122
|
+
return true;
|
|
4123
|
+
}
|
|
4124
|
+
return false;
|
|
4125
|
+
} catch {
|
|
4126
|
+
return false;
|
|
4127
|
+
}
|
|
4128
|
+
}
|
|
4129
|
+
async function ensureClaudeBinary() {
|
|
4130
|
+
if (cachedBinaryPath) {
|
|
4131
|
+
return cachedBinaryPath;
|
|
4132
|
+
}
|
|
4133
|
+
try {
|
|
4134
|
+
const pathResult = (0, import_child_process6.execSync)("which claude", {
|
|
4135
|
+
encoding: "utf-8",
|
|
4136
|
+
timeout: 5e3,
|
|
4137
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
4138
|
+
}).trim();
|
|
4139
|
+
if (pathResult && isValidClaudeBinary(pathResult)) {
|
|
4140
|
+
cachedBinaryPath = pathResult;
|
|
4141
|
+
return cachedBinaryPath;
|
|
4142
|
+
}
|
|
4143
|
+
} catch {
|
|
4144
|
+
}
|
|
4145
|
+
const bundledPaths = [
|
|
4146
|
+
// In production: node_modules/.bin/claude
|
|
4147
|
+
path8.join(__dirname, "..", "..", "node_modules", ".bin", "claude"),
|
|
4148
|
+
// In monorepo development: packages/episoda/node_modules/.bin/claude
|
|
4149
|
+
path8.join(__dirname, "..", "..", "..", "..", "node_modules", ".bin", "claude"),
|
|
4150
|
+
// Root monorepo node_modules
|
|
4151
|
+
path8.join(__dirname, "..", "..", "..", "..", "..", "node_modules", ".bin", "claude")
|
|
4152
|
+
];
|
|
4153
|
+
for (const bundledPath of bundledPaths) {
|
|
4154
|
+
if (fs7.existsSync(bundledPath) && isValidClaudeBinary(bundledPath)) {
|
|
4155
|
+
cachedBinaryPath = bundledPath;
|
|
4156
|
+
return cachedBinaryPath;
|
|
4157
|
+
}
|
|
4158
|
+
}
|
|
4159
|
+
try {
|
|
4160
|
+
const npxResult = (0, import_child_process6.execSync)("npx --yes @anthropic-ai/claude-code --version", {
|
|
4161
|
+
encoding: "utf-8",
|
|
4162
|
+
timeout: 3e4,
|
|
4163
|
+
// npx might need to download
|
|
4164
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
4165
|
+
}).trim();
|
|
4166
|
+
if (npxResult && /\d+\.\d+/.test(npxResult)) {
|
|
4167
|
+
cachedBinaryPath = "npx:@anthropic-ai/claude-code";
|
|
4168
|
+
console.log(`[AgentManager] Using npx to run Claude Code: v${npxResult}`);
|
|
4169
|
+
return cachedBinaryPath;
|
|
4170
|
+
}
|
|
4171
|
+
} catch {
|
|
4172
|
+
}
|
|
4173
|
+
throw new Error(
|
|
4174
|
+
"Claude Code not found. Please install it globally with: npm install -g @anthropic-ai/claude-code"
|
|
4175
|
+
);
|
|
4176
|
+
}
|
|
3607
4177
|
|
|
3608
|
-
// src/
|
|
3609
|
-
var
|
|
3610
|
-
|
|
4178
|
+
// src/agent/agent-manager.ts
|
|
4179
|
+
var import_child_process7 = require("child_process");
|
|
4180
|
+
var path9 = __toESM(require("path"));
|
|
4181
|
+
var fs8 = __toESM(require("fs"));
|
|
4182
|
+
var os3 = __toESM(require("os"));
|
|
4183
|
+
var instance = null;
|
|
4184
|
+
function getAgentManager() {
|
|
4185
|
+
if (!instance) {
|
|
4186
|
+
instance = new AgentManager();
|
|
4187
|
+
}
|
|
4188
|
+
return instance;
|
|
4189
|
+
}
|
|
4190
|
+
var AgentManager = class {
|
|
4191
|
+
constructor() {
|
|
4192
|
+
this.sessions = /* @__PURE__ */ new Map();
|
|
4193
|
+
this.processes = /* @__PURE__ */ new Map();
|
|
4194
|
+
this.initialized = false;
|
|
4195
|
+
this.pidDir = path9.join(os3.homedir(), ".episoda", "agent-pids");
|
|
4196
|
+
}
|
|
4197
|
+
/**
|
|
4198
|
+
* Initialize the agent manager
|
|
4199
|
+
* - Ensure Claude Code is available
|
|
4200
|
+
* - Clean up any orphaned processes from previous daemon runs
|
|
4201
|
+
*/
|
|
4202
|
+
async initialize() {
|
|
4203
|
+
if (this.initialized) {
|
|
4204
|
+
return;
|
|
4205
|
+
}
|
|
4206
|
+
console.log("[AgentManager] Initializing...");
|
|
4207
|
+
if (!fs8.existsSync(this.pidDir)) {
|
|
4208
|
+
fs8.mkdirSync(this.pidDir, { recursive: true });
|
|
4209
|
+
}
|
|
4210
|
+
await this.cleanupOrphanedProcesses();
|
|
4211
|
+
try {
|
|
4212
|
+
await ensureClaudeBinary();
|
|
4213
|
+
console.log("[AgentManager] Claude Code binary verified");
|
|
4214
|
+
} catch (error) {
|
|
4215
|
+
console.warn("[AgentManager] Claude Code not available:", error instanceof Error ? error.message : error);
|
|
4216
|
+
}
|
|
4217
|
+
this.initialized = true;
|
|
4218
|
+
console.log("[AgentManager] Initialized");
|
|
4219
|
+
}
|
|
4220
|
+
/**
|
|
4221
|
+
* Start a new agent session
|
|
4222
|
+
*
|
|
4223
|
+
* Creates the session record but doesn't spawn the process yet.
|
|
4224
|
+
* The process is spawned on the first message.
|
|
4225
|
+
*/
|
|
4226
|
+
async startSession(options) {
|
|
4227
|
+
const { sessionId, moduleId, moduleUid, projectPath, message, credentials, systemPrompt, onChunk, onComplete, onError } = options;
|
|
4228
|
+
if (this.sessions.has(sessionId)) {
|
|
4229
|
+
return { success: false, error: "Session already exists" };
|
|
4230
|
+
}
|
|
4231
|
+
const oauthToken = credentials?.oauthToken || credentials?.apiKey;
|
|
4232
|
+
if (!oauthToken) {
|
|
4233
|
+
return { success: false, error: "Missing OAuth token in credentials. Please connect your Claude account in Settings." };
|
|
4234
|
+
}
|
|
4235
|
+
credentials.oauthToken = oauthToken;
|
|
4236
|
+
try {
|
|
4237
|
+
await ensureClaudeBinary();
|
|
4238
|
+
} catch (error) {
|
|
4239
|
+
return {
|
|
4240
|
+
success: false,
|
|
4241
|
+
error: error instanceof Error ? error.message : "Claude Code not available"
|
|
4242
|
+
};
|
|
4243
|
+
}
|
|
4244
|
+
const session = {
|
|
4245
|
+
sessionId,
|
|
4246
|
+
moduleId,
|
|
4247
|
+
moduleUid,
|
|
4248
|
+
projectPath,
|
|
4249
|
+
credentials,
|
|
4250
|
+
systemPrompt,
|
|
4251
|
+
status: "starting",
|
|
4252
|
+
startedAt: /* @__PURE__ */ new Date(),
|
|
4253
|
+
lastActivityAt: /* @__PURE__ */ new Date()
|
|
4254
|
+
};
|
|
4255
|
+
this.sessions.set(sessionId, session);
|
|
4256
|
+
console.log(`[AgentManager] Started session ${sessionId} for ${moduleUid}`);
|
|
4257
|
+
return this.sendMessage({
|
|
4258
|
+
sessionId,
|
|
4259
|
+
message,
|
|
4260
|
+
isFirstMessage: true,
|
|
4261
|
+
onChunk,
|
|
4262
|
+
onComplete,
|
|
4263
|
+
onError
|
|
4264
|
+
});
|
|
4265
|
+
}
|
|
4266
|
+
/**
|
|
4267
|
+
* Send a message to an agent session
|
|
4268
|
+
*
|
|
4269
|
+
* Spawns a new Claude Code process for each message.
|
|
4270
|
+
* Uses --print for non-interactive mode and --output-format stream-json for structured output.
|
|
4271
|
+
* Subsequent messages use --resume with the claudeSessionId for conversation continuity.
|
|
4272
|
+
*/
|
|
4273
|
+
async sendMessage(options) {
|
|
4274
|
+
const { sessionId, message, isFirstMessage, claudeSessionId, onChunk, onComplete, onError } = options;
|
|
4275
|
+
const session = this.sessions.get(sessionId);
|
|
4276
|
+
if (!session) {
|
|
4277
|
+
return { success: false, error: "Session not found" };
|
|
4278
|
+
}
|
|
4279
|
+
session.lastActivityAt = /* @__PURE__ */ new Date();
|
|
4280
|
+
session.status = "running";
|
|
4281
|
+
try {
|
|
4282
|
+
const binaryPath = await ensureClaudeBinary();
|
|
4283
|
+
const args = [
|
|
4284
|
+
"--print",
|
|
4285
|
+
// Non-interactive mode
|
|
4286
|
+
"--output-format",
|
|
4287
|
+
"stream-json",
|
|
4288
|
+
// Structured streaming output
|
|
4289
|
+
"--verbose"
|
|
4290
|
+
// Required for stream-json with --print
|
|
4291
|
+
];
|
|
4292
|
+
if (isFirstMessage && session.systemPrompt) {
|
|
4293
|
+
args.push("--system-prompt", session.systemPrompt);
|
|
4294
|
+
}
|
|
4295
|
+
if (claudeSessionId) {
|
|
4296
|
+
args.push("--resume", claudeSessionId);
|
|
4297
|
+
session.claudeSessionId = claudeSessionId;
|
|
4298
|
+
}
|
|
4299
|
+
args.push("--", message);
|
|
4300
|
+
console.log(`[AgentManager] Spawning Claude Code for session ${sessionId}`);
|
|
4301
|
+
console.log(`[AgentManager] Command: ${binaryPath} ${args.join(" ").substring(0, 100)}...`);
|
|
4302
|
+
let spawnCmd;
|
|
4303
|
+
let spawnArgs;
|
|
4304
|
+
if (binaryPath.startsWith("npx:")) {
|
|
4305
|
+
spawnCmd = "npx";
|
|
4306
|
+
spawnArgs = ["--yes", binaryPath.replace("npx:", ""), ...args];
|
|
4307
|
+
} else {
|
|
4308
|
+
spawnCmd = binaryPath;
|
|
4309
|
+
spawnArgs = args;
|
|
4310
|
+
}
|
|
4311
|
+
const claudeDir = path9.join(os3.homedir(), ".claude");
|
|
4312
|
+
const credentialsPath = path9.join(claudeDir, ".credentials.json");
|
|
4313
|
+
if (!fs8.existsSync(claudeDir)) {
|
|
4314
|
+
fs8.mkdirSync(claudeDir, { recursive: true });
|
|
4315
|
+
}
|
|
4316
|
+
const credentialsContent = JSON.stringify({
|
|
4317
|
+
claudeAiOauth: {
|
|
4318
|
+
accessToken: session.credentials.oauthToken
|
|
4319
|
+
}
|
|
4320
|
+
}, null, 2);
|
|
4321
|
+
fs8.writeFileSync(credentialsPath, credentialsContent, { mode: 384 });
|
|
4322
|
+
console.log("[AgentManager] EP936: Wrote OAuth credentials to ~/.claude/.credentials.json");
|
|
4323
|
+
const childProcess = (0, import_child_process7.spawn)(spawnCmd, spawnArgs, {
|
|
4324
|
+
cwd: session.projectPath,
|
|
4325
|
+
env: {
|
|
4326
|
+
...process.env,
|
|
4327
|
+
// Disable color output for cleaner JSON parsing
|
|
4328
|
+
NO_COLOR: "1",
|
|
4329
|
+
FORCE_COLOR: "0"
|
|
4330
|
+
},
|
|
4331
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
4332
|
+
});
|
|
4333
|
+
this.processes.set(sessionId, childProcess);
|
|
4334
|
+
childProcess.stdin?.end();
|
|
4335
|
+
if (childProcess.pid) {
|
|
4336
|
+
session.pid = childProcess.pid;
|
|
4337
|
+
this.writePidFile(sessionId, childProcess.pid);
|
|
4338
|
+
}
|
|
4339
|
+
childProcess.stderr?.on("data", (data) => {
|
|
4340
|
+
console.error(`[AgentManager] stderr: ${data.toString()}`);
|
|
4341
|
+
});
|
|
4342
|
+
childProcess.on("error", (error) => {
|
|
4343
|
+
console.error(`[AgentManager] Process spawn error:`, error);
|
|
4344
|
+
});
|
|
4345
|
+
let stdoutBuffer = "";
|
|
4346
|
+
let extractedSessionId;
|
|
4347
|
+
childProcess.stdout?.on("data", (data) => {
|
|
4348
|
+
stdoutBuffer += data.toString();
|
|
4349
|
+
const lines = stdoutBuffer.split("\n");
|
|
4350
|
+
stdoutBuffer = lines.pop() || "";
|
|
4351
|
+
for (const line of lines) {
|
|
4352
|
+
if (!line.trim()) continue;
|
|
4353
|
+
try {
|
|
4354
|
+
const parsed = JSON.parse(line);
|
|
4355
|
+
switch (parsed.type) {
|
|
4356
|
+
case "assistant":
|
|
4357
|
+
if (parsed.message?.content) {
|
|
4358
|
+
for (const block of parsed.message.content) {
|
|
4359
|
+
if (block.type === "text" && block.text) {
|
|
4360
|
+
onChunk(block.text);
|
|
4361
|
+
}
|
|
4362
|
+
}
|
|
4363
|
+
}
|
|
4364
|
+
break;
|
|
4365
|
+
case "content_block_delta":
|
|
4366
|
+
if (parsed.delta?.text) {
|
|
4367
|
+
onChunk(parsed.delta.text);
|
|
4368
|
+
}
|
|
4369
|
+
break;
|
|
4370
|
+
case "result":
|
|
4371
|
+
if (parsed.session_id) {
|
|
4372
|
+
extractedSessionId = parsed.session_id;
|
|
4373
|
+
session.claudeSessionId = extractedSessionId;
|
|
4374
|
+
}
|
|
4375
|
+
if (parsed.result?.session_id) {
|
|
4376
|
+
extractedSessionId = parsed.result.session_id;
|
|
4377
|
+
session.claudeSessionId = extractedSessionId;
|
|
4378
|
+
}
|
|
4379
|
+
break;
|
|
4380
|
+
case "system":
|
|
4381
|
+
if (parsed.session_id) {
|
|
4382
|
+
extractedSessionId = parsed.session_id;
|
|
4383
|
+
session.claudeSessionId = extractedSessionId;
|
|
4384
|
+
}
|
|
4385
|
+
break;
|
|
4386
|
+
case "error":
|
|
4387
|
+
onError(parsed.error?.message || parsed.message || "Unknown error from Claude Code");
|
|
4388
|
+
break;
|
|
4389
|
+
default:
|
|
4390
|
+
console.log(`[AgentManager] Unknown stream-json type: ${parsed.type}`);
|
|
4391
|
+
}
|
|
4392
|
+
} catch (parseError) {
|
|
4393
|
+
if (line.trim()) {
|
|
4394
|
+
onChunk(line + "\n");
|
|
4395
|
+
}
|
|
4396
|
+
}
|
|
4397
|
+
}
|
|
4398
|
+
});
|
|
4399
|
+
let stderrBuffer = "";
|
|
4400
|
+
childProcess.stderr?.on("data", (data) => {
|
|
4401
|
+
stderrBuffer += data.toString();
|
|
4402
|
+
});
|
|
4403
|
+
childProcess.on("exit", (code, signal) => {
|
|
4404
|
+
console.log(`[AgentManager] Claude Code exited for session ${sessionId}: code=${code}, signal=${signal}`);
|
|
4405
|
+
this.processes.delete(sessionId);
|
|
4406
|
+
this.removePidFile(sessionId);
|
|
4407
|
+
if (code === 0) {
|
|
4408
|
+
session.status = "stopped";
|
|
4409
|
+
onComplete(extractedSessionId || session.claudeSessionId);
|
|
4410
|
+
} else if (signal === "SIGINT") {
|
|
4411
|
+
session.status = "stopped";
|
|
4412
|
+
onComplete(extractedSessionId || session.claudeSessionId);
|
|
4413
|
+
} else {
|
|
4414
|
+
session.status = "error";
|
|
4415
|
+
const errorMsg = stderrBuffer.trim() || `Process exited with code ${code}`;
|
|
4416
|
+
onError(errorMsg);
|
|
4417
|
+
}
|
|
4418
|
+
});
|
|
4419
|
+
childProcess.on("error", (error) => {
|
|
4420
|
+
console.error(`[AgentManager] Process error for session ${sessionId}:`, error);
|
|
4421
|
+
session.status = "error";
|
|
4422
|
+
this.processes.delete(sessionId);
|
|
4423
|
+
this.removePidFile(sessionId);
|
|
4424
|
+
onError(error.message);
|
|
4425
|
+
});
|
|
4426
|
+
return { success: true };
|
|
4427
|
+
} catch (error) {
|
|
4428
|
+
session.status = "error";
|
|
4429
|
+
const errorMsg = error instanceof Error ? error.message : String(error);
|
|
4430
|
+
onError(errorMsg);
|
|
4431
|
+
return { success: false, error: errorMsg };
|
|
4432
|
+
}
|
|
4433
|
+
}
|
|
4434
|
+
/**
|
|
4435
|
+
* Abort an agent session (SIGINT)
|
|
4436
|
+
*
|
|
4437
|
+
* Sends SIGINT to the Claude Code process to abort the current operation.
|
|
4438
|
+
*/
|
|
4439
|
+
async abortSession(sessionId) {
|
|
4440
|
+
const process2 = this.processes.get(sessionId);
|
|
4441
|
+
if (process2 && !process2.killed) {
|
|
4442
|
+
console.log(`[AgentManager] Aborting session ${sessionId} with SIGINT`);
|
|
4443
|
+
process2.kill("SIGINT");
|
|
4444
|
+
}
|
|
4445
|
+
const session = this.sessions.get(sessionId);
|
|
4446
|
+
if (session) {
|
|
4447
|
+
session.status = "stopping";
|
|
4448
|
+
}
|
|
4449
|
+
}
|
|
4450
|
+
/**
|
|
4451
|
+
* Stop an agent session gracefully
|
|
4452
|
+
*
|
|
4453
|
+
* Sends SIGINT and waits for process to exit.
|
|
4454
|
+
* If it doesn't exit within 5 seconds, sends SIGTERM.
|
|
4455
|
+
*/
|
|
4456
|
+
async stopSession(sessionId) {
|
|
4457
|
+
const process2 = this.processes.get(sessionId);
|
|
4458
|
+
const session = this.sessions.get(sessionId);
|
|
4459
|
+
if (session) {
|
|
4460
|
+
session.status = "stopping";
|
|
4461
|
+
}
|
|
4462
|
+
if (process2 && !process2.killed) {
|
|
4463
|
+
console.log(`[AgentManager] Stopping session ${sessionId}`);
|
|
4464
|
+
process2.kill("SIGINT");
|
|
4465
|
+
await new Promise((resolve2) => {
|
|
4466
|
+
const timeout = setTimeout(() => {
|
|
4467
|
+
if (!process2.killed) {
|
|
4468
|
+
console.log(`[AgentManager] Force killing session ${sessionId}`);
|
|
4469
|
+
process2.kill("SIGTERM");
|
|
4470
|
+
}
|
|
4471
|
+
resolve2();
|
|
4472
|
+
}, 5e3);
|
|
4473
|
+
process2.once("exit", () => {
|
|
4474
|
+
clearTimeout(timeout);
|
|
4475
|
+
resolve2();
|
|
4476
|
+
});
|
|
4477
|
+
});
|
|
4478
|
+
}
|
|
4479
|
+
this.sessions.delete(sessionId);
|
|
4480
|
+
this.processes.delete(sessionId);
|
|
4481
|
+
this.removePidFile(sessionId);
|
|
4482
|
+
console.log(`[AgentManager] Session ${sessionId} stopped`);
|
|
4483
|
+
}
|
|
4484
|
+
/**
|
|
4485
|
+
* Stop all active sessions
|
|
4486
|
+
*/
|
|
4487
|
+
async stopAllSessions() {
|
|
4488
|
+
const sessionIds = Array.from(this.sessions.keys());
|
|
4489
|
+
await Promise.all(sessionIds.map((id) => this.stopSession(id)));
|
|
4490
|
+
}
|
|
4491
|
+
/**
|
|
4492
|
+
* Get session info
|
|
4493
|
+
*/
|
|
4494
|
+
getSession(sessionId) {
|
|
4495
|
+
return this.sessions.get(sessionId);
|
|
4496
|
+
}
|
|
4497
|
+
/**
|
|
4498
|
+
* Get all active sessions
|
|
4499
|
+
*/
|
|
4500
|
+
getAllSessions() {
|
|
4501
|
+
return Array.from(this.sessions.values());
|
|
4502
|
+
}
|
|
4503
|
+
/**
|
|
4504
|
+
* Check if a session exists
|
|
4505
|
+
*/
|
|
4506
|
+
hasSession(sessionId) {
|
|
4507
|
+
return this.sessions.has(sessionId);
|
|
4508
|
+
}
|
|
4509
|
+
/**
|
|
4510
|
+
* Clean up orphaned processes from previous daemon runs
|
|
4511
|
+
*
|
|
4512
|
+
* Reads PID files from ~/.episoda/agent-pids/ and kills any
|
|
4513
|
+
* processes that are still running.
|
|
4514
|
+
*/
|
|
4515
|
+
async cleanupOrphanedProcesses() {
|
|
4516
|
+
let cleaned = 0;
|
|
4517
|
+
if (!fs8.existsSync(this.pidDir)) {
|
|
4518
|
+
return { cleaned };
|
|
4519
|
+
}
|
|
4520
|
+
const pidFiles = fs8.readdirSync(this.pidDir).filter((f) => f.endsWith(".pid"));
|
|
4521
|
+
for (const pidFile of pidFiles) {
|
|
4522
|
+
const pidPath = path9.join(this.pidDir, pidFile);
|
|
4523
|
+
try {
|
|
4524
|
+
const pidStr = fs8.readFileSync(pidPath, "utf-8").trim();
|
|
4525
|
+
const pid = parseInt(pidStr, 10);
|
|
4526
|
+
if (!isNaN(pid)) {
|
|
4527
|
+
try {
|
|
4528
|
+
process.kill(pid, 0);
|
|
4529
|
+
console.log(`[AgentManager] Killing orphaned process ${pid}`);
|
|
4530
|
+
process.kill(pid, "SIGTERM");
|
|
4531
|
+
cleaned++;
|
|
4532
|
+
} catch {
|
|
4533
|
+
}
|
|
4534
|
+
}
|
|
4535
|
+
fs8.unlinkSync(pidPath);
|
|
4536
|
+
} catch (error) {
|
|
4537
|
+
console.warn(`[AgentManager] Error cleaning PID file ${pidFile}:`, error);
|
|
4538
|
+
}
|
|
4539
|
+
}
|
|
4540
|
+
if (cleaned > 0) {
|
|
4541
|
+
console.log(`[AgentManager] Cleaned up ${cleaned} orphaned process(es)`);
|
|
4542
|
+
}
|
|
4543
|
+
return { cleaned };
|
|
4544
|
+
}
|
|
4545
|
+
/**
|
|
4546
|
+
* Write PID file for session tracking
|
|
4547
|
+
*/
|
|
4548
|
+
writePidFile(sessionId, pid) {
|
|
4549
|
+
const pidPath = path9.join(this.pidDir, `${sessionId}.pid`);
|
|
4550
|
+
fs8.writeFileSync(pidPath, pid.toString());
|
|
4551
|
+
}
|
|
4552
|
+
/**
|
|
4553
|
+
* Remove PID file for session
|
|
4554
|
+
*/
|
|
4555
|
+
removePidFile(sessionId) {
|
|
4556
|
+
const pidPath = path9.join(this.pidDir, `${sessionId}.pid`);
|
|
4557
|
+
try {
|
|
4558
|
+
if (fs8.existsSync(pidPath)) {
|
|
4559
|
+
fs8.unlinkSync(pidPath);
|
|
4560
|
+
}
|
|
4561
|
+
} catch {
|
|
4562
|
+
}
|
|
4563
|
+
}
|
|
4564
|
+
};
|
|
4565
|
+
|
|
4566
|
+
// src/utils/dev-server.ts
|
|
4567
|
+
var import_child_process8 = require("child_process");
|
|
4568
|
+
init_port_check();
|
|
4569
|
+
var import_core6 = __toESM(require_dist());
|
|
4570
|
+
var import_http = __toESM(require("http"));
|
|
4571
|
+
var fs9 = __toESM(require("fs"));
|
|
4572
|
+
var path10 = __toESM(require("path"));
|
|
4573
|
+
var MAX_RESTART_ATTEMPTS = 5;
|
|
4574
|
+
var INITIAL_RESTART_DELAY_MS = 2e3;
|
|
4575
|
+
var MAX_RESTART_DELAY_MS = 3e4;
|
|
4576
|
+
var MAX_LOG_SIZE_BYTES = 5 * 1024 * 1024;
|
|
4577
|
+
var NODE_MEMORY_LIMIT_MB = 2048;
|
|
4578
|
+
var activeServers = /* @__PURE__ */ new Map();
|
|
4579
|
+
function getLogsDir() {
|
|
4580
|
+
const logsDir = path10.join((0, import_core6.getConfigDir)(), "logs");
|
|
4581
|
+
if (!fs9.existsSync(logsDir)) {
|
|
4582
|
+
fs9.mkdirSync(logsDir, { recursive: true });
|
|
4583
|
+
}
|
|
4584
|
+
return logsDir;
|
|
4585
|
+
}
|
|
4586
|
+
function getLogFilePath(moduleUid) {
|
|
4587
|
+
return path10.join(getLogsDir(), `dev-${moduleUid}.log`);
|
|
4588
|
+
}
|
|
4589
|
+
function rotateLogIfNeeded(logPath) {
|
|
4590
|
+
try {
|
|
4591
|
+
if (fs9.existsSync(logPath)) {
|
|
4592
|
+
const stats = fs9.statSync(logPath);
|
|
4593
|
+
if (stats.size > MAX_LOG_SIZE_BYTES) {
|
|
4594
|
+
const backupPath = `${logPath}.1`;
|
|
4595
|
+
if (fs9.existsSync(backupPath)) {
|
|
4596
|
+
fs9.unlinkSync(backupPath);
|
|
4597
|
+
}
|
|
4598
|
+
fs9.renameSync(logPath, backupPath);
|
|
4599
|
+
console.log(`[DevServer] EP932: Rotated log file for ${path10.basename(logPath)}`);
|
|
4600
|
+
}
|
|
4601
|
+
}
|
|
4602
|
+
} catch (error) {
|
|
4603
|
+
console.warn(`[DevServer] EP932: Failed to rotate log:`, error);
|
|
4604
|
+
}
|
|
4605
|
+
}
|
|
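rotateLogIfNeeded above keeps each dev-server log bounded: once the file crosses a size threshold it is renamed to a single `.1` backup, replacing any previous backup, and logging continues into a fresh file. The same rotate-on-size pattern in isolation (the 5 MB cap is just the example value used above):

// Sketch: one-backup log rotation by size; rotation is best-effort and never blocks logging.
const fs = require("fs");

const MAX_BYTES = 5 * 1024 * 1024;

function rotateIfNeeded(logPath) {
  try {
    if (!fs.existsSync(logPath) || fs.statSync(logPath).size <= MAX_BYTES) return false;
    const backup = `${logPath}.1`;
    if (fs.existsSync(backup)) fs.unlinkSync(backup); // keep exactly one old generation
    fs.renameSync(logPath, backup);                   // current log becomes the backup
    return true;                                      // next append starts a fresh file
  } catch {
    return false;
  }
}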
4606
|
+
function writeToLog(logPath, line, isError = false) {
|
|
4607
|
+
try {
|
|
4608
|
+
const timestamp = (/* @__PURE__ */ new Date()).toISOString();
|
|
4609
|
+
const prefix = isError ? "ERR" : "OUT";
|
|
4610
|
+
const logLine = `[${timestamp}] [${prefix}] ${line}
|
|
4611
|
+
`;
|
|
4612
|
+
fs9.appendFileSync(logPath, logLine);
|
|
4613
|
+
} catch {
|
|
4614
|
+
}
|
|
4615
|
+
}
|
|
4616
|
+
async function isDevServerHealthy(port, timeoutMs = 5e3) {
|
|
3611
4617
|
return new Promise((resolve2) => {
|
|
3612
|
-
const
|
|
3613
|
-
|
|
3614
|
-
|
|
4618
|
+
const req = import_http.default.request(
|
|
4619
|
+
{
|
|
4620
|
+
hostname: "localhost",
|
|
4621
|
+
port,
|
|
4622
|
+
path: "/",
|
|
4623
|
+
method: "HEAD",
|
|
4624
|
+
timeout: timeoutMs
|
|
4625
|
+
},
|
|
4626
|
+
(res) => {
|
|
3615
4627
|
resolve2(true);
|
|
3616
|
-
} else {
|
|
3617
|
-
resolve2(false);
|
|
3618
4628
|
}
|
|
4629
|
+
);
|
|
4630
|
+
req.on("error", () => {
|
|
4631
|
+
resolve2(false);
|
|
3619
4632
|
});
|
|
3620
|
-
|
|
3621
|
-
|
|
4633
|
+
req.on("timeout", () => {
|
|
4634
|
+
req.destroy();
|
|
3622
4635
|
resolve2(false);
|
|
3623
4636
|
});
|
|
3624
|
-
|
|
4637
|
+
req.end();
|
|
3625
4638
|
});
|
|
3626
4639
|
}
|
|
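isDevServerHealthy above is a liveness probe, not a correctness check: it sends a HEAD request to localhost and resolves true on any HTTP response, false on a socket error or timeout. The probe on its own (defaults mirror the code above and are only examples):

// Sketch: HEAD-request liveness probe for a local dev server.
const http = require("http");

function probe(port, timeoutMs = 5000) {
  return new Promise((resolve) => {
    const req = http.request(
      { hostname: "localhost", port, path: "/", method: "HEAD", timeout: timeoutMs },
      (res) => {
        res.resume();  // drain so the socket is released
        resolve(true); // any response at all counts as "alive"
      }
    );
    req.on("error", () => resolve(false));
    req.on("timeout", () => {
      req.destroy();   // abort so the promise settles exactly once
      resolve(false);
    });
    req.end();
  });
}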
3627
|
-
|
|
3628
|
-
|
|
3629
|
-
|
|
4640
|
+
async function killProcessOnPort(port) {
|
|
4641
|
+
try {
|
|
4642
|
+
const result = (0, import_child_process8.execSync)(`lsof -ti:${port} 2>/dev/null || true`, { encoding: "utf8" }).trim();
|
|
4643
|
+
if (!result) {
|
|
4644
|
+
console.log(`[DevServer] EP929: No process found on port ${port}`);
|
|
4645
|
+
return true;
|
|
4646
|
+
}
|
|
4647
|
+
const pids = result.split("\n").filter(Boolean);
|
|
4648
|
+
console.log(`[DevServer] EP929: Found ${pids.length} process(es) on port ${port}: ${pids.join(", ")}`);
|
|
4649
|
+
for (const pid of pids) {
|
|
4650
|
+
try {
|
|
4651
|
+
(0, import_child_process8.execSync)(`kill -15 ${pid} 2>/dev/null || true`, { encoding: "utf8" });
|
|
4652
|
+
console.log(`[DevServer] EP929: Sent SIGTERM to PID ${pid}`);
|
|
4653
|
+
} catch {
|
|
4654
|
+
}
|
|
4655
|
+
}
|
|
4656
|
+
await new Promise((resolve2) => setTimeout(resolve2, 1e3));
|
|
4657
|
+
for (const pid of pids) {
|
|
4658
|
+
try {
|
|
4659
|
+
(0, import_child_process8.execSync)(`kill -0 ${pid} 2>/dev/null`, { encoding: "utf8" });
|
|
4660
|
+
(0, import_child_process8.execSync)(`kill -9 ${pid} 2>/dev/null || true`, { encoding: "utf8" });
|
|
4661
|
+
console.log(`[DevServer] EP929: Force killed PID ${pid}`);
|
|
4662
|
+
} catch {
|
|
4663
|
+
}
|
|
4664
|
+
}
|
|
4665
|
+
await new Promise((resolve2) => setTimeout(resolve2, 500));
|
|
4666
|
+
const stillInUse = await isPortInUse(port);
|
|
4667
|
+
if (stillInUse) {
|
|
4668
|
+
console.error(`[DevServer] EP929: Port ${port} still in use after kill attempts`);
|
|
4669
|
+
return false;
|
|
4670
|
+
}
|
|
4671
|
+
console.log(`[DevServer] EP929: Successfully freed port ${port}`);
|
|
4672
|
+
return true;
|
|
4673
|
+
} catch (error) {
|
|
4674
|
+
console.error(`[DevServer] EP929: Error killing process on port ${port}:`, error);
|
|
4675
|
+
return false;
|
|
4676
|
+
}
|
|
4677
|
+
}
|
|
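killProcessOnPort above frees a port in two passes: lsof lists the owning PIDs, everyone gets SIGTERM and a one-second grace period, then anything still alive gets SIGKILL, and the port is re-checked afterwards. A condensed, POSIX-only sketch of that escalation (it omits the final port re-check that the code above performs):

// Sketch: SIGTERM-then-SIGKILL port cleanup; depends on lsof, so POSIX only.
const { execSync } = require("child_process");
const sleep = (ms) => new Promise((r) => setTimeout(r, ms));

async function freePort(port) {
  const out = execSync(`lsof -ti:${port} 2>/dev/null || true`, { encoding: "utf8" }).trim();
  if (!out) return;                                // nothing is listening
  const pids = out.split("\n").filter(Boolean).map(Number);
  for (const pid of pids) {
    try { process.kill(pid, "SIGTERM"); } catch {} // polite first
  }
  await sleep(1000);                               // grace period for clean shutdown
  for (const pid of pids) {
    try {
      process.kill(pid, 0);                        // still alive?
      process.kill(pid, "SIGKILL");                // then force it
    } catch {}                                     // ESRCH: already exited
  }
}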
3630
4678
|
async function waitForPort(port, timeoutMs = 3e4) {
|
|
3631
4679
|
const startTime = Date.now();
|
|
3632
4680
|
const checkInterval = 500;
|
|
@@ -3638,58 +4686,141 @@ async function waitForPort(port, timeoutMs = 3e4) {
|
|
|
3638
4686
|
}
|
|
3639
4687
|
return false;
|
|
3640
4688
|
}
|
|
3641
|
-
|
|
4689
|
+
function calculateRestartDelay(restartCount) {
|
|
4690
|
+
const delay = INITIAL_RESTART_DELAY_MS * Math.pow(2, restartCount);
|
|
4691
|
+
return Math.min(delay, MAX_RESTART_DELAY_MS);
|
|
4692
|
+
}
|
|
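calculateRestartDelay gives the auto-restart loop a capped exponential backoff: with the constants defined above (2e3 and 3e4), the wait doubles from 2 s up to a 30 s ceiling, so a crash-looping dev server is retried quickly at first and then slowly. The arithmetic on its own:

// Sketch: capped exponential backoff using the same base (2 s) and ceiling (30 s) as above.
const INITIAL_DELAY_MS = 2000;
const MAX_DELAY_MS = 30000;

const restartDelay = (attempt) => Math.min(INITIAL_DELAY_MS * 2 ** attempt, MAX_DELAY_MS);

// attempt: 0 -> 2000, 1 -> 4000, 2 -> 8000, 3 -> 16000, 4 -> 30000 (capped from 32000)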
4693
|
+
function spawnDevServerProcess(projectPath, port, moduleUid, logPath) {
|
|
4694
|
+
rotateLogIfNeeded(logPath);
|
|
4695
|
+
const nodeOptions = process.env.NODE_OPTIONS || "";
|
|
4696
|
+
const memoryFlag = `--max-old-space-size=${NODE_MEMORY_LIMIT_MB}`;
|
|
4697
|
+
const enhancedNodeOptions = nodeOptions.includes("max-old-space-size") ? nodeOptions : `${nodeOptions} ${memoryFlag}`.trim();
|
|
4698
|
+
const devProcess = (0, import_child_process8.spawn)("npm", ["run", "dev"], {
|
|
4699
|
+
cwd: projectPath,
|
|
4700
|
+
env: {
|
|
4701
|
+
...process.env,
|
|
4702
|
+
PORT: String(port),
|
|
4703
|
+
NODE_OPTIONS: enhancedNodeOptions
|
|
4704
|
+
},
|
|
4705
|
+
stdio: ["ignore", "pipe", "pipe"],
|
|
4706
|
+
detached: false
|
|
4707
|
+
});
|
|
4708
|
+
devProcess.stdout?.on("data", (data) => {
|
|
4709
|
+
const line = data.toString().trim();
|
|
4710
|
+
if (line) {
|
|
4711
|
+
console.log(`[DevServer:${moduleUid}] ${line}`);
|
|
4712
|
+
writeToLog(logPath, line, false);
|
|
4713
|
+
}
|
|
4714
|
+
});
|
|
4715
|
+
devProcess.stderr?.on("data", (data) => {
|
|
4716
|
+
const line = data.toString().trim();
|
|
4717
|
+
if (line) {
|
|
4718
|
+
console.error(`[DevServer:${moduleUid}] ${line}`);
|
|
4719
|
+
writeToLog(logPath, line, true);
|
|
4720
|
+
}
|
|
4721
|
+
});
|
|
4722
|
+
return devProcess;
|
|
4723
|
+
}
|
|
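One detail in spawnDevServerProcess worth noticing: it appends --max-old-space-size to NODE_OPTIONS only when the variable does not already contain one, so an operator's explicit heap limit is never overridden, and the PORT override plus piped stdio are layered on top of the parent environment. The merge step as a small helper (the 2048 MB default is just the constant used above):

// Sketch: add a V8 heap limit to NODE_OPTIONS only when the caller has not set one.
function withMemoryLimit(existing = process.env.NODE_OPTIONS || "", limitMb = 2048) {
  if (existing.includes("max-old-space-size")) return existing; // respect an explicit limit
  return `${existing} --max-old-space-size=${limitMb}`.trim();
}

// e.g. spawn("npm", ["run", "dev"], { env: { ...process.env, PORT: "3000", NODE_OPTIONS: withMemoryLimit() } })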
4724
|
+
async function handleProcessExit(moduleUid, code, signal) {
|
|
4725
|
+
const serverInfo = activeServers.get(moduleUid);
|
|
4726
|
+
if (!serverInfo) {
|
|
4727
|
+
return;
|
|
4728
|
+
}
|
|
4729
|
+
const exitReason = signal ? `signal ${signal}` : `code ${code}`;
|
|
4730
|
+
console.log(`[DevServer] EP932: Process for ${moduleUid} exited with ${exitReason}`);
|
|
4731
|
+
writeToLog(serverInfo.logFile || "", `Process exited with ${exitReason}`, true);
|
|
4732
|
+
if (!serverInfo.autoRestartEnabled) {
|
|
4733
|
+
console.log(`[DevServer] EP932: Auto-restart disabled for ${moduleUid}`);
|
|
4734
|
+
activeServers.delete(moduleUid);
|
|
4735
|
+
return;
|
|
4736
|
+
}
|
|
4737
|
+
if (serverInfo.restartCount >= MAX_RESTART_ATTEMPTS) {
|
|
4738
|
+
console.error(`[DevServer] EP932: Max restart attempts (${MAX_RESTART_ATTEMPTS}) reached for ${moduleUid}`);
|
|
4739
|
+
writeToLog(serverInfo.logFile || "", `Max restart attempts reached, giving up`, true);
|
|
4740
|
+
activeServers.delete(moduleUid);
|
|
4741
|
+
return;
|
|
4742
|
+
}
|
|
4743
|
+
const delay = calculateRestartDelay(serverInfo.restartCount);
|
|
4744
|
+
console.log(`[DevServer] EP932: Restarting ${moduleUid} in ${delay}ms (attempt ${serverInfo.restartCount + 1}/${MAX_RESTART_ATTEMPTS})`);
|
|
4745
|
+
writeToLog(serverInfo.logFile || "", `Scheduling restart in ${delay}ms (attempt ${serverInfo.restartCount + 1})`, false);
|
|
4746
|
+
await new Promise((resolve2) => setTimeout(resolve2, delay));
|
|
4747
|
+
if (!activeServers.has(moduleUid)) {
|
|
4748
|
+
console.log(`[DevServer] EP932: Server ${moduleUid} was removed during restart delay, aborting restart`);
|
|
4749
|
+
return;
|
|
4750
|
+
}
|
|
4751
|
+
const logPath = serverInfo.logFile || getLogFilePath(moduleUid);
|
|
4752
|
+
const newProcess = spawnDevServerProcess(serverInfo.projectPath, serverInfo.port, moduleUid, logPath);
|
|
4753
|
+
const updatedInfo = {
|
|
4754
|
+
...serverInfo,
|
|
4755
|
+
process: newProcess,
|
|
4756
|
+
restartCount: serverInfo.restartCount + 1,
|
|
4757
|
+
lastRestartAt: /* @__PURE__ */ new Date()
|
|
4758
|
+
};
|
|
4759
|
+
activeServers.set(moduleUid, updatedInfo);
|
|
4760
|
+
newProcess.on("exit", (newCode, newSignal) => {
|
|
4761
|
+
handleProcessExit(moduleUid, newCode, newSignal);
|
|
4762
|
+
});
|
|
4763
|
+
newProcess.on("error", (error) => {
|
|
4764
|
+
console.error(`[DevServer] EP932: Process error for ${moduleUid}:`, error);
|
|
4765
|
+
writeToLog(logPath, `Process error: ${error.message}`, true);
|
|
4766
|
+
});
|
|
4767
|
+
const serverReady = await waitForPort(serverInfo.port, 6e4);
|
|
4768
|
+
if (serverReady) {
|
|
4769
|
+
console.log(`[DevServer] EP932: Server ${moduleUid} restarted successfully`);
|
|
4770
|
+
writeToLog(logPath, `Server restarted successfully`, false);
|
|
4771
|
+
updatedInfo.restartCount = 0;
|
|
4772
|
+
} else {
|
|
4773
|
+
console.error(`[DevServer] EP932: Server ${moduleUid} failed to restart`);
|
|
4774
|
+
writeToLog(logPath, `Server failed to restart within timeout`, true);
|
|
4775
|
+
}
|
|
4776
|
+
}
|
|
4777
|
+
async function startDevServer(projectPath, port = 3e3, moduleUid = "default", options = {}) {
|
|
4778
|
+
const autoRestart = options.autoRestart ?? true;
|
|
3642
4779
|
if (await isPortInUse(port)) {
|
|
3643
4780
|
console.log(`[DevServer] Server already running on port ${port}`);
|
|
3644
4781
|
return { success: true, alreadyRunning: true };
|
|
3645
4782
|
}
|
|
3646
4783
|
if (activeServers.has(moduleUid)) {
|
|
3647
4784
|
const existing = activeServers.get(moduleUid);
|
|
3648
|
-
if (existing && !existing.killed) {
|
|
4785
|
+
if (existing && !existing.process.killed) {
|
|
3649
4786
|
console.log(`[DevServer] Process already exists for ${moduleUid}`);
|
|
3650
4787
|
return { success: true, alreadyRunning: true };
|
|
3651
4788
|
}
|
|
3652
4789
|
}
|
|
3653
|
-
console.log(`[DevServer] Starting dev server for ${moduleUid} on port ${port}...`);
|
|
4790
|
+
console.log(`[DevServer] EP932: Starting dev server for ${moduleUid} on port ${port} (auto-restart: ${autoRestart})...`);
|
|
3654
4791
|
try {
|
|
3655
|
-
const
|
|
3656
|
-
|
|
3657
|
-
|
|
3658
|
-
|
|
3659
|
-
|
|
3660
|
-
|
|
3661
|
-
|
|
3662
|
-
|
|
3663
|
-
|
|
3664
|
-
|
|
3665
|
-
|
|
3666
|
-
|
|
3667
|
-
|
|
3668
|
-
|
|
3669
|
-
|
|
3670
|
-
});
|
|
3671
|
-
devProcess.stderr?.on("data", (data) => {
|
|
3672
|
-
const line = data.toString().trim();
|
|
3673
|
-
if (line) {
|
|
3674
|
-
console.error(`[DevServer:${moduleUid}] ${line}`);
|
|
3675
|
-
}
|
|
3676
|
-
});
|
|
4792
|
+
const logPath = getLogFilePath(moduleUid);
|
|
4793
|
+
const devProcess = spawnDevServerProcess(projectPath, port, moduleUid, logPath);
|
|
4794
|
+
const serverInfo = {
|
|
4795
|
+
process: devProcess,
|
|
4796
|
+
moduleUid,
|
|
4797
|
+
projectPath,
|
|
4798
|
+
port,
|
|
4799
|
+
startedAt: /* @__PURE__ */ new Date(),
|
|
4800
|
+
restartCount: 0,
|
|
4801
|
+
lastRestartAt: null,
|
|
4802
|
+
autoRestartEnabled: autoRestart,
|
|
4803
|
+
logFile: logPath
|
|
4804
|
+
};
|
|
4805
|
+
activeServers.set(moduleUid, serverInfo);
|
|
4806
|
+
writeToLog(logPath, `Starting dev server on port ${port}`, false);
|
|
3677
4807
|
devProcess.on("exit", (code, signal) => {
|
|
3678
|
-
|
|
3679
|
-
activeServers.delete(moduleUid);
|
|
4808
|
+
handleProcessExit(moduleUid, code, signal);
|
|
3680
4809
|
});
|
|
3681
4810
|
devProcess.on("error", (error) => {
|
|
3682
4811
|
console.error(`[DevServer] Process error for ${moduleUid}:`, error);
|
|
3683
|
-
|
|
4812
|
+
writeToLog(logPath, `Process error: ${error.message}`, true);
|
|
3684
4813
|
});
|
|
3685
4814
|
console.log(`[DevServer] Waiting for server to start on port ${port}...`);
|
|
3686
4815
|
const serverReady = await waitForPort(port, 6e4);
|
|
3687
4816
|
if (!serverReady) {
|
|
3688
4817
|
devProcess.kill();
|
|
3689
4818
|
activeServers.delete(moduleUid);
|
|
4819
|
+
writeToLog(logPath, `Failed to start within timeout`, true);
|
|
3690
4820
|
return { success: false, error: "Dev server failed to start within timeout" };
|
|
3691
4821
|
}
|
|
3692
4822
|
console.log(`[DevServer] Server started successfully on port ${port}`);
|
|
4823
|
+
writeToLog(logPath, `Server started successfully`, false);
|
|
3693
4824
|
return { success: true };
|
|
3694
4825
|
} catch (error) {
|
|
3695
4826
|
const errorMsg = error instanceof Error ? error.message : String(error);
|
|
@@ -3698,27 +4829,65 @@ async function startDevServer(projectPath, port = 3e3, moduleUid = "default") {
|
|
|
3698
4829
|
}
|
|
3699
4830
|
}
|
|
3700
4831
|
async function stopDevServer(moduleUid) {
|
|
3701
|
-
const
|
|
3702
|
-
if (
|
|
4832
|
+
const serverInfo = activeServers.get(moduleUid);
|
|
4833
|
+
if (!serverInfo) {
|
|
4834
|
+
return;
|
|
4835
|
+
}
|
|
4836
|
+
serverInfo.autoRestartEnabled = false;
|
|
4837
|
+
if (!serverInfo.process.killed) {
|
|
3703
4838
|
console.log(`[DevServer] Stopping server for ${moduleUid}`);
|
|
3704
|
-
|
|
4839
|
+
if (serverInfo.logFile) {
|
|
4840
|
+
writeToLog(serverInfo.logFile, `Stopping server (manual stop)`, false);
|
|
4841
|
+
}
|
|
4842
|
+
serverInfo.process.kill("SIGTERM");
|
|
3705
4843
|
await new Promise((resolve2) => setTimeout(resolve2, 2e3));
|
|
3706
|
-
if (!
|
|
3707
|
-
|
|
4844
|
+
if (!serverInfo.process.killed) {
|
|
4845
|
+
serverInfo.process.kill("SIGKILL");
|
|
3708
4846
|
}
|
|
3709
|
-
activeServers.delete(moduleUid);
|
|
3710
4847
|
}
|
|
4848
|
+
activeServers.delete(moduleUid);
|
|
4849
|
+
}
|
|
4850
|
+
async function restartDevServer(moduleUid) {
|
|
4851
|
+
const serverInfo = activeServers.get(moduleUid);
|
|
4852
|
+
if (!serverInfo) {
|
|
4853
|
+
return { success: false, error: `No dev server found for ${moduleUid}` };
|
|
4854
|
+
}
|
|
4855
|
+
const { projectPath, port, autoRestartEnabled, logFile } = serverInfo;
|
|
4856
|
+
console.log(`[DevServer] EP932: Restarting server for ${moduleUid}...`);
|
|
4857
|
+
if (logFile) {
|
|
4858
|
+
writeToLog(logFile, `Manual restart requested`, false);
|
|
4859
|
+
}
|
|
4860
|
+
await stopDevServer(moduleUid);
|
|
4861
|
+
await new Promise((resolve2) => setTimeout(resolve2, 1e3));
|
|
4862
|
+
if (await isPortInUse(port)) {
|
|
4863
|
+
await killProcessOnPort(port);
|
|
4864
|
+
}
|
|
4865
|
+
return startDevServer(projectPath, port, moduleUid, { autoRestart: autoRestartEnabled });
|
|
4866
|
+
}
|
|
4867
|
+
function getDevServerStatus() {
|
|
4868
|
+
const now = Date.now();
|
|
4869
|
+
return Array.from(activeServers.values()).map((info) => ({
|
|
4870
|
+
moduleUid: info.moduleUid,
|
|
4871
|
+
port: info.port,
|
|
4872
|
+
pid: info.process.pid,
|
|
4873
|
+
startedAt: info.startedAt,
|
|
4874
|
+
uptime: Math.floor((now - info.startedAt.getTime()) / 1e3),
|
|
4875
|
+
restartCount: info.restartCount,
|
|
4876
|
+
lastRestartAt: info.lastRestartAt,
|
|
4877
|
+
autoRestartEnabled: info.autoRestartEnabled,
|
|
4878
|
+
logFile: info.logFile
|
|
4879
|
+
}));
|
|
3711
4880
|
}
|
|
3712
4881
|
async function ensureDevServer(projectPath, port = 3e3, moduleUid = "default") {
|
|
3713
4882
|
if (await isPortInUse(port)) {
|
|
3714
4883
|
return { success: true };
|
|
3715
4884
|
}
|
|
3716
|
-
return startDevServer(projectPath, port, moduleUid);
|
|
4885
|
+
return startDevServer(projectPath, port, moduleUid, { autoRestart: true });
|
|
3717
4886
|
}
|
|
3718
4887
|
|
|
3719
4888
|
// src/utils/port-detect.ts
|
|
3720
|
-
var
|
|
3721
|
-
var
|
|
4889
|
+
var fs10 = __toESM(require("fs"));
|
|
4890
|
+
var path11 = __toESM(require("path"));
|
|
3722
4891
|
var DEFAULT_PORT = 3e3;
|
|
3723
4892
|
function detectDevPort(projectPath) {
|
|
3724
4893
|
const envPort = getPortFromEnv(projectPath);
|
|
@@ -3736,15 +4905,15 @@ function detectDevPort(projectPath) {
|
|
|
3736
4905
|
}
|
|
3737
4906
|
function getPortFromEnv(projectPath) {
|
|
3738
4907
|
const envPaths = [
|
|
3739
|
-
|
|
3740
|
-
|
|
3741
|
-
|
|
3742
|
-
|
|
4908
|
+
path11.join(projectPath, ".env"),
|
|
4909
|
+
path11.join(projectPath, ".env.local"),
|
|
4910
|
+
path11.join(projectPath, ".env.development"),
|
|
4911
|
+
path11.join(projectPath, ".env.development.local")
|
|
3743
4912
|
];
|
|
3744
4913
|
for (const envPath of envPaths) {
|
|
3745
4914
|
try {
|
|
3746
|
-
if (!
|
|
3747
|
-
const content =
|
|
4915
|
+
if (!fs10.existsSync(envPath)) continue;
|
|
4916
|
+
const content = fs10.readFileSync(envPath, "utf-8");
|
|
3748
4917
|
const lines = content.split("\n");
|
|
3749
4918
|
for (const line of lines) {
|
|
3750
4919
|
const match = line.match(/^\s*PORT\s*=\s*["']?(\d+)["']?\s*(?:#.*)?$/);
|
|
@@ -3761,10 +4930,10 @@ function getPortFromEnv(projectPath) {
|
|
|
3761
4930
|
return null;
|
|
3762
4931
|
}
|
|
3763
4932
|
function getPortFromPackageJson(projectPath) {
|
|
3764
|
-
const packageJsonPath =
|
|
4933
|
+
const packageJsonPath = path11.join(projectPath, "package.json");
|
|
3765
4934
|
try {
|
|
3766
|
-
if (!
|
|
3767
|
-
const content =
|
|
4935
|
+
if (!fs10.existsSync(packageJsonPath)) return null;
|
|
4936
|
+
const content = fs10.readFileSync(packageJsonPath, "utf-8");
|
|
3768
4937
|
const pkg = JSON.parse(content);
|
|
3769
4938
|
const devScript = pkg.scripts?.dev;
|
|
3770
4939
|
if (!devScript) return null;
|
|
@@ -3788,11 +4957,79 @@ function getPortFromPackageJson(projectPath) {
|
|
|
3788
4957
|
}
|
|
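Port detection above works in layers: a PORT assignment in one of the usual dotenv files wins, then a port flag baked into the package.json dev script, then the 3000 default. The .env layer accepts lines like PORT=3000, PORT="3000", or PORT=3000 # comment, which is exactly what the anchored regex encodes. A small sketch of that parse applied to one file's text (the range guard at the end is an added illustration, not taken from the code above):

// Sketch: pull PORT out of dotenv-style text using the same regex shape as above.
function portFromDotenv(text) {
  for (const line of text.split("\n")) {
    const m = line.match(/^\s*PORT\s*=\s*["']?(\d+)["']?\s*(?:#.*)?$/);
    if (m) {
      const port = parseInt(m[1], 10);
      if (port > 0 && port < 65536) return port; // illustrative sanity check
    }
  }
  return null;
}

// portFromDotenv('API_URL=http://localhost\nPORT="4321"  # local dev') === 4321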
3789
4958
|
|
|
3790
4959
|
// src/daemon/daemon-process.ts
|
|
3791
|
-
var
|
|
3792
|
-
var
|
|
3793
|
-
var
|
|
4960
|
+
var fs11 = __toESM(require("fs"));
|
|
4961
|
+
var os4 = __toESM(require("os"));
|
|
4962
|
+
var path12 = __toESM(require("path"));
|
|
3794
4963
|
var packageJson = require_package();
|
|
3795
|
-
|
|
4964
|
+
async function ensureValidToken(config, bufferMs = 5 * 60 * 1e3) {
|
|
4965
|
+
const now = Date.now();
|
|
4966
|
+
const expiresAt = config.expires_at || 0;
|
|
4967
|
+
if (expiresAt > now + bufferMs) {
|
|
4968
|
+
return config;
|
|
4969
|
+
}
|
|
4970
|
+
if (!config.refresh_token) {
|
|
4971
|
+
console.warn("[Daemon] EP904: Token expired but no refresh_token available");
|
|
4972
|
+
return config;
|
|
4973
|
+
}
|
|
4974
|
+
console.log("[Daemon] EP904: Access token expired or expiring soon, refreshing...");
|
|
4975
|
+
try {
|
|
4976
|
+
const apiUrl = config.api_url || "https://episoda.dev";
|
|
4977
|
+
const response = await fetch(`${apiUrl}/api/oauth/token`, {
|
|
4978
|
+
method: "POST",
|
|
4979
|
+
headers: { "Content-Type": "application/json" },
|
|
4980
|
+
body: JSON.stringify({
|
|
4981
|
+
grant_type: "refresh_token",
|
|
4982
|
+
refresh_token: config.refresh_token
|
|
4983
|
+
})
|
|
4984
|
+
});
|
|
4985
|
+
if (!response.ok) {
|
|
4986
|
+
const errorData = await response.json().catch(() => ({}));
|
|
4987
|
+
console.error(`[Daemon] EP904: Token refresh failed: ${response.status} ${errorData.error || response.statusText}`);
|
|
4988
|
+
return config;
|
|
4989
|
+
}
|
|
4990
|
+
const tokenResponse = await response.json();
|
|
4991
|
+
const updatedConfig = {
|
|
4992
|
+
...config,
|
|
4993
|
+
access_token: tokenResponse.access_token,
|
|
4994
|
+
refresh_token: tokenResponse.refresh_token || config.refresh_token,
|
|
4995
|
+
expires_at: now + tokenResponse.expires_in * 1e3
|
|
4996
|
+
};
|
|
4997
|
+
await (0, import_core7.saveConfig)(updatedConfig);
|
|
4998
|
+
console.log("[Daemon] EP904: Access token refreshed successfully");
|
|
4999
|
+
return updatedConfig;
|
|
5000
|
+
} catch (error) {
|
|
5001
|
+
console.error("[Daemon] EP904: Token refresh error:", error instanceof Error ? error.message : error);
|
|
5002
|
+
return config;
|
|
5003
|
+
}
|
|
5004
|
+
}
|
|
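ensureValidToken refreshes the OAuth access token only when it is inside a five-minute expiry buffer, and on any failure (no refresh_token, non-OK response, network error) it returns the existing config unchanged, so callers degrade to an eventual 401 instead of throwing. A reduced sketch of that flow (the /api/oauth/token path and field names follow the code above; it assumes Node 18+ global fetch):

// Sketch: refresh an access token when it expires within `bufferMs`; never throw, return old config on failure.
async function ensureFresh(config, bufferMs = 5 * 60 * 1000) {
  if ((config.expires_at || 0) > Date.now() + bufferMs) return config; // still comfortably valid
  if (!config.refresh_token) return config;                            // cannot refresh
  const res = await fetch(`${config.api_url}/api/oauth/token`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ grant_type: "refresh_token", refresh_token: config.refresh_token }),
  }).catch(() => null);
  if (!res || !res.ok) return config;                                  // keep the old token
  const tok = await res.json();
  return {
    ...config,
    access_token: tok.access_token,
    refresh_token: tok.refresh_token || config.refresh_token,          // rotate only if a new one arrived
    expires_at: Date.now() + tok.expires_in * 1000,
  };
}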
5005
|
+
async function fetchWithAuth(url, options = {}, retryOnUnauthorized = true) {
|
|
5006
|
+
let config = await (0, import_core7.loadConfig)();
|
|
5007
|
+
if (!config?.access_token) {
|
|
5008
|
+
throw new Error("No access token configured");
|
|
5009
|
+
}
|
|
5010
|
+
config = await ensureValidToken(config);
|
|
5011
|
+
const headers = {
|
|
5012
|
+
...options.headers,
|
|
5013
|
+
"Authorization": `Bearer ${config.access_token}`,
|
|
5014
|
+
"Content-Type": "application/json"
|
|
5015
|
+
};
|
|
5016
|
+
let response = await fetch(url, { ...options, headers });
|
|
5017
|
+
if (response.status === 401 && retryOnUnauthorized && config.refresh_token) {
|
|
5018
|
+
console.log("[Daemon] EP904: Received 401, attempting token refresh and retry...");
|
|
5019
|
+
const refreshedConfig = await ensureValidToken({ ...config, expires_at: 0 });
|
|
5020
|
+
if (refreshedConfig.access_token !== config.access_token) {
|
|
5021
|
+
const retryHeaders = {
|
|
5022
|
+
...options.headers,
|
|
5023
|
+
"Authorization": `Bearer ${refreshedConfig.access_token}`,
|
|
5024
|
+
"Content-Type": "application/json"
|
|
5025
|
+
};
|
|
5026
|
+
response = await fetch(url, { ...options, headers: retryHeaders });
|
|
5027
|
+
}
|
|
5028
|
+
}
|
|
5029
|
+
return response;
|
|
5030
|
+
}
|
|
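fetchWithAuth stacks two defenses: a proactive refresh before the request, and, if the server still answers 401, one forced refresh (by treating the token as already expired) followed by a single retry with the new token. A compact sketch of that retry-once shape, reusing the hypothetical ensureFresh helper from the previous sketch:

// Sketch: authorized fetch that refreshes and retries exactly once on 401.
async function authedFetch(url, options, config) {
  const doFetch = (token) =>
    fetch(url, {
      ...options,
      headers: { ...options.headers, Authorization: `Bearer ${token}`, "Content-Type": "application/json" },
    });

  let cfg = await ensureFresh(config);                              // proactive refresh
  let res = await doFetch(cfg.access_token);
  if (res.status === 401 && cfg.refresh_token) {
    const refreshed = await ensureFresh({ ...cfg, expires_at: 0 }); // force a refresh attempt
    if (refreshed.access_token !== cfg.access_token) {
      res = await doFetch(refreshed.access_token);                  // retry once, never loop
    }
  }
  return res;
}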
5031
|
+
var Daemon = class _Daemon {
|
|
5032
|
+
// 60 seconds
|
|
3796
5033
|
constructor() {
|
|
3797
5034
|
this.machineId = "";
|
|
3798
5035
|
this.deviceId = null;
|
|
@@ -3812,8 +5049,43 @@ var Daemon = class {
|
|
|
3812
5049
|
this.pendingConnections = /* @__PURE__ */ new Set();
|
|
3813
5050
|
// projectPath
|
|
3814
5051
|
this.shuttingDown = false;
|
|
5052
|
+
// EP822: Periodic tunnel polling interval
|
|
5053
|
+
this.tunnelPollInterval = null;
|
|
5054
|
+
// 15 seconds
|
|
5055
|
+
// EP822: Prevent concurrent tunnel syncs (backpressure guard)
|
|
5056
|
+
this.tunnelSyncInProgress = false;
|
|
5057
|
+
// EP833: Track consecutive health check failures per tunnel
|
|
5058
|
+
this.tunnelHealthFailures = /* @__PURE__ */ new Map();
|
|
5059
|
+
// 3 second timeout for health checks
|
|
5060
|
+
// EP911: Track last reported health status to avoid unnecessary DB writes
|
|
5061
|
+
this.lastReportedHealthStatus = /* @__PURE__ */ new Map();
|
|
5062
|
+
// moduleUid -> status
|
|
5063
|
+
// EP837: Prevent concurrent commit syncs (backpressure guard)
|
|
5064
|
+
this.commitSyncInProgress = false;
|
|
5065
|
+
// EP843: Per-module mutex for tunnel operations
|
|
5066
|
+
// Prevents race conditions between autoStartTunnels and module_state_changed handler
|
|
5067
|
+
this.tunnelOperationLocks = /* @__PURE__ */ new Map();
|
|
5068
|
+
// moduleUid -> operation promise
|
|
5069
|
+
// EP929: Health check polling interval (restored from EP843 removal)
|
|
5070
|
+
// Health checks are orthogonal to push-based state sync - they detect dead tunnels
|
|
5071
|
+
this.healthCheckInterval = null;
|
|
5072
|
+
this.healthCheckInProgress = false;
|
|
3815
5073
|
this.ipcServer = new IPCServer();
|
|
3816
5074
|
}
|
|
5075
|
+
static {
|
|
5076
|
+
this.TUNNEL_POLL_INTERVAL_MS = 15e3;
|
|
5077
|
+
}
|
|
5078
|
+
static {
|
|
5079
|
+
// moduleUid -> consecutive failures
|
|
5080
|
+
this.HEALTH_CHECK_FAILURE_THRESHOLD = 2;
|
|
5081
|
+
}
|
|
5082
|
+
static {
|
|
5083
|
+
// Restart after 2 consecutive failures
|
|
5084
|
+
this.HEALTH_CHECK_TIMEOUT_MS = 3e3;
|
|
5085
|
+
}
|
|
5086
|
+
static {
|
|
5087
|
+
this.HEALTH_CHECK_INTERVAL_MS = 6e4;
|
|
5088
|
+
}
|
|
3817
5089
|
/**
|
|
3818
5090
|
* Start the daemon
|
|
3819
5091
|
*/
|
|
@@ -3821,7 +5093,7 @@ var Daemon = class {
|
|
|
3821
5093
|
console.log("[Daemon] Starting Episoda daemon...");
|
|
3822
5094
|
this.machineId = await getMachineId();
|
|
3823
5095
|
console.log(`[Daemon] Machine ID: ${this.machineId}`);
|
|
3824
|
-
const config = await (0,
|
|
5096
|
+
const config = await (0, import_core7.loadConfig)();
|
|
3825
5097
|
if (config?.device_id) {
|
|
3826
5098
|
this.deviceId = config.device_id;
|
|
3827
5099
|
console.log(`[Daemon] Loaded cached Device ID (UUID): ${this.deviceId}`);
|
|
@@ -3830,6 +5102,8 @@ var Daemon = class {
|
|
|
3830
5102
|
console.log("[Daemon] IPC server started");
|
|
3831
5103
|
this.registerIPCHandlers();
|
|
3832
5104
|
await this.restoreConnections();
|
|
5105
|
+
await this.cleanupOrphanedTunnels();
|
|
5106
|
+
this.startHealthCheckPolling();
|
|
3833
5107
|
this.setupShutdownHandlers();
|
|
3834
5108
|
console.log("[Daemon] Daemon started successfully");
|
|
3835
5109
|
this.checkAndNotifyUpdates();
|
|
@@ -3863,16 +5137,20 @@ var Daemon = class {
|
|
|
3863
5137
|
id: p.id,
|
|
3864
5138
|
path: p.path,
|
|
3865
5139
|
name: p.name,
|
|
3866
|
-
|
|
5140
|
+
// EP843: Use actual WebSocket state instead of liveConnections Set
|
|
5141
|
+
// This is more reliable as it checks the real connection state
|
|
5142
|
+
connected: this.isWebSocketOpen(p.path),
|
|
5143
|
+
// Keep liveConnections for backwards compatibility and debugging
|
|
5144
|
+
liveConnectionsHas: this.liveConnections.has(p.path)
|
|
3867
5145
|
}));
|
|
3868
5146
|
return {
|
|
3869
5147
|
running: true,
|
|
3870
5148
|
machineId: this.machineId,
|
|
3871
5149
|
deviceId: this.deviceId,
|
|
3872
5150
|
// EP726: UUID for unified device identification
|
|
3873
|
-
hostname:
|
|
3874
|
-
platform:
|
|
3875
|
-
arch:
|
|
5151
|
+
hostname: os4.hostname(),
|
|
5152
|
+
platform: os4.platform(),
|
|
5153
|
+
arch: os4.arch(),
|
|
3876
5154
|
projects
|
|
3877
5155
|
};
|
|
3878
5156
|
});
|
|
@@ -3937,10 +5215,12 @@ var Daemon = class {
|
|
|
3937
5215
|
name: p.name,
|
|
3938
5216
|
inConnectionsMap: this.connections.has(p.path),
|
|
3939
5217
|
inLiveConnections: this.liveConnections.has(p.path),
|
|
5218
|
+
// EP843: Add actual WebSocket state
|
|
5219
|
+
wsOpen: this.isWebSocketOpen(p.path),
|
|
3940
5220
|
isHealthy: this.isConnectionHealthy(p.path)
|
|
3941
5221
|
}));
|
|
3942
|
-
const healthyCount = projects.filter((p) => p.
|
|
3943
|
-
const staleCount = projects.filter((p) => p.inConnectionsMap && !p.
|
|
5222
|
+
const healthyCount = projects.filter((p) => p.wsOpen).length;
|
|
5223
|
+
const staleCount = projects.filter((p) => p.inConnectionsMap && !p.wsOpen).length;
|
|
3944
5224
|
return {
|
|
3945
5225
|
totalProjects: projects.length,
|
|
3946
5226
|
healthyConnections: healthyCount,
|
|
@@ -3948,6 +5228,86 @@ var Daemon = class {
|
|
|
3948
5228
|
projects
|
|
3949
5229
|
};
|
|
3950
5230
|
});
|
|
5231
|
+
this.ipcServer.on("verify-server-connection", async () => {
|
|
5232
|
+
const config = await (0, import_core7.loadConfig)();
|
|
5233
|
+
if (!config?.access_token || !config?.api_url) {
|
|
5234
|
+
return {
|
|
5235
|
+
verified: false,
|
|
5236
|
+
error: "No authentication configured",
|
|
5237
|
+
localConnected: false,
|
|
5238
|
+
serverConnected: false
|
|
5239
|
+
};
|
|
5240
|
+
}
|
|
5241
|
+
const projects = getAllProjects();
|
|
5242
|
+
const localConnected = projects.some((p) => this.isWebSocketOpen(p.path));
|
|
5243
|
+
let serverConnected = false;
|
|
5244
|
+
let serverMachineId = null;
|
|
5245
|
+
let serverError = null;
|
|
5246
|
+
try {
|
|
5247
|
+
const response = await fetch(`${config.api_url}/api/cli/status`, {
|
|
5248
|
+
headers: {
|
|
5249
|
+
"Authorization": `Bearer ${config.access_token}`,
|
|
5250
|
+
"Content-Type": "application/json"
|
|
5251
|
+
}
|
|
5252
|
+
});
|
|
5253
|
+
if (response.ok) {
|
|
5254
|
+
const data = await response.json();
|
|
5255
|
+
serverConnected = data.connected === true;
|
|
5256
|
+
serverMachineId = data.machine_id || null;
|
|
5257
|
+
} else {
|
|
5258
|
+
serverError = `Server returned ${response.status}`;
|
|
5259
|
+
}
|
|
5260
|
+
} catch (err) {
|
|
5261
|
+
serverError = err instanceof Error ? err.message : "Network error";
|
|
5262
|
+
}
|
|
5263
|
+
const machineMatch = serverMachineId === this.machineId;
|
|
5264
|
+
return {
|
|
5265
|
+
verified: true,
|
|
5266
|
+
localConnected,
|
|
5267
|
+
serverConnected,
|
|
5268
|
+
machineMatch,
|
|
5269
|
+
machineId: this.machineId,
|
|
5270
|
+
serverMachineId,
|
|
5271
|
+
serverError,
|
|
5272
|
+
// Overall status: both local and server must agree
|
|
5273
|
+
actuallyConnected: localConnected && serverConnected && machineMatch
|
|
5274
|
+
};
|
|
5275
|
+
});
|
|
5276
|
+
this.ipcServer.on("tunnel-status", async () => {
|
|
5277
|
+
const tunnelManager = getTunnelManager();
|
|
5278
|
+
const tunnels = tunnelManager.getAllTunnels();
|
|
5279
|
+
return { tunnels };
|
|
5280
|
+
});
|
|
5281
|
+
this.ipcServer.on("tunnel-stop", async (params) => {
|
|
5282
|
+
const { moduleUid } = params;
|
|
5283
|
+
if (!moduleUid) {
|
|
5284
|
+
return { success: false, error: "Module UID is required" };
|
|
5285
|
+
}
|
|
5286
|
+
const tunnelManager = getTunnelManager();
|
|
5287
|
+
if (!tunnelManager.hasTunnel(moduleUid)) {
|
|
5288
|
+
return { success: false, error: "No tunnel found for this module" };
|
|
5289
|
+
}
|
|
5290
|
+
await tunnelManager.stopTunnel(moduleUid);
|
|
5291
|
+
await stopDevServer(moduleUid);
|
|
5292
|
+
await clearTunnelUrl(moduleUid);
|
|
5293
|
+
this.lastReportedHealthStatus.delete(moduleUid);
|
|
5294
|
+
this.tunnelHealthFailures.delete(moduleUid);
|
|
5295
|
+
console.log(`[Daemon] EP823: Tunnel stopped for ${moduleUid}`);
|
|
5296
|
+
return { success: true };
|
|
5297
|
+
});
|
|
5298
|
+
this.ipcServer.on("dev-server-restart", async (params) => {
|
|
5299
|
+
const { moduleUid } = params;
|
|
5300
|
+
if (!moduleUid) {
|
|
5301
|
+
return { success: false, error: "Module UID is required" };
|
|
5302
|
+
}
|
|
5303
|
+
console.log(`[Daemon] EP932: Dev server restart requested for ${moduleUid}`);
|
|
5304
|
+
const result = await restartDevServer(moduleUid);
|
|
5305
|
+
return result;
|
|
5306
|
+
});
|
|
5307
|
+
this.ipcServer.on("dev-server-status", async () => {
|
|
5308
|
+
const status = getDevServerStatus();
|
|
5309
|
+
return { success: true, servers: status };
|
|
5310
|
+
});
|
|
3951
5311
|
}
|
|
3952
5312
|
/**
|
|
3953
5313
|
* Restore WebSocket connections for tracked projects
|
|
@@ -3962,13 +5322,62 @@ var Daemon = class {
|
|
|
3962
5322
|
}
|
|
3963
5323
|
}
|
|
3964
5324
|
}
|
|
3965
|
-
/**
|
|
3966
|
-
* EP805: Check if a connection is healthy (exists AND is live)
|
|
3967
|
-
* A connection can exist in the Map but be dead if WebSocket disconnected
|
|
3968
|
-
*/
|
|
3969
|
-
isConnectionHealthy(projectPath) {
|
|
3970
|
-
return this.connections.has(projectPath) && this.liveConnections.has(projectPath);
|
|
3971
|
-
}
|
|
5325
|
+
/**
|
|
5326
|
+
* EP805: Check if a connection is healthy (exists AND is live)
|
|
5327
|
+
* A connection can exist in the Map but be dead if WebSocket disconnected
|
|
5328
|
+
*/
|
|
5329
|
+
isConnectionHealthy(projectPath) {
|
|
5330
|
+
return this.connections.has(projectPath) && this.liveConnections.has(projectPath);
|
|
5331
|
+
}
|
|
5332
|
+
/**
|
|
5333
|
+
* EP843: Check if a connection's WebSocket is actually open
|
|
5334
|
+
*
|
|
5335
|
+
* This checks the actual WebSocket state, not just our tracking Sets.
|
|
5336
|
+
* More reliable than liveConnections Set which can become stale.
|
|
5337
|
+
*
|
|
5338
|
+
* @param projectPath - The project path to check
|
|
5339
|
+
* @returns true if WebSocket exists and is in OPEN state
|
|
5340
|
+
*/
|
|
5341
|
+
isWebSocketOpen(projectPath) {
|
|
5342
|
+
const connection = this.connections.get(projectPath);
|
|
5343
|
+
if (!connection) return false;
|
|
5344
|
+
return connection.client.getStatus().connected;
|
|
5345
|
+
}
|
|
5346
|
+
/**
|
|
5347
|
+
* EP843: Acquire a per-module lock for tunnel operations
|
|
5348
|
+
*
|
|
5349
|
+
* Prevents race conditions between:
|
|
5350
|
+
* - autoStartTunnelsForProject() on auth_success
|
|
5351
|
+
* - module_state_changed event handler
|
|
5352
|
+
* - Multiple rapid state transitions
|
|
5353
|
+
*
|
|
5354
|
+
* @param moduleUid - The module UID to lock
|
|
5355
|
+
* @param operation - Async operation to run while holding the lock
|
|
5356
|
+
* @returns Result of the operation
|
|
5357
|
+
*/
|
|
5358
|
+
async withTunnelLock(moduleUid, operation) {
|
|
5359
|
+
const existingLock = this.tunnelOperationLocks.get(moduleUid);
|
|
5360
|
+
if (existingLock) {
|
|
5361
|
+
console.log(`[Daemon] EP843: Tunnel operation already in progress for ${moduleUid}, waiting...`);
|
|
5362
|
+
try {
|
|
5363
|
+
await existingLock;
|
|
5364
|
+
} catch {
|
|
5365
|
+
}
|
|
5366
|
+
}
|
|
5367
|
+
let releaseLock;
|
|
5368
|
+
const lockPromise = new Promise((resolve2) => {
|
|
5369
|
+
releaseLock = resolve2;
|
|
5370
|
+
});
|
|
5371
|
+
this.tunnelOperationLocks.set(moduleUid, lockPromise);
|
|
5372
|
+
try {
|
|
5373
|
+
return await operation();
|
|
5374
|
+
} finally {
|
|
5375
|
+
releaseLock();
|
|
5376
|
+
if (this.tunnelOperationLocks.get(moduleUid) === lockPromise) {
|
|
5377
|
+
this.tunnelOperationLocks.delete(moduleUid);
|
|
5378
|
+
}
|
|
5379
|
+
}
|
|
5380
|
+
}
|
|
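withTunnelLock is a per-key mutex built out of plain promises: a newcomer awaits whatever promise is currently stored for that module, installs its own unresolved promise as the new lock, and in finally resolves it and deletes the Map entry only if it is still the latest. The same pattern stripped of tunnel specifics:

// Sketch: per-key async lock using a Map of in-flight promises.
const locks = new Map();

async function withLock(key, operation) {
  const prior = locks.get(key);
  if (prior) {
    try { await prior; } catch { /* the previous holder's failure is not ours */ }
  }
  let release;
  const gate = new Promise((resolve) => { release = resolve; });
  locks.set(key, gate);
  try {
    return await operation();
  } finally {
    release();
    if (locks.get(key) === gate) locks.delete(key); // clean up only if nobody replaced us
  }
}

Like the code above, this keeps a newcomer from overlapping the current holder; it does not strictly queue several simultaneous waiters, which the handlers above compensate for by re-checking hasTunnel inside the lock before starting anything.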
3972
5381
|
/**
|
|
3973
5382
|
* Connect to a project's WebSocket
|
|
3974
5383
|
*/
|
|
@@ -3994,7 +5403,7 @@ var Daemon = class {
|
|
|
3994
5403
|
console.warn(`[Daemon] Stale connection detected for ${projectPath}, forcing reconnection`);
|
|
3995
5404
|
await this.disconnectProject(projectPath);
|
|
3996
5405
|
}
|
|
3997
|
-
const config = await (0,
|
|
5406
|
+
const config = await (0, import_core7.loadConfig)();
|
|
3998
5407
|
if (!config || !config.access_token) {
|
|
3999
5408
|
throw new Error("No access token found. Please run: episoda auth");
|
|
4000
5409
|
}
|
|
@@ -4008,8 +5417,8 @@ var Daemon = class {
|
|
|
4008
5417
|
const wsPort = process.env.EPISODA_WS_PORT || "3001";
|
|
4009
5418
|
const wsUrl = `${wsProtocol}//${serverUrlObj.hostname}:${wsPort}`;
|
|
4010
5419
|
console.log(`[Daemon] Connecting to ${wsUrl} for project ${projectId}...`);
|
|
4011
|
-
const client = new
|
|
4012
|
-
const gitExecutor = new
|
|
5420
|
+
const client = new import_core7.EpisodaClient();
|
|
5421
|
+
const gitExecutor = new import_core7.GitExecutor();
|
|
4013
5422
|
const connection = {
|
|
4014
5423
|
projectId,
|
|
4015
5424
|
projectPath,
|
|
@@ -4060,6 +5469,15 @@ var Daemon = class {
|
|
|
4060
5469
|
case "file:write":
|
|
4061
5470
|
result = await handleFileWrite(cmd, projectPath);
|
|
4062
5471
|
break;
|
|
5472
|
+
case "file:edit":
|
|
5473
|
+
result = await handleFileEdit(cmd, projectPath);
|
|
5474
|
+
break;
|
|
5475
|
+
case "file:delete":
|
|
5476
|
+
result = await handleFileDelete(cmd, projectPath);
|
|
5477
|
+
break;
|
|
5478
|
+
case "file:mkdir":
|
|
5479
|
+
result = await handleFileMkdir(cmd, projectPath);
|
|
5480
|
+
break;
|
|
4063
5481
|
case "file:list":
|
|
4064
5482
|
result = await handleFileList(cmd, projectPath);
|
|
4065
5483
|
break;
|
|
@@ -4109,7 +5527,7 @@ var Daemon = class {
|
|
|
4109
5527
|
const port = cmd.port || detectDevPort(projectPath);
|
|
4110
5528
|
const previewUrl = `https://${cmd.moduleUid.toLowerCase()}-${cmd.projectUid.toLowerCase()}.episoda.site`;
|
|
4111
5529
|
const reportTunnelStatus = async (data) => {
|
|
4112
|
-
const config2 = await (0,
|
|
5530
|
+
const config2 = await (0, import_core7.loadConfig)();
|
|
4113
5531
|
if (config2?.access_token) {
|
|
4114
5532
|
try {
|
|
4115
5533
|
const apiUrl = config2.api_url || "https://episoda.dev";
|
|
@@ -4201,7 +5619,7 @@ var Daemon = class {
|
|
|
4201
5619
|
} else if (cmd.action === "stop") {
|
|
4202
5620
|
await tunnelManager.stopTunnel(cmd.moduleUid);
|
|
4203
5621
|
await stopDevServer(cmd.moduleUid);
|
|
4204
|
-
const config2 = await (0,
|
|
5622
|
+
const config2 = await (0, import_core7.loadConfig)();
|
|
4205
5623
|
if (config2?.access_token) {
|
|
4206
5624
|
try {
|
|
4207
5625
|
const apiUrl = config2.api_url || "https://episoda.dev";
|
|
@@ -4241,6 +5659,122 @@ var Daemon = class {
|
|
|
4241
5659
|
}
|
|
4242
5660
|
}
|
|
4243
5661
|
});
|
|
5662
|
+
client.on("agent_command", async (message) => {
|
|
5663
|
+
if (message.type === "agent_command" && message.command) {
|
|
5664
|
+
const cmd = message.command;
|
|
5665
|
+
console.log(`[Daemon] EP912: Received agent command for ${projectId}:`, cmd.action);
|
|
5666
|
+
client.updateActivity();
|
|
5667
|
+
const createStreamingCallbacks = (sessionId, commandId) => ({
|
|
5668
|
+
onChunk: async (chunk) => {
|
|
5669
|
+
try {
|
|
5670
|
+
await client.send({
|
|
5671
|
+
type: "agent_result",
|
|
5672
|
+
commandId,
|
|
5673
|
+
result: { success: true, status: "chunk", sessionId, chunk }
|
|
5674
|
+
});
|
|
5675
|
+
} catch (sendError) {
|
|
5676
|
+
console.error(`[Daemon] EP912: Failed to send chunk (WebSocket may be disconnected):`, sendError);
|
|
5677
|
+
}
|
|
5678
|
+
},
|
|
5679
|
+
onComplete: async (claudeSessionId) => {
|
|
5680
|
+
try {
|
|
5681
|
+
await client.send({
|
|
5682
|
+
type: "agent_result",
|
|
5683
|
+
commandId,
|
|
5684
|
+
result: { success: true, status: "complete", sessionId, claudeSessionId }
|
|
5685
|
+
});
|
|
5686
|
+
} catch (sendError) {
|
|
5687
|
+
console.error(`[Daemon] EP912: Failed to send complete (WebSocket may be disconnected):`, sendError);
|
|
5688
|
+
}
|
|
5689
|
+
},
|
|
5690
|
+
onError: async (error) => {
|
|
5691
|
+
try {
|
|
5692
|
+
await client.send({
|
|
5693
|
+
type: "agent_result",
|
|
5694
|
+
commandId,
|
|
5695
|
+
result: { success: false, status: "error", sessionId, error }
|
|
5696
|
+
});
|
|
5697
|
+
} catch (sendError) {
|
|
5698
|
+
console.error(`[Daemon] EP912: Failed to send error (WebSocket may be disconnected):`, sendError);
|
|
5699
|
+
}
|
|
5700
|
+
}
|
|
5701
|
+
});
|
|
5702
|
+
try {
|
|
5703
|
+
const agentManager = getAgentManager();
|
|
5704
|
+
await agentManager.initialize();
|
|
5705
|
+
let result;
|
|
5706
|
+
if (cmd.action === "start") {
|
|
5707
|
+
const callbacks = createStreamingCallbacks(cmd.sessionId, message.id);
|
|
5708
|
+
const startResult = await agentManager.startSession({
|
|
5709
|
+
sessionId: cmd.sessionId,
|
|
5710
|
+
moduleId: cmd.moduleId,
|
|
5711
|
+
moduleUid: cmd.moduleUid,
|
|
5712
|
+
projectPath,
|
|
5713
|
+
message: cmd.message,
|
|
5714
|
+
credentials: cmd.credentials,
|
|
5715
|
+
systemPrompt: cmd.systemPrompt,
|
|
5716
|
+
...callbacks
|
|
5717
|
+
});
|
|
5718
|
+
result = {
|
|
5719
|
+
success: startResult.success,
|
|
5720
|
+
status: startResult.success ? "started" : "error",
|
|
5721
|
+
sessionId: cmd.sessionId,
|
|
5722
|
+
error: startResult.error
|
|
5723
|
+
};
|
|
5724
|
+
} else if (cmd.action === "message") {
|
|
5725
|
+
const callbacks = createStreamingCallbacks(cmd.sessionId, message.id);
|
|
5726
|
+
const sendResult = await agentManager.sendMessage({
|
|
5727
|
+
sessionId: cmd.sessionId,
|
|
5728
|
+
message: cmd.message,
|
|
5729
|
+
isFirstMessage: false,
|
|
5730
|
+
claudeSessionId: cmd.claudeSessionId,
|
|
5731
|
+
...callbacks
|
|
5732
|
+
});
|
|
5733
|
+
result = {
|
|
5734
|
+
success: sendResult.success,
|
|
5735
|
+
status: sendResult.success ? "started" : "error",
|
|
5736
|
+
sessionId: cmd.sessionId,
|
|
5737
|
+
error: sendResult.error
|
|
5738
|
+
};
|
|
5739
|
+
} else if (cmd.action === "abort") {
|
|
5740
|
+
await agentManager.abortSession(cmd.sessionId);
|
|
5741
|
+
result = { success: true, status: "aborted", sessionId: cmd.sessionId };
|
|
5742
|
+
} else if (cmd.action === "stop") {
|
|
5743
|
+
await agentManager.stopSession(cmd.sessionId);
|
|
5744
|
+
result = { success: true, status: "complete", sessionId: cmd.sessionId };
|
|
5745
|
+
} else {
|
|
5746
|
+
result = {
|
|
5747
|
+
success: false,
|
|
5748
|
+
status: "error",
|
|
5749
|
+
sessionId: cmd.sessionId || "unknown",
|
|
5750
|
+
error: `Unknown agent action: ${cmd.action}`
|
|
5751
|
+
};
|
|
5752
|
+
}
|
|
5753
|
+
await client.send({
|
|
5754
|
+
type: "agent_result",
|
|
5755
|
+
commandId: message.id,
|
|
5756
|
+
result
|
|
5757
|
+
});
|
|
5758
|
+
console.log(`[Daemon] EP912: Agent command ${cmd.action} completed for session ${cmd.action === "start" || cmd.action === "message" ? cmd.sessionId : cmd.sessionId}`);
|
|
5759
|
+
} catch (error) {
|
|
5760
|
+
try {
|
|
5761
|
+
await client.send({
|
|
5762
|
+
type: "agent_result",
|
|
5763
|
+
commandId: message.id,
|
|
5764
|
+
result: {
|
|
5765
|
+
success: false,
|
|
5766
|
+
status: "error",
|
|
5767
|
+
sessionId: cmd.sessionId || "unknown",
|
|
5768
|
+
error: error instanceof Error ? error.message : String(error)
|
|
5769
|
+
}
|
|
5770
|
+
});
|
|
5771
|
+
} catch (sendError) {
|
|
5772
|
+
console.error(`[Daemon] EP912: Failed to send error result (WebSocket may be disconnected):`, sendError);
|
|
5773
|
+
}
|
|
5774
|
+
console.error(`[Daemon] EP912: Agent command execution error:`, error);
|
|
5775
|
+
}
|
|
5776
|
+
}
|
|
5777
|
+
});
|
|
4244
5778
|
client.on("shutdown", async (message) => {
|
|
4245
5779
|
const shutdownMessage = message;
|
|
4246
5780
|
const reason = shutdownMessage.reason || "unknown";
|
|
@@ -4276,6 +5810,91 @@ var Daemon = class {
|
|
|
4276
5810
|
this.flyMachineId = authMessage.flyMachineId;
|
|
4277
5811
|
console.log(`[Daemon] Fly Machine ID: ${this.flyMachineId}`);
|
|
4278
5812
|
}
|
|
5813
|
+
this.autoStartTunnelsForProject(projectPath, projectId).catch((error) => {
|
|
5814
|
+
console.error(`[Daemon] EP819: Failed to auto-start tunnels:`, error);
|
|
5815
|
+
});
|
|
5816
|
+
});
|
|
5817
|
+
client.on("module_state_changed", async (message) => {
|
|
5818
|
+
if (message.type === "module_state_changed") {
|
|
5819
|
+
const { moduleUid, state, previousState, branchName, devMode } = message;
|
|
5820
|
+
console.log(`[Daemon] EP843: Module ${moduleUid} state changed: ${previousState} \u2192 ${state}`);
|
|
5821
|
+
if (devMode !== "local") {
|
|
5822
|
+
console.log(`[Daemon] EP843: Skipping tunnel action for ${moduleUid} (mode: ${devMode || "unknown"})`);
|
|
5823
|
+
return;
|
|
5824
|
+
}
|
|
5825
|
+
const tunnelManager = getTunnelManager();
|
|
5826
|
+
await tunnelManager.initialize();
|
|
5827
|
+
await this.withTunnelLock(moduleUid, async () => {
|
|
5828
|
+
const isInActiveZone = state === "ready" || state === "doing" || state === "review";
|
|
5829
|
+
const wasInActiveZone = previousState === "ready" || previousState === "doing" || previousState === "review";
|
|
5830
|
+
const startingWork = previousState === "ready" && state === "doing";
|
|
5831
|
+
const tunnelNotRunning = !tunnelManager.hasTunnel(moduleUid);
|
|
5832
|
+
const needsCrashRecovery = isInActiveZone && tunnelNotRunning;
|
|
5833
|
+
if (startingWork || needsCrashRecovery) {
|
|
5834
|
+
if (tunnelManager.hasTunnel(moduleUid)) {
|
|
5835
|
+
console.log(`[Daemon] EP843: Tunnel already running for ${moduleUid}, skipping start`);
|
|
5836
|
+
return;
|
|
5837
|
+
}
|
|
5838
|
+
console.log(`[Daemon] EP843: Starting tunnel for ${moduleUid} (${previousState} \u2192 ${state})`);
|
|
5839
|
+
try {
|
|
5840
|
+
const port = detectDevPort(projectPath);
|
|
5841
|
+
const devServerResult = await ensureDevServer(projectPath, port, moduleUid);
|
|
5842
|
+
if (!devServerResult.success) {
|
|
5843
|
+
console.error(`[Daemon] EP843: Dev server failed for ${moduleUid}: ${devServerResult.error}`);
|
|
5844
|
+
return;
|
|
5845
|
+
}
|
|
5846
|
+
const config2 = await (0, import_core7.loadConfig)();
|
|
5847
|
+
const apiUrl = config2?.api_url || "https://episoda.dev";
|
|
5848
|
+
const startResult = await tunnelManager.startTunnel({
|
|
5849
|
+
moduleUid,
|
|
5850
|
+
port,
|
|
5851
|
+
onUrl: async (url) => {
|
|
5852
|
+
console.log(`[Daemon] EP843: Tunnel URL for ${moduleUid}: ${url}`);
|
|
5853
|
+
try {
|
|
5854
|
+
await fetchWithAuth(`${apiUrl}/api/modules/${moduleUid}/tunnel`, {
|
|
5855
|
+
method: "POST",
|
|
5856
|
+
body: JSON.stringify({ tunnel_url: url })
|
|
5857
|
+
});
|
|
5858
|
+
} catch (err) {
|
|
5859
|
+
console.warn(`[Daemon] EP843: Failed to report tunnel URL:`, err instanceof Error ? err.message : err);
|
|
5860
|
+
}
|
|
5861
|
+
},
|
|
5862
|
+
onStatusChange: (status, error) => {
|
|
5863
|
+
if (status === "error") {
|
|
5864
|
+
console.error(`[Daemon] EP843: Tunnel error for ${moduleUid}: ${error}`);
|
|
5865
|
+
}
|
|
5866
|
+
}
|
|
5867
|
+
});
|
|
5868
|
+
if (startResult.success) {
|
|
5869
|
+
console.log(`[Daemon] EP843: Tunnel started for ${moduleUid}`);
|
|
5870
|
+
} else {
|
|
5871
|
+
console.error(`[Daemon] EP843: Tunnel failed for ${moduleUid}: ${startResult.error}`);
|
|
5872
|
+
}
|
|
5873
|
+
} catch (error) {
|
|
5874
|
+
console.error(`[Daemon] EP843: Error starting tunnel for ${moduleUid}:`, error);
|
|
5875
|
+
}
|
|
5876
|
+
}
|
|
5877
|
+
if (state === "done" && wasInActiveZone) {
|
|
5878
|
+
console.log(`[Daemon] EP933: Stopping tunnel for ${moduleUid} (${previousState} \u2192 done)`);
|
|
5879
|
+
try {
|
|
5880
|
+
await tunnelManager.stopTunnel(moduleUid);
|
|
5881
|
+
console.log(`[Daemon] EP843: Tunnel stopped for ${moduleUid}`);
|
|
5882
|
+
const config2 = await (0, import_core7.loadConfig)();
|
|
5883
|
+
const apiUrl = config2?.api_url || "https://episoda.dev";
|
|
5884
|
+
try {
|
|
5885
|
+
await fetchWithAuth(`${apiUrl}/api/modules/${moduleUid}/tunnel`, {
|
|
5886
|
+
method: "POST",
|
|
5887
|
+
body: JSON.stringify({ tunnel_url: null })
|
|
5888
|
+
});
|
|
5889
|
+
} catch (err) {
|
|
5890
|
+
console.warn(`[Daemon] EP843: Failed to clear tunnel URL:`, err instanceof Error ? err.message : err);
|
|
5891
|
+
}
|
|
5892
|
+
} catch (error) {
|
|
5893
|
+
console.error(`[Daemon] EP843: Error stopping tunnel for ${moduleUid}:`, error);
|
|
5894
|
+
}
|
|
5895
|
+
}
|
|
5896
|
+
});
|
|
5897
|
+
}
|
|
4279
5898
|
});
|
|
4280
5899
|
client.on("error", (message) => {
|
|
4281
5900
|
console.error(`[Daemon] Server error for ${projectId}:`, message);
|
|
@@ -4293,8 +5912,8 @@ var Daemon = class {
|
|
|
4293
5912
|
let daemonPid;
|
|
4294
5913
|
try {
|
|
4295
5914
|
const pidPath = getPidFilePath();
|
|
4296
|
-
if (
|
|
4297
|
-
const pidStr =
|
|
5915
|
+
if (fs11.existsSync(pidPath)) {
|
|
5916
|
+
const pidStr = fs11.readFileSync(pidPath, "utf-8").trim();
|
|
4298
5917
|
daemonPid = parseInt(pidStr, 10);
|
|
4299
5918
|
}
|
|
4300
5919
|
} catch (pidError) {
|
|
@@ -4318,9 +5937,9 @@ var Daemon = class {
|
|
|
4318
5937
|
client.once("auth_error", errorHandler);
|
|
4319
5938
|
});
|
|
4320
5939
|
await client.connect(wsUrl, config.access_token, this.machineId, {
|
|
4321
|
-
hostname:
|
|
4322
|
-
osPlatform:
|
|
4323
|
-
osArch:
|
|
5940
|
+
hostname: os4.hostname(),
|
|
5941
|
+
osPlatform: os4.platform(),
|
|
5942
|
+
osArch: os4.arch(),
|
|
4324
5943
|
daemonPid
|
|
4325
5944
|
});
|
|
4326
5945
|
console.log(`[Daemon] Successfully connected to project ${projectId}`);
|
|
@@ -4373,29 +5992,29 @@ var Daemon = class {
|
|
|
4373
5992
|
*/
|
|
4374
5993
|
async configureGitUser(projectPath, userId, workspaceId, machineId, projectId, deviceId) {
|
|
4375
5994
|
try {
|
|
4376
|
-
const { execSync:
|
|
4377
|
-
|
|
5995
|
+
const { execSync: execSync6 } = await import("child_process");
|
|
5996
|
+
execSync6(`git config episoda.userId ${userId}`, {
|
|
4378
5997
|
cwd: projectPath,
|
|
4379
5998
|
encoding: "utf8",
|
|
4380
5999
|
stdio: "pipe"
|
|
4381
6000
|
});
|
|
4382
|
-
|
|
6001
|
+
execSync6(`git config episoda.workspaceId ${workspaceId}`, {
|
|
4383
6002
|
cwd: projectPath,
|
|
4384
6003
|
encoding: "utf8",
|
|
4385
6004
|
stdio: "pipe"
|
|
4386
6005
|
});
|
|
4387
|
-
|
|
6006
|
+
execSync6(`git config episoda.machineId ${machineId}`, {
|
|
4388
6007
|
cwd: projectPath,
|
|
4389
6008
|
encoding: "utf8",
|
|
4390
6009
|
stdio: "pipe"
|
|
4391
6010
|
});
|
|
4392
|
-
|
|
6011
|
+
execSync6(`git config episoda.projectId ${projectId}`, {
|
|
4393
6012
|
cwd: projectPath,
|
|
4394
6013
|
encoding: "utf8",
|
|
4395
6014
|
stdio: "pipe"
|
|
4396
6015
|
});
|
|
4397
6016
|
if (deviceId) {
|
|
4398
|
-
|
|
6017
|
+
execSync6(`git config episoda.deviceId ${deviceId}`, {
|
|
4399
6018
|
cwd: projectPath,
|
|
4400
6019
|
encoding: "utf8",
|
|
4401
6020
|
stdio: "pipe"
|
|
@@ -4414,28 +6033,28 @@ var Daemon = class {
|
|
|
4414
6033
|
* - Main branch protection (pre-commit blocks direct commits to main)
|
|
4415
6034
|
*/
|
|
4416
6035
|
async installGitHooks(projectPath) {
|
|
4417
|
-
const hooks = ["post-checkout", "pre-commit"];
|
|
4418
|
-
const hooksDir =
|
|
4419
|
-
if (!
|
|
6036
|
+
const hooks = ["post-checkout", "pre-commit", "post-commit"];
|
|
6037
|
+
const hooksDir = path12.join(projectPath, ".git", "hooks");
|
|
6038
|
+
if (!fs11.existsSync(hooksDir)) {
|
|
4420
6039
|
console.warn(`[Daemon] Hooks directory not found: ${hooksDir}`);
|
|
4421
6040
|
return;
|
|
4422
6041
|
}
|
|
4423
6042
|
for (const hookName of hooks) {
|
|
4424
6043
|
try {
|
|
4425
|
-
const hookPath =
|
|
4426
|
-
const bundledHookPath =
|
|
4427
|
-
if (!
|
|
6044
|
+
const hookPath = path12.join(hooksDir, hookName);
|
|
6045
|
+
const bundledHookPath = path12.join(__dirname, "..", "hooks", hookName);
|
|
6046
|
+
if (!fs11.existsSync(bundledHookPath)) {
|
|
4428
6047
|
console.warn(`[Daemon] Bundled hook not found: ${bundledHookPath}`);
|
|
4429
6048
|
continue;
|
|
4430
6049
|
}
|
|
4431
|
-
const hookContent =
|
|
4432
|
-
if (
|
|
4433
|
-
const existingContent =
|
|
6050
|
+
const hookContent = fs11.readFileSync(bundledHookPath, "utf-8");
|
|
6051
|
+
if (fs11.existsSync(hookPath)) {
|
|
6052
|
+
const existingContent = fs11.readFileSync(hookPath, "utf-8");
|
|
4434
6053
|
if (existingContent === hookContent) {
|
|
4435
6054
|
continue;
|
|
4436
6055
|
}
|
|
4437
6056
|
}
|
|
4438
|
-
|
|
6057
|
+
fs11.writeFileSync(hookPath, hookContent, { mode: 493 });
|
|
4439
6058
|
console.log(`[Daemon] Installed git hook: ${hookName}`);
|
|
4440
6059
|
} catch (error) {
|
|
4441
6060
|
console.warn(`[Daemon] Failed to install ${hookName} hook:`, error instanceof Error ? error.message : error);
|
|
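installGitHooks copies the bundled hook scripts into .git/hooks, skips the write when the existing hook is byte-for-byte identical, and writes with mode 493, which is simply 0o755 in decimal (owner rwx, group and others r-x) so git can execute the hook. A sketch of that idempotent install for a single hook (paths are illustrative):

// Sketch: install a hook script only when missing or out of date; 493 === 0o755.
const fs = require("fs");
const path = require("path");

function installHook(repoPath, hookName, sourcePath) {
  const hooksDir = path.join(repoPath, ".git", "hooks");
  if (!fs.existsSync(hooksDir)) return false;          // not a normal git checkout
  const target = path.join(hooksDir, hookName);
  const content = fs.readFileSync(sourcePath, "utf-8");
  if (fs.existsSync(target) && fs.readFileSync(target, "utf-8") === content) {
    return false;                                      // already current, leave it alone
  }
  fs.writeFileSync(target, content, { mode: 0o755 });  // must be executable for git to run it
  return true;
}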
@@ -4450,7 +6069,7 @@ var Daemon = class {
|
|
|
4450
6069
|
*/
|
|
4451
6070
|
async cacheDeviceId(deviceId) {
|
|
4452
6071
|
try {
|
|
4453
|
-
const config = await (0,
|
|
6072
|
+
const config = await (0, import_core7.loadConfig)();
|
|
4454
6073
|
if (!config) {
|
|
4455
6074
|
console.warn("[Daemon] Cannot cache device ID - no config found");
|
|
4456
6075
|
return;
|
|
@@ -4463,12 +6082,518 @@ var Daemon = class {
|
|
|
4463
6082
|
device_id: deviceId,
|
|
4464
6083
|
machine_id: this.machineId
|
|
4465
6084
|
};
|
|
4466
|
-
await (0,
|
|
6085
|
+
await (0, import_core7.saveConfig)(updatedConfig);
|
|
4467
6086
|
console.log(`[Daemon] Cached device ID to config: ${deviceId}`);
|
|
4468
6087
|
} catch (error) {
|
|
4469
6088
|
console.warn("[Daemon] Failed to cache device ID:", error instanceof Error ? error.message : error);
|
|
4470
6089
|
}
|
|
4471
6090
|
}
|
|
6091
|
+
/**
|
|
6092
|
+
* EP819: Auto-start tunnels for active local modules on daemon connect/reconnect
|
|
6093
|
+
*
|
|
6094
|
+
* Queries for modules in doing/review state with dev_mode=local that don't have
|
|
6095
|
+
* an active tunnel_url, and starts tunnels for each.
|
|
6096
|
+
*/
|
|
6097
|
+
async autoStartTunnelsForProject(projectPath, projectUid) {
|
|
6098
|
+
console.log(`[Daemon] EP819: Checking for active local modules to auto-start tunnels...`);
|
|
6099
|
+
try {
|
|
6100
|
+
const config = await (0, import_core7.loadConfig)();
|
|
6101
|
+
if (!config?.access_token) {
|
|
6102
|
+
console.warn(`[Daemon] EP819: No access token, skipping tunnel auto-start`);
|
|
6103
|
+
return;
|
|
6104
|
+
}
|
|
6105
|
+
const apiUrl = config.api_url || "https://episoda.dev";
|
|
6106
|
+
const response = await fetchWithAuth(
|
|
6107
|
+
`${apiUrl}/api/modules?state=ready,doing,review&fields=id,uid,dev_mode,tunnel_url,checkout_machine_id`
|
|
6108
|
+
);
|
|
6109
|
+
if (!response.ok) {
|
|
6110
|
+
console.warn(`[Daemon] EP819: Failed to fetch modules: ${response.status}`);
|
|
6111
|
+
return;
|
|
6112
|
+
}
|
|
6113
|
+
const data = await response.json();
|
|
6114
|
+
const modules = data.modules || [];
|
|
6115
|
+
const tunnelManager = getTunnelManager();
|
|
6116
|
+
await tunnelManager.initialize();
|
|
6117
|
+
const localModulesNeedingTunnel = modules.filter(
|
|
6118
|
+
(m) => m.dev_mode === "local" && (!m.checkout_machine_id || m.checkout_machine_id === this.deviceId) && !tunnelManager.hasTunnel(m.uid)
|
|
6119
|
+
);
|
|
6120
|
+
if (localModulesNeedingTunnel.length === 0) {
|
|
6121
|
+
console.log(`[Daemon] EP819: No local modules need tunnel auto-start`);
|
|
6122
|
+
return;
|
|
6123
|
+
}
|
|
6124
|
+
console.log(`[Daemon] EP819: Found ${localModulesNeedingTunnel.length} local modules needing tunnels`);
|
|
6125
|
+
for (const module2 of localModulesNeedingTunnel) {
|
|
6126
|
+
const moduleUid = module2.uid;
|
|
6127
|
+
const port = detectDevPort(projectPath);
|
|
6128
|
+
console.log(`[Daemon] EP819: Auto-starting tunnel for ${moduleUid} on port ${port}`);
|
|
6129
|
+
const reportTunnelStatus = async (statusData) => {
|
|
6130
|
+
try {
|
|
6131
|
+
+            const statusResponse = await fetchWithAuth(`${apiUrl}/api/modules/${moduleUid}/tunnel`, {
+              method: "POST",
+              body: JSON.stringify(statusData)
+            });
+            if (statusResponse.ok) {
+              console.log(`[Daemon] EP819: Tunnel status reported for ${moduleUid}`);
+            } else {
+              console.warn(`[Daemon] EP819: Failed to report tunnel status: ${statusResponse.statusText}`);
+            }
+          } catch (reportError) {
+            console.warn(`[Daemon] EP819: Error reporting tunnel status:`, reportError);
+          }
+        };
+        (async () => {
+          await this.withTunnelLock(moduleUid, async () => {
+            if (tunnelManager.hasTunnel(moduleUid)) {
+              console.log(`[Daemon] EP819: Tunnel already running for ${moduleUid}, skipping auto-start`);
+              return;
+            }
+            const MAX_RETRIES = 3;
+            const RETRY_DELAY_MS = 2e3;
+            await reportTunnelStatus({
+              tunnel_started_at: (/* @__PURE__ */ new Date()).toISOString(),
+              tunnel_error: null
+            });
+            try {
+              console.log(`[Daemon] EP819: Ensuring dev server is running for ${moduleUid}...`);
+              const devServerResult = await ensureDevServer(projectPath, port, moduleUid);
+              if (!devServerResult.success) {
+                const errorMsg2 = `Dev server failed to start: ${devServerResult.error}`;
+                console.error(`[Daemon] EP819: ${errorMsg2}`);
+                await reportTunnelStatus({ tunnel_error: errorMsg2 });
+                return;
+              }
+              console.log(`[Daemon] EP819: Dev server ready on port ${port}`);
+              let lastError;
+              for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
+                console.log(`[Daemon] EP819: Starting tunnel for ${moduleUid} (attempt ${attempt}/${MAX_RETRIES})...`);
+                const startResult = await tunnelManager.startTunnel({
+                  moduleUid,
+                  port,
+                  onUrl: async (url) => {
+                    console.log(`[Daemon] EP819: Tunnel URL for ${moduleUid}: ${url}`);
+                    await reportTunnelStatus({
+                      tunnel_url: url,
+                      tunnel_error: null
+                    });
+                  },
+                  onStatusChange: (status, error) => {
+                    if (status === "error") {
+                      console.error(`[Daemon] EP819: Tunnel error for ${moduleUid}: ${error}`);
+                      reportTunnelStatus({ tunnel_error: error || "Tunnel connection error" });
+                    } else if (status === "reconnecting") {
+                      console.log(`[Daemon] EP819: Tunnel reconnecting for ${moduleUid}...`);
+                    }
+                  }
+                });
+                if (startResult.success) {
+                  console.log(`[Daemon] EP819: Tunnel started successfully for ${moduleUid}`);
+                  return;
+                }
+                lastError = startResult.error;
+                console.warn(`[Daemon] EP819: Tunnel start attempt ${attempt} failed: ${lastError}`);
+                if (attempt < MAX_RETRIES) {
+                  console.log(`[Daemon] EP819: Retrying in ${RETRY_DELAY_MS}ms...`);
+                  await new Promise((resolve2) => setTimeout(resolve2, RETRY_DELAY_MS));
+                }
+              }
+              const errorMsg = `Tunnel failed after ${MAX_RETRIES} attempts: ${lastError}`;
+              console.error(`[Daemon] EP819: ${errorMsg}`);
+              await reportTunnelStatus({ tunnel_error: errorMsg });
+            } catch (error) {
+              const errorMsg = error instanceof Error ? error.message : String(error);
+              console.error(`[Daemon] EP819: Async tunnel startup error:`, error);
+              await reportTunnelStatus({ tunnel_error: errorMsg });
+            }
+          });
+        })();
+      }
+    } catch (error) {
+      console.error(`[Daemon] EP819: Error auto-starting tunnels:`, error);
+    }
+  }
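The auto-start block above wraps tunnel start-up in a bounded retry loop: three attempts with a fixed 2-second pause between them, and only the final failure is reported back via reportTunnelStatus(). A minimal sketch of that pattern in isolation, with a hypothetical startWithRetries() helper standing in for the inline loop:

```js
// Minimal sketch of the retry pattern used above (hypothetical helper, not part of the package).
// `startFn` stands in for tunnelManager.startTunnel(); the defaults mirror MAX_RETRIES / RETRY_DELAY_MS.
async function startWithRetries(startFn, maxRetries = 3, delayMs = 2000) {
  let lastError;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    const result = await startFn();          // each attempt resolves to { success, error }
    if (result.success) return { success: true };
    lastError = result.error;
    if (attempt < maxRetries) {
      await new Promise((resolve) => setTimeout(resolve, delayMs));  // fixed pause between attempts
    }
  }
  return { success: false, error: `Failed after ${maxRetries} attempts: ${lastError}` };
}
```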
+  // EP843: startTunnelPolling() removed - replaced by push-based state sync
+  // See module_state_changed handler for the new implementation
+  /**
+   * EP822: Stop periodic tunnel polling
+   * EP843: Kept for cleanup during shutdown, but interval is never started
+   */
+  stopTunnelPolling() {
+    if (this.tunnelPollInterval) {
+      clearInterval(this.tunnelPollInterval);
+      this.tunnelPollInterval = null;
+      console.log("[Daemon] EP822: Tunnel polling stopped");
+    }
+  }
+  /**
+   * EP929: Start health check polling
+   *
+   * Restored from EP843 removal. Health checks run every 60 seconds to:
+   * - Verify running tunnels are still responsive
+   * - Detect dead tunnels that haven't been cleaned up
+   * - Auto-restart unhealthy tunnels after consecutive failures
+   *
+   * This is orthogonal to the push-based state sync (module_state_changed events).
+   * State sync handles start/stop based on module transitions.
+   * Health checks handle detecting and recovering from tunnel crashes.
+   */
+  startHealthCheckPolling() {
+    if (this.healthCheckInterval) {
+      console.log("[Daemon] EP929: Health check polling already running");
+      return;
+    }
+    console.log(`[Daemon] EP929: Starting health check polling (every ${_Daemon.HEALTH_CHECK_INTERVAL_MS / 1e3}s)`);
+    this.healthCheckInterval = setInterval(async () => {
+      if (this.healthCheckInProgress) {
+        console.log("[Daemon] EP929: Health check still in progress, skipping");
+        return;
+      }
+      this.healthCheckInProgress = true;
+      try {
+        const config = await (0, import_core7.loadConfig)();
+        if (config?.access_token) {
+          await this.performHealthChecks(config);
+        }
+      } catch (error) {
+        console.error("[Daemon] EP929: Health check error:", error instanceof Error ? error.message : error);
+      } finally {
+        this.healthCheckInProgress = false;
+      }
+    }, _Daemon.HEALTH_CHECK_INTERVAL_MS);
+  }
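startHealthCheckPolling() guards its setInterval callback with a healthCheckInProgress flag so a slow check cannot overlap the next tick. A stripped-down sketch of that guard, with checkOnce() standing in for performHealthChecks() and the 60-second interval taken from the doc comment:

```js
// Sketch of the overlap guard used by startHealthCheckPolling(); checkOnce and intervalMs are illustrative.
function startGuardedPolling(checkOnce, intervalMs = 60_000) {
  let inProgress = false;
  return setInterval(async () => {
    if (inProgress) return;        // skip this tick if the previous check is still running
    inProgress = true;
    try {
      await checkOnce();
    } catch (err) {
      console.error("health check failed:", err);
    } finally {
      inProgress = false;          // always release the guard, even when the check throws
    }
  }, intervalMs);
}
```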
+  /**
+   * EP929: Stop health check polling
+   */
+  stopHealthCheckPolling() {
+    if (this.healthCheckInterval) {
+      clearInterval(this.healthCheckInterval);
+      this.healthCheckInterval = null;
+      console.log("[Daemon] EP929: Health check polling stopped");
+    }
+  }
+  /**
+   * EP822: Clean up orphaned tunnels from previous daemon runs
+   * EP904: Enhanced to aggressively clean ALL cloudflared processes then restart
+   * tunnels for qualifying modules (doing/review state, dev_mode=local)
+   *
+   * When the daemon crashes or is killed, tunnels may continue running.
+   * This method:
+   * 1. Kills ALL cloudflared processes (aggressive cleanup for clean slate)
+   * 2. Queries API for modules that should have tunnels
+   * 3. Restarts tunnels for qualifying modules
+   */
+  async cleanupOrphanedTunnels() {
+    try {
+      const tunnelManager = getTunnelManager();
+      await tunnelManager.initialize();
+      const runningTunnels = tunnelManager.getAllTunnels();
+      if (runningTunnels.length > 0) {
+        console.log(`[Daemon] EP904: Stopping ${runningTunnels.length} tracked tunnel(s)...`);
+        for (const tunnel of runningTunnels) {
+          try {
+            await tunnelManager.stopTunnel(tunnel.moduleUid);
+            await stopDevServer(tunnel.moduleUid);
+          } catch (error) {
+            console.error(`[Daemon] EP904: Failed to stop tunnel for ${tunnel.moduleUid}:`, error);
+          }
+        }
+      }
+      const cleanup = await tunnelManager.cleanupOrphanedProcesses();
+      if (cleanup.cleaned > 0) {
+        console.log(`[Daemon] EP904: Killed ${cleanup.cleaned} orphaned cloudflared process(es)`);
+      }
+      console.log("[Daemon] EP904: Orphaned tunnel cleanup complete - clean slate ready");
+    } catch (error) {
+      console.error("[Daemon] EP904: Failed to clean up orphaned tunnels:", error);
+    }
+  }
+  // EP843: syncTunnelsWithActiveModules() removed - replaced by push-based state sync
+  // See module_state_changed handler for the new implementation
+  /**
+   * EP833: Perform health checks on all running tunnels
+   * Checks both tunnel URL and local dev server responsiveness
+   */
+  async performHealthChecks(config) {
+    const tunnelManager = getTunnelManager();
+    const runningTunnels = tunnelManager.getAllTunnels();
+    if (runningTunnels.length === 0) {
+      return;
+    }
+    const apiUrl = config.api_url || "https://episoda.dev";
+    for (const tunnel of runningTunnels) {
+      if (tunnel.status !== "connected") {
+        continue;
+      }
+      const isHealthy = await this.checkTunnelHealth(tunnel);
+      if (isHealthy) {
+        this.tunnelHealthFailures.delete(tunnel.moduleUid);
+        await this.reportTunnelHealth(tunnel.moduleUid, "healthy", config);
+      } else {
+        const failures = (this.tunnelHealthFailures.get(tunnel.moduleUid) || 0) + 1;
+        this.tunnelHealthFailures.set(tunnel.moduleUid, failures);
+        console.log(`[Daemon] EP833: Health check failed for ${tunnel.moduleUid} (${failures}/${_Daemon.HEALTH_CHECK_FAILURE_THRESHOLD})`);
+        if (failures >= _Daemon.HEALTH_CHECK_FAILURE_THRESHOLD) {
+          console.log(`[Daemon] EP833: Tunnel unhealthy for ${tunnel.moduleUid}, restarting...`);
+          await this.withTunnelLock(tunnel.moduleUid, async () => {
+            await this.restartTunnel(tunnel.moduleUid, tunnel.port);
+          });
+          this.tunnelHealthFailures.delete(tunnel.moduleUid);
+          await this.reportTunnelHealth(tunnel.moduleUid, "unhealthy", config);
+        }
+      }
+    }
+  }
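performHealthChecks() only restarts a tunnel after several consecutive failures, tracked per module in tunnelHealthFailures and reset on any success. A small sketch of that counter logic under the same assumptions; recordResult() and its parameters are illustrative, not part of the package:

```js
// Sketch of the consecutive-failure counter behind performHealthChecks(); the threshold value
// corresponds to _Daemon.HEALTH_CHECK_FAILURE_THRESHOLD, which is defined elsewhere in the class.
const failures = new Map(); // moduleUid -> consecutive failed checks

function recordResult(moduleUid, healthy, threshold, restart) {
  if (healthy) {
    failures.delete(moduleUid);               // any success resets the streak
    return;
  }
  const count = (failures.get(moduleUid) || 0) + 1;
  failures.set(moduleUid, count);
  if (count >= threshold) {
    failures.delete(moduleUid);               // reset so the restarted tunnel gets a fresh window
    restart(moduleUid);                       // e.g. this.restartTunnel(moduleUid, port)
  }
}
```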
+  /**
+   * EP833: Check if a tunnel is healthy
+   * Verifies both the tunnel URL and local dev server respond
+   */
+  async checkTunnelHealth(tunnel) {
+    try {
+      const controller = new AbortController();
+      const timeout = setTimeout(() => controller.abort(), _Daemon.HEALTH_CHECK_TIMEOUT_MS);
+      const response = await fetch(tunnel.url, {
+        method: "HEAD",
+        signal: controller.signal
+      });
+      clearTimeout(timeout);
+      if (response.status >= 500) {
+        console.log(`[Daemon] EP833: Tunnel URL returned ${response.status} for ${tunnel.moduleUid}`);
+        return false;
+      }
+    } catch (error) {
+      console.log(`[Daemon] EP833: Tunnel URL unreachable for ${tunnel.moduleUid}:`, error instanceof Error ? error.message : error);
+      return false;
+    }
+    try {
+      const controller = new AbortController();
+      const timeout = setTimeout(() => controller.abort(), 2e3);
+      const localResponse = await fetch(`http://localhost:${tunnel.port}`, {
+        method: "HEAD",
+        signal: controller.signal
+      });
+      clearTimeout(timeout);
+      if (localResponse.status >= 500) {
+        console.log(`[Daemon] EP833: Local dev server returned ${localResponse.status} for ${tunnel.moduleUid}`);
+        return false;
+      }
+    } catch (error) {
+      console.log(`[Daemon] EP833: Local dev server unreachable for ${tunnel.moduleUid}:`, error instanceof Error ? error.message : error);
+      return false;
+    }
+    return true;
+  }
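checkTunnelHealth() runs the same probe twice, once against the public tunnel URL and once against localhost: a HEAD request cancelled via AbortController after a timeout, where a network error or a 5xx response marks the endpoint unhealthy while 4xx still counts as reachable. A reusable sketch of that probe; isEndpointHealthy() is a hypothetical helper, not part of the package:

```js
// Sketch of the probe used twice in checkTunnelHealth(): HEAD request with an AbortController timeout.
async function isEndpointHealthy(probeUrl, timeoutMs) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch(probeUrl, { method: "HEAD", signal: controller.signal });
    return response.status < 500;   // 4xx (auth pages etc.) still proves the endpoint is reachable
  } catch {
    return false;                   // abort or network error -> unreachable
  } finally {
    clearTimeout(timer);
  }
}
```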
+  /**
+   * EP833: Restart a failed tunnel
+   * EP932: Now uses restartDevServer() for robust dev server restart with auto-restart
+   */
+  async restartTunnel(moduleUid, port) {
+    const tunnelManager = getTunnelManager();
+    try {
+      await tunnelManager.stopTunnel(moduleUid);
+      const config = await (0, import_core7.loadConfig)();
+      if (!config?.access_token) {
+        console.error(`[Daemon] EP833: No access token for tunnel restart`);
+        return;
+      }
+      const apiUrl = config.api_url || "https://episoda.dev";
+      const devServerResult = await restartDevServer(moduleUid);
+      if (!devServerResult.success) {
+        console.log(`[Daemon] EP932: No tracked server for ${moduleUid}, looking up project...`);
+        let projectId = null;
+        try {
+          const moduleResponse = await fetchWithAuth(`${apiUrl}/api/modules/${moduleUid}`);
+          if (moduleResponse.ok) {
+            const moduleData = await moduleResponse.json();
+            projectId = moduleData.moduleRecord?.project_id ?? null;
+          }
+        } catch (e) {
+          console.warn(`[Daemon] EP833: Failed to fetch module details for project lookup`);
+        }
+        const trackedProjects = getAllProjects();
+        let project = projectId ? trackedProjects.find((p) => p.id === projectId) : trackedProjects[0];
+        if (!project && trackedProjects.length > 0) {
+          project = trackedProjects[0];
+          console.warn(`[Daemon] EP833: Could not find project ${projectId}, using fallback`);
+        }
+        if (!project) {
+          console.error(`[Daemon] EP833: No project found for tunnel restart`);
+          return;
+        }
+        const { isPortInUse: isPortInUse2 } = await Promise.resolve().then(() => (init_port_check(), port_check_exports));
+        if (await isPortInUse2(port)) {
+          console.log(`[Daemon] EP932: Port ${port} in use, checking health...`);
+          const healthy = await isDevServerHealthy(port);
+          if (!healthy) {
+            console.log(`[Daemon] EP932: Dev server on port ${port} is not responding, killing process...`);
+            await killProcessOnPort(port);
+          }
+        }
+        const startResult2 = await ensureDevServer(project.path, port, moduleUid);
+        if (!startResult2.success) {
+          console.error(`[Daemon] EP932: Failed to start dev server: ${startResult2.error}`);
+          return;
+        }
+      }
+      console.log(`[Daemon] EP932: Dev server ready, restarting tunnel for ${moduleUid}...`);
+      const startResult = await tunnelManager.startTunnel({
+        moduleUid,
+        port,
+        onUrl: async (url) => {
+          console.log(`[Daemon] EP833: Tunnel restarted for ${moduleUid}: ${url}`);
+          try {
+            await fetchWithAuth(`${apiUrl}/api/modules/${moduleUid}/tunnel`, {
+              method: "POST",
+              body: JSON.stringify({
+                tunnel_url: url,
+                tunnel_error: null
+              })
+            });
+          } catch (e) {
+            console.warn(`[Daemon] EP833: Failed to report restarted tunnel URL`);
+          }
+        }
+      });
+      if (startResult.success) {
+        console.log(`[Daemon] EP833: Tunnel restart successful for ${moduleUid}`);
+      } else {
+        console.error(`[Daemon] EP833: Tunnel restart failed for ${moduleUid}: ${startResult.error}`);
+      }
+    } catch (error) {
+      console.error(`[Daemon] EP833: Error restarting tunnel for ${moduleUid}:`, error);
+    }
+  }
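restartTunnel() follows a stop-then-rebuild sequence: drop the tunnel, try restartDevServer(), and only if there is no tracked server fall back to looking up the project, clearing a wedged port, and calling ensureDevServer() before starting a fresh tunnel. A condensed, hedged sketch of that flow; function names match the surrounding code except lookUpProject() and reportTunnelUrl(), which are hypothetical stand-ins for the inline project lookup and the onUrl callback, and error handling is omitted:

```js
// Condensed sketch of the restart flow above (EP833/EP932); not a drop-in replacement for the real method.
async function restartFlow(moduleUid, port) {
  await tunnelManager.stopTunnel(moduleUid);                 // always drop the old tunnel first
  let ready = (await restartDevServer(moduleUid)).success;   // preferred path: restart the tracked dev server
  if (!ready) {
    const project = await lookUpProject(moduleUid);          // hypothetical: API lookup + getAllProjects() fallback
    if (await isPortInUse(port) && !(await isDevServerHealthy(port))) {
      await killProcessOnPort(port);                         // clear a wedged process occupying the port
    }
    ready = (await ensureDevServer(project.path, port, moduleUid)).success;
  }
  if (ready) {
    await tunnelManager.startTunnel({ moduleUid, port, onUrl: reportTunnelUrl }); // reportTunnelUrl: hypothetical callback
  }
}
```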
+  /**
+   * EP833: Report tunnel health status to the API
+   * EP904: Use fetchWithAuth for token refresh
+   * EP911: Only report when status CHANGES to reduce DB writes
+   */
+  async reportTunnelHealth(moduleUid, healthStatus, config) {
+    if (!config.access_token) {
+      return;
+    }
+    const lastStatus = this.lastReportedHealthStatus.get(moduleUid);
+    if (lastStatus === healthStatus) {
+      return;
+    }
+    const apiUrl = config.api_url || "https://episoda.dev";
+    try {
+      await fetchWithAuth(`${apiUrl}/api/modules/${moduleUid}/health`, {
+        method: "PATCH",
+        body: JSON.stringify({
+          tunnel_health_status: healthStatus,
+          tunnel_last_health_check: (/* @__PURE__ */ new Date()).toISOString()
+        })
+      });
+      this.lastReportedHealthStatus.set(moduleUid, healthStatus);
+    } catch (error) {
+      console.warn(`[Daemon] EP833: Failed to report health for ${moduleUid}:`, error instanceof Error ? error.message : error);
+    }
+  }
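reportTunnelHealth() caches the last value per module and PATCHes the API only when the status actually changes (EP911), and the cache is updated only after a successful report so a failed request is retried on the next check. A compact sketch of that change-only reporting, with sendReport() standing in for the fetchWithAuth call:

```js
// Sketch of the change-only health reporting in reportTunnelHealth(); sendReport is illustrative.
const lastReported = new Map(); // moduleUid -> "healthy" | "unhealthy"

async function reportIfChanged(moduleUid, status, sendReport) {
  if (lastReported.get(moduleUid) === status) return; // unchanged -> skip the write
  await sendReport(moduleUid, status);                // only cache after the report succeeds
  lastReported.set(moduleUid, status);
}
```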
+  /**
+   * EP833: Kill processes matching a pattern
+   * Used to clean up orphaned cloudflared processes
+   */
+  async killProcessesByPattern(pattern) {
+    const { exec } = await import("child_process");
+    const { promisify } = await import("util");
+    const execAsync = promisify(exec);
+    try {
+      const { stdout } = await execAsync(`pgrep -f "${pattern}"`);
+      const pids = stdout.trim().split("\n").filter(Boolean);
+      if (pids.length === 0) {
+        return 0;
+      }
+      let killed = 0;
+      for (const pid of pids) {
+        try {
+          await execAsync(`kill ${pid}`);
+          killed++;
+        } catch {
+        }
+      }
+      if (killed > 0) {
+        console.log(`[Daemon] EP833: Killed ${killed} process(es) matching: ${pattern}`);
+      }
+      return killed;
+    } catch {
+      return 0;
+    }
+  }
+  /**
+   * EP833: Kill process using a specific port
+   * Used to clean up dev servers when stopping tunnels
+   */
+  async killProcessOnPort(port) {
+    const { exec } = await import("child_process");
+    const { promisify } = await import("util");
+    const execAsync = promisify(exec);
+    try {
+      const { stdout } = await execAsync(`lsof -ti :${port}`);
+      const pids = stdout.trim().split("\n").filter(Boolean);
+      if (pids.length === 0) {
+        return false;
+      }
+      let killed = false;
+      for (const pid of pids) {
+        try {
+          await execAsync(`kill ${pid}`);
+          killed = true;
+        } catch {
+        }
+      }
+      if (killed) {
+        console.log(`[Daemon] EP833: Killed process(es) on port ${port}`);
+      }
+      return killed;
+    } catch {
+      return false;
+    }
+  }
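Both process helpers above shell out to Unix tools (pgrep, kill, lsof), so on platforms where those commands are unavailable they fail and the surrounding try/catch turns them into no-ops. A sketch of how the lsof -ti output is consumed, assuming a Unix-like environment; pidsOnPort() is illustrative, not part of the package:

```js
// Sketch: listing PIDs bound to a port the same way killProcessOnPort() does (Unix-like systems only).
import { exec } from "child_process";
import { promisify } from "util";
const execAsync = promisify(exec);

async function pidsOnPort(port) {
  try {
    const { stdout } = await execAsync(`lsof -ti :${port}`);  // -t prints PIDs only, -i :port filters by socket
    return stdout.trim().split("\n").filter(Boolean);         // one PID per line
  } catch {
    return [];  // lsof exits non-zero when nothing is listening on the port
  }
}
```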
+  /**
+   * EP833: Find orphaned cloudflared processes
+   * Returns process info for cloudflared processes not tracked by TunnelManager
+   */
+  async findOrphanedCloudflaredProcesses() {
+    const { exec } = await import("child_process");
+    const { promisify } = await import("util");
+    const execAsync = promisify(exec);
+    try {
+      const { stdout } = await execAsync("ps aux | grep cloudflared | grep -v grep");
+      const lines = stdout.trim().split("\n").filter(Boolean);
+      const tunnelManager = getTunnelManager();
+      const trackedModules = new Set(tunnelManager.getAllTunnels().map((t) => t.moduleUid));
+      const orphaned = [];
+      for (const line of lines) {
+        const parts = line.trim().split(/\s+/);
+        const pid = parts[1];
+        if (pid) {
+          orphaned.push({ pid });
+        }
+      }
+      if (orphaned.length > trackedModules.size) {
+        console.log(`[Daemon] EP833: Found ${orphaned.length} cloudflared processes but only ${trackedModules.size} tracked tunnels`);
+      }
+      return orphaned.length > trackedModules.size ? orphaned : [];
+    } catch {
+      return [];
+    }
+  }
+  /**
+   * EP833: Clean up orphaned cloudflared processes
+   * Called during sync to ensure no zombie tunnels are running
+   */
+  async cleanupOrphanedCloudflaredProcesses() {
+    const orphaned = await this.findOrphanedCloudflaredProcesses();
+    if (orphaned.length === 0) {
+      return;
+    }
+    console.log(`[Daemon] EP833: Cleaning up ${orphaned.length} potentially orphaned cloudflared process(es)`);
+    const killed = await this.killProcessesByPattern("cloudflared tunnel");
+    if (killed > 0) {
+      console.log(`[Daemon] EP833: Cleaned up ${killed} orphaned cloudflared process(es)`);
+    }
+  }
+  // EP843: syncLocalCommits() removed - replaced by GitHub webhook push handler
+  // See /api/webhooks/github handlePushEvent() for the new implementation
   /**
    * Gracefully shutdown daemon
    */
@@ -4476,6 +6601,8 @@ var Daemon = class {
     if (this.shuttingDown) return;
     this.shuttingDown = true;
     console.log("[Daemon] Shutting down...");
+    this.stopTunnelPolling();
+    this.stopHealthCheckPolling();
     for (const [projectPath, connection] of this.connections) {
       if (connection.reconnectTimer) {
         clearTimeout(connection.reconnectTimer);
@@ -4490,6 +6617,13 @@ var Daemon = class {
     } catch (error) {
       console.error("[Daemon] Failed to stop tunnels:", error);
     }
+    try {
+      const agentManager = getAgentManager();
+      await agentManager.stopAllSessions();
+      console.log("[Daemon] All agent sessions stopped");
+    } catch (error) {
+      console.error("[Daemon] Failed to stop agent sessions:", error);
+    }
     await this.ipcServer.stop();
     console.log("[Daemon] Shutdown complete");
   }
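The shutdown additions above follow the method's existing pattern: each subsystem (polling timers, tunnels, agent sessions, the IPC server) is stopped in its own guarded step so one failure does not abort the rest of the shutdown. A minimal sketch of that step-isolation pattern, assuming a hypothetical runShutdownSteps() helper; the real method inlines each step as shown in the diff:

```js
// Sketch of the guarded-step shutdown style: every cleanup step runs in its own try/catch.
async function runShutdownSteps(steps) {
  for (const [name, step] of steps) {
    try {
      await step();
    } catch (error) {
      console.error(`[Daemon] Failed to ${name}:`, error);  // log and continue with the next step
    }
  }
}

// usage sketch, using calls visible in the diff:
// await runShutdownSteps([
//   ["stop agent sessions", () => getAgentManager().stopAllSessions()],
//   ["stop IPC server", () => ipcServer.stop()],
// ]);
```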
@@ -4501,8 +6635,8 @@ var Daemon = class {
     await this.shutdown();
     try {
       const pidPath = getPidFilePath();
-      if (
-
+      if (fs11.existsSync(pidPath)) {
+        fs11.unlinkSync(pidPath);
         console.log("[Daemon] PID file cleaned up");
       }
     } catch (error) {