adaptoclaw 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +3 -0
- package/dist/channels/amocrm.d.ts +20 -0
- package/dist/channels/amocrm.d.ts.map +1 -0
- package/dist/channels/amocrm.js +233 -0
- package/dist/channels/amocrm.js.map +1 -0
- package/dist/channels/factory.d.ts +24 -0
- package/dist/channels/factory.d.ts.map +1 -0
- package/dist/channels/factory.js +41 -0
- package/dist/channels/factory.js.map +1 -0
- package/dist/channels/telegram.d.ts +28 -0
- package/dist/channels/telegram.d.ts.map +1 -0
- package/dist/channels/telegram.js +432 -0
- package/dist/channels/telegram.js.map +1 -0
- package/dist/channels/whatsapp.d.ts +88 -0
- package/dist/channels/whatsapp.d.ts.map +1 -0
- package/dist/channels/whatsapp.js +502 -0
- package/dist/channels/whatsapp.js.map +1 -0
- package/dist/config-types.d.ts +43 -0
- package/dist/config-types.d.ts.map +1 -0
- package/dist/config-types.js +2 -0
- package/dist/config-types.js.map +1 -0
- package/dist/config.d.ts +32 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +71 -0
- package/dist/config.js.map +1 -0
- package/dist/container-runner.d.ts +47 -0
- package/dist/container-runner.d.ts.map +1 -0
- package/dist/container-runner.js +494 -0
- package/dist/container-runner.js.map +1 -0
- package/dist/container-runtime.d.ts +13 -0
- package/dist/container-runtime.d.ts.map +1 -0
- package/dist/container-runtime.js +90 -0
- package/dist/container-runtime.js.map +1 -0
- package/dist/core/engine.d.ts +64 -0
- package/dist/core/engine.d.ts.map +1 -0
- package/dist/core/engine.js +782 -0
- package/dist/core/engine.js.map +1 -0
- package/dist/create.d.ts +4 -0
- package/dist/create.d.ts.map +1 -0
- package/dist/create.js +5 -0
- package/dist/create.js.map +1 -0
- package/dist/db.d.ts +67 -0
- package/dist/db.d.ts.map +1 -0
- package/dist/db.js +454 -0
- package/dist/db.js.map +1 -0
- package/dist/env.d.ts +8 -0
- package/dist/env.d.ts.map +1 -0
- package/dist/env.js +40 -0
- package/dist/env.js.map +1 -0
- package/dist/group-folder.d.ts +5 -0
- package/dist/group-folder.d.ts.map +1 -0
- package/dist/group-folder.js +44 -0
- package/dist/group-folder.js.map +1 -0
- package/dist/group-queue.d.ts +52 -0
- package/dist/group-queue.d.ts.map +1 -0
- package/dist/group-queue.js +321 -0
- package/dist/group-queue.js.map +1 -0
- package/dist/health.d.ts +5 -0
- package/dist/health.d.ts.map +1 -0
- package/dist/health.js +70 -0
- package/dist/health.js.map +1 -0
- package/dist/hooks/runner.d.ts +13 -0
- package/dist/hooks/runner.d.ts.map +1 -0
- package/dist/hooks/runner.js +42 -0
- package/dist/hooks/runner.js.map +1 -0
- package/dist/hooks/types.d.ts +33 -0
- package/dist/hooks/types.d.ts.map +1 -0
- package/dist/hooks/types.js +2 -0
- package/dist/hooks/types.js.map +1 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +38 -0
- package/dist/index.js.map +1 -0
- package/dist/ipc.d.ts +36 -0
- package/dist/ipc.d.ts.map +1 -0
- package/dist/ipc.js +441 -0
- package/dist/ipc.js.map +1 -0
- package/dist/logger.d.ts +3 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +14 -0
- package/dist/logger.js.map +1 -0
- package/dist/media-cleanup.d.ts +2 -0
- package/dist/media-cleanup.d.ts.map +1 -0
- package/dist/media-cleanup.js +51 -0
- package/dist/media-cleanup.js.map +1 -0
- package/dist/media.d.ts +2 -0
- package/dist/media.d.ts.map +1 -0
- package/dist/media.js +22 -0
- package/dist/media.js.map +1 -0
- package/dist/mount-security.d.ts +34 -0
- package/dist/mount-security.d.ts.map +1 -0
- package/dist/mount-security.js +321 -0
- package/dist/mount-security.js.map +1 -0
- package/dist/rate-limiter.d.ts +10 -0
- package/dist/rate-limiter.d.ts.map +1 -0
- package/dist/rate-limiter.js +31 -0
- package/dist/rate-limiter.js.map +1 -0
- package/dist/router.d.ts +11 -0
- package/dist/router.d.ts.map +1 -0
- package/dist/router.js +79 -0
- package/dist/router.js.map +1 -0
- package/dist/rqlite.d.ts +36 -0
- package/dist/rqlite.d.ts.map +1 -0
- package/dist/rqlite.js +98 -0
- package/dist/rqlite.js.map +1 -0
- package/dist/runtime-config.d.ts +9 -0
- package/dist/runtime-config.d.ts.map +1 -0
- package/dist/runtime-config.js +14 -0
- package/dist/runtime-config.js.map +1 -0
- package/dist/task-scheduler.d.ts +14 -0
- package/dist/task-scheduler.d.ts.map +1 -0
- package/dist/task-scheduler.js +189 -0
- package/dist/task-scheduler.js.map +1 -0
- package/dist/types.d.ts +96 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/whatsapp-auth.d.ts +2 -0
- package/dist/whatsapp-auth.d.ts.map +1 -0
- package/dist/whatsapp-auth.js +141 -0
- package/dist/whatsapp-auth.js.map +1 -0
- package/package.json +58 -0
package/dist/config.js
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import path from 'path';
import { readEnvFile } from './env.js';

// Read config values from .env (falls back to process.env).
// Secrets are NOT read here — they stay on disk and are loaded only
// where needed (container-runner.ts) to avoid leaking to child processes.
const envConfig = readEnvFile([
  'ASSISTANT_NAME',
  'ASSISTANT_HAS_OWN_NUMBER',
  'TELEGRAM_BOT_TOKEN',
  'TELEGRAM_ONLY',
  'WA_PHONE_NUMBER',
  'WA_AUTO_REGISTER',
  'WA_LISTEN_ONLY',
  'RQLITE_URL',
  'DRY_RUN',
  'AMOCRM_ENABLED',
  'AMOCRM_BASE_URL',
  'AMOCRM_WEBHOOK_PORT',
  'AMOCRM_SALESBOT_ID',
  'AMOCRM_RESPONSE_FIELD_ID',
  'AMOCRM_GROUP_FOLDER',
  'AMOCRM_LIVE_LEADS',
]);

/**
 * Parse an integer config value with a safe fallback.
 *
 * Unlike `parseInt(raw || 'default', 10)`, this also falls back when the
 * variable is SET but non-numeric, so a typo in .env cannot propagate NaN
 * into timeouts or ports.
 *
 * @param {string|undefined} raw - raw env value (may be undefined or empty)
 * @param {number} fallback - value used when raw is missing or not a number
 * @returns {number}
 */
function parseIntOr(raw, fallback) {
  const n = Number.parseInt(raw ?? '', 10);
  return Number.isNaN(n) ? fallback : n;
}

export const ASSISTANT_NAME = process.env.ASSISTANT_NAME || envConfig.ASSISTANT_NAME || 'Andy';
export const ASSISTANT_HAS_OWN_NUMBER = (process.env.ASSISTANT_HAS_OWN_NUMBER || envConfig.ASSISTANT_HAS_OWN_NUMBER) === 'true';
export const POLL_INTERVAL = 2000;
export const SCHEDULER_POLL_INTERVAL = 60000;

// Absolute paths needed for container mounts
const PROJECT_ROOT = process.cwd();
const HOME_DIR = process.env.HOME || '/Users/user';

// Mount security: allowlist stored OUTSIDE project root, never mounted into containers
export const MOUNT_ALLOWLIST_PATH = path.join(HOME_DIR, '.config', 'nanoclaw', 'mount-allowlist.json');
export const STORE_DIR = path.resolve(PROJECT_ROOT, 'store');
export const GROUPS_DIR = path.resolve(PROJECT_ROOT, 'groups');
export const DATA_DIR = path.resolve(PROJECT_ROOT, 'data');
export const MAIN_GROUP_FOLDER = 'main';
export const CONTAINER_IMAGE = process.env.CONTAINER_IMAGE || 'nanoclaw-agent:latest';
// NaN-safe: a malformed CONTAINER_TIMEOUT no longer produces a NaN timeout.
export const CONTAINER_TIMEOUT = parseIntOr(process.env.CONTAINER_TIMEOUT, 1800000);
export const CONTAINER_MAX_OUTPUT_SIZE = parseIntOr(process.env.CONTAINER_MAX_OUTPUT_SIZE, 10485760); // 10MB default
export const IPC_POLL_INTERVAL = 1000;
export const IDLE_TIMEOUT = parseIntOr(process.env.IDLE_TIMEOUT, 1800000); // 30min default — how long to keep container alive after last result
export const MAX_CONCURRENT_CONTAINERS = Math.max(1, parseInt(process.env.MAX_CONCURRENT_CONTAINERS || '5', 10) || 5);

/** Escape regex metacharacters so ASSISTANT_NAME can be embedded in a RegExp safely. */
function escapeRegex(str) {
  return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
export const TRIGGER_PATTERN = new RegExp(`^@${escapeRegex(ASSISTANT_NAME)}\\b`, 'i');

// Timezone for scheduled tasks (cron expressions, etc.)
// Uses system timezone by default
export const TIMEZONE = process.env.TZ || Intl.DateTimeFormat().resolvedOptions().timeZone;

// Telegram configuration
export const MEDIA_DIR = path.join(DATA_DIR, 'media');
export const TELEGRAM_BOT_TOKEN = process.env.TELEGRAM_BOT_TOKEN || envConfig.TELEGRAM_BOT_TOKEN || '';
export const TELEGRAM_ONLY = (process.env.TELEGRAM_ONLY || envConfig.TELEGRAM_ONLY) === 'true';
export const WA_PHONE_NUMBER = process.env.WA_PHONE_NUMBER || envConfig.WA_PHONE_NUMBER || '';
export const WA_AUTO_REGISTER = (process.env.WA_AUTO_REGISTER || envConfig.WA_AUTO_REGISTER) === 'true';
export const WA_LISTEN_ONLY = (process.env.WA_LISTEN_ONLY || envConfig.WA_LISTEN_ONLY) === 'true';
export const DRY_RUN = (process.env.DRY_RUN || envConfig.DRY_RUN) === 'true';

// AmoCRM configuration
// Set AMOCRM_ENABLED=false to temporarily disable AmoCRM channel (webhook won't start, no messages processed)
export const AMOCRM_ENABLED = (process.env.AMOCRM_ENABLED || envConfig.AMOCRM_ENABLED || 'true') !== 'false';
export const AMOCRM_BASE_URL = process.env.AMOCRM_BASE_URL || envConfig.AMOCRM_BASE_URL || '';
export const AMOCRM_WEBHOOK_PORT = parseIntOr(process.env.AMOCRM_WEBHOOK_PORT || envConfig.AMOCRM_WEBHOOK_PORT, 3200);
export const AMOCRM_SALESBOT_ID = parseIntOr(process.env.AMOCRM_SALESBOT_ID || envConfig.AMOCRM_SALESBOT_ID, 0);
export const AMOCRM_RESPONSE_FIELD_ID = parseIntOr(process.env.AMOCRM_RESPONSE_FIELD_ID || envConfig.AMOCRM_RESPONSE_FIELD_ID, 0);
export const AMOCRM_GROUP_FOLDER = process.env.AMOCRM_GROUP_FOLDER || envConfig.AMOCRM_GROUP_FOLDER || 'pvefilm';
// Comma-separated lead IDs that receive real responses. Empty = all DRY_RUN.
export const AMOCRM_LIVE_LEADS = new Set(
  (process.env.AMOCRM_LIVE_LEADS || envConfig.AMOCRM_LIVE_LEADS || '')
    .split(',')
    .map((s) => s.trim())
    .filter(Boolean),
);
//# sourceMappingURL=config.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"config.js","sourceRoot":"","sources":["../src/config.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,MAAM,CAAC;AAExB,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAEvC,4DAA4D;AAC5D,oEAAoE;AACpE,0EAA0E;AAC1E,MAAM,SAAS,GAAG,WAAW,CAAC;IAC5B,gBAAgB;IAChB,0BAA0B;IAC1B,oBAAoB;IACpB,eAAe;IACf,iBAAiB;IACjB,kBAAkB;IAClB,gBAAgB;IAChB,YAAY;IACZ,SAAS;IACT,gBAAgB;IAChB,iBAAiB;IACjB,qBAAqB;IACrB,oBAAoB;IACpB,0BAA0B;IAC1B,qBAAqB;IACrB,mBAAmB;CACpB,CAAC,CAAC;AAEH,MAAM,CAAC,MAAM,cAAc,GACzB,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,SAAS,CAAC,cAAc,IAAI,MAAM,CAAC;AACnE,MAAM,CAAC,MAAM,wBAAwB,GACnC,CAAC,OAAO,CAAC,GAAG,CAAC,wBAAwB,IAAI,SAAS,CAAC,wBAAwB,CAAC,KAAK,MAAM,CAAC;AAC1F,MAAM,CAAC,MAAM,aAAa,GAAG,IAAI,CAAC;AAClC,MAAM,CAAC,MAAM,uBAAuB,GAAG,KAAK,CAAC;AAE7C,6CAA6C;AAC7C,MAAM,YAAY,GAAG,OAAO,CAAC,GAAG,EAAE,CAAC;AACnC,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,IAAI,IAAI,aAAa,CAAC;AAEnD,uFAAuF;AACvF,MAAM,CAAC,MAAM,oBAAoB,GAAG,IAAI,CAAC,IAAI,CAC3C,QAAQ,EACR,SAAS,EACT,UAAU,EACV,sBAAsB,CACvB,CAAC;AACF,MAAM,CAAC,MAAM,SAAS,GAAG,IAAI,CAAC,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAC7D,MAAM,CAAC,MAAM,UAAU,GAAG,IAAI,CAAC,OAAO,CAAC,YAAY,EAAE,QAAQ,CAAC,CAAC;AAC/D,MAAM,CAAC,MAAM,QAAQ,GAAG,IAAI,CAAC,OAAO,CAAC,YAAY,EAAE,MAAM,CAAC,CAAC;AAC3D,MAAM,CAAC,MAAM,iBAAiB,GAAG,MAAM,CAAC;AAExC,MAAM,CAAC,MAAM,eAAe,GAC1B,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,uBAAuB,CAAC;AACzD,MAAM,CAAC,MAAM,iBAAiB,GAAG,QAAQ,CACvC,OAAO,CAAC,GAAG,CAAC,iBAAiB,IAAI,SAAS,EAC1C,EAAE,CACH,CAAC;AACF,MAAM,CAAC,MAAM,yBAAyB,GAAG,QAAQ,CAC/C,OAAO,CAAC,GAAG,CAAC,yBAAyB,IAAI,UAAU,EACnD,EAAE,CACH,CAAC,CAAC,eAAe;AAClB,MAAM,CAAC,MAAM,iBAAiB,GAAG,IAAI,CAAC;AACtC,MAAM,CAAC,MAAM,YAAY,GAAG,QAAQ,CAClC,OAAO,CAAC,GAAG,CAAC,YAAY,IAAI,SAAS,EACrC,EAAE,CACH,CAAC,CAAC,qEAAqE;AACxE,MAAM,CAAC,MAAM,yBAAyB,GAAG,IAAI,CAAC,GAAG,CAC/C,CAAC,EACD,QAAQ,CAAC,OAAO,CAAC,GAAG,CAAC,yBAAyB,IAAI,GAAG,EAAE,EAAE,CAAC,IAAI,CAAC,CAChE,CAAC;AAEF,SAAS,WAAW,CAAC,GAAW;IAC9B,OAAO,GAAG,CAAC,OAAO,CAAC,qBAAqB,EAAE,MAAM,CAAC,CAAC;AACpD,CAAC;AAED,MAAM,CAAC,MAAM,eAAe,GAAG,IAAI,MAAM,CACvC,KAAK,WAAW,CAAC,cAAc,CAAC,KA
AK,EACrC,GAAG,CACJ,CAAC;AAEF,wDAAwD;AACxD,kCAAkC;AAClC,MAAM,CAAC,MAAM,QAAQ,GACnB,OAAO,CAAC,GAAG,CAAC,EAAE,IAAI,IAAI,CAAC,cAAc,EAAE,CAAC,eAAe,EAAE,CAAC,QAAQ,CAAC;AAErE,yBAAyB;AACzB,MAAM,CAAC,MAAM,SAAS,GAAG,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;AAEtD,MAAM,CAAC,MAAM,kBAAkB,GAC7B,OAAO,CAAC,GAAG,CAAC,kBAAkB,IAAI,SAAS,CAAC,kBAAkB,IAAI,EAAE,CAAC;AACvE,MAAM,CAAC,MAAM,aAAa,GACxB,CAAC,OAAO,CAAC,GAAG,CAAC,aAAa,IAAI,SAAS,CAAC,aAAa,CAAC,KAAK,MAAM,CAAC;AACpE,MAAM,CAAC,MAAM,eAAe,GAC1B,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,SAAS,CAAC,eAAe,IAAI,EAAE,CAAC;AACjE,MAAM,CAAC,MAAM,gBAAgB,GAC3B,CAAC,OAAO,CAAC,GAAG,CAAC,gBAAgB,IAAI,SAAS,CAAC,gBAAgB,CAAC,KAAK,MAAM,CAAC;AAC1E,MAAM,CAAC,MAAM,cAAc,GACzB,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,SAAS,CAAC,cAAc,CAAC,KAAK,MAAM,CAAC;AACtE,MAAM,CAAC,MAAM,OAAO,GAClB,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,IAAI,SAAS,CAAC,OAAO,CAAC,KAAK,MAAM,CAAC;AAExD,uBAAuB;AACvB,8GAA8G;AAC9G,MAAM,CAAC,MAAM,cAAc,GACzB,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,SAAS,CAAC,cAAc,IAAI,MAAM,CAAC,KAAK,OAAO,CAAC;AACjF,MAAM,CAAC,MAAM,eAAe,GAC1B,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,SAAS,CAAC,eAAe,IAAI,EAAE,CAAC;AACjE,MAAM,CAAC,MAAM,mBAAmB,GAAG,QAAQ,CACzC,OAAO,CAAC,GAAG,CAAC,mBAAmB,IAAI,SAAS,CAAC,mBAAmB,IAAI,MAAM,EAC1E,EAAE,CACH,CAAC;AACF,MAAM,CAAC,MAAM,kBAAkB,GAAG,QAAQ,CACxC,OAAO,CAAC,GAAG,CAAC,kBAAkB,IAAI,SAAS,CAAC,kBAAkB,IAAI,GAAG,EACrE,EAAE,CACH,CAAC;AACF,MAAM,CAAC,MAAM,wBAAwB,GAAG,QAAQ,CAC9C,OAAO,CAAC,GAAG,CAAC,wBAAwB,IAAI,SAAS,CAAC,wBAAwB,IAAI,GAAG,EACjF,EAAE,CACH,CAAC;AACF,MAAM,CAAC,MAAM,mBAAmB,GAC9B,OAAO,CAAC,GAAG,CAAC,mBAAmB,IAAI,SAAS,CAAC,mBAAmB,IAAI,SAAS,CAAC;AAChF,6EAA6E;AAC7E,MAAM,CAAC,MAAM,iBAAiB,GAAG,IAAI,GAAG,CACtC,CAAC,OAAO,CAAC,GAAG,CAAC,iBAAiB,IAAI,SAAS,CAAC,iBAAiB,IAAI,EAAE,CAAC;KACjE,KAAK,CAAC,GAAG,CAAC;KACV,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC;KACpB,MAAM,CAAC,OAAO,CAAC,CACnB,CAAC"}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
/**
 * Container Runner for NanoClaw
 * Spawns agent execution in containers and handles IPC
 */
import { ChildProcess } from 'child_process';
import { RegisteredGroup } from './types.js';
/**
 * Input payload delivered to the containerized agent.
 */
export interface ContainerInput {
    /** Prompt text for the agent to act on. */
    prompt: string;
    /** Existing agent session to resume, if any. */
    sessionId?: string;
    /** Folder name of the group this run belongs to. */
    groupFolder: string;
    /** Chat identifier (JID) the run is associated with. */
    chatJid: string;
    /** True when running for the main group. */
    isMain: boolean;
    /** Set when the run was triggered by the task scheduler. */
    isScheduledTask?: boolean;
    /** Set when the run is a background task. */
    isBackgroundTask?: boolean;
    /** Identifier of the background task, when applicable. */
    backgroundTaskId?: string;
    /** Secret key/value pairs for the agent — presumably passed via stdin, not disk; confirm against runner. */
    secrets?: Record<string, string>;
    /** Model override for this run. */
    model?: string;
}
/**
 * Result reported back by the containerized agent.
 */
export interface ContainerOutput {
    /** Overall outcome of the run. */
    status: 'success' | 'error';
    /** Agent result text, or null when the run produced no output. */
    result: string | null;
    /** New session id, when the agent created or rotated a session. */
    newSessionId?: string;
    /** Error detail when status is 'error'. */
    error?: string;
}
export declare function runContainerAgent(group: RegisteredGroup, input: ContainerInput, onProcess: (proc: ChildProcess, containerName: string) => void, onOutput?: (output: ContainerOutput) => Promise<void>): Promise<ContainerOutput>;
export declare function writeTasksSnapshot(groupFolder: string, isMain: boolean, tasks: Array<{
    id: string;
    groupFolder: string;
    prompt: string;
    schedule_type: string;
    schedule_value: string;
    status: string;
    next_run: string | null;
}>): void;
/** A chat group as exposed to containers in the groups snapshot. */
export interface AvailableGroup {
    jid: string;
    name: string;
    lastActivity: string;
    isRegistered: boolean;
}
/**
 * Write available groups snapshot for the container to read.
 * Only main group can see all available groups (for activation).
 * Non-main groups only see their own registration status.
 */
export declare function writeGroupsSnapshot(groupFolder: string, isMain: boolean, groups: AvailableGroup[], registeredJids: Set<string>): void;
//# sourceMappingURL=container-runner.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"container-runner.d.ts","sourceRoot":"","sources":["../src/container-runner.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,OAAO,EAAE,YAAY,EAAe,MAAM,eAAe,CAAC;AAkB1D,OAAO,EAAE,eAAe,EAAE,MAAM,YAAY,CAAC;AAM7C,MAAM,WAAW,cAAc;IAC7B,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,OAAO,CAAC;IAChB,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B,gBAAgB,CAAC,EAAE,OAAO,CAAC;IAC3B,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,eAAe;IAC9B,MAAM,EAAE,SAAS,GAAG,OAAO,CAAC;IAC5B,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IACtB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAqOD,wBAAsB,iBAAiB,CACrC,KAAK,EAAE,eAAe,EACtB,KAAK,EAAE,cAAc,EACrB,SAAS,EAAE,CAAC,IAAI,EAAE,YAAY,EAAE,aAAa,EAAE,MAAM,KAAK,IAAI,EAC9D,QAAQ,CAAC,EAAE,CAAC,MAAM,EAAE,eAAe,KAAK,OAAO,CAAC,IAAI,CAAC,GACpD,OAAO,CAAC,eAAe,CAAC,CAmU1B;AAED,wBAAgB,kBAAkB,CAChC,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,OAAO,EACf,KAAK,EAAE,KAAK,CAAC;IACX,EAAE,EAAE,MAAM,CAAC;IACX,WAAW,EAAE,MAAM,CAAC;IACpB,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,EAAE,MAAM,CAAC;IACtB,cAAc,EAAE,MAAM,CAAC;IACvB,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;CACzB,CAAC,GACD,IAAI,CAYN;AAED,MAAM,WAAW,cAAc;IAC7B,GAAG,EAAE,MAAM,CAAC;IACZ,IAAI,EAAE,MAAM,CAAC;IACb,YAAY,EAAE,MAAM,CAAC;IACrB,YAAY,EAAE,OAAO,CAAC;CACvB;AAED;;;;GAIG;AACH,wBAAgB,mBAAmB,CACjC,WAAW,EAAE,MAAM,EACnB,MAAM,EAAE,OAAO,EACf,MAAM,EAAE,cAAc,EAAE,EACxB,cAAc,EAAE,GAAG,CAAC,MAAM,CAAC,GAC1B,IAAI,CAmBN"}
|
|
@@ -0,0 +1,494 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Container Runner for NanoClaw
|
|
3
|
+
* Spawns agent execution in containers and handles IPC
|
|
4
|
+
*/
|
|
5
|
+
import { exec, spawn } from 'child_process';
|
|
6
|
+
import fs from 'fs';
|
|
7
|
+
import path from 'path';
|
|
8
|
+
import { CONTAINER_IMAGE, CONTAINER_MAX_OUTPUT_SIZE, CONTAINER_TIMEOUT, DATA_DIR, GROUPS_DIR, IDLE_TIMEOUT, } from './config.js';
|
|
9
|
+
import { logContainerRun } from './db.js';
|
|
10
|
+
import { readEnvFile } from './env.js';
|
|
11
|
+
import { resolveGroupFolderPath, resolveGroupIpcPath } from './group-folder.js';
|
|
12
|
+
import { logger } from './logger.js';
|
|
13
|
+
import { CONTAINER_RUNTIME_BIN, readonlyMountArgs, stopContainer } from './container-runtime.js';
|
|
14
|
+
import { validateAdditionalMounts } from './mount-security.js';
|
|
15
|
+
// Sentinel markers for robust output parsing (must match agent-runner)
// The agent wraps each JSON output chunk between these two markers on stdout;
// the stream parser below extracts the text between a START/END pair.
const OUTPUT_START_MARKER = '---NANOCLAW_OUTPUT_START---';
const OUTPUT_END_MARKER = '---NANOCLAW_OUTPUT_END---';
|
|
18
|
+
/**
 * Build the list of bind mounts for a group's agent container.
 *
 * Side effects: creates per-group session/IPC/profile/media directories on the
 * host, writes a default settings.json on first run, and copies skills and
 * agent-runner sources into per-group locations.
 *
 * @param group - registered group (folder name, containerConfig, name)
 * @param isMain - whether this is the privileged main group
 * @returns array of { hostPath, containerPath, readonly } mount specs
 */
function buildVolumeMounts(group, isMain) {
    const mounts = [];
    const projectRoot = process.cwd();
    const groupDir = resolveGroupFolderPath(group.folder);
    if (isMain) {
        // Main gets the project root read-only. Writable paths the agent needs
        // (group folder, IPC, .claude/) are mounted separately below.
        // Read-only prevents the agent from modifying host application code
        // (src/, dist/, package.json, etc.) which would bypass the sandbox
        // entirely on next restart.
        mounts.push({
            hostPath: projectRoot,
            containerPath: '/workspace/project',
            readonly: true,
        });
        // Main also gets its group folder as the working directory
        mounts.push({
            hostPath: groupDir,
            containerPath: '/workspace/group',
            readonly: false,
        });
    }
    else {
        // Other groups only get their own folder
        mounts.push({
            hostPath: groupDir,
            containerPath: '/workspace/group',
            readonly: false,
        });
        // Global memory directory (read-only for non-main)
        // Only directory mounts are supported, not file mounts
        const globalDir = path.join(GROUPS_DIR, 'global');
        if (fs.existsSync(globalDir)) {
            mounts.push({
                hostPath: globalDir,
                containerPath: '/workspace/global',
                readonly: true,
            });
        }
    }
    // Per-group Claude sessions directory (isolated from other groups)
    // Each group gets their own .claude/ to prevent cross-group session access
    const groupSessionsDir = path.join(DATA_DIR, 'sessions', group.folder, '.claude');
    fs.mkdirSync(groupSessionsDir, { recursive: true });
    // Seed default settings only once; an existing settings.json is never overwritten,
    // so user edits inside the container persist across runs.
    const settingsFile = path.join(groupSessionsDir, 'settings.json');
    if (!fs.existsSync(settingsFile)) {
        fs.writeFileSync(settingsFile, JSON.stringify({
            env: {
                // Enable agent swarms (subagent orchestration)
                // https://code.claude.com/docs/en/agent-teams#orchestrate-teams-of-claude-code-sessions
                CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS: '1',
                // Load CLAUDE.md from additional mounted directories
                // https://code.claude.com/docs/en/memory#load-memory-from-additional-directories
                CLAUDE_CODE_ADDITIONAL_DIRECTORIES_CLAUDE_MD: '1',
                // Enable Claude's memory feature (persists user preferences between sessions)
                // https://code.claude.com/docs/en/memory#manage-auto-memory
                CLAUDE_CODE_DISABLE_AUTO_MEMORY: '0',
            },
        }, null, 2) + '\n');
    }
    // Sync skills from container/skills/ into each group's .claude/skills/
    // (copied every run, so upstream skill updates propagate to all groups)
    const skillsSrc = path.join(process.cwd(), 'container', 'skills');
    const skillsDst = path.join(groupSessionsDir, 'skills');
    if (fs.existsSync(skillsSrc)) {
        for (const skillDir of fs.readdirSync(skillsSrc)) {
            const srcDir = path.join(skillsSrc, skillDir);
            if (!fs.statSync(srcDir).isDirectory())
                continue;
            const dstDir = path.join(skillsDst, skillDir);
            fs.cpSync(srcDir, dstDir, { recursive: true });
        }
    }
    mounts.push({
        hostPath: groupSessionsDir,
        containerPath: '/home/node/.claude',
        readonly: false,
    });
    // Per-group IPC namespace: each group gets its own IPC directory
    // This prevents cross-group privilege escalation via IPC
    const groupIpcDir = resolveGroupIpcPath(group.folder);
    fs.mkdirSync(path.join(groupIpcDir, 'messages'), { recursive: true });
    fs.mkdirSync(path.join(groupIpcDir, 'tasks'), { recursive: true });
    fs.mkdirSync(path.join(groupIpcDir, 'input'), { recursive: true });
    fs.mkdirSync(path.join(groupIpcDir, 'wa_requests'), { recursive: true });
    fs.mkdirSync(path.join(groupIpcDir, 'wa_responses'), { recursive: true });
    mounts.push({
        hostPath: groupIpcDir,
        containerPath: '/workspace/ipc',
        readonly: false,
    });
    // Copy agent-runner source into a per-group writable location so agents
    // can customize it (add tools, change behavior) without affecting other
    // groups. Recompiled on container startup via entrypoint.sh.
    const agentRunnerSrc = path.join(projectRoot, 'container', 'agent-runner', 'src');
    const groupAgentRunnerDir = path.join(DATA_DIR, 'sessions', group.folder, 'agent-runner-src');
    if (!fs.existsSync(groupAgentRunnerDir) && fs.existsSync(agentRunnerSrc)) {
        fs.cpSync(agentRunnerSrc, groupAgentRunnerDir, { recursive: true });
    }
    // NOTE(review): this mount is pushed even when groupAgentRunnerDir was never
    // created (i.e. agentRunnerSrc missing) — mounting a non-existent host path
    // may behave differently per container runtime; confirm intent.
    mounts.push({
        hostPath: groupAgentRunnerDir,
        containerPath: '/app/src',
        readonly: false,
    });
    // Persistent browser profile (cookies, localStorage, sessions)
    const browserProfileDir = path.join(DATA_DIR, 'sessions', group.folder, 'browser-profile');
    fs.mkdirSync(browserProfileDir, { recursive: true });
    mounts.push({
        hostPath: browserProfileDir,
        containerPath: '/home/node/.browser-profile',
        readonly: false,
    });
    // Media directory — shared between host and container for media files
    const mediaDir = path.join(DATA_DIR, 'media');
    fs.mkdirSync(mediaDir, { recursive: true });
    mounts.push({
        hostPath: mediaDir,
        containerPath: '/workspace/media',
        readonly: false, // writable so agent can save screenshots/files for sending
    });
    // NCALayer adapter + .p12 key for digital signing
    const adapterDir = path.join(projectRoot, 'container', 'ncalayer-adapter');
    if (fs.existsSync(adapterDir)) {
        mounts.push({
            hostPath: adapterDir,
            containerPath: '/workspace/ncalayer-adapter',
            readonly: true,
        });
    }
    const p12Env = readEnvFile(['P12_PATH']);
    const p12Path = p12Env.P12_PATH || '';
    if (p12Path && fs.existsSync(p12Path)) {
        // Mount the directory containing the key (directory mounts only),
        // read-only so the agent cannot alter or delete the key material.
        const keysDir = path.dirname(p12Path);
        mounts.push({
            hostPath: keysDir,
            containerPath: '/workspace/keys',
            readonly: true,
        });
    }
    // Additional mounts validated against external allowlist (tamper-proof from containers)
    if (group.containerConfig?.additionalMounts) {
        const validatedMounts = validateAdditionalMounts(group.containerConfig.additionalMounts, group.name, isMain);
        mounts.push(...validatedMounts);
    }
    return mounts;
}
|
|
163
|
+
/**
 * Read allowed secrets from .env for passing to the container via stdin.
 * Secrets are never written to disk or mounted as files.
 * @returns {Record<string, string>} map of the allowed secret keys to values
 */
function readSecrets() {
    // Only these two credentials are ever forwarded to the agent container.
    const allowedSecretKeys = ['CLAUDE_CODE_OAUTH_TOKEN', 'ANTHROPIC_API_KEY'];
    return readEnvFile(allowedSecretKeys);
}
|
|
170
|
+
/**
 * Assemble the `run` argument vector for the container runtime.
 *
 * Produces: `run -i --rm --name <name>` plus --user/-e/--network/-v flags,
 * ending with the image name. Argument order is significant — the image must
 * come last, and flags precede it.
 *
 * @param mounts - mount specs from buildVolumeMounts()
 * @param containerName - unique name for this container instance
 * @param model - optional model override (falls back to CLAUDE_MODEL env, then default)
 * @param chatJid - chat identifier exported as CHAT_JID (see note below)
 * @returns argv array (without the runtime binary itself)
 */
function buildContainerArgs(mounts, containerName, model, chatJid) {
    const args = ['run', '-i', '--rm', '--name', containerName];
    // Run as host user so bind-mounted files are accessible.
    // Skip when running as root (uid 0), as the container's node user (uid 1000),
    // or when getuid is unavailable (native Windows without WSL).
    const hostUid = process.getuid?.();
    const hostGid = process.getgid?.();
    if (hostUid != null && hostUid !== 0 && hostUid !== 1000) {
        args.push('--user', `${hostUid}:${hostGid}`);
        args.push('-e', 'HOME=/home/node');
    }
    // Pass CLAUDE_MODEL to container — input.model overrides env default
    const effectiveModel = model || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6';
    args.push('-e', `CLAUDE_MODEL=${effectiveModel}`);
    // Persistent browser profile — cookies and sessions survive container restarts
    args.push('-e', 'AGENT_BROWSER_PROFILE=/home/node/.browser-profile');
    // Mobile device emulation — lighter pages, saves tokens
    args.push('-e', 'AGENT_BROWSER_USER_AGENT=Mozilla/5.0 (Linux; Android 14; SM-A546B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36');
    // NCALayer adapter: connect to nanoclaw-net so agent can reach NCANode
    const ncaEnv = readEnvFile(['P12_PASSWORD']);
    args.push('--network', 'nanoclaw-net');
    if (ncaEnv.P12_PASSWORD) {
        args.push('-e', `P12_PASSWORD=${ncaEnv.P12_PASSWORD}`);
        args.push('-e', 'NCANODE_URL=http://ncanode:14579');
        // NOTE(review): CHAT_JID is only exported when P12_PASSWORD is set —
        // looks like this may belong outside the P12 branch; confirm intent.
        if (chatJid)
            args.push('-e', `CHAT_JID=${chatJid}`);
    }
    // Translate mount specs into -v flags (read-only mounts go through the
    // runtime-specific readonlyMountArgs helper).
    for (const mount of mounts) {
        if (mount.readonly) {
            args.push(...readonlyMountArgs(mount.hostPath, mount.containerPath));
        }
        else {
            args.push('-v', `${mount.hostPath}:${mount.containerPath}`);
        }
    }
    // Image name must be the final positional argument.
    args.push(CONTAINER_IMAGE);
    return args;
}
|
|
208
|
+
export async function runContainerAgent(group, input, onProcess, onOutput) {
|
|
209
|
+
const startTime = Date.now();
|
|
210
|
+
const groupDir = resolveGroupFolderPath(group.folder);
|
|
211
|
+
fs.mkdirSync(groupDir, { recursive: true });
|
|
212
|
+
const mounts = buildVolumeMounts(group, input.isMain);
|
|
213
|
+
const safeName = group.folder.replace(/[^a-zA-Z0-9-]/g, '-');
|
|
214
|
+
const containerName = `nanoclaw-${safeName}-${Date.now()}`;
|
|
215
|
+
const containerArgs = buildContainerArgs(mounts, containerName, input.model, input.chatJid);
|
|
216
|
+
logger.debug({
|
|
217
|
+
group: group.name,
|
|
218
|
+
containerName,
|
|
219
|
+
mounts: mounts.map((m) => `${m.hostPath} -> ${m.containerPath}${m.readonly ? ' (ro)' : ''}`),
|
|
220
|
+
containerArgs: containerArgs.join(' '),
|
|
221
|
+
}, 'Container mount configuration');
|
|
222
|
+
logger.info({
|
|
223
|
+
group: group.name,
|
|
224
|
+
containerName,
|
|
225
|
+
mountCount: mounts.length,
|
|
226
|
+
isMain: input.isMain,
|
|
227
|
+
}, 'Spawning container agent');
|
|
228
|
+
return new Promise((resolve) => {
|
|
229
|
+
const container = spawn(CONTAINER_RUNTIME_BIN, containerArgs, {
|
|
230
|
+
stdio: ['pipe', 'pipe', 'pipe'],
|
|
231
|
+
});
|
|
232
|
+
onProcess(container, containerName);
|
|
233
|
+
let stdout = '';
|
|
234
|
+
let stderr = '';
|
|
235
|
+
let stdoutTruncated = false;
|
|
236
|
+
let stderrTruncated = false;
|
|
237
|
+
// Pass secrets via stdin (never written to disk or mounted as files)
|
|
238
|
+
input.secrets = readSecrets();
|
|
239
|
+
container.stdin.write(JSON.stringify(input));
|
|
240
|
+
container.stdin.end();
|
|
241
|
+
// Remove secrets from input so they don't appear in logs
|
|
242
|
+
delete input.secrets;
|
|
243
|
+
// Streaming output: parse OUTPUT_START/END marker pairs as they arrive
|
|
244
|
+
let parseBuffer = '';
|
|
245
|
+
let newSessionId;
|
|
246
|
+
let outputChain = Promise.resolve();
|
|
247
|
+
container.stdout.on('data', (data) => {
|
|
248
|
+
const chunk = data.toString();
|
|
249
|
+
// Always accumulate for logging
|
|
250
|
+
if (!stdoutTruncated) {
|
|
251
|
+
const remaining = CONTAINER_MAX_OUTPUT_SIZE - stdout.length;
|
|
252
|
+
if (chunk.length > remaining) {
|
|
253
|
+
stdout += chunk.slice(0, remaining);
|
|
254
|
+
stdoutTruncated = true;
|
|
255
|
+
logger.warn({ group: group.name, size: stdout.length }, 'Container stdout truncated due to size limit');
|
|
256
|
+
}
|
|
257
|
+
else {
|
|
258
|
+
stdout += chunk;
|
|
259
|
+
}
|
|
260
|
+
}
|
|
261
|
+
// Stream-parse for output markers
|
|
262
|
+
if (onOutput) {
|
|
263
|
+
parseBuffer += chunk;
|
|
264
|
+
let startIdx;
|
|
265
|
+
while ((startIdx = parseBuffer.indexOf(OUTPUT_START_MARKER)) !== -1) {
|
|
266
|
+
const endIdx = parseBuffer.indexOf(OUTPUT_END_MARKER, startIdx);
|
|
267
|
+
if (endIdx === -1)
|
|
268
|
+
break; // Incomplete pair, wait for more data
|
|
269
|
+
const jsonStr = parseBuffer
|
|
270
|
+
.slice(startIdx + OUTPUT_START_MARKER.length, endIdx)
|
|
271
|
+
.trim();
|
|
272
|
+
parseBuffer = parseBuffer.slice(endIdx + OUTPUT_END_MARKER.length);
|
|
273
|
+
try {
|
|
274
|
+
const parsed = JSON.parse(jsonStr);
|
|
275
|
+
if (parsed.newSessionId) {
|
|
276
|
+
newSessionId = parsed.newSessionId;
|
|
277
|
+
}
|
|
278
|
+
hadStreamingOutput = true;
|
|
279
|
+
// Activity detected — reset the hard timeout
|
|
280
|
+
resetTimeout();
|
|
281
|
+
// Call onOutput for all markers (including null results)
|
|
282
|
+
// so idle timers start even for "silent" query completions.
|
|
283
|
+
outputChain = outputChain.then(() => onOutput(parsed));
|
|
284
|
+
}
|
|
285
|
+
catch (err) {
|
|
286
|
+
logger.warn({ group: group.name, error: err }, 'Failed to parse streamed output chunk');
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
});
|
|
291
|
+
container.stderr.on('data', (data) => {
|
|
292
|
+
const chunk = data.toString();
|
|
293
|
+
const lines = chunk.trim().split('\n');
|
|
294
|
+
for (const line of lines) {
|
|
295
|
+
if (line)
|
|
296
|
+
logger.debug({ container: group.folder }, line);
|
|
297
|
+
}
|
|
298
|
+
// Don't reset timeout on stderr — SDK writes debug logs continuously.
|
|
299
|
+
// Timeout only resets on actual output (OUTPUT_MARKER in stdout).
|
|
300
|
+
if (stderrTruncated)
|
|
301
|
+
return;
|
|
302
|
+
const remaining = CONTAINER_MAX_OUTPUT_SIZE - stderr.length;
|
|
303
|
+
if (chunk.length > remaining) {
|
|
304
|
+
stderr += chunk.slice(0, remaining);
|
|
305
|
+
stderrTruncated = true;
|
|
306
|
+
logger.warn({ group: group.name, size: stderr.length }, 'Container stderr truncated due to size limit');
|
|
307
|
+
}
|
|
308
|
+
else {
|
|
309
|
+
stderr += chunk;
|
|
310
|
+
}
|
|
311
|
+
});
|
|
312
|
+
let timedOut = false;
|
|
313
|
+
let hadStreamingOutput = false;
|
|
314
|
+
const configTimeout = group.containerConfig?.timeout || CONTAINER_TIMEOUT;
|
|
315
|
+
// Grace period: hard timeout must be at least IDLE_TIMEOUT + 30s so the
|
|
316
|
+
// graceful _close sentinel has time to trigger before the hard kill fires.
|
|
317
|
+
const timeoutMs = Math.max(configTimeout, IDLE_TIMEOUT + 30_000);
|
|
318
|
+
const killOnTimeout = () => {
|
|
319
|
+
timedOut = true;
|
|
320
|
+
logger.error({ group: group.name, containerName }, 'Container timeout, stopping gracefully');
|
|
321
|
+
exec(stopContainer(containerName), { timeout: 15000 }, (err) => {
|
|
322
|
+
if (err) {
|
|
323
|
+
logger.warn({ group: group.name, containerName, err }, 'Graceful stop failed, force killing');
|
|
324
|
+
container.kill('SIGKILL');
|
|
325
|
+
}
|
|
326
|
+
});
|
|
327
|
+
};
|
|
328
|
+
let timeout = setTimeout(killOnTimeout, timeoutMs);
|
|
329
|
+
// Reset the timeout whenever there's activity (streaming output)
|
|
330
|
+
const resetTimeout = () => {
|
|
331
|
+
clearTimeout(timeout);
|
|
332
|
+
timeout = setTimeout(killOnTimeout, timeoutMs);
|
|
333
|
+
};
|
|
334
|
+
container.on('close', (code) => {
|
|
335
|
+
clearTimeout(timeout);
|
|
336
|
+
const duration = Date.now() - startTime;
|
|
337
|
+
if (timedOut) {
|
|
338
|
+
logContainerRun({
|
|
339
|
+
group_folder: group.folder,
|
|
340
|
+
container_name: containerName,
|
|
341
|
+
duration_ms: duration,
|
|
342
|
+
exit_code: code,
|
|
343
|
+
is_timeout: true,
|
|
344
|
+
log_text: `TIMEOUT | Had Streaming Output: ${hadStreamingOutput}`,
|
|
345
|
+
metadata: { hadStreamingOutput },
|
|
346
|
+
});
|
|
347
|
+
// Timeout after output = idle cleanup, not failure.
|
|
348
|
+
// The agent already sent its response; this is just the
|
|
349
|
+
// container being reaped after the idle period expired.
|
|
350
|
+
if (hadStreamingOutput) {
|
|
351
|
+
logger.info({ group: group.name, containerName, duration, code }, 'Container timed out after output (idle cleanup)');
|
|
352
|
+
outputChain.then(() => {
|
|
353
|
+
resolve({
|
|
354
|
+
status: 'success',
|
|
355
|
+
result: null,
|
|
356
|
+
newSessionId,
|
|
357
|
+
});
|
|
358
|
+
});
|
|
359
|
+
return;
|
|
360
|
+
}
|
|
361
|
+
logger.error({ group: group.name, containerName, duration, code }, 'Container timed out with no output');
|
|
362
|
+
resolve({
|
|
363
|
+
status: 'error',
|
|
364
|
+
result: null,
|
|
365
|
+
error: `Container timed out after ${configTimeout}ms`,
|
|
366
|
+
});
|
|
367
|
+
return;
|
|
368
|
+
}
|
|
369
|
+
const isVerbose = process.env.LOG_LEVEL === 'debug' || process.env.LOG_LEVEL === 'trace';
|
|
370
|
+
const isError = code !== 0;
|
|
371
|
+
const logText = isVerbose || isError
|
|
372
|
+
? [
|
|
373
|
+
`Exit Code: ${code}`,
|
|
374
|
+
`Duration: ${duration}ms`,
|
|
375
|
+
`Stderr${stderrTruncated ? ' (TRUNCATED)' : ''}: ${stderr.slice(-2000)}`,
|
|
376
|
+
`Stdout${stdoutTruncated ? ' (TRUNCATED)' : ''}: ${stdout.slice(-2000)}`,
|
|
377
|
+
].join('\n')
|
|
378
|
+
: `Exit Code: ${code} | Duration: ${duration}ms | Prompt: ${input.prompt.length} chars`;
|
|
379
|
+
logContainerRun({
|
|
380
|
+
group_folder: group.folder,
|
|
381
|
+
container_name: containerName,
|
|
382
|
+
duration_ms: duration,
|
|
383
|
+
exit_code: code,
|
|
384
|
+
is_timeout: false,
|
|
385
|
+
log_text: logText,
|
|
386
|
+
metadata: { isMain: input.isMain, stdoutTruncated, stderrTruncated },
|
|
387
|
+
});
|
|
388
|
+
logger.debug({ containerName, isVerbose }, 'Container log written');
|
|
389
|
+
if (code !== 0) {
|
|
390
|
+
logger.error({
|
|
391
|
+
group: group.name,
|
|
392
|
+
code,
|
|
393
|
+
duration,
|
|
394
|
+
stderr,
|
|
395
|
+
stdout,
|
|
396
|
+
containerName,
|
|
397
|
+
}, 'Container exited with error');
|
|
398
|
+
resolve({
|
|
399
|
+
status: 'error',
|
|
400
|
+
result: null,
|
|
401
|
+
error: `Container exited with code ${code}: ${stderr.slice(-200)}`,
|
|
402
|
+
});
|
|
403
|
+
return;
|
|
404
|
+
}
|
|
405
|
+
// Streaming mode: wait for output chain to settle, return completion marker
|
|
406
|
+
if (onOutput) {
|
|
407
|
+
outputChain.then(() => {
|
|
408
|
+
logger.info({ group: group.name, duration, newSessionId }, 'Container completed (streaming mode)');
|
|
409
|
+
resolve({
|
|
410
|
+
status: 'success',
|
|
411
|
+
result: null,
|
|
412
|
+
newSessionId,
|
|
413
|
+
});
|
|
414
|
+
});
|
|
415
|
+
return;
|
|
416
|
+
}
|
|
417
|
+
// Legacy mode: parse the last output marker pair from accumulated stdout
|
|
418
|
+
try {
|
|
419
|
+
// Extract JSON between sentinel markers for robust parsing
|
|
420
|
+
const startIdx = stdout.indexOf(OUTPUT_START_MARKER);
|
|
421
|
+
const endIdx = stdout.indexOf(OUTPUT_END_MARKER);
|
|
422
|
+
let jsonLine;
|
|
423
|
+
if (startIdx !== -1 && endIdx !== -1 && endIdx > startIdx) {
|
|
424
|
+
jsonLine = stdout
|
|
425
|
+
.slice(startIdx + OUTPUT_START_MARKER.length, endIdx)
|
|
426
|
+
.trim();
|
|
427
|
+
}
|
|
428
|
+
else {
|
|
429
|
+
// Fallback: last non-empty line (backwards compatibility)
|
|
430
|
+
const lines = stdout.trim().split('\n');
|
|
431
|
+
jsonLine = lines[lines.length - 1];
|
|
432
|
+
}
|
|
433
|
+
const output = JSON.parse(jsonLine);
|
|
434
|
+
logger.info({
|
|
435
|
+
group: group.name,
|
|
436
|
+
duration,
|
|
437
|
+
status: output.status,
|
|
438
|
+
hasResult: !!output.result,
|
|
439
|
+
}, 'Container completed');
|
|
440
|
+
resolve(output);
|
|
441
|
+
}
|
|
442
|
+
catch (err) {
|
|
443
|
+
logger.error({
|
|
444
|
+
group: group.name,
|
|
445
|
+
stdout,
|
|
446
|
+
stderr,
|
|
447
|
+
error: err,
|
|
448
|
+
}, 'Failed to parse container output');
|
|
449
|
+
resolve({
|
|
450
|
+
status: 'error',
|
|
451
|
+
result: null,
|
|
452
|
+
error: `Failed to parse container output: ${err instanceof Error ? err.message : String(err)}`,
|
|
453
|
+
});
|
|
454
|
+
}
|
|
455
|
+
});
|
|
456
|
+
container.on('error', (err) => {
|
|
457
|
+
clearTimeout(timeout);
|
|
458
|
+
logger.error({ group: group.name, containerName, error: err }, 'Container spawn error');
|
|
459
|
+
resolve({
|
|
460
|
+
status: 'error',
|
|
461
|
+
result: null,
|
|
462
|
+
error: `Container spawn error: ${err.message}`,
|
|
463
|
+
});
|
|
464
|
+
});
|
|
465
|
+
});
|
|
466
|
+
}
|
|
467
|
+
/**
 * Persist a snapshot of tasks into the group's IPC directory so the
 * container can read them from `current_tasks.json`.
 *
 * @param {string} groupFolder - Folder identifier of the group.
 * @param {boolean} isMain - Whether this is the main group (sees every task).
 * @param {Array<{groupFolder: string}>} tasks - Full task list to filter and write.
 */
export function writeTasksSnapshot(groupFolder, isMain, tasks) {
    const ipcDir = resolveGroupIpcPath(groupFolder);
    fs.mkdirSync(ipcDir, { recursive: true });
    // Non-main groups are restricted to tasks belonging to their own folder.
    const visible = isMain
        ? tasks
        : tasks.filter((task) => task.groupFolder === groupFolder);
    fs.writeFileSync(path.join(ipcDir, 'current_tasks.json'), JSON.stringify(visible, null, 2));
}
|
|
478
|
+
/**
 * Write the available-groups snapshot (`available_groups.json`) into the
 * group's IPC directory for the container to read.
 * Only the main group receives the full group list (needed for activation);
 * non-main groups receive an empty list since they cannot activate groups.
 *
 * @param {string} groupFolder - Folder identifier of the group.
 * @param {boolean} isMain - Whether this is the main group.
 * @param {Array<object>} groups - All known groups.
 * @param {unknown} registeredJids - Not read in this function body; kept for
 *   interface compatibility with callers. NOTE(review): confirm whether it is
 *   still needed by any caller.
 */
export function writeGroupsSnapshot(groupFolder, isMain, groups, registeredJids) {
    const ipcDir = resolveGroupIpcPath(groupFolder);
    fs.mkdirSync(ipcDir, { recursive: true });
    // Group visibility: everything for main, nothing for everyone else.
    const snapshot = {
        groups: isMain ? groups : [],
        lastSync: new Date().toISOString(),
    };
    fs.writeFileSync(path.join(ipcDir, 'available_groups.json'), JSON.stringify(snapshot, null, 2));
}
|
|
494
|
+
//# sourceMappingURL=container-runner.js.map
|