@sulala/agent-os 0.1.28 → 0.1.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/dashboard-dist/assets/index-BBmAFz5e.js +75 -0
- package/dashboard-dist/assets/index-Bu-NzOKR.js +83 -0
- package/dashboard-dist/assets/index-LNX1FrFY.css +1 -0
- package/dashboard-dist/index.html +2 -2
- package/dist/cli.js +625 -94
- package/dist/index.js +622 -91
- package/package.json +1 -1
package/dist/cli.js
CHANGED
|
@@ -1148,14 +1148,28 @@ async function callLLM(options) {
|
|
|
1148
1148
|
body.tools = tools;
|
|
1149
1149
|
body.tool_choice = "auto";
|
|
1150
1150
|
}
|
|
1151
|
-
const
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1151
|
+
const timeoutMs = Number(process.env.AGENT_OS_LLM_TIMEOUT_MS ?? 120000) || 120000;
|
|
1152
|
+
const controller = new AbortController;
|
|
1153
|
+
const timeout = setTimeout(() => controller.abort(), timeoutMs);
|
|
1154
|
+
let res;
|
|
1155
|
+
try {
|
|
1156
|
+
res = await fetch(url, {
|
|
1157
|
+
method: "POST",
|
|
1158
|
+
headers: {
|
|
1159
|
+
"Content-Type": "application/json",
|
|
1160
|
+
Authorization: `Bearer ${key}`
|
|
1161
|
+
},
|
|
1162
|
+
body: JSON.stringify(body),
|
|
1163
|
+
signal: controller.signal
|
|
1164
|
+
});
|
|
1165
|
+
} catch (err) {
|
|
1166
|
+
if (controller.signal.aborted) {
|
|
1167
|
+
throw new Error(`LLM request timed out after ${timeoutMs}ms`);
|
|
1168
|
+
}
|
|
1169
|
+
throw err;
|
|
1170
|
+
} finally {
|
|
1171
|
+
clearTimeout(timeout);
|
|
1172
|
+
}
|
|
1159
1173
|
if (!res.ok) {
|
|
1160
1174
|
const text = await res.text();
|
|
1161
1175
|
throw new Error(`LLM API error ${res.status}: ${text.slice(0, 200)}`);
|
|
@@ -1192,14 +1206,28 @@ async function* callLLMStream(options) {
|
|
|
1192
1206
|
body.tools = tools;
|
|
1193
1207
|
body.tool_choice = "auto";
|
|
1194
1208
|
}
|
|
1195
|
-
const
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1209
|
+
const timeoutMs = Number(process.env.AGENT_OS_LLM_TIMEOUT_MS ?? 120000) || 120000;
|
|
1210
|
+
const controller = new AbortController;
|
|
1211
|
+
const timeout = setTimeout(() => controller.abort(), timeoutMs);
|
|
1212
|
+
let res;
|
|
1213
|
+
try {
|
|
1214
|
+
res = await fetch(url, {
|
|
1215
|
+
method: "POST",
|
|
1216
|
+
headers: {
|
|
1217
|
+
"Content-Type": "application/json",
|
|
1218
|
+
Authorization: `Bearer ${key}`
|
|
1219
|
+
},
|
|
1220
|
+
body: JSON.stringify(body),
|
|
1221
|
+
signal: controller.signal
|
|
1222
|
+
});
|
|
1223
|
+
} catch (err) {
|
|
1224
|
+
if (controller.signal.aborted) {
|
|
1225
|
+
throw new Error(`LLM stream request timed out after ${timeoutMs}ms`);
|
|
1226
|
+
}
|
|
1227
|
+
throw err;
|
|
1228
|
+
} finally {
|
|
1229
|
+
clearTimeout(timeout);
|
|
1230
|
+
}
|
|
1203
1231
|
if (!res.ok) {
|
|
1204
1232
|
const text = await res.text();
|
|
1205
1233
|
throw new Error(`LLM API error ${res.status}: ${text.slice(0, 200)}`);
|
|
@@ -8707,6 +8735,11 @@ var init_skill_extract = __esm(() => {
|
|
|
8707
8735
|
});
|
|
8708
8736
|
|
|
8709
8737
|
// src/skills/skill-tools.ts
|
|
8738
|
+
function skillDebug(msg) {
|
|
8739
|
+
if (!DEBUG_SKILLS)
|
|
8740
|
+
return;
|
|
8741
|
+
console.log(msg);
|
|
8742
|
+
}
|
|
8710
8743
|
function getSkillBaseUrl() {
|
|
8711
8744
|
const env = process.env.AGENT_OS_SKILL_BASE_URL?.trim();
|
|
8712
8745
|
if (env)
|
|
@@ -8821,14 +8854,14 @@ function createDocOnlyRequestTool(skillId, apiBase, credentials, authScheme, aut
|
|
|
8821
8854
|
}
|
|
8822
8855
|
}
|
|
8823
8856
|
const finalUrl = urlObj.toString();
|
|
8824
|
-
|
|
8857
|
+
skillDebug(`[skill:${skillId}] ${method} ${finalUrl} (auth: ${token ? "yes" : "no"})`);
|
|
8825
8858
|
try {
|
|
8826
8859
|
const res = await fetch(finalUrl, {
|
|
8827
8860
|
method,
|
|
8828
8861
|
headers,
|
|
8829
8862
|
body: method !== "GET" && method !== "HEAD" && body != null ? JSON.stringify(body) : undefined
|
|
8830
8863
|
});
|
|
8831
|
-
|
|
8864
|
+
skillDebug(`[skill:${skillId}] ${method} ${finalUrl} -> ${res.status}`);
|
|
8832
8865
|
const text = await res.text();
|
|
8833
8866
|
let data;
|
|
8834
8867
|
try {
|
|
@@ -8875,14 +8908,14 @@ function createTokenRequestTool(skillId, toolId, baseUrl, description) {
|
|
|
8875
8908
|
"Content-Type": "application/json",
|
|
8876
8909
|
Authorization: `Bearer ${token}`
|
|
8877
8910
|
};
|
|
8878
|
-
|
|
8911
|
+
skillDebug(`[skill:${skillId}] ${method} ${url} (token: yes)`);
|
|
8879
8912
|
try {
|
|
8880
8913
|
const res = await fetch(url, {
|
|
8881
8914
|
method,
|
|
8882
8915
|
headers,
|
|
8883
8916
|
body: method !== "GET" && method !== "HEAD" && body != null ? JSON.stringify(body) : undefined
|
|
8884
8917
|
});
|
|
8885
|
-
|
|
8918
|
+
skillDebug(`[skill:${skillId}] ${method} ${url} -> ${res.status}`);
|
|
8886
8919
|
const text = await res.text();
|
|
8887
8920
|
let data;
|
|
8888
8921
|
try {
|
|
@@ -8899,7 +8932,10 @@ function createTokenRequestTool(skillId, toolId, baseUrl, description) {
|
|
|
8899
8932
|
}
|
|
8900
8933
|
};
|
|
8901
8934
|
}
|
|
8902
|
-
var
|
|
8935
|
+
var DEBUG_SKILLS;
|
|
8936
|
+
var init_skill_tools = __esm(() => {
|
|
8937
|
+
DEBUG_SKILLS = (process.env.AGENT_OS_DEBUG ?? "").trim() === "1" || (process.env.AGENT_OS_DEBUG_SKILLS ?? "").trim() === "1";
|
|
8938
|
+
});
|
|
8903
8939
|
|
|
8904
8940
|
// src/skills/loader.ts
|
|
8905
8941
|
import { readFile as readFile5, readdir as readdir3, cp, mkdir as mkdir3, writeFile as writeFile3, rm } from "fs/promises";
|
|
@@ -23201,13 +23237,34 @@ var init_events = __esm(() => {
|
|
|
23201
23237
|
// src/core/runtime.ts
|
|
23202
23238
|
import { readFile as readFile6 } from "fs/promises";
|
|
23203
23239
|
import { join as join8 } from "path";
|
|
23240
|
+
function isRateLimitError(err) {
|
|
23241
|
+
const msg = errorMessage(err);
|
|
23242
|
+
return msg.includes("429") || /rate_limit|rate limit/i.test(msg);
|
|
23243
|
+
}
|
|
23204
23244
|
function formatLLMErrorForUser(err) {
|
|
23205
23245
|
const msg = errorMessage(err);
|
|
23206
|
-
if (
|
|
23246
|
+
if (isRateLimitError(err)) {
|
|
23207
23247
|
return `Request limit reached. If this task requires a skill the agent doesn't have, install it from hub.sulala.ai and add it to this agent to avoid repeated attempts. Otherwise try again later.`;
|
|
23208
23248
|
}
|
|
23209
23249
|
return msg;
|
|
23210
23250
|
}
|
|
23251
|
+
async function callLLMWithRetry(options) {
|
|
23252
|
+
let lastErr;
|
|
23253
|
+
for (let attempt = 0;attempt <= RATE_LIMIT_RETRIES; attempt++) {
|
|
23254
|
+
try {
|
|
23255
|
+
return await callLLM(options);
|
|
23256
|
+
} catch (err) {
|
|
23257
|
+
lastErr = err;
|
|
23258
|
+
if (attempt < RATE_LIMIT_RETRIES && isRateLimitError(err)) {
|
|
23259
|
+
const delay = RATE_LIMIT_BACKOFF_MS[attempt] ?? 8000;
|
|
23260
|
+
await new Promise((r) => setTimeout(r, delay));
|
|
23261
|
+
continue;
|
|
23262
|
+
}
|
|
23263
|
+
throw err;
|
|
23264
|
+
}
|
|
23265
|
+
}
|
|
23266
|
+
throw lastErr;
|
|
23267
|
+
}
|
|
23211
23268
|
function runAgentInner(options) {
|
|
23212
23269
|
return (async () => {
|
|
23213
23270
|
const { task, agent, conversationHistory = [], maxTurnsOverride } = options;
|
|
@@ -23255,7 +23312,7 @@ function runAgentInner(options) {
|
|
|
23255
23312
|
while (turns < maxTurns) {
|
|
23256
23313
|
turns++;
|
|
23257
23314
|
try {
|
|
23258
|
-
const response = await
|
|
23315
|
+
const response = await callLLMWithRetry({
|
|
23259
23316
|
model: agent.model,
|
|
23260
23317
|
messages,
|
|
23261
23318
|
tools,
|
|
@@ -23406,7 +23463,7 @@ function runAgentInner(options) {
|
|
|
23406
23463
|
...messages,
|
|
23407
23464
|
{ role: "user", content: "Summarize the tool results above in one short paragraph for the user. Reply only with the summary, no tool calls." }
|
|
23408
23465
|
];
|
|
23409
|
-
const summaryResponse = await
|
|
23466
|
+
const summaryResponse = await callLLMWithRetry({
|
|
23410
23467
|
model: agent.model,
|
|
23411
23468
|
messages: summaryMessages,
|
|
23412
23469
|
tools: undefined
|
|
@@ -23699,7 +23756,7 @@ Use the following documentation to know how to call skill APIs. When you have a
|
|
|
23699
23756
|
|
|
23700
23757
|
`);
|
|
23701
23758
|
}
|
|
23702
|
-
var MAX_HISTORY_TURNS = 20, SKILL_REQUIRED_MESSAGE = "This task requires a skill that this agent doesn't have. Install the skill from hub.sulala.ai (Dashboard \u2192 Skills \u2192 install from store), then add it to this agent in Edit agent.", WORKSPACE_PROMPT_FILES;
|
|
23759
|
+
var MAX_HISTORY_TURNS = 20, SKILL_REQUIRED_MESSAGE = "This task requires a skill that this agent doesn't have. Install the skill from hub.sulala.ai (Dashboard \u2192 Skills \u2192 install from store), then add it to this agent in Edit agent.", RATE_LIMIT_RETRIES = 3, RATE_LIMIT_BACKOFF_MS, WORKSPACE_PROMPT_FILES;
|
|
23703
23760
|
var init_runtime = __esm(() => {
|
|
23704
23761
|
init_llm();
|
|
23705
23762
|
init_tool_registry();
|
|
@@ -23710,6 +23767,7 @@ var init_runtime = __esm(() => {
|
|
|
23710
23767
|
init_events();
|
|
23711
23768
|
init_config();
|
|
23712
23769
|
init_agent_registry();
|
|
23770
|
+
RATE_LIMIT_BACKOFF_MS = [2000, 4000, 8000];
|
|
23713
23771
|
WORKSPACE_PROMPT_FILES = ["IDENTITY.md", "USER.md", "SYSTEM.md", "TOOLS.md"];
|
|
23714
23772
|
});
|
|
23715
23773
|
|
|
@@ -23854,6 +23912,75 @@ function validateGraph(graph) {
|
|
|
23854
23912
|
throw new Error("Graph.edges must be an array");
|
|
23855
23913
|
}
|
|
23856
23914
|
}
|
|
23915
|
+
function graphDebug(msg) {
|
|
23916
|
+
if (!DEBUG_GRAPHS)
|
|
23917
|
+
return;
|
|
23918
|
+
console.log(msg);
|
|
23919
|
+
}
|
|
23920
|
+
function truncatePredecessorOutput(text, maxChars) {
|
|
23921
|
+
if (maxChars <= 0 || text.length <= maxChars)
|
|
23922
|
+
return text;
|
|
23923
|
+
return text.slice(0, maxChars) + `
|
|
23924
|
+
|
|
23925
|
+
\u2026 [output truncated for next node]`;
|
|
23926
|
+
}
|
|
23927
|
+
function buildGraphNodeTaskInput(args) {
|
|
23928
|
+
const { graphId, initialInput, nodeId, agentId, predecessors, successors } = args;
|
|
23929
|
+
const predBlock = predecessors.length === 0 ? "(none)" : predecessors.map((p) => `---
|
|
23930
|
+
from: ${p.node_id} (${p.agent_id})
|
|
23931
|
+
content:
|
|
23932
|
+
${p.output.trim() || "(empty)"}`).join(`
|
|
23933
|
+
|
|
23934
|
+
`);
|
|
23935
|
+
const nextBlock = successors.length === 0 ? "(this is the final node; no next agent)" : successors.map((s, idx) => `${idx + 1}. ${s.node_id} (${s.agent_id || "agent"})`).join(`
|
|
23936
|
+
`);
|
|
23937
|
+
return `You are running as part of a multi-agent graph pipeline.
|
|
23938
|
+
|
|
23939
|
+
Graph: ${graphId}
|
|
23940
|
+
Current node: ${nodeId} (${agentId})
|
|
23941
|
+
|
|
23942
|
+
Next node(s) in the pipeline:
|
|
23943
|
+
${nextBlock}
|
|
23944
|
+
|
|
23945
|
+
User goal (original input):
|
|
23946
|
+
${initialInput.trim()}
|
|
23947
|
+
|
|
23948
|
+
Handoff from previous node(s):
|
|
23949
|
+
${predBlock}
|
|
23950
|
+
|
|
23951
|
+
Rules (strict):
|
|
23952
|
+
- Do NOT ask the user follow-up questions. If something is missing, make the most reasonable assumptions and proceed.
|
|
23953
|
+
- Treat the previous node outputs as authoritative context and continue the work.
|
|
23954
|
+
- Produce output that the NEXT node(s) above can immediately use.
|
|
23955
|
+
- If you used tools, include the final results in your output (not just \u201CI did it\u201D).
|
|
23956
|
+
|
|
23957
|
+
Required output format:
|
|
23958
|
+
Return a single JSON object only (no markdown), with:
|
|
23959
|
+
{
|
|
23960
|
+
"done": boolean,
|
|
23961
|
+
"result": "your main deliverable (what you produced in this node)",
|
|
23962
|
+
"next": "exact instruction to the next node(s) above about what to do with result. Address them by role, e.g. 'Source Verify Agent: verify these facts by checking reputable sources and flag anything inconsistent.'",
|
|
23963
|
+
"artifacts": { "key": "value" }
|
|
23964
|
+
}
|
|
23965
|
+
|
|
23966
|
+
Now do your node's job.`;
|
|
23967
|
+
}
|
|
23968
|
+
async function withTimeout(promise2, timeoutMs, label) {
|
|
23969
|
+
if (!Number.isFinite(timeoutMs) || timeoutMs <= 0)
|
|
23970
|
+
return promise2;
|
|
23971
|
+
let timeout;
|
|
23972
|
+
try {
|
|
23973
|
+
return await Promise.race([
|
|
23974
|
+
promise2,
|
|
23975
|
+
new Promise((_, reject) => {
|
|
23976
|
+
timeout = setTimeout(() => reject(new Error(`${label} timed out after ${timeoutMs}ms`)), timeoutMs);
|
|
23977
|
+
})
|
|
23978
|
+
]);
|
|
23979
|
+
} finally {
|
|
23980
|
+
if (timeout)
|
|
23981
|
+
clearTimeout(timeout);
|
|
23982
|
+
}
|
|
23983
|
+
}
|
|
23857
23984
|
function topologicalLevels(graph) {
|
|
23858
23985
|
const nodes = graph.nodes.map((n) => n.id);
|
|
23859
23986
|
const incoming = new Map;
|
|
@@ -23905,20 +24032,50 @@ function getPredecessors(graph) {
|
|
|
23905
24032
|
p.sort();
|
|
23906
24033
|
return pred;
|
|
23907
24034
|
}
|
|
24035
|
+
function getSuccessors(graph) {
|
|
24036
|
+
const succ = new Map;
|
|
24037
|
+
for (const n of graph.nodes)
|
|
24038
|
+
succ.set(n.id, []);
|
|
24039
|
+
for (const edge of graph.edges) {
|
|
24040
|
+
if (succ.has(edge.from)) {
|
|
24041
|
+
succ.get(edge.from).push(edge.to);
|
|
24042
|
+
}
|
|
24043
|
+
}
|
|
24044
|
+
for (const [, s] of succ)
|
|
24045
|
+
s.sort();
|
|
24046
|
+
return succ;
|
|
24047
|
+
}
|
|
23908
24048
|
async function runGraph(options) {
|
|
23909
|
-
const {
|
|
24049
|
+
const {
|
|
24050
|
+
graph,
|
|
24051
|
+
input,
|
|
24052
|
+
max_turns_per_node = DEFAULT_GRAPH_MAX_TURNS_PER_NODE,
|
|
24053
|
+
max_predecessor_output_chars = DEFAULT_GRAPH_MAX_PREDECESSOR_OUTPUT_CHARS
|
|
24054
|
+
} = options;
|
|
24055
|
+
const nodeTimeoutMs = Number(process.env.AGENT_OS_GRAPH_NODE_TIMEOUT_MS ?? DEFAULT_GRAPH_NODE_TIMEOUT_MS) || DEFAULT_GRAPH_NODE_TIMEOUT_MS;
|
|
23910
24056
|
const levels = topologicalLevels(graph);
|
|
23911
24057
|
const predecessors = getPredecessors(graph);
|
|
24058
|
+
const successors = getSuccessors(graph);
|
|
23912
24059
|
const outputs = new Map;
|
|
23913
24060
|
const nodeResults = [];
|
|
23914
|
-
|
|
24061
|
+
const inputPreview = input.length > 80 ? input.slice(0, 80) + "\u2026" : input;
|
|
24062
|
+
graphDebug(`[graph] Running graph "${graph.id}" (sync), input: ${JSON.stringify(inputPreview)}`);
|
|
24063
|
+
for (let levelIndex = 0;levelIndex < levels.length; levelIndex++) {
|
|
24064
|
+
const level = levels[levelIndex];
|
|
24065
|
+
const nodeNames = level.map((id) => {
|
|
24066
|
+
const n = graph.nodes.find((nn) => nn.id === id);
|
|
24067
|
+
return n ? `${n.id} (${n.agent})` : id;
|
|
24068
|
+
}).join(", ");
|
|
24069
|
+
graphDebug(`[graph] Level ${levelIndex}: running nodes one by one: ${nodeNames}`);
|
|
23915
24070
|
const runOne = async (nodeId) => {
|
|
23916
24071
|
const node = graph.nodes.find((n) => n.id === nodeId);
|
|
23917
24072
|
if (!node) {
|
|
24073
|
+
graphDebug(`[graph] node ${nodeId} failed: Node not found`);
|
|
23918
24074
|
return { node_id: nodeId, agent_id: "", success: false, output: "", error: "Node not found" };
|
|
23919
24075
|
}
|
|
23920
24076
|
const agent = await getAgent(node.agent);
|
|
23921
24077
|
if (!agent) {
|
|
24078
|
+
graphDebug(`[graph] node ${node.id} failed: Agent not found (${node.agent})`);
|
|
23922
24079
|
return {
|
|
23923
24080
|
node_id: node.id,
|
|
23924
24081
|
agent_id: node.agent,
|
|
@@ -23928,29 +24085,55 @@ async function runGraph(options) {
|
|
|
23928
24085
|
};
|
|
23929
24086
|
}
|
|
23930
24087
|
const preds = predecessors.get(node.id) ?? [];
|
|
23931
|
-
const
|
|
23932
|
-
|
|
23933
|
-
|
|
23934
|
-
|
|
23935
|
-
|
|
23936
|
-
|
|
23937
|
-
|
|
24088
|
+
const predOutputs = preds.map((p) => {
|
|
24089
|
+
const raw = outputs.get(p) ?? "";
|
|
24090
|
+
const clipped = max_predecessor_output_chars > 0 ? truncatePredecessorOutput(raw, max_predecessor_output_chars) : raw;
|
|
24091
|
+
return { node_id: p, agent_id: graph.nodes.find((n) => n.id === p)?.agent ?? "", output: clipped };
|
|
24092
|
+
}).filter((p) => Boolean(p.output?.trim()));
|
|
24093
|
+
const nextIds = successors.get(node.id) ?? [];
|
|
24094
|
+
const nextMeta = nextIds.map((nid) => ({
|
|
24095
|
+
node_id: nid,
|
|
24096
|
+
agent_id: graph.nodes.find((n) => n.id === nid)?.agent ?? ""
|
|
24097
|
+
}));
|
|
24098
|
+
const taskInput = buildGraphNodeTaskInput({
|
|
24099
|
+
graphId: graph.id,
|
|
24100
|
+
initialInput: input,
|
|
24101
|
+
nodeId: node.id,
|
|
24102
|
+
agentId: node.agent,
|
|
24103
|
+
predecessors: predOutputs,
|
|
24104
|
+
successors: nextMeta
|
|
23938
24105
|
});
|
|
23939
|
-
|
|
23940
|
-
|
|
23941
|
-
|
|
23942
|
-
|
|
23943
|
-
|
|
23944
|
-
|
|
23945
|
-
|
|
23946
|
-
|
|
24106
|
+
graphDebug(`[graph] node ${node.id} (${node.agent}) running\u2026`);
|
|
24107
|
+
try {
|
|
24108
|
+
const result = await withTimeout(runAgent({
|
|
24109
|
+
agent,
|
|
24110
|
+
task: taskInput,
|
|
24111
|
+
maxTurnsOverride: max_turns_per_node
|
|
24112
|
+
}), nodeTimeoutMs, `[graph] node ${node.id} (${node.agent})`);
|
|
24113
|
+
outputs.set(node.id, result.output || "");
|
|
24114
|
+
const outPreview = (result.output?.length ?? 0) > 60 ? (result.output ?? "").slice(0, 60) + "\u2026" : result.output ?? "";
|
|
24115
|
+
graphDebug(`[graph] node ${node.id} done: success=${result.success}${result.error ? ` error=${result.error}` : ""} output=${JSON.stringify(outPreview)}`);
|
|
24116
|
+
return {
|
|
24117
|
+
node_id: node.id,
|
|
24118
|
+
agent_id: node.agent,
|
|
24119
|
+
success: result.success,
|
|
24120
|
+
output: result.output,
|
|
24121
|
+
error: result.error
|
|
24122
|
+
};
|
|
24123
|
+
} catch (err) {
|
|
24124
|
+
const msg = errorMessage(err);
|
|
24125
|
+
graphDebug(`[graph] node ${node.id} failed: ${msg}`);
|
|
24126
|
+
outputs.set(node.id, "");
|
|
24127
|
+
return { node_id: node.id, agent_id: node.agent, success: false, output: "", error: msg };
|
|
24128
|
+
}
|
|
23947
24129
|
};
|
|
23948
|
-
const
|
|
23949
|
-
|
|
24130
|
+
for (const nodeId of level) {
|
|
24131
|
+
const r = await runOne(nodeId);
|
|
23950
24132
|
nodeResults.push(r);
|
|
23951
24133
|
}
|
|
23952
24134
|
}
|
|
23953
24135
|
const last = nodeResults[nodeResults.length - 1];
|
|
24136
|
+
graphDebug(`[graph] Graph "${graph.id}" finished (sync). success=${last ? last.success : false}`);
|
|
23954
24137
|
return {
|
|
23955
24138
|
success: last ? last.success : false,
|
|
23956
24139
|
output: last ? last.output : "",
|
|
@@ -23958,20 +24141,37 @@ async function runGraph(options) {
|
|
|
23958
24141
|
};
|
|
23959
24142
|
}
|
|
23960
24143
|
async function runGraphStream(options, onEvent) {
|
|
23961
|
-
const {
|
|
24144
|
+
const {
|
|
24145
|
+
graph,
|
|
24146
|
+
input,
|
|
24147
|
+
max_turns_per_node = DEFAULT_GRAPH_MAX_TURNS_PER_NODE,
|
|
24148
|
+
max_predecessor_output_chars = DEFAULT_GRAPH_MAX_PREDECESSOR_OUTPUT_CHARS
|
|
24149
|
+
} = options;
|
|
24150
|
+
const nodeTimeoutMs = Number(process.env.AGENT_OS_GRAPH_NODE_TIMEOUT_MS ?? DEFAULT_GRAPH_NODE_TIMEOUT_MS) || DEFAULT_GRAPH_NODE_TIMEOUT_MS;
|
|
23962
24151
|
const levels = topologicalLevels(graph);
|
|
23963
24152
|
const predecessors = getPredecessors(graph);
|
|
24153
|
+
const successors = getSuccessors(graph);
|
|
23964
24154
|
const outputs = new Map;
|
|
23965
24155
|
const nodeResults = [];
|
|
24156
|
+
const inputPreview = input.length > 80 ? input.slice(0, 80) + "\u2026" : input;
|
|
24157
|
+
graphDebug(`[graph] Running graph "${graph.id}", input: ${JSON.stringify(inputPreview)}`);
|
|
23966
24158
|
try {
|
|
23967
|
-
for (
|
|
24159
|
+
for (let levelIndex = 0;levelIndex < levels.length; levelIndex++) {
|
|
24160
|
+
const level = levels[levelIndex];
|
|
24161
|
+
const nodeNames = level.map((id) => {
|
|
24162
|
+
const n = graph.nodes.find((nn) => nn.id === id);
|
|
24163
|
+
return n ? `${n.id} (${n.agent})` : id;
|
|
24164
|
+
}).join(", ");
|
|
24165
|
+
graphDebug(`[graph] Level ${levelIndex}: running nodes one by one: ${nodeNames}`);
|
|
23968
24166
|
const runOne = async (nodeId) => {
|
|
23969
24167
|
const node = graph.nodes.find((n) => n.id === nodeId);
|
|
23970
24168
|
if (!node) {
|
|
24169
|
+
graphDebug(`[graph] node ${nodeId} failed: Node not found`);
|
|
23971
24170
|
return { node_id: nodeId, agent_id: "", success: false, output: "", error: "Node not found" };
|
|
23972
24171
|
}
|
|
23973
24172
|
const agent = await getAgent(node.agent);
|
|
23974
24173
|
if (!agent) {
|
|
24174
|
+
graphDebug(`[graph] node ${node.id} failed: Agent not found (${node.agent})`);
|
|
23975
24175
|
return {
|
|
23976
24176
|
node_id: node.id,
|
|
23977
24177
|
agent_id: node.agent,
|
|
@@ -23981,29 +24181,66 @@ async function runGraphStream(options, onEvent) {
|
|
|
23981
24181
|
};
|
|
23982
24182
|
}
|
|
23983
24183
|
const preds = predecessors.get(node.id) ?? [];
|
|
23984
|
-
const
|
|
23985
|
-
|
|
23986
|
-
|
|
23987
|
-
|
|
23988
|
-
|
|
23989
|
-
|
|
23990
|
-
|
|
24184
|
+
const predOutputs = preds.map((p) => {
|
|
24185
|
+
const raw = outputs.get(p) ?? "";
|
|
24186
|
+
const clipped = max_predecessor_output_chars > 0 ? truncatePredecessorOutput(raw, max_predecessor_output_chars) : raw;
|
|
24187
|
+
return { node_id: p, agent_id: graph.nodes.find((n) => n.id === p)?.agent ?? "", output: clipped };
|
|
24188
|
+
}).filter((p) => Boolean(p.output?.trim()));
|
|
24189
|
+
const nextIds = successors.get(node.id) ?? [];
|
|
24190
|
+
const nextMeta = nextIds.map((nid) => ({
|
|
24191
|
+
node_id: nid,
|
|
24192
|
+
agent_id: graph.nodes.find((n) => n.id === nid)?.agent ?? ""
|
|
24193
|
+
}));
|
|
24194
|
+
const taskInput = buildGraphNodeTaskInput({
|
|
24195
|
+
graphId: graph.id,
|
|
24196
|
+
initialInput: input,
|
|
24197
|
+
nodeId: node.id,
|
|
24198
|
+
agentId: node.agent,
|
|
24199
|
+
predecessors: predOutputs,
|
|
24200
|
+
successors: nextMeta
|
|
23991
24201
|
});
|
|
23992
|
-
|
|
23993
|
-
|
|
23994
|
-
|
|
23995
|
-
|
|
23996
|
-
|
|
23997
|
-
|
|
23998
|
-
|
|
23999
|
-
|
|
24000
|
-
|
|
24001
|
-
|
|
24002
|
-
|
|
24202
|
+
graphDebug(`[graph] node ${node.id} (${node.agent}) running\u2026`);
|
|
24203
|
+
try {
|
|
24204
|
+
const result = await withTimeout(runAgent({
|
|
24205
|
+
agent,
|
|
24206
|
+
task: taskInput,
|
|
24207
|
+
maxTurnsOverride: max_turns_per_node
|
|
24208
|
+
}), nodeTimeoutMs, `[graph] node ${node.id} (${node.agent})`);
|
|
24209
|
+
outputs.set(node.id, result.output || "");
|
|
24210
|
+
const payload = {
|
|
24211
|
+
node_id: node.id,
|
|
24212
|
+
agent_id: node.agent,
|
|
24213
|
+
success: result.success,
|
|
24214
|
+
output: result.output,
|
|
24215
|
+
error: result.error
|
|
24216
|
+
};
|
|
24217
|
+
nodeResults.push(payload);
|
|
24218
|
+
const outPreview = (result.output?.length ?? 0) > 60 ? (result.output ?? "").slice(0, 60) + "\u2026" : result.output ?? "";
|
|
24219
|
+
graphDebug(`[graph] node ${node.id} done: success=${result.success}${result.error ? ` error=${result.error}` : ""} output=${JSON.stringify(outPreview)}`);
|
|
24220
|
+
onEvent({ type: "node_done", ...payload });
|
|
24221
|
+
return payload;
|
|
24222
|
+
} catch (err) {
|
|
24223
|
+
const msg = errorMessage(err);
|
|
24224
|
+
graphDebug(`[graph] node ${node.id} failed: ${msg}`);
|
|
24225
|
+
const payload = {
|
|
24226
|
+
node_id: node.id,
|
|
24227
|
+
agent_id: node.agent,
|
|
24228
|
+
success: false,
|
|
24229
|
+
output: "",
|
|
24230
|
+
error: msg
|
|
24231
|
+
};
|
|
24232
|
+
outputs.set(node.id, "");
|
|
24233
|
+
nodeResults.push(payload);
|
|
24234
|
+
onEvent({ type: "node_done", ...payload });
|
|
24235
|
+
return payload;
|
|
24236
|
+
}
|
|
24003
24237
|
};
|
|
24004
|
-
|
|
24238
|
+
for (const nodeId of level) {
|
|
24239
|
+
await runOne(nodeId);
|
|
24240
|
+
}
|
|
24005
24241
|
}
|
|
24006
24242
|
const last = nodeResults[nodeResults.length - 1];
|
|
24243
|
+
graphDebug(`[graph] Graph "${graph.id}" finished. success=${last ? last.success : false}`);
|
|
24007
24244
|
onEvent({
|
|
24008
24245
|
type: "done",
|
|
24009
24246
|
success: last ? last.success : false,
|
|
@@ -24012,15 +24249,17 @@ async function runGraphStream(options, onEvent) {
|
|
|
24012
24249
|
});
|
|
24013
24250
|
} catch (err) {
|
|
24014
24251
|
const msg = errorMessage(err);
|
|
24252
|
+
console.error(`[graph] Graph "${graph.id}" error:`, msg);
|
|
24015
24253
|
onEvent({ type: "error", message: msg });
|
|
24016
24254
|
throw err;
|
|
24017
24255
|
}
|
|
24018
24256
|
}
|
|
24019
|
-
var DEFAULT_GRAPHS_DIR, DEFAULT_GRAPH_MAX_TURNS_PER_NODE = 5;
|
|
24257
|
+
var DEFAULT_GRAPHS_DIR, DEFAULT_GRAPH_MAX_TURNS_PER_NODE = 5, DEFAULT_GRAPH_MAX_PREDECESSOR_OUTPUT_CHARS = 4000, DEFAULT_GRAPH_NODE_TIMEOUT_MS = 120000, DEBUG_GRAPHS;
|
|
24020
24258
|
var init_graphs = __esm(() => {
|
|
24021
24259
|
init_agent_registry();
|
|
24022
24260
|
init_runtime();
|
|
24023
24261
|
DEFAULT_GRAPHS_DIR = join9(process.env.HOME || process.env.USERPROFILE || "~", ".agent-os", "graphs");
|
|
24262
|
+
DEBUG_GRAPHS = (process.env.AGENT_OS_DEBUG ?? "").trim() === "1" || (process.env.AGENT_OS_DEBUG_GRAPHS ?? "").trim() === "1";
|
|
24024
24263
|
});
|
|
24025
24264
|
|
|
24026
24265
|
// src/http/utils.ts
|
|
@@ -25965,9 +26204,166 @@ var init_tasks = __esm(() => {
|
|
|
25965
26204
|
TASKS_DIR = process.env.AGENT_OS_TASKS_DIR || join10(process.env.HOME || process.env.USERPROFILE || "~", ".agent-os", "tasks");
|
|
25966
26205
|
TASKS_FILE = join10(TASKS_DIR, "tasks.log");
|
|
25967
26206
|
memoryTaskStore = new MemoryTaskStore;
|
|
25968
|
-
maxWorkers = Math.max(1, (os.cpus?.().length ?? 1) * 2);
|
|
26207
|
+
maxWorkers = process.env.AGENT_OS_MAX_WORKERS != null ? Math.max(1, parseInt(process.env.AGENT_OS_MAX_WORKERS, 10) || 1) : Math.max(1, (os.cpus?.().length ?? 1) * 2);
|
|
25969
26208
|
});
|
|
25970
26209
|
|
|
26210
|
+
// src/core/prompt-gate.ts
|
|
26211
|
+
function looksLikeOnlyUrl(text) {
|
|
26212
|
+
const t = text.trim();
|
|
26213
|
+
if (!t)
|
|
26214
|
+
return false;
|
|
26215
|
+
const urlRegex = /https?:\/\/\S+/g;
|
|
26216
|
+
const urls = t.match(urlRegex) ?? [];
|
|
26217
|
+
if (urls.length !== 1)
|
|
26218
|
+
return false;
|
|
26219
|
+
const withoutUrl = t.replace(urlRegex, "").replace(/[^\p{L}\p{N}]+/gu, " ").trim();
|
|
26220
|
+
return withoutUrl.length <= 12;
|
|
26221
|
+
}
|
|
26222
|
+
function hasAny(text, needles) {
|
|
26223
|
+
const t = text.toLowerCase();
|
|
26224
|
+
return needles.some((n) => t.includes(n));
|
|
26225
|
+
}
|
|
26226
|
+
function genericTemplate(mode) {
|
|
26227
|
+
const scope = mode.kind === "graph_run" ? `Graph: ${mode.graph_id}` : `Agent: ${mode.agent_id}`;
|
|
26228
|
+
return `Goal (${scope}):
|
|
26229
|
+
|
|
26230
|
+
Constraints (optional but recommended):
|
|
26231
|
+
- How many results? (e.g. 1)
|
|
26232
|
+
- Allowed sources: [general knowledge only] [web allowed]
|
|
26233
|
+
- If web allowed: max pages/sources (e.g. 1) + what to do if blocked (stop/skip)
|
|
26234
|
+
- Output format: (e.g. JSON) + required keys
|
|
26235
|
+
- Length limit: (e.g. <= 120 words)
|
|
26236
|
+
|
|
26237
|
+
If this involves actions (CRM / posting / updating):
|
|
26238
|
+
- Which system?
|
|
26239
|
+
- Exact fields to write
|
|
26240
|
+
- Failure policy (return payload only / retry / stop)`;
|
|
26241
|
+
}
|
|
26242
|
+
function genericSuggestions(text) {
|
|
26243
|
+
const wantsCrm = hasAny(text, ["crm", "hubspot", "salesforce", "pipeline", "deal", "contact", "sync", "update", "create"]);
|
|
26244
|
+
const wantsWeb = hasAny(text, ["http://", "https://", "browse", "web", "site", "link", "scrape", "crawl", "research"]);
|
|
26245
|
+
return [
|
|
26246
|
+
{
|
|
26247
|
+
id: "fast_no_tools",
|
|
26248
|
+
title: "Fast (no tools)",
|
|
26249
|
+
prompt: `Do NOT browse the web or call any tools.
|
|
26250
|
+
Use general knowledge only.
|
|
26251
|
+
Return strict JSON only.
|
|
26252
|
+
Keep it under 120 words.`
|
|
26253
|
+
},
|
|
26254
|
+
{
|
|
26255
|
+
id: "balanced_bounded_web",
|
|
26256
|
+
title: "Balanced (bounded web)",
|
|
26257
|
+
prompt: `If you browse the web, browse at most ONE page total. If blocked/slow, reply exactly: "BLOCKED".
|
|
26258
|
+
Return strict JSON only.
|
|
26259
|
+
Keep it under 160 words.`
|
|
26260
|
+
},
|
|
26261
|
+
...wantsWeb ? [
|
|
26262
|
+
{
|
|
26263
|
+
id: "thorough_bounded",
|
|
26264
|
+
title: "Thorough (bounded)",
|
|
26265
|
+
prompt: `Use up to 3 sources/pages max.
|
|
26266
|
+
Stop once you have enough for ONE high-confidence answer.
|
|
26267
|
+
Return strict JSON only.
|
|
26268
|
+
If any source is blocked, skip it.`
|
|
26269
|
+
}
|
|
26270
|
+
] : [],
|
|
26271
|
+
...wantsCrm ? [
|
|
26272
|
+
{
|
|
26273
|
+
id: "action_only_no_research",
|
|
26274
|
+
title: "Action-only (no research)",
|
|
26275
|
+
prompt: `Do NOT browse the web.
|
|
26276
|
+
If required info is missing, ask for it first.
|
|
26277
|
+
Then perform the action.
|
|
26278
|
+
If the action fails, return the JSON payload you would write.`
|
|
26279
|
+
}
|
|
26280
|
+
] : []
|
|
26281
|
+
];
|
|
26282
|
+
}
|
|
26283
|
+
function detectRiskScore(text) {
|
|
26284
|
+
let score = 0;
|
|
26285
|
+
if (hasAny(text, ["find", "research", "scrape", "crawl", "lookup", "look up", "analyze", "compare", "summarize"]))
|
|
26286
|
+
score += 2;
|
|
26287
|
+
if (hasAny(text, ["lead", "prospect", "enterprise", "companies", "list", "enrich"]))
|
|
26288
|
+
score += 2;
|
|
26289
|
+
if (hasAny(text, ["http://", "https://", "website", "link", "url"]))
|
|
26290
|
+
score += 2;
|
|
26291
|
+
if (hasAny(text, ["crm", "hubspot", "salesforce", "pipeline", "deal", "contact", "sync", "update", "create"]))
|
|
26292
|
+
score += 3;
|
|
26293
|
+
if (text.length > 400)
|
|
26294
|
+
score += 1;
|
|
26295
|
+
return score;
|
|
26296
|
+
}
|
|
26297
|
+
function detectBoundsScore(text) {
|
|
26298
|
+
let score = 0;
|
|
26299
|
+
const t = text.toLowerCase();
|
|
26300
|
+
if (/\bjson\b/.test(t) || hasAny(t, ["output json", "return json", "keys:"]))
|
|
26301
|
+
score += 2;
|
|
26302
|
+
if (/\b(one|1|exactly\s+\d+)\b/.test(t))
|
|
26303
|
+
score += 2;
|
|
26304
|
+
if (hasAny(t, ["do not browse", "don't browse", "no tools", "do not call any tools", "web allowed"]))
|
|
26305
|
+
score += 2;
|
|
26306
|
+
if (hasAny(t, ["max", "at most", "no more than", "one page", "one source", "sources", "limit"]))
|
|
26307
|
+
score += 1;
|
|
26308
|
+
if (hasAny(t, ["under", "<=", "words", "characters"]))
|
|
26309
|
+
score += 1;
|
|
26310
|
+
if (hasAny(t, ["if blocked", "if slow", "timeout", "skip", "stop"]))
|
|
26311
|
+
score += 1;
|
|
26312
|
+
return score;
|
|
26313
|
+
}
|
|
26314
|
+
function shouldGate(mode, text) {
|
|
26315
|
+
const risk = detectRiskScore(text);
|
|
26316
|
+
const bounds = detectBoundsScore(text);
|
|
26317
|
+
const graphBias = mode.kind === "graph_run" ? 1 : 0;
|
|
26318
|
+
return risk + graphBias >= 4 && bounds <= 2;
|
|
26319
|
+
}
|
|
26320
|
+
// Gate an incoming run prompt before execution. Returns either
// { decision: "allow" } or a "needs_clarification" payload carrying a
// human-readable reason, a prompt template, suggestions, and follow-up
// questions the client can surface to the user.
function promptGate(mode, input) {
  const text = (input ?? "").trim();
  // All clarification responses share the same envelope; only the reason
  // and the follow-up questions vary between the branches below.
  const clarify = (reason, questions) => ({
    decision: "needs_clarification",
    reason,
    template: genericTemplate(mode),
    suggestions: genericSuggestions(text),
    questions
  });
  if (!text) {
    return clarify("Empty prompt", [
      { id: "goal", prompt: "What is the goal?", example: "Find one lead and draft an intro message" }
    ]);
  }
  if (looksLikeOnlyUrl(text)) {
    return clarify("Prompt is mostly a URL; missing goal/constraints", [
      { id: "task", prompt: "What should I do with this link?", example: "Extract 1 company lead and summarize why it's a fit" },
      { id: "constraints", prompt: "Any constraints (geo/ICP/format)?" }
    ]);
  }
  if (shouldGate(mode, text)) {
    // Name the risk surfaces we detected so the reason is actionable.
    const riskBits = [];
    if (hasAny(text, ["lead", "prospect", "companies"])) {
      riskBits.push("lead finding");
    }
    if (hasAny(text, ["http://", "https://", "link", "url", "website"])) {
      riskBits.push("web access");
    }
    if (hasAny(text, ["crm", "hubspot", "salesforce", "sync", "update", "create"])) {
      riskBits.push("actions/CRM");
    }
    const reason = riskBits.length > 0
      ? `This request may trigger ${riskBits.join(" + ")} without clear limits. Choose a safe mode or add bounds (format, limits, tool policy).`
      : "This request is high-cost without clear limits. Add bounds or choose a safe mode.";
    return clarify(reason, [
      { id: "count", prompt: "How many results do you want?", example: "1" },
      { id: "tools", prompt: "Should I use the web/tools or general knowledge only?", example: "General knowledge only (fast)" },
      { id: "format", prompt: "What output format do you want?", example: "JSON with keys: company, reason, next_step" }
    ]);
  }
  return { decision: "allow" };
}
|
|
26366
|
+
|
|
25971
26367
|
// src/http/handlers.ts
|
|
25972
26368
|
import { readFile as readFile9, writeFile as writeFile5, stat } from "fs/promises";
|
|
25973
26369
|
import { join as join11 } from "path";
|
|
@@ -26004,6 +26400,17 @@ async function handleRun(req, memoryStore) {
|
|
|
26004
26400
|
conversationHistory = getConversationHistoryForRun(memoryStore, conversation_id.trim());
|
|
26005
26401
|
} catch {}
|
|
26006
26402
|
}
|
|
26403
|
+
const gate = promptGate({ kind: "agent_run", agent_id }, task);
|
|
26404
|
+
if (gate.decision === "needs_clarification") {
|
|
26405
|
+
return jsonResponse({
|
|
26406
|
+
success: false,
|
|
26407
|
+
gated: true,
|
|
26408
|
+
reason: gate.reason,
|
|
26409
|
+
template: gate.template,
|
|
26410
|
+
suggestions: gate.suggestions,
|
|
26411
|
+
questions: gate.questions
|
|
26412
|
+
}, 400);
|
|
26413
|
+
}
|
|
26007
26414
|
try {
|
|
26008
26415
|
const result = await runAgent({ agent, task, conversationHistory });
|
|
26009
26416
|
return jsonResponse(result);
|
|
@@ -26034,6 +26441,26 @@ async function handleRunStream(req, memoryStore) {
|
|
|
26034
26441
|
conversationHistory = getConversationHistoryForRun(memoryStore, conversation_id.trim());
|
|
26035
26442
|
} catch {}
|
|
26036
26443
|
}
|
|
26444
|
+
const gate = promptGate({ kind: "agent_run", agent_id }, task);
|
|
26445
|
+
if (gate.decision === "needs_clarification") {
|
|
26446
|
+
let send2 = function(type, data) {
|
|
26447
|
+
return `event: ${type}
|
|
26448
|
+
data: ${JSON.stringify(data)}
|
|
26449
|
+
|
|
26450
|
+
`;
|
|
26451
|
+
};
|
|
26452
|
+
const msg = `${gate.reason}
|
|
26453
|
+
|
|
26454
|
+
${gate.template}`;
|
|
26455
|
+
const encoder2 = new TextEncoder;
|
|
26456
|
+
const stream2 = new ReadableStream({
|
|
26457
|
+
start(controller) {
|
|
26458
|
+
controller.enqueue(encoder2.encode(send2("error", { message: msg, gated: true, suggestions: gate.suggestions, questions: gate.questions })));
|
|
26459
|
+
controller.close();
|
|
26460
|
+
}
|
|
26461
|
+
});
|
|
26462
|
+
return new Response(stream2, { headers: SSE_HEADERS });
|
|
26463
|
+
}
|
|
26037
26464
|
const encoder = new TextEncoder;
|
|
26038
26465
|
function send(type, data) {
|
|
26039
26466
|
return `event: ${type}
|
|
@@ -26110,6 +26537,32 @@ async function handleTasks(req, url) {
|
|
|
26110
26537
|
if (typeof task !== "string") {
|
|
26111
26538
|
return jsonResponse({ error: "Missing required field: task" }, 400);
|
|
26112
26539
|
}
|
|
26540
|
+
if (graph_id && !agent_id) {
|
|
26541
|
+
const gate = promptGate({ kind: "graph_run", graph_id }, task);
|
|
26542
|
+
if (gate.decision === "needs_clarification") {
|
|
26543
|
+
return jsonResponse({
|
|
26544
|
+
success: false,
|
|
26545
|
+
gated: true,
|
|
26546
|
+
reason: gate.reason,
|
|
26547
|
+
template: gate.template,
|
|
26548
|
+
suggestions: gate.suggestions,
|
|
26549
|
+
questions: gate.questions
|
|
26550
|
+
}, 400);
|
|
26551
|
+
}
|
|
26552
|
+
}
|
|
26553
|
+
if (agent_id && !graph_id) {
|
|
26554
|
+
const gate = promptGate({ kind: "agent_run", agent_id }, task);
|
|
26555
|
+
if (gate.decision === "needs_clarification") {
|
|
26556
|
+
return jsonResponse({
|
|
26557
|
+
success: false,
|
|
26558
|
+
gated: true,
|
|
26559
|
+
reason: gate.reason,
|
|
26560
|
+
template: gate.template,
|
|
26561
|
+
suggestions: gate.suggestions,
|
|
26562
|
+
questions: gate.questions
|
|
26563
|
+
}, 400);
|
|
26564
|
+
}
|
|
26565
|
+
}
|
|
26113
26566
|
if (graph_id && !agent_id) {
|
|
26114
26567
|
const t = await enqueueGraphTask(graph_id, task);
|
|
26115
26568
|
return jsonResponse({ task: t }, 202);
|
|
@@ -26370,6 +26823,16 @@ async function handleGraphRun(req) {
|
|
|
26370
26823
|
if (!graph) {
|
|
26371
26824
|
return jsonResponse({ error: `Graph not found: ${graph_id}` }, 404);
|
|
26372
26825
|
}
|
|
26826
|
+
const gate = promptGate({ kind: "graph_run", graph_id }, input);
|
|
26827
|
+
if (gate.decision === "needs_clarification") {
|
|
26828
|
+
return jsonResponse({
|
|
26829
|
+
success: false,
|
|
26830
|
+
gated: true,
|
|
26831
|
+
reason: gate.reason,
|
|
26832
|
+
template: gate.template,
|
|
26833
|
+
questions: gate.questions
|
|
26834
|
+
}, 400);
|
|
26835
|
+
}
|
|
26373
26836
|
try {
|
|
26374
26837
|
const result = await runGraph({ graph, input });
|
|
26375
26838
|
return jsonResponse(result);
|
|
@@ -26390,6 +26853,9 @@ async function handleGraphRunStream(req) {
|
|
|
26390
26853
|
if (!graph) {
|
|
26391
26854
|
return jsonResponse({ error: `Graph not found: ${graph_id}` }, 404);
|
|
26392
26855
|
}
|
|
26856
|
+
if ((process.env.AGENT_OS_DEBUG ?? "").trim() === "1" || (process.env.AGENT_OS_DEBUG_GRAPHS ?? "").trim() === "1") {
|
|
26857
|
+
console.log(`[graph] Graph chat stream requested: graph_id=${graph_id}, input length=${input?.length ?? 0}`);
|
|
26858
|
+
}
|
|
26393
26859
|
const encoder = new TextEncoder;
|
|
26394
26860
|
function send(type, data) {
|
|
26395
26861
|
return `event: ${type}
|
|
@@ -26397,12 +26863,58 @@ data: ${JSON.stringify(data)}
|
|
|
26397
26863
|
|
|
26398
26864
|
`;
|
|
26399
26865
|
}
|
|
26866
|
+
const gate = promptGate({ kind: "graph_run", graph_id }, input);
|
|
26867
|
+
if (gate.decision === "needs_clarification") {
|
|
26868
|
+
const msg = `${gate.reason}
|
|
26869
|
+
|
|
26870
|
+
${gate.template}`;
|
|
26871
|
+
const stream2 = new ReadableStream({
|
|
26872
|
+
start(controller) {
|
|
26873
|
+
controller.enqueue(encoder.encode(send("error", { message: msg, gated: true, questions: gate.questions })));
|
|
26874
|
+
controller.close();
|
|
26875
|
+
}
|
|
26876
|
+
});
|
|
26877
|
+
return new Response(stream2, { headers: SSE_HEADERS });
|
|
26878
|
+
}
|
|
26879
|
+
const KEEPALIVE_INTERVAL_MS = 30000;
|
|
26400
26880
|
const stream = new ReadableStream({
|
|
26401
26881
|
async start(controller) {
|
|
26882
|
+
let closed = false;
|
|
26883
|
+
function safeEnqueue(data) {
|
|
26884
|
+
if (closed)
|
|
26885
|
+
return;
|
|
26886
|
+
try {
|
|
26887
|
+
controller.enqueue(data);
|
|
26888
|
+
} catch {
|
|
26889
|
+
closed = true;
|
|
26890
|
+
}
|
|
26891
|
+
}
|
|
26892
|
+
function safeClose() {
|
|
26893
|
+
if (closed)
|
|
26894
|
+
return;
|
|
26895
|
+
try {
|
|
26896
|
+
controller.close();
|
|
26897
|
+
} catch {}
|
|
26898
|
+
closed = true;
|
|
26899
|
+
}
|
|
26900
|
+
const keepalive = setInterval(() => {
|
|
26901
|
+
if (closed) {
|
|
26902
|
+
clearInterval(keepalive);
|
|
26903
|
+
return;
|
|
26904
|
+
}
|
|
26905
|
+
try {
|
|
26906
|
+
controller.enqueue(encoder.encode(`: keepalive
|
|
26907
|
+
|
|
26908
|
+
`));
|
|
26909
|
+
} catch {
|
|
26910
|
+
clearInterval(keepalive);
|
|
26911
|
+
closed = true;
|
|
26912
|
+
}
|
|
26913
|
+
}, KEEPALIVE_INTERVAL_MS);
|
|
26402
26914
|
try {
|
|
26403
26915
|
await runGraphStream({ graph, input }, (ev) => {
|
|
26404
26916
|
if (ev.type === "node_done") {
|
|
26405
|
-
|
|
26917
|
+
safeEnqueue(encoder.encode(send("node_done", {
|
|
26406
26918
|
node_id: ev.node_id,
|
|
26407
26919
|
agent_id: ev.agent_id,
|
|
26408
26920
|
success: ev.success,
|
|
@@ -26410,20 +26922,21 @@ data: ${JSON.stringify(data)}
|
|
|
26410
26922
|
error: ev.error
|
|
26411
26923
|
})));
|
|
26412
26924
|
} else if (ev.type === "done") {
|
|
26413
|
-
|
|
26925
|
+
safeEnqueue(encoder.encode(send("done", {
|
|
26414
26926
|
success: ev.success,
|
|
26415
26927
|
output: ev.output,
|
|
26416
26928
|
node_results: ev.node_results
|
|
26417
26929
|
})));
|
|
26418
26930
|
} else if (ev.type === "error") {
|
|
26419
|
-
|
|
26931
|
+
safeEnqueue(encoder.encode(send("error", { message: ev.message })));
|
|
26420
26932
|
}
|
|
26421
26933
|
});
|
|
26422
26934
|
} catch (err) {
|
|
26423
26935
|
const msg = errorMessage(err);
|
|
26424
|
-
|
|
26936
|
+
safeEnqueue(encoder.encode(send("error", { message: msg })));
|
|
26425
26937
|
} finally {
|
|
26426
|
-
|
|
26938
|
+
clearInterval(keepalive);
|
|
26939
|
+
safeClose();
|
|
26427
26940
|
}
|
|
26428
26941
|
}
|
|
26429
26942
|
});
|
|
@@ -26684,7 +27197,7 @@ async function getDefaultAgent(config2, defaultAgentIdKey) {
|
|
|
26684
27197
|
}
|
|
26685
27198
|
return agents[0] ?? null;
|
|
26686
27199
|
}
|
|
26687
|
-
async function
|
|
27200
|
+
async function runOneWithConversation(memoryStore, agent, conversationId, userId, task, sendReply) {
|
|
26688
27201
|
memoryStore.ensureConversation(conversationId, agent.id, userId);
|
|
26689
27202
|
const conversationHistory = getConversationHistoryForRun(memoryStore, conversationId);
|
|
26690
27203
|
memoryStore.insertConversationMessage({
|
|
@@ -26705,10 +27218,20 @@ async function runAgentWithConversation(memoryStore, agent, conversationId, user
|
|
|
26705
27218
|
});
|
|
26706
27219
|
await sendReply(output);
|
|
26707
27220
|
}
|
|
27221
|
+
// Run an agent turn for a conversation, serializing runs per conversation
// id: each new task is chained onto the previous run's promise so messages
// in one conversation are processed strictly in order.
//
// Fix: the original version inserted an entry into conversationTails for
// every conversation id and never removed it, so the map grew without
// bound over the lifetime of the server. We now delete the entry once the
// chained run settles and is still the last one queued for that id.
async function runAgentWithConversation(memoryStore, agent, conversationId, userId, task, sendReply) {
  const prev = conversationTails.get(conversationId) ?? Promise.resolve();
  const next = prev
    .then(() => runOneWithConversation(memoryStore, agent, conversationId, userId, task, sendReply))
    .catch((err) => {
      // Log and swallow so a failed run never poisons the chain for
      // subsequent messages in the same conversation.
      console.error(`[channel] conversation ${conversationId} error:`, err);
    })
    .finally(() => {
      // Only clean up when no newer run has been queued behind us;
      // otherwise the newer run's tail entry must stay in place.
      if (conversationTails.get(conversationId) === next) {
        conversationTails.delete(conversationId);
      }
    });
  conversationTails.set(conversationId, next);
  return next;
}
|
|
27229
|
+
// Per-conversation promise tails keyed by conversation id; used by
// runAgentWithConversation to chain runs so each conversation processes
// its messages sequentially. Populated lazily by init_channel_run below.
var conversationTails;
// Bundler-generated (__esm) lazy initializer for the channel-run chunk:
// on first evaluation it initializes the agent registry, runtime, and
// utils modules, then allocates the shared conversationTails map.
// NOTE(review): init call order is preserved as emitted by the bundler.
var init_channel_run = __esm(() => {
  init_agent_registry();
  init_runtime();
  init_utils();
  conversationTails = new Map;
});
|
|
26713
27236
|
|
|
26714
27237
|
// src/http/telegram.ts
|
|
@@ -26834,22 +27357,45 @@ async function handleTelegramWebhook(req, memoryStore) {
|
|
|
26834
27357
|
}
|
|
26835
27358
|
function startTelegramPolling(memoryStore) {
|
|
26836
27359
|
let offset = 0;
|
|
27360
|
+
let webhookDeletedForToken = null;
|
|
27361
|
+
let loggedPollingStarted = false;
|
|
26837
27362
|
async function poll() {
|
|
26838
27363
|
const config2 = await readConfig();
|
|
26839
27364
|
const botToken = config2.telegram_bot_token?.trim();
|
|
26840
27365
|
if (!botToken) {
|
|
27366
|
+
webhookDeletedForToken = null;
|
|
27367
|
+
loggedPollingStarted = false;
|
|
26841
27368
|
setTimeout(poll, POLLING_INTERVAL_MS);
|
|
26842
27369
|
return;
|
|
26843
27370
|
}
|
|
26844
27371
|
try {
|
|
26845
|
-
const
|
|
26846
|
-
const res = await fetch(url);
|
|
27372
|
+
const res = await fetch(`https://api.telegram.org/bot${botToken}/getWebhookInfo`);
|
|
26847
27373
|
if (!res.ok) {
|
|
26848
27374
|
setTimeout(poll, POLLING_INTERVAL_MS);
|
|
26849
27375
|
return;
|
|
26850
27376
|
}
|
|
26851
27377
|
const data = await res.json();
|
|
26852
|
-
const
|
|
27378
|
+
const webhookUrl = data?.result?.url?.trim();
|
|
27379
|
+
if (webhookUrl) {
|
|
27380
|
+
setTimeout(poll, POLLING_INTERVAL_MS);
|
|
27381
|
+
return;
|
|
27382
|
+
}
|
|
27383
|
+
if (webhookDeletedForToken !== botToken) {
|
|
27384
|
+
await fetch(`https://api.telegram.org/bot${botToken}/deleteWebhook`).catch(() => {});
|
|
27385
|
+
webhookDeletedForToken = botToken;
|
|
27386
|
+
}
|
|
27387
|
+
if (!loggedPollingStarted) {
|
|
27388
|
+
console.info("[telegram] polling started (no webhook set; no HTTPS required).");
|
|
27389
|
+
loggedPollingStarted = true;
|
|
27390
|
+
}
|
|
27391
|
+
const url = `https://api.telegram.org/bot${botToken}/getUpdates?offset=${offset}&timeout=${GET_UPDATES_TIMEOUT}`;
|
|
27392
|
+
const getRes = await fetch(url);
|
|
27393
|
+
if (!getRes.ok) {
|
|
27394
|
+
setTimeout(poll, POLLING_INTERVAL_MS);
|
|
27395
|
+
return;
|
|
27396
|
+
}
|
|
27397
|
+
const updateData = await getRes.json();
|
|
27398
|
+
const updates = Array.isArray(updateData?.result) ? updateData.result : [];
|
|
26853
27399
|
for (const update of updates) {
|
|
26854
27400
|
if (update.update_id != null && update.update_id >= offset) {
|
|
26855
27401
|
offset = update.update_id + 1;
|
|
@@ -26863,22 +27409,7 @@ function startTelegramPolling(memoryStore) {
|
|
|
26863
27409
|
}
|
|
26864
27410
|
setTimeout(poll, 0);
|
|
26865
27411
|
}
|
|
26866
|
-
|
|
26867
|
-
const botToken = config2.telegram_bot_token?.trim();
|
|
26868
|
-
if (!botToken)
|
|
26869
|
-
return;
|
|
26870
|
-
const res = await fetch(`https://api.telegram.org/bot${botToken}/getWebhookInfo`);
|
|
26871
|
-
if (!res.ok)
|
|
26872
|
-
return;
|
|
26873
|
-
const data = await res.json();
|
|
26874
|
-
const webhookUrl = data?.result?.url?.trim();
|
|
26875
|
-
if (webhookUrl) {
|
|
26876
|
-
return;
|
|
26877
|
-
}
|
|
26878
|
-
await fetch(`https://api.telegram.org/bot${botToken}/deleteWebhook`).catch(() => {});
|
|
26879
|
-
console.log("[telegram] polling started (no webhook set; no HTTPS required).");
|
|
26880
|
-
poll();
|
|
26881
|
-
}).catch(() => {});
|
|
27412
|
+
poll();
|
|
26882
27413
|
}
|
|
26883
27414
|
var POLLING_INTERVAL_MS = 1000, GET_UPDATES_TIMEOUT = 30;
|
|
26884
27415
|
var init_telegram = __esm(() => {
|
|
@@ -30286,7 +30817,7 @@ async function startServer() {
|
|
|
30286
30817
|
server = Bun.serve({
|
|
30287
30818
|
port: PORT,
|
|
30288
30819
|
hostname: HOST,
|
|
30289
|
-
idleTimeout:
|
|
30820
|
+
idleTimeout: 255,
|
|
30290
30821
|
routes: wrapRouteHandlers(createRoutes(), dashboardSecret),
|
|
30291
30822
|
fetch(req, server2) {
|
|
30292
30823
|
const url = new URL(req.url);
|
|
@@ -30337,7 +30868,7 @@ async function startServer() {
|
|
|
30337
30868
|
}
|
|
30338
30869
|
throw err;
|
|
30339
30870
|
}
|
|
30340
|
-
console.
|
|
30871
|
+
console.info(`Agent OS server running at ${server.url}`);
|
|
30341
30872
|
startTelegramPolling(memoryStore);
|
|
30342
30873
|
}
|
|
30343
30874
|
var PORT, DASHBOARD_DIST, HOST, EVENT_TYPES, wsClients, MEMORY_DB_PATH, memoryStore;
|