0agent 1.0.6 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/daemon.mjs +137 -4
- package/package.json +1 -1
package/dist/daemon.mjs
CHANGED
|
@@ -1691,10 +1691,12 @@ var SessionManager = class {
|
|
|
1691
1691
|
inferenceEngine;
|
|
1692
1692
|
eventBus;
|
|
1693
1693
|
graph;
|
|
1694
|
+
llm;
|
|
1694
1695
|
constructor(deps = {}) {
|
|
1695
1696
|
this.inferenceEngine = deps.inferenceEngine;
|
|
1696
1697
|
this.eventBus = deps.eventBus;
|
|
1697
1698
|
this.graph = deps.graph;
|
|
1699
|
+
this.llm = deps.llm;
|
|
1698
1700
|
}
|
|
1699
1701
|
/**
|
|
1700
1702
|
* Create a new session with status 'pending'.
|
|
@@ -1862,9 +1864,25 @@ var SessionManager = class {
|
|
|
1862
1864
|
} else {
|
|
1863
1865
|
this.addStep(session.id, "No inference engine connected \u2014 executing task directly");
|
|
1864
1866
|
}
|
|
1865
|
-
this.addStep(session.id, "
|
|
1866
|
-
|
|
1867
|
-
this.
|
|
1867
|
+
this.addStep(session.id, "Calling LLM\u2026");
|
|
1868
|
+
let output = "";
|
|
1869
|
+
if (this.llm?.isConfigured) {
|
|
1870
|
+
try {
|
|
1871
|
+
const systemPrompt = enrichedReq.context?.system_context ? String(enrichedReq.context.system_context) : `You are 0agent, a helpful AI assistant. Complete the user's task directly and concisely. If the task involves creating files, writing code, or running commands, provide the exact output needed.`;
|
|
1872
|
+
const llmRes = await this.llm.complete([
|
|
1873
|
+
{ role: "user", content: enrichedReq.task }
|
|
1874
|
+
], systemPrompt);
|
|
1875
|
+
output = llmRes.content;
|
|
1876
|
+
this.addStep(session.id, `LLM responded (${llmRes.tokens_used} tokens, ${llmRes.model})`);
|
|
1877
|
+
} catch (llmErr) {
|
|
1878
|
+
const msg = llmErr instanceof Error ? llmErr.message : String(llmErr);
|
|
1879
|
+
this.addStep(session.id, `LLM error: ${msg}`);
|
|
1880
|
+
output = `Error calling LLM: ${msg}`;
|
|
1881
|
+
}
|
|
1882
|
+
} else {
|
|
1883
|
+
output = session.plan?.reasoning ?? "No LLM configured \u2014 add API key to ~/.0agent/config.yaml";
|
|
1884
|
+
this.addStep(session.id, "No LLM configured (set api_key in ~/.0agent/config.yaml)");
|
|
1885
|
+
}
|
|
1868
1886
|
this.completeSession(session.id, {
|
|
1869
1887
|
output,
|
|
1870
1888
|
plan: session.plan ?? null,
|
|
@@ -2530,6 +2548,107 @@ var HTTPServer = class {
|
|
|
2530
2548
|
}
|
|
2531
2549
|
};
|
|
2532
2550
|
|
|
2551
|
+
// packages/daemon/src/LLMExecutor.ts
//
// Thin multi-provider LLM client. Routes a chat-completion request to the
// provider named in `config.provider` and normalises every reply to the
// shape `{ content, tokens_used, model }`.
var LLMExecutor = class {
  /**
   * @param {{provider: string, model: string, api_key?: string, base_url?: string}} config
   */
  constructor(config) {
    this.config = config;
  }
  /**
   * Dispatch a completion request to the configured provider.
   * Unrecognised providers fall through to the OpenAI-compatible path.
   * @param {Array<{role: string, content: string}>} messages
   * @param {string} [system] - optional system prompt; takes precedence over
   *   any system-role entry in `messages`
   * @returns {Promise<{content: string, tokens_used: number, model: string}>}
   */
  async complete(messages, system) {
    const provider = this.config.provider;
    if (provider === "anthropic") return this.callAnthropic(messages, system);
    if (provider === "ollama") return this.callOllama(messages, system);
    if (provider === "xai") return this.callOpenAI(messages, system, "https://api.x.ai/v1");
    if (provider === "gemini") return this.callOpenAI(messages, system, "https://generativelanguage.googleapis.com/v1beta/openai");
    // "openai" and any unknown provider both use the OpenAI-compatible API.
    return this.callOpenAI(messages, system);
  }
  // Anthropic Messages API: the system prompt travels as a top-level field,
  // not as a message, so system-role entries are stripped from the list.
  async callAnthropic(messages, system) {
    const chat = messages
      .filter((m) => m.role !== "system")
      .map(({ role, content }) => ({ role, content }));
    const payload = {
      model: this.config.model,
      max_tokens: 8192,
      messages: chat
    };
    if (system) payload.system = system;
    else {
      // Fall back to the first system-role message, if any.
      const sysMsg = messages.find((m) => m.role === "system");
      if (sysMsg) payload.system = sysMsg.content;
    }
    const res = await fetch("https://api.anthropic.com/v1/messages", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": this.config.api_key,
        "anthropic-version": "2023-06-01"
      },
      body: JSON.stringify(payload)
    });
    if (!res.ok) {
      const err = await res.text();
      throw new Error(`Anthropic API error ${res.status}: ${err}`);
    }
    const data = await res.json();
    // Concatenate all text parts of the response; skip tool-use blocks etc.
    const text = data.content
      .filter((c) => c.type === "text")
      .map((c) => c.text)
      .join("");
    return {
      content: text,
      tokens_used: (data.usage?.input_tokens ?? 0) + (data.usage?.output_tokens ?? 0),
      model: data.model
    };
  }
  // OpenAI-compatible /chat/completions endpoint, also used for xAI, Gemini
  // and unknown providers. `config.base_url` overrides the default host.
  async callOpenAI(messages, system, baseUrl = "https://api.openai.com/v1") {
    const sysContent = system ?? messages.find((m) => m.role === "system")?.content;
    const chat = messages
      .filter((m) => m.role !== "system")
      .map(({ role, content }) => ({ role, content }));
    const allMessages = sysContent ? [{ role: "system", content: sysContent }, ...chat] : chat;
    const res = await fetch(`${this.config.base_url ?? baseUrl}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${this.config.api_key}`
      },
      body: JSON.stringify({
        model: this.config.model,
        messages: allMessages,
        max_tokens: 8192
      })
    });
    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenAI API error ${res.status}: ${err}`);
    }
    const data = await res.json();
    return {
      content: data.choices[0]?.message?.content ?? "",
      tokens_used: data.usage?.total_tokens ?? 0,
      model: data.model
    };
  }
  // Local Ollama server; needs no API key. `stream: false` requests one
  // JSON reply instead of an event stream.
  async callOllama(messages, system) {
    const host = this.config.base_url ?? "http://localhost:11434";
    const sysContent = system ?? messages.find((m) => m.role === "system")?.content;
    const chat = messages.filter((m) => m.role !== "system");
    const allMessages = sysContent ? [{ role: "system", content: sysContent }, ...chat] : chat;
    const res = await fetch(`${host}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: this.config.model, messages: allMessages, stream: false })
    });
    if (!res.ok) throw new Error(`Ollama error ${res.status}`);
    const data = await res.json();
    return { content: data.message.content, tokens_used: data.eval_count ?? 0, model: this.config.model };
  }
  // Ollama runs locally and needs no key; every other provider requires a
  // non-blank api_key.
  get isConfigured() {
    return this.config.provider === "ollama" || !!this.config.api_key?.trim();
  }
};
|
|
2651
|
+
|
|
2533
2652
|
// packages/daemon/src/ZeroAgentDaemon.ts
|
|
2534
2653
|
var ZeroAgentDaemon = class {
|
|
2535
2654
|
config = null;
|
|
@@ -2562,10 +2681,24 @@ var ZeroAgentDaemon = class {
|
|
|
2562
2681
|
this.inferenceEngine = new InferenceEngine(this.graph, resolver, policy);
|
|
2563
2682
|
this.skillRegistry = new SkillRegistry();
|
|
2564
2683
|
await this.skillRegistry.loadAll();
|
|
2684
|
+
const defaultLLM = this.config.llm_providers.find((p) => p.is_default) ?? this.config.llm_providers[0];
|
|
2685
|
+
const llmExecutor = defaultLLM ? new LLMExecutor({
|
|
2686
|
+
provider: defaultLLM.provider,
|
|
2687
|
+
model: defaultLLM.model,
|
|
2688
|
+
api_key: defaultLLM.api_key ?? "",
|
|
2689
|
+
base_url: defaultLLM.base_url
|
|
2690
|
+
}) : void 0;
|
|
2691
|
+
if (llmExecutor?.isConfigured) {
|
|
2692
|
+
console.log(`[0agent] LLM: ${defaultLLM?.provider}/${defaultLLM?.model}`);
|
|
2693
|
+
} else {
|
|
2694
|
+
console.warn("[0agent] No LLM API key configured \u2014 tasks will not call the LLM");
|
|
2695
|
+
}
|
|
2565
2696
|
this.eventBus = new WebSocketEventBus();
|
|
2566
2697
|
this.sessionManager = new SessionManager({
|
|
2567
2698
|
inferenceEngine: this.inferenceEngine,
|
|
2568
|
-
eventBus: this.eventBus
|
|
2699
|
+
eventBus: this.eventBus,
|
|
2700
|
+
graph: this.graph,
|
|
2701
|
+
llm: llmExecutor
|
|
2569
2702
|
});
|
|
2570
2703
|
this.backgroundWorkers = new BackgroundWorkers({
|
|
2571
2704
|
graph: this.graph,
|