daemora 1.0.7 → 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/SOUL.md CHANGED
@@ -12,7 +12,7 @@ You are **Daemora** — the user's personal AI that lives on their machine. You'
12
12
 
13
13
  **You figure things out.** Read the file. Check the context. Run the command. Search for it. Load a skill. Check memory. Only ask when you genuinely need a decision from the user — never ask about things you can discover yourself.
14
14
 
15
- **You talk like a person.** You're not a customer support bot. No "I'd be happy to help!" No "What can I help you with today?" No "I have successfully completed the task." Talk like a capable person who just did something — brief, natural, real. If someone says "hey", say "hey" back. If you sent an email, say what you told them, not the Message ID.
15
+ **You talk like a sharp, friendly coworker.** Warm but efficient. Adapt your energy to the user. Never sound robotic, rehearsed, or corporate.
16
16
 
17
17
  ## What "Done" Means
18
18
 
@@ -153,16 +153,14 @@ Never use phrases like "permission restrictions", "this environment", "access li
153
153
 
154
154
  ## Communication Style
155
155
 
156
- **Talk like a real person texting a coworker. Not a support bot. Not a corporate assistant.**
157
-
158
- - Short, casual, direct. Match the user's energy and tone.
159
- - No preambles, no postambles, no filler. No "Great question!", no "I'd be happy to help!", no "Let me know if there's anything else!".
160
- - Never narrate your own actions. Report results, not process.
161
- - After completing a task, confirm what happened in the user's terms. Never expose internal details like Message IDs, session IDs, task IDs, or tool names.
162
- - When asked about your capabilities, answer conversationally. Don't list tool names or technical internals.
163
- - When asked about sub-agents or specialists, describe them in plain language. Not session IDs or technical keys.
164
- - Greetings get greetings. Acknowledgments get acknowledgments. Don't reach for tools on conversational messages.
165
- - When something failed, say what failed and what you tried. Ask for a decision only if you need one.
156
+ - Be natural, warm, and direct. Match the user's tone and energy.
157
+ - Greetings get greetings. Casual messages get casual responses. Don't reach for tools on conversational messages.
158
+ - Report results in plain language from the user's perspective. Brief — 1-3 sentences unless detail is needed.
159
+ - Never expose internal details in responses: no tool names, IDs, JSON, or technical artifacts.
160
+ - Never use filler phrases, sycophantic openers, or robotic sign-offs.
161
+ - Never narrate your process. Report outcomes, not steps.
162
+ - Never ask permission to proceed. Just do the work. Only confirm before destructive actions.
163
+ - When discussing capabilities, answer conversationally not with technical lists.
166
164
 
167
165
  ## Engineering Principles
168
166
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "daemora",
3
- "version": "1.0.7",
3
+ "version": "1.0.8",
4
4
  "description": "A powerful open-source AI agent that runs on your machine. Connects to any AI model, any MCP server, any channel. Fully autonomous - plans, codes, tests, browses, emails, and manages your tools without asking permission.",
5
5
  "main": "src/index.js",
6
6
  "bin": {
@@ -373,13 +373,14 @@ function renderOperationalGuidelines() {
373
373
  return `# Operational Guidelines
374
374
 
375
375
  ## Tone & Style
376
- - Be concise. 1-3 lines per response. No filler phrases.
377
- - Report what you DID in past tense. Don't narrate tool calls.
378
- - Don't ask "shall I proceed?"just do the work. Only confirm before destructive actions.
376
+ - Natural, warm, direct. Match the user's tone. Never robotic or sycophantic.
377
+ - Final responses: 1-3 sentences. Report outcomes, not process.
378
+ - Casual messages get casual responses — don't reach for tools on conversational input.
379
+ - Never expose internal details (tool names, IDs, JSON) in final responses.
379
380
 
380
381
  ## Understanding Requirements
381
- - Infer implied intent from vague requests. "make it look better" → spacing, typography, contrast, responsive.
382
- - If truly ambiguous (two valid outcomes), ask ONE focused question. Otherwise just do it.
382
+ - Infer implied intent from vague requests.
383
+ - If truly ambiguous, ask ONE focused question. Otherwise just do it.
383
384
  - Match existing code style, patterns, and conventions.
384
385
 
385
386
  ## Workflow: Read → Act → Verify → Fix → Report
package/src/cli.js CHANGED
@@ -1880,114 +1880,42 @@ async function handleChannels() {
1880
1880
 
1881
1881
  async function handleModels() {
1882
1882
  const { select, isCancel } = await import("@clack/prompts");
1883
+ const { models: modelRegistry } = await import("./config/models.js");
1883
1884
  const w = 67;
1884
1885
  const line = chalk.hex(P.cyan)("━".repeat(w));
1885
1886
  const rowLine = chalk.hex(P.border)("─".repeat(w));
1886
1887
 
1887
- const PROVIDERS = [
1888
- {
1889
- name: "OpenAI", prefix: "openai", envKey: "OPENAI_API_KEY",
1890
- models: [
1891
- // GPT-5.4
1892
- { id: "gpt-5.4", desc: "GPT-5.4 flagship", price: "$2.50/$15", isNew: true },
1893
- { id: "gpt-5.4-pro", desc: "GPT-5.4 Pro — highest capability", price: "$30/$180", isNew: true },
1894
- // GPT-5.2
1895
- { id: "gpt-5.2", desc: "GPT-5.2 flagship (Dec 2025)", price: "$1.75/$14", isNew: true },
1896
- { id: "gpt-5.2-pro", desc: "GPT-5.2 Pro — extended reasoning", price: "$21/$168", isNew: true },
1897
- // GPT-5.1 / 5
1898
- { id: "gpt-5.1", desc: "GPT-5.1 (Nov 2025)", price: "$1.25/$10", isNew: true },
1899
- { id: "gpt-5", desc: "GPT-5 flagship (Aug 2025)", price: "$1.25/$10" },
1900
- { id: "gpt-5-pro", desc: "GPT-5 Pro — most powerful", price: "$15/$120" },
1901
- { id: "gpt-5-mini", desc: "GPT-5 Mini — fast & cheap", price: "$0.25/$2" },
1902
- { id: "gpt-5-nano", desc: "GPT-5 Nano — cheapest GPT-5", price: "$0.05/$0.40" },
1903
- // Codex
1904
- { id: "gpt-5.3-codex", desc: "Latest coding model (2025)", price: "$1.75/$14", isNew: true },
1905
- { id: "gpt-5.1-codex", desc: "GPT-5.1 Codex — coding", price: "$1.25/$10", isNew: true },
1906
- { id: "gpt-5-codex", desc: "GPT-5 Codex — coding", price: "$1.25/$10" },
1907
- // o-series reasoning
1908
- { id: "o3-pro", desc: "Best reasoning — most thorough", price: "$20/$80" },
1909
- { id: "o3", desc: "Advanced reasoning (Apr 2025)", price: "$2/$8" },
1910
- { id: "o4-mini", desc: "Fast reasoning (Apr 2025)", price: "$1.10/$4.40" },
1911
- { id: "o1-pro", desc: "o1 Pro powerful reasoning", price: "$150/$600" },
1912
- { id: "o1", desc: "o1 reasoning model", price: "$15/$60" },
1913
- { id: "o3-mini", desc: "Lightweight reasoning", price: "$1.10/$4.40" },
1914
- // GPT-4.1 (1M context)
1915
- { id: "gpt-4.1", desc: "1M context, best instruction following", price: "$2/$8" },
1916
- { id: "gpt-4.1-mini", desc: "1M context, fast & affordable (default)", price: "$0.40/$1.60" },
1917
- { id: "gpt-4.1-nano", desc: "1M context, fastest & cheapest", price: "$0.10/$0.40" },
1918
- // GPT-4o & specialized
1919
- { id: "gpt-4o", desc: "Vision + text (128K ctx)", price: "$2.50/$10" },
1920
- { id: "gpt-4o-mini", desc: "GPT-4o Mini (128K ctx)", price: "$0.15/$0.60" },
1921
- { id: "computer-use-preview", desc: "Computer use / GUI automation", price: "$3/$12" },
1922
- ],
1923
- },
1924
- {
1925
- name: "Anthropic", prefix: "anthropic", envKey: "ANTHROPIC_API_KEY",
1926
- models: [
1927
- { id: "claude-opus-4-6", desc: "Most intelligent — complex reasoning", price: "$5/$25", isNew: true },
1928
- { id: "claude-opus-4-5", desc: "Opus 4.5 — complex multi-step tasks", price: "$5/$25", isNew: true },
1929
- { id: "claude-opus-4-1", desc: "Opus 4.1 — long-duration complex tasks", price: "$15/$75" },
1930
- { id: "claude-opus-4", desc: "Opus 4 — extended thinking", price: "$15/$75" },
1931
- { id: "claude-sonnet-4-6", desc: "Best speed/intelligence — coding & agents", price: "$3/$15", isNew: true },
1932
- { id: "claude-sonnet-4-5", desc: "Sonnet 4.5 — coding & agentic tasks", price: "$3/$15" },
1933
- { id: "claude-sonnet-4", desc: "Sonnet 4 — balanced performance", price: "$3/$15" },
1934
- { id: "claude-haiku-4-5", desc: "Fastest — high-volume, cost-sensitive", price: "$1/$5" },
1935
- { id: "claude-haiku-3-5", desc: "3.5 Haiku — fast previous gen", price: "$0.80/$4" },
1936
- { id: "claude-haiku-3", desc: "Haiku 3 — cheapest Claude", price: "$0.25/$1.25" },
1937
- ],
1938
- },
1939
- {
1940
- name: "Google", prefix: "google", envKey: "GOOGLE_AI_API_KEY",
1941
- models: [
1942
- { id: "gemini-3.1-pro-preview", desc: "Latest — complex tasks, reasoning", price: "$2/$12", isNew: true },
1943
- { id: "gemini-3.1-flash-lite-preview", desc: "Latest — cost-efficient & fast", price: "$0.25/$1.50", isNew: true },
1944
- { id: "gemini-3-pro-preview", desc: "Gemini 3 Pro — advanced reasoning", price: "$2/$12", isNew: true },
1945
- { id: "gemini-3-flash-preview", desc: "Gemini 3 Flash — fast & cheap", price: "$0.50/$3", isNew: true },
1946
- { id: "gemini-2.5-pro", desc: "GA — complex reasoning & coding (1M)", price: "$1.25/$10" },
1947
- { id: "gemini-2.5-flash", desc: "Fast & cost-effective for high-volume", price: "$0.30/$2.50" },
1948
- { id: "gemini-2.5-flash-lite", desc: "Speed-optimised for high-throughput", price: "$0.10/$0.40" },
1949
- { id: "gemini-2.0-flash", desc: "Previous gen flash", price: "$0.15/$0.60" },
1950
- { id: "gemini-2.0-flash-lite", desc: "Cheapest Gemini", price: "$0.075/$0.30" },
1951
- ],
1952
- },
1953
- {
1954
- name: "xAI", prefix: "xai", envKey: "XAI_API_KEY",
1955
- models: [
1956
- { id: "grok-4", desc: "Grok 4 — latest & most capable (Jul 2025)", isNew: true },
1957
- { id: "grok-3-beta", desc: "Grok 3 Beta — 131K ctx" },
1958
- { id: "grok-3-mini-beta", desc: "Grok 3 Mini — fast, 131K ctx" },
1959
- ],
1960
- },
1961
- {
1962
- name: "DeepSeek", prefix: "deepseek", envKey: "DEEPSEEK_API_KEY",
1963
- models: [
1964
- { id: "deepseek-chat", desc: "DeepSeek V3 — excellent coder (128K ctx)" },
1965
- { id: "deepseek-reasoner", desc: "DeepSeek R1 — chain-of-thought reasoning" },
1966
- ],
1967
- },
1968
- {
1969
- name: "Mistral", prefix: "mistral", envKey: "MISTRAL_API_KEY",
1970
- models: [
1971
- { id: "mistral-large-2512", desc: "Flagship — best quality (Dec 2025)", isNew: true },
1972
- { id: "mistral-medium-3", desc: "Balanced capability & speed (May 2025)" },
1973
- { id: "codestral-2508", desc: "Code specialist (Aug 2025)" },
1974
- { id: "mistral-small-3.2-24b", desc: "Lightweight, runs locally (24B params)" },
1975
- ],
1976
- },
1977
- {
1978
- name: "Ollama (local)", prefix: "ollama", configured: true,
1979
- models: [
1980
- { id: "llama4-maverick", desc: "Llama 4 Maverick — 17B MoE, 1M ctx, multimodal", price: "free", isNew: true },
1981
- { id: "llama4-scout", desc: "Llama 4 Scout — 17B MoE, 10M ctx", price: "free", isNew: true },
1982
- { id: "llama3.3", desc: "Llama 3.3 70B — best open model (Dec 2024)", price: "free" },
1983
- { id: "qwen2.5", desc: "Qwen 2.5 72B — strong coder", price: "free" },
1984
- { id: "deepseek-r1", desc: "DeepSeek-R1 local — reasoning", price: "free" },
1985
- { id: "mistral", desc: "Mistral 7B — fast small model", price: "free" },
1986
- { id: "phi4", desc: "Phi-4 14B — Microsoft small model", price: "free" },
1987
- { id: "codellama", desc: "CodeLlama — code specialised", price: "free" },
1988
- ],
1989
- },
1990
- ];
1888
+ // ── Build providers dynamically from model registry ─────────────────────
1889
+ const providerEnvKeys = {
1890
+ openai: "OPENAI_API_KEY", anthropic: "ANTHROPIC_API_KEY", google: "GOOGLE_AI_API_KEY",
1891
+ xai: "XAI_API_KEY", deepseek: "DEEPSEEK_API_KEY", mistral: "MISTRAL_API_KEY", ollama: null,
1892
+ };
1893
+ const providerNames = {
1894
+ openai: "OpenAI", anthropic: "Anthropic", google: "Google", xai: "xAI",
1895
+ deepseek: "DeepSeek", mistral: "Mistral", ollama: "Ollama (local)",
1896
+ };
1897
+
1898
+ const providerMap = new Map();
1899
+ for (const [fullId, meta] of Object.entries(modelRegistry)) {
1900
+ const prov = meta.provider;
1901
+ if (!providerMap.has(prov)) {
1902
+ providerMap.set(prov, {
1903
+ name: providerNames[prov] || prov,
1904
+ prefix: prov,
1905
+ envKey: providerEnvKeys[prov] || `${prov.toUpperCase()}_API_KEY`,
1906
+ configured: prov === "ollama" ? true : undefined,
1907
+ models: [],
1908
+ });
1909
+ }
1910
+ const inputPrice = meta.costPer1kInput ? `$${(meta.costPer1kInput * 1000).toFixed(2)}` : null;
1911
+ const outputPrice = meta.costPer1kOutput ? `$${(meta.costPer1kOutput * 1000).toFixed(2)}` : null;
1912
+ const price = prov === "ollama" ? "free" : (inputPrice && outputPrice ? `${inputPrice}/${outputPrice}` : null);
1913
+ const ctx = meta.contextWindow ? `${Math.round(meta.contextWindow / 1000)}K ctx` : "";
1914
+ const caps = (meta.capabilities || []).filter(c => c !== "text" && c !== "tools").join(", ");
1915
+ const desc = [caps, ctx].filter(Boolean).join(" · ") || meta.model;
1916
+ providerMap.get(prov).models.push({ id: meta.model, desc, price });
1917
+ }
1918
+ const PROVIDERS = [...providerMap.values()];
1991
1919
 
1992
1920
  const routingRows = [
1993
1921
  ["DEFAULT_MODEL", process.env.DEFAULT_MODEL || chalk.hex(P.muted)("openai:gpt-4.1-mini (built-in default)")],
@@ -214,6 +214,20 @@ class MCPManager {
214
214
  }
215
215
  }
216
216
 
217
+ // Check args for placeholder patterns (e.g. connection strings, paths)
218
+ if (cfg.args && Array.isArray(cfg.args)) {
219
+ const hasArgPlaceholder = cfg.args.some(v =>
220
+ typeof v === "string" && (
221
+ /user:pass@/i.test(v) || /\/Users\/you\//i.test(v) || /YOUR_/i.test(v)
222
+ || /your-.*-here/i.test(v) || /example\.com/i.test(v) || /changeme/i.test(v)
223
+ )
224
+ );
225
+ if (hasArgPlaceholder) {
226
+ console.log(`[MCPManager] Skipping "${name}" - args contain placeholder values. Configure via UI or CLI.`);
227
+ return false;
228
+ }
229
+ }
230
+
217
231
  if (cfg.headers) {
218
232
  const expandedHeaders = Object.entries(cfg.headers).map(([k, v]) => {
219
233
  if (typeof v === "string") {