neoagent 1.5.0 → 1.5.2

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "neoagent",
3
- "version": "1.5.0",
3
+ "version": "1.5.2",
4
4
  "description": "Proactive personal AI agent with no limits",
5
5
  "license": "MIT",
6
6
  "main": "server/index.js",
@@ -593,6 +593,13 @@
593
593
  <span>Create disabled draft skills from successful multi-step runs</span>
594
594
  </label>
595
595
  </div>
596
+ <div class="form-group">
597
+ <label class="form-label">Smart Selection</label>
598
+ <label class="flex items-center gap-2" style="cursor:pointer;">
599
+ <input type="checkbox" id="settingSmarterModelSelector" autocomplete="off" data-bwignore="true">
600
+ <span>Automatically select best model based on task type (coding, planning, etc.)</span>
601
+ </label>
602
+ </div>
596
603
  <div class="form-group">
597
604
  <label class="form-label">Default Chat Model</label>
598
605
  <select id="settingDefaultChatModel" class="input" autocomplete="off" data-bwignore="true">
@@ -605,6 +612,13 @@
605
612
  <option value="auto">Smart Selector (Auto)</option>
606
613
  </select>
607
614
  </div>
615
+ <div class="form-group">
616
+ <label class="form-label">Fallback Model</label>
617
+ <select id="settingFallbackModelId" class="input" autocomplete="off" data-bwignore="true">
618
+ <option value="gpt-5-nano">GPT-5 Nano (Default Cloud Fallback)</option>
619
+ </select>
620
+ <small class="text-muted" style="font-size: 11px; display: block; margin-top: 4px;">Used if the primary model fails or is offline.</small>
621
+ </div>
608
622
  <div class="form-group">
609
623
  <label class="form-label">Smart Selector Allowed Models</label>
610
624
  <div id="modelCheckboxesContainer" style="display:flex; flex-direction:column; gap:8px;">
@@ -1849,6 +1849,9 @@ $("#settingsBtn").addEventListener("click", async () => {
1849
1849
  $("#settingAutoSkillLearning").checked =
1850
1850
  settings.auto_skill_learning !== false &&
1851
1851
  settings.auto_skill_learning !== "false";
1852
+ $("#settingSmarterModelSelector").checked =
1853
+ settings.smarter_model_selector !== false &&
1854
+ settings.smarter_model_selector !== "false";
1852
1855
 
1853
1856
  const enabledModels = Array.isArray(settings.enabled_models) ? settings.enabled_models : (meta.models || []).map(m => m.id);
1854
1857
 
@@ -1858,6 +1861,8 @@ $("#settingsBtn").addEventListener("click", async () => {
1858
1861
  if (chatModelSelect && subagentModelSelect && meta.models) {
1859
1862
  chatModelSelect.innerHTML = '<option value="auto">Smart Selector (Auto)</option>';
1860
1863
  subagentModelSelect.innerHTML = '<option value="auto">Smart Selector (Auto)</option>';
1864
+ const fallbackModelSelect = $("#settingFallbackModelId");
1865
+ if (fallbackModelSelect) fallbackModelSelect.innerHTML = "";
1861
1866
 
1862
1867
  for (const modelDef of meta.models) {
1863
1868
  const chatOption = document.createElement("option");
@@ -1869,10 +1874,20 @@ $("#settingsBtn").addEventListener("click", async () => {
1869
1874
  subagentOption.value = modelDef.id;
1870
1875
  subagentOption.textContent = modelDef.label;
1871
1876
  subagentModelSelect.appendChild(subagentOption);
1877
+
1878
+ if (fallbackModelSelect) {
1879
+ const fallbackOption = document.createElement("option");
1880
+ fallbackOption.value = modelDef.id;
1881
+ fallbackOption.textContent = modelDef.label;
1882
+ fallbackModelSelect.appendChild(fallbackOption);
1883
+ }
1872
1884
  }
1873
1885
 
1874
1886
  chatModelSelect.value = settings.default_chat_model || "auto";
1875
1887
  subagentModelSelect.value = settings.default_subagent_model || "auto";
1888
+ if ($("#settingFallbackModelId")) {
1889
+ $("#settingFallbackModelId").value = settings.fallback_model_id || "gpt-5-nano";
1890
+ }
1876
1891
 
1877
1892
  const indicator = $("#modelIndicator");
1878
1893
  if (indicator) {
@@ -1947,9 +1962,11 @@ $("#saveSettings").addEventListener("click", async () => {
1947
1962
  heartbeat_enabled: $("#settingHeartbeat").checked,
1948
1963
  headless_browser: $("#settingHeadlessBrowser").checked,
1949
1964
  auto_skill_learning: $("#settingAutoSkillLearning").checked,
1965
+ smarter_model_selector: $("#settingSmarterModelSelector").checked,
1950
1966
  enabled_models: enabledModels,
1951
1967
  default_chat_model: defaultChatModel,
1952
- default_subagent_model: defaultSubagentModel
1968
+ default_subagent_model: defaultSubagentModel,
1969
+ fallback_model_id: $("#settingFallbackModelId") ? $("#settingFallbackModelId").value : 'gpt-5-nano'
1953
1970
  },
1954
1971
  });
1955
1972
 
@@ -29,9 +29,10 @@ function readUpdateStatus() {
29
29
  }
30
30
 
31
31
  // Get supported models metadata
32
- router.get('/meta/models', (req, res) => {
33
- const { SUPPORTED_MODELS } = require('../services/ai/models');
34
- res.json({ models: SUPPORTED_MODELS });
32
+ router.get('/meta/models', async (req, res) => {
33
+ const { getSupportedModels } = require('../services/ai/models');
34
+ const models = await getSupportedModels();
35
+ res.json({ models });
35
36
  });
36
37
 
37
38
  // Get all settings
@@ -18,16 +18,19 @@ function generateTitle(task) {
18
18
  return cleaned.slice(0, 90);
19
19
  }
20
20
 
21
- function getProviderForUser(userId, task = '', isSubagent = false, modelOverride = null) {
22
- const { SUPPORTED_MODELS, createProviderInstance } = require('./models');
21
+ async function getProviderForUser(userId, task = '', isSubagent = false, modelOverride = null) {
22
+ const { getSupportedModels, createProviderInstance } = require('./models');
23
+ const models = await getSupportedModels();
23
24
 
24
25
  let enabledIds = [];
25
26
  let defaultChatModel = 'auto';
26
27
  let defaultSubagentModel = 'auto';
27
28
 
29
+ let smarterSelection = true;
30
+
28
31
  try {
29
- const rows = db.prepare('SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?, ?)')
30
- .all(userId, 'enabled_models', 'default_chat_model', 'default_subagent_model');
32
+ const rows = db.prepare('SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?, ?, ?)')
33
+ .all(userId, 'enabled_models', 'default_chat_model', 'default_subagent_model', 'smarter_model_selector');
31
34
 
32
35
  for (const row of rows) {
33
36
  if (!row.value) continue;
@@ -40,22 +43,23 @@ function getProviderForUser(userId, task = '', isSubagent = false, modelOverride
40
43
  if (row.key === 'enabled_models') enabledIds = parsedVal;
41
44
  if (row.key === 'default_chat_model') defaultChatModel = parsedVal;
42
45
  if (row.key === 'default_subagent_model') defaultSubagentModel = parsedVal;
46
+ if (row.key === 'smarter_model_selector') smarterSelection = parsedVal !== false && parsedVal !== 'false';
43
47
  }
44
48
  } catch (e) {
45
49
  console.error('Failed to fetch model settings:', e.message);
46
50
  }
47
51
 
48
52
  if (!Array.isArray(enabledIds) || enabledIds.length === 0) {
49
- enabledIds = SUPPORTED_MODELS.map((m) => m.id);
53
+ enabledIds = models.map((m) => m.id);
50
54
  }
51
55
 
52
- const availableModels = SUPPORTED_MODELS.filter((m) => enabledIds.includes(m.id));
53
- const fallbackModel = availableModels.length > 0 ? availableModels[0] : SUPPORTED_MODELS[0];
56
+ const availableModels = models.filter((m) => enabledIds.includes(m.id));
57
+ const fallbackModel = availableModels.length > 0 ? availableModels[0] : models[0];
54
58
  let selectedModelDef = fallbackModel;
55
59
  const userSelectedDefault = isSubagent ? defaultSubagentModel : defaultChatModel;
56
60
 
57
61
  if (modelOverride && typeof modelOverride === 'string') {
58
- const requested = SUPPORTED_MODELS.find((m) => m.id === modelOverride.trim());
62
+ const requested = models.find((m) => m.id === modelOverride.trim());
59
63
  if (requested && enabledIds.includes(requested.id)) {
60
64
  selectedModelDef = requested;
61
65
  return {
@@ -67,12 +71,24 @@ function getProviderForUser(userId, task = '', isSubagent = false, modelOverride
67
71
  }
68
72
 
69
73
  if (userSelectedDefault && userSelectedDefault !== 'auto') {
70
- selectedModelDef = SUPPORTED_MODELS.find((m) => m.id === userSelectedDefault) || fallbackModel;
74
+ selectedModelDef = models.find((m) => m.id === userSelectedDefault) || fallbackModel;
71
75
  } else {
72
76
  const taskStr = String(task || '').toLowerCase();
73
- const isPlanning = /\b(plan|think|analy[sz]e|complex|step by step)\b/.test(taskStr);
77
+
78
+ // Basic detection
79
+ let isPlanning = /\b(plan|think|analy[sz]e|complex|step by step)\b/.test(taskStr);
80
+ let isCoding = false;
81
+
82
+ // Enhanced detection if enabled
83
+ if (smarterSelection) {
84
+ isPlanning = isPlanning || /\b(reason|strategy|logical|math|complex)\b/.test(taskStr);
85
+ isCoding = /\b(code|program|script|debug|refactor|function|implementation|logic)\b/.test(taskStr);
86
+ }
87
+
74
88
  if (isPlanning) {
75
89
  selectedModelDef = availableModels.find((m) => m.purpose === 'planning') || fallbackModel;
90
+ } else if (isCoding) {
91
+ selectedModelDef = availableModels.find((m) => m.purpose === 'coding') || availableModels.find((m) => m.purpose === 'planning') || fallbackModel;
76
92
  } else if (isSubagent) {
77
93
  selectedModelDef = availableModels.find((m) => m.purpose === 'fast') || fallbackModel;
78
94
  } else {
@@ -225,7 +241,7 @@ class AgentEngine {
225
241
  const triggerType = options.triggerType || 'user';
226
242
  ensureDefaultAiSettings(userId);
227
243
  const aiSettings = getAiSettings(userId);
228
- const { provider, model, providerName } = getProviderForUser(userId, userMessage, triggerType === 'subagent', _modelOverride);
244
+ const { provider, model, providerName } = await getProviderForUser(userId, userMessage, triggerType === 'subagent', _modelOverride);
229
245
 
230
246
  const runId = options.runId || uuidv4();
231
247
  const conversationId = options.conversationId;
@@ -302,28 +318,70 @@ class AgentEngine {
302
318
  let streamContent = '';
303
319
  const callOptions = { model, reasoningEffort: this.getReasoningEffort(providerName, options) };
304
320
 
305
- if (options.stream !== false) {
306
- const gen = provider.stream(messages, tools, callOptions);
307
- for await (const chunk of gen) {
308
- if (chunk.type === 'content') {
309
- streamContent += chunk.content;
310
- this.emit(userId, 'run:stream', { runId, content: streamContent, iteration });
311
- }
312
- if (chunk.type === 'done') {
313
- response = chunk;
321
+ const tryModelCall = async (retryForFallback = true) => {
322
+ try {
323
+ if (options.stream !== false) {
324
+ const gen = provider.stream(messages, tools, callOptions);
325
+ for await (const chunk of gen) {
326
+ if (chunk.type === 'content') {
327
+ streamContent += chunk.content;
328
+ this.emit(userId, 'run:stream', { runId, content: streamContent, iteration });
329
+ }
330
+ if (chunk.type === 'done') {
331
+ response = chunk;
332
+ }
333
+ if (chunk.type === 'tool_calls') {
334
+ response = {
335
+ content: chunk.content || streamContent,
336
+ toolCalls: chunk.toolCalls,
337
+ finishReason: 'tool_calls',
338
+ usage: chunk.usage || null
339
+ };
340
+ }
341
+ }
342
+ } else {
343
+ response = await provider.chat(messages, tools, callOptions);
314
344
  }
315
- if (chunk.type === 'tool_calls') {
316
- response = {
317
- content: chunk.content || streamContent,
318
- toolCalls: chunk.toolCalls,
319
- finishReason: 'tool_calls',
320
- usage: chunk.usage || null
321
- };
345
+ } catch (err) {
346
+ console.error(`[Engine] Model call failed (${model}):`, err.message);
347
+ if (retryForFallback && aiSettings.fallback_model_id && aiSettings.fallback_model_id !== model) {
348
+ console.log(`[Engine] Attempting fallback to: ${aiSettings.fallback_model_id}`);
349
+ const fallback = await getProviderForUser(userId, userMessage, triggerType === 'subagent', aiSettings.fallback_model_id);
350
+ // Update local state for the retry
351
+ const nextProvider = fallback.provider;
352
+ const nextModel = fallback.model;
353
+ const nextProviderName = fallback.providerName;
354
+
355
+ // Recursive call once
356
+ const retryOptions = { ...callOptions, model: nextModel, reasoningEffort: this.getReasoningEffort(nextProviderName, options) };
357
+
358
+ if (options.stream !== false) {
359
+ const gen = nextProvider.stream(messages, tools, retryOptions);
360
+ for await (const chunk of gen) {
361
+ if (chunk.type === 'content') {
362
+ streamContent += chunk.content;
363
+ this.emit(userId, 'run:stream', { runId, content: streamContent, iteration });
364
+ }
365
+ if (chunk.type === 'done') response = chunk;
366
+ if (chunk.type === 'tool_calls') {
367
+ response = {
368
+ content: chunk.content || streamContent,
369
+ toolCalls: chunk.toolCalls,
370
+ finishReason: 'tool_calls',
371
+ usage: chunk.usage || null
372
+ };
373
+ }
374
+ }
375
+ } else {
376
+ response = await nextProvider.chat(messages, tools, retryOptions);
377
+ }
378
+ } else {
379
+ throw err;
322
380
  }
323
381
  }
324
- } else {
325
- response = await provider.chat(messages, tools, callOptions);
326
- }
382
+ };
383
+
384
+ await tryModelCall();
327
385
 
328
386
  if (!response) {
329
387
  response = { content: streamContent, toolCalls: [], finishReason: 'stop', usage: null };
@@ -1,8 +1,9 @@
1
1
  const { GrokProvider } = require('./providers/grok');
2
2
  const { OpenAIProvider } = require('./providers/openai');
3
3
  const { GoogleProvider } = require('./providers/google');
4
+ const { OllamaProvider } = require('./providers/ollama');
4
5
 
5
- const SUPPORTED_MODELS = [
6
+ const STATIC_MODELS = [
6
7
  {
7
8
  id: 'grok-4-1-fast-reasoning',
8
9
  label: 'Grok 4.1 (Personality / Default)',
@@ -26,9 +27,73 @@ const SUPPORTED_MODELS = [
26
27
  label: 'Gemini 3.1 Flash Lite (Preview)',
27
28
  provider: 'google',
28
29
  purpose: 'general'
30
+ },
31
+ {
32
+ id: 'llama3.1:8b',
33
+ label: 'Llama 3.1 8B (Local / General)',
34
+ provider: 'ollama',
35
+ purpose: 'general'
36
+ },
37
+ {
38
+ id: 'phi4-mini',
39
+ label: 'Phi-4 Mini (Local / Fast)',
40
+ provider: 'ollama',
41
+ purpose: 'fast'
42
+ },
43
+ {
44
+ id: 'phi4',
45
+ label: 'Phi-4 (Local / Planning)',
46
+ provider: 'ollama',
47
+ purpose: 'planning'
48
+ },
49
+ {
50
+ id: 'qwen2.5-coder:7b',
51
+ label: 'Qwen 2.5 Coder 7B (Local / Coding)',
52
+ provider: 'ollama',
53
+ purpose: 'coding'
29
54
  }
30
55
  ];
31
56
 
57
+ let dynamicModels = [];
58
+ let lastRefresh = 0;
59
+ const REFRESH_INTERVAL = 30000; // 30 seconds
60
+
61
+ async function getSupportedModels() {
62
+ const now = Date.now();
63
+ if (now - lastRefresh > REFRESH_INTERVAL) {
64
+ await refreshDynamicModels();
65
+ }
66
+
67
+ const all = [...STATIC_MODELS];
68
+ const staticIds = new Set(STATIC_MODELS.map(m => m.id));
69
+
70
+ for (const dm of dynamicModels) {
71
+ if (!staticIds.has(dm.id)) {
72
+ all.push(dm);
73
+ }
74
+ }
75
+
76
+ return all;
77
+ }
78
+
79
+ async function refreshDynamicModels() {
80
+ try {
81
+ const ollama = new OllamaProvider({ baseUrl: process.env.OLLAMA_URL });
82
+ const models = await ollama.listModels();
83
+
84
+ dynamicModels = models.map(name => ({
85
+ id: name,
86
+ label: `${name} (Ollama / Local)`,
87
+ provider: 'ollama',
88
+ purpose: 'general'
89
+ }));
90
+
91
+ lastRefresh = Date.now();
92
+ } catch (err) {
93
+ console.warn('[Models] Failed to refresh Ollama models:', err.message);
94
+ }
95
+ }
96
+
32
97
  function createProviderInstance(providerStr) {
33
98
  if (providerStr === 'grok') {
34
99
  return new GrokProvider({ apiKey: process.env.XAI_API_KEY });
@@ -36,11 +101,14 @@ function createProviderInstance(providerStr) {
36
101
  return new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });
37
102
  } else if (providerStr === 'google') {
38
103
  return new GoogleProvider({ apiKey: process.env.GOOGLE_AI_KEY });
104
+ } else if (providerStr === 'ollama') {
105
+ return new OllamaProvider({ baseUrl: process.env.OLLAMA_URL });
39
106
  }
40
107
  throw new Error(`Unknown provider: ${providerStr}`);
41
108
  }
42
109
 
43
110
  module.exports = {
44
- SUPPORTED_MODELS,
111
+ SUPPORTED_MODELS: STATIC_MODELS, // Backward compatibility
112
+ getSupportedModels,
45
113
  createProviderInstance
46
114
  };
@@ -19,6 +19,32 @@ class OllamaProvider extends BaseProvider {
19
19
  }
20
20
  }
21
21
 
22
+ async ensureModel(model) {
23
+ const models = await this.listModels();
24
+ // Normalization: Ollama often adds :latest if no tag is specified
25
+ const normalizedModel = model.includes(':') ? model : `${model}:latest`;
26
+ const found = models.some(m => m === model || m === normalizedModel);
27
+
28
+ if (found) return true;
29
+
30
+ console.log(`[Ollama] Model '${model}' not found, pulling from registry...`);
31
+ try {
32
+ const res = await fetch(`${this.baseUrl}/api/pull`, {
33
+ method: 'POST',
34
+ headers: { 'Content-Type': 'application/json' },
35
+ body: JSON.stringify({ name: model, stream: false })
36
+ });
37
+ if (!res.ok) throw new Error(`Pull failed: ${res.statusText}`);
38
+ console.log(`[Ollama] Model '${model}' pulled successfully.`);
39
+ // Refresh local model list
40
+ await this.listModels();
41
+ return true;
42
+ } catch (e) {
43
+ console.error(`[Ollama] Failed to pull model '${model}':`, e.message);
44
+ throw e;
45
+ }
46
+ }
47
+
22
48
  getContextWindow(model) {
23
49
  return 128000;
24
50
  }
@@ -36,6 +62,7 @@ class OllamaProvider extends BaseProvider {
36
62
 
37
63
  async chat(messages, tools = [], options = {}) {
38
64
  const model = options.model || this.config.model || 'llama3.1';
65
+ await this.ensureModel(model);
39
66
  const body = {
40
67
  model,
41
68
  messages: messages.map(m => ({
@@ -86,6 +113,7 @@ class OllamaProvider extends BaseProvider {
86
113
 
87
114
  async *stream(messages, tools = [], options = {}) {
88
115
  const model = options.model || this.config.model || 'llama3.1';
116
+ await this.ensureModel(model);
89
117
  const body = {
90
118
  model,
91
119
  messages: messages.map(m => ({
@@ -5,7 +5,9 @@ const DEFAULT_AI_SETTINGS = Object.freeze({
5
5
  chat_history_window: 8,
6
6
  tool_replay_budget_chars: 1200,
7
7
  subagent_max_iterations: 6,
8
- auto_skill_learning: true
8
+ auto_skill_learning: true,
9
+ fallback_model_id: 'gpt-5-nano',
10
+ smarter_model_selector: true
9
11
  });
10
12
 
11
13
  function parseSettingValue(value) {
@@ -21,14 +23,16 @@ function ensureDefaultAiSettings(userId) {
21
23
  if (!userId) return { ...DEFAULT_AI_SETTINGS };
22
24
 
23
25
  const existing = db.prepare(
24
- 'SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?, ?, ?, ?)'
26
+ 'SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?, ?, ?, ?, ?, ?)'
25
27
  ).all(
26
28
  userId,
27
29
  'cost_mode',
28
30
  'chat_history_window',
29
31
  'tool_replay_budget_chars',
30
32
  'subagent_max_iterations',
31
- 'auto_skill_learning'
33
+ 'auto_skill_learning',
34
+ 'fallback_model_id',
35
+ 'smarter_model_selector'
32
36
  );
33
37
 
34
38
  const seen = new Set(existing.map((row) => row.key));
@@ -49,14 +53,16 @@ function getAiSettings(userId) {
49
53
  if (!userId) return { ...DEFAULT_AI_SETTINGS };
50
54
 
51
55
  const rows = db.prepare(
52
- 'SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?, ?, ?, ?)'
56
+ 'SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?, ?, ?, ?, ?, ?)'
53
57
  ).all(
54
58
  userId,
55
59
  'cost_mode',
56
60
  'chat_history_window',
57
61
  'tool_replay_budget_chars',
58
62
  'subagent_max_iterations',
59
- 'auto_skill_learning'
63
+ 'auto_skill_learning',
64
+ 'fallback_model_id',
65
+ 'smarter_model_selector'
60
66
  );
61
67
 
62
68
  const settings = { ...DEFAULT_AI_SETTINGS };
@@ -69,6 +75,8 @@ function getAiSettings(userId) {
69
75
  settings.subagent_max_iterations = Math.max(2, Math.min(Number(settings.subagent_max_iterations) || DEFAULT_AI_SETTINGS.subagent_max_iterations, 12));
70
76
  settings.cost_mode = typeof settings.cost_mode === 'string' ? settings.cost_mode : DEFAULT_AI_SETTINGS.cost_mode;
71
77
  settings.auto_skill_learning = settings.auto_skill_learning !== false && settings.auto_skill_learning !== 'false';
78
+ settings.smarter_model_selector = settings.smarter_model_selector !== false && settings.smarter_model_selector !== 'false';
79
+ settings.fallback_model_id = typeof settings.fallback_model_id === 'string' ? settings.fallback_model_id : DEFAULT_AI_SETTINGS.fallback_model_id;
72
80
 
73
81
  return settings;
74
82
  }
@@ -4,44 +4,44 @@ const db = require('../../db/database');
4
4
  const { DATA_DIR } = require('../../../runtime/paths');
5
5
 
6
6
  function compactText(text, maxChars = 120) {
7
- const str = String(text || '').replace(/\s+/g, ' ').trim();
8
- if (str.length <= maxChars) return str;
9
- const trimmed = str.slice(0, maxChars);
10
- const sentenceBreak = Math.max(trimmed.lastIndexOf('. '), trimmed.lastIndexOf('; '), trimmed.lastIndexOf(', '));
11
- if (sentenceBreak > 40) return trimmed.slice(0, sentenceBreak + 1).trim();
12
- return `${trimmed.trim()}...`;
7
+ const str = String(text || '').replace(/\s+/g, ' ').trim();
8
+ if (str.length <= maxChars) return str;
9
+ const trimmed = str.slice(0, maxChars);
10
+ const sentenceBreak = Math.max(trimmed.lastIndexOf('. '), trimmed.lastIndexOf('; '), trimmed.lastIndexOf(', '));
11
+ if (sentenceBreak > 40) return trimmed.slice(0, sentenceBreak + 1).trim();
12
+ return `${trimmed.trim()}...`;
13
13
  }
14
14
 
15
15
  function compactToolDefinition(tool, options = {}) {
16
- const compact = {
17
- name: tool.name,
18
- parameters: {
19
- ...(tool.parameters || { type: 'object', properties: {} }),
20
- properties: {}
16
+ const compact = {
17
+ name: tool.name,
18
+ parameters: {
19
+ ...(tool.parameters || { type: 'object', properties: {} }),
20
+ properties: {}
21
+ }
22
+ };
23
+
24
+ if (options.includeDescriptions) {
25
+ compact.description = compactText(tool.description, 120);
21
26
  }
22
- };
23
-
24
- if (options.includeDescriptions) {
25
- compact.description = compactText(tool.description, 120);
26
- }
27
-
28
- if (tool.parameters?.properties) {
29
- const properties = {};
30
- for (const [key, value] of Object.entries(tool.parameters.properties)) {
31
- properties[key] = { ...value };
32
- if (options.includeDescriptions && value.description) {
33
- properties[key].description = compactText(value.description, 70);
34
- } else {
35
- delete properties[key].description;
36
- }
27
+
28
+ if (tool.parameters?.properties) {
29
+ const properties = {};
30
+ for (const [key, value] of Object.entries(tool.parameters.properties)) {
31
+ properties[key] = { ...value };
32
+ if (options.includeDescriptions && value.description) {
33
+ properties[key].description = compactText(value.description, 70);
34
+ } else {
35
+ delete properties[key].description;
36
+ }
37
+ }
38
+ compact.parameters = {
39
+ ...compact.parameters,
40
+ properties
41
+ };
37
42
  }
38
- compact.parameters = {
39
- ...compact.parameters,
40
- properties
41
- };
42
- }
43
43
 
44
- return compact;
44
+ return compact;
45
45
  }
46
46
 
47
47
  /**
@@ -1199,7 +1199,7 @@ async function executeTool(toolName, args, context, engine) {
1199
1199
  const mimeMap = { '.png': 'image/png', '.gif': 'image/gif', '.webp': 'image/webp', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg' };
1200
1200
  const mime = mimeMap[ext] || 'image/jpeg';
1201
1201
  const { getProviderForUser } = require('./engine');
1202
- const { provider: visionProvider, model: visionModel } = getProviderForUser(userId);
1202
+ const { provider: visionProvider, model: visionModel } = await getProviderForUser(userId);
1203
1203
  const visionResponse = await visionProvider.chat(
1204
1204
  [{
1205
1205
  role: 'user', content: [
@@ -11,13 +11,10 @@ const {
11
11
  } = require('./embeddings');
12
12
  const { AGENT_DATA_DIR } = require('../../../runtime/paths');
13
13
 
14
- /**
15
- * Derive the active AI provider name from user settings so the right
16
- * embedding model is selected automatically (e.g. Gemini when using Google).
17
- */
18
- function getActiveProvider(userId) {
14
+ async function getActiveProvider(userId) {
19
15
  try {
20
- const { SUPPORTED_MODELS } = require('../ai/models');
16
+ const { getSupportedModels } = require('../ai/models');
17
+ const models = await getSupportedModels();
21
18
  const rows = db.prepare('SELECT key, value FROM user_settings WHERE user_id = ? AND key IN (?, ?)')
22
19
  .all(userId || 1, 'default_chat_model', 'enabled_models');
23
20
 
@@ -36,7 +33,7 @@ function getActiveProvider(userId) {
36
33
  : (Array.isArray(enabledIds) && enabledIds.length > 0 ? enabledIds[0] : null);
37
34
 
38
35
  if (modelId) {
39
- const def = SUPPORTED_MODELS.find(m => m.id === modelId);
36
+ const def = models.find(m => m.id === modelId);
40
37
  if (def) return def.provider;
41
38
  }
42
39
  } catch { }
@@ -118,7 +115,7 @@ class MemoryManager {
118
115
  category = CATEGORIES.includes(category) ? category : 'episodic';
119
116
  importance = Math.max(1, Math.min(10, Number(importance) || 5));
120
117
 
121
- const embedding = await getEmbedding(content, getActiveProvider(userId));
118
+ const embedding = await getEmbedding(content, await getActiveProvider(userId));
122
119
 
123
120
  // Dedup check: compare against existing non-archived memories for this user
124
121
  const existing = db.prepare(
@@ -171,7 +168,7 @@ class MemoryManager {
171
168
 
172
169
  if (!all.length) return [];
173
170
 
174
- const queryVec = await getEmbedding(query, getActiveProvider(userId));
171
+ const queryVec = await getEmbedding(query, await getActiveProvider(userId));
175
172
 
176
173
  const scored = all.map(mem => {
177
174
  let score = 0;
@@ -235,7 +232,7 @@ class MemoryManager {
235
232
 
236
233
  let newEmbed = mem.embedding;
237
234
  if (content && content !== mem.content) {
238
- const vec = await getEmbedding(newContent, getActiveProvider(null));
235
+ const vec = await getEmbedding(newContent, await getActiveProvider(null));
239
236
  newEmbed = vec ? serializeEmbedding(vec) : mem.embedding;
240
237
  }
241
238
 
@@ -81,7 +81,7 @@ function setupWebSocket(io, services) {
81
81
  .run(userId, result.runId, 'assistant', result.content, JSON.stringify({ tokens: result.totalTokens }));
82
82
  }
83
83
 
84
- const { provider, model } = getProviderForUser(userId, task, false, options?.model || null);
84
+ const { provider, model } = await getProviderForUser(userId, task, false, options?.model || null);
85
85
  refreshWebChatSummary(userId, provider, model, aiSettings.chat_history_window).catch((summaryErr) => {
86
86
  console.error('[WS] Web summary refresh failed:', summaryErr.message);
87
87
  });