neoagent 2.2.1-beta.4 → 2.2.1-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -48,6 +48,7 @@ const AGENT_SETTING_KEYS = new Set([
48
48
  'ai_provider_configs',
49
49
  'default_chat_model',
50
50
  'default_subagent_model',
51
+ 'default_speech_model',
51
52
  'enabled_models',
52
53
  'voice_stt_provider',
53
54
  'voice_stt_model',
@@ -39,6 +39,7 @@ const {
39
39
  } = require('./interim');
40
40
 
41
41
  const MAX_CONSECUTIVE_TOOL_FAILURES = 3;
42
+ const WIDGET_REFRESH_MAX_ITERATIONS = 6;
42
43
 
43
44
  function generateTitle(task) {
44
45
  if (!task || typeof task !== 'string') return 'Untitled';
@@ -1324,8 +1325,9 @@ class AgentEngine {
1324
1325
  runMeta.toolPids.delete(pid);
1325
1326
  }
1326
1327
 
1327
- getIterationLimit(triggerType, aiSettings) {
1328
+ getIterationLimit(triggerType, aiSettings, options = {}) {
1328
1329
  if (triggerType === 'subagent') return aiSettings.subagent_max_iterations;
1330
+ if (options.widgetId) return Math.min(this.maxIterations, WIDGET_REFRESH_MAX_ITERATIONS);
1329
1331
  return this.maxIterations;
1330
1332
  }
1331
1333
 
@@ -1441,7 +1443,14 @@ class AgentEngine {
1441
1443
  }
1442
1444
 
1443
1445
  async run(userId, userMessage, options = {}) {
1444
- return this.runWithModel(userId, userMessage, options, null);
1446
+ return this.runWithModel(
1447
+ userId,
1448
+ userMessage,
1449
+ options,
1450
+ typeof options.model === 'string' && options.model.trim()
1451
+ ? options.model.trim()
1452
+ : null,
1453
+ );
1445
1454
  }
1446
1455
 
1447
1456
  async runWithModel(userId, userMessage, options = {}, _modelOverride = null) {
@@ -1460,7 +1469,7 @@ class AgentEngine {
1460
1469
  Number(options.historyWindow || aiSettings.chat_history_window) || aiSettings.chat_history_window,
1461
1470
  );
1462
1471
  const toolReplayBudget = aiSettings.tool_replay_budget_chars;
1463
- const maxIterations = this.getIterationLimit(triggerType, aiSettings);
1472
+ const maxIterations = this.getIterationLimit(triggerType, aiSettings, options);
1464
1473
  const providerStatusConfig = {
1465
1474
  agentId,
1466
1475
  onStatus: (status) => {
@@ -1500,6 +1509,7 @@ class AgentEngine {
1500
1509
  explicitMessageSent: carriedExplicitMessageSent,
1501
1510
  lastSentMessage: carriedExplicitMessageSent ? carriedVisibleMessage : '',
1502
1511
  sentMessages: [],
1512
+ widgetSnapshotSaved: false,
1503
1513
  triggerType,
1504
1514
  triggerSource,
1505
1515
  startedAt: Date.now(),
@@ -2085,6 +2095,14 @@ class AgentEngine {
2085
2095
  if (runMeta) {
2086
2096
  runMeta.lastToolName = toolName;
2087
2097
  runMeta.lastToolTarget = toolName === 'send_message' ? toolArgs.to : null;
2098
+ if (toolName === 'save_widget_snapshot' && !toolErrorMessage) {
2099
+ runMeta.widgetSnapshotSaved = true;
2100
+ }
2101
+ }
2102
+
2103
+ if (toolName === 'save_widget_snapshot' && !toolErrorMessage) {
2104
+ lastContent = 'Widget snapshot updated.';
2105
+ break;
2088
2106
  }
2089
2107
 
2090
2108
  if (runMeta?.terminalInterim) {
@@ -2094,6 +2112,7 @@ class AgentEngine {
2094
2112
 
2095
2113
  if (this.isRunStopped(runId)) break;
2096
2114
  if (this.getRunMeta(runId)?.terminalInterim) break;
2115
+ if (this.getRunMeta(runId)?.widgetSnapshotSaved) break;
2097
2116
  if (!this.activeRuns.has(runId)) break;
2098
2117
  }
2099
2118
 
@@ -2112,6 +2131,9 @@ class AgentEngine {
2112
2131
  if (runMeta?.terminalInterim) {
2113
2132
  lastContent = '';
2114
2133
  }
2134
+ if (runMeta?.widgetSnapshotSaved && !lastContent) {
2135
+ lastContent = 'Widget snapshot updated.';
2136
+ }
2115
2137
  const messagingSent = runMeta?.messagingSent || false;
2116
2138
  const lastToolWasMessaging = runMeta?.lastToolName === 'send_message' || runMeta?.lastToolName === 'make_call';
2117
2139
 
@@ -2140,7 +2162,11 @@ class AgentEngine {
2140
2162
  }
2141
2163
  }
2142
2164
 
2143
- if (!normalizeOutgoingMessage(lastContent, options?.source || null) && !messagingSent) {
2165
+ if (
2166
+ !normalizeOutgoingMessage(lastContent, options?.source || null)
2167
+ && !messagingSent
2168
+ && runMeta?.widgetSnapshotSaved !== true
2169
+ ) {
2144
2170
  if (iteration >= maxIterations) {
2145
2171
  throw new Error(`Iteration limit reached before explicit completion after ${maxIterations} iterations.`);
2146
2172
  }
@@ -101,6 +101,7 @@ function createDefaultAiSettings() {
101
101
  enabled_models: [],
102
102
  default_chat_model: 'auto',
103
103
  default_subagent_model: 'auto',
104
+ default_speech_model: 'auto',
104
105
  ai_provider_configs: createDefaultProviderConfigs(),
105
106
  voice_runtime_mode: 'live',
106
107
  voice_live_provider: 'openai',
@@ -284,6 +285,9 @@ function getAiSettings(userId, agentId = null) {
284
285
  settings.default_subagent_model = typeof settings.default_subagent_model === 'string' && settings.default_subagent_model.trim()
285
286
  ? settings.default_subagent_model
286
287
  : DEFAULT_AI_SETTINGS.default_subagent_model;
288
+ settings.default_speech_model = typeof settings.default_speech_model === 'string' && settings.default_speech_model.trim()
289
+ ? settings.default_speech_model.trim()
290
+ : DEFAULT_AI_SETTINGS.default_speech_model;
287
291
  settings.voice_runtime_mode = normalizeRuntimeMode(settings.voice_runtime_mode);
288
292
  settings.voice_live_provider = normalizeLiveProvider(settings.voice_live_provider);
289
293
  settings.voice_live_model = resolveLiveModel(settings.voice_live_provider, settings.voice_live_model);
@@ -154,6 +154,7 @@ async function processQueuedMessage({
154
154
  const runOptions = isVoiceLikeMessage(msg)
155
155
  ? buildVoiceMessagingRunOptions({
156
156
  runId,
157
+ userId,
157
158
  agentId,
158
159
  conversationId,
159
160
  msg,
@@ -595,6 +595,41 @@ async function synthesizeVoiceReply(text, options = {}) {
595
595
 
596
596
  // Minimum characters before flushing a sentence chunk to TTS to avoid tiny requests.
597
597
  const MIN_SENTENCE_CHUNK_CHARS = 80;
598
// Hard ceiling on a single TTS request payload, in characters.
const MAX_TTS_CHUNK_CHARS = 220;

/**
 * Split text into chunks of at most `maxChars` characters, preferring
 * word boundaries and falling back to hard character slices for a
 * single token longer than the limit.
 *
 * Fix: the original only hard-sliced an oversized word when the
 * accumulator was empty; an oversized word arriving while `pending`
 * held text was carried over intact and emitted as a chunk longer
 * than `maxChars`. Oversized words are now always sliced.
 *
 * @param {string} text - Input text (nullish/blank yields []).
 * @param {number} [maxChars=MAX_TTS_CHUNK_CHARS] - Chunk size limit.
 * @returns {string[]} Non-empty chunks, each <= maxChars characters.
 */
function splitOversizeChunk(text, maxChars = MAX_TTS_CHUNK_CHARS) {
  const normalized = String(text || '').trim();
  if (!normalized) return [];
  if (normalized.length <= maxChars) return [normalized];

  // Slice a single whitespace-free token into fixed-size pieces.
  const sliceHard = (token) => {
    const slices = [];
    for (let index = 0; index < token.length; index += maxChars) {
      slices.push(token.slice(index, index + maxChars).trim());
    }
    return slices.filter(Boolean);
  };

  const words = normalized.split(/\s+/).filter(Boolean);
  if (words.length <= 1) return sliceHard(normalized);

  const chunks = [];
  let pending = '';
  for (const word of words) {
    if (word.length > maxChars) {
      // Flush accumulated text, then hard-slice the oversized word —
      // regardless of whether `pending` held anything.
      if (pending) {
        chunks.push(pending);
        pending = '';
      }
      chunks.push(...sliceHard(word));
      continue;
    }
    const candidate = pending ? `${pending} ${word}` : word;
    if (candidate.length > maxChars) {
      // `word` alone fits (checked above), so `pending` is non-empty.
      chunks.push(pending);
      pending = word;
    } else {
      pending = candidate;
    }
  }
  if (pending) chunks.push(pending);
  return chunks;
}
598
633
 
599
634
  function splitIntoSentenceChunks(text) {
600
635
  const normalized = String(text || '').trim();
@@ -609,13 +644,13 @@ function splitIntoSentenceChunks(text) {
609
644
  const piece = part.trim();
610
645
  if (!piece) continue;
611
646
  pending = pending ? `${pending} ${piece}` : piece;
612
- if (pending.length >= MIN_SENTENCE_CHUNK_CHARS) {
613
- chunks.push(pending);
647
+ if (pending.length >= MIN_SENTENCE_CHUNK_CHARS || pending.length >= MAX_TTS_CHUNK_CHARS) {
648
+ chunks.push(...splitOversizeChunk(pending));
614
649
  pending = '';
615
650
  }
616
651
  }
617
652
 
618
- if (pending) chunks.push(pending);
653
+ if (pending) chunks.push(...splitOversizeChunk(pending));
619
654
  return chunks.length ? chunks : [normalized];
620
655
  }
621
656
 
@@ -628,8 +663,8 @@ async function synthesizeVoiceReplyStream(text, options = {}, onChunk) {
628
663
  const { provider, model, voice } = normalizeVoiceSynthesisOptions(options);
629
664
  const chunks = splitIntoSentenceChunks(content);
630
665
 
631
- const run = (async () => {
632
- for (const chunk of chunks) {
666
+ for (const chunk of chunks) {
667
+ const run = (async () => {
633
668
  if (provider === 'openai') {
634
669
  await streamWithOpenAi(chunk, model, voice, options, onChunk);
635
670
  } else if (provider === 'deepgram') {
@@ -637,9 +672,9 @@ async function synthesizeVoiceReplyStream(text, options = {}, onChunk) {
637
672
  } else {
638
673
  await streamWithGemini(chunk, model, voice, options, onChunk);
639
674
  }
640
- }
641
- })();
642
- await withTimeout(run, options.timeoutMs, `${provider} TTS stream`);
675
+ })();
676
+ await withTimeout(run, options.timeoutMs, `${provider} TTS stream`);
677
+ }
643
678
  }
644
679
 
645
680
  module.exports = {
@@ -1,6 +1,7 @@
1
1
  'use strict';
2
2
 
3
3
  const { buildPlatformFormattingGuide } = require('../messaging/formatting_guides');
4
+ const { getAiSettings } = require('../ai/settings');
4
5
 
5
6
  const VOICE_HISTORY_WINDOW = 4;
6
7
  const VOICE_REASONING_EFFORT = 'low';
@@ -67,13 +68,17 @@ function buildVoiceMessagingPrompt(msg = {}) {
67
68
 
68
69
  function buildVoiceMessagingRunOptions({
69
70
  runId,
71
+ userId,
70
72
  agentId = null,
71
73
  conversationId,
72
74
  msg,
73
75
  }) {
76
+ const aiSettings = getAiSettings(userId, agentId);
77
+ const speechModel = String(aiSettings.default_speech_model || 'auto').trim();
74
78
  return {
75
79
  runId,
76
80
  agentId,
81
+ model: speechModel !== 'auto' ? speechModel : null,
77
82
  triggerSource: 'messaging',
78
83
  conversationId,
79
84
  source: msg.platform,
@@ -92,12 +97,16 @@ function buildVoiceMessagingRunOptions({
92
97
  }
93
98
 
94
99
  function buildDirectVoiceRunOptions({
100
+ userId,
95
101
  agentId = null,
96
102
  conversationId,
97
103
  platform = 'voice_assistant',
98
104
  }) {
105
+ const aiSettings = getAiSettings(userId, agentId);
106
+ const speechModel = String(aiSettings.default_speech_model || 'auto').trim();
99
107
  return {
100
108
  agentId,
109
+ model: speechModel !== 'auto' ? speechModel : null,
101
110
  conversationId,
102
111
  triggerSource: platform,
103
112
  skipConversationHistory: true,
@@ -377,7 +377,7 @@ class VoiceRuntimeManager {
377
377
  ...normalized,
378
378
  apiKey: runtime.apiKey,
379
379
  baseUrl: runtime.baseUrl,
380
- timeoutMs: 12000,
380
+ timeoutMs: 20000,
381
381
  });
382
382
  }
383
383
  return attempts;
@@ -90,6 +90,7 @@ async function runVoiceTranscriptTurn({
90
90
  });
91
91
  const conversationId = memoryManager.getDefaultWebConversationId(userId, { agentId });
92
92
  const runOptions = buildDirectVoiceRunOptions({
93
+ userId,
93
94
  agentId,
94
95
  conversationId,
95
96
  platform,
@@ -527,6 +527,7 @@ class WidgetService {
527
527
  '{"title":"","kicker":"","subtitle":"","body":"","metric":"","metricLabel":"","secondaryMetric":"","secondaryLabel":"","tertiaryMetric":"","tertiaryLabel":"","trend":{"label":"","direction":"flat"},"progress":{"value":0,"max":100,"label":""},"rows":[{"label":"","value":""}],"chips":[""],"iconToken":"","accentToken":"","backgroundToken":"","surfaceColor":"","updatedAt":"","deepLink":""}',
528
528
  'Rules:',
529
529
  '- Do not change the template or layout variant.',
530
+ '- Once you have enough accurate data, call save_widget_snapshot exactly once and stop. Do not keep exploring after saving.',
530
531
  '- Keep rows to at most 3 and chips to at most 3.',
531
532
  '- Prefer concrete data over generic prose. Use metric + supporting fields whenever live data exists.',
532
533
  '- Make the widget immediately useful at a glance. Avoid filler copy, duplicated labels, or repeating the widget name unless it helps identify the subject.',