@ducci/jarvis 1.0.10 → 1.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/docs/agent.md CHANGED
@@ -198,7 +198,7 @@ Seed tool included for sanity checks:
198
198
  Jarvis uses the provider tool-calling API:
199
199
 
200
200
  1. The model returns an assistant message containing a `tool_calls` array.
201
- 2. Jarvis appends that assistant message to the conversation history as-is.
201
+ 2. Jarvis normalizes each tool call before appending to the conversation history: if `function.arguments` is missing or empty, it is set to `"{}"`. Some models (especially smaller/free ones) omit `arguments` for no-arg tools. Storing a malformed tool call would cause the next API request to fail with a 400 validation error.
202
202
  3. Jarvis executes those tools in order, serially.
203
203
  4. Each tool result is appended to the conversation as a `role: "tool"` message with a matching `tool_call_id`.
204
204
  5. Jarvis calls the model again with the updated conversation.
@@ -447,7 +447,13 @@ Tool inputs/outputs:
447
447
 
448
448
  - Model call failures: try the selected model once, then one fallback model attempt. If both fail, end the run with a `500` error and a clear message.
449
449
  - Tool failures: pass the error result back to the model and continue the loop. Ideally, the next model response includes another tool call that fixes the previous one. All tool errors (especially `exec` failures) must be reported in the `logSummary` with enough detail for a human to understand the cause.
450
- - Malformed JSON on final response: log the failure and stop the run with a formatted error message.
450
+ - Malformed JSON on final response: attempt the following recovery steps, in order, before giving up:
451
+ 1. **Fallback model retry** — call the fallback model with the same conversation messages (the bad response is not saved to the session yet). If this produces valid JSON, use it and continue normally.
452
+ 2. **Nudge retry** — if the fallback model also returns non-JSON, append a temporary nudge message to the conversation (not saved to the session) and call `callModelWithFallback` once more:
453
+ ```
454
+ Your previous response was not valid JSON. Respond only with the required JSON object: {"response": "...", "logSummary": "..."}
455
+ ```
456
+ 3. **Give up** — if all three attempts fail, return `format_error` without pushing any assistant content to the session. The nudge message is never persisted regardless of outcome.
451
457
 
452
458
  **Error Payload Structure**:
453
459
 
@@ -462,6 +468,14 @@ Tool inputs/outputs:
462
468
  - Use `500 Internal Server Error` for API failures, tool runtime errors, or model communication issues.
463
469
  - Always append a log entry on failure so the outcome is visible in the session log.
464
470
 
471
+ **Synthetic error note on failure**: when a run ends with `model_error` or `format_error`, a synthetic assistant message is appended to the session before saving:
472
+
473
+ ```
474
+ [System: Previous run failed (model_error): <logSummary>. Error detail: <errorDetail JSON>]
475
+ ```
476
+
477
+ The full `errorDetail` (provider error body, HTTP status, etc.) is included so the model has enough information to understand and potentially recover from the failure without needing to call `read_session_log`. Without this, the session would contain a dangling user message with no reply, and the model would have no way to understand or recover from the failure.
478
+
465
479
  Model configuration:
466
480
 
467
481
  - Selected model ID is stored in the same config file created during setup.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ducci/jarvis",
3
- "version": "1.0.10",
3
+ "version": "1.0.12",
4
4
  "description": "A fully automated agent system that lives on a server.",
5
5
  "main": "./src/index.js",
6
6
  "type": "module",
@@ -6,6 +6,9 @@ import { loadTools, getToolDefinitions, executeTool } from './tools.js';
6
6
  import { appendLog } from './logging.js';
7
7
  import chalk from 'chalk';
8
8
 
9
+ const FORMAT_NUDGE = 'Your previous response was not valid JSON. Respond only with the required JSON object: {"response": "...", "logSummary": "..."}';
10
+ const LOOP_DETECTION_THRESHOLD = 3;
11
+
9
12
  const WRAP_UP_NOTE = `[System: You have reached the iteration limit. This is your final response for this run.
10
13
  Respond with your normal JSON, but add a checkpoint field:
11
14
 
@@ -67,6 +70,7 @@ async function runAgentLoop(client, config, session, prepareMessages) {
67
70
  let toolDefs = getToolDefinitions(tools);
68
71
  let iteration = 0;
69
72
  const runToolCalls = [];
73
+ const loopTracker = new Map();
70
74
  let done = false;
71
75
  let response = '';
72
76
  let logSummary = '';
@@ -112,7 +116,13 @@ async function runAgentLoop(client, config, session, prepareMessages) {
112
116
  session.messages.push({
113
117
  role: 'assistant',
114
118
  content: assistantMessage.content || null,
115
- tool_calls: assistantMessage.tool_calls,
119
+ tool_calls: assistantMessage.tool_calls.map(tc => ({
120
+ ...tc,
121
+ function: {
122
+ ...tc.function,
123
+ arguments: tc.function.arguments || '{}',
124
+ },
125
+ })),
116
126
  });
117
127
 
118
128
  let toolsModified = false;
@@ -146,6 +156,17 @@ async function runAgentLoop(client, config, session, prepareMessages) {
146
156
  tool_call_id: toolCall.id,
147
157
  content: resultStr,
148
158
  });
159
+
160
+ const callKey = `${toolName}|${JSON.stringify(toolArgs)}|${resultStr}`;
161
+ loopTracker.set(callKey, (loopTracker.get(callKey) || 0) + 1);
162
+ }
163
+
164
+ const loopDetected = [...loopTracker.values()].some(count => count >= LOOP_DETECTION_THRESHOLD);
165
+ if (loopDetected) {
166
+ session.messages.push({
167
+ role: 'user',
168
+ content: '[System: Loop detected. You are repeatedly calling the same tools with identical arguments and getting identical results. Stop calling tools and provide your final answer now based on what you already know.]',
169
+ });
149
170
  }
150
171
 
151
172
  // Reload tools if any were created/updated this iteration
@@ -158,20 +179,45 @@ async function runAgentLoop(client, config, session, prepareMessages) {
158
179
  }
159
180
 
160
181
  // No tool calls — final response
161
- const content = assistantMessage.content || '';
162
- session.messages.push({ role: 'assistant', content });
182
+ // Delay pushing to session until we have a valid response (recovery may replace it)
183
+ let content = assistantMessage.content || '';
184
+ let parsed = null;
163
185
 
164
186
  try {
165
- const parsed = JSON.parse(content);
166
- response = parsed.response || content;
167
- logSummary = parsed.logSummary || '';
187
+ parsed = JSON.parse(content);
168
188
  } catch {
189
+ // Step 1: retry with fallback model
190
+ try {
191
+ const fallbackResult = await callModel(client, config.fallbackModel, preparedMessages, toolDefs);
192
+ const fallbackContent = fallbackResult.choices[0]?.message?.content || '';
193
+ parsed = JSON.parse(fallbackContent);
194
+ content = fallbackContent;
195
+ } catch {
196
+ // Step 2: nudge retry via both models
197
+ try {
198
+ const nudgeMessages = [...preparedMessages, { role: 'user', content: FORMAT_NUDGE }];
199
+ const nudgeResult = await callModelWithFallback(client, config, nudgeMessages, toolDefs);
200
+ const nudgeContent = nudgeResult.choices[0]?.message?.content || '';
201
+ parsed = JSON.parse(nudgeContent);
202
+ content = nudgeContent;
203
+ } catch {
204
+ // Give up
205
+ }
206
+ }
207
+ }
208
+
209
+ if (!parsed) {
210
+ // Don't push bad content — handleChat will inject a synthetic error note
169
211
  response = content;
170
- logSummary = 'Model returned non-JSON final response.';
212
+ logSummary = 'Model returned non-JSON final response after recovery attempts.';
171
213
  status = 'format_error';
172
214
  return { iteration, response, logSummary, status, runToolCalls, checkpoint: null, rawResponse: content };
173
215
  }
174
216
 
217
+ session.messages.push({ role: 'assistant', content });
218
+ response = parsed.response || content;
219
+ logSummary = parsed.logSummary || '';
220
+
175
221
  done = true;
176
222
  break;
177
223
  }
@@ -212,31 +258,47 @@ async function runAgentLoop(client, config, session, prepareMessages) {
212
258
  };
213
259
  }
214
260
 
215
- const wrapUpContent = wrapUpResult.choices[0].message.content || '';
216
- // Store the wrap-up response (but NOT the temporary system note)
217
- session.messages.push({ role: 'assistant', content: wrapUpContent });
261
+ let wrapUpContent = wrapUpResult.choices[0].message.content || '';
262
+ let parsedWrapUp = null;
218
263
 
264
+ // Try JSON parse; if it fails, nudge retry (Layer 2)
219
265
  try {
220
- const parsed = JSON.parse(wrapUpContent);
221
- response = parsed.response || '';
222
- logSummary = parsed.logSummary || '';
266
+ parsedWrapUp = JSON.parse(wrapUpContent);
267
+ } catch {
268
+ try {
269
+ const nudgeMessages = [...wrapUpMessages, { role: 'user', content: FORMAT_NUDGE }];
270
+ const nudgeResult = await callModelWithFallback(client, config, nudgeMessages, []);
271
+ const nudgeContent = nudgeResult.choices[0]?.message?.content || '';
272
+ parsedWrapUp = JSON.parse(nudgeContent);
273
+ wrapUpContent = nudgeContent;
274
+ } catch {
275
+ // Layer 3: use raw text as best-effort response below
276
+ }
277
+ }
278
+
279
+ // Store the wrap-up response (but NOT the temporary system note)
280
+ session.messages.push({ role: 'assistant', content: wrapUpContent });
223
281
 
224
- if (parsed.checkpoint) {
282
+ if (parsedWrapUp) {
283
+ response = parsedWrapUp.response || '';
284
+ logSummary = parsedWrapUp.logSummary || '';
285
+ if (parsedWrapUp.checkpoint) {
225
286
  return {
226
287
  iteration,
227
288
  response,
228
289
  logSummary,
229
290
  status: 'checkpoint_reached',
230
291
  runToolCalls,
231
- checkpoint: parsed.checkpoint,
292
+ checkpoint: parsedWrapUp.checkpoint,
232
293
  };
233
294
  }
234
- } catch {
295
+ status = 'ok';
296
+ } else {
297
+ // Layer 3: use raw text — user gets a real response instead of an error
235
298
  response = wrapUpContent;
236
- logSummary = 'Wrap-up response was not valid JSON.';
299
+ logSummary = 'Wrap-up response was not valid JSON after retry.';
300
+ status = 'ok';
237
301
  }
238
-
239
- status = 'checkpoint_reached';
240
302
  }
241
303
 
242
304
  return { iteration, response, logSummary, status, runToolCalls, checkpoint: null };
@@ -303,6 +365,16 @@ export async function handleChat(config, requestSessionId, userMessage) {
303
365
  if (run.contextInfo) logEntry.contextInfo = run.contextInfo;
304
366
  if (run.rawResponse) logEntry.rawResponse = run.rawResponse;
305
367
  appendLog(sessionId, logEntry);
368
+
369
+ // Inject synthetic error note so the model has context on the next user turn
370
+ if (finalStatus === 'model_error' || finalStatus === 'format_error') {
371
+ const errorDetail = run.errorDetail ? ` Error detail: ${JSON.stringify(run.errorDetail)}` : '';
372
+ session.messages.push({
373
+ role: 'assistant',
374
+ content: `[System: Previous run failed (${finalStatus}): ${finalLogSummary}.${errorDetail}]`,
375
+ });
376
+ }
377
+
306
378
  break;
307
379
  }
308
380