@ouro.bot/cli 0.1.0-alpha.50 → 0.1.0-alpha.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/changelog.json CHANGED
@@ -1,6 +1,24 @@
1
1
  {
2
2
  "_note": "This changelog is maintained as part of the PR/version-bump workflow. Agent-curated, not auto-generated. Agents read this file directly via read_file to understand what changed between versions.",
3
3
  "versions": [
4
+ {
5
+ "version": "0.1.0-alpha.52",
6
+ "changes": [
7
+ "Trusted 1:1 chats can now act live across active group threads: the harness resolves candidate target chats, carries explicit trust context into the model, and delivers messages into the right live BlueBubbles or Teams session instead of only queueing for later.",
8
+ "People discovered through a relevant live group are now bootstrapped as acquaintances with shared-group context, so the agent gets a socially truthful model of who is merely unknown versus who is known through the current group.",
9
+ "Cross-chat work now returns a truthful outcome back to the asking chat, and bridge suggestions can span different outward relationships when one live piece of work is clearly happening across them."
10
+ ]
11
+ },
12
+ {
13
+ "version": "0.1.0-alpha.51",
14
+ "changes": [
15
+ "Agents can now adjust their own reasoning depth mid-conversation via a new set_reasoning_effort tool, with effort levels derived from a central model capabilities registry.",
16
+ "Anthropic extended thinking is now enabled with adaptive effort, thinking blocks are captured during streaming (including signatures and redacted blocks), persisted on conversation history, and faithfully round-tripped across turns.",
17
+ "Anthropic max_tokens now uses the model's actual output ceiling from the registry instead of a hardcoded 4096, removing artificial response length constraints.",
18
+ "Codex assistant messages are now annotated with phase labels (commentary vs final_answer) so GPT-5.4 can distinguish intermediate reasoning from completed responses in its own history.",
19
+ "Azure and Codex reasoning effort is now dynamic from the agent loop instead of hardcoded to medium."
20
+ ]
21
+ },
4
22
  {
5
23
  "version": "0.1.0-alpha.50",
6
24
  "changes": [
@@ -5,6 +5,7 @@ exports.buildActiveWorkFrame = buildActiveWorkFrame;
5
5
  exports.formatActiveWorkFrame = formatActiveWorkFrame;
6
6
  const runtime_1 = require("../nerves/runtime");
7
7
  const state_machine_1 = require("./bridges/state-machine");
8
+ const target_resolution_1 = require("./target-resolution");
8
9
  function activityPriority(source) {
9
10
  return source === "friend-facing" ? 0 : 1;
10
11
  }
@@ -31,16 +32,28 @@ function hasSharedObligationPressure(input) {
31
32
  || summarizeLiveTasks(input.taskBoard).length > 0;
32
33
  }
33
34
  function suggestBridgeForActiveWork(input) {
34
- const candidateSessions = input.friendSessions
35
- .filter((session) => !input.currentSession
36
- || session.friendId !== input.currentSession.friendId
37
- || session.channel !== input.currentSession.channel
38
- || session.key !== input.currentSession.key)
39
- .sort(compareActivity);
40
- const targetSession = candidateSessions[0] ?? null;
41
- if (!targetSession || !hasSharedObligationPressure(input)) {
35
+ const targetCandidates = (input.targetCandidates ?? [])
36
+ .filter((candidate) => {
37
+ if (candidate.delivery.mode === "blocked") {
38
+ return false;
39
+ }
40
+ if (candidate.activitySource !== "friend-facing" || candidate.channel === "inner") {
41
+ return false;
42
+ }
43
+ if (!input.currentSession) {
44
+ return true;
45
+ }
46
+ return !(candidate.friendId === input.currentSession.friendId
47
+ && candidate.channel === input.currentSession.channel
48
+ && candidate.key === input.currentSession.key);
49
+ })
50
+ .sort((a, b) => {
51
+ return b.lastActivityMs - a.lastActivityMs;
52
+ });
53
+ if (!hasSharedObligationPressure(input) || targetCandidates.length !== 1) {
42
54
  return null;
43
55
  }
56
+ const targetSession = targetCandidates[0];
44
57
  const activeBridge = input.bridges.find(isActiveBridge) ?? null;
45
58
  if (activeBridge) {
46
59
  const alreadyAttached = activeBridge.attachedSessions.some((session) => session.friendId === targetSession.friendId
@@ -53,14 +66,14 @@ function suggestBridgeForActiveWork(input) {
53
66
  kind: "attach-existing",
54
67
  bridgeId: activeBridge.id,
55
68
  targetSession,
56
- reason: "same-friend-shared-work",
69
+ reason: "shared-work-candidate",
57
70
  };
58
71
  }
59
72
  return {
60
73
  kind: "begin-new",
61
74
  targetSession,
62
75
  objectiveHint: input.currentObligation?.trim() || "keep this shared work aligned",
63
- reason: "same-friend-shared-work",
76
+ reason: "shared-work-candidate",
64
77
  };
65
78
  }
66
79
  function formatSessionLabel(session) {
@@ -95,13 +108,14 @@ function buildActiveWorkFrame(input) {
95
108
  freshestForCurrentFriend: friendSessions[0] ?? null,
96
109
  otherLiveSessionsForCurrentFriend: friendSessions,
97
110
  },
111
+ targetCandidates: input.targetCandidates ?? [],
98
112
  bridgeSuggestion: suggestBridgeForActiveWork({
99
113
  currentSession: input.currentSession,
100
114
  currentObligation: input.currentObligation,
101
115
  mustResolveBeforeHandoff: input.mustResolveBeforeHandoff,
102
116
  bridges: input.bridges,
103
117
  taskBoard: input.taskBoard,
104
- friendSessions,
118
+ targetCandidates: input.targetCandidates,
105
119
  }),
106
120
  };
107
121
  (0, runtime_1.emitNervesEvent)({
@@ -144,6 +158,13 @@ function formatActiveWorkFrame(frame) {
144
158
  if (frame.friendActivity?.freshestForCurrentFriend) {
145
159
  lines.push(`freshest friend-facing session: ${formatSessionLabel(frame.friendActivity.freshestForCurrentFriend)}`);
146
160
  }
161
+ const targetCandidatesBlock = frame.targetCandidates && frame.targetCandidates.length > 0
162
+ ? (0, target_resolution_1.formatTargetSessionCandidates)(frame.targetCandidates)
163
+ : "";
164
+ if (targetCandidatesBlock) {
165
+ lines.push("");
166
+ lines.push(targetCandidatesBlock);
167
+ }
147
168
  if (frame.bridgeSuggestion) {
148
169
  if (frame.bridgeSuggestion.kind === "attach-existing") {
149
170
  lines.push(`suggested bridge: attach ${frame.bridgeSuggestion.bridgeId} -> ${formatSessionLabel(frame.bridgeSuggestion.targetSession)}`);
@@ -331,7 +331,12 @@ async function runAgent(messages, callbacks, channel, signal, options) {
331
331
  // so turn execution remains consistent and non-fatal.
332
332
  if (channel) {
333
333
  try {
334
- const refreshed = await (0, prompt_1.buildSystem)(channel, options, currentContext);
334
+ const buildSystemOptions = {
335
+ ...options,
336
+ providerCapabilities: providerRuntime.capabilities,
337
+ supportedReasoningEfforts: providerRuntime.supportedReasoningEfforts,
338
+ };
339
+ const refreshed = await (0, prompt_1.buildSystem)(channel, buildSystemOptions, currentContext);
335
340
  upsertSystemPrompt(messages, refreshed);
336
341
  }
337
342
  catch (error) {
@@ -362,13 +367,22 @@ async function runAgent(messages, callbacks, channel, signal, options) {
362
367
  let completion;
363
368
  let sawSteeringFollowUp = false;
364
369
  let mustResolveBeforeHandoffActive = options?.mustResolveBeforeHandoff === true;
370
+ let currentReasoningEffort = "medium";
365
371
  // Prevent MaxListenersExceeded warning — each iteration adds a listener
366
372
  try {
367
373
  require("events").setMaxListeners(50, signal);
368
374
  }
369
375
  catch { /* unsupported */ }
370
376
  const toolPreferences = currentContext?.friend?.toolPreferences;
371
- const baseTools = options?.tools ?? (0, tools_1.getToolsForChannel)(channel ? (0, channel_1.getChannelCapabilities)(channel) : undefined, toolPreferences && Object.keys(toolPreferences).length > 0 ? toolPreferences : undefined, currentContext);
377
+ const baseTools = options?.tools ?? (0, tools_1.getToolsForChannel)(channel ? (0, channel_1.getChannelCapabilities)(channel) : undefined, toolPreferences && Object.keys(toolPreferences).length > 0 ? toolPreferences : undefined, currentContext, providerRuntime.capabilities);
378
+ // Augment tool context with reasoning effort controls from provider
379
+ const augmentedToolContext = options?.toolContext
380
+ ? {
381
+ ...options.toolContext,
382
+ supportedReasoningEfforts: providerRuntime.supportedReasoningEfforts,
383
+ setReasoningEffort: (level) => { currentReasoningEffort = level; },
384
+ }
385
+ : undefined;
372
386
  // Rebase provider-owned turn state from canonical messages at user-turn start.
373
387
  // This prevents stale provider caches from replaying prior-turn context.
374
388
  providerRuntime.resetTurnState(messages);
@@ -412,6 +426,7 @@ async function runAgent(messages, callbacks, channel, signal, options) {
412
426
  signal,
413
427
  traceId,
414
428
  toolChoiceRequired,
429
+ reasoningEffort: currentReasoningEffort,
415
430
  });
416
431
  // Track usage from the latest API call
417
432
  if (result.usage)
@@ -435,6 +450,17 @@ async function runAgent(messages, callbacks, channel, signal, options) {
435
450
  if (reasoningItems.length > 0) {
436
451
  msg._reasoning_items = reasoningItems;
437
452
  }
453
+ // Store thinking blocks (Anthropic) on the assistant message for round-tripping
454
+ const thinkingItems = result.outputItems.filter((item) => "type" in item && (item.type === "thinking" || item.type === "redacted_thinking"));
455
+ if (thinkingItems.length > 0) {
456
+ msg._thinking_blocks = thinkingItems;
457
+ }
458
+ // Phase annotation for Codex provider
459
+ const hasPhaseAnnotation = providerRuntime.capabilities.has("phase-annotation");
460
+ const isSoleFinalAnswer = result.toolCalls.length === 1 && result.toolCalls[0].name === "final_answer";
461
+ if (hasPhaseAnnotation) {
462
+ msg.phase = isSoleFinalAnswer ? "final_answer" : "commentary";
463
+ }
438
464
  if (!result.toolCalls.length) {
439
465
  // No tool calls — accept response as-is.
440
466
  // (Kick detection disabled; tool_choice: required + final_answer
@@ -444,7 +470,6 @@ async function runAgent(messages, callbacks, channel, signal, options) {
444
470
  }
445
471
  else {
446
472
  // Check for final_answer sole call: intercept before tool execution
447
- const isSoleFinalAnswer = result.toolCalls.length === 1 && result.toolCalls[0].name === "final_answer";
448
473
  if (isSoleFinalAnswer) {
449
474
  // Extract answer from the tool call arguments.
450
475
  // Supports: {"answer":"text","intent":"..."} or "text" (JSON string).
@@ -536,7 +561,7 @@ async function runAgent(messages, callbacks, channel, signal, options) {
536
561
  let success;
537
562
  try {
538
563
  const execToolFn = options?.execTool ?? tools_1.execTool;
539
- toolResult = await execToolFn(tc.name, args, options?.toolContext);
564
+ toolResult = await execToolFn(tc.name, args, augmentedToolContext ?? options?.toolContext);
540
565
  success = true;
541
566
  }
542
567
  catch (e) {
@@ -0,0 +1,146 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.deliverCrossChatMessage = deliverCrossChatMessage;
const types_1 = require("../mind/friends/types");
const runtime_1 = require("../nerves/runtime");
/**
 * Builds the envelope persisted on the pending queue so the message can be
 * replayed when the target session next becomes active.
 */
function buildPendingEnvelope(request, agentName, now) {
    return {
        from: agentName,
        friendId: request.friendId,
        channel: request.channel,
        key: request.key,
        content: request.content,
        timestamp: now,
    };
}
/**
 * Queues the request for later delivery via deps.queuePending and returns a
 * "queued_for_later" outcome carrying a human-readable detail string.
 */
function queueForLater(request, deps, detail) {
    deps.queuePending(buildPendingEnvelope(request, deps.agentName, (deps.now ?? Date.now)()));
    return {
        status: "queued_for_later",
        detail,
    };
}
/**
 * Explicit cross-chat delivery is only authorized when the asking session is
 * present and its trust level passes types.isTrustedLevel.
 */
function isExplicitlyAuthorized(request) {
    return request.intent === "explicit_cross_chat"
        && Boolean(request.authorizingSession)
        && (0, types_1.isTrustedLevel)(request.authorizingSession?.trustLevel);
}
/** Common telemetry meta identifying the target chat of a delivery request. */
function deliveryMeta(request) {
    return {
        friendId: request.friendId,
        channel: request.channel,
        key: request.key,
    };
}
/**
 * Resolves a cross-chat delivery request to one of four outcomes:
 *   - "delivered_now"    — a live channel deliverer accepted the message
 *   - "queued_for_later" — stored for replay on the next active turn
 *   - "blocked"          — not authorized, or the adapter refused
 *   - "failed"           — the adapter threw
 * Every path emits an engine.cross_chat_delivery_start event followed by a
 * matching engine.cross_chat_delivery_end event.
 *
 * @param request delivery request (friendId/channel/key/content/intent plus
 *                optional authorizingSession)
 * @param deps    runtime deps: agentName, queuePending, optional deliverers
 *                map keyed by channel, optional now() clock override
 * @returns       {status, detail} outcome object (never throws)
 */
async function deliverCrossChatMessage(request, deps) {
    (0, runtime_1.emitNervesEvent)({
        component: "engine",
        event: "engine.cross_chat_delivery_start",
        message: "resolving cross-chat delivery",
        meta: {
            ...deliveryMeta(request),
            intent: request.intent,
            authorizingTrustLevel: request.authorizingSession?.trustLevel ?? null,
        },
    });
    // Generic outreach never goes out live; it always waits for the target
    // session's next active turn.
    if (request.intent === "generic_outreach") {
        const result = queueForLater(request, deps, "generic outreach stays queued until the target session is next active");
        (0, runtime_1.emitNervesEvent)({
            component: "engine",
            event: "engine.cross_chat_delivery_end",
            message: "queued generic outreach for later delivery",
            meta: { ...deliveryMeta(request), status: result.status },
        });
        return result;
    }
    if (!isExplicitlyAuthorized(request)) {
        const result = {
            status: "blocked",
            detail: "explicit cross-chat delivery requires a trusted asking session",
        };
        (0, runtime_1.emitNervesEvent)({
            level: "warn",
            component: "engine",
            event: "engine.cross_chat_delivery_end",
            message: "blocked explicit cross-chat delivery",
            meta: { ...deliveryMeta(request), status: result.status },
        });
        return result;
    }
    const deliverer = deps.deliverers?.[request.channel];
    if (!deliverer) {
        const result = queueForLater(request, deps, "live delivery unavailable right now; queued for the next active turn");
        (0, runtime_1.emitNervesEvent)({
            component: "engine",
            event: "engine.cross_chat_delivery_end",
            message: "queued explicit cross-chat delivery because no live deliverer was available",
            meta: { ...deliveryMeta(request), status: result.status },
        });
        return result;
    }
    try {
        const direct = await deliverer(request);
        if (direct.status === "delivered_now" || direct.status === "blocked" || direct.status === "failed") {
            const result = {
                status: direct.status,
                detail: direct.detail,
            };
            (0, runtime_1.emitNervesEvent)({
                level: result.status === "failed" ? "error" : result.status === "blocked" ? "warn" : "info",
                component: "engine",
                event: "engine.cross_chat_delivery_end",
                message: "completed direct cross-chat delivery resolution",
                meta: { ...deliveryMeta(request), status: result.status },
            });
            return result;
        }
        // Bug fix: the adapter may report unavailability without a detail
        // string; calling .trim() directly on undefined threw a TypeError
        // here. Coalesce to "" before trimming so the fallback text applies.
        const detail = (direct.detail ?? "").trim()
            || "live delivery unavailable right now; queued for the next active turn";
        const result = queueForLater(request, deps, detail);
        (0, runtime_1.emitNervesEvent)({
            component: "engine",
            event: "engine.cross_chat_delivery_end",
            message: "queued explicit cross-chat delivery after adapter reported unavailability",
            meta: { ...deliveryMeta(request), status: result.status },
        });
        return result;
    }
    catch (error) {
        const result = {
            status: "failed",
            detail: error instanceof Error ? error.message : String(error),
        };
        (0, runtime_1.emitNervesEvent)({
            level: "error",
            component: "engine",
            event: "engine.cross_chat_delivery_end",
            message: "cross-chat delivery threw unexpectedly",
            meta: {
                ...deliveryMeta(request),
                status: result.status,
                reason: result.detail,
            },
        });
        return result;
    }
}
@@ -0,0 +1,40 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.MODEL_CAPABILITIES = void 0;
4
+ exports.getModelCapabilities = getModelCapabilities;
5
+ const runtime_1 = require("../nerves/runtime");
6
+ exports.MODEL_CAPABILITIES = {
7
+ "claude-opus-4-6": {
8
+ reasoningEffort: ["low", "medium", "high", "max"],
9
+ thinkingFormat: "anthropic",
10
+ maxOutputTokens: 128000,
11
+ },
12
+ "claude-sonnet-4-6": {
13
+ reasoningEffort: ["low", "medium", "high"],
14
+ thinkingFormat: "anthropic",
15
+ maxOutputTokens: 64000,
16
+ },
17
+ "gpt-5.4": {
18
+ reasoningEffort: ["low", "medium", "high"],
19
+ phase: true,
20
+ maxOutputTokens: 100000,
21
+ },
22
+ "gpt-5.3-codex": {
23
+ reasoningEffort: ["low", "medium", "high"],
24
+ phase: true,
25
+ maxOutputTokens: 100000,
26
+ },
27
+ };
28
+ const EMPTY_CAPABILITIES = Object.freeze({});
29
+ function getModelCapabilities(modelId) {
30
+ (0, runtime_1.emitNervesEvent)({
31
+ component: "engine",
32
+ event: "engine.model_capabilities_lookup",
33
+ message: `model capabilities lookup: ${modelId}`,
34
+ meta: { modelId, found: modelId in exports.MODEL_CAPABILITIES },
35
+ });
36
+ const entry = exports.MODEL_CAPABILITIES[modelId];
37
+ if (entry)
38
+ return entry;
39
+ return { ...EMPTY_CAPABILITIES };
40
+ }
@@ -3,12 +3,14 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
3
3
  return (mod && mod.__esModule) ? mod : { "default": mod };
4
4
  };
5
5
  Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.toAnthropicMessages = toAnthropicMessages;
6
7
  exports.createAnthropicProviderRuntime = createAnthropicProviderRuntime;
7
8
  const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
8
9
  const config_1 = require("../config");
9
10
  const identity_1 = require("../identity");
10
11
  const runtime_1 = require("../../nerves/runtime");
11
12
  const streaming_1 = require("../streaming");
13
+ const model_capabilities_1 = require("../model-capabilities");
12
14
  const ANTHROPIC_SETUP_TOKEN_PREFIX = "sk-ant-oat01-";
13
15
  const ANTHROPIC_SETUP_TOKEN_MIN_LENGTH = 80;
14
16
  const ANTHROPIC_OAUTH_BETA_HEADER = "claude-code-20250219,oauth-2025-04-20,fine-grained-tool-streaming-2025-05-14,interleaved-thinking-2025-05-14";
@@ -93,6 +95,18 @@ function toAnthropicMessages(messages) {
93
95
  if (msg.role === "assistant") {
94
96
  const assistant = msg;
95
97
  const blocks = [];
98
+ // Restore thinking blocks before text/tool_use blocks
99
+ const thinkingBlocks = assistant._thinking_blocks;
100
+ if (thinkingBlocks) {
101
+ for (const tb of thinkingBlocks) {
102
+ if (tb.type === "thinking") {
103
+ blocks.push({ type: "thinking", thinking: tb.thinking, signature: tb.signature });
104
+ }
105
+ else {
106
+ blocks.push({ type: "redacted_thinking", data: tb.data });
107
+ }
108
+ }
109
+ }
96
110
  const text = toAnthropicTextContent(assistant.content);
97
111
  if (text) {
98
112
  blocks.push({ type: "text", text });
@@ -195,11 +209,14 @@ function withAnthropicAuthGuidance(error) {
195
209
  async function streamAnthropicMessages(client, model, request) {
196
210
  const { system, messages } = toAnthropicMessages(request.messages);
197
211
  const anthropicTools = toAnthropicTools(request.activeTools);
212
+ const modelCaps = (0, model_capabilities_1.getModelCapabilities)(model);
213
+ const maxTokens = modelCaps.maxOutputTokens ?? 16384;
198
214
  const params = {
199
215
  model,
200
- max_tokens: 4096,
216
+ max_tokens: maxTokens,
201
217
  messages,
202
218
  stream: true,
219
+ thinking: { type: "adaptive", effort: request.reasoningEffort ?? "medium" },
203
220
  };
204
221
  if (system)
205
222
  params.system = system;
@@ -219,6 +236,8 @@ async function streamAnthropicMessages(client, model, request) {
219
236
  let streamStarted = false;
220
237
  let usage;
221
238
  const toolCalls = new Map();
239
+ const thinkingBlocks = new Map();
240
+ const redactedBlocks = new Map();
222
241
  const answerStreamer = new streaming_1.FinalAnswerStreamer(request.callbacks);
223
242
  try {
224
243
  for await (const event of response) {
@@ -227,8 +246,14 @@ async function streamAnthropicMessages(client, model, request) {
227
246
  const eventType = String(event.type ?? "");
228
247
  if (eventType === "content_block_start") {
229
248
  const block = event.content_block;
230
- if (block?.type === "tool_use") {
231
- const index = Number(event.index);
249
+ const index = Number(event.index);
250
+ if (block?.type === "thinking") {
251
+ thinkingBlocks.set(index, { type: "thinking", thinking: "", signature: "" });
252
+ }
253
+ else if (block?.type === "redacted_thinking") {
254
+ redactedBlocks.set(index, { type: "redacted_thinking", data: String(block.data ?? "") });
255
+ }
256
+ else if (block?.type === "tool_use") {
232
257
  const rawInput = block.input;
233
258
  const input = rawInput && typeof rawInput === "object"
234
259
  ? JSON.stringify(rawInput)
@@ -265,7 +290,19 @@ async function streamAnthropicMessages(client, model, request) {
265
290
  request.callbacks.onModelStreamStart();
266
291
  streamStarted = true;
267
292
  }
268
- request.callbacks.onReasoningChunk(String(delta?.thinking ?? ""));
293
+ const thinkingText = String(delta?.thinking ?? "");
294
+ request.callbacks.onReasoningChunk(thinkingText);
295
+ const thinkingIndex = Number(event.index);
296
+ const thinkingBlock = thinkingBlocks.get(thinkingIndex);
297
+ if (thinkingBlock)
298
+ thinkingBlock.thinking += thinkingText;
299
+ continue;
300
+ }
301
+ if (deltaType === "signature_delta") {
302
+ const sigIndex = Number(event.index);
303
+ const sigBlock = thinkingBlocks.get(sigIndex);
304
+ if (sigBlock)
305
+ sigBlock.signature += String(delta?.signature ?? "");
269
306
  continue;
270
307
  }
271
308
  if (deltaType === "input_json_delta") {
@@ -301,10 +338,18 @@ async function streamAnthropicMessages(client, model, request) {
301
338
  catch (error) {
302
339
  throw withAnthropicAuthGuidance(error);
303
340
  }
341
+ // Collect all thinking blocks (regular + redacted) sorted by index to preserve ordering
342
+ const allThinkingIndices = [...thinkingBlocks.keys(), ...redactedBlocks.keys()].sort((a, b) => a - b);
343
+ const outputItems = allThinkingIndices.map((idx) => {
344
+ const tb = thinkingBlocks.get(idx);
345
+ if (tb)
346
+ return tb;
347
+ return redactedBlocks.get(idx);
348
+ });
304
349
  return {
305
350
  content,
306
351
  toolCalls: [...toolCalls.values()],
307
- outputItems: [],
352
+ outputItems,
308
353
  usage,
309
354
  finalAnswerStreamed: answerStreamer.streamed,
310
355
  };
@@ -320,6 +365,10 @@ function createAnthropicProviderRuntime() {
320
365
  if (!(anthropicConfig.model && anthropicConfig.setupToken)) {
321
366
  throw new Error(getAnthropicReauthGuidance("provider 'anthropic' is selected in agent.json but providers.anthropic.model/setupToken is incomplete in secrets.json."));
322
367
  }
368
+ const modelCaps = (0, model_capabilities_1.getModelCapabilities)(anthropicConfig.model);
369
+ const capabilities = new Set();
370
+ if (modelCaps.reasoningEffort)
371
+ capabilities.add("reasoning-effort");
323
372
  const credential = resolveAnthropicSetupTokenCredential();
324
373
  const client = new sdk_1.default({
325
374
  authToken: credential.token,
@@ -333,6 +382,8 @@ function createAnthropicProviderRuntime() {
333
382
  id: "anthropic",
334
383
  model: anthropicConfig.model,
335
384
  client,
385
+ capabilities,
386
+ supportedReasoningEfforts: modelCaps.reasoningEffort,
336
387
  resetTurnState(_messages) {
337
388
  // Anthropic request payload is derived from canonical messages each turn.
338
389
  },
@@ -5,6 +5,7 @@ const openai_1 = require("openai");
5
5
  const config_1 = require("../config");
6
6
  const runtime_1 = require("../../nerves/runtime");
7
7
  const streaming_1 = require("../streaming");
8
+ const model_capabilities_1 = require("../model-capabilities");
8
9
  function createAzureProviderRuntime() {
9
10
  (0, runtime_1.emitNervesEvent)({
10
11
  component: "engine",
@@ -16,6 +17,10 @@ function createAzureProviderRuntime() {
16
17
  if (!(azureConfig.apiKey && azureConfig.endpoint && azureConfig.deployment && azureConfig.modelName)) {
17
18
  throw new Error("provider 'azure' is selected in agent.json but providers.azure is incomplete in secrets.json.");
18
19
  }
20
+ const modelCaps = (0, model_capabilities_1.getModelCapabilities)(azureConfig.modelName);
21
+ const capabilities = new Set();
22
+ if (modelCaps.reasoningEffort)
23
+ capabilities.add("reasoning-effort");
19
24
  const client = new openai_1.AzureOpenAI({
20
25
  apiKey: azureConfig.apiKey,
21
26
  endpoint: azureConfig.endpoint.replace(/\/openai.*$/, ""),
@@ -30,6 +35,8 @@ function createAzureProviderRuntime() {
30
35
  id: "azure",
31
36
  model: azureConfig.modelName,
32
37
  client,
38
+ capabilities,
39
+ supportedReasoningEfforts: modelCaps.reasoningEffort,
33
40
  resetTurnState(messages) {
34
41
  const { instructions, input } = (0, streaming_1.toResponsesInput)(messages);
35
42
  nativeInput = input;
@@ -48,7 +55,7 @@ function createAzureProviderRuntime() {
48
55
  input: nativeInput,
49
56
  instructions: nativeInstructions,
50
57
  tools: (0, streaming_1.toResponsesTools)(request.activeTools),
51
- reasoning: { effort: "medium", summary: "detailed" },
58
+ reasoning: { effort: request.reasoningEffort ?? "medium", summary: "detailed" },
52
59
  stream: true,
53
60
  store: false,
54
61
  include: ["reasoning.encrypted_content"],
@@ -8,6 +8,7 @@ const openai_1 = __importDefault(require("openai"));
8
8
  const config_1 = require("../config");
9
9
  const runtime_1 = require("../../nerves/runtime");
10
10
  const streaming_1 = require("../streaming");
11
+ const model_capabilities_1 = require("../model-capabilities");
11
12
  function createMinimaxProviderRuntime() {
12
13
  (0, runtime_1.emitNervesEvent)({
13
14
  component: "engine",
@@ -19,6 +20,8 @@ function createMinimaxProviderRuntime() {
19
20
  if (!minimaxConfig.apiKey) {
20
21
  throw new Error("provider 'minimax' is selected in agent.json but providers.minimax.apiKey is missing in secrets.json.");
21
22
  }
23
+ // Registry consulted; MiniMax models return empty defaults (no capabilities to derive)
24
+ (0, model_capabilities_1.getModelCapabilities)(minimaxConfig.model);
22
25
  const client = new openai_1.default({
23
26
  apiKey: minimaxConfig.apiKey,
24
27
  baseURL: "https://api.minimaxi.chat/v1",
@@ -29,6 +32,7 @@ function createMinimaxProviderRuntime() {
29
32
  id: "minimax",
30
33
  model: minimaxConfig.model,
31
34
  client,
35
+ capabilities: new Set(),
32
36
  resetTurnState(_messages) {
33
37
  // No provider-owned turn state for chat-completions providers.
34
38
  },
@@ -9,6 +9,7 @@ const config_1 = require("../config");
9
9
  const identity_1 = require("../identity");
10
10
  const runtime_1 = require("../../nerves/runtime");
11
11
  const streaming_1 = require("../streaming");
12
+ const model_capabilities_1 = require("../model-capabilities");
12
13
  const OPENAI_CODEX_AUTH_FAILURE_MARKERS = [
13
14
  "authentication failed",
14
15
  "unauthorized",
@@ -106,6 +107,12 @@ function createOpenAICodexProviderRuntime() {
106
107
  if (!chatgptAccountId) {
107
108
  throw new Error(getOpenAICodexReauthGuidance("OpenAI Codex OAuth access token is missing a chatgpt_account_id claim required for chatgpt.com/backend-api/codex."));
108
109
  }
110
+ const modelCaps = (0, model_capabilities_1.getModelCapabilities)(codexConfig.model);
111
+ const capabilities = new Set();
112
+ if (modelCaps.reasoningEffort)
113
+ capabilities.add("reasoning-effort");
114
+ if (modelCaps.phase)
115
+ capabilities.add("phase-annotation");
109
116
  const client = new openai_1.default({
110
117
  apiKey: token,
111
118
  baseURL: OPENAI_CODEX_BACKEND_BASE_URL,
@@ -123,6 +130,8 @@ function createOpenAICodexProviderRuntime() {
123
130
  id: "openai-codex",
124
131
  model: codexConfig.model,
125
132
  client,
133
+ capabilities,
134
+ supportedReasoningEfforts: modelCaps.reasoningEffort,
126
135
  resetTurnState(messages) {
127
136
  const { instructions, input } = (0, streaming_1.toResponsesInput)(messages);
128
137
  nativeInput = input;
@@ -141,7 +150,7 @@ function createOpenAICodexProviderRuntime() {
141
150
  input: nativeInput,
142
151
  instructions: nativeInstructions,
143
152
  tools: (0, streaming_1.toResponsesTools)(request.activeTools),
144
- reasoning: { effort: "medium", summary: "detailed" },
153
+ reasoning: { effort: request.reasoningEffort ?? "medium", summary: "detailed" },
145
154
  stream: true,
146
155
  store: false,
147
156
  include: ["reasoning.encrypted_content"],
@@ -185,7 +185,10 @@ function toResponsesInput(messages) {
185
185
  }
186
186
  }
187
187
  if (a.content) {
188
- input.push({ role: "assistant", content: typeof a.content === "string" ? a.content : "" });
188
+ const assistantItem = { role: "assistant", content: typeof a.content === "string" ? a.content : "" };
189
+ if (a.phase)
190
+ assistantItem.phase = a.phase;
191
+ input.push(assistantItem);
189
192
  }
190
193
  if (a.tool_calls) {
191
194
  for (const tc of a.tool_calls) {