open-agents-ai 0.187.370 → 0.187.371
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +77 -11
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -515664,6 +515664,26 @@ ${todoItems}
 </system-reminder>`;
   return { shouldInject: true, content, reason: "injected" };
 }
+function stripThinkBlocks(s2) {
+  if (!s2)
+    return s2;
+  return s2.replace(/<think>[\s\S]*?<\/think>/g, "").trim();
+}
+function computeEffectiveThink(params) {
+  if (process.env["OA_FORCE_NO_THINK"] === "1")
+    return false;
+  if (params.hasTools)
+    return false;
+  if (typeof params.requestThink === "boolean")
+    return params.requestThink;
+  if (process.env["OA_THINK_AUTO"] === "1" && Array.isArray(params.messages)) {
+    const blob = params.messages.filter((m2) => m2.role === "user" || m2.role === "system").map((m2) => typeof m2.content === "string" ? m2.content : "").join("\n").toLowerCase();
+    if (/\b(plan|decompose|analyze(?:\s+complex)?|step\s*by\s*step|reason through|think through)\b/.test(blob)) {
+      return true;
+    }
+  }
+  return params.defaultThink;
+}
 var SYSTEM_PROMPT, SYSTEM_PROMPT_MEDIUM, SYSTEM_PROMPT_SMALL, VISUAL_TOOLS, AUDIO_TOOLS, SOCIAL_TOOLS, SPATIAL_TOOLS, CODE_TOOLS, AgenticRunner, OllamaAgenticBackend;
 var init_agenticRunner = __esm({
   "packages/orchestrator/dist/agenticRunner.js"() {
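The two helpers above centralize how reasoning ("think") output is handled: `stripThinkBlocks` removes `<think>…</think>` spans from assistant history before it is resent, and `computeEffectiveThink` resolves the flag with a fixed precedence (OA_FORCE_NO_THINK=1, then tool-bearing requests, then an explicit per-request boolean, then the OA_THINK_AUTO=1 keyword heuristic, then the backend default). A minimal sketch of that precedence, using the params shape shown in the hunk; the example values are illustrative, not taken from the package:

```js
// Illustrative calls only; computeEffectiveThink is the helper added in the hunk above.
const params = (overrides) => ({
  requestThink: undefined, // no explicit per-request override
  defaultThink: true,      // backend configured with thinking enabled
  hasTools: false,
  messages: [{ role: "user", content: "plan the migration step by step" }],
  ...overrides,
});

computeEffectiveThink(params({}));                      // true  (falls through to defaultThink)
computeEffectiveThink(params({ hasTools: true }));      // false (any tools force thinking off)
computeEffectiveThink(params({ requestThink: false })); // false (explicit request beats defaultThink)
// With OA_THINK_AUTO=1 and defaultThink: false, the "plan ... step by step"
// message would match the keyword regex and return true instead.
```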
@@ -521116,7 +521136,7 @@ ${description}`
     this.baseUrl = normalizeBaseUrl(baseUrl);
     this.model = model;
     this.apiKey = apiKey ?? "";
-    this.thinking = thinking ??
+    this.thinking = thinking ?? false;
     this._isAnthropic = /api\.anthropic\.com/i.test(baseUrl);
   }
   /** Set multiple API keys for round-robin rotation per request */
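The only change in this hunk is the constructor fallback: when no `thinking` value is supplied it now resolves to `false`, so reasoning is opt-in per backend instance (the removed line is truncated in this diff view, so the previous fallback is not visible here). A tiny sketch of how the nullish coalescing resolves; the helper name is hypothetical:

```js
// Hypothetical helper, only to show how `thinking ?? false` behaves.
const resolveThinking = (thinking) => thinking ?? false;

resolveThinking(undefined); // false (new default when the caller passes nothing)
resolveThinking(null);      // false
resolveThinking(true);      // true  (explicit opt-in still wins)
```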
@@ -521151,13 +521171,24 @@ ${description}`
     if (this._isAnthropic) {
       return this._anthropicChatCompletion(request);
     }
+    const cleanedMessages = request.messages.map((m2) => m2.role === "assistant" && typeof m2.content === "string" ? { ...m2, content: stripThinkBlocks(m2.content) } : m2);
+    const effectiveThink = computeEffectiveThink({
+      requestThink: request.think,
+      defaultThink: this.thinking,
+      hasTools: Array.isArray(request.tools) && request.tools.length > 0,
+      messages: cleanedMessages
+    });
+    let effectiveMaxTokens = request.maxTokens;
+    if (effectiveThink === true && (effectiveMaxTokens ?? 0) < 4096) {
+      effectiveMaxTokens = 4096;
+    }
     const body = {
       model: this.model,
-      messages:
+      messages: cleanedMessages,
       tools: request.tools,
       temperature: request.temperature,
-      max_tokens:
-      think:
+      max_tokens: effectiveMaxTokens,
+      think: effectiveThink
     };
     const fetchOpts = {
       method: "POST",
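`chatCompletion` now strips old `<think>` blocks out of assistant history, resolves the think flag once via `computeEffectiveThink`, and raises `max_tokens` to at least 4096 whenever thinking is on so the reasoning block cannot exhaust the completion budget. A sketch of how a single request might resolve, assuming a hypothetical `request` object that only uses the fields read in the hunk:

```js
// Hypothetical request; field names (think, tools, maxTokens, messages, temperature) match the hunk.
const request = {
  think: true,
  tools: undefined,
  maxTokens: 1024,
  temperature: 0.2,
  messages: [
    { role: "assistant", content: "<think>scratch work</think>Final answer." },
    { role: "user", content: "continue" },
  ],
};

const cleanedMessages = request.messages.map((m) =>
  m.role === "assistant" && typeof m.content === "string"
    ? { ...m, content: stripThinkBlocks(m.content) } // "Final answer."
    : m
);
const effectiveThink = computeEffectiveThink({
  requestThink: request.think, // explicit true wins here
  defaultThink: false,
  hasTools: Array.isArray(request.tools) && request.tools.length > 0,
  messages: cleanedMessages,
}); // true
const effectiveMaxTokens =
  effectiveThink === true && (request.maxTokens ?? 0) < 4096 ? 4096 : request.maxTokens; // 4096
```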
@@ -521314,15 +521345,26 @@ ${description}`
    * The existing chatCompletion() method is completely unmodified.
    */
   async *chatCompletionStream(request) {
+    const cleanedMessages = request.messages.map((m2) => m2.role === "assistant" && typeof m2.content === "string" ? { ...m2, content: stripThinkBlocks(m2.content) } : m2);
+    const effectiveThink = computeEffectiveThink({
+      requestThink: request.think,
+      defaultThink: this.thinking,
+      hasTools: Array.isArray(request.tools) && request.tools.length > 0,
+      messages: cleanedMessages
+    });
+    let effectiveMaxTokens = request.maxTokens;
+    if (effectiveThink === true && (effectiveMaxTokens ?? 0) < 4096) {
+      effectiveMaxTokens = 4096;
+    }
     const body = {
       model: this.model,
-      messages:
+      messages: cleanedMessages,
       tools: request.tools,
       temperature: request.temperature,
-      max_tokens:
+      max_tokens: effectiveMaxTokens,
       stream: true,
       stream_options: { include_usage: true },
-      think:
+      think: effectiveThink
     };
     const streamFetchOpts = {
       method: "POST",
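`chatCompletionStream` gets the identical preprocessing, so streamed and non-streamed calls now send the same `messages`, `max_tokens`, and `think` values; the request bodies differ only in the streaming fields. A small sketch of that relationship (the builder function is hypothetical, the field names come from the two hunks):

```js
// Hypothetical builder showing the shared fields versus the streaming-only additions.
function buildBodies({ model, cleanedMessages, tools, temperature, effectiveMaxTokens, effectiveThink }) {
  const base = {
    model,
    messages: cleanedMessages,
    tools,
    temperature,
    max_tokens: effectiveMaxTokens,
    think: effectiveThink,
  };
  return {
    completion: base,                                                              // chatCompletion body
    streaming: { ...base, stream: true, stream_options: { include_usage: true } }, // chatCompletionStream body
  };
}
```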
@@ -546123,12 +546165,36 @@ Clone a new voice: /voice clone <wav-file> [name]`);
         return "handled";
       }
       case "think": {
-        const
+        const token = (arg || "").trim().toLowerCase();
+        const desc = (s2) => s2 ? "🧠 models that support reasoning (Qwen3, DeepSeek-R1, etc.) will show their thinking chain; tool calls still run direct" : "⚡ direct-answer mode (reasoning suppressed); recommended for tool-heavy workflows";
+        if (token === "status" || token === "?") {
+          const cur = ctx3.config.thinking ?? false;
+          renderInfo2(`Thinking mode: ${cur ? "on" : "off"} — ${desc(cur)}`);
+          if (process.env["OA_THINK_AUTO"] === "1") renderInfo2("Auto-heuristic active (OA_THINK_AUTO=1)");
+          if (process.env["OA_FORCE_NO_THINK"] === "1") renderWarning2("OA_FORCE_NO_THINK=1 forces off regardless of /think setting");
+          return "handled";
+        }
+        if (token === "auto") {
+          process.env["OA_THINK_AUTO"] = "1";
+          renderInfo2("Thinking auto-heuristic enabled: /think flips on when user message contains plan/decompose/analyze/step-by-step/reason-through. Tool calls still force off. Unset with OA_THINK_AUTO=0.");
+          return "handled";
+        }
+        let isOn;
+        if (token === "on" || token === "true" || token === "yes" || token === "1") {
+          isOn = true;
+          ctx3.config.thinking = true;
+        } else if (token === "off" || token === "false" || token === "no" || token === "0") {
+          isOn = false;
+          ctx3.config.thinking = false;
+        } else {
+          isOn = ctx3.thinkToggle();
+        }
         const save2 = hasLocal ? ctx3.saveLocalSettings.bind(ctx3) : ctx3.saveSettings.bind(ctx3);
         save2({ thinking: isOn });
-        renderInfo2(
-
-
+        renderInfo2(`Thinking mode: ${isOn ? "on" : "off"}${hasLocal ? " (project-local)" : ""} — ${desc(isOn)}`);
+        if (isOn) {
+          renderInfo2("Note: max_tokens will auto-raise to ≥4096 per request to prevent <think> truncation.");
+        }
         return "handled";
       }
       case "tools": {
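The `/think` slash command previously only toggled; it now understands explicit tokens: `status` or `?` reports the current mode plus any env overrides, `auto` enables the OA_THINK_AUTO heuristic, `on|true|yes|1` and `off|false|no|0` set the flag directly, and any other input falls back to the old toggle. A standalone sketch of the same token parsing, separated from the ctx/rendering plumbing; the function name is hypothetical:

```js
// Hypothetical parser mirroring the token handling added in the hunk above.
function parseThinkArg(arg, currentlyOn) {
  const token = (arg || "").trim().toLowerCase();
  if (token === "status" || token === "?") return { action: "status" };
  if (token === "auto") return { action: "auto" };
  if (["on", "true", "yes", "1"].includes(token)) return { action: "set", value: true };
  if (["off", "false", "no", "0"].includes(token)) return { action: "set", value: false };
  return { action: "set", value: !currentlyOn }; // bare `/think` keeps the old toggle behavior
}

parseThinkArg("on", false);    // { action: "set", value: true }
parseThinkArg("", true);       // { action: "set", value: false } (toggle)
parseThinkArg("status", true); // { action: "status" }
```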
package/package.json
CHANGED