chainlesschain 0.38.1 → 0.40.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -35,6 +35,10 @@ import {
 import { storeMemory, consolidateMemory } from "../lib/hierarchical-memory.js";
 import { CLIContextEngineering } from "../lib/cli-context-engineering.js";
 import { createChatFn } from "../lib/cowork-adapter.js";
+import {
+  detectTaskType,
+  selectModelForTask,
+} from "../lib/task-model-selector.js";
 import { executeHooks, HookEvents } from "../lib/hook-manager.js";
 import { CLIPermanentMemory } from "../lib/permanent-memory.js";
 import { CLIAutonomousAgent, GoalStatus } from "../lib/autonomous-agent.js";
@@ -565,6 +569,7 @@ async function chatWithTools(rawMessages, options) {
     dashscope: "https://dashscope.aliyuncs.com/compatible-mode/v1",
     mistral: "https://api.mistral.ai/v1",
     gemini: "https://generativelanguage.googleapis.com/v1beta/openai",
+    volcengine: "https://ark.cn-beijing.volces.com/api/v3",
   };
 
   const providerApiKeyEnvs = {
@@ -573,6 +578,7 @@ async function chatWithTools(rawMessages, options) {
     dashscope: "DASHSCOPE_API_KEY",
     mistral: "MISTRAL_API_KEY",
     gemini: "GEMINI_API_KEY",
+    volcengine: "VOLCENGINE_API_KEY",
   };
 
   const url =
@@ -582,7 +588,7 @@ async function chatWithTools(rawMessages, options) {
 
   if (!url) {
     throw new Error(
-      `Unsupported provider: ${provider}. Supported: ollama, anthropic, openai, deepseek, dashscope, mistral, gemini`,
+      `Unsupported provider: ${provider}. Supported: ollama, anthropic, openai, deepseek, dashscope, mistral, gemini, volcengine`,
     );
   }
 
@@ -596,6 +602,7 @@ async function chatWithTools(rawMessages, options) {
     dashscope: "qwen-turbo",
     mistral: "mistral-large-latest",
     gemini: "gemini-2.0-flash",
+    volcengine: "doubao-seed-1-6-251015",
   };
 
   const response = await fetch(`${url}/chat/completions`, {
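The volcengine entries above register the new provider end to end in chatWithTools: a base URL, an API-key environment variable, and a default model, all feeding the same OpenAI-compatible request path. A minimal sketch of how those maps resolve for the new provider; the Bearer header and body shape are assumptions based on the OpenAI-compatible convention, since the diff only shows the fetch URL:

    // Illustrative resolution for volcengine when no explicit model or key
    // is passed: URL from the provider-URL map, key via providerApiKeyEnvs,
    // model from the defaults above.
    const url = "https://ark.cn-beijing.volces.com/api/v3";
    const apiKey = process.env.VOLCENGINE_API_KEY;
    const model = "doubao-seed-1-6-251015";

    const response = await fetch(`${url}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        // Assumed: standard OpenAI-compatible bearer auth; not shown in the diff.
        Authorization: `Bearer ${apiKey}`,
      },
      body: JSON.stringify({ model, messages: [{ role: "user", content: "hi" }] }),
    });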
@@ -769,7 +776,7 @@ export async function startAgentRepl(options = {}) {
   let model = options.model || "qwen2:7b";
   let provider = options.provider || "ollama";
   const baseUrl = options.baseUrl || "http://localhost:11434";
-  const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
+  const apiKey = options.apiKey || null;
 
   // Bootstrap runtime (best-effort, DB not required)
   let db = null;
@@ -974,6 +981,7 @@ export async function startAgentRepl(options = {}) {
         "dashscope",
         "mistral",
         "gemini",
+        "volcengine",
       ];
       if (supported.includes(arg)) {
         provider = arg;
@@ -987,7 +995,7 @@ export async function startAgentRepl(options = {}) {
       logger.info(`Current provider: ${chalk.cyan(provider)}`);
       logger.info(
         chalk.gray(
-          "Available: ollama, anthropic, openai, deepseek, dashscope, mistral, gemini",
+          "Available: ollama, anthropic, openai, deepseek, dashscope, mistral, gemini, volcengine",
         ),
       );
     }
@@ -1556,11 +1564,24 @@ export async function startAgentRepl(options = {}) {
     // Add user message
     messages.push({ role: "user", content: trimmed });
 
+    // Auto-select best model based on task type
+    let activeModel = model;
+    const taskDetection = detectTaskType(trimmed);
+    if (taskDetection.confidence > 0.3) {
+      const recommended = selectModelForTask(provider, taskDetection.taskType);
+      if (recommended && recommended !== activeModel) {
+        activeModel = recommended;
+        logger.info(
+          chalk.gray(`[auto] ${taskDetection.name} → ${activeModel}`),
+        );
+      }
+    }
+
     try {
       process.stdout.write("\n");
       const response = await agentLoop(messages, {
         provider,
-        model,
+        model: activeModel,
         baseUrl,
         apiKey,
         contextEngine,
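../lib/task-model-selector.js is not included in this diff, but the call sites above pin down its contract: detectTaskType(text) returns an object with taskType, name, and confidence, and selectModelForTask(provider, taskType) returns a model id or a falsy value, which leaves the session model untouched. A hypothetical sketch consistent with that contract; the real module's heuristics and model table will differ:

    // Hypothetical sketch of ../lib/task-model-selector.js, inferred from
    // the call sites above. Patterns and model names are placeholders.
    const TASK_PATTERNS = [
      { taskType: "coding", name: "coding", re: /\b(code|function|bug|refactor)\b/i },
      { taskType: "reasoning", name: "reasoning", re: /\b(why|prove|explain|analyze)\b/i },
    ];

    export function detectTaskType(text) {
      for (const p of TASK_PATTERNS) {
        if (p.re.test(text)) {
          // confidence is compared against the 0.3 threshold in the REPL loop
          return { taskType: p.taskType, name: p.name, confidence: 0.6 };
        }
      }
      return { taskType: "general", name: "general", confidence: 0 };
    }

    export function selectModelForTask(provider, taskType) {
      // Placeholder per-provider table; returning null means "no opinion",
      // which the caller treats as "keep the current model".
      const table = {
        volcengine: { coding: "doubao-seed-1-6-251015" },
      };
      return table[provider]?.[taskType] ?? null;
    }

Note the 0.3 threshold in the REPL loop: weak or absent matches keep the user's configured model rather than forcing a switch.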
@@ -11,6 +11,7 @@
 import readline from "readline";
 import chalk from "chalk";
 import { logger } from "../lib/logger.js";
+import { BUILT_IN_PROVIDERS } from "../lib/llm-providers.js";
 
 const SLASH_COMMANDS = {
   "/exit": "Exit the chat",
@@ -128,7 +129,7 @@ export async function startChatRepl(options = {}) {
   let model = options.model || "qwen2:7b";
   let provider = options.provider || "ollama";
   const baseUrl = options.baseUrl || "http://localhost:11434";
-  const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
+  const apiKey = options.apiKey || null;
 
   const messages = [];
 
@@ -236,14 +237,21 @@ export async function startChatRepl(options = {}) {
 
     if (provider === "ollama") {
       response = await streamOllama(messages, model, baseUrl, onToken);
-    } else if (provider === "openai") {
+    } else {
+      // OpenAI-compatible providers (openai, volcengine, deepseek, dashscope, mistral, gemini, anthropic-proxy)
+      const providerDef = BUILT_IN_PROVIDERS[provider];
       const url =
         baseUrl !== "http://localhost:11434"
           ? baseUrl
-          : "https://api.openai.com/v1";
-      response = await streamOpenAI(messages, model, url, apiKey, onToken);
-    } else {
-      throw new Error(`Unsupported provider: ${provider}`);
+          : providerDef?.baseUrl || "https://api.openai.com/v1";
+      const key =
+        apiKey ||
+        (providerDef?.apiKeyEnv ? process.env[providerDef.apiKeyEnv] : null);
+      if (!key)
+        throw new Error(
+          `API key required for ${provider} (set ${providerDef?.apiKeyEnv || "API key"})`,
+        );
+      response = await streamOpenAI(messages, model, url, key, onToken);
     }
 
     process.stdout.write("\n\n");
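BUILT_IN_PROVIDERS comes from ../lib/llm-providers.js, which this diff does not include; the branch above reads only .baseUrl and .apiKeyEnv from each entry. A plausible shape, assembled from the URLs and env-var names already listed in chatWithTools:

    // Assumed shape of BUILT_IN_PROVIDERS; the chat REPL reads only baseUrl
    // and apiKeyEnv. Values below are the ones shown earlier in this diff.
    export const BUILT_IN_PROVIDERS = {
      openai: { baseUrl: "https://api.openai.com/v1", apiKeyEnv: "OPENAI_API_KEY" },
      mistral: { baseUrl: "https://api.mistral.ai/v1", apiKeyEnv: "MISTRAL_API_KEY" },
      gemini: {
        baseUrl: "https://generativelanguage.googleapis.com/v1beta/openai",
        apiKeyEnv: "GEMINI_API_KEY",
      },
      volcengine: {
        baseUrl: "https://ark.cn-beijing.volces.com/api/v3",
        apiKeyEnv: "VOLCENGINE_API_KEY",
      },
      // deepseek, dashscope, etc. would follow the same pattern
    };

With a table like this, supporting another OpenAI-compatible provider in the chat REPL is a one-entry change rather than a new else-if branch, which is the refactor the hunk above performs.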