claudish 3.3.9 → 3.3.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +142 -83
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -56301,31 +56301,51 @@ var init_remote_provider_types = __esm(() => {
56301
56301
  "gemini-3.0-flash": { inputCostPer1M: 0.1, outputCostPer1M: 0.4 },
56302
56302
  "gemini-2.0-flash": { inputCostPer1M: 0.1, outputCostPer1M: 0.4 },
56303
56303
  "gemini-2.0-flash-thinking": { inputCostPer1M: 0.1, outputCostPer1M: 0.4 },
56304
- default: { inputCostPer1M: 0.5, outputCostPer1M: 2 }
56304
+ default: { inputCostPer1M: 0.5, outputCostPer1M: 2, isEstimate: true }
56305
56305
  };
56306
56306
  OPENAI_PRICING = {
56307
- "gpt-5": { inputCostPer1M: 2, outputCostPer1M: 8 },
56308
- "gpt-5.2": { inputCostPer1M: 2.5, outputCostPer1M: 10 },
56309
- "gpt-5-turbo": { inputCostPer1M: 1.5, outputCostPer1M: 6 },
56310
- "gpt-5.1-codex": { inputCostPer1M: 3, outputCostPer1M: 12 },
56307
+ "gpt-5.2": { inputCostPer1M: 1.75, outputCostPer1M: 14 },
56308
+ "gpt-5.2-codex": { inputCostPer1M: 1.75, outputCostPer1M: 14 },
56309
+ "gpt-5.2-chat-latest": { inputCostPer1M: 1.75, outputCostPer1M: 14 },
56310
+ "gpt-5.2-pro": { inputCostPer1M: 21, outputCostPer1M: 168 },
56311
+ "gpt-5.1": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56312
+ "gpt-5.1-codex": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56313
+ "gpt-5.1-codex-max": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56314
+ "gpt-5.1-codex-mini": { inputCostPer1M: 0.25, outputCostPer1M: 2 },
56315
+ "gpt-5.1-chat-latest": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56316
+ "gpt-5": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56317
+ "gpt-5-codex": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56318
+ "gpt-5-chat-latest": { inputCostPer1M: 1.25, outputCostPer1M: 10 },
56319
+ "gpt-5-mini": { inputCostPer1M: 0.25, outputCostPer1M: 2 },
56320
+ "gpt-5-nano": { inputCostPer1M: 0.05, outputCostPer1M: 0.4 },
56321
+ "gpt-5-pro": { inputCostPer1M: 15, outputCostPer1M: 120 },
56322
+ "gpt-4.1": { inputCostPer1M: 2, outputCostPer1M: 8 },
56323
+ "gpt-4.1-mini": { inputCostPer1M: 0.4, outputCostPer1M: 1.6 },
56324
+ "gpt-4.1-nano": { inputCostPer1M: 0.1, outputCostPer1M: 0.4 },
56311
56325
  "gpt-4o": { inputCostPer1M: 2.5, outputCostPer1M: 10 },
56326
+ "gpt-4o-2024-05-13": { inputCostPer1M: 5, outputCostPer1M: 15 },
56312
56327
  "gpt-4o-mini": { inputCostPer1M: 0.15, outputCostPer1M: 0.6 },
56313
56328
  "gpt-4o-audio": { inputCostPer1M: 2.5, outputCostPer1M: 10 },
56314
56329
  o1: { inputCostPer1M: 15, outputCostPer1M: 60 },
56315
- "o1-mini": { inputCostPer1M: 3, outputCostPer1M: 12 },
56316
- "o1-preview": { inputCostPer1M: 15, outputCostPer1M: 60 },
56317
- o3: { inputCostPer1M: 15, outputCostPer1M: 60 },
56318
- "o3-mini": { inputCostPer1M: 3, outputCostPer1M: 12 },
56330
+ "o1-mini": { inputCostPer1M: 1.1, outputCostPer1M: 4.4 },
56331
+ "o1-pro": { inputCostPer1M: 150, outputCostPer1M: 600 },
56332
+ o3: { inputCostPer1M: 2, outputCostPer1M: 8 },
56333
+ "o3-mini": { inputCostPer1M: 1.1, outputCostPer1M: 4.4 },
56334
+ "o3-pro": { inputCostPer1M: 20, outputCostPer1M: 80 },
56335
+ "o3-deep-research": { inputCostPer1M: 10, outputCostPer1M: 40 },
56336
+ "o4-mini": { inputCostPer1M: 1.1, outputCostPer1M: 4.4 },
56337
+ "o4-mini-deep-research": { inputCostPer1M: 2, outputCostPer1M: 8 },
56319
56338
  "gpt-4-turbo": { inputCostPer1M: 10, outputCostPer1M: 30 },
56320
56339
  "gpt-4-turbo-preview": { inputCostPer1M: 10, outputCostPer1M: 30 },
56321
56340
  "gpt-4": { inputCostPer1M: 30, outputCostPer1M: 60 },
56322
56341
  "gpt-3.5-turbo": { inputCostPer1M: 0.5, outputCostPer1M: 1.5 },
56323
- default: { inputCostPer1M: 2, outputCostPer1M: 8 }
56342
+ "codex-mini-latest": { inputCostPer1M: 1.5, outputCostPer1M: 6 },
56343
+ default: { inputCostPer1M: 2, outputCostPer1M: 8, isEstimate: true }
56324
56344
  };
56325
56345
  MINIMAX_PRICING = {
56326
56346
  "minimax-m2.1": { inputCostPer1M: 0.12, outputCostPer1M: 0.48 },
56327
56347
  "minimax-m2": { inputCostPer1M: 0.12, outputCostPer1M: 0.48 },
56328
- default: { inputCostPer1M: 0.12, outputCostPer1M: 0.48 }
56348
+ default: { inputCostPer1M: 0.12, outputCostPer1M: 0.48, isEstimate: true }
56329
56349
  };
56330
56350
  KIMI_PRICING = {
56331
56351
  "kimi-k2-thinking-turbo": { inputCostPer1M: 0.32, outputCostPer1M: 0.48 },
@@ -56333,13 +56353,13 @@ var init_remote_provider_types = __esm(() => {
56333
56353
  "kimi-k2-turbo-preview": { inputCostPer1M: 0.2, outputCostPer1M: 0.4 },
56334
56354
  "kimi-k2-0905-preview": { inputCostPer1M: 0.2, outputCostPer1M: 0.4 },
56335
56355
  "kimi-k2": { inputCostPer1M: 0.2, outputCostPer1M: 0.4 },
56336
- default: { inputCostPer1M: 0.32, outputCostPer1M: 0.48 }
56356
+ default: { inputCostPer1M: 0.32, outputCostPer1M: 0.48, isEstimate: true }
56337
56357
  };
56338
56358
  GLM_PRICING = {
56339
56359
  "glm-4.7": { inputCostPer1M: 0.16, outputCostPer1M: 0.8 },
56340
56360
  "glm-4": { inputCostPer1M: 0.16, outputCostPer1M: 0.8 },
56341
56361
  "glm-4-plus": { inputCostPer1M: 0.5, outputCostPer1M: 2 },
56342
- default: { inputCostPer1M: 0.16, outputCostPer1M: 0.8 }
56362
+ default: { inputCostPer1M: 0.16, outputCostPer1M: 0.8, isEstimate: true }
56343
56363
  };
56344
56364
  });
56345
56365
 
@@ -56980,7 +57000,7 @@ class OpenAIHandler {
56980
57000
  }
56981
57001
  return `${this.provider.baseUrl}${this.provider.apiPath}`;
56982
57002
  }
56983
- writeTokenFile(input, output) {
57003
+ writeTokenFile(input, output, isEstimate) {
56984
57004
  try {
56985
57005
  const total = input + output;
56986
57006
  const leftPct = this.contextWindow > 0 ? Math.max(0, Math.min(100, Math.round((this.contextWindow - total) / this.contextWindow * 100))) : 100;
@@ -56993,6 +57013,9 @@ class OpenAIHandler {
56993
57013
  context_left_percent: leftPct,
56994
57014
  updated_at: Date.now()
56995
57015
  };
57016
+ if (isEstimate) {
57017
+ data.cost_is_estimate = true;
57018
+ }
56996
57019
  const claudishDir = join9(homedir5(), ".claudish");
56997
57020
  mkdirSync6(claudishDir, { recursive: true });
56998
57021
  writeFileSync8(join9(claudishDir, `tokens-${this.port}.json`), JSON.stringify(data), "utf-8");
@@ -57001,13 +57024,23 @@ class OpenAIHandler {
57001
57024
  }
57002
57025
  }
57003
57026
  updateTokenTracking(inputTokens, outputTokens) {
57004
- const incrementalInputTokens = Math.max(0, inputTokens - this.sessionInputTokens);
57005
- this.sessionInputTokens = inputTokens;
57027
+ let incrementalInputTokens;
57028
+ if (inputTokens >= this.sessionInputTokens) {
57029
+ incrementalInputTokens = inputTokens - this.sessionInputTokens;
57030
+ this.sessionInputTokens = inputTokens;
57031
+ } else if (inputTokens < this.sessionInputTokens * 0.5) {
57032
+ incrementalInputTokens = inputTokens;
57033
+ log(`[OpenAIHandler] Token tracking: detected concurrent conversation (${inputTokens} < ${this.sessionInputTokens}), charging full input`);
57034
+ } else {
57035
+ incrementalInputTokens = inputTokens;
57036
+ this.sessionInputTokens = inputTokens;
57037
+ log(`[OpenAIHandler] Token tracking: ambiguous token decrease (${inputTokens} vs ${this.sessionInputTokens}), charging full input`);
57038
+ }
57006
57039
  this.sessionOutputTokens += outputTokens;
57007
57040
  const pricing = this.getPricing();
57008
57041
  const cost = incrementalInputTokens / 1e6 * pricing.inputCostPer1M + outputTokens / 1e6 * pricing.outputCostPer1M;
57009
57042
  this.sessionTotalCost += cost;
57010
- this.writeTokenFile(inputTokens, this.sessionOutputTokens);
57043
+ this.writeTokenFile(Math.max(inputTokens, this.sessionInputTokens), this.sessionOutputTokens, pricing.isEstimate);
57011
57044
  }
57012
57045
  convertMessages(claudeRequest) {
57013
57046
  return convertMessagesToOpenAI(claudeRequest, `openai/${this.modelName}`, filterIdentity);
@@ -57158,7 +57191,7 @@ class OpenAIHandler {
57158
57191
  payload.instructions = claudeRequest.system;
57159
57192
  }
57160
57193
  if (claudeRequest.max_tokens) {
57161
- payload.max_output_tokens = claudeRequest.max_tokens;
57194
+ payload.max_output_tokens = Math.max(16, claudeRequest.max_tokens);
57162
57195
  }
57163
57196
  if (tools.length > 0) {
57164
57197
  payload.tools = tools.map((tool) => {
@@ -57188,10 +57221,22 @@ class OpenAIHandler {
57188
57221
  let outputTokens = 0;
57189
57222
  let hasTextContent = false;
57190
57223
  let hasToolUse = false;
57224
+ let lastActivity = Date.now();
57225
+ let pingInterval = null;
57226
+ let isClosed = false;
57191
57227
  const functionCalls = new Map;
57192
57228
  const stream = new ReadableStream({
57193
57229
  start: async (controller) => {
57194
- const messageStart = {
57230
+ const send = (event, data) => {
57231
+ if (!isClosed) {
57232
+ controller.enqueue(encoder.encode(`event: ${event}
57233
+ data: ${JSON.stringify(data)}
57234
+
57235
+ `));
57236
+ }
57237
+ };
57238
+ log(`[OpenAIHandler] Sending message_start with placeholder tokens`);
57239
+ send("message_start", {
57195
57240
  type: "message_start",
57196
57241
  message: {
57197
57242
  id: `msg_${Date.now()}`,
@@ -57203,16 +57248,19 @@ class OpenAIHandler {
57203
57248
  stop_sequence: null,
57204
57249
  usage: { input_tokens: 100, output_tokens: 1 }
57205
57250
  }
57206
- };
57207
- controller.enqueue(encoder.encode(`event: message_start
57208
- data: ${JSON.stringify(messageStart)}
57209
-
57210
- `));
57251
+ });
57252
+ send("ping", { type: "ping" });
57253
+ pingInterval = setInterval(() => {
57254
+ if (!isClosed && Date.now() - lastActivity > 1000) {
57255
+ send("ping", { type: "ping" });
57256
+ }
57257
+ }, 1000);
57211
57258
  try {
57212
57259
  while (true) {
57213
57260
  const { done, value } = await reader.read();
57214
57261
  if (done)
57215
57262
  break;
57263
+ lastActivity = Date.now();
57216
57264
  buffer += decoder.decode(value, { stream: true });
57217
57265
  const lines = buffer.split(`
57218
57266
  `);
@@ -57236,86 +57284,99 @@ data: ${JSON.stringify(messageStart)}
57236
57284
  }
57237
57285
  if (event.type === "response.output_text.delta") {
57238
57286
  if (!hasTextContent) {
57239
- const blockStart = {
57287
+ send("content_block_start", {
57240
57288
  type: "content_block_start",
57241
57289
  index: blockIndex,
57242
57290
  content_block: { type: "text", text: "" }
57243
- };
57244
- controller.enqueue(encoder.encode(`event: content_block_start
57245
- data: ${JSON.stringify(blockStart)}
57246
-
57247
- `));
57291
+ });
57248
57292
  hasTextContent = true;
57249
57293
  }
57250
- const delta = {
57294
+ send("content_block_delta", {
57251
57295
  type: "content_block_delta",
57252
57296
  index: blockIndex,
57253
57297
  delta: { type: "text_delta", text: event.delta || "" }
57254
- };
57255
- controller.enqueue(encoder.encode(`event: content_block_delta
57256
- data: ${JSON.stringify(delta)}
57257
-
57258
- `));
57298
+ });
57259
57299
  } else if (event.type === "response.output_item.added") {
57300
+ if (getLogLevel() === "debug" && event.item?.type) {
57301
+ log(`[OpenAIHandler] Output item added: type=${event.item.type}, id=${event.item.id || event.item.call_id || "unknown"}`);
57302
+ }
57260
57303
  if (event.item?.type === "function_call") {
57261
- const callId = event.item.call_id || event.item.id;
57304
+ const itemId = event.item.id;
57305
+ const openaiCallId = event.item.call_id || itemId;
57306
+ const callId = openaiCallId.startsWith("toolu_") ? openaiCallId : `toolu_${openaiCallId.replace(/^fc_/, "")}`;
57307
+ const fnName = event.item.name || "";
57262
57308
  const fnIndex = blockIndex + functionCalls.size + (hasTextContent ? 1 : 0);
57263
- functionCalls.set(callId, {
57264
- name: event.item.name || "",
57309
+ log(`[OpenAIHandler] Function call: itemId=${itemId}, openaiCallId=${openaiCallId}, claudeId=${callId}, name=${fnName}, index=${fnIndex}`);
57310
+ const fnCallData = {
57311
+ name: fnName,
57265
57312
  arguments: "",
57266
- index: fnIndex
57267
- });
57313
+ index: fnIndex,
57314
+ claudeId: callId
57315
+ };
57316
+ functionCalls.set(openaiCallId, fnCallData);
57317
+ if (itemId && itemId !== openaiCallId) {
57318
+ functionCalls.set(itemId, fnCallData);
57319
+ }
57268
57320
  if (hasTextContent && !hasToolUse) {
57269
- const blockStop = { type: "content_block_stop", index: blockIndex };
57270
- controller.enqueue(encoder.encode(`event: content_block_stop
57271
- data: ${JSON.stringify(blockStop)}
57272
-
57273
- `));
57321
+ send("content_block_stop", { type: "content_block_stop", index: blockIndex });
57274
57322
  blockIndex++;
57275
57323
  }
57276
- const toolStart = {
57324
+ send("content_block_start", {
57277
57325
  type: "content_block_start",
57278
57326
  index: fnIndex,
57279
57327
  content_block: {
57280
57328
  type: "tool_use",
57281
57329
  id: callId,
57282
- name: event.item.name || "",
57330
+ name: fnName,
57283
57331
  input: {}
57284
57332
  }
57285
- };
57286
- controller.enqueue(encoder.encode(`event: content_block_start
57287
- data: ${JSON.stringify(toolStart)}
57288
-
57289
- `));
57333
+ });
57290
57334
  hasToolUse = true;
57335
+ } else if (event.item?.type === "reasoning") {
57336
+ log(`[OpenAIHandler] Reasoning block started`);
57337
+ }
57338
+ } else if (event.type === "response.reasoning_summary_text.delta") {
57339
+ if (!hasTextContent) {
57340
+ send("content_block_start", {
57341
+ type: "content_block_start",
57342
+ index: blockIndex,
57343
+ content_block: { type: "text", text: "" }
57344
+ });
57345
+ hasTextContent = true;
57291
57346
  }
57347
+ send("content_block_delta", {
57348
+ type: "content_block_delta",
57349
+ index: blockIndex,
57350
+ delta: { type: "text_delta", text: event.delta || "" }
57351
+ });
57292
57352
  } else if (event.type === "response.function_call_arguments.delta") {
57293
57353
  const callId = event.call_id || event.item_id;
57354
+ if (getLogLevel() === "debug" && !functionCalls.has(callId)) {
57355
+ log(`[OpenAIHandler] Argument delta lookup failed: callId=${callId}, stored keys=[${Array.from(functionCalls.keys()).join(", ")}]`);
57356
+ }
57294
57357
  const fnCall = functionCalls.get(callId);
57295
57358
  if (fnCall) {
57296
57359
  fnCall.arguments += event.delta || "";
57297
- const delta = {
57360
+ send("content_block_delta", {
57298
57361
  type: "content_block_delta",
57299
57362
  index: fnCall.index,
57300
57363
  delta: { type: "input_json_delta", partial_json: event.delta || "" }
57301
- };
57302
- controller.enqueue(encoder.encode(`event: content_block_delta
57303
- data: ${JSON.stringify(delta)}
57304
-
57305
- `));
57364
+ });
57306
57365
  }
57307
57366
  } else if (event.type === "response.output_item.done") {
57308
57367
  if (event.item?.type === "function_call") {
57309
57368
  const callId = event.item.call_id || event.item.id;
57310
- const fnCall = functionCalls.get(callId);
57369
+ const fnCall = functionCalls.get(callId) || functionCalls.get(event.item.id);
57311
57370
  if (fnCall) {
57312
- const blockStop = { type: "content_block_stop", index: fnCall.index };
57313
- controller.enqueue(encoder.encode(`event: content_block_stop
57314
- data: ${JSON.stringify(blockStop)}
57315
-
57316
- `));
57371
+ send("content_block_stop", { type: "content_block_stop", index: fnCall.index });
57317
57372
  }
57318
57373
  }
57374
+ } else if (event.type === "response.incomplete") {
57375
+ log(`[OpenAIHandler] Response incomplete: ${event.reason || "unknown reason"}`);
57376
+ if (event.response?.usage) {
57377
+ inputTokens = event.response.usage.input_tokens || inputTokens;
57378
+ outputTokens = event.response.usage.output_tokens || outputTokens;
57379
+ }
57319
57380
  } else if (event.type === "response.completed" || event.type === "response.done") {
57320
57381
  if (event.response?.usage) {
57321
57382
  inputTokens = event.response.usage.input_tokens || 0;
@@ -57332,31 +57393,29 @@ data: ${JSON.stringify(blockStop)}
57332
57393
  }
57333
57394
  }
57334
57395
  }
57335
- if (hasTextContent && !hasToolUse) {
57336
- const blockStop = { type: "content_block_stop", index: blockIndex };
57337
- controller.enqueue(encoder.encode(`event: content_block_stop
57338
- data: ${JSON.stringify(blockStop)}
57339
-
57340
- `));
57396
+ if (pingInterval) {
57397
+ clearInterval(pingInterval);
57398
+ pingInterval = null;
57399
+ }
57400
+ if (hasTextContent) {
57401
+ send("content_block_stop", { type: "content_block_stop", index: blockIndex });
57341
57402
  }
57342
57403
  const stopReason = hasToolUse ? "tool_use" : "end_turn";
57343
- const messageDelta = {
57404
+ send("message_delta", {
57344
57405
  type: "message_delta",
57345
57406
  delta: { stop_reason: stopReason, stop_sequence: null },
57346
57407
  usage: { input_tokens: inputTokens, output_tokens: outputTokens }
57347
- };
57348
- controller.enqueue(encoder.encode(`event: message_delta
57349
- data: ${JSON.stringify(messageDelta)}
57350
-
57351
- `));
57352
- const messageStop = { type: "message_stop" };
57353
- controller.enqueue(encoder.encode(`event: message_stop
57354
- data: ${JSON.stringify(messageStop)}
57355
-
57356
- `));
57408
+ });
57409
+ send("message_stop", { type: "message_stop" });
57410
+ isClosed = true;
57357
57411
  this.updateTokenTracking(inputTokens, outputTokens);
57358
57412
  controller.close();
57359
57413
  } catch (error46) {
57414
+ if (pingInterval) {
57415
+ clearInterval(pingInterval);
57416
+ pingInterval = null;
57417
+ }
57418
+ isClosed = true;
57360
57419
  log(`[OpenAIHandler] Responses streaming error: ${error46}`);
57361
57420
  controller.error(error46);
57362
57421
  }
@@ -58202,7 +58261,7 @@ function createTempSettingsFile(modelDisplay, port) {
58202
58261
  const DIM2 = "\\033[2m";
58203
58262
  const RESET2 = "\\033[0m";
58204
58263
  const BOLD2 = "\\033[1m";
58205
- statusCommand = `JSON=$(cat) && DIR=$(basename "$(pwd)") && [ \${#DIR} -gt 15 ] && DIR="\${DIR:0:12}..." || true && CTX=100 && COST="0" && if [ -f "${tokenFilePath}" ]; then TOKENS=$(cat "${tokenFilePath}" 2>/dev/null) && REAL_CTX=$(echo "$TOKENS" | grep -o '"context_left_percent":[0-9]*' | grep -o '[0-9]*') && if [ ! -z "$REAL_CTX" ]; then CTX="$REAL_CTX"; fi; fi && COST=$(echo "$JSON" | grep -o '"total_cost_usd":[0-9.]*' | cut -d: -f2) && [ -z "$COST" ] && COST="0" || true && if [ "$CLAUDISH_IS_LOCAL" = "true" ]; then COST_DISPLAY="LOCAL"; else COST_DISPLAY=$(printf "\\$%.3f" "$COST"); fi && printf "${CYAN2}${BOLD2}%s${RESET2} ${DIM2}•${RESET2} ${YELLOW2}%s${RESET2} ${DIM2}•${RESET2} ${GREEN2}%s${RESET2} ${DIM2}•${RESET2} ${MAGENTA}%s%%${RESET2}\\n" "$DIR" "$CLAUDISH_ACTIVE_MODEL_NAME" "$COST_DISPLAY" "$CTX"`;
58264
+ statusCommand = `JSON=$(cat) && DIR=$(basename "$(pwd)") && [ \${#DIR} -gt 15 ] && DIR="\${DIR:0:12}..." || true && CTX=100 && COST="0" && if [ -f "${tokenFilePath}" ]; then TOKENS=$(cat "${tokenFilePath}" 2>/dev/null) && REAL_CTX=$(echo "$TOKENS" | grep -o '"context_left_percent":[0-9]*' | grep -o '[0-9]*') && if [ ! -z "$REAL_CTX" ]; then CTX="$REAL_CTX"; fi && REAL_COST=$(echo "$TOKENS" | grep -o '"total_cost":[0-9.]*' | cut -d: -f2) && if [ ! -z "$REAL_COST" ]; then COST="$REAL_COST"; fi; fi && if [ "$CLAUDISH_IS_LOCAL" = "true" ]; then COST_DISPLAY="LOCAL"; else COST_DISPLAY=$(printf "\\$%.3f" "$COST"); fi && printf "${CYAN2}${BOLD2}%s${RESET2} ${DIM2}•${RESET2} ${YELLOW2}%s${RESET2} ${DIM2}•${RESET2} ${GREEN2}%s${RESET2} ${DIM2}•${RESET2} ${MAGENTA}%s%%${RESET2}\\n" "$DIR" "$CLAUDISH_ACTIVE_MODEL_NAME" "$COST_DISPLAY" "$CTX"`;
58206
58265
  }
58207
58266
  const settings = {
58208
58267
  statusLine: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claudish",
3
- "version": "3.3.9",
3
+ "version": "3.3.11",
4
4
  "description": "Run Claude Code with any model - OpenRouter, Ollama, LM Studio & local models",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",