@anthropic-ai/claude-code 2.0.20 → 2.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/cli.js +992 -984
  2. package/package.json +1 -1
  3. package/sdk.d.ts +10 -2
  4. package/sdk.mjs +11 -2
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@anthropic-ai/claude-code",
3
- "version": "2.0.20",
3
+ "version": "2.0.22",
4
4
  "main": "sdk.mjs",
5
5
  "types": "sdk.d.ts",
6
6
  "bin": {
package/sdk.d.ts CHANGED
@@ -384,8 +384,6 @@ export type SDKHookResponseMessage = SDKMessageBase & {
384
384
  subtype: 'hook_response';
385
385
  hook_name: string;
386
386
  hook_event: string;
387
- tool_name?: string;
388
- response: SyncHookJSONOutput;
389
387
  stdout: string;
390
388
  stderr: string;
391
389
  exit_code?: number;
@@ -400,6 +398,16 @@ export interface Query extends AsyncGenerator<SDKMessage, void> {
400
398
  interrupt(): Promise<void>;
401
399
  setPermissionMode(mode: PermissionMode): Promise<void>;
402
400
  setModel(model?: string): Promise<void>;
401
+ /**
402
+ * Set the maximum number of thinking tokens the model is allowed to use
403
+ * when generating its response. This can be used to limit the amount of
404
+ * tokens the model uses for its response, which can help control cost and
405
+ * latency.
406
+ *
407
+ * Use `null` to clear any previously set limit and allow the model to
408
+ * use the default maximum thinking tokens.
409
+ */
410
+ setMaxThinkingTokens(maxThinkingTokens: number | null): Promise<void>;
403
411
  supportedCommands(): Promise<SlashCommand[]>;
404
412
  supportedModels(): Promise<ModelInfo[]>;
405
413
  mcpServerStatus(): Promise<McpServerStatus[]>;
package/sdk.mjs CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  // (c) Anthropic PBC. All rights reserved. Use is subject to the Legal Agreements outlined here: https://docs.claude.com/en/docs/claude-code/legal-and-compliance.
3
3
 
4
- // Version: 2.0.20
4
+ // Version: 2.0.22
5
5
 
6
6
  // Want to see the unminified source? We're hiring!
7
7
  // https://job-boards.greenhouse.io/anthropic/jobs/4816199008
@@ -7403,6 +7403,10 @@ var isDebugToStdErr = memoize_default(() => {
7403
7403
  return process.argv.includes("--debug-to-stderr") || process.argv.includes("-d2e");
7404
7404
  });
7405
7405
  function shouldLogDebugMessage(message) {
7406
+ if (false) {}
7407
+ if (typeof process === "undefined" || typeof process.versions === "undefined" || typeof process.versions.node === "undefined") {
7408
+ return false;
7409
+ }
7406
7410
  const filter = getDebugFilter();
7407
7411
  return shouldShowDebugMessage(message, filter);
7408
7412
  }
@@ -7423,7 +7427,6 @@ function logForDebugging(message, { level } = {
7423
7427
  writeToStderr(output);
7424
7428
  return;
7425
7429
  }
7426
- if (false) {}
7427
7430
  if (!getFsImplementation().existsSync(dirname(getDebugLogPath()))) {
7428
7431
  getFsImplementation().mkdirSync(dirname(getDebugLogPath()));
7429
7432
  }
@@ -7675,6 +7678,12 @@ class Query {
7675
7678
  model
7676
7679
  });
7677
7680
  }
7681
+ async setMaxThinkingTokens(maxThinkingTokens) {
7682
+ await this.request({
7683
+ subtype: "set_max_thinking_tokens",
7684
+ max_thinking_tokens: maxThinkingTokens
7685
+ });
7686
+ }
7678
7687
  request(request) {
7679
7688
  const requestId = Math.random().toString(36).substring(2, 15);
7680
7689
  const sdkRequest = {