@qwen-code/qwen-code 0.0.1-alpha.11 → 0.0.1-alpha.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +21 -2
  2. package/bundle/gemini.js +678 -110
  3. package/package.json +3 -3
package/README.md CHANGED
@@ -26,7 +26,7 @@ curl -qL https://www.npmjs.com/install.sh | sh
26
26
  ### Installation
27
27
 
28
28
  ```bash
29
- npm install -g @qwen-code/qwen-code
29
+ npm install -g @qwen-code/qwen-code@latest
30
30
  qwen --version
31
31
  ```
32
32
 
@@ -45,6 +45,17 @@ npm install
45
45
  npm install -g .
46
46
  ```
47
47
 
48
+ We now support a maximum session token limit. You can set it in your `.qwen/settings.json` file to keep token usage under control.
49
+ For example, to set the maximum session token limit to 32000 tokens:
50
+
51
+ ```json
52
+ {
53
+ "sessionTokenLimit": 32000
54
+ }
55
+ ```
56
+
57
+ The session token limit is the maximum number of tokens allowed in a single chat session (not the total usage across multiple tool-call rounds); the default is 32000. If you reach the limit, use the `/compress` command to compress the history and continue, or the `/clear` command to clear it.
58
+
48
59
  ### API Configuration
49
60
 
50
61
  Set your Qwen API key (in a Qwen Code project, you can also set your API key in a `.env` file). The `.env` file should be placed in the root directory of your current project.
@@ -61,7 +72,7 @@ export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
61
72
  export OPENAI_MODEL="qwen3-coder-plus"
62
73
  ```
63
74
 
64
- If you are in mainland China, ModelScope offers 2,000 free model inference API calls per day:
75
+ If you are in mainland China, ModelScope offers 2,000 free model inference API calls per day. Make sure you link your Aliyun account to ModelScope; otherwise you may hit errors such as `API Error: OpenAI API error`.
65
76
 
66
77
  ```bash
67
78
  export OPENAI_API_KEY="your_api_key_here"
@@ -77,6 +88,14 @@ export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
77
88
  export OPENAI_MODEL="qwen3-coder-plus"
78
89
  ```
79
90
 
91
+ OpenRouter also provides free Qwen3-Coder model access:
92
+
93
+ ```bash
94
+ export OPENAI_API_KEY="your_api_key_here"
95
+ export OPENAI_BASE_URL="https://openrouter.ai/api/v1"
96
+ export OPENAI_MODEL="qwen/qwen3-coder:free"
97
+ ```
98
+
80
99
  ## Usage Examples
81
100
 
82
101
  ### Explore Codebases
package/bundle/gemini.js CHANGED
@@ -90028,7 +90028,7 @@ var init_server = __esm({
90028
90028
  "packages/core/dist/src/code_assist/server.js"() {
90029
90029
  "use strict";
90030
90030
  init_converter();
90031
- CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com";
90031
+ CODE_ASSIST_ENDPOINT = "https://localhost:0";
90032
90032
  CODE_ASSIST_API_VERSION = "v1internal";
90033
90033
  CodeAssistServer = class {
90034
90034
  client;
@@ -90279,53 +90279,19 @@ var DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_FLASH_MODEL, DEFAULT_GEMINI_EMBEDDING_M
90279
90279
  var init_models = __esm({
90280
90280
  "packages/core/dist/src/config/models.js"() {
90281
90281
  "use strict";
90282
- DEFAULT_GEMINI_MODEL = "qwen3-coder-max";
90282
+ DEFAULT_GEMINI_MODEL = "qwen3-coder-plus";
90283
90283
  DEFAULT_GEMINI_FLASH_MODEL = "gemini-2.5-flash";
90284
90284
  DEFAULT_GEMINI_EMBEDDING_MODEL = "gemini-embedding-001";
90285
90285
  }
90286
90286
  });
90287
90287
 
90288
90288
  // packages/core/dist/src/core/modelCheck.js
90289
- async function getEffectiveModel(apiKey, currentConfiguredModel) {
90290
- if (currentConfiguredModel !== DEFAULT_GEMINI_MODEL) {
90291
- return currentConfiguredModel;
90292
- }
90293
- const modelToTest = DEFAULT_GEMINI_MODEL;
90294
- const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;
90295
- const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelToTest}:generateContent?key=${apiKey}`;
90296
- const body = JSON.stringify({
90297
- contents: [{ parts: [{ text: "test" }] }],
90298
- generationConfig: {
90299
- maxOutputTokens: 1,
90300
- temperature: 0,
90301
- topK: 1,
90302
- thinkingConfig: { thinkingBudget: 128, includeThoughts: false }
90303
- }
90304
- });
90305
- const controller = new AbortController();
90306
- const timeoutId = setTimeout(() => controller.abort(), 2e3);
90307
- try {
90308
- const response = await fetch(endpoint, {
90309
- method: "POST",
90310
- headers: { "Content-Type": "application/json" },
90311
- body,
90312
- signal: controller.signal
90313
- });
90314
- clearTimeout(timeoutId);
90315
- if (response.status === 429) {
90316
- console.log(`[INFO] Your configured model (${modelToTest}) was temporarily unavailable. Switched to ${fallbackModel} for this session.`);
90317
- return fallbackModel;
90318
- }
90319
- return currentConfiguredModel;
90320
- } catch (_error) {
90321
- clearTimeout(timeoutId);
90322
- return currentConfiguredModel;
90323
- }
90289
+ async function getEffectiveModel(_apiKey, currentConfiguredModel) {
90290
+ return currentConfiguredModel;
90324
90291
  }
90325
90292
  var init_modelCheck = __esm({
90326
90293
  "packages/core/dist/src/core/modelCheck.js"() {
90327
90294
  "use strict";
90328
- init_models();
90329
90295
  }
90330
90296
  });
90331
90297
 
@@ -152649,7 +152615,7 @@ var init_clearcut_logger = __esm({
152649
152615
  end_session_event_name = "end_session";
152650
152616
  flash_fallback_event_name = "flash_fallback";
152651
152617
  loop_detected_event_name = "loop_detected";
152652
- ClearcutLogger = class _ClearcutLogger {
152618
+ ClearcutLogger = class {
152653
152619
  static instance;
152654
152620
  config;
152655
152621
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- Clearcut expects this format.
@@ -152660,13 +152626,8 @@ var init_clearcut_logger = __esm({
152660
152626
  constructor(config2) {
152661
152627
  this.config = config2;
152662
152628
  }
152663
- static getInstance(config2) {
152664
- if (config2 === void 0 || !config2?.getUsageStatisticsEnabled())
152665
- return void 0;
152666
- if (!_ClearcutLogger.instance) {
152667
- _ClearcutLogger.instance = new _ClearcutLogger(config2);
152668
- }
152669
- return _ClearcutLogger.instance;
152629
+ static getInstance(_config) {
152630
+ return void 0;
152670
152631
  }
152671
152632
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- Clearcut expects this format.
152672
152633
  enqueueLogEvent(event) {
@@ -153449,7 +153410,7 @@ var init_loggers = __esm({
153449
153410
  init_uiTelemetry();
153450
153411
  init_clearcut_logger();
153451
153412
  init_safeJsonStringify();
153452
- shouldLogUserPrompts = (config2) => config2.getTelemetryLogPromptsEnabled();
153413
+ shouldLogUserPrompts = (_config) => false;
153453
153414
  }
153454
153415
  });
153455
153416
 
@@ -153645,6 +153606,436 @@ var init_openaiLogger = __esm({
153645
153606
  }
153646
153607
  });
153647
153608
 
153609
+ // node_modules/tiktoken/tiktoken_bg.cjs
153610
+ var require_tiktoken_bg = __commonJS({
153611
+ "node_modules/tiktoken/tiktoken_bg.cjs"(exports2, module2) {
153612
+ var wasm2;
153613
+ module2.exports.__wbg_set_wasm = function(val) {
153614
+ wasm2 = val;
153615
+ };
153616
+ var lTextDecoder = typeof TextDecoder === "undefined" ? (0, module2.require)("util").TextDecoder : TextDecoder;
153617
+ var cachedTextDecoder = new lTextDecoder("utf-8", { ignoreBOM: true, fatal: true });
153618
+ cachedTextDecoder.decode();
153619
+ var cachedUint8ArrayMemory0 = null;
153620
+ function getUint8ArrayMemory0() {
153621
+ if (cachedUint8ArrayMemory0 === null || cachedUint8ArrayMemory0.byteLength === 0) {
153622
+ cachedUint8ArrayMemory0 = new Uint8Array(wasm2.memory.buffer);
153623
+ }
153624
+ return cachedUint8ArrayMemory0;
153625
+ }
153626
+ function getStringFromWasm0(ptr, len) {
153627
+ ptr = ptr >>> 0;
153628
+ return cachedTextDecoder.decode(getUint8ArrayMemory0().subarray(ptr, ptr + len));
153629
+ }
153630
+ var heap = new Array(128).fill(void 0);
153631
+ heap.push(void 0, null, true, false);
153632
+ var heap_next = heap.length;
153633
+ function addHeapObject(obj) {
153634
+ if (heap_next === heap.length) heap.push(heap.length + 1);
153635
+ const idx = heap_next;
153636
+ heap_next = heap[idx];
153637
+ heap[idx] = obj;
153638
+ return idx;
153639
+ }
153640
+ function handleError(f3, args) {
153641
+ try {
153642
+ return f3.apply(this, args);
153643
+ } catch (e3) {
153644
+ wasm2.__wbindgen_export_0(addHeapObject(e3));
153645
+ }
153646
+ }
153647
+ function getObject2(idx) {
153648
+ return heap[idx];
153649
+ }
153650
+ function dropObject(idx) {
153651
+ if (idx < 132) return;
153652
+ heap[idx] = heap_next;
153653
+ heap_next = idx;
153654
+ }
153655
+ function takeObject(idx) {
153656
+ const ret = getObject2(idx);
153657
+ dropObject(idx);
153658
+ return ret;
153659
+ }
153660
+ var WASM_VECTOR_LEN = 0;
153661
+ var lTextEncoder = typeof TextEncoder === "undefined" ? (0, module2.require)("util").TextEncoder : TextEncoder;
153662
+ var cachedTextEncoder = new lTextEncoder("utf-8");
153663
+ var encodeString = typeof cachedTextEncoder.encodeInto === "function" ? function(arg, view) {
153664
+ return cachedTextEncoder.encodeInto(arg, view);
153665
+ } : function(arg, view) {
153666
+ const buf = cachedTextEncoder.encode(arg);
153667
+ view.set(buf);
153668
+ return {
153669
+ read: arg.length,
153670
+ written: buf.length
153671
+ };
153672
+ };
153673
+ function passStringToWasm0(arg, malloc, realloc) {
153674
+ if (realloc === void 0) {
153675
+ const buf = cachedTextEncoder.encode(arg);
153676
+ const ptr2 = malloc(buf.length, 1) >>> 0;
153677
+ getUint8ArrayMemory0().subarray(ptr2, ptr2 + buf.length).set(buf);
153678
+ WASM_VECTOR_LEN = buf.length;
153679
+ return ptr2;
153680
+ }
153681
+ let len = arg.length;
153682
+ let ptr = malloc(len, 1) >>> 0;
153683
+ const mem = getUint8ArrayMemory0();
153684
+ let offset = 0;
153685
+ for (; offset < len; offset++) {
153686
+ const code = arg.charCodeAt(offset);
153687
+ if (code > 127) break;
153688
+ mem[ptr + offset] = code;
153689
+ }
153690
+ if (offset !== len) {
153691
+ if (offset !== 0) {
153692
+ arg = arg.slice(offset);
153693
+ }
153694
+ ptr = realloc(ptr, len, len = offset + arg.length * 3, 1) >>> 0;
153695
+ const view = getUint8ArrayMemory0().subarray(ptr + offset, ptr + len);
153696
+ const ret = encodeString(arg, view);
153697
+ offset += ret.written;
153698
+ ptr = realloc(ptr, len, offset, 1) >>> 0;
153699
+ }
153700
+ WASM_VECTOR_LEN = offset;
153701
+ return ptr;
153702
+ }
153703
+ function isLikeNone(x2) {
153704
+ return x2 === void 0 || x2 === null;
153705
+ }
153706
+ var cachedDataViewMemory0 = null;
153707
+ function getDataViewMemory0() {
153708
+ if (cachedDataViewMemory0 === null || cachedDataViewMemory0.buffer.detached === true || cachedDataViewMemory0.buffer.detached === void 0 && cachedDataViewMemory0.buffer !== wasm2.memory.buffer) {
153709
+ cachedDataViewMemory0 = new DataView(wasm2.memory.buffer);
153710
+ }
153711
+ return cachedDataViewMemory0;
153712
+ }
153713
+ var cachedUint32ArrayMemory0 = null;
153714
+ function getUint32ArrayMemory0() {
153715
+ if (cachedUint32ArrayMemory0 === null || cachedUint32ArrayMemory0.byteLength === 0) {
153716
+ cachedUint32ArrayMemory0 = new Uint32Array(wasm2.memory.buffer);
153717
+ }
153718
+ return cachedUint32ArrayMemory0;
153719
+ }
153720
+ function getArrayU32FromWasm0(ptr, len) {
153721
+ ptr = ptr >>> 0;
153722
+ return getUint32ArrayMemory0().subarray(ptr / 4, ptr / 4 + len);
153723
+ }
153724
+ function passArray8ToWasm0(arg, malloc) {
153725
+ const ptr = malloc(arg.length * 1, 1) >>> 0;
153726
+ getUint8ArrayMemory0().set(arg, ptr / 1);
153727
+ WASM_VECTOR_LEN = arg.length;
153728
+ return ptr;
153729
+ }
153730
+ function passArray32ToWasm0(arg, malloc) {
153731
+ const ptr = malloc(arg.length * 4, 4) >>> 0;
153732
+ getUint32ArrayMemory0().set(arg, ptr / 4);
153733
+ WASM_VECTOR_LEN = arg.length;
153734
+ return ptr;
153735
+ }
153736
+ function getArrayU8FromWasm0(ptr, len) {
153737
+ ptr = ptr >>> 0;
153738
+ return getUint8ArrayMemory0().subarray(ptr / 1, ptr / 1 + len);
153739
+ }
153740
+ module2.exports.get_encoding = function(encoding, extend_special_tokens) {
153741
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153742
+ try {
153743
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153744
+ const ptr0 = passStringToWasm0(encoding, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153745
+ const len0 = WASM_VECTOR_LEN;
153746
+ wasm2.get_encoding(retptr, ptr0, len0, addHeapObject(extend_special_tokens));
153747
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153748
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153749
+ var r22 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
153750
+ if (r22) {
153751
+ throw takeObject(r1);
153752
+ }
153753
+ return Tiktoken.__wrap(r0);
153754
+ } finally {
153755
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153756
+ }
153757
+ };
153758
+ module2.exports.encoding_for_model = function(model, extend_special_tokens) {
153759
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153760
+ try {
153761
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153762
+ const ptr0 = passStringToWasm0(model, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153763
+ const len0 = WASM_VECTOR_LEN;
153764
+ wasm2.encoding_for_model(retptr, ptr0, len0, addHeapObject(extend_special_tokens));
153765
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153766
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153767
+ var r22 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
153768
+ if (r22) {
153769
+ throw takeObject(r1);
153770
+ }
153771
+ return Tiktoken.__wrap(r0);
153772
+ } finally {
153773
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153774
+ }
153775
+ };
153776
+ var TiktokenFinalization = typeof FinalizationRegistry === "undefined" ? { register: () => {
153777
+ }, unregister: () => {
153778
+ } } : new FinalizationRegistry((ptr) => wasm2.__wbg_tiktoken_free(ptr >>> 0, 1));
153779
+ var Tiktoken = class _Tiktoken {
153780
+ /**
153781
+ * @param {string} tiktoken_bfe
153782
+ * @param {any} special_tokens
153783
+ * @param {string} pat_str
153784
+ */
153785
+ constructor(tiktoken_bfe, special_tokens, pat_str) {
153786
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153787
+ const ptr0 = passStringToWasm0(tiktoken_bfe, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153788
+ const len0 = WASM_VECTOR_LEN;
153789
+ const ptr1 = passStringToWasm0(pat_str, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153790
+ const len1 = WASM_VECTOR_LEN;
153791
+ const ret = wasm2.tiktoken_new(ptr0, len0, addHeapObject(special_tokens), ptr1, len1);
153792
+ this.__wbg_ptr = ret >>> 0;
153793
+ TiktokenFinalization.register(this, this.__wbg_ptr, this);
153794
+ return this;
153795
+ }
153796
+ /** @returns {string | undefined} */
153797
+ get name() {
153798
+ try {
153799
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153800
+ wasm2.tiktoken_name(retptr, this.__wbg_ptr);
153801
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153802
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153803
+ let v1;
153804
+ if (r0 !== 0) {
153805
+ v1 = getStringFromWasm0(r0, r1).slice();
153806
+ wasm2.__wbindgen_export_3(r0, r1 * 1, 1);
153807
+ }
153808
+ return v1;
153809
+ } finally {
153810
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153811
+ }
153812
+ }
153813
+ static __wrap(ptr) {
153814
+ ptr = ptr >>> 0;
153815
+ const obj = Object.create(_Tiktoken.prototype);
153816
+ obj.__wbg_ptr = ptr;
153817
+ TiktokenFinalization.register(obj, obj.__wbg_ptr, obj);
153818
+ return obj;
153819
+ }
153820
+ __destroy_into_raw() {
153821
+ const ptr = this.__wbg_ptr;
153822
+ this.__wbg_ptr = 0;
153823
+ TiktokenFinalization.unregister(this);
153824
+ return ptr;
153825
+ }
153826
+ free() {
153827
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153828
+ const ptr = this.__destroy_into_raw();
153829
+ wasm2.__wbg_tiktoken_free(ptr, 0);
153830
+ }
153831
+ /**
153832
+ * @param {string} text
153833
+ * @param {any} allowed_special
153834
+ * @param {any} disallowed_special
153835
+ * @returns {Uint32Array}
153836
+ */
153837
+ encode(text, allowed_special, disallowed_special) {
153838
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153839
+ try {
153840
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153841
+ const ptr0 = passStringToWasm0(text, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153842
+ const len0 = WASM_VECTOR_LEN;
153843
+ wasm2.tiktoken_encode(retptr, this.__wbg_ptr, ptr0, len0, addHeapObject(allowed_special), addHeapObject(disallowed_special));
153844
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153845
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153846
+ var r22 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
153847
+ var r32 = getDataViewMemory0().getInt32(retptr + 4 * 3, true);
153848
+ if (r32) {
153849
+ throw takeObject(r22);
153850
+ }
153851
+ var v2 = getArrayU32FromWasm0(r0, r1).slice();
153852
+ wasm2.__wbindgen_export_3(r0, r1 * 4, 4);
153853
+ return v2;
153854
+ } finally {
153855
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153856
+ }
153857
+ }
153858
+ /**
153859
+ * @param {string} text
153860
+ * @returns {Uint32Array}
153861
+ */
153862
+ encode_ordinary(text) {
153863
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153864
+ try {
153865
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153866
+ const ptr0 = passStringToWasm0(text, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153867
+ const len0 = WASM_VECTOR_LEN;
153868
+ wasm2.tiktoken_encode_ordinary(retptr, this.__wbg_ptr, ptr0, len0);
153869
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153870
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153871
+ var v2 = getArrayU32FromWasm0(r0, r1).slice();
153872
+ wasm2.__wbindgen_export_3(r0, r1 * 4, 4);
153873
+ return v2;
153874
+ } finally {
153875
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153876
+ }
153877
+ }
153878
+ /**
153879
+ * @param {string} text
153880
+ * @param {any} allowed_special
153881
+ * @param {any} disallowed_special
153882
+ * @returns {any}
153883
+ */
153884
+ encode_with_unstable(text, allowed_special, disallowed_special) {
153885
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153886
+ try {
153887
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153888
+ const ptr0 = passStringToWasm0(text, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153889
+ const len0 = WASM_VECTOR_LEN;
153890
+ wasm2.tiktoken_encode_with_unstable(retptr, this.__wbg_ptr, ptr0, len0, addHeapObject(allowed_special), addHeapObject(disallowed_special));
153891
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153892
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153893
+ var r22 = getDataViewMemory0().getInt32(retptr + 4 * 2, true);
153894
+ if (r22) {
153895
+ throw takeObject(r1);
153896
+ }
153897
+ return takeObject(r0);
153898
+ } finally {
153899
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153900
+ }
153901
+ }
153902
+ /**
153903
+ * @param {Uint8Array} bytes
153904
+ * @returns {number}
153905
+ */
153906
+ encode_single_token(bytes) {
153907
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153908
+ const ptr0 = passArray8ToWasm0(bytes, wasm2.__wbindgen_export_1);
153909
+ const len0 = WASM_VECTOR_LEN;
153910
+ const ret = wasm2.tiktoken_encode_single_token(this.__wbg_ptr, ptr0, len0);
153911
+ return ret >>> 0;
153912
+ }
153913
+ /**
153914
+ * @param {Uint32Array} tokens
153915
+ * @returns {Uint8Array}
153916
+ */
153917
+ decode(tokens) {
153918
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153919
+ try {
153920
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153921
+ const ptr0 = passArray32ToWasm0(tokens, wasm2.__wbindgen_export_1);
153922
+ const len0 = WASM_VECTOR_LEN;
153923
+ wasm2.tiktoken_decode(retptr, this.__wbg_ptr, ptr0, len0);
153924
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153925
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153926
+ var v2 = getArrayU8FromWasm0(r0, r1).slice();
153927
+ wasm2.__wbindgen_export_3(r0, r1 * 1, 1);
153928
+ return v2;
153929
+ } finally {
153930
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153931
+ }
153932
+ }
153933
+ /**
153934
+ * @param {number} token
153935
+ * @returns {Uint8Array}
153936
+ */
153937
+ decode_single_token_bytes(token2) {
153938
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153939
+ try {
153940
+ const retptr = wasm2.__wbindgen_add_to_stack_pointer(-16);
153941
+ wasm2.tiktoken_decode_single_token_bytes(retptr, this.__wbg_ptr, token2);
153942
+ var r0 = getDataViewMemory0().getInt32(retptr + 4 * 0, true);
153943
+ var r1 = getDataViewMemory0().getInt32(retptr + 4 * 1, true);
153944
+ var v1 = getArrayU8FromWasm0(r0, r1).slice();
153945
+ wasm2.__wbindgen_export_3(r0, r1 * 1, 1);
153946
+ return v1;
153947
+ } finally {
153948
+ wasm2.__wbindgen_add_to_stack_pointer(16);
153949
+ }
153950
+ }
153951
+ /** @returns {any} */
153952
+ token_byte_values() {
153953
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153954
+ const ret = wasm2.tiktoken_token_byte_values(this.__wbg_ptr);
153955
+ return takeObject(ret);
153956
+ }
153957
+ };
153958
+ module2.exports.Tiktoken = Tiktoken;
153959
+ module2.exports.__wbg_parse_def2e24ef1252aff = function() {
153960
+ return handleError(function(arg0, arg1) {
153961
+ const ret = JSON.parse(getStringFromWasm0(arg0, arg1));
153962
+ return addHeapObject(ret);
153963
+ }, arguments);
153964
+ };
153965
+ module2.exports.__wbg_stringify_f7ed6987935b4a24 = function() {
153966
+ return handleError(function(arg0) {
153967
+ const ret = JSON.stringify(getObject2(arg0));
153968
+ return addHeapObject(ret);
153969
+ }, arguments);
153970
+ };
153971
+ module2.exports.__wbindgen_error_new = function(arg0, arg1) {
153972
+ const ret = new Error(getStringFromWasm0(arg0, arg1));
153973
+ return addHeapObject(ret);
153974
+ };
153975
+ module2.exports.__wbindgen_is_undefined = function(arg0) {
153976
+ const ret = getObject2(arg0) === void 0;
153977
+ return ret;
153978
+ };
153979
+ module2.exports.__wbindgen_object_drop_ref = function(arg0) {
153980
+ takeObject(arg0);
153981
+ };
153982
+ module2.exports.__wbindgen_string_get = function(arg0, arg1) {
153983
+ if (wasm2 == null) throw new Error("tiktoken: WASM binary has not been properly initialized.");
153984
+ const obj = getObject2(arg1);
153985
+ const ret = typeof obj === "string" ? obj : void 0;
153986
+ var ptr1 = isLikeNone(ret) ? 0 : passStringToWasm0(ret, wasm2.__wbindgen_export_1, wasm2.__wbindgen_export_2);
153987
+ var len1 = WASM_VECTOR_LEN;
153988
+ getDataViewMemory0().setInt32(arg0 + 4 * 1, len1, true);
153989
+ getDataViewMemory0().setInt32(arg0 + 4 * 0, ptr1, true);
153990
+ };
153991
+ module2.exports.__wbindgen_throw = function(arg0, arg1) {
153992
+ throw new Error(getStringFromWasm0(arg0, arg1));
153993
+ };
153994
+ }
153995
+ });
153996
+
153997
+ // node_modules/tiktoken/tiktoken.cjs
153998
+ var require_tiktoken = __commonJS({
153999
+ "node_modules/tiktoken/tiktoken.cjs"(exports2) {
154000
+ var wasm2 = require_tiktoken_bg();
154001
+ var imports = {};
154002
+ imports["./tiktoken_bg.js"] = wasm2;
154003
+ var path58 = __require("path");
154004
+ var fs52 = __require("fs");
154005
+ var candidates = __dirname.split(path58.sep).reduce((memo, _, index, array) => {
154006
+ const prefix = array.slice(0, index + 1).join(path58.sep) + path58.sep;
154007
+ if (!prefix.includes("node_modules" + path58.sep)) {
154008
+ memo.unshift(
154009
+ path58.join(
154010
+ prefix,
154011
+ "node_modules",
154012
+ "tiktoken",
154013
+ "",
154014
+ "./tiktoken_bg.wasm"
154015
+ )
154016
+ );
154017
+ }
154018
+ return memo;
154019
+ }, []);
154020
+ candidates.unshift(path58.join(__dirname, "./tiktoken_bg.wasm"));
154021
+ var bytes = null;
154022
+ for (const candidate of candidates) {
154023
+ try {
154024
+ bytes = fs52.readFileSync(candidate);
154025
+ break;
154026
+ } catch {
154027
+ }
154028
+ }
154029
+ if (bytes == null) throw new Error("Missing tiktoken_bg.wasm");
154030
+ var wasmModule = new WebAssembly.Module(bytes);
154031
+ var wasmInstance = new WebAssembly.Instance(wasmModule, imports);
154032
+ wasm2.__wbg_set_wasm(wasmInstance.exports);
154033
+ exports2["get_encoding"] = wasm2["get_encoding"];
154034
+ exports2["encoding_for_model"] = wasm2["encoding_for_model"];
154035
+ exports2["Tiktoken"] = wasm2["Tiktoken"];
154036
+ }
154037
+ });
154038
+
153648
154039
  // packages/core/dist/src/core/openaiContentGenerator.js
153649
154040
  var openaiContentGenerator_exports = {};
153650
154041
  __export(openaiContentGenerator_exports, {
@@ -153945,6 +154336,7 @@ Streaming setup timeout troubleshooting:
153945
154336
  if (responses.length === 0) {
153946
154337
  return new GenerateContentResponse();
153947
154338
  }
154339
+ const lastResponse = responses[responses.length - 1];
153948
154340
  const finalUsageMetadata = responses.slice().reverse().find((r4) => r4.usageMetadata)?.usageMetadata;
153949
154341
  const combinedParts = [];
153950
154342
  let combinedText = "";
@@ -153976,6 +154368,8 @@ Streaming setup timeout troubleshooting:
153976
154368
  safetyRatings: []
153977
154369
  }
153978
154370
  ];
154371
+ combinedResponse.responseId = lastResponse?.responseId;
154372
+ combinedResponse.createTime = lastResponse?.createTime;
153979
154373
  combinedResponse.modelVersion = this.model;
153980
154374
  combinedResponse.promptFeedback = { safetyRatings: [] };
153981
154375
  combinedResponse.usageMetadata = finalUsageMetadata;
@@ -153983,9 +154377,18 @@ Streaming setup timeout troubleshooting:
153983
154377
  }
153984
154378
  async countTokens(request2) {
153985
154379
  const content = JSON.stringify(request2.contents);
153986
- const estimatedTokens = Math.ceil(content.length / 4);
154380
+ let totalTokens = 0;
154381
+ try {
154382
+ const { get_encoding } = await Promise.resolve().then(() => __toESM(require_tiktoken(), 1));
154383
+ const encoding = get_encoding("cl100k_base");
154384
+ totalTokens = encoding.encode(content).length;
154385
+ encoding.free();
154386
+ } catch (error) {
154387
+ console.warn("Failed to load tiktoken, falling back to character approximation:", error);
154388
+ totalTokens = Math.ceil(content.length / 4);
154389
+ }
153987
154390
  return {
153988
- totalTokens: estimatedTokens
154391
+ totalTokens
153989
154392
  };
153990
154393
  }
153991
154394
  async embedContent(request2) {
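
The new `countTokens` path above is worth calling out: it swaps the old 4-characters-per-token heuristic for a real `cl100k_base` tokenizer, keeping the heuristic only as a fallback when the WASM binary cannot be loaded. A minimal standalone sketch of the same pattern, assuming the `tiktoken` npm package is installed (the encoding name and fallback mirror the bundled code above):

```js
// Count tokens with tiktoken, falling back to the old heuristic
// if the WASM-backed encoder fails to load.
async function countTokensApprox(contents) {
  const text = JSON.stringify(contents);
  try {
    const { get_encoding } = await import("tiktoken");
    const encoding = get_encoding("cl100k_base");
    const totalTokens = encoding.encode(text).length;
    encoding.free(); // release the WASM-backed encoder
    return { totalTokens };
  } catch (error) {
    console.warn("tiktoken unavailable, using character approximation:", error);
    return { totalTokens: Math.ceil(text.length / 4) };
  }
}
```

Note that `cl100k_base` only approximates Qwen's actual tokenizer, so the counts remain estimates either way; they are just much better estimates than character division.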
@@ -154315,6 +154718,8 @@ Streaming setup timeout troubleshooting:
154315
154718
  }
154316
154719
  }
154317
154720
  }
154721
+ response.responseId = openaiResponse.id;
154722
+ response.createTime = openaiResponse.created.toString();
154318
154723
  response.candidates = [
154319
154724
  {
154320
154725
  content: {
@@ -154333,6 +154738,7 @@ Streaming setup timeout troubleshooting:
154333
154738
  const promptTokens = usage2.prompt_tokens || 0;
154334
154739
  const completionTokens = usage2.completion_tokens || 0;
154335
154740
  const totalTokens = usage2.total_tokens || 0;
154741
+ const cachedTokens = usage2.prompt_tokens_details?.cached_tokens || 0;
154336
154742
  let finalPromptTokens = promptTokens;
154337
154743
  let finalCompletionTokens = completionTokens;
154338
154744
  if (totalTokens > 0 && promptTokens === 0 && completionTokens === 0) {
@@ -154342,7 +154748,8 @@ Streaming setup timeout troubleshooting:
154342
154748
  response.usageMetadata = {
154343
154749
  promptTokenCount: finalPromptTokens,
154344
154750
  candidatesTokenCount: finalCompletionTokens,
154345
- totalTokenCount: totalTokens
154751
+ totalTokenCount: totalTokens,
154752
+ cachedContentTokenCount: cachedTokens
154346
154753
  };
154347
154754
  }
154348
154755
  return response;
@@ -154410,6 +154817,8 @@ Streaming setup timeout troubleshooting:
154410
154817
  } else {
154411
154818
  response.candidates = [];
154412
154819
  }
154820
+ response.responseId = chunk.id;
154821
+ response.createTime = chunk.created.toString();
154413
154822
  response.modelVersion = this.model;
154414
154823
  response.promptFeedback = { safetyRatings: [] };
154415
154824
  if (chunk.usage) {
@@ -154417,6 +154826,7 @@ Streaming setup timeout troubleshooting:
154417
154826
  const promptTokens = usage2.prompt_tokens || 0;
154418
154827
  const completionTokens = usage2.completion_tokens || 0;
154419
154828
  const totalTokens = usage2.total_tokens || 0;
154829
+ const cachedTokens = usage2.prompt_tokens_details?.cached_tokens || 0;
154420
154830
  let finalPromptTokens = promptTokens;
154421
154831
  let finalCompletionTokens = completionTokens;
154422
154832
  if (totalTokens > 0 && promptTokens === 0 && completionTokens === 0) {
@@ -154426,7 +154836,8 @@ Streaming setup timeout troubleshooting:
154426
154836
  response.usageMetadata = {
154427
154837
  promptTokenCount: finalPromptTokens,
154428
154838
  candidatesTokenCount: finalCompletionTokens,
154429
- totalTokenCount: totalTokens
154839
+ totalTokenCount: totalTokens,
154840
+ cachedContentTokenCount: cachedTokens
154430
154841
  };
154431
154842
  }
154432
154843
  return response;
@@ -154712,9 +155123,9 @@ Streaming setup timeout troubleshooting:
154712
155123
  choice2.message.tool_calls = toolCalls;
154713
155124
  }
154714
155125
  const openaiResponse = {
154715
- id: `chatcmpl-${Date.now()}`,
155126
+ id: response.responseId || `chatcmpl-${Date.now()}`,
154716
155127
  object: "chat.completion",
154717
- created: Math.floor(Date.now() / 1e3),
155128
+ created: response.createTime ? Number(response.createTime) : Math.floor(Date.now() / 1e3),
154718
155129
  model: this.model,
154719
155130
  choices: [choice2]
154720
155131
  };
@@ -154724,6 +155135,11 @@ Streaming setup timeout troubleshooting:
154724
155135
  completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
154725
155136
  total_tokens: response.usageMetadata.totalTokenCount || 0
154726
155137
  };
155138
+ if (response.usageMetadata.cachedContentTokenCount) {
155139
+ openaiResponse.usage.prompt_tokens_details = {
155140
+ cached_tokens: response.usageMetadata.cachedContentTokenCount
155141
+ };
155142
+ }
154727
155143
  }
154728
155144
  return openaiResponse;
154729
155145
  }
@@ -154785,13 +155201,13 @@ async function createContentGeneratorConfig(model, authType) {
154785
155201
  }
154786
155202
  if (authType === AuthType2.USE_OPENAI && openaiApiKey) {
154787
155203
  contentGeneratorConfig.apiKey = openaiApiKey;
154788
- contentGeneratorConfig.model = process.env.OPENAI_MODEL || "";
155204
+ contentGeneratorConfig.model = process.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL;
154789
155205
  return contentGeneratorConfig;
154790
155206
  }
154791
155207
  return contentGeneratorConfig;
154792
155208
  }
154793
155209
  async function createContentGenerator(config2, gcConfig, sessionId2) {
154794
- const version = "0.0.1-alpha.11";
155210
+ const version = "0.0.1-alpha.12";
154795
155211
  const httpOptions = {
154796
155212
  headers: {
154797
155213
  "User-Agent": `GeminiCLI/${version} (${process.platform}; ${process.arch})`
@@ -245000,7 +245416,7 @@ ${sourceListFormatted.join("\n")}`;
245000
245416
  init_errors2();
245001
245417
  import * as fs21 from "fs/promises";
245002
245418
  import * as path20 from "path";
245003
- var MAX_ITEMS = 200;
245419
+ var MAX_ITEMS = 20;
245004
245420
  var TRUNCATION_INDICATOR = "...";
245005
245421
  var DEFAULT_IGNORED_FOLDERS = /* @__PURE__ */ new Set(["node_modules", ".git", "dist"]);
245006
245422
  async function readFullStructure(rootPath, options) {
@@ -245260,6 +245676,7 @@ var GeminiEventType;
245260
245676
  GeminiEventType2["ChatCompressed"] = "chat_compressed";
245261
245677
  GeminiEventType2["Thought"] = "thought";
245262
245678
  GeminiEventType2["MaxSessionTurns"] = "max_session_turns";
245679
+ GeminiEventType2["SessionTokenLimitExceeded"] = "session_token_limit_exceeded";
245263
245680
  GeminiEventType2["LoopDetected"] = "loop_detected";
245264
245681
  })(GeminiEventType || (GeminiEventType = {}));
245265
245682
  var Turn = class {
@@ -245362,6 +245779,7 @@ var Turn = class {
245362
245779
  import path22 from "node:path";
245363
245780
  import fs23 from "node:fs";
245364
245781
  import process20 from "node:process";
245782
+ init_models();
245365
245783
  function normalizeUrl(url2) {
245366
245784
  return url2.endsWith("/") ? url2.slice(0, -1) : url2;
245367
245785
  }
@@ -245383,7 +245801,7 @@ function getCoreSystemPrompt(userMemory, config2) {
245383
245801
  }
245384
245802
  }
245385
245803
  if (config2?.systemPromptMappings) {
245386
- const currentModel = process20.env.OPENAI_MODEL || "";
245804
+ const currentModel = process20.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL;
245387
245805
  const currentBaseUrl = process20.env.OPENAI_BASE_URL || "";
245388
245806
  const matchedMapping = config2.systemPromptMappings.find((mapping) => {
245389
245807
  const { baseUrls, modelNames } = mapping;
@@ -245407,7 +245825,7 @@ function getCoreSystemPrompt(userMemory, config2) {
245407
245825
  }
245408
245826
  }
245409
245827
  const basePrompt = systemMdEnabled ? fs23.readFileSync(systemMdPath, "utf8") : `
245410
- You are an interactive CLI agent specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
245828
+ You are Qwen Code, an interactive CLI agent developed by Alibaba Group, specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
245411
245829
 
245412
245830
  # Core Mandates
245413
245831
 
@@ -245535,24 +245953,56 @@ model: true
245535
245953
 
245536
245954
  <example>
245537
245955
  user: list files here.
245538
- model: [tool_call: ${LSTool.Name} for path '/path/to/project']
245956
+ model:
245957
+ <tool_call>
245958
+ <function=list_directory>
245959
+ <parameter=path>
245960
+ .
245961
+ </parameter>
245962
+ </function>
245963
+ </tool_call>
245539
245964
  </example>
245540
245965
 
245541
245966
  <example>
245542
245967
  user: start the server implemented in server.js
245543
- model: [tool_call: ${ShellTool.Name} for 'node server.js &' because it must run in the background]
245968
+ model:
245969
+ <tool_call>
245970
+ <function=run_shell_command>
245971
+ <parameter=command>
245972
+ node server.js &
245973
+ </parameter>
245974
+ </function>
245975
+ </tool_call>
245544
245976
  </example>
245545
245977
 
245546
245978
  <example>
245547
245979
  user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib.
245548
245980
  model: Okay, I can refactor 'src/auth.py'.
245549
245981
  First, I'll analyze the code and check for a test safety net before planning any changes.
245550
- [tool_call: ${GlobTool.Name} for path 'tests/test_auth.py']
245551
- [tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/tests/test_auth.py']
245982
+ <tool_call>
245983
+ <function=glob>
245984
+ <parameter=path>
245985
+ tests/test_auth.py
245986
+ </parameter>
245987
+ </function>
245988
+ </tool_call>
245989
+ <tool_call>
245990
+ <function=read_file>
245991
+ <parameter=path>
245992
+ /path/to/tests/test_auth.py
245993
+ </parameter>
245994
+ </function>
245995
+ </tool_call>
245552
245996
  (After analysis)
245553
245997
  Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring.
245554
245998
  I'll also confirm 'requests' is a dependency.
245555
- [tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/requirements.txt']
245999
+ <tool_call>
246000
+ <function=read_file>
246001
+ <parameter=path>
246002
+ /path/to/requirements.txt
246003
+ </parameter>
246004
+ </function>
246005
+ </tool_call>
245556
246006
  (After analysis)
245557
246007
  Looks good, 'requests' is available.
245558
246008
 
@@ -245562,20 +246012,30 @@ Here's the plan:
245562
246012
  3. Remove the old 'urllib' import.
245563
246013
  4. Run the project's linter and tests to verify the changes.
245564
246014
 
245565
- Should I proceed?
245566
- user: Yes
245567
- model:
245568
- [tool_call: ${WriteFileTool.Name} or ${EditTool.Name} to apply the refactoring to 'src/auth.py']
246015
+ <tool_call>
246016
+ <function=replace>
246017
+ <parameter=path>
246018
+ src/auth.py
246019
+ </parameter>
246020
+ <parameter=old_content>
246021
+ (old code content)
246022
+ </parameter>
246023
+ <parameter=new_content>
246024
+ (new code content)
246025
+ </parameter>
246026
+ </function>
246027
+ </tool_call>
245569
246028
  Refactoring complete. Running verification...
245570
- [tool_call: ${ShellTool.Name} for 'ruff check src/auth.py && pytest']
246029
+ <tool_call>
246030
+ <function=run_shell_command
246031
+ <parameter=command>
246032
+ ruff check src/auth.py && pytest
246033
+ </parameter>
246034
+ </function>
246035
+ </tool_call>
245571
246036
  (After verification passes)
245572
246037
  All checks passed. This is a stable checkpoint.
245573
- ${function() {
245574
- if (isGitRepository(process20.cwd())) {
245575
- return `Would you like me to write a commit message and commit these changes?`;
245576
- }
245577
- return "";
245578
- }()}
246038
+ Would you like me to write a commit message and commit these changes?
245579
246039
  </example>
245580
246040
 
245581
246041
  <example>
@@ -245586,32 +246046,48 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete
245586
246046
  <example>
245587
246047
  user: Write tests for someFile.ts
245588
246048
  model:
245589
- Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality.
245590
- [tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/someFile.ts' or use ${GlobTool.Name} to find \`someFile.ts\` if its location is unknown]
246049
+ Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
246050
+ <tool_call>
246051
+ <function=read_file>
246052
+ <parameter=path>
246053
+ /path/to/someFile.ts
246054
+ </parameter>
246055
+ </function>
246056
+ </tool_call>
245591
246057
  Now I'll look for existing or related test files to understand current testing conventions and dependencies.
245592
- [tool_call: ${ReadManyFilesTool.Name} for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory]
246058
+ <tool_call>
246059
+ <function=read_many_files>
+ <parameter=paths>
+ ['**/*.test.ts', 'src/**/*.spec.ts']
+ </parameter>
+ </function>
246060
+ </tool_call>
245593
246061
  (After reviewing existing tests and the file content)
245594
- [tool_call: ${WriteFileTool.Name} to create /path/to/someFile.test.ts with the test code]
246062
+ <tool_call>
246063
+ <function=write_file>
246064
+ <parameter=path>
246065
+ /path/to/someFile.test.ts
246066
+ </parameter>
246067
+ </function>
246068
+ </tool_call>
245595
246069
  I've written the tests. Now I'll run the project's test command to verify them.
245596
- [tool_call: ${ShellTool.Name} for 'npm run test']
245597
- </example>
245598
-
245599
- <example>
245600
- user: How do I update the user's profile information in this system?
245601
- model:
245602
- I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints.
245603
- [tool_call: ${GrepTool.Name} for pattern 'UserProfile|updateProfile|editUser']
245604
- (After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found)
245605
- Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled.
245606
- [tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/UserProfileService.java']
245607
- (After reading the file)
245608
- It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object...
246070
+ <tool_call>
246071
+ <function=run_shell_command>
246072
+ <parameter=command>
246073
+ npm run test
246074
+ </parameter>
246075
+ </function>
246076
+ </tool_call>
246077
+ (After verification passes)
246078
+ All checks passed. This is a stable checkpoint.
245609
246079
  </example>
245610
246080
 
245611
246081
  <example>
245612
246082
  user: Where are all the 'app.config' files in this project? I need to check their settings.
245613
246083
  model:
245614
- [tool_call: ${GlobTool.Name} for pattern '**/app.config']
246084
+ <tool_call>
246085
+ <function=glob>
246086
+ <parameter=pattern>
246087
+ ./**/app.config
246088
+ </parameter>
246089
+ </function>
246090
+ </tool_call>
245615
246091
  (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config'])
245616
246092
  I found the following 'app.config' files:
245617
246093
  - /path/to/moduleA/app.config
@@ -246647,7 +247123,8 @@ var GeminiClient = class {
246647
247123
  });
246648
247124
  const platform6 = process.platform;
246649
247125
  const folderStructure = await getFolderStructure(cwd2, {
246650
- fileService: this.config.getFileService()
247126
+ fileService: this.config.getFileService(),
247127
+ maxItems: this.config.getMaxFolderItems()
246651
247128
  });
246652
247129
  const context2 = `
246653
247130
  This is the Qwen Code. We are setting up the context for our chat.
@@ -246746,6 +247223,35 @@ ${result.llmContent}`
246746
247223
  if (compressed) {
246747
247224
  yield { type: GeminiEventType.ChatCompressed, value: compressed };
246748
247225
  }
247226
+ const sessionTokenLimit = this.config.getSessionTokenLimit();
247227
+ if (sessionTokenLimit > 0) {
247228
+ const currentHistory = this.getChat().getHistory(true);
247229
+ const userMemory = this.config.getUserMemory();
247230
+ const systemPrompt = getCoreSystemPrompt(userMemory);
247231
+ const environment = await this.getEnvironment();
247232
+ const mockRequestContent = [
247233
+ {
247234
+ role: "system",
247235
+ parts: [{ text: systemPrompt }, ...environment]
247236
+ },
247237
+ ...currentHistory
247238
+ ];
247239
+ const { totalTokens: totalRequestTokens } = await this.getContentGenerator().countTokens({
247240
+ model: this.config.getModel(),
247241
+ contents: mockRequestContent
247242
+ });
247243
+ if (totalRequestTokens !== void 0 && totalRequestTokens > sessionTokenLimit) {
247244
+ yield {
247245
+ type: GeminiEventType.SessionTokenLimitExceeded,
247246
+ value: {
247247
+ currentTokens: totalRequestTokens,
247248
+ limit: sessionTokenLimit,
247249
+ message: `Session token limit exceeded: ${totalRequestTokens} tokens > ${sessionTokenLimit} limit. Please start a new session or increase the sessionTokenLimit in your settings.json.`
247250
+ }
247251
+ };
247252
+ return new Turn(this.getChat(), prompt_id);
247253
+ }
247254
+ }
246749
247255
  const turn = new Turn(this.getChat(), prompt_id);
246750
247256
  const resultStream = turn.run(request2, signal);
246751
247257
  for await (const event of resultStream) {
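
In short, each stream request now pre-counts the full prompt (system prompt, environment context, and curated history) and bails out before calling the model if the configured limit is exceeded. A reduced sketch of the guard, with names following the bundled code (not the exact implementation; it omits prepending the system prompt and environment that the real check includes):

```js
// Reduced sketch of the per-request session-token guard.
async function exceedsSessionLimit(client, config) {
  const limit = config.getSessionTokenLimit();
  if (limit <= 0) return null; // 0 or negative disables the check
  const { totalTokens } = await client.getContentGenerator().countTokens({
    model: config.getModel(),
    contents: client.getChat().getHistory(true), // curated history
  });
  if (totalTokens !== undefined && totalTokens > limit) {
    return { currentTokens: totalTokens, limit }; // caller yields SessionTokenLimitExceeded
  }
  return null;
}
```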
@@ -252153,6 +252659,8 @@ var Config = class {
252153
252659
  systemPromptMappings;
252154
252660
  modelSwitchedDuringSession = false;
252155
252661
  maxSessionTurns;
252662
+ sessionTokenLimit;
252663
+ maxFolderItems;
252156
252664
  listExtensions;
252157
252665
  _activeExtensions;
252158
252666
  flashFallbackHandler;
@@ -252195,6 +252703,8 @@ var Config = class {
252195
252703
  this.model = params.model;
252196
252704
  this.extensionContextFilePaths = params.extensionContextFilePaths ?? [];
252197
252705
  this.maxSessionTurns = params.maxSessionTurns ?? -1;
252706
+ this.sessionTokenLimit = params.sessionTokenLimit ?? 32e3;
252707
+ this.maxFolderItems = params.maxFolderItems ?? 20;
252198
252708
  this.listExtensions = params.listExtensions ?? false;
252199
252709
  this._activeExtensions = params.activeExtensions ?? [];
252200
252710
  this.noBrowser = params.noBrowser ?? false;
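
Both new knobs are plumbed through from `settings.json` (see the `loadCliConfig` hunk further down, which reads `settings.sessionTokenLimit` and `settings.maxFolderItems`). A minimal settings file spelling out the defaults shown above:

```json
{
  "sessionTokenLimit": 32000,
  "maxFolderItems": 20
}
```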
@@ -252261,6 +252771,12 @@ var Config = class {
252261
252771
  getMaxSessionTurns() {
252262
252772
  return this.maxSessionTurns;
252263
252773
  }
252774
+ getSessionTokenLimit() {
252775
+ return this.sessionTokenLimit;
252776
+ }
252777
+ getMaxFolderItems() {
252778
+ return this.maxFolderItems;
252779
+ }
252264
252780
  setQuotaErrorOccurred(value) {
252265
252781
  this.quotaErrorOccurred = value;
252266
252782
  }
@@ -252386,7 +252902,7 @@ var Config = class {
252386
252902
  return this.fileDiscoveryService;
252387
252903
  }
252388
252904
  getUsageStatisticsEnabled() {
252389
- return this.usageStatisticsEnabled;
252905
+ return false;
252390
252906
  }
252391
252907
  getExtensionContextFilePaths() {
252392
252908
  return this.extensionContextFilePaths;
@@ -254762,13 +255278,22 @@ var SessionStatsProvider = ({
254762
255278
  () => stats.promptCount,
254763
255279
  [stats.promptCount]
254764
255280
  );
255281
+ const resetSession = (0, import_react28.useCallback)(() => {
255282
+ setStats({
255283
+ sessionStartTime: /* @__PURE__ */ new Date(),
255284
+ metrics: uiTelemetryService.getMetrics(),
255285
+ lastPromptTokenCount: uiTelemetryService.getLastPromptTokenCount(),
255286
+ promptCount: 0
255287
+ });
255288
+ }, []);
254765
255289
  const value = (0, import_react28.useMemo)(
254766
255290
  () => ({
254767
255291
  stats,
254768
255292
  startNewPrompt,
254769
- getPromptCount
255293
+ getPromptCount,
255294
+ resetSession
254770
255295
  }),
254771
- [stats, startNewPrompt, getPromptCount]
255296
+ [stats, startNewPrompt, getPromptCount, resetSession]
254772
255297
  );
254773
255298
  return /* @__PURE__ */ (0, import_jsx_runtime.jsx)(SessionStatsContext.Provider, { value, children });
254774
255299
  };
@@ -255066,6 +255591,21 @@ var useGeminiStream = (geminiClient, history, addItem, setShowHelp, config2, onD
255066
255591
  ),
255067
255592
  [addItem, config2]
255068
255593
  );
255594
+ const handleSessionTokenLimitExceededEvent = (0, import_react29.useCallback)(
255595
+ (value) => addItem(
255596
+ {
255597
+ type: "error",
255598
+ text: `\u{1F6AB} Session token limit exceeded: ${value.currentTokens.toLocaleString()} tokens > ${value.limit.toLocaleString()} limit.
255599
+
255600
+ \u{1F4A1} Solutions:
255601
+ \u2022 Start a new session: Use /clear command
255602
+ \u2022 Increase limit: Add "sessionTokenLimit": (e.g., 128000) to your settings.json
255603
+ \u2022 Compress history: Use /compress command to compress history`
255604
+ },
255605
+ Date.now()
255606
+ ),
255607
+ [addItem]
255608
+ );
255069
255609
  const handleLoopDetectedEvent = (0, import_react29.useCallback)(() => {
255070
255610
  addItem(
255071
255611
  {
@@ -255109,6 +255649,9 @@ var useGeminiStream = (geminiClient, history, addItem, setShowHelp, config2, onD
255109
255649
  case GeminiEventType.MaxSessionTurns:
255110
255650
  handleMaxSessionTurnsEvent();
255111
255651
  break;
255652
+ case GeminiEventType.SessionTokenLimitExceeded:
255653
+ handleSessionTokenLimitExceededEvent(event.value);
255654
+ break;
255112
255655
  case GeminiEventType.LoopDetected:
255113
255656
  loopDetectedRef.current = true;
255114
255657
  break;
@@ -255129,7 +255672,8 @@ var useGeminiStream = (geminiClient, history, addItem, setShowHelp, config2, onD
255129
255672
  handleErrorEvent,
255130
255673
  scheduleToolCalls,
255131
255674
  handleChatCompressionEvent,
255132
- handleMaxSessionTurnsEvent
255675
+ handleMaxSessionTurnsEvent,
255676
+ handleSessionTokenLimitExceededEvent
255133
255677
  ]
255134
255678
  );
255135
255679
  const submitQuery = (0, import_react29.useCallback)(
@@ -258482,7 +259026,7 @@ import { promises as fs36 } from "fs";
258482
259026
  import path41 from "path";
258483
259027
 
258484
259028
  // packages/cli/src/generated/git-commit.ts
258485
- var GIT_COMMIT_INFO = "bd0d347 (local modifications)";
259029
+ var GIT_COMMIT_INFO = "d360b86 (local modifications)";
258486
259030
 
258487
259031
  // node_modules/read-package-up/index.js
258488
259032
  import path39 from "node:path";
@@ -258695,7 +259239,7 @@ async function getPackageJson() {
258695
259239
  // packages/cli/src/utils/version.ts
258696
259240
  async function getCliVersion() {
258697
259241
  const pkgJson = await getPackageJson();
258698
- return "0.0.1-alpha.11";
259242
+ return "0.0.1-alpha.12";
258699
259243
  }
258700
259244
 
258701
259245
  // packages/cli/src/ui/commands/memoryCommand.ts
@@ -258808,6 +259352,7 @@ var clearCommand = {
258808
259352
  action: async (context2, _args) => {
258809
259353
  context2.ui.setDebugMessage("Clearing terminal and resetting chat.");
258810
259354
  await context2.services.config?.getGeminiClient()?.resetChat();
259355
+ context2.session.resetSession();
258811
259356
  context2.ui.clear();
258812
259357
  }
258813
259358
  };
@@ -258981,7 +259526,8 @@ var useSlashCommandProcessor = (config2, settings, history, addItem, clearItems,
258981
259526
  setDebugMessage: onDebugMessage
258982
259527
  },
258983
259528
  session: {
258984
- stats: session.stats
259529
+ stats: session.stats,
259530
+ resetSession: session.resetSession
258985
259531
  }
258986
259532
  }),
258987
259533
  [
@@ -258993,6 +259539,7 @@ var useSlashCommandProcessor = (config2, settings, history, addItem, clearItems,
258993
259539
  clearItems,
258994
259540
  refreshStatic,
258995
259541
  session.stats,
259542
+ session.resetSession,
258996
259543
  onDebugMessage
258997
259544
  ]
258998
259545
  );
@@ -259285,7 +259832,7 @@ ${docsUrl}`,
259285
259832
  return;
259286
259833
  }
259287
259834
  const geminiTools = tools.filter((tool) => !("serverName" in tool));
259288
- let message = "Available Gemini CLI tools:\n\n";
259835
+ let message = "Available Qwen Code tools:\n\n";
259289
259836
  if (geminiTools.length > 0) {
259290
259837
  geminiTools.forEach((tool) => {
259291
259838
  if (useShowDescriptions && tool.description) {
@@ -259350,7 +259897,7 @@ ${docsUrl}`,
259350
259897
  * **Model Version:** ${modelVersion}
259351
259898
  * **Memory Usage:** ${memoryUsage}
259352
259899
  `;
259353
- let bugReportUrl = "https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml&title={title}&info={info}";
259900
+ let bugReportUrl = "https://github.com/QwenLM/Qwen-Code/issues/new?template=bug_report.yml&title={title}&info={info}";
259354
259901
  const bugCommand = config2?.getBugCommand();
259355
259902
  if (bugCommand?.urlTemplate) {
259356
259903
  bugReportUrl = bugCommand.urlTemplate;
@@ -282603,6 +283150,8 @@ async function loadCliConfig(settings, extensions, sessionId2, argv) {
282603
283150
  model: argv.model,
282604
283151
  extensionContextFilePaths,
282605
283152
  maxSessionTurns: settings.maxSessionTurns ?? -1,
283153
+ sessionTokenLimit: settings.sessionTokenLimit ?? 32e3,
283154
+ maxFolderItems: settings.maxFolderItems ?? 20,
282606
283155
  listExtensions: argv.listExtensions || false,
282607
283156
  activeExtensions: activeExtensions.map((e3) => ({
282608
283157
  name: e3.config.name,
@@ -283765,7 +284314,7 @@ var AboutBox = ({
283765
284314
  marginY: 1,
283766
284315
  width: "100%",
283767
284316
  children: [
283768
- /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Box_default, { marginBottom: 1, children: /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Text, { bold: true, color: Colors.AccentPurple, children: "About Gemini CLI" }) }),
284317
+ /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Box_default, { marginBottom: 1, children: /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Text, { bold: true, color: Colors.AccentPurple, children: "About Qwen Code" }) }),
283769
284318
  /* @__PURE__ */ (0, import_jsx_runtime38.jsxs)(Box_default, { flexDirection: "row", children: [
283770
284319
  /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Box_default, { width: "35%", children: /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Text, { bold: true, color: Colors.LightBlue, children: "CLI Version" }) }),
283771
284320
  /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Box_default, { children: /* @__PURE__ */ (0, import_jsx_runtime38.jsx)(Text, { children: cliVersion }) })
@@ -289596,9 +290145,9 @@ function getContainerPath(hostPath) {
289596
290145
  }
289597
290146
  return hostPath;
289598
290147
  }
289599
- var LOCAL_DEV_SANDBOX_IMAGE_NAME = "gemini-cli-sandbox";
289600
- var SANDBOX_NETWORK_NAME = "gemini-cli-sandbox";
289601
- var SANDBOX_PROXY_NAME = "gemini-cli-sandbox-proxy";
290148
+ var LOCAL_DEV_SANDBOX_IMAGE_NAME = "qwen-code-sandbox";
290149
+ var SANDBOX_NETWORK_NAME = "qwen-code-sandbox";
290150
+ var SANDBOX_PROXY_NAME = "qwen-code-sandbox-proxy";
289602
290151
  var BUILTIN_SEATBELT_PROFILES = [
289603
290152
  "permissive-open",
289604
290153
  "permissive-closed",
@@ -289685,7 +290234,7 @@ function entrypoint(workdir) {
289685
290234
  )
289686
290235
  );
289687
290236
  const cliArgs = process.argv.slice(2).map((arg) => (0, import_shell_quote3.quote)([arg]));
289688
- const cliCmd = process.env.NODE_ENV === "development" ? process.env.DEBUG ? "npm run debug --" : "npm rebuild && npm run start --" : process.env.DEBUG ? `node --inspect-brk=0.0.0.0:${process.env.DEBUG_PORT || "9229"} $(which gemini)` : "gemini";
290237
+ const cliCmd = process.env.NODE_ENV === "development" ? process.env.DEBUG ? "npm run debug --" : "npm rebuild && npm run start --" : process.env.DEBUG ? `node --inspect-brk=0.0.0.0:${process.env.DEBUG_PORT || "9229"} $(which qwen)` : "qwen";
289689
290238
  const args = [...shellCmds, cliCmd, ...cliArgs];
289690
290239
  return ["bash", "-c", args.join(" ")];
289691
290240
  }
@@ -289942,6 +290491,15 @@ async function start_sandbox(config2, nodeArgs = []) {
289942
290491
  if (process.env.GOOGLE_API_KEY) {
289943
290492
  args.push("--env", `GOOGLE_API_KEY=${process.env.GOOGLE_API_KEY}`);
289944
290493
  }
290494
+ if (process.env.OPENAI_API_KEY) {
290495
+ args.push("--env", `OPENAI_API_KEY=${process.env.OPENAI_API_KEY}`);
290496
+ }
290497
+ if (process.env.OPENAI_BASE_URL) {
290498
+ args.push("--env", `OPENAI_BASE_URL=${process.env.OPENAI_BASE_URL}`);
290499
+ }
290500
+ if (process.env.OPENAI_MODEL) {
290501
+ args.push("--env", `OPENAI_MODEL=${process.env.OPENAI_MODEL}`);
290502
+ }
289945
290503
  if (process.env.GOOGLE_GENAI_USE_VERTEXAI) {
289946
290504
  args.push(
289947
290505
  "--env",
@@ -290701,13 +291259,23 @@ async function loadNonInteractiveConfig(config2, extensions, settings, argv) {
290701
291259
  );
290702
291260
  }
290703
291261
  async function validateNonInterActiveAuth(selectedAuthType, nonInteractiveConfig) {
290704
- if (!selectedAuthType && !process.env.GEMINI_API_KEY) {
291262
+ if (!selectedAuthType && !process.env.GEMINI_API_KEY && !process.env.OPENAI_API_KEY) {
290705
291263
  console.error(
290706
- `Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY env variable file before running`
291264
+ `Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY or OPENAI_API_KEY env variable before running`
290707
291265
  );
290708
291266
  process.exit(1);
290709
291267
  }
290710
- selectedAuthType = selectedAuthType || AuthType2.USE_GEMINI;
291268
+ if (!selectedAuthType) {
291269
+ if (process.env.OPENAI_API_KEY) {
291270
+ selectedAuthType = AuthType2.USE_OPENAI;
291271
+ } else if (process.env.GEMINI_API_KEY) {
291272
+ selectedAuthType = AuthType2.USE_GEMINI;
291273
+ }
291274
+ }
291275
+ if (!selectedAuthType) {
291276
+ console.error("No valid authentication method found");
291277
+ process.exit(1);
291278
+ }
290711
291279
  const err = validateAuthMethod(selectedAuthType);
290712
291280
  if (err != null) {
290713
291281
  console.error(err);
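
The practical effect of this hunk: for headless runs, exporting `OPENAI_API_KEY` alone now selects OpenAI auth automatically, with `GEMINI_API_KEY` as the fallback. Assuming the fork keeps gemini-cli's `--prompt`/`-p` flag for non-interactive mode, usage would look like:

```bash
export OPENAI_API_KEY="your_api_key_here"
export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
export OPENAI_MODEL="qwen3-coder-plus"
qwen -p "Summarize the changes in this repo"
```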
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@qwen-code/qwen-code",
3
- "version": "0.0.1-alpha.11",
3
+ "version": "0.0.1-alpha.12",
4
4
  "engines": {
5
5
  "node": ">=20"
6
6
  },
@@ -13,7 +13,7 @@
13
13
  "url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
14
14
  },
15
15
  "config": {
16
- "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-alpha.11"
16
+ "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.1-alpha.12"
17
17
  },
18
18
  "scripts": {
19
19
  "start": "node scripts/start.js",
@@ -83,6 +83,6 @@
83
83
  "yargs": "^18.0.0"
84
84
  },
85
85
  "dependencies": {
86
- "@qwen-code/qwen-code": "^0.0.1-alpha.11"
86
+ "@qwen-code/qwen-code": "^0.0.1-alpha.8"
87
87
  }
88
88
  }