@qwen-code/qwen-code 0.0.1-alpha.10 → 0.0.1-alpha.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -5,7 +5,7 @@
5
5
  Qwen Code is a command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli) (Please refer to [this document](./README.gemini.md) for more details), optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder) models with enhanced parser support & tool support.
6
6
 
7
7
  > [!WARNING]
8
- > Qwen Code may issue multiple API calls per cycle, resulting in higher token usage, similar to Claude Code. We’re actively working to enhance API efficiency and improve the overall developer experience.
8
+ > Qwen Code may issue multiple API calls per cycle, resulting in higher token usage, similar to Claude Code. We’re actively working to enhance API efficiency and improve the overall developer experience. ModelScope offers 2,000 free API calls if you are in China mainland. Please check [API config section](#api-configuration) for more details.
9
9
 
10
10
  ## Key Features
11
11
 
@@ -50,17 +50,31 @@ npm install -g .
50
50
  Set your Qwen API key (In Qwen Code project, you can also set your API key in `.env` file). The `.env` file should be placed in the root directory of your current project.
51
51
 
52
52
  > ⚠️ **Notice:** <br>
53
- > **If you are in mainland China, please go to https://bailian.console.aliyun.com/ to apply for your API key** <br>
53
+ > **If you are in mainland China, please go to https://bailian.console.aliyun.com/ or https://modelscope.cn/docs/model-service/API-Inference/intro to apply for your API key** <br>
54
54
  > **If you are not in mainland China, please go to https://modelstudio.console.alibabacloud.com/ to apply for your API key**
55
55
 
56
+ If you are in mainland China, you can use Qwen3-Coder through the Alibaba Cloud Bailian platform.
57
+
56
58
  ```bash
57
- # If you are in mainland China, use the following URL:
58
- # https://dashscope.aliyuncs.com/compatible-mode/v1
59
- # If you are not in mainland China, use the following URL:
60
- # https://dashscope-intl.aliyuncs.com/compatible-mode/v1
61
59
  export OPENAI_API_KEY="your_api_key_here"
62
- export OPENAI_BASE_URL="your_api_base_url_here"
63
- export OPENAI_MODEL="your_api_model_here"
60
+ export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
61
+ export OPENAI_MODEL="qwen3-coder-plus"
62
+ ```
63
+
64
+ If you are in mainland China, ModelScope offers 2,000 free model inference API calls per day:
65
+
66
+ ```bash
67
+ export OPENAI_API_KEY="your_api_key_here"
68
+ export OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
69
+ export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
70
+ ```
71
+
72
+ If you are not in mainland China, you can use Qwen3-Coder through the Alibaba Cloud ModelStudio platform.
73
+
74
+ ```bash
75
+ export OPENAI_API_KEY="your_api_key_here"
76
+ export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
77
+ export OPENAI_MODEL="qwen3-coder-plus"
64
78
  ```
65
79
 
66
80
  ## Usage Examples
@@ -148,3 +162,7 @@ This project is based on [Google Gemini CLI](https://github.com/google-gemini/ge
148
162
  ## License
149
163
 
150
164
  [LICENSE](./LICENSE)
165
+
166
+ ## Star History
167
+
168
+ [![Star History Chart](https://api.star-history.com/svg?repos=QwenLM/qwen-code&type=Date)](https://www.star-history.com/#QwenLM/qwen-code&Date)
package/bundle/gemini.js CHANGED
@@ -102278,20 +102278,20 @@ var require_url = __commonJS({
102278
102278
  "use strict";
102279
102279
  Object.defineProperty(exports2, "__esModule", { value: true });
102280
102280
  exports2.isUrlIgnored = exports2.urlMatches = void 0;
102281
- function urlMatches(url2, urlToMatch) {
102281
+ function urlMatches2(url2, urlToMatch) {
102282
102282
  if (typeof urlToMatch === "string") {
102283
102283
  return url2 === urlToMatch;
102284
102284
  } else {
102285
102285
  return !!url2.match(urlToMatch);
102286
102286
  }
102287
102287
  }
102288
- exports2.urlMatches = urlMatches;
102288
+ exports2.urlMatches = urlMatches2;
102289
102289
  function isUrlIgnored(url2, ignoredUrls) {
102290
102290
  if (!ignoredUrls) {
102291
102291
  return false;
102292
102292
  }
102293
102293
  for (const ignoreUrl of ignoredUrls) {
102294
- if (urlMatches(url2, ignoreUrl)) {
102294
+ if (urlMatches2(url2, ignoreUrl)) {
102295
102295
  return true;
102296
102296
  }
102297
102297
  }
@@ -154791,7 +154791,7 @@ async function createContentGeneratorConfig(model, authType) {
154791
154791
  return contentGeneratorConfig;
154792
154792
  }
154793
154793
  async function createContentGenerator(config2, gcConfig, sessionId2) {
154794
- const version = "0.0.1-alpha.10";
154794
+ const version = "0.0.1-alpha.11";
154795
154795
  const httpOptions = {
154796
154796
  headers: {
154797
154797
  "User-Agent": `GeminiCLI/${version} (${process.platform}; ${process.arch})`
@@ -245362,7 +245362,14 @@ var Turn = class {
245362
245362
  import path22 from "node:path";
245363
245363
  import fs23 from "node:fs";
245364
245364
  import process20 from "node:process";
245365
- function getCoreSystemPrompt(userMemory) {
245365
+ function normalizeUrl(url2) {
245366
+ return url2.endsWith("/") ? url2.slice(0, -1) : url2;
245367
+ }
245368
+ function urlMatches(urlArray, targetUrl) {
245369
+ const normalizedTarget = normalizeUrl(targetUrl);
245370
+ return urlArray.some((url2) => normalizeUrl(url2) === normalizedTarget);
245371
+ }
245372
+ function getCoreSystemPrompt(userMemory, config2) {
245366
245373
  let systemMdEnabled = false;
245367
245374
  let systemMdPath = path22.resolve(path22.join(GEMINI_CONFIG_DIR, "system.md"));
245368
245375
  const systemMdVar = process20.env.GEMINI_SYSTEM_MD?.toLowerCase();
@@ -245375,6 +245382,30 @@ function getCoreSystemPrompt(userMemory) {
245375
245382
  throw new Error(`missing system prompt file '${systemMdPath}'`);
245376
245383
  }
245377
245384
  }
245385
+ if (config2?.systemPromptMappings) {
245386
+ const currentModel = process20.env.OPENAI_MODEL || "";
245387
+ const currentBaseUrl = process20.env.OPENAI_BASE_URL || "";
245388
+ const matchedMapping = config2.systemPromptMappings.find((mapping) => {
245389
+ const { baseUrls, modelNames } = mapping;
245390
+ if (baseUrls && modelNames && urlMatches(baseUrls, currentBaseUrl) && modelNames.includes(currentModel)) {
245391
+ return true;
245392
+ }
245393
+ if (baseUrls && urlMatches(baseUrls, currentBaseUrl) && !modelNames) {
245394
+ return true;
245395
+ }
245396
+ if (modelNames && modelNames.includes(currentModel) && !baseUrls) {
245397
+ return true;
245398
+ }
245399
+ return false;
245400
+ });
245401
+ if (matchedMapping?.template) {
245402
+ const isGitRepo = isGitRepository(process20.cwd());
245403
+ let template = matchedMapping.template;
245404
+ template = template.replace("{RUNTIME_VARS_IS_GIT_REPO}", String(isGitRepo));
245405
+ template = template.replace("{RUNTIME_VARS_SANDBOX}", process20.env.SANDBOX || "");
245406
+ return template;
245407
+ }
245408
+ }
245378
245409
  const basePrompt = systemMdEnabled ? fs23.readFileSync(systemMdPath, "utf8") : `
245379
245410
  You are an interactive CLI agent specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
245380
245411
 
@@ -246676,7 +246707,10 @@ ${result.llmContent}`
246676
246707
  ];
246677
246708
  try {
246678
246709
  const userMemory = this.config.getUserMemory();
246679
- const systemInstruction = getCoreSystemPrompt(userMemory);
246710
+ const systemPromptMappings = this.config.getSystemPromptMappings();
246711
+ const systemInstruction = getCoreSystemPrompt(userMemory, {
246712
+ systemPromptMappings
246713
+ });
246680
246714
  const generateContentConfigWithThinking = isThinkingSupported(this.config.getModel()) ? {
246681
246715
  ...this.generateContentConfig,
246682
246716
  thinkingConfig: {
@@ -246738,7 +246772,10 @@ ${result.llmContent}`
246738
246772
  const modelToUse = model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL;
246739
246773
  try {
246740
246774
  const userMemory = this.config.getUserMemory();
246741
- const systemInstruction = getCoreSystemPrompt(userMemory);
246775
+ const systemPromptMappings = this.config.getSystemPromptMappings();
246776
+ const systemInstruction = getCoreSystemPrompt(userMemory, {
246777
+ systemPromptMappings
246778
+ });
246742
246779
  const requestConfig = {
246743
246780
  abortSignal,
246744
246781
  ...this.generateContentConfig,
@@ -246810,7 +246847,10 @@ ${result.llmContent}`
246810
246847
  };
246811
246848
  try {
246812
246849
  const userMemory = this.config.getUserMemory();
246813
- const systemInstruction = getCoreSystemPrompt(userMemory);
246850
+ const systemPromptMappings = this.config.getSystemPromptMappings();
246851
+ const systemInstruction = getCoreSystemPrompt(userMemory, {
246852
+ systemPromptMappings
246853
+ });
246814
246854
  const requestConfig = {
246815
246855
  abortSignal,
246816
246856
  ...configToUse,
@@ -252110,6 +252150,7 @@ var Config = class {
252110
252150
  ideMode;
252111
252151
  enableOpenAILogging;
252112
252152
  sampling_params;
252153
+ systemPromptMappings;
252113
252154
  modelSwitchedDuringSession = false;
252114
252155
  maxSessionTurns;
252115
252156
  listExtensions;
@@ -252160,6 +252201,7 @@ var Config = class {
252160
252201
  this.ideMode = params.ideMode ?? false;
252161
252202
  this.enableOpenAILogging = params.enableOpenAILogging ?? false;
252162
252203
  this.sampling_params = params.sampling_params;
252204
+ this.systemPromptMappings = params.systemPromptMappings;
252163
252205
  if (params.contextFileName) {
252164
252206
  setGeminiMdFilename(params.contextFileName);
252165
252207
  }
@@ -252371,6 +252413,9 @@ var Config = class {
252371
252413
  getEnableOpenAILogging() {
252372
252414
  return this.enableOpenAILogging;
252373
252415
  }
252416
+ getSystemPromptMappings() {
252417
+ return this.systemPromptMappings;
252418
+ }
252374
252419
  async refreshMemory() {
252375
252420
  const { memoryContent, fileCount } = await loadServerHierarchicalMemory(this.getWorkingDir(), this.getDebugMode(), this.getFileService(), this.getExtensionContextFilePaths());
252376
252421
  this.setUserMemory(memoryContent);
@@ -258437,7 +258482,7 @@ import { promises as fs36 } from "fs";
258437
258482
  import path41 from "path";
258438
258483
 
258439
258484
  // packages/cli/src/generated/git-commit.ts
258440
- var GIT_COMMIT_INFO = "b8e2891 (local modifications)";
258485
+ var GIT_COMMIT_INFO = "bd0d347 (local modifications)";
258441
258486
 
258442
258487
  // node_modules/read-package-up/index.js
258443
258488
  import path39 from "node:path";
@@ -258650,7 +258695,7 @@ async function getPackageJson() {
258650
258695
  // packages/cli/src/utils/version.ts
258651
258696
  async function getCliVersion() {
258652
258697
  const pkgJson = await getPackageJson();
258653
- return "0.0.1-alpha.10";
258698
+ return "0.0.1-alpha.11";
258654
258699
  }
258655
258700
 
258656
258701
  // packages/cli/src/ui/commands/memoryCommand.ts
@@ -282566,7 +282611,17 @@ async function loadCliConfig(settings, extensions, sessionId2, argv) {
282566
282611
  noBrowser: !!process29.env.NO_BROWSER,
282567
282612
  ideMode,
282568
282613
  enableOpenAILogging: (typeof argv.openaiLogging === "undefined" ? settings.enableOpenAILogging : argv.openaiLogging) ?? false,
282569
- sampling_params: settings.sampling_params
282614
+ sampling_params: settings.sampling_params,
282615
+ systemPromptMappings: settings.systemPromptMappings ?? [
282616
+ {
282617
+ baseUrls: [
282618
+ "https://dashscope.aliyuncs.com/compatible-mode/v1/",
282619
+ "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/"
282620
+ ],
282621
+ modelNames: ["qwen3-coder-plus"],
282622
+ template: 'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}'
282623
+ }
282624
+ ]
282570
282625
  });
282571
282626
  }
282572
282627
  function mergeMcpServers(settings, extensions) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@qwen-code/qwen-code",
3
- "version": "0.0.1-alpha.10",
3
+ "version": "0.0.1-alpha.11",
4
4
  "engines": {
5
5
  "node": ">=20"
6
6
  },
@@ -13,7 +13,7 @@
13
13
  "url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
14
14
  },
15
15
  "config": {
16
- "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-alpha.10"
16
+ "sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-alpha.11"
17
17
  },
18
18
  "scripts": {
19
19
  "start": "node scripts/start.js",
@@ -83,6 +83,6 @@
83
83
  "yargs": "^18.0.0"
84
84
  },
85
85
  "dependencies": {
86
- "@qwen-code/qwen-code": "^0.0.1-alpha.10"
86
+ "@qwen-code/qwen-code": "^0.0.1-alpha.11"
87
87
  }
88
88
  }