@jaydennleemc/qwen-code-local 0.12.5 → 0.12.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +7 -31
  2. package/dist/cli.js +149 -27
  3. package/package.json +2 -2
package/README.md CHANGED
@@ -33,45 +33,21 @@ Qwen Code is an open-source AI agent for the terminal, optimized for [Qwen3-Code
33
33
 
34
34
  ## Installation
35
35
 
36
- ### Quick Install (Recommended)
37
-
38
- #### Linux / macOS
39
-
40
- ```bash
41
- bash -c "$(curl -fsSL https://qwen-code-assets.oss-cn-hangzhou.aliyuncs.com/installation/install-qwen.sh)"
42
- ```
43
-
44
- #### Windows (Run as Administrator CMD)
45
-
46
- ```cmd
47
- curl -fsSL -o %TEMP%\install-qwen.bat https://qwen-code-assets.oss-cn-hangzhou.aliyuncs.com/installation/install-qwen.bat && %TEMP%\install-qwen.bat
48
- ```
49
-
50
- > **Note**: It's recommended to restart your terminal after installation to ensure environment variables take effect.
51
-
52
- ### Manual Installation
53
-
54
- #### Prerequisites
36
+ ### Prerequisites
55
37
 
56
38
  Make sure you have Node.js 20 or later installed. Download it from [nodejs.org](https://nodejs.org/en/download).
57
39
 
58
40
  #### NPM
59
41
 
60
42
  ```bash
61
- npm install -g @qwen-code/qwen-code@latest
62
- ```
63
-
64
- #### Homebrew (macOS, Linux)
65
-
66
- ```bash
67
- brew install qwen-code
43
+ npm i -g @jaydennleemc/qwen-code-local
68
44
  ```
69
45
 
70
46
  ## Quick Start
71
47
 
72
48
  ```bash
73
49
  # Start Qwen Code (interactive)
74
- qwen
50
+ qwen-local
75
51
 
76
52
  # Then, in the session:
77
53
  /help
@@ -175,7 +151,7 @@ Here is a complete example:
175
151
  **Step 3:** Start Qwen Code — your configuration takes effect automatically:
176
152
 
177
153
  ```bash
178
- qwen
154
+ qwen-local
179
155
  ```
180
156
 
181
157
  Use the `/model` command at any time to switch between all configured models.
@@ -364,16 +340,16 @@ As an open-source terminal agent, you can use Qwen Code in four primary ways:
364
340
 
365
341
  ```bash
366
342
  cd your-project/
367
- qwen
343
+ qwen-local
368
344
  ```
369
345
 
370
- Run `qwen` in your project folder to launch the interactive terminal UI. Use `@` to reference local files (for example `@src/main.ts`).
346
+ Run `qwen-local` in your project folder to launch the interactive terminal UI. Use `@` to reference local files (for example `@src/main.ts`).
371
347
 
372
348
  #### Headless mode
373
349
 
374
350
  ```bash
375
351
  cd your-project/
376
- qwen -p "your question"
352
+ qwen-local -p "your question"
377
353
  ```
378
354
 
379
355
  Use `-p` to run Qwen Code without the interactive UI—ideal for scripts, automation, and CI/CD. Learn more: [Headless mode](https://qwenlm.github.io/qwen-code-docs/en/users/features/headless).
package/dist/cli.js CHANGED
@@ -142684,11 +142684,12 @@ var init_default = __esm({
142684
142684
  return customHeaders ? { ...defaultHeaders, ...customHeaders } : defaultHeaders;
142685
142685
  }
142686
142686
  buildClient() {
142687
- const { apiKey, baseUrl, timeout: timeout2 = DEFAULT_TIMEOUT, maxRetries = DEFAULT_MAX_RETRIES } = this.contentGeneratorConfig;
142687
+ const { apiKey, baseUrl, timeout: timeout2 = DEFAULT_TIMEOUT, maxRetries = DEFAULT_MAX_RETRIES, authType } = this.contentGeneratorConfig;
142688
142688
  const defaultHeaders = this.buildHeaders();
142689
142689
  const runtimeOptions = buildRuntimeFetchOptions("openai", this.cliConfig.getProxy());
142690
+ const isLocalProvider = authType === "ollama" || authType === "lm-studio";
142690
142691
  return new OpenAI({
142691
- apiKey,
142692
+ apiKey: isLocalProvider ? "" : apiKey,
142692
142693
  baseURL: baseUrl,
142693
142694
  timeout: timeout2,
142694
142695
  maxRetries,
@@ -157729,7 +157730,7 @@ __export(geminiContentGenerator_exports, {
157729
157730
  createGeminiContentGenerator: () => createGeminiContentGenerator
157730
157731
  });
157731
157732
  function createGeminiContentGenerator(config2, gcConfig) {
157732
- const version2 = "0.12.5";
157733
+ const version2 = "0.12.7";
157733
157734
  const userAgent2 = config2.userAgent || `QwenCode/${version2} (${process.platform}; ${process.arch})`;
157734
157735
  const baseHeaders = {
157735
157736
  "User-Agent": userAgent2
@@ -157807,7 +157808,8 @@ function validateModelConfig(config2, isStrictModelProvider = false) {
157807
157808
  if (config2.authType === AuthType2.QWEN_OAUTH) {
157808
157809
  return { valid: true, errors: [] };
157809
157810
  }
157810
- if (!config2.apiKey) {
157811
+ const isLocalModelProvider = config2.authType === AuthType2.USE_OLLAMA || config2.authType === AuthType2.USE_LM_STUDIO;
157812
+ if (!config2.apiKey && !isLocalModelProvider) {
157811
157813
  if (isStrictModelProvider) {
157812
157814
  errors.push(new StrictMissingCredentialsError(config2.authType, config2.model, config2.apiKeyEnvKey));
157813
157815
  } else {
@@ -289775,7 +289777,7 @@ var require_backend = __commonJS({
289775
289777
  });
289776
289778
  return a2._currentValue;
289777
289779
  }, "useContext"),
289778
- useEffect: /* @__PURE__ */ __name(function useEffect60(a2) {
289780
+ useEffect: /* @__PURE__ */ __name(function useEffect61(a2) {
289779
289781
  C2();
289780
289782
  x3.push({
289781
289783
  primitive: "Effect",
@@ -382939,6 +382941,7 @@ __name(saveSettings, "saveSettings");
382939
382941
  var DEFAULT_ENV_KEYS = {
382940
382942
  [AuthType2.USE_OPENAI]: "OPENAI_API_KEY",
382941
382943
  [AuthType2.USE_LM_STUDIO]: "LMSTUDIO_API_KEY",
382944
+ [AuthType2.USE_OLLAMA]: "OLLAMA_API_KEY",
382942
382945
  [AuthType2.USE_ANTHROPIC]: "ANTHROPIC_API_KEY",
382943
382946
  [AuthType2.USE_GEMINI]: "GEMINI_API_KEY",
382944
382947
  [AuthType2.USE_VERTEX_AI]: "GOOGLE_API_KEY"
@@ -383063,6 +383066,9 @@ function validateAuthMethod(authMethod, config2) {
383063
383066
  }
383064
383067
  return null;
383065
383068
  }
383069
+ if (authMethod === AuthType2.USE_OLLAMA) {
383070
+ return null;
383071
+ }
383066
383072
  if (authMethod === AuthType2.QWEN_OAUTH) {
383067
383073
  return null;
383068
383074
  }
@@ -389669,7 +389675,7 @@ __name(getPackageJson, "getPackageJson");
389669
389675
  // packages/cli/src/utils/version.ts
389670
389676
  async function getCliVersion() {
389671
389677
  const pkgJson = await getPackageJson();
389672
- return "0.12.5";
389678
+ return "0.12.7";
389673
389679
  }
389674
389680
  __name(getCliVersion, "getCliVersion");
389675
389681
 
@@ -397233,7 +397239,7 @@ var formatDuration = /* @__PURE__ */ __name((milliseconds) => {
397233
397239
 
397234
397240
  // packages/cli/src/generated/git-commit.ts
397235
397241
  init_esbuild_shims();
397236
- var GIT_COMMIT_INFO = "927ad2e0";
397242
+ var GIT_COMMIT_INFO = "dbb1e26f0";
397237
397243
 
397238
397244
  // packages/cli/src/utils/systemInfo.ts
397239
397245
  async function getNpmVersion() {
@@ -423919,14 +423925,14 @@ function getPrefixWidth(prefix) {
423919
423925
  }
423920
423926
  __name(getPrefixWidth, "getPrefixWidth");
423921
423927
  function parseThinkingContent(text) {
423922
- const thinkPattern = /<think(ing)?>([\s\S]*?)<\/think(ing)?>/g;
423928
+ const thinkPattern = /<think(?:ing)?[^>]*>([\s\S]*?)<\/think(?:ing)?[^>]*>/gi;
423923
423929
  const matches = [...text.matchAll(thinkPattern)];
423924
423930
  if (matches.length === 0) {
423925
423931
  return { thinkingContent: "", mainContent: text, hasThinking: false };
423926
423932
  }
423927
423933
  const thinkingParts = [];
423928
423934
  for (const match2 of matches) {
423929
- const content = match2[2]?.trim();
423935
+ const content = match2[1]?.trim();
423930
423936
  if (content) {
423931
423937
  thinkingParts.push(content);
423932
423938
  }
@@ -423935,10 +423941,12 @@ function parseThinkingContent(text) {
423935
423941
  if (!hasThinking) {
423936
423942
  return { thinkingContent: "", mainContent: text, hasThinking: false };
423937
423943
  }
423938
- const lastMatch = matches[matches.length - 1];
423939
- const lastIndex = lastMatch.index + lastMatch[0].length;
423944
+ let mainContent = text;
423945
+ for (const match2 of matches) {
423946
+ mainContent = mainContent.replace(match2[0], "");
423947
+ }
423948
+ mainContent = mainContent.trim();
423940
423949
  const thinkingContent = thinkingParts.join("\n") + "\n";
423941
- const mainContent = text.slice(lastIndex).trim();
423942
423950
  return { thinkingContent, mainContent, hasThinking };
423943
423951
  }
423944
423952
  __name(parseThinkingContent, "parseThinkingContent");
@@ -435851,6 +435859,7 @@ function AuthDialog() {
435851
435859
  onAuthError
435852
435860
  } = useUIActions();
435853
435861
  const config2 = useConfig();
435862
+ const savedConfig = config2.getContentGeneratorConfig();
435854
435863
  const [errorMessage, setErrorMessage] = (0, import_react83.useState)(null);
435855
435864
  const [viewLevel, setViewLevel] = (0, import_react83.useState)("main");
435856
435865
  const [regionIndex, setRegionIndex] = (0, import_react83.useState)(0);
@@ -435858,17 +435867,51 @@ function AuthDialog() {
435858
435867
  "china" /* CHINA */
435859
435868
  );
435860
435869
  const [lmStudioBaseUrl, setLmStudioBaseUrl] = (0, import_react83.useState)(
435861
- "http://localhost:1234/v1"
435870
+ savedConfig?.baseUrl?.includes("1234") ? savedConfig.baseUrl : "http://localhost:1234/v1"
435871
+ );
435872
+ const [lmStudioApiKey, setLmStudioApiKey] = (0, import_react83.useState)(
435873
+ savedConfig?.apiKey || ""
435862
435874
  );
435863
- const [lmStudioApiKey, setLmStudioApiKey] = (0, import_react83.useState)("");
435864
435875
  const [lmStudioStep, setLmStudioStep] = (0, import_react83.useState)(
435865
435876
  "baseUrl"
435866
435877
  );
435867
435878
  const [ollamaBaseUrl, setOllamaBaseUrl] = (0, import_react83.useState)(
435868
- "http://localhost:11434/v1"
435879
+ savedConfig?.baseUrl?.includes("11434") ? savedConfig.baseUrl : "http://localhost:11434/v1"
435880
+ );
435881
+ const [ollamaApiKey, setOllamaApiKey] = (0, import_react83.useState)(
435882
+ savedConfig?.apiKey || ""
435869
435883
  );
435870
- const [ollamaApiKey, setOllamaApiKey] = (0, import_react83.useState)("");
435871
- const [ollamaStep, setOllamaStep] = (0, import_react83.useState)("baseUrl");
435884
+ const [ollamaStep, setOllamaStep] = (0, import_react83.useState)(
435885
+ "baseUrl"
435886
+ );
435887
+ const [ollamaModels, setOllamaModels] = (0, import_react83.useState)([]);
435888
+ const [loadingOllamaModels, setLoadingOllamaModels] = (0, import_react83.useState)(false);
435889
+ const [selectedOllamaModel, setSelectedOllamaModel] = (0, import_react83.useState)(
435890
+ savedConfig?.model || ""
435891
+ );
435892
+ (0, import_react83.useEffect)(() => {
435893
+ const currentAuthType = config2.getAuthType();
435894
+ const contentGenConfig2 = config2.getContentGeneratorConfig();
435895
+ if (currentAuthType === AuthType2.USE_LM_STUDIO && contentGenConfig2) {
435896
+ if (contentGenConfig2.baseUrl) {
435897
+ setLmStudioBaseUrl(contentGenConfig2.baseUrl);
435898
+ }
435899
+ if (contentGenConfig2.apiKey) {
435900
+ setLmStudioApiKey(contentGenConfig2.apiKey);
435901
+ }
435902
+ }
435903
+ if (currentAuthType === AuthType2.USE_OLLAMA && contentGenConfig2) {
435904
+ if (contentGenConfig2.baseUrl) {
435905
+ setOllamaBaseUrl(contentGenConfig2.baseUrl);
435906
+ }
435907
+ if (contentGenConfig2.apiKey) {
435908
+ setOllamaApiKey(contentGenConfig2.apiKey);
435909
+ }
435910
+ if (contentGenConfig2.model) {
435911
+ setSelectedOllamaModel(contentGenConfig2.model);
435912
+ }
435913
+ }
435914
+ }, [config2]);
435872
435915
  const mainItems = [
435873
435916
  {
435874
435917
  key: AuthType2.QWEN_OAUTH,
@@ -436035,19 +436078,46 @@ function AuthDialog() {
436035
436078
  }, "handleLmStudioSubmit");
436036
436079
  const handleOllamaSubmit = /* @__PURE__ */ __name(async () => {
436037
436080
  setErrorMessage(null);
436038
- if (!ollamaApiKey.trim()) {
436039
- setErrorMessage(t4("API key cannot be empty."));
436040
- return;
436081
+ setLoadingOllamaModels(true);
436082
+ try {
436083
+ const url2 = `${ollamaBaseUrl.replace("/v1", "")}/api/tags`;
436084
+ const response = await fetch(url2, {
436085
+ method: "GET",
436086
+ headers: ollamaApiKey ? { Authorization: `Bearer ${ollamaApiKey}` } : {}
436087
+ });
436088
+ if (!response.ok) {
436089
+ throw new Error(`Failed to fetch models: ${response.status}`);
436090
+ }
436091
+ const data = await response.json();
436092
+ const models = (data.models || []).map((m3) => m3.name);
436093
+ if (models.length === 0) {
436094
+ setErrorMessage(t4("No models found on Ollama server"));
436095
+ return;
436096
+ }
436097
+ setOllamaModels(models);
436098
+ setSelectedOllamaModel(models[0]);
436099
+ setOllamaStep("models");
436100
+ } catch (err) {
436101
+ setErrorMessage(
436102
+ t4("Failed to connect to Ollama: {{error}}", {
436103
+ error: err instanceof Error ? err.message : "Unknown error"
436104
+ })
436105
+ );
436106
+ } finally {
436107
+ setLoadingOllamaModels(false);
436041
436108
  }
436109
+ }, "handleOllamaSubmit");
436110
+ const handleOllamaModelSelect = /* @__PURE__ */ __name(async (model) => {
436042
436111
  await onAuthSelect(AuthType2.USE_OLLAMA, {
436043
436112
  apiKey: ollamaApiKey,
436044
- baseUrl: ollamaBaseUrl
436113
+ baseUrl: ollamaBaseUrl,
436114
+ model
436045
436115
  });
436046
- }, "handleOllamaSubmit");
436116
+ }, "handleOllamaModelSelect");
436047
436117
  const handleGoBack = /* @__PURE__ */ __name(() => {
436048
436118
  setErrorMessage(null);
436049
436119
  onAuthError(null);
436050
- if (viewLevel === "region-select" || viewLevel === "custom-info" || viewLevel === "lm-studio-input" || viewLevel === "ollama-input") {
436120
+ if (viewLevel === "region-select" || viewLevel === "custom-info" || viewLevel === "lm-studio-input" || viewLevel === "ollama-input" || viewLevel === "ollama-models") {
436051
436121
  setViewLevel("main");
436052
436122
  setLmStudioStep("baseUrl");
436053
436123
  setOllamaStep("baseUrl");
@@ -436075,7 +436145,9 @@ function AuthDialog() {
436075
436145
  return;
436076
436146
  }
436077
436147
  if (viewLevel === "ollama-input") {
436078
- if (ollamaStep === "apiKey") {
436148
+ if (ollamaStep === "models") {
436149
+ setOllamaStep("apiKey");
436150
+ } else if (ollamaStep === "apiKey") {
436079
436151
  setOllamaStep("baseUrl");
436080
436152
  } else {
436081
436153
  handleGoBack();
@@ -436209,7 +436281,35 @@ function AuthDialog() {
436209
436281
  }
436210
436282
  ) }),
436211
436283
  errorMessage && /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.status.error, children: errorMessage }) }),
436212
- /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.text.secondary, children: t4("\u2191\u2193 to navigate, Enter to submit, Esc to go back") }) })
436284
+ /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.text.secondary, children: t4("\u2191\u2193 to navigate, Enter to fetch models, Esc to go back") }) })
436285
+ ] }),
436286
+ ollamaStep === "models" && /* @__PURE__ */ (0, import_jsx_runtime76.jsxs)(import_jsx_runtime76.Fragment, { children: [
436287
+ /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsxs)(Text3, { color: theme.text.secondary, children: [
436288
+ t4("Server"),
436289
+ ":",
436290
+ " ",
436291
+ /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.text.primary, children: ollamaBaseUrl })
436292
+ ] }) }),
436293
+ /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.text.secondary, children: t4("Select Model:") }) }),
436294
+ /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 0, children: loadingOllamaModels ? /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.text.secondary, children: t4("Loading models...") }) : /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(
436295
+ DescriptiveRadioButtonSelect,
436296
+ {
436297
+ items: ollamaModels.map((m3) => ({
436298
+ key: m3,
436299
+ title: m3,
436300
+ description: "",
436301
+ value: m3
436302
+ })),
436303
+ initialIndex: ollamaModels.indexOf(selectedOllamaModel),
436304
+ onSelect: (val) => {
436305
+ setSelectedOllamaModel(val);
436306
+ handleOllamaModelSelect(val);
436307
+ },
436308
+ maxItemsToShow: 5
436309
+ }
436310
+ ) }),
436311
+ errorMessage && /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.status.error, children: errorMessage }) }),
436312
+ /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Box_default, { marginTop: 1, children: /* @__PURE__ */ (0, import_jsx_runtime76.jsx)(Text3, { color: theme.text.secondary, children: t4("\u2191\u2193 to navigate, Enter to select, Esc to go back") }) })
436213
436313
  ] })
436214
436314
  ] }), "renderOllamaInputView");
436215
436315
  const getViewTitle = /* @__PURE__ */ __name(() => {
@@ -443283,7 +443383,7 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config2, addItem, onAuthC
443283
443383
  setAuthError(null);
443284
443384
  setIsAuthDialogOpen(false);
443285
443385
  setIsAuthenticating(true);
443286
- if (authType === AuthType2.USE_OPENAI || authType === AuthType2.USE_LM_STUDIO) {
443386
+ if (authType === AuthType2.USE_OPENAI || authType === AuthType2.USE_LM_STUDIO || authType === AuthType2.USE_OLLAMA) {
443287
443387
  if (credentials) {
443288
443388
  const settingsGenerationConfig = settings.merged.model?.generationConfig;
443289
443389
  config2.updateCredentials(
@@ -443294,6 +443394,28 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config2, addItem, onAuthC
443294
443394
  },
443295
443395
  settingsGenerationConfig
443296
443396
  );
443397
+ if (authType === AuthType2.USE_OLLAMA && credentials.model) {
443398
+ const authTypeScope = getPersistScopeForModelSelection(settings);
443399
+ settings.setValue(authTypeScope, "security.auth", {
443400
+ selectedType: authType
443401
+ });
443402
+ settings.setValue(authTypeScope, "model.name", credentials.model);
443403
+ updateSettingsFilePreservingFormat(
443404
+ settings.user.path,
443405
+ settings.user.originalSettings
443406
+ );
443407
+ }
443408
+ if (authType === AuthType2.USE_LM_STUDIO && credentials.model) {
443409
+ const authTypeScope = getPersistScopeForModelSelection(settings);
443410
+ settings.setValue(authTypeScope, "security.auth", {
443411
+ selectedType: authType
443412
+ });
443413
+ settings.setValue(authTypeScope, "model.name", credentials.model);
443414
+ updateSettingsFilePreservingFormat(
443415
+ settings.user.path,
443416
+ settings.user.originalSettings
443417
+ );
443418
+ }
443297
443419
  await performAuth(authType, credentials);
443298
443420
  }
443299
443421
  return;
@@ -456129,7 +456251,7 @@ var QwenAgent = class {
456129
456251
  async initialize(args) {
456130
456252
  this.clientCapabilities = args.clientCapabilities;
456131
456253
  const authMethods = buildAuthMethods();
456132
- const version2 = "0.12.5";
456254
+ const version2 = "0.12.7";
456133
456255
  return {
456134
456256
  protocolVersion: PROTOCOL_VERSION,
456135
456257
  agentInfo: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@jaydennleemc/qwen-code-local",
3
- "version": "0.12.5",
3
+ "version": "0.12.7",
4
4
  "engines": {
5
5
  "node": ">=20.0.0"
6
6
  },
@@ -13,7 +13,7 @@
13
13
  "url": "git+https://github.com/jaydennleemc/qwen-code.git"
14
14
  },
15
15
  "config": {
16
- "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.12.5"
16
+ "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.12.7"
17
17
  },
18
18
  "scripts": {
19
19
  "start": "cross-env node scripts/start.js",