@rdmind/rdmind 0.2.8-alpha.8 → 0.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/cli.js +385 -118
  2. package/locales/zh.js +1 -4
  3. package/package.json +2 -2
package/cli.js CHANGED
@@ -19275,6 +19275,9 @@ function isSubpath(parentPath, childPath) {
19275
19275
  const relative11 = pathModule2.relative(parentPath, childPath);
19276
19276
  return !relative11.startsWith(`..${pathModule2.sep}`) && relative11 !== ".." && !pathModule2.isAbsolute(relative11);
19277
19277
  }
19278
+ function isSubpaths(parentPath, childPath) {
19279
+ return parentPath.some((p2) => isSubpath(p2, childPath));
19280
+ }
19278
19281
  function resolvePath(baseDir = process.cwd(), relativePath) {
19279
19282
  const homeDir = os.homedir();
19280
19283
  if (relativePath === "~") {
@@ -19331,6 +19334,7 @@ var init_paths = __esm({
19331
19334
  __name(getProjectHash, "getProjectHash");
19332
19335
  __name(sanitizeCwd, "sanitizeCwd");
19333
19336
  __name(isSubpath, "isSubpath");
19337
+ __name(isSubpaths, "isSubpaths");
19334
19338
  __name(resolvePath, "resolvePath");
19335
19339
  __name(validatePath, "validatePath");
19336
19340
  __name(resolveAndValidatePath, "resolveAndValidatePath");
@@ -19341,7 +19345,7 @@ var init_paths = __esm({
19341
19345
  import * as path2 from "node:path";
19342
19346
  import * as os2 from "node:os";
19343
19347
  import * as fs2 from "node:fs";
19344
- var QWEN_DIR2, GOOGLE_ACCOUNTS_FILENAME2, OAUTH_FILE, TMP_DIR_NAME, BIN_DIR_NAME, PROJECT_DIR_NAME, IDE_DIR_NAME, DEBUG_DIR_NAME, Storage;
19348
+ var QWEN_DIR2, GOOGLE_ACCOUNTS_FILENAME2, OAUTH_FILE, SKILL_PROVIDER_CONFIG_DIRS, TMP_DIR_NAME, BIN_DIR_NAME, PROJECT_DIR_NAME, IDE_DIR_NAME, DEBUG_DIR_NAME, Storage;
19345
19349
  var init_storage = __esm({
19346
19350
  "packages/core/src/config/storage.ts"() {
19347
19351
  "use strict";
@@ -19350,6 +19354,16 @@ var init_storage = __esm({
19350
19354
  QWEN_DIR2 = ".rdmind";
19351
19355
  GOOGLE_ACCOUNTS_FILENAME2 = "google_accounts.json";
19352
19356
  OAUTH_FILE = "oauth_creds.json";
19357
+ SKILL_PROVIDER_CONFIG_DIRS = [
19358
+ ".rdmind",
19359
+ ".qwen",
19360
+ ".agent",
19361
+ ".agents",
19362
+ ".claude",
19363
+ ".cursor",
19364
+ ".codex",
19365
+ ".codewiz"
19366
+ ];
19353
19367
  TMP_DIR_NAME = "tmp";
19354
19368
  BIN_DIR_NAME = "bin";
19355
19369
  PROJECT_DIR_NAME = "projects";
@@ -19447,8 +19461,11 @@ var init_storage = __esm({
19447
19461
  getExtensionsConfigPath() {
19448
19462
  return path2.join(this.getExtensionsDir(), "rdmind-extension.json");
19449
19463
  }
19450
- getUserSkillsDir() {
19451
- return path2.join(_Storage.getGlobalQwenDir(), "skills");
19464
+ getUserSkillsDirs() {
19465
+ const homeDir = os2.homedir() || os2.tmpdir();
19466
+ return SKILL_PROVIDER_CONFIG_DIRS.map(
19467
+ (dir) => path2.join(homeDir, dir, "skills")
19468
+ );
19452
19469
  }
19453
19470
  getHistoryFilePath() {
19454
19471
  return path2.join(this.getProjectTempDir(), "shell_history");
@@ -134331,7 +134348,8 @@ var init_loggingContentGenerator = __esm({
134331
134348
  combinedParts.push({
134332
134349
  text: part.text,
134333
134350
  ...part.thought ? { thought: true } : {},
134334
- ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {}
134351
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
134352
+ ..."codexReasoningItem" in part && part.codexReasoningItem ? { codexReasoningItem: part.codexReasoningItem } : {}
134335
134353
  });
134336
134354
  }
134337
134355
  continue;
@@ -144134,6 +144152,18 @@ var init_provider = __esm({
144134
144152
  });
144135
144153
 
144136
144154
  // packages/core/src/core/openaiContentGenerator/pipeline.ts
144155
+ function buildProviderExtraBody(model) {
144156
+ const modelLower = model.toLowerCase();
144157
+ if (modelLower.includes("glm-5")) {
144158
+ return {
144159
+ thinking: {
144160
+ type: "enabled",
144161
+ clear_thinking: false
144162
+ }
144163
+ };
144164
+ }
144165
+ return void 0;
144166
+ }
144137
144167
  var StreamContentError, ContentGenerationPipeline;
144138
144168
  var init_pipeline = __esm({
144139
144169
  "packages/core/src/core/openaiContentGenerator/pipeline.ts"() {
@@ -144141,6 +144171,7 @@ var init_pipeline = __esm({
144141
144171
  init_esbuild_shims();
144142
144172
  init_node();
144143
144173
  init_converter();
144174
+ __name(buildProviderExtraBody, "buildProviderExtraBody");
144144
144175
  StreamContentError = class extends Error {
144145
144176
  static {
144146
144177
  __name(this, "StreamContentError");
@@ -144177,13 +144208,7 @@ var init_pipeline = __esm({
144177
144208
  false,
144178
144209
  effectiveModel,
144179
144210
  async (openaiRequest) => {
144180
- const extraBody = this.contentGeneratorConfig.model?.toLowerCase().includes("glm-5") ? {
144181
- thinking: {
144182
- type: "enabled",
144183
- clear_thinking: false
144184
- // Enable preserved/interleaved thinking
144185
- }
144186
- } : void 0;
144211
+ const extraBody = buildProviderExtraBody(this.contentGeneratorConfig.model);
144187
144212
  const openaiResponse = await this.client.chat.completions.create(
144188
144213
  openaiRequest,
144189
144214
  {
@@ -144206,13 +144231,7 @@ var init_pipeline = __esm({
144206
144231
  true,
144207
144232
  effectiveModel,
144208
144233
  async (openaiRequest, context2) => {
144209
- const extraBody = this.contentGeneratorConfig.model?.toLowerCase().includes("glm-5") ? {
144210
- thinking: {
144211
- type: "enabled",
144212
- clear_thinking: false
144213
- // Enable preserved/interleaved thinking
144214
- }
144215
- } : void 0;
144234
+ const extraBody = buildProviderExtraBody(this.contentGeneratorConfig.model);
144216
144235
  const stream2 = await this.client.chat.completions.create(
144217
144236
  openaiRequest,
144218
144237
  {
@@ -148823,7 +148842,7 @@ function extractReasoningEffort(model) {
148823
148842
  const match2 = model.match(/^(.+?)\((\w+)\)$/);
148824
148843
  if (!match2) return void 0;
148825
148844
  const level = match2[2].toLowerCase();
148826
- if (level === "low" || level === "medium" || level === "high") {
148845
+ if (level === "none" || level === "low" || level === "medium" || level === "high" || level === "xhigh") {
148827
148846
  return level;
148828
148847
  }
148829
148848
  return void 0;
@@ -148851,6 +148870,45 @@ function buildCodexInput(contents, systemInstruction, modalities) {
148851
148870
  }
148852
148871
  function convertContentToCodexMessages(content, modalities) {
148853
148872
  const messages = [];
148873
+ const convertCodexReasoningItem = /* @__PURE__ */ __name((rawItem, fallbackText, fallbackSignature) => {
148874
+ if (!rawItem || typeof rawItem !== "object") {
148875
+ if (!fallbackSignature) {
148876
+ return null;
148877
+ }
148878
+ return {
148879
+ type: "reasoning",
148880
+ summary: [{ type: "summary_text", text: fallbackText }],
148881
+ encrypted_content: fallbackSignature
148882
+ };
148883
+ }
148884
+ const raw2 = rawItem;
148885
+ const type = raw2["type"];
148886
+ if (type !== "reasoning") {
148887
+ return null;
148888
+ }
148889
+ const normalized2 = {
148890
+ type: "reasoning",
148891
+ id: typeof raw2["id"] === "string" ? raw2["id"] : void 0,
148892
+ status: typeof raw2["status"] === "string" || raw2["status"] === null ? raw2["status"] : void 0,
148893
+ encrypted_content: typeof raw2["encrypted_content"] === "string" || raw2["encrypted_content"] === null ? raw2["encrypted_content"] : fallbackSignature
148894
+ };
148895
+ if (Array.isArray(raw2["summary"])) {
148896
+ normalized2.summary = raw2["summary"].filter(
148897
+ (summaryItem) => typeof summaryItem === "object" && summaryItem !== null
148898
+ ).map((summaryItem) => ({
148899
+ ...summaryItem,
148900
+ type: summaryItem["type"] === "summary_text" ? "summary_text" : "summary_text",
148901
+ text: typeof summaryItem["text"] === "string" ? summaryItem["text"] : ""
148902
+ }));
148903
+ }
148904
+ if (Array.isArray(raw2["content"])) {
148905
+ normalized2.content = raw2["content"].filter((contentItem) => typeof contentItem === "object" && contentItem).map((contentItem) => ({ ...contentItem }));
148906
+ }
148907
+ if (!normalized2.summary || normalized2.summary.length === 0) {
148908
+ normalized2.summary = [{ type: "summary_text", text: fallbackText }];
148909
+ }
148910
+ return normalized2;
148911
+ }, "convertCodexReasoningItem");
148854
148912
  if (!content) return messages;
148855
148913
  if (typeof content === "string") {
148856
148914
  if (content.trim()) {
@@ -148868,7 +148926,21 @@ function convertContentToCodexMessages(content, modalities) {
148868
148926
  const partObj = part;
148869
148927
  if ("text" in partObj && typeof partObj["text"] === "string") {
148870
148928
  const text = partObj["text"];
148871
- if (!partObj["thought"]) {
148929
+ const isThought = Boolean(partObj["thought"]);
148930
+ const thoughtSignature = typeof partObj["thoughtSignature"] === "string" ? partObj["thoughtSignature"] : void 0;
148931
+ const codexReasoningItem = "codexReasoningItem" in partObj ? partObj["codexReasoningItem"] : void 0;
148932
+ if (isThought) {
148933
+ if (role === "assistant") {
148934
+ const reasoningMessage = convertCodexReasoningItem(
148935
+ codexReasoningItem,
148936
+ text,
148937
+ thoughtSignature
148938
+ );
148939
+ if (reasoningMessage) {
148940
+ messages.push(reasoningMessage);
148941
+ }
148942
+ }
148943
+ } else {
148872
148944
  messages.push({ role, content: text });
148873
148945
  }
148874
148946
  }
@@ -149111,11 +149183,79 @@ function convertGeminiSchemaToOpenAI(parameters) {
149111
149183
  }, "convertTypes");
149112
149184
  return convertTypes(converted);
149113
149185
  }
149186
+ function mapCodexStatusToFinishReason(status) {
149187
+ switch (status) {
149188
+ case "completed":
149189
+ return FinishReason.STOP;
149190
+ case "incomplete":
149191
+ return FinishReason.MAX_TOKENS;
149192
+ case "failed":
149193
+ case "cancelled":
149194
+ return FinishReason.FINISH_REASON_UNSPECIFIED;
149195
+ default:
149196
+ return FinishReason.STOP;
149197
+ }
149198
+ }
149199
+ function finalizeStreamResponse(response, streamState) {
149200
+ const finalResponse = convertCodexResponseToGemini(response);
149201
+ const shouldKeepThoughtPartsOnly = streamState.sawOutputTextDelta || streamState.sawFunctionCallChunk;
149202
+ if (!finalResponse.candidates?.length) {
149203
+ return finalResponse;
149204
+ }
149205
+ finalResponse.candidates = [
149206
+ {
149207
+ ...finalResponse.candidates[0] || {
149208
+ index: 0,
149209
+ content: { role: "model", parts: [] },
149210
+ safetyRatings: []
149211
+ },
149212
+ content: {
149213
+ role: "model",
149214
+ parts: finalResponse.candidates[0]?.content?.parts?.flatMap((part) => {
149215
+ if (shouldKeepThoughtPartsOnly && !part.thought) {
149216
+ return [];
149217
+ }
149218
+ if (streamState.sawReasoningSummaryTextDelta && part.thought) {
149219
+ return [
149220
+ {
149221
+ ...part,
149222
+ text: ""
149223
+ }
149224
+ ];
149225
+ }
149226
+ return [part];
149227
+ }) || []
149228
+ }
149229
+ }
149230
+ ];
149231
+ return finalResponse;
149232
+ }
149114
149233
  function convertCodexResponseToGemini(response) {
149115
149234
  const parts = [];
149116
149235
  if (response.output && Array.isArray(response.output)) {
149117
149236
  for (const item of response.output) {
149118
149237
  if (item.type === "reasoning") {
149238
+ const summaryText = item.summary?.map((summaryItem) => summaryItem.text).filter(Boolean).join("") || "";
149239
+ if (summaryText || item.encrypted_content) {
149240
+ const thoughtPart = {
149241
+ text: summaryText,
149242
+ thought: true
149243
+ };
149244
+ if (item.encrypted_content) {
149245
+ thoughtPart.thoughtSignature = item.encrypted_content;
149246
+ }
149247
+ thoughtPart.codexReasoningItem = {
149248
+ type: "reasoning",
149249
+ id: item.id,
149250
+ status: item.status,
149251
+ content: Array.isArray(item.content) ? item.content.map((contentItem) => ({
149252
+ ...contentItem
149253
+ })) : void 0,
149254
+ summary: item.summary?.map((summaryItem) => ({ ...summaryItem })),
149255
+ encrypted_content: item.encrypted_content
149256
+ };
149257
+ parts.push(thoughtPart);
149258
+ }
149119
149259
  continue;
149120
149260
  }
149121
149261
  if (item.type === "message" && item.content) {
@@ -149138,10 +149278,11 @@ function convertCodexResponseToGemini(response) {
149138
149278
  }
149139
149279
  }
149140
149280
  }
149281
+ const finishReason = mapCodexStatusToFinishReason(response.status);
149141
149282
  return createGeminiResponse(
149142
149283
  response.id,
149143
149284
  parts,
149144
- FinishReason.STOP,
149285
+ finishReason,
149145
149286
  response.usage ? {
149146
149287
  promptTokenCount: response.usage.input_tokens,
149147
149288
  candidatesTokenCount: response.usage.output_tokens,
@@ -149186,6 +149327,7 @@ var init_codexContentGenerator = __esm({
149186
149327
  apiKey;
149187
149328
  samplingParams;
149188
149329
  reasoning;
149330
+ enableOpenAILogging;
149189
149331
  modalities;
149190
149332
  cliConfig;
149191
149333
  errorHandler;
@@ -149195,6 +149337,7 @@ var init_codexContentGenerator = __esm({
149195
149337
  this.apiKey = config2.apiKey || "";
149196
149338
  this.samplingParams = config2.samplingParams;
149197
149339
  this.reasoning = config2.reasoning;
149340
+ this.enableOpenAILogging = config2.enableOpenAILogging ?? false;
149198
149341
  this.modalities = config2.modalities ?? defaultModalities(config2.model);
149199
149342
  this.cliConfig = cliConfig;
149200
149343
  if (!this.apiKey) {
@@ -149204,7 +149347,9 @@ var init_codexContentGenerator = __esm({
149204
149347
  throw new Error("Base URL is required for Codex");
149205
149348
  }
149206
149349
  this.errorHandler = new EnhancedErrorHandler(() => false);
149207
- this.logger = new OpenAILogger(config2.openAILoggingDir);
149350
+ if (this.enableOpenAILogging) {
149351
+ this.logger = new OpenAILogger(config2.openAILoggingDir);
149352
+ }
149208
149353
  }
149209
149354
  // ============================================================================
149210
149355
  // 主要 API 方法
@@ -149265,14 +149410,25 @@ var init_codexContentGenerator = __esm({
149265
149410
  };
149266
149411
  }
149267
149412
  async fetchApi(request4) {
149268
- const response = await fetch(this.baseUrl, {
149269
- method: "POST",
149270
- headers: {
149271
- "Content-Type": "application/json",
149272
- "api-key": this.apiKey
149273
- },
149274
- body: JSON.stringify(request4)
149275
- });
149413
+ let response;
149414
+ try {
149415
+ response = await fetch(this.baseUrl, {
149416
+ method: "POST",
149417
+ headers: {
149418
+ "Content-Type": "application/json",
149419
+ "api-key": this.apiKey
149420
+ },
149421
+ body: JSON.stringify(request4)
149422
+ });
149423
+ } catch (err) {
149424
+ const cause = err instanceof Error ? err.cause : void 0;
149425
+ const code2 = cause && typeof cause === "object" && "code" in cause ? cause.code : void 0;
149426
+ const msg = cause instanceof Error ? cause.message : String(cause ?? err);
149427
+ throw new Error(
149428
+ `Codex API network error: fetch failed (${code2 ?? "unknown"}). ${msg}`,
149429
+ { cause: err }
149430
+ );
149431
+ }
149276
149432
  if (!response.ok) {
149277
149433
  const errorText = await response.text();
149278
149434
  throw new Error(
@@ -149293,7 +149449,9 @@ var init_codexContentGenerator = __esm({
149293
149449
  model,
149294
149450
  input,
149295
149451
  stream: stream2,
149296
- store: true,
149452
+ store: false,
149453
+ include: ["reasoning.encrypted_content"],
149454
+ truncation: "auto",
149297
149455
  temperature: this.samplingParams?.temperature ?? 1,
149298
149456
  top_p: this.samplingParams?.top_p,
149299
149457
  max_output_tokens: this.samplingParams?.max_tokens,
@@ -149316,41 +149474,96 @@ var init_codexContentGenerator = __esm({
149316
149474
  const decoder = new TextDecoder();
149317
149475
  let buffer = "";
149318
149476
  let currentEvent = "";
149477
+ const streamDiag = {
149478
+ eventTypes: [],
149479
+ totalLines: 0,
149480
+ skippedLines: 0,
149481
+ firstRawChunk: "",
149482
+ lastRawDataSnippet: "",
149483
+ finalEventType: "",
149484
+ finalStatus: "",
149485
+ finalOutputTypes: [],
149486
+ finalOutputSummary: [],
149487
+ streamErrors: []
149488
+ };
149489
+ let streamFinished = false;
149319
149490
  const toolCallArgs = /* @__PURE__ */ new Map();
149491
+ const streamState = {
149492
+ sawReasoningSummaryTextDelta: false,
149493
+ sawOutputTextDelta: false,
149494
+ sawFunctionCallChunk: false
149495
+ };
149320
149496
  try {
149321
- while (true) {
149497
+ let isFirstChunk = true;
149498
+ while (!streamFinished) {
149322
149499
  const { done, value } = await reader.read();
149323
149500
  if (done) break;
149324
- buffer += decoder.decode(value, { stream: true });
149501
+ const chunk = decoder.decode(value, { stream: true });
149502
+ if (isFirstChunk) {
149503
+ streamDiag.firstRawChunk = chunk.slice(0, 500);
149504
+ isFirstChunk = false;
149505
+ }
149506
+ buffer += chunk;
149325
149507
  const lines = buffer.split("\n");
149326
149508
  buffer = lines.pop() || "";
149327
149509
  for (const line of lines) {
149328
149510
  const trimmed2 = line.trim();
149329
149511
  if (!trimmed2) continue;
149512
+ streamDiag.totalLines++;
149330
149513
  if (trimmed2.startsWith("event: ")) {
149331
149514
  currentEvent = trimmed2.slice(7).trim();
149332
149515
  continue;
149333
149516
  }
149334
149517
  if (trimmed2.startsWith("data: ")) {
149335
149518
  const dataStr = trimmed2.slice(6).trim();
149336
- if (dataStr === "[DONE]") return;
149519
+ if (dataStr === "[DONE]") {
149520
+ streamFinished = true;
149521
+ break;
149522
+ }
149337
149523
  try {
149338
149524
  const data = JSON.parse(dataStr);
149525
+ streamDiag.lastRawDataSnippet = dataStr.slice(0, 1e3);
149526
+ const eventType = currentEvent || data.type || "";
149527
+ if (eventType && !streamDiag.eventTypes.includes(eventType)) {
149528
+ streamDiag.eventTypes.push(eventType);
149529
+ }
149530
+ if (eventType === "response.completed" || eventType === "response.incomplete" || eventType === "response.failed") {
149531
+ const finalResponse = data.response;
149532
+ streamDiag.finalEventType = eventType;
149533
+ streamDiag.finalStatus = finalResponse?.status ?? "";
149534
+ streamDiag.finalOutputTypes = finalResponse?.output?.map((item) => item.type) ?? [];
149535
+ streamDiag.finalOutputSummary = finalResponse?.output?.map((item) => ({
149536
+ type: item.type,
149537
+ contentTypes: item.content?.map(
149538
+ (c4) => typeof c4 === "object" && c4 && "type" in c4 ? c4.type : void 0
149539
+ ).filter((type) => typeof type === "string")
149540
+ })) ?? [];
149541
+ }
149542
+ if (eventType === "error") {
149543
+ const errPayload = data;
149544
+ const nested = errPayload["error"];
149545
+ const code2 = errPayload["code"] || nested?.code || nested?.type || "unknown";
149546
+ const message = errPayload["message"] || nested?.message || JSON.stringify(data);
149547
+ streamDiag.streamErrors.push({ code: code2, message });
149548
+ }
149339
149549
  const response = this.handleStreamEvent(
149340
149550
  currentEvent,
149341
149551
  data,
149342
- toolCallArgs
149552
+ toolCallArgs,
149553
+ streamState
149343
149554
  );
149344
149555
  if (response) {
149345
149556
  yield response;
149346
149557
  }
149347
149558
  } catch {
149348
149559
  }
149560
+ } else {
149561
+ streamDiag.skippedLines++;
149349
149562
  }
149350
149563
  }
149351
149564
  }
149352
149565
  context2.duration = Date.now() - context2.startTime;
149353
- await this.logStreamingSuccess(context2, request4);
149566
+ await this.logStreamingSuccess(context2, request4, streamDiag);
149354
149567
  } catch (error40) {
149355
149568
  context2.duration = Date.now() - context2.startTime;
149356
149569
  await this.logError(context2, error40, request4);
@@ -149359,17 +149572,26 @@ var init_codexContentGenerator = __esm({
149359
149572
  reader.releaseLock();
149360
149573
  }
149361
149574
  }
149362
- handleStreamEvent(event, data, toolCallArgs) {
149575
+ handleStreamEvent(event, data, toolCallArgs, streamState) {
149363
149576
  switch (event) {
149364
149577
  case "response.reasoning_summary_text.delta": {
149365
- return null;
149578
+ const text = data.delta;
149579
+ if (!text) return null;
149580
+ streamState.sawReasoningSummaryTextDelta = true;
149581
+ return createGeminiResponse(data.item_id || "unknown", [
149582
+ {
149583
+ text,
149584
+ thought: true
149585
+ }
149586
+ ]);
149366
149587
  }
149367
149588
  case "response.output_text.delta": {
149368
149589
  const text = data.delta;
149369
149590
  if (!text) return null;
149591
+ streamState.sawOutputTextDelta = true;
149370
149592
  return createGeminiResponse(data.item_id || "unknown", [{ text }]);
149371
149593
  }
149372
- case "response.tool_calls.delta": {
149594
+ case "response.function_call_arguments.delta": {
149373
149595
  const index = data.tool_call_index ?? 0;
149374
149596
  const current = toolCallArgs.get(index) || { args: "" };
149375
149597
  if (data.tool_call_id) current.id = data.tool_call_id;
@@ -149380,36 +149602,51 @@ var init_codexContentGenerator = __esm({
149380
149602
  }
149381
149603
  case "response.output_item.done": {
149382
149604
  const item = data.item;
149383
- if (item?.type === "function_call" && item.arguments) {
149384
- try {
149385
- const args = JSON.parse(item.arguments);
149386
- return createGeminiResponse(data.item_id || "unknown", [
149387
- {
149388
- functionCall: {
149389
- id: item.call_id || item.id || `call_${Date.now()}`,
149390
- name: item.name || "unknown",
149391
- args
149605
+ if (item?.type === "function_call") {
149606
+ const index = data.tool_call_index ?? 0;
149607
+ const accumulated = toolCallArgs.get(index);
149608
+ const rawArgs = item.arguments || accumulated?.args;
149609
+ if (rawArgs) {
149610
+ try {
149611
+ const args = JSON.parse(rawArgs);
149612
+ const callId = item.call_id || item.id || accumulated?.id || `call_${Date.now()}`;
149613
+ const name3 = item.name || accumulated?.name || "unknown";
149614
+ toolCallArgs.delete(index);
149615
+ streamState.sawFunctionCallChunk = true;
149616
+ return createGeminiResponse(data.item_id || "unknown", [
149617
+ {
149618
+ functionCall: { id: callId, name: name3, args }
149392
149619
  }
149393
- }
149394
- ]);
149395
- } catch {
149620
+ ]);
149621
+ } catch {
149622
+ }
149396
149623
  }
149397
149624
  }
149398
149625
  return null;
149399
149626
  }
149400
149627
  case "response.completed": {
149401
149628
  const response = data.response;
149629
+ if (response) {
149630
+ return finalizeStreamResponse(response, streamState);
149631
+ }
149632
+ return createGeminiResponse("final", []);
149633
+ }
149634
+ case "response.incomplete": {
149635
+ const response = data.response;
149636
+ if (response) {
149637
+ return finalizeStreamResponse(response, streamState);
149638
+ }
149639
+ return createGeminiResponse("final", [], FinishReason.MAX_TOKENS);
149640
+ }
149641
+ case "response.failed": {
149642
+ const response = data.response;
149643
+ if (response) {
149644
+ return finalizeStreamResponse(response, streamState);
149645
+ }
149402
149646
  return createGeminiResponse(
149403
- response?.id || "final",
149647
+ "final",
149404
149648
  [],
149405
- FinishReason.STOP,
149406
- response?.usage ? {
149407
- promptTokenCount: response.usage.input_tokens,
149408
- candidatesTokenCount: response.usage.output_tokens,
149409
- totalTokenCount: response.usage.total_tokens,
149410
- cachedContentTokenCount: response.usage.input_tokens_details?.cached_tokens,
149411
- thoughtsTokenCount: response.usage.output_tokens_details?.reasoning_tokens
149412
- } : void 0
149649
+ FinishReason.FINISH_REASON_UNSPECIFIED
149413
149650
  );
149414
149651
  }
149415
149652
  default:
@@ -149430,7 +149667,7 @@ var init_codexContentGenerator = __esm({
149430
149667
  response.usageMetadata
149431
149668
  );
149432
149669
  logApiResponse(this.cliConfig, event);
149433
- if (request4 && rawResponse) {
149670
+ if (this.enableOpenAILogging && this.logger && request4 && rawResponse) {
149434
149671
  await this.logger.logInteraction(request4, rawResponse);
149435
149672
  }
149436
149673
  }
@@ -149449,11 +149686,11 @@ var init_codexContentGenerator = __esm({
149449
149686
  apiError?.code
149450
149687
  );
149451
149688
  logApiError(this.cliConfig, event);
149452
- if (request4) {
149689
+ if (this.enableOpenAILogging && this.logger && request4) {
149453
149690
  await this.logger.logInteraction(request4, void 0, error40);
149454
149691
  }
149455
149692
  }
149456
- async logStreamingSuccess(context2, request4) {
149693
+ async logStreamingSuccess(context2, request4, diagnostics) {
149457
149694
  if (!this.cliConfig) return;
149458
149695
  const event = new ApiResponseEvent(
149459
149696
  "unknown",
@@ -149464,7 +149701,12 @@ var init_codexContentGenerator = __esm({
149464
149701
  void 0
149465
149702
  );
149466
149703
  logApiResponse(this.cliConfig, event);
149467
- await this.logger.logInteraction(request4, { streamed: true });
149704
+ if (this.enableOpenAILogging && this.logger) {
149705
+ await this.logger.logInteraction(request4, {
149706
+ streamed: true,
149707
+ ...diagnostics ?? {}
149708
+ });
149709
+ }
149468
149710
  }
149469
149711
  };
149470
149712
  __name(extractBaseModel, "extractBaseModel");
@@ -149477,6 +149719,8 @@ var init_codexContentGenerator = __esm({
149477
149719
  __name(extractTextFromContent, "extractTextFromContent");
149478
149720
  __name(convertTools, "convertTools");
149479
149721
  __name(convertGeminiSchemaToOpenAI, "convertGeminiSchemaToOpenAI");
149722
+ __name(mapCodexStatusToFinishReason, "mapCodexStatusToFinishReason");
149723
+ __name(finalizeStreamResponse, "finalizeStreamResponse");
149480
149724
  __name(convertCodexResponseToGemini, "convertCodexResponseToGemini");
149481
149725
  __name(createGeminiResponse, "createGeminiResponse");
149482
149726
  }
@@ -161068,7 +161312,7 @@ __export(geminiContentGenerator_exports2, {
161068
161312
  createGeminiContentGenerator: () => createGeminiContentGenerator
161069
161313
  });
161070
161314
  function createGeminiContentGenerator(config2, gcConfig) {
161071
- const version2 = "0.2.8-alpha.8";
161315
+ const version2 = "0.2.8";
161072
161316
  const userAgent2 = config2.userAgent || `QwenCode/${version2} (${process.platform}; ${process.arch})`;
161073
161317
  const baseHeaders = {
161074
161318
  "User-Agent": userAgent2
@@ -161249,7 +161493,7 @@ async function createContentGenerator(generatorConfig, config2, isInitialAuth) {
161249
161493
  generatorConfig,
161250
161494
  config2
161251
161495
  );
161252
- } else if (model.includes("codex")) {
161496
+ } else if (model.includes("codex") || model.startsWith("gpt-5.4")) {
161253
161497
  const { CodexContentGenerator: CodexContentGenerator2 } = await Promise.resolve().then(() => (init_codexContentGenerator(), codexContentGenerator_exports));
161254
161498
  baseGenerator = new CodexContentGenerator2(generatorConfig, config2);
161255
161499
  } else {
@@ -163018,6 +163262,12 @@ This error was probably caused by cyclic schema references in one of the followi
163018
163262
  if (thoughtContentPart && thoughtSignature) {
163019
163263
  thoughtContentPart.thoughtSignature = thoughtSignature;
163020
163264
  }
163265
+ const codexReasoningItem = allModelParts.find(
163266
+ (part) => part && typeof part === "object" && "thought" in part && part.thought && "codexReasoningItem" in part
163267
+ );
163268
+ if (thoughtContentPart && codexReasoningItem?.codexReasoningItem) {
163269
+ thoughtContentPart.codexReasoningItem = codexReasoningItem.codexReasoningItem;
163270
+ }
163021
163271
  }
163022
163272
  const contentParts = allModelParts.filter((part) => !part.thought);
163023
163273
  const consolidatedHistoryParts = [];
@@ -178610,10 +178860,10 @@ Co-authored-by: ${gitCoAuthorSettings.name} <${gitCoAuthorSettings.email}>`;
178610
178860
  if (!path20.isAbsolute(params.directory)) {
178611
178861
  return "Directory must be an absolute path.";
178612
178862
  }
178613
- const userSkillsDir = this.config.storage.getUserSkillsDir();
178863
+ const userSkillsDirs = this.config.storage.getUserSkillsDirs();
178614
178864
  const resolvedDirectoryPath = path20.resolve(params.directory);
178615
- const isWithinUserSkills = isSubpath(
178616
- userSkillsDir,
178865
+ const isWithinUserSkills = isSubpaths(
178866
+ userSkillsDirs,
178617
178867
  resolvedDirectoryPath
178618
178868
  );
178619
178869
  if (isWithinUserSkills) {
@@ -205513,10 +205763,10 @@ ${result.llmContent}`;
205513
205763
  const workspaceContext = this.config.getWorkspaceContext();
205514
205764
  const globalTempDir = Storage.getGlobalTempDir();
205515
205765
  const projectTempDir = this.config.storage.getProjectTempDir();
205516
- const userSkillsDir = this.config.storage.getUserSkillsDir();
205766
+ const userSkillsDirs = this.config.storage.getUserSkillsDirs();
205517
205767
  const resolvedFilePath = path34.resolve(filePath);
205518
205768
  const isWithinTempDir = isSubpath(projectTempDir, resolvedFilePath) || isSubpath(globalTempDir, resolvedFilePath);
205519
- const isWithinUserSkills = isSubpath(userSkillsDir, resolvedFilePath);
205769
+ const isWithinUserSkills = isSubpaths(userSkillsDirs, resolvedFilePath);
205520
205770
  if (!workspaceContext.isPathWithinWorkspace(filePath) && !isWithinTempDir && !isWithinUserSkills) {
205521
205771
  const directories = workspaceContext.getDirectories();
205522
205772
  return `File path must be within one of the workspace directories: ${directories.join(
@@ -226483,8 +226733,8 @@ ${directoryContent}`;
226483
226733
  if (!path40.isAbsolute(params.path)) {
226484
226734
  return `Path must be absolute: ${params.path}`;
226485
226735
  }
226486
- const userSkillsBase = this.config.storage.getUserSkillsDir();
226487
- const isUnderUserSkills = isSubpath(userSkillsBase, params.path);
226736
+ const userSkillsBases = this.config.storage.getUserSkillsDirs();
226737
+ const isUnderUserSkills = isSubpaths(userSkillsBases, params.path);
226488
226738
  const workspaceContext = this.config.getWorkspaceContext();
226489
226739
  if (!workspaceContext.isPathWithinWorkspace(params.path) && !isUnderUserSkills) {
226490
226740
  const directories = workspaceContext.getDirectories();
@@ -242175,6 +242425,7 @@ var init_skill_manager = __esm({
242175
242425
  init_skill_load();
242176
242426
  init_debugLogger();
242177
242427
  init_textUtils();
242428
+ init_storage();
242178
242429
  debugLogger65 = createDebugLogger("SKILL_MANAGER");
242179
242430
  QWEN_CONFIG_DIR2 = ".rdmind";
242180
242431
  SKILLS_CONFIG_DIR = "skills";
@@ -242485,15 +242736,15 @@ var init_skill_manager = __esm({
242485
242736
  * Gets the base directory for skills at a specific level.
242486
242737
  *
242487
242738
  * @param level - Storage level
242488
- * @returns Absolute directory path
242739
+ * @returns Absolute directory paths
242489
242740
  */
242490
- getSkillsBaseDir(level) {
242491
- const baseDir = level === "project" ? path51.join(
242492
- this.config.getProjectRoot(),
242493
- QWEN_CONFIG_DIR2,
242494
- SKILLS_CONFIG_DIR
242495
- ) : path51.join(os20.homedir(), QWEN_CONFIG_DIR2, SKILLS_CONFIG_DIR);
242496
- return baseDir;
242741
+ getSkillsBaseDirs(level) {
242742
+ const baseDirs = level === "project" ? SKILL_PROVIDER_CONFIG_DIRS.map(
242743
+ (v2) => path51.join(this.config.getProjectRoot(), v2, SKILLS_CONFIG_DIR)
242744
+ ) : SKILL_PROVIDER_CONFIG_DIRS.map(
242745
+ (v2) => path51.join(os20.homedir(), v2, SKILLS_CONFIG_DIR)
242746
+ );
242747
+ return baseDirs;
242497
242748
  }
242498
242749
  /**
242499
242750
  * Lists skills at a specific level.
@@ -242524,9 +242775,23 @@ var init_skill_manager = __esm({
242524
242775
  );
242525
242776
  return skills2;
242526
242777
  }
242527
- const baseDir = this.getSkillsBaseDir(level);
242528
- debugLogger65.debug(`Loading ${level} level skills from: ${baseDir}`);
242529
- const skills = await this.loadSkillsFromDir(baseDir, level);
242778
+ const baseDirs = this.getSkillsBaseDirs(level);
242779
+ const skills = [];
242780
+ const seenNames = /* @__PURE__ */ new Set();
242781
+ for (const baseDir of baseDirs) {
242782
+ debugLogger65.debug(`Loading ${level} level skills from: ${baseDir}`);
242783
+ const skillsFromDir = await this.loadSkillsFromDir(baseDir, level);
242784
+ for (const skill of skillsFromDir) {
242785
+ if (seenNames.has(skill.name)) {
242786
+ debugLogger65.debug(
242787
+ `Skipping duplicate skill at ${level} level: ${skill.name} from ${baseDir}`
242788
+ );
242789
+ continue;
242790
+ }
242791
+ seenNames.add(skill.name);
242792
+ skills.push(skill);
242793
+ }
242794
+ }
242530
242795
  debugLogger65.debug(`Loaded ${skills.length} ${level} level skills`);
242531
242796
  return skills;
242532
242797
  }
@@ -242616,7 +242881,7 @@ var init_skill_manager = __esm({
242616
242881
  }
242617
242882
  updateWatchersFromCache() {
242618
242883
  const watchTargets = new Set(
242619
- ["project", "user"].map((level) => this.getSkillsBaseDir(level)).filter((baseDir) => fsSync2.existsSync(baseDir))
242884
+ ["project", "user"].map((level) => this.getSkillsBaseDirs(level)).reduce((acc, baseDirs) => acc.concat(baseDirs), []).filter((baseDir) => fsSync2.existsSync(baseDir))
242620
242885
  );
242621
242886
  for (const existingPath of this.watchers.keys()) {
242622
242887
  if (!watchTargets.has(existingPath)) {
@@ -242660,7 +242925,7 @@ var init_skill_manager = __esm({
242660
242925
  }, 150);
242661
242926
  }
242662
242927
  async ensureUserSkillsDir() {
242663
- const baseDir = this.getSkillsBaseDir("user");
242928
+ const baseDir = path51.join(os20.homedir(), QWEN_CONFIG_DIR2, SKILLS_CONFIG_DIR);
242664
242929
  try {
242665
242930
  await fs47.mkdir(baseDir, { recursive: true });
242666
242931
  } catch (error40) {
@@ -266797,12 +267062,12 @@ async function fetchModelKey(modelName) {
266797
267062
  try {
266798
267063
  let processedModelName = modelName;
266799
267064
  const modelNameLower = modelName.toLowerCase();
266800
- if (modelNameLower.startsWith("gemini") || modelNameLower.includes("codex")) {
267065
+ if (modelNameLower.startsWith("gemini") || modelNameLower.includes("codex") || modelNameLower.startsWith("gpt-5.4")) {
266801
267066
  const match2 = modelName.match(/^(.+?)\(\w+\)$/);
266802
267067
  if (match2) {
266803
267068
  processedModelName = match2[1];
266804
267069
  logger3.debug(
266805
- `${modelNameLower.startsWith("gemini") ? "gemini" : "codex"} \u6A21\u578B\u9884\u5904\u7406: ${modelName} -> ${processedModelName}`
267070
+ `${modelNameLower.startsWith("gemini") ? "gemini" : modelNameLower.includes("codex") ? "codex" : "gpt-5.4"} \u6A21\u578B\u9884\u5904\u7406: ${modelName} -> ${processedModelName}`
266806
267071
  );
266807
267072
  }
266808
267073
  }
@@ -276250,6 +276515,7 @@ __export(core_exports5, {
276250
276515
  isSdkMcpServerConfig: () => isSdkMcpServerConfig,
276251
276516
  isStructuredError: () => isStructuredError,
276252
276517
  isSubpath: () => isSubpath,
276518
+ isSubpaths: () => isSubpaths,
276253
276519
  isSupportedImageMimeType: () => isSupportedImageMimeType,
276254
276520
  isTelemetrySdkInitialized: () => isTelemetrySdkInitialized,
276255
276521
  isTomlFormat: () => isTomlFormat,
@@ -314987,7 +315253,7 @@ var init_zh = __esm({
314987
315253
  "No sessions found. Start a new session with {{cmd}}.": "\u672A\u53D1\u73B0\u4F1A\u8BDD\u3002\u8BF7\u4F7F\u7528 {{cmd}} \u5F00\u59CB\u65B0\u4F1A\u8BDD\u3002",
314988
315254
  "No saved session found with ID {{sessionId}}. Run `rdmind --resume` without an ID to choose from existing sessions.": "\u672A\u53D1\u73B0 ID \u4E3A {{sessionId}} \u7684\u5DF2\u4FDD\u5B58\u4F1A\u8BDD\u3002\u8BF7\u8FD0\u884C\u4E0D\u5E26 ID \u7684 `rdmind --resume` \u4EE5\u4ECE\u73B0\u6709\u4F1A\u8BDD\u4E2D\u9009\u62E9\u3002",
314989
315255
  "Clear conversation history and free up context": "\u6E05\u9664\u5BF9\u8BDD\u5386\u53F2\u5E76\u91CA\u653E\u4E0A\u4E0B\u6587",
314990
- "Compresses the context by replacing it with a summary.": "\u901A\u8FC7\u7528\u6458\u8981\u66FF\u6362\u6765\u538B\u7F29\u4E0A\u4E0B\u6587",
315256
+ "Compresses the context by replacing it with a summary.": "\u901A\u8FC7\u6458\u8981\u66FF\u6362\u6765\u538B\u7F29\u4E0A\u4E0B\u6587",
314991
315257
  "open full RDMind documentation in your browser": "\u5728\u6D4F\u89C8\u5668\u4E2D\u6253\u5F00\u5B8C\u6574\u7684 RDMind \u6587\u6863",
314992
315258
  "Configuration not available.": "\u914D\u7F6E\u4E0D\u53EF\u7528",
314993
315259
  "change the auth method": "\u66F4\u6539\u8BA4\u8BC1\u65B9\u6CD5",
@@ -315947,11 +316213,8 @@ var init_zh = __esm({
315947
316213
  "\u6B63\u5728\u5C1D\u8BD5\u9000\u51FA Vim...",
315948
316214
  "\u8FD9\u4E0D\u662F\u4E00\u4E2A\u9519\u8BEF\uFF0C\u8FD9\u662F\u4E00\u4E2A\u672A\u8BB0\u5F55\u7684\u529F\u80FD...",
315949
316215
  "\u6B63\u5728\u5199\u4E00\u4E2A\u6C38\u8FDC\u4E0D\u4F1A\u89E6\u53D1\u7684 else...",
315950
- "\u6B63\u5728\u6392\u67E5\u662F\u8C01\u5199\u7684\u8FD9\u6BB5\u7956\u4F20\u4EE3\u7801...\u54E6\u662F\u6211\u81EA\u5DF1...",
315951
316216
  "\u6B63\u5728\u628A if-else \u91CD\u6784\u6210 switch...",
315952
- "\u6B63\u5728\u89E3\u51B3\u4F9D\u8D56\u5173\u7CFB\uFF08\u5730\u72F1\uFF09...",
315953
316217
  "\u6B63\u5728\u5C06\u5496\u5561\u8F6C\u6362\u4E3A\u4EE3\u7801...",
315954
- "\u6B63\u5728\u5220\u5E93\u8DD1\u8DEF...\u7684\u8FB9\u7F18\u8BD5\u63A2...",
315955
316218
  // --- 工作/职场相关 ---
315956
316219
  "\u6B63\u5728\u7B49\u5F85\u4EA7\u54C1\u7ECF\u7406\u6539\u9700\u6C42...",
315957
316220
  "\u6B63\u5728\u5B66\u4E60\u5982\u4F55\u4F18\u96C5\u5730\u62D2\u7EDD\u9700\u6C42...",
@@ -368274,7 +368537,8 @@ var SETTINGS_SCHEMA = {
368274
368537
  requiresRestart: true,
368275
368538
  default: void 0,
368276
368539
  description: "A list of MCP servers to allow.",
368277
- showInDialog: false
368540
+ showInDialog: false,
368541
+ mergeStrategy: "concat" /* CONCAT */
368278
368542
  },
368279
368543
  excluded: {
368280
368544
  type: "array",
@@ -368283,7 +368547,8 @@ var SETTINGS_SCHEMA = {
368283
368547
  requiresRestart: true,
368284
368548
  default: void 0,
368285
368549
  description: "A list of MCP servers to exclude.",
368286
- showInDialog: false
368550
+ showInDialog: false,
368551
+ mergeStrategy: "concat" /* CONCAT */
368287
368552
  }
368288
368553
  }
368289
368554
  },
@@ -377033,7 +377298,7 @@ __name(getPackageJson, "getPackageJson");
377033
377298
  // packages/cli/src/utils/version.ts
377034
377299
  async function getCliVersion() {
377035
377300
  const pkgJson = await getPackageJson();
377036
- return "0.2.8-alpha.8";
377301
+ return "0.2.8";
377037
377302
  }
377038
377303
  __name(getCliVersion, "getCliVersion");
377039
377304
 
@@ -384756,7 +385021,7 @@ var formatDuration = /* @__PURE__ */ __name((milliseconds) => {
384756
385021
 
384757
385022
  // packages/cli/src/generated/git-commit.ts
384758
385023
  init_esbuild_shims();
384759
- var GIT_COMMIT_INFO = "908297548";
385024
+ var GIT_COMMIT_INFO = "4ebf619b1";
384760
385025
 
384761
385026
  // packages/cli/src/utils/systemInfo.ts
384762
385027
  async function getNpmVersion() {
@@ -426118,11 +426383,18 @@ var import_react87 = __toESM(require_react(), 1);
426118
426383
  init_esbuild_shims();
426119
426384
  var XHS_SSO_MODELS = [
426120
426385
  {
426121
- id: "gpt-5.4",
426122
- displayName: "gpt-5.4",
426123
- baseUrl: "https://runway.devops.rednote.life/openai",
426386
+ id: "gpt-5.4(none)",
426387
+ displayName: "gpt-5.4(none)",
426388
+ baseUrl: "https://runway.devops.rednote.life/openai/v1/responses?api-version=v1",
426124
426389
  contextWindow: "272K",
426125
- description: "\u7528\u4E8E\u5904\u7406\u590D\u6742\u4E13\u4E1A\u5DE5\u4F5C\u7684\u524D\u6CBF\u6A21\u578B"
426390
+ description: "\u7528\u4E8E\u5904\u7406\u590D\u6742\u4E13\u4E1A\u5DE5\u4F5C\u7684\u524D\u6CBF\u6A21\u578B (\u4E0D\u542F\u7528\u63A8\u7406)"
426391
+ },
426392
+ {
426393
+ id: "gpt-5.4(medium)",
426394
+ displayName: "gpt-5.4(medium)",
426395
+ baseUrl: "https://runway.devops.rednote.life/openai/v1/responses?api-version=v1",
426396
+ contextWindow: "272K",
426397
+ description: "\u7528\u4E8E\u5904\u7406\u590D\u6742\u4E13\u4E1A\u5DE5\u4F5C\u7684\u524D\u6CBF\u6A21\u578B (\u63A8\u7406\u5F3A\u5EA6\u4E2D)"
426126
426398
  },
426127
426399
  {
426128
426400
  id: "gpt-5.3-codex(medium)",
@@ -426187,13 +426459,13 @@ var XHS_SSO_MODELS = [
426187
426459
  contextWindow: "200K",
426188
426460
  description: "\u667A\u8C31\u65B0\u4E00\u4EE3\u7684\u65D7\u8230\u57FA\u5EA7\u6A21\u578B\uFF0C\u9762\u5411 Agentic Engineering \u6253\u9020\uFF0C\u5BF9\u9F50 Claude Opus 4.5"
426189
426461
  },
426190
- {
426191
- id: "claude-opus-4-5@20251101",
426192
- displayName: "Claude Opus 4.5",
426193
- baseUrl: "https://runway.devops.rednote.life/openai/google/anthropic/v1",
426194
- contextWindow: "200K",
426195
- description: "Anthropic \u6700\u5F3A\u5927\u7684\u6A21\u578B\uFF0C\u64C5\u957F\u590D\u6742\u63A8\u7406\u548C\u4EE3\u7801\u751F\u6210"
426196
- },
426462
+ // {
426463
+ // id: 'claude-opus-4-5@20251101',
426464
+ // displayName: 'Claude Opus 4.5',
426465
+ // baseUrl: 'https://runway.devops.rednote.life/openai/google/anthropic/v1',
426466
+ // contextWindow: '200K',
426467
+ // description: 'Anthropic 最强大的模型,擅长复杂推理和代码生成',
426468
+ // },
426197
426469
  {
426198
426470
  id: "Kimi-K2.5",
426199
426471
  displayName: "Kimi-K2.5",
@@ -428057,7 +428329,7 @@ var ServerDetailStep = /* @__PURE__ */ __name(({
428057
428329
  ] }),
428058
428330
  /* @__PURE__ */ (0, import_jsx_runtime92.jsxs)(Box_default, { children: [
428059
428331
  /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Box_default, { width: LABEL_WIDTH, children: /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Text3, { color: theme.text.primary, children: t4("Source:") }) }),
428060
- /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Box_default, { children: /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Text3, { color: theme.text.primary, children: server.scope === "user" ? t4("User Settings") : server.scope === "workspace" ? t4("Workspace Settings") : t4("Extension") }) })
428332
+ /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Box_default, { children: /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Text3, { color: theme.text.primary, children: server.source === "user" ? t4("User Settings") : server.source === "project" ? t4("Workspace Settings") : t4("Extension") }) })
428061
428333
  ] }),
428062
428334
  /* @__PURE__ */ (0, import_jsx_runtime92.jsxs)(Box_default, { children: [
428063
428335
  /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Box_default, { width: LABEL_WIDTH, children: /* @__PURE__ */ (0, import_jsx_runtime92.jsx)(Text3, { color: theme.text.primary, children: t4("Command:") }) }),
@@ -428485,14 +428757,10 @@ var MCPManagementDialog = /* @__PURE__ */ __name(({
428485
428757
  let source2 = "user";
428486
428758
  if (serverConfig.extensionName) {
428487
428759
  source2 = "extension";
428488
- }
428489
- let scope = "user";
428490
- if (serverConfig.extensionName) {
428491
- scope = "extension";
428492
428760
  } else if (workspaceSettings.mcpServers?.[name3]) {
428493
- scope = "workspace";
428761
+ source2 = "project";
428494
428762
  } else if (userSettings.mcpServers?.[name3]) {
428495
- scope = "user";
428763
+ source2 = "user";
428496
428764
  }
428497
428765
  const isDisabled = config2.isMcpServerDisabled(name3);
428498
428766
  const invalidToolCount = serverTools.filter(
@@ -428502,7 +428770,6 @@ var MCPManagementDialog = /* @__PURE__ */ __name(({
428502
428770
  name: name3,
428503
428771
  status,
428504
428772
  source: source2,
428505
- scope,
428506
428773
  config: serverConfig,
428507
428774
  toolCount: serverTools.length,
428508
428775
  invalidToolCount,
@@ -428668,13 +428935,13 @@ var MCPManagementDialog = /* @__PURE__ */ __name(({
428668
428935
  const server = selectedServer;
428669
428936
  const settings = loadSettings();
428670
428937
  let targetScope = "user";
428671
- if (server.scope === "extension") {
428938
+ if (server.source === "extension") {
428672
428939
  debugLogger130.warn(
428673
428940
  `Cannot disable extension MCP server '${server.name}'`
428674
428941
  );
428675
428942
  setIsLoading(false);
428676
428943
  return;
428677
- } else if (server.scope === "workspace") {
428944
+ } else if (server.source === "project") {
428678
428945
  targetScope = "workspace";
428679
428946
  }
428680
428947
  const scopeSettings = settings.forScope(
@@ -447123,7 +447390,7 @@ var QwenAgent = class {
447123
447390
  async initialize(args) {
447124
447391
  this.clientCapabilities = args.clientCapabilities;
447125
447392
  const authMethods = buildAuthMethods();
447126
- const version2 = "0.2.8-alpha.8";
447393
+ const version2 = "0.2.8";
447127
447394
  return {
447128
447395
  protocolVersion: PROTOCOL_VERSION,
447129
447396
  agentInfo: {
package/locales/zh.js CHANGED
@@ -145,7 +145,7 @@ export default {
145
145
  '未发现 ID 为 {{sessionId}} 的已保存会话。请运行不带 ID 的 `rdmind --resume` 以从现有会话中选择。',
146
146
  'Clear conversation history and free up context': '清除对话历史并释放上下文',
147
147
  'Compresses the context by replacing it with a summary.':
148
- '通过用摘要替换来压缩上下文',
148
+ '通过摘要替换来压缩上下文',
149
149
  'open full RDMind documentation in your browser':
150
150
  '在浏览器中打开完整的 RDMind 文档',
151
151
  'Configuration not available.': '配置不可用',
@@ -1371,11 +1371,8 @@ export default {
1371
1371
  '正在尝试退出 Vim...',
1372
1372
  '这不是一个错误,这是一个未记录的功能...',
1373
1373
  '正在写一个永远不会触发的 else...',
1374
- '正在排查是谁写的这段祖传代码...哦是我自己...',
1375
1374
  '正在把 if-else 重构成 switch...',
1376
- '正在解决依赖关系(地狱)...',
1377
1375
  '正在将咖啡转换为代码...',
1378
- '正在删库跑路...的边缘试探...',
1379
1376
 
1380
1377
  // --- 工作/职场相关 ---
1381
1378
  '正在等待产品经理改需求...',
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rdmind/rdmind",
3
- "version": "0.2.8-alpha.8",
3
+ "version": "0.2.8",
4
4
  "description": "RDMind - AI-powered coding assistant",
5
5
  "type": "module",
6
6
  "main": "cli.js",
@@ -19,7 +19,7 @@
19
19
  "locales"
20
20
  ],
21
21
  "config": {
22
- "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.8-alpha.8"
22
+ "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.8"
23
23
  },
24
24
  "publishConfig": {
25
25
  "access": "public"