@rdmind/rdmind 0.2.8-alpha.9 → 0.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/cli.js +245 -79
  2. package/package.json +2 -2
package/cli.js CHANGED
@@ -134348,7 +134348,8 @@ var init_loggingContentGenerator = __esm({
134348
134348
  combinedParts.push({
134349
134349
  text: part.text,
134350
134350
  ...part.thought ? { thought: true } : {},
134351
- ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {}
134351
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
134352
+ ..."codexReasoningItem" in part && part.codexReasoningItem ? { codexReasoningItem: part.codexReasoningItem } : {}
134352
134353
  });
134353
134354
  }
134354
134355
  continue;
@@ -148869,6 +148870,45 @@ function buildCodexInput(contents, systemInstruction, modalities) {
148869
148870
  }
148870
148871
  function convertContentToCodexMessages(content, modalities) {
148871
148872
  const messages = [];
148873
+ const convertCodexReasoningItem = /* @__PURE__ */ __name((rawItem, fallbackText, fallbackSignature) => {
148874
+ if (!rawItem || typeof rawItem !== "object") {
148875
+ if (!fallbackSignature) {
148876
+ return null;
148877
+ }
148878
+ return {
148879
+ type: "reasoning",
148880
+ summary: [{ type: "summary_text", text: fallbackText }],
148881
+ encrypted_content: fallbackSignature
148882
+ };
148883
+ }
148884
+ const raw2 = rawItem;
148885
+ const type = raw2["type"];
148886
+ if (type !== "reasoning") {
148887
+ return null;
148888
+ }
148889
+ const normalized2 = {
148890
+ type: "reasoning",
148891
+ id: typeof raw2["id"] === "string" ? raw2["id"] : void 0,
148892
+ status: typeof raw2["status"] === "string" || raw2["status"] === null ? raw2["status"] : void 0,
148893
+ encrypted_content: typeof raw2["encrypted_content"] === "string" || raw2["encrypted_content"] === null ? raw2["encrypted_content"] : fallbackSignature
148894
+ };
148895
+ if (Array.isArray(raw2["summary"])) {
148896
+ normalized2.summary = raw2["summary"].filter(
148897
+ (summaryItem) => typeof summaryItem === "object" && summaryItem !== null
148898
+ ).map((summaryItem) => ({
148899
+ ...summaryItem,
148900
+ type: summaryItem["type"] === "summary_text" ? "summary_text" : "summary_text",
148901
+ text: typeof summaryItem["text"] === "string" ? summaryItem["text"] : ""
148902
+ }));
148903
+ }
148904
+ if (Array.isArray(raw2["content"])) {
148905
+ normalized2.content = raw2["content"].filter((contentItem) => typeof contentItem === "object" && contentItem).map((contentItem) => ({ ...contentItem }));
148906
+ }
148907
+ if (!normalized2.summary || normalized2.summary.length === 0) {
148908
+ normalized2.summary = [{ type: "summary_text", text: fallbackText }];
148909
+ }
148910
+ return normalized2;
148911
+ }, "convertCodexReasoningItem");
148872
148912
  if (!content) return messages;
148873
148913
  if (typeof content === "string") {
148874
148914
  if (content.trim()) {
@@ -148886,7 +148926,21 @@ function convertContentToCodexMessages(content, modalities) {
148886
148926
  const partObj = part;
148887
148927
  if ("text" in partObj && typeof partObj["text"] === "string") {
148888
148928
  const text = partObj["text"];
148889
- if (!partObj["thought"]) {
148929
+ const isThought = Boolean(partObj["thought"]);
148930
+ const thoughtSignature = typeof partObj["thoughtSignature"] === "string" ? partObj["thoughtSignature"] : void 0;
148931
+ const codexReasoningItem = "codexReasoningItem" in partObj ? partObj["codexReasoningItem"] : void 0;
148932
+ if (isThought) {
148933
+ if (role === "assistant") {
148934
+ const reasoningMessage = convertCodexReasoningItem(
148935
+ codexReasoningItem,
148936
+ text,
148937
+ thoughtSignature
148938
+ );
148939
+ if (reasoningMessage) {
148940
+ messages.push(reasoningMessage);
148941
+ }
148942
+ }
148943
+ } else {
148890
148944
  messages.push({ role, content: text });
148891
148945
  }
148892
148946
  }
@@ -149142,11 +149196,66 @@ function mapCodexStatusToFinishReason(status) {
149142
149196
  return FinishReason.STOP;
149143
149197
  }
149144
149198
  }
149199
+ function finalizeStreamResponse(response, streamState) {
149200
+ const finalResponse = convertCodexResponseToGemini(response);
149201
+ const shouldKeepThoughtPartsOnly = streamState.sawOutputTextDelta || streamState.sawFunctionCallChunk;
149202
+ if (!finalResponse.candidates?.length) {
149203
+ return finalResponse;
149204
+ }
149205
+ finalResponse.candidates = [
149206
+ {
149207
+ ...finalResponse.candidates[0] || {
149208
+ index: 0,
149209
+ content: { role: "model", parts: [] },
149210
+ safetyRatings: []
149211
+ },
149212
+ content: {
149213
+ role: "model",
149214
+ parts: finalResponse.candidates[0]?.content?.parts?.flatMap((part) => {
149215
+ if (shouldKeepThoughtPartsOnly && !part.thought) {
149216
+ return [];
149217
+ }
149218
+ if (streamState.sawReasoningSummaryTextDelta && part.thought) {
149219
+ return [
149220
+ {
149221
+ ...part,
149222
+ text: ""
149223
+ }
149224
+ ];
149225
+ }
149226
+ return [part];
149227
+ }) || []
149228
+ }
149229
+ }
149230
+ ];
149231
+ return finalResponse;
149232
+ }
149145
149233
  function convertCodexResponseToGemini(response) {
149146
149234
  const parts = [];
149147
149235
  if (response.output && Array.isArray(response.output)) {
149148
149236
  for (const item of response.output) {
149149
149237
  if (item.type === "reasoning") {
149238
+ const summaryText = item.summary?.map((summaryItem) => summaryItem.text).filter(Boolean).join("") || "";
149239
+ if (summaryText || item.encrypted_content) {
149240
+ const thoughtPart = {
149241
+ text: summaryText,
149242
+ thought: true
149243
+ };
149244
+ if (item.encrypted_content) {
149245
+ thoughtPart.thoughtSignature = item.encrypted_content;
149246
+ }
149247
+ thoughtPart.codexReasoningItem = {
149248
+ type: "reasoning",
149249
+ id: item.id,
149250
+ status: item.status,
149251
+ content: Array.isArray(item.content) ? item.content.map((contentItem) => ({
149252
+ ...contentItem
149253
+ })) : void 0,
149254
+ summary: item.summary?.map((summaryItem) => ({ ...summaryItem })),
149255
+ encrypted_content: item.encrypted_content
149256
+ };
149257
+ parts.push(thoughtPart);
149258
+ }
149150
149259
  continue;
149151
149260
  }
149152
149261
  if (item.type === "message" && item.content) {
@@ -149301,14 +149410,25 @@ var init_codexContentGenerator = __esm({
149301
149410
  };
149302
149411
  }
149303
149412
  async fetchApi(request4) {
149304
- const response = await fetch(this.baseUrl, {
149305
- method: "POST",
149306
- headers: {
149307
- "Content-Type": "application/json",
149308
- "api-key": this.apiKey
149309
- },
149310
- body: JSON.stringify(request4)
149311
- });
149413
+ let response;
149414
+ try {
149415
+ response = await fetch(this.baseUrl, {
149416
+ method: "POST",
149417
+ headers: {
149418
+ "Content-Type": "application/json",
149419
+ "api-key": this.apiKey
149420
+ },
149421
+ body: JSON.stringify(request4)
149422
+ });
149423
+ } catch (err) {
149424
+ const cause = err instanceof Error ? err.cause : void 0;
149425
+ const code2 = cause && typeof cause === "object" && "code" in cause ? cause.code : void 0;
149426
+ const msg = cause instanceof Error ? cause.message : String(cause ?? err);
149427
+ throw new Error(
149428
+ `Codex API network error: fetch failed (${code2 ?? "unknown"}). ${msg}`,
149429
+ { cause: err }
149430
+ );
149431
+ }
149312
149432
  if (!response.ok) {
149313
149433
  const errorText = await response.text();
149314
149434
  throw new Error(
@@ -149329,7 +149449,8 @@ var init_codexContentGenerator = __esm({
149329
149449
  model,
149330
149450
  input,
149331
149451
  stream: stream2,
149332
- store: true,
149452
+ store: false,
149453
+ include: ["reasoning.encrypted_content"],
149333
149454
  truncation: "auto",
149334
149455
  temperature: this.samplingParams?.temperature ?? 1,
149335
149456
  top_p: this.samplingParams?.top_p,
@@ -149353,49 +149474,96 @@ var init_codexContentGenerator = __esm({
149353
149474
  const decoder = new TextDecoder();
149354
149475
  let buffer = "";
149355
149476
  let currentEvent = "";
149356
- const yieldState = { hasYieldedText: false, hasYieldedFunctionCall: false };
149477
+ const streamDiag = {
149478
+ eventTypes: [],
149479
+ totalLines: 0,
149480
+ skippedLines: 0,
149481
+ firstRawChunk: "",
149482
+ lastRawDataSnippet: "",
149483
+ finalEventType: "",
149484
+ finalStatus: "",
149485
+ finalOutputTypes: [],
149486
+ finalOutputSummary: [],
149487
+ streamErrors: []
149488
+ };
149489
+ let streamFinished = false;
149357
149490
  const toolCallArgs = /* @__PURE__ */ new Map();
149491
+ const streamState = {
149492
+ sawReasoningSummaryTextDelta: false,
149493
+ sawOutputTextDelta: false,
149494
+ sawFunctionCallChunk: false
149495
+ };
149358
149496
  try {
149359
- while (true) {
149497
+ let isFirstChunk = true;
149498
+ while (!streamFinished) {
149360
149499
  const { done, value } = await reader.read();
149361
149500
  if (done) break;
149362
- buffer += decoder.decode(value, { stream: true });
149501
+ const chunk = decoder.decode(value, { stream: true });
149502
+ if (isFirstChunk) {
149503
+ streamDiag.firstRawChunk = chunk.slice(0, 500);
149504
+ isFirstChunk = false;
149505
+ }
149506
+ buffer += chunk;
149363
149507
  const lines = buffer.split("\n");
149364
149508
  buffer = lines.pop() || "";
149365
149509
  for (const line of lines) {
149366
149510
  const trimmed2 = line.trim();
149367
149511
  if (!trimmed2) continue;
149512
+ streamDiag.totalLines++;
149368
149513
  if (trimmed2.startsWith("event: ")) {
149369
149514
  currentEvent = trimmed2.slice(7).trim();
149370
149515
  continue;
149371
149516
  }
149372
149517
  if (trimmed2.startsWith("data: ")) {
149373
149518
  const dataStr = trimmed2.slice(6).trim();
149374
- if (dataStr === "[DONE]") return;
149519
+ if (dataStr === "[DONE]") {
149520
+ streamFinished = true;
149521
+ break;
149522
+ }
149375
149523
  try {
149376
149524
  const data = JSON.parse(dataStr);
149525
+ streamDiag.lastRawDataSnippet = dataStr.slice(0, 1e3);
149377
149526
  const eventType = currentEvent || data.type || "";
149527
+ if (eventType && !streamDiag.eventTypes.includes(eventType)) {
149528
+ streamDiag.eventTypes.push(eventType);
149529
+ }
149530
+ if (eventType === "response.completed" || eventType === "response.incomplete" || eventType === "response.failed") {
149531
+ const finalResponse = data.response;
149532
+ streamDiag.finalEventType = eventType;
149533
+ streamDiag.finalStatus = finalResponse?.status ?? "";
149534
+ streamDiag.finalOutputTypes = finalResponse?.output?.map((item) => item.type) ?? [];
149535
+ streamDiag.finalOutputSummary = finalResponse?.output?.map((item) => ({
149536
+ type: item.type,
149537
+ contentTypes: item.content?.map(
149538
+ (c4) => typeof c4 === "object" && c4 && "type" in c4 ? c4.type : void 0
149539
+ ).filter((type) => typeof type === "string")
149540
+ })) ?? [];
149541
+ }
149542
+ if (eventType === "error") {
149543
+ const errPayload = data;
149544
+ const nested = errPayload["error"];
149545
+ const code2 = errPayload["code"] || nested?.code || nested?.type || "unknown";
149546
+ const message = errPayload["message"] || nested?.message || JSON.stringify(data);
149547
+ streamDiag.streamErrors.push({ code: code2, message });
149548
+ }
149378
149549
  const response = this.handleStreamEvent(
149379
- eventType,
149550
+ currentEvent,
149380
149551
  data,
149381
149552
  toolCallArgs,
149382
- yieldState
149553
+ streamState
149383
149554
  );
149384
149555
  if (response) {
149385
- const parts = response.candidates?.[0]?.content?.parts;
149386
- if (parts?.some((p2) => p2.text))
149387
- yieldState.hasYieldedText = true;
149388
- if (parts?.some((p2) => p2.functionCall))
149389
- yieldState.hasYieldedFunctionCall = true;
149390
149556
  yield response;
149391
149557
  }
149392
149558
  } catch {
149393
149559
  }
149560
+ } else {
149561
+ streamDiag.skippedLines++;
149394
149562
  }
149395
149563
  }
149396
149564
  }
149397
149565
  context2.duration = Date.now() - context2.startTime;
149398
- await this.logStreamingSuccess(context2, request4);
149566
+ await this.logStreamingSuccess(context2, request4, streamDiag);
149399
149567
  } catch (error40) {
149400
149568
  context2.duration = Date.now() - context2.startTime;
149401
149569
  await this.logError(context2, error40, request4);
@@ -149404,14 +149572,23 @@ var init_codexContentGenerator = __esm({
149404
149572
  reader.releaseLock();
149405
149573
  }
149406
149574
  }
149407
- handleStreamEvent(event, data, toolCallArgs, yieldState) {
149575
+ handleStreamEvent(event, data, toolCallArgs, streamState) {
149408
149576
  switch (event) {
149409
149577
  case "response.reasoning_summary_text.delta": {
149410
- return null;
149578
+ const text = data.delta;
149579
+ if (!text) return null;
149580
+ streamState.sawReasoningSummaryTextDelta = true;
149581
+ return createGeminiResponse(data.item_id || "unknown", [
149582
+ {
149583
+ text,
149584
+ thought: true
149585
+ }
149586
+ ]);
149411
149587
  }
149412
149588
  case "response.output_text.delta": {
149413
149589
  const text = data.delta;
149414
149590
  if (!text) return null;
149591
+ streamState.sawOutputTextDelta = true;
149415
149592
  return createGeminiResponse(data.item_id || "unknown", [{ text }]);
149416
149593
  }
149417
149594
  case "response.function_call_arguments.delta": {
@@ -149435,6 +149612,7 @@ var init_codexContentGenerator = __esm({
149435
149612
  const callId = item.call_id || item.id || accumulated?.id || `call_${Date.now()}`;
149436
149613
  const name3 = item.name || accumulated?.name || "unknown";
149437
149614
  toolCallArgs.delete(index);
149615
+ streamState.sawFunctionCallChunk = true;
149438
149616
  return createGeminiResponse(data.item_id || "unknown", [
149439
149617
  {
149440
149618
  functionCall: { id: callId, name: name3, args }
@@ -149446,44 +149624,29 @@ var init_codexContentGenerator = __esm({
149446
149624
  }
149447
149625
  return null;
149448
149626
  }
149449
- case "response.completed":
149450
- case "response.incomplete":
149627
+ case "response.completed": {
149628
+ const response = data.response;
149629
+ if (response) {
149630
+ return finalizeStreamResponse(response, streamState);
149631
+ }
149632
+ return createGeminiResponse("final", []);
149633
+ }
149634
+ case "response.incomplete": {
149635
+ const response = data.response;
149636
+ if (response) {
149637
+ return finalizeStreamResponse(response, streamState);
149638
+ }
149639
+ return createGeminiResponse("final", [], FinishReason.MAX_TOKENS);
149640
+ }
149451
149641
  case "response.failed": {
149452
149642
  const response = data.response;
149453
- const finishReason = event === "response.incomplete" ? FinishReason.MAX_TOKENS : event === "response.failed" ? FinishReason.FINISH_REASON_UNSPECIFIED : mapCodexStatusToFinishReason(response?.status);
149454
- const usage2 = response?.usage;
149455
- const parts = [];
149456
- if (response?.output) {
149457
- for (const item of response.output) {
149458
- if (!yieldState.hasYieldedText && item.type === "message" && item.content) {
149459
- const text = item.content.map((c4) => c4.text).filter(Boolean).join("");
149460
- if (text) parts.push({ text });
149461
- } else if (!yieldState.hasYieldedFunctionCall && item.type === "function_call" && item.arguments) {
149462
- try {
149463
- const args = JSON.parse(item.arguments);
149464
- parts.push({
149465
- functionCall: {
149466
- id: item.call_id || item.id || `call_${Date.now()}`,
149467
- name: item.name || "unknown",
149468
- args
149469
- }
149470
- });
149471
- } catch {
149472
- }
149473
- }
149474
- }
149643
+ if (response) {
149644
+ return finalizeStreamResponse(response, streamState);
149475
149645
  }
149476
149646
  return createGeminiResponse(
149477
- response?.id || "final",
149478
- parts,
149479
- finishReason,
149480
- usage2 ? {
149481
- promptTokenCount: usage2.input_tokens,
149482
- candidatesTokenCount: usage2.output_tokens,
149483
- totalTokenCount: usage2.total_tokens,
149484
- cachedContentTokenCount: usage2.input_tokens_details?.cached_tokens,
149485
- thoughtsTokenCount: usage2.output_tokens_details?.reasoning_tokens
149486
- } : void 0
149647
+ "final",
149648
+ [],
149649
+ FinishReason.FINISH_REASON_UNSPECIFIED
149487
149650
  );
149488
149651
  }
149489
149652
  default:
@@ -149527,7 +149690,7 @@ var init_codexContentGenerator = __esm({
149527
149690
  await this.logger.logInteraction(request4, void 0, error40);
149528
149691
  }
149529
149692
  }
149530
- async logStreamingSuccess(context2, request4) {
149693
+ async logStreamingSuccess(context2, request4, diagnostics) {
149531
149694
  if (!this.cliConfig) return;
149532
149695
  const event = new ApiResponseEvent(
149533
149696
  "unknown",
@@ -149539,7 +149702,10 @@ var init_codexContentGenerator = __esm({
149539
149702
  );
149540
149703
  logApiResponse(this.cliConfig, event);
149541
149704
  if (this.enableOpenAILogging && this.logger) {
149542
- await this.logger.logInteraction(request4, { streamed: true });
149705
+ await this.logger.logInteraction(request4, {
149706
+ streamed: true,
149707
+ ...diagnostics ?? {}
149708
+ });
149543
149709
  }
149544
149710
  }
149545
149711
  };
@@ -149554,6 +149720,7 @@ var init_codexContentGenerator = __esm({
149554
149720
  __name(convertTools, "convertTools");
149555
149721
  __name(convertGeminiSchemaToOpenAI, "convertGeminiSchemaToOpenAI");
149556
149722
  __name(mapCodexStatusToFinishReason, "mapCodexStatusToFinishReason");
149723
+ __name(finalizeStreamResponse, "finalizeStreamResponse");
149557
149724
  __name(convertCodexResponseToGemini, "convertCodexResponseToGemini");
149558
149725
  __name(createGeminiResponse, "createGeminiResponse");
149559
149726
  }
@@ -161145,7 +161312,7 @@ __export(geminiContentGenerator_exports2, {
161145
161312
  createGeminiContentGenerator: () => createGeminiContentGenerator
161146
161313
  });
161147
161314
  function createGeminiContentGenerator(config2, gcConfig) {
161148
- const version2 = "0.2.8-alpha.9";
161315
+ const version2 = "0.2.8";
161149
161316
  const userAgent2 = config2.userAgent || `QwenCode/${version2} (${process.platform}; ${process.arch})`;
161150
161317
  const baseHeaders = {
161151
161318
  "User-Agent": userAgent2
@@ -163095,6 +163262,12 @@ This error was probably caused by cyclic schema references in one of the followi
163095
163262
  if (thoughtContentPart && thoughtSignature) {
163096
163263
  thoughtContentPart.thoughtSignature = thoughtSignature;
163097
163264
  }
163265
+ const codexReasoningItem = allModelParts.find(
163266
+ (part) => part && typeof part === "object" && "thought" in part && part.thought && "codexReasoningItem" in part
163267
+ );
163268
+ if (thoughtContentPart && codexReasoningItem?.codexReasoningItem) {
163269
+ thoughtContentPart.codexReasoningItem = codexReasoningItem.codexReasoningItem;
163270
+ }
163098
163271
  }
163099
163272
  const contentParts = allModelParts.filter((part) => !part.thought);
163100
163273
  const consolidatedHistoryParts = [];
@@ -377125,7 +377298,7 @@ __name(getPackageJson, "getPackageJson");
377125
377298
  // packages/cli/src/utils/version.ts
377126
377299
  async function getCliVersion() {
377127
377300
  const pkgJson = await getPackageJson();
377128
- return "0.2.8-alpha.9";
377301
+ return "0.2.8";
377129
377302
  }
377130
377303
  __name(getCliVersion, "getCliVersion");
377131
377304
 
@@ -384848,7 +385021,7 @@ var formatDuration = /* @__PURE__ */ __name((milliseconds) => {
384848
385021
 
384849
385022
  // packages/cli/src/generated/git-commit.ts
384850
385023
  init_esbuild_shims();
384851
- var GIT_COMMIT_INFO = "32931e816";
385024
+ var GIT_COMMIT_INFO = "4ebf619b1";
384852
385025
 
384853
385026
  // packages/cli/src/utils/systemInfo.ts
384854
385027
  async function getNpmVersion() {
@@ -426223,13 +426396,6 @@ var XHS_SSO_MODELS = [
426223
426396
  contextWindow: "272K",
426224
426397
  description: "\u7528\u4E8E\u5904\u7406\u590D\u6742\u4E13\u4E1A\u5DE5\u4F5C\u7684\u524D\u6CBF\u6A21\u578B (\u63A8\u7406\u5F3A\u5EA6\u4E2D)"
426225
426398
  },
426226
- {
426227
- id: "gpt-5.4(xhigh)",
426228
- displayName: "gpt-5.4(xhigh)",
426229
- baseUrl: "https://runway.devops.rednote.life/openai/v1/responses?api-version=v1",
426230
- contextWindow: "272K",
426231
- description: "\u7528\u4E8E\u5904\u7406\u590D\u6742\u4E13\u4E1A\u5DE5\u4F5C\u7684\u524D\u6CBF\u6A21\u578B (\u63A8\u7406\u5F3A\u5EA6\u6781\u9AD8)"
426232
- },
426233
426399
  {
426234
426400
  id: "gpt-5.3-codex(medium)",
426235
426401
  displayName: "gpt-5.3-codex(medium)",
@@ -426293,13 +426459,13 @@ var XHS_SSO_MODELS = [
426293
426459
  contextWindow: "200K",
426294
426460
  description: "\u667A\u8C31\u65B0\u4E00\u4EE3\u7684\u65D7\u8230\u57FA\u5EA7\u6A21\u578B\uFF0C\u9762\u5411 Agentic Engineering \u6253\u9020\uFF0C\u5BF9\u9F50 Claude Opus 4.5"
426295
426461
  },
426296
- {
426297
- id: "claude-opus-4-5@20251101",
426298
- displayName: "Claude Opus 4.5",
426299
- baseUrl: "https://runway.devops.rednote.life/openai/google/anthropic/v1",
426300
- contextWindow: "200K",
426301
- description: "Anthropic \u6700\u5F3A\u5927\u7684\u6A21\u578B\uFF0C\u64C5\u957F\u590D\u6742\u63A8\u7406\u548C\u4EE3\u7801\u751F\u6210"
426302
- },
426462
+ // {
426463
+ // id: 'claude-opus-4-5@20251101',
426464
+ // displayName: 'Claude Opus 4.5',
426465
+ // baseUrl: 'https://runway.devops.rednote.life/openai/google/anthropic/v1',
426466
+ // contextWindow: '200K',
426467
+ // description: 'Anthropic 最强大的模型,擅长复杂推理和代码生成',
426468
+ // },
426303
426469
  {
426304
426470
  id: "Kimi-K2.5",
426305
426471
  displayName: "Kimi-K2.5",
@@ -447224,7 +447390,7 @@ var QwenAgent = class {
447224
447390
  async initialize(args) {
447225
447391
  this.clientCapabilities = args.clientCapabilities;
447226
447392
  const authMethods = buildAuthMethods();
447227
- const version2 = "0.2.8-alpha.9";
447393
+ const version2 = "0.2.8";
447228
447394
  return {
447229
447395
  protocolVersion: PROTOCOL_VERSION,
447230
447396
  agentInfo: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rdmind/rdmind",
3
- "version": "0.2.8-alpha.9",
3
+ "version": "0.2.8",
4
4
  "description": "RDMind - AI-powered coding assistant",
5
5
  "type": "module",
6
6
  "main": "cli.js",
@@ -19,7 +19,7 @@
19
19
  "locales"
20
20
  ],
21
21
  "config": {
22
- "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.8-alpha.9"
22
+ "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.8"
23
23
  },
24
24
  "publishConfig": {
25
25
  "access": "public"