@rdmind/rdmind 0.2.3-alpha.1 → 0.2.3-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/cli.js +1128 -27
  2. package/package.json +2 -2
package/cli.js CHANGED
@@ -133331,6 +133331,8 @@ var init_tokenLimits = __esm({
133331
133331
  // some Sonnet 3.7/Opus variants advertise 1M beta in docs
133332
133332
  [/^claude-sonnet-4.*$/, LIMITS["1m"]],
133333
133333
  [/^claude-opus-4.*$/, LIMITS["1m"]],
133334
+ // Claude Haiku 4.x models have a 200K context window (the Opus/Sonnet 4.x entries above deliberately use 1M as an upper bound)
133335
+ [/^claude-haiku-4.*$/, LIMITS["200k"]],
133334
133336
  // -------------------
133335
133337
  // Alibaba / Qwen
133336
133338
  // -------------------
@@ -146025,6 +146027,827 @@ var init_geminiContentGenerator = __esm({
146025
146027
  }
146026
146028
  });
146027
146029
 
146030
+ // packages/core/src/core/vertexAnthropicContentGenerator.ts
146031
+ var vertexAnthropicContentGenerator_exports = {};
146032
+ __export(vertexAnthropicContentGenerator_exports, {
146033
+ VertexAnthropicContentGenerator: () => VertexAnthropicContentGenerator
146034
+ });
146035
+ var DefaultTelemetryService2, VertexAnthropicContentGenerator;
146036
+ var init_vertexAnthropicContentGenerator = __esm({
146037
+ "packages/core/src/core/vertexAnthropicContentGenerator.ts"() {
146038
+ "use strict";
146039
+ init_esbuild_shims();
146040
+ init_node();
146041
+ init_errorHandler();
146042
+ init_loggers();
146043
+ init_types();
146044
+ init_openaiLogger();
146045
+ DefaultTelemetryService2 = class {
146046
+ constructor(config2, enableLogging = false, loggingDir) {
146047
+ this.config = config2;
146048
+ this.enableLogging = enableLogging;
146049
+ this.logger = new OpenAILogger(loggingDir);
146050
+ }
146051
+ static {
146052
+ __name(this, "DefaultTelemetryService");
146053
+ }
146054
+ logger;
146055
+ async logSuccess(context2, response, request4, rawResponse) {
146056
+ const responseEvent = new ApiResponseEvent(
146057
+ response.responseId || "unknown",
146058
+ context2.model,
146059
+ context2.duration,
146060
+ context2.userPromptId,
146061
+ context2.authType,
146062
+ response.usageMetadata
146063
+ );
146064
+ logApiResponse(this.config, responseEvent);
146065
+ if (this.enableLogging && request4 && rawResponse) {
146066
+ await this.logger.logInteraction(request4, rawResponse);
146067
+ }
146068
+ }
146069
+ async logError(context2, error2, request4) {
146070
+ const errorMessage = error2 instanceof Error ? error2.message : String(error2);
146071
+ const apiError = error2;
146072
+ const errorEvent = new ApiErrorEvent(
146073
+ apiError?.requestID || "unknown",
146074
+ context2.model,
146075
+ errorMessage,
146076
+ context2.duration,
146077
+ context2.userPromptId,
146078
+ context2.authType,
146079
+ apiError?.type,
146080
+ apiError?.code
146081
+ );
146082
+ logApiError(this.config, errorEvent);
146083
+ if (this.enableLogging && request4) {
146084
+ await this.logger.logInteraction(request4, void 0, error2);
146085
+ }
146086
+ }
146087
+ async logStreamingSuccess(context2, responses, request4, _chunks, combinedResponse) {
146088
+ const finalUsageMetadata = responses.slice().reverse().find((r5) => r5.usageMetadata)?.usageMetadata;
146089
+ const lastResponse = responses[responses.length - 1];
146090
+ const responseEvent = new ApiResponseEvent(
146091
+ lastResponse?.responseId || "unknown",
146092
+ context2.model,
146093
+ context2.duration,
146094
+ context2.userPromptId,
146095
+ context2.authType,
146096
+ finalUsageMetadata
146097
+ );
146098
+ logApiResponse(this.config, responseEvent);
146099
+ if (this.enableLogging && request4 && combinedResponse) {
146100
+ await this.logger.logInteraction(request4, combinedResponse);
146101
+ }
146102
+ }
146103
+ };
146104
+ VertexAnthropicContentGenerator = class {
146105
+ static {
146106
+ __name(this, "VertexAnthropicContentGenerator");
146107
+ }
146108
+ baseUrl;
146109
+ apiKey;
146110
+ samplingParams;
146111
+ reasoning;
146112
+ cliConfig;
146113
+ telemetryService;
146114
+ errorHandler;
146115
+ constructor(config2, cliConfig) {
146116
+ this.baseUrl = config2.baseUrl || "";
146117
+ this.apiKey = config2.apiKey || "";
146118
+ this.samplingParams = config2.samplingParams;
146119
+ this.reasoning = config2.reasoning;
146120
+ this.cliConfig = cliConfig;
146121
+ if (!this.apiKey) {
146122
+ throw new Error("API key is required for Vertex Anthropic");
146123
+ }
146124
+ if (!this.baseUrl) {
146125
+ throw new Error("Base URL is required for Vertex Anthropic");
146126
+ }
146127
+ if (cliConfig) {
146128
+ this.telemetryService = new DefaultTelemetryService2(
146129
+ cliConfig,
146130
+ config2.enableOpenAILogging,
146131
+ config2.openAILoggingDir
146132
+ );
146133
+ } else {
146134
+ this.telemetryService = {
146135
+ logSuccess: /* @__PURE__ */ __name(async () => {
146136
+ }, "logSuccess"),
146137
+ logError: /* @__PURE__ */ __name(async () => {
146138
+ }, "logError"),
146139
+ logStreamingSuccess: /* @__PURE__ */ __name(async () => {
146140
+ }, "logStreamingSuccess")
146141
+ };
146142
+ }
146143
+ this.errorHandler = new EnhancedErrorHandler(
146144
+ (error2, _request) => this.shouldSuppressErrorLogging(error2, _request)
146145
+ );
146146
+ }
146147
+ shouldSuppressErrorLogging(_error, _request) {
146148
+ return false;
146149
+ }
146150
+ getRequestUrl(action) {
146151
+ return `${this.baseUrl}:${action}`;
146152
+ }
146153
+ async fetchApi(url2, body, signal) {
146154
+ const headers = {
146155
+ "Content-Type": "application/json",
146156
+ "api-key": this.apiKey
146157
+ };
146158
+ if (this.cliConfig?.getDebugMode()) {
146159
+ console.debug(
146160
+ `[VertexAnthropicContentGenerator] Request URL: ${url2}`
146161
+ );
146162
+ console.debug(
146163
+ `[VertexAnthropicContentGenerator] Request body:`,
146164
+ JSON.stringify(body, null, 2)
146165
+ );
146166
+ }
146167
+ const response = await fetch(url2, {
146168
+ method: "POST",
146169
+ headers,
146170
+ body: JSON.stringify(body),
146171
+ signal
146172
+ });
146173
+ if (!response.ok) {
146174
+ const errorText = await response.text();
146175
+ if (this.cliConfig?.getDebugMode()) {
146176
+ console.error(
146177
+ `[VertexAnthropicContentGenerator] API Error (${response.status}):`,
146178
+ errorText
146179
+ );
146180
+ }
146181
+ throw new Error(
146182
+ `Vertex Anthropic API request failed: ${response.status} ${response.statusText} - ${errorText}`
146183
+ );
146184
+ }
146185
+ return response;
146186
+ }
146187
+ async convertGeminiRequestToVertexAnthropic(request4) {
146188
+ const messages = [];
146189
+ let systemInstruction;
146190
+ if (request4.config?.systemInstruction) {
146191
+ if (typeof request4.config.systemInstruction === "string") {
146192
+ systemInstruction = request4.config.systemInstruction;
146193
+ } else if ("parts" in request4.config.systemInstruction && Array.isArray(request4.config.systemInstruction.parts)) {
146194
+ systemInstruction = request4.config.systemInstruction.parts.filter((p2) => typeof p2 === "object" && "text" in p2).map((p2) => p2.text).join("\n");
146195
+ }
146196
+ }
146197
+ const contents = Array.isArray(request4.contents) ? request4.contents : [request4.contents];
146198
+ for (const content of contents) {
146199
+ if (typeof content === "string") {
146200
+ messages.push({ role: "user", content });
146201
+ } else if ("role" in content && "parts" in content && content.parts) {
146202
+ const role = content.role === "model" ? "assistant" : "user";
146203
+ const contentBlocks = this.convertPartsToAnthropicBlocks(content.parts);
146204
+ if (contentBlocks.length > 0) {
146205
+ if (contentBlocks.length === 1 && contentBlocks[0].type === "text") {
146206
+ messages.push({ role, content: contentBlocks[0].text });
146207
+ } else {
146208
+ messages.push({ role, content: contentBlocks });
146209
+ }
146210
+ }
146211
+ }
146212
+ }
146213
+ const temperature = this.samplingParams?.temperature ?? 1;
146214
+ const thinking = this.buildThinkingConfig(request4);
146215
+ const defaultMaxTokens = thinking ? thinking.budget_tokens + 16e3 : 1e4;
146216
+ const maxTokens = this.samplingParams?.max_tokens ?? defaultMaxTokens;
146217
+ const vertexRequest = {
146218
+ anthropic_version: "vertex-2023-10-16",
146219
+ messages,
146220
+ max_tokens: maxTokens,
146221
+ temperature
146222
+ };
146223
+ if (systemInstruction) {
146224
+ vertexRequest.system = systemInstruction;
146225
+ }
146226
+ if (this.samplingParams?.top_p !== void 0) {
146227
+ vertexRequest.top_p = this.samplingParams.top_p;
146228
+ }
146229
+ if (this.samplingParams?.top_k !== void 0) {
146230
+ vertexRequest.top_k = this.samplingParams.top_k;
146231
+ }
146232
+ if (thinking) {
146233
+ vertexRequest.thinking = thinking;
146234
+ }
146235
+ if (request4.config?.tools && request4.config.tools.length > 0) {
146236
+ const tools = await this.convertGeminiToolsToAnthropic(
146237
+ request4.config.tools
146238
+ );
146239
+ if (tools.length > 0) {
146240
+ vertexRequest.tools = tools;
146241
+ }
146242
+ }
146243
+ return vertexRequest;
146244
+ }
146245
+ buildThinkingConfig(request4) {
146246
+ if (request4.config?.thinkingConfig?.includeThoughts === false) {
146247
+ return void 0;
146248
+ }
146249
+ const reasoning = this.reasoning;
146250
+ if (reasoning === false) {
146251
+ return void 0;
146252
+ }
146253
+ if (reasoning?.budget_tokens !== void 0) {
146254
+ return {
146255
+ type: "enabled",
146256
+ budget_tokens: reasoning.budget_tokens
146257
+ };
146258
+ }
146259
+ const effort = reasoning?.effort ?? "medium";
146260
+ const budgetTokens = effort === "low" ? 16e3 : effort === "high" ? 64e3 : 32e3;
146261
+ return {
146262
+ type: "enabled",
146263
+ budget_tokens: budgetTokens
146264
+ };
146265
+ }
146266
+ /**
146267
+ * 将 Gemini Part 数组转换为 Anthropic 内容块数组
146268
+ */
146269
+ convertPartsToAnthropicBlocks(parts) {
146270
+ const blocks = [];
146271
+ for (const part of parts) {
146272
+ const block2 = this.convertPartToAnthropicBlock(part);
146273
+ if (block2) {
146274
+ blocks.push(block2);
146275
+ }
146276
+ }
146277
+ return blocks;
146278
+ }
146279
+ /**
146280
+ * 将单个 Gemini Part 转换为 Anthropic 内容块
146281
+ */
146282
+ convertPartToAnthropicBlock(part) {
146283
+ if ("text" in part && "thought" in part && part.thought) {
146284
+ const thinkingBlock = {
146285
+ type: "thinking",
146286
+ thinking: part.text || ""
146287
+ };
146288
+ if ("thoughtSignature" in part && typeof part.thoughtSignature === "string") {
146289
+ thinkingBlock.signature = part.thoughtSignature;
146290
+ }
146291
+ return thinkingBlock;
146292
+ }
146293
+ if ("text" in part && part.text && !("thought" in part && part.thought)) {
146294
+ return { type: "text", text: part.text };
146295
+ }
146296
+ if (part.inlineData?.mimeType && part.inlineData?.data) {
146297
+ const mimeType = part.inlineData.mimeType;
146298
+ if (this.isSupportedImageMimeType(mimeType)) {
146299
+ return {
146300
+ type: "image",
146301
+ source: {
146302
+ type: "base64",
146303
+ media_type: mimeType,
146304
+ data: part.inlineData.data
146305
+ }
146306
+ };
146307
+ }
146308
+ if (mimeType === "application/pdf") {
146309
+ return {
146310
+ type: "document",
146311
+ source: {
146312
+ type: "base64",
146313
+ media_type: "application/pdf",
146314
+ data: part.inlineData.data
146315
+ }
146316
+ };
146317
+ }
146318
+ const displayName = part.inlineData.displayName ? ` (${part.inlineData.displayName})` : "";
146319
+ return {
146320
+ type: "text",
146321
+ text: `[Unsupported media type: ${mimeType}${displayName}]`
146322
+ };
146323
+ }
146324
+ if (part.fileData?.mimeType && part.fileData?.fileUri) {
146325
+ return {
146326
+ type: "text",
146327
+ text: `[External file reference: ${part.fileData.fileUri}]`
146328
+ };
146329
+ }
146330
+ if ("functionCall" in part && part.functionCall) {
146331
+ return {
146332
+ type: "tool_use",
146333
+ id: part.functionCall.id || `tool_${Date.now()}`,
146334
+ name: part.functionCall.name || "",
146335
+ input: part.functionCall.args || {}
146336
+ };
146337
+ }
146338
+ if ("functionResponse" in part && part.functionResponse) {
146339
+ const response = part.functionResponse;
146340
+ let content;
146341
+ if (response.response) {
146342
+ content = JSON.stringify(response.response);
146343
+ } else {
146344
+ content = "";
146345
+ }
146346
+ return {
146347
+ type: "tool_result",
146348
+ tool_use_id: response.id || "",
146349
+ content
146350
+ };
146351
+ }
146352
+ return null;
146353
+ }
146354
+ /**
146355
+ * 检查是否是 Anthropic 支持的图片类型
146356
+ */
146357
+ isSupportedImageMimeType(mimeType) {
146358
+ return mimeType === "image/jpeg" || mimeType === "image/png" || mimeType === "image/gif" || mimeType === "image/webp";
146359
+ }
146360
+ /**
146361
+ * 将 Gemini 工具定义转换为 Anthropic 格式
146362
+ * 参考 AnthropicContentConverter.convertGeminiToolsToAnthropic
146363
+ */
146364
+ async convertGeminiToolsToAnthropic(geminiTools) {
146365
+ const tools = [];
146366
+ if (!geminiTools) {
146367
+ return tools;
146368
+ }
146369
+ for (const tool of geminiTools) {
146370
+ let actualTool;
146371
+ const toolObj = tool;
146372
+ if ("tool" in toolObj && typeof toolObj["tool"] === "function") {
146373
+ actualTool = await toolObj["tool"]();
146374
+ } else {
146375
+ actualTool = tool;
146376
+ }
146377
+ if (!actualTool.functionDeclarations) {
146378
+ continue;
146379
+ }
146380
+ for (const func of actualTool.functionDeclarations) {
146381
+ if (!func.name) continue;
146382
+ let inputSchema;
146383
+ if (func.parametersJsonSchema) {
146384
+ inputSchema = {
146385
+ ...func.parametersJsonSchema
146386
+ };
146387
+ } else if (func.parameters) {
146388
+ inputSchema = func.parameters;
146389
+ }
146390
+ if (!inputSchema) {
146391
+ inputSchema = { type: "object", properties: {} };
146392
+ }
146393
+ if (typeof inputSchema["type"] !== "string") {
146394
+ inputSchema["type"] = "object";
146395
+ }
146396
+ tools.push({
146397
+ name: func.name,
146398
+ description: func.description,
146399
+ input_schema: inputSchema
146400
+ });
146401
+ }
146402
+ }
146403
+ return tools;
146404
+ }
146405
+ convertVertexAnthropicResponseToGemini(response) {
146406
+ const parts = [];
146407
+ for (const content of response.content) {
146408
+ if (content.type === "text" && content.text) {
146409
+ parts.push({ text: content.text });
146410
+ } else if (content.type === "thinking" && content.thinking) {
146411
+ const thinkingPart = { text: content.thinking, thought: true };
146412
+ if (content.signature) {
146413
+ thinkingPart.thoughtSignature = content.signature;
146414
+ }
146415
+ parts.push(thinkingPart);
146416
+ } else if (content.type === "tool_use" && content.name && content.id) {
146417
+ parts.push({
146418
+ functionCall: {
146419
+ name: content.name,
146420
+ args: content.input || {},
146421
+ id: content.id
146422
+ }
146423
+ });
146424
+ }
146425
+ }
146426
+ const result = {
146427
+ responseId: response.id,
146428
+ modelVersion: response.model,
146429
+ candidates: [
146430
+ {
146431
+ content: {
146432
+ parts,
146433
+ role: "model"
146434
+ },
146435
+ index: 0,
146436
+ finishReason: this.mapFinishReason(response.stop_reason),
146437
+ safetyRatings: []
146438
+ }
146439
+ ],
146440
+ promptFeedback: { safetyRatings: [] },
146441
+ usageMetadata: {
146442
+ promptTokenCount: response.usage.input_tokens,
146443
+ candidatesTokenCount: response.usage.output_tokens,
146444
+ totalTokenCount: response.usage.input_tokens + response.usage.output_tokens
146445
+ }
146446
+ };
146447
+ return result;
146448
+ }
146449
+ mapFinishReason(stopReason) {
146450
+ if (!stopReason) {
146451
+ return void 0;
146452
+ }
146453
+ switch (stopReason) {
146454
+ case "end_turn":
146455
+ return FinishReason.STOP;
146456
+ case "max_tokens":
146457
+ return FinishReason.MAX_TOKENS;
146458
+ case "stop_sequence":
146459
+ return FinishReason.STOP;
146460
+ case "tool_use":
146461
+ return FinishReason.STOP;
146462
+ default:
146463
+ return FinishReason.OTHER;
146464
+ }
146465
+ }
146466
+ /**
146467
+ * 安全解析 JSON,失败时返回默认值
146468
+ */
146469
+ safeJsonParse(jsonStr, defaultValue) {
146470
+ try {
146471
+ return JSON.parse(jsonStr);
146472
+ } catch {
146473
+ return defaultValue;
146474
+ }
146475
+ }
146476
+ async generateContent(request4, userPromptId) {
146477
+ const startTime = Date.now();
146478
+ const context2 = {
146479
+ userPromptId,
146480
+ model: request4.model,
146481
+ authType: "xhs-sso",
146482
+ startTime,
146483
+ duration: 0,
146484
+ isStreaming: false
146485
+ };
146486
+ try {
146487
+ const url2 = this.getRequestUrl("rawPredict");
146488
+ const body = await this.convertGeminiRequestToVertexAnthropic(request4);
146489
+ const response = await this.fetchApi(
146490
+ url2,
146491
+ body,
146492
+ request4.config?.abortSignal
146493
+ );
146494
+ const data = await response.json();
146495
+ context2.duration = Date.now() - startTime;
146496
+ const geminiResponse = this.convertVertexAnthropicResponseToGemini(data);
146497
+ await this.telemetryService.logSuccess(context2, geminiResponse, body, data);
146498
+ return geminiResponse;
146499
+ } catch (error2) {
146500
+ context2.duration = Date.now() - startTime;
146501
+ await this.telemetryService.logError(context2, error2, request4);
146502
+ return this.errorHandler.handle(error2, context2, request4);
146503
+ }
146504
+ }
146505
+ async generateContentStream(request4, userPromptId) {
146506
+ const startTime = Date.now();
146507
+ const context2 = {
146508
+ userPromptId,
146509
+ model: request4.model,
146510
+ authType: "xhs-sso",
146511
+ startTime,
146512
+ duration: 0,
146513
+ isStreaming: true
146514
+ };
146515
+ try {
146516
+ const url2 = this.getRequestUrl("streamRawPredict");
146517
+ const baseBody = await this.convertGeminiRequestToVertexAnthropic(request4);
146518
+ const body = {
146519
+ ...baseBody,
146520
+ stream: true
146521
+ };
146522
+ const response = await this.fetchApi(
146523
+ url2,
146524
+ body,
146525
+ request4.config?.abortSignal
146526
+ );
146527
+ if (!response.body) {
146528
+ throw new Error("Response body is null");
146529
+ }
146530
+ const stream2 = this.handleStream(response.body);
146531
+ const collectedResponses = [];
146532
+ return async function* () {
146533
+ try {
146534
+ for await (const chunk of stream2) {
146535
+ collectedResponses.push(chunk);
146536
+ yield chunk;
146537
+ }
146538
+ context2.duration = Date.now() - startTime;
146539
+ const combinedResponse = this.combineResponses(collectedResponses);
146540
+ await this.telemetryService.logStreamingSuccess(
146541
+ context2,
146542
+ collectedResponses,
146543
+ body,
146544
+ void 0,
146545
+ combinedResponse
146546
+ );
146547
+ } catch (error2) {
146548
+ context2.duration = Date.now() - startTime;
146549
+ await this.telemetryService.logError(context2, error2, body);
146550
+ throw error2;
146551
+ }
146552
+ }.call(this);
146553
+ } catch (error2) {
146554
+ context2.duration = Date.now() - startTime;
146555
+ await this.telemetryService.logError(context2, error2, request4);
146556
+ return this.errorHandler.handle(error2, context2, request4);
146557
+ }
146558
+ }
146559
+ combineResponses(responses) {
146560
+ if (responses.length === 0) {
146561
+ return {};
146562
+ }
146563
+ const lastResponse = responses[responses.length - 1];
146564
+ let combinedText = "";
146565
+ for (const response of responses) {
146566
+ if (response.candidates && response.candidates[0]?.content?.parts) {
146567
+ for (const part of response.candidates[0].content.parts) {
146568
+ if ("text" in part && part.text) {
146569
+ combinedText += part.text;
146570
+ }
146571
+ }
146572
+ }
146573
+ }
146574
+ return {
146575
+ ...lastResponse,
146576
+ candidates: lastResponse.candidates ? [
146577
+ {
146578
+ ...lastResponse.candidates[0],
146579
+ content: {
146580
+ ...lastResponse.candidates[0].content,
146581
+ parts: [{ text: combinedText }]
146582
+ }
146583
+ }
146584
+ ] : void 0
146585
+ };
146586
+ }
146587
+ async *handleStream(body) {
146588
+ const reader = body.getReader();
146589
+ const decoder = new TextDecoder();
146590
+ let buffer = "";
146591
+ let currentEvent = "";
146592
+ let messageId;
146593
+ let model = "";
146594
+ let cachedTokens = 0;
146595
+ let promptTokens = 0;
146596
+ let completionTokens = 0;
146597
+ let finishReason;
146598
+ const blocks = /* @__PURE__ */ new Map();
146599
+ try {
146600
+ while (true) {
146601
+ const { done, value } = await reader.read();
146602
+ if (done) break;
146603
+ buffer += decoder.decode(value, { stream: true });
146604
+ const lines = buffer.split("\n");
146605
+ buffer = lines.pop() || "";
146606
+ for (const line of lines) {
146607
+ const trimmedLine = line.trim();
146608
+ if (!trimmedLine) {
146609
+ currentEvent = "";
146610
+ continue;
146611
+ }
146612
+ if (trimmedLine.startsWith("event: ")) {
146613
+ currentEvent = trimmedLine.slice(7).trim();
146614
+ continue;
146615
+ }
146616
+ if (trimmedLine.startsWith("data: ")) {
146617
+ const dataStr = trimmedLine.slice(6).trim();
146618
+ if (!dataStr || dataStr === "[DONE]") continue;
146619
+ try {
146620
+ const data = JSON.parse(dataStr);
146621
+ const eventType = data.type || currentEvent;
146622
+ switch (eventType) {
146623
+ case "message_start": {
146624
+ if (data.message) {
146625
+ messageId = data.message.id ?? messageId;
146626
+ model = data.message.model ?? model;
146627
+ if (data.message.usage) {
146628
+ cachedTokens = data.message.usage.cache_read_input_tokens ?? 0;
146629
+ promptTokens = data.message.usage.input_tokens ?? 0;
146630
+ }
146631
+ }
146632
+ break;
146633
+ }
146634
+ case "content_block_start": {
146635
+ const index = data.index ?? 0;
146636
+ const type = String(data.content_block?.type || "text");
146637
+ const initialInput = type === "tool_use" && data.content_block?.input ? JSON.stringify(data.content_block.input) : "";
146638
+ const initialSignature = type === "thinking" && data.content_block?.signature ? String(data.content_block.signature) : "";
146639
+ if (this.cliConfig?.getDebugMode() && type === "tool_use") {
146640
+ console.debug(
146641
+ `[VertexAnthropicContentGenerator] Tool use block start:`,
146642
+ JSON.stringify({
146643
+ index,
146644
+ id: data.content_block?.id,
146645
+ name: data.content_block?.name,
146646
+ initialInput
146647
+ })
146648
+ );
146649
+ }
146650
+ blocks.set(index, {
146651
+ type,
146652
+ id: type === "tool_use" ? String(data.content_block?.id || "") : void 0,
146653
+ name: type === "tool_use" ? String(data.content_block?.name || "") : void 0,
146654
+ // SDK 兼容:如果初始 input 是空对象 {},则设为空字符串
146655
+ // 实际参数通过后续的 input_json_delta 事件发送
146656
+ inputJson: initialInput !== "{}" ? initialInput : "",
146657
+ signature: initialSignature
146658
+ });
146659
+ break;
146660
+ }
146661
+ case "content_block_delta": {
146662
+ const deltaType = data.delta?.type;
146663
+ const index = data.index ?? 0;
146664
+ if (deltaType === "text_delta" && data.delta?.text) {
146665
+ const chunk = this.buildGeminiChunk(
146666
+ { text: data.delta.text },
146667
+ messageId,
146668
+ model
146669
+ );
146670
+ yield chunk;
146671
+ } else if (deltaType === "thinking_delta" && data.delta?.thinking) {
146672
+ const chunk = this.buildGeminiChunk(
146673
+ { text: data.delta.thinking, thought: true },
146674
+ messageId,
146675
+ model
146676
+ );
146677
+ yield chunk;
146678
+ } else if (deltaType === "input_json_delta" && data.delta?.partial_json) {
146679
+ const blockState = blocks.get(index);
146680
+ if (blockState) {
146681
+ blockState.inputJson += data.delta.partial_json;
146682
+ if (this.cliConfig?.getDebugMode()) {
146683
+ console.debug(
146684
+ `[VertexAnthropicContentGenerator] input_json_delta:`,
146685
+ data.delta.partial_json
146686
+ );
146687
+ }
146688
+ }
146689
+ } else if (deltaType === "signature_delta" && data.delta?.signature) {
146690
+ const blockState = blocks.get(index);
146691
+ if (blockState) {
146692
+ blockState.signature += data.delta.signature;
146693
+ const chunk = this.buildGeminiChunk(
146694
+ { thought: true, thoughtSignature: data.delta.signature },
146695
+ messageId,
146696
+ model
146697
+ );
146698
+ yield chunk;
146699
+ }
146700
+ }
146701
+ break;
146702
+ }
146703
+ case "content_block_stop": {
146704
+ const index = data.index ?? 0;
146705
+ const blockState = blocks.get(index);
146706
+ if (blockState?.type === "tool_use") {
146707
+ const args = this.safeJsonParse(blockState.inputJson || "{}", {});
146708
+ if (this.cliConfig?.getDebugMode()) {
146709
+ console.debug(
146710
+ `[VertexAnthropicContentGenerator] Tool use block stop:`,
146711
+ JSON.stringify({
146712
+ index,
146713
+ id: blockState.id,
146714
+ name: blockState.name,
146715
+ inputJson: blockState.inputJson,
146716
+ parsedArgs: args
146717
+ })
146718
+ );
146719
+ }
146720
+ const chunk = this.buildGeminiChunk(
146721
+ {
146722
+ functionCall: {
146723
+ id: blockState.id,
146724
+ name: blockState.name,
146725
+ args
146726
+ }
146727
+ },
146728
+ messageId,
146729
+ model
146730
+ );
146731
+ yield chunk;
146732
+ }
146733
+ blocks.delete(index);
146734
+ break;
146735
+ }
146736
+ case "message_delta": {
146737
+ if (data.delta?.stop_reason) {
146738
+ finishReason = data.delta.stop_reason;
146739
+ }
146740
+ if (data.usage?.output_tokens !== void 0) {
146741
+ completionTokens = data.usage.output_tokens;
146742
+ }
146743
+ if (finishReason || data.usage) {
146744
+ const chunk = this.buildGeminiChunk(
146745
+ void 0,
146746
+ messageId,
146747
+ model,
146748
+ finishReason,
146749
+ {
146750
+ cachedContentTokenCount: cachedTokens,
146751
+ promptTokenCount: cachedTokens + promptTokens,
146752
+ candidatesTokenCount: completionTokens,
146753
+ totalTokenCount: cachedTokens + promptTokens + completionTokens
146754
+ }
146755
+ );
146756
+ yield chunk;
146757
+ }
146758
+ break;
146759
+ }
146760
+ case "message_stop": {
146761
+ if (promptTokens || completionTokens) {
146762
+ const chunk = this.buildGeminiChunk(
146763
+ void 0,
146764
+ messageId,
146765
+ model,
146766
+ finishReason,
146767
+ {
146768
+ cachedContentTokenCount: cachedTokens,
146769
+ promptTokenCount: cachedTokens + promptTokens,
146770
+ candidatesTokenCount: completionTokens,
146771
+ totalTokenCount: cachedTokens + promptTokens + completionTokens
146772
+ }
146773
+ );
146774
+ yield chunk;
146775
+ }
146776
+ break;
146777
+ }
146778
+ default:
146779
+ break;
146780
+ }
146781
+ } catch (error2) {
146782
+ if (this.cliConfig?.getDebugMode()) {
146783
+ console.error(
146784
+ `[VertexAnthropicContentGenerator] Failed to parse SSE data:`,
146785
+ dataStr,
146786
+ error2
146787
+ );
146788
+ }
146789
+ }
146790
+ }
146791
+ }
146792
+ }
146793
+ } finally {
146794
+ reader.releaseLock();
146795
+ }
146796
+ }
146797
+ buildGeminiChunk(part, responseId, model, finishReason, usageMetadata) {
146798
+ const response = new GenerateContentResponse();
146799
+ response.responseId = responseId;
146800
+ response.createTime = Date.now().toString();
146801
+ response.modelVersion = model || "";
146802
+ response.promptFeedback = { safetyRatings: [] };
146803
+ let candidateParts = [];
146804
+ if (part) {
146805
+ if (part.functionCall) {
146806
+ candidateParts = [
146807
+ {
146808
+ functionCall: {
146809
+ name: part.functionCall.name || "",
146810
+ args: part.functionCall.args || {},
146811
+ id: part.functionCall.id
146812
+ }
146813
+ }
146814
+ ];
146815
+ } else {
146816
+ candidateParts = [part];
146817
+ }
146818
+ }
146819
+ const mappedFinishReason = finishReason ? this.mapFinishReason(finishReason) : void 0;
146820
+ response.candidates = [
146821
+ {
146822
+ content: {
146823
+ parts: candidateParts,
146824
+ role: "model"
146825
+ },
146826
+ index: 0,
146827
+ safetyRatings: [],
146828
+ ...mappedFinishReason ? { finishReason: mappedFinishReason } : {}
146829
+ }
146830
+ ];
146831
+ if (usageMetadata) {
146832
+ response.usageMetadata = usageMetadata;
146833
+ }
146834
+ return response;
146835
+ }
146836
+ async countTokens(request4) {
146837
+ const content = JSON.stringify(request4.contents);
146838
+ const totalTokens = Math.ceil(content.length / 4);
146839
+ return { totalTokens };
146840
+ }
146841
+ async embedContent(_request) {
146842
+ throw new Error("Vertex Anthropic does not support embeddings.");
146843
+ }
146844
+ useSummarizedThinking() {
146845
+ return false;
146846
+ }
146847
+ };
146848
+ }
146849
+ });
146850
+
146028
146851
  // node_modules/@anthropic-ai/sdk/version.mjs
146029
146852
  var VERSION3;
146030
146853
  var init_version3 = __esm({
@@ -157557,7 +158380,7 @@ __export(geminiContentGenerator_exports2, {
157557
158380
  createGeminiContentGenerator: () => createGeminiContentGenerator
157558
158381
  });
157559
158382
  function createGeminiContentGenerator(config2, gcConfig) {
157560
- const version2 = "0.2.3-alpha.1";
158383
+ const version2 = "0.2.3-alpha.3";
157561
158384
  const userAgent2 = config2.userAgent || `QwenCode/${version2} (${process.platform}; ${process.arch})`;
157562
158385
  const baseHeaders = {
157563
158386
  "User-Agent": userAgent2
@@ -157732,6 +158555,12 @@ async function createContentGenerator(generatorConfig, config2, isInitialAuth) {
157732
158555
  if (model.startsWith("gemini")) {
157733
158556
  const { GeminiContentGenerator: GeminiContentGenerator3 } = await Promise.resolve().then(() => (init_geminiContentGenerator(), geminiContentGenerator_exports));
157734
158557
  baseGenerator = new GeminiContentGenerator3(generatorConfig, config2);
158558
+ } else if (model.startsWith("claude")) {
158559
+ const { VertexAnthropicContentGenerator: VertexAnthropicContentGenerator2 } = await Promise.resolve().then(() => (init_vertexAnthropicContentGenerator(), vertexAnthropicContentGenerator_exports));
158560
+ baseGenerator = new VertexAnthropicContentGenerator2(
158561
+ generatorConfig,
158562
+ config2
158563
+ );
157735
158564
  } else {
157736
158565
  const { createOpenAIContentGenerator: createOpenAIContentGenerator2 } = await Promise.resolve().then(() => (init_openaiContentGenerator2(), openaiContentGenerator_exports));
157737
158566
  baseGenerator = createOpenAIContentGenerator2(generatorConfig, config2);
@@ -222219,7 +223048,7 @@ Usage notes:
222219
223048
  });
222220
223049
 
222221
223050
  // packages/core/src/tools/redoc-fetch.ts
222222
- var REDOC_API_TIMEOUT_MS, REDOC_API_URL, REDOC_URL_PATTERN, RedocFetchToolInvocation, RedocFetchTool;
223051
+ var REDOC_API_TIMEOUT_MS, REDOC_API_URL, REDOC_URL_PATTERN, IMAGE_DOWNLOAD_TIMEOUT_MS, MAX_IMAGE_SIZE_MB, RedocFetchToolInvocation, RedocFetchTool;
222223
223052
  var init_redoc_fetch = __esm({
222224
223053
  "packages/core/src/tools/redoc-fetch.ts"() {
222225
223054
  "use strict";
@@ -222228,9 +223057,12 @@ var init_redoc_fetch = __esm({
222228
223057
  init_core5();
222229
223058
  init_partUtils();
222230
223059
  init_models();
223060
+ init_index_lite();
222231
223061
  REDOC_API_TIMEOUT_MS = 1e4;
222232
223062
  REDOC_API_URL = "https://athena-next.devops.xiaohongshu.com/api/media/query/redoc";
222233
223063
  REDOC_URL_PATTERN = /^https:\/\/docs\.xiaohongshu\.com\/doc\/([a-f0-9]+)$/;
223064
+ IMAGE_DOWNLOAD_TIMEOUT_MS = 3e4;
223065
+ MAX_IMAGE_SIZE_MB = 20;
222234
223066
  RedocFetchToolInvocation = class extends BaseToolInvocation {
222235
223067
  constructor(config2, params) {
222236
223068
  super(params);
@@ -222316,6 +223148,265 @@ var init_redoc_fetch = __esm({
222316
223148
  clearTimeout(timeoutId);
222317
223149
  }
222318
223150
  }
223151
+ /**
223152
+ * 解析文档内容并构建包含文本和图片(按原始顺序)的结构
223153
+ */
223154
+ async buildContentWithImages(content, signal) {
223155
+ try {
223156
+ const contentObj = JSON.parse(content);
223157
+ if (!contentObj.children || !Array.isArray(contentObj.children)) {
223158
+ return {
223159
+ parts: [{ text: content }],
223160
+ textContent: content,
223161
+ imageCount: 0,
223162
+ successCount: 0
223163
+ };
223164
+ }
223165
+ const parts = [];
223166
+ let textBuffer = [];
223167
+ let imageCount = 0;
223168
+ let successCount = 0;
223169
+ const processNode = /* @__PURE__ */ __name(async (node, depth = 0) => {
223170
+ if (!node) return;
223171
+ if (node.type === "image" && node.url) {
223172
+ imageCount++;
223173
+ if (textBuffer.length > 0) {
223174
+ parts.push({ text: textBuffer.join("\n") });
223175
+ textBuffer = [];
223176
+ }
223177
+ console.debug(
223178
+ `[RedocFetchTool] Downloading image ${imageCount} (depth ${depth}): ${node.url}`
223179
+ );
223180
+ const imageData = await this.downloadImageAsBase64(node.url, signal);
223181
+ if (imageData) {
223182
+ const imageCaption = `
223183
+ [\u56FE\u7247 ${imageCount}${node.width && node.height ? ` (${node.width}x${node.height})` : ""}]
223184
+ `;
223185
+ parts.push({ text: imageCaption });
223186
+ parts.push({
223187
+ inlineData: {
223188
+ data: imageData.data,
223189
+ mimeType: imageData.mimeType
223190
+ }
223191
+ });
223192
+ successCount++;
223193
+ console.debug(
223194
+ `[RedocFetchTool] Image ${imageCount} downloaded successfully`
223195
+ );
223196
+ } else {
223197
+ const placeholder = `
223198
+ [\u56FE\u7247 ${imageCount} - \u4E0B\u8F7D\u5931\u8D25: ${node.url}]
223199
+ `;
223200
+ parts.push({ text: placeholder });
223201
+ console.warn(
223202
+ `[RedocFetchTool] Failed to download image ${imageCount}: ${node.url}`
223203
+ );
223204
+ }
223205
+ return;
223206
+ }
223207
+ if (node.children && Array.isArray(node.children)) {
223208
+ if (node.type === "columns") {
223209
+ textBuffer.push("\n[\u591A\u680F\u5E03\u5C40]");
223210
+ } else if (node.type === "column") {
223211
+ textBuffer.push("\n[\u680F\u76EE]");
223212
+ }
223213
+ for (const child of node.children) {
223214
+ await processNode(child, depth + 1);
223215
+ }
223216
+ return;
223217
+ }
223218
+ const textContent3 = this.extractTextFromNode(node);
223219
+ if (textContent3) {
223220
+ textBuffer.push(textContent3);
223221
+ }
223222
+ }, "processNode");
223223
+ for (const child of contentObj.children) {
223224
+ await processNode(child, 0);
223225
+ }
223226
+ if (textBuffer.length > 0) {
223227
+ parts.push({ text: textBuffer.join("\n") });
223228
+ }
223229
+ const extractAllText = /* @__PURE__ */ __name((node) => {
223230
+ if (!node) return "";
223231
+ if (node.type === "image") return "";
223232
+ const nodeText = this.extractTextFromNode(node);
223233
+ let childrenText = "";
223234
+ if (node.children && Array.isArray(node.children)) {
223235
+ childrenText = node.children.map((child) => extractAllText(child)).filter((text) => text).join("\n");
223236
+ }
223237
+ return [nodeText, childrenText].filter(Boolean).join("\n");
223238
+ }, "extractAllText");
223239
+ const textContent2 = contentObj.children.map((child) => extractAllText(child)).filter((text) => text).join("\n");
223240
+ return {
223241
+ parts,
223242
+ textContent: textContent2,
223243
+ imageCount,
223244
+ successCount
223245
+ };
223246
+ } catch {
223247
+ return {
223248
+ parts: [{ text: content }],
223249
+ textContent: content,
223250
+ imageCount: 0,
223251
+ successCount: 0
223252
+ };
223253
+ }
223254
+ }
223255
+ /**
223256
+ * 从文档节点中提取文本内容(支持递归)
223257
+ */
223258
+ extractTextFromNode(node) {
223259
+ if (!node) return "";
223260
+ if (node.type === "image") return "";
223261
+ switch (node.type) {
223262
+ case "title":
223263
+ return this.extractTextFromChildren(node.children, "# ");
223264
+ case "h1":
223265
+ return this.extractTextFromChildren(node.children, "## ");
223266
+ case "h2":
223267
+ return this.extractTextFromChildren(node.children, "### ");
223268
+ case "h3":
223269
+ return this.extractTextFromChildren(node.children, "#### ");
223270
+ case "paragraph":
223271
+ return this.extractTextFromChildren(node.children);
223272
+ case "code":
223273
+ return `\`\`\`
223274
+ ${this.extractTextFromChildren(node.children)}
223275
+ \`\`\``;
223276
+ case "numbered-list":
223277
+ case "list":
223278
+ return this.extractTextFromChildren(node.children, "- ");
223279
+ case "block-quote":
223280
+ return `> ${this.extractTextFromChildren(node.children)}`;
223281
+ case "table":
223282
+ return this.extractTableContent(node);
223283
+ case "columns":
223284
+ case "column":
223285
+ case "table-cell-block":
223286
+ if (node.children && Array.isArray(node.children)) {
223287
+ return node.children.map((child) => this.extractTextFromNode(child)).filter((text) => text).join("\n");
223288
+ }
223289
+ return "";
223290
+ default:
223291
+ if (node.children) {
223292
+ return this.extractTextFromChildren(node.children);
223293
+ }
223294
+ if (node.text) {
223295
+ return node.text;
223296
+ }
223297
+ return "";
223298
+ }
223299
+ }
223300
+ /**
223301
+ * 从表格节点中提取文本内容
223302
+ */
223303
+ extractTableContent(tableNode) {
223304
+ if (!tableNode.children || !Array.isArray(tableNode.children)) {
223305
+ return "";
223306
+ }
223307
+ const rows = [];
223308
+ for (const row of tableNode.children) {
223309
+ if (row.type === "tr" && row.children) {
223310
+ const cells = row.children.map((cell) => {
223311
+ if (cell.type === "td") {
223312
+ return this.extractTextFromNode(cell);
223313
+ }
223314
+ return "";
223315
+ }).filter((text) => text);
223316
+ if (cells.length > 0) {
223317
+ rows.push("| " + cells.join(" | ") + " |");
223318
+ }
223319
+ }
223320
+ }
223321
+ return rows.join("\n");
223322
+ }
223323
+ /**
223324
+ * 从子节点数组中提取文本
223325
+ */
223326
+ extractTextFromChildren(children, prefix = "") {
223327
+ if (!children || !Array.isArray(children)) return "";
223328
+ return children.map((child) => {
223329
+ if (typeof child === "string") return child;
223330
+ if (child.text) return prefix + child.text;
223331
+ return this.extractTextFromNode(child);
223332
+ }).join("");
223333
+ }
223334
+ /**
223335
+ * 下载图片并转换为 Base64
223336
+ */
223337
+ async downloadImageAsBase64(url2, signal) {
223338
+ const controller = new AbortController();
223339
+ const timeoutId = setTimeout(
223340
+ () => controller.abort(),
223341
+ IMAGE_DOWNLOAD_TIMEOUT_MS
223342
+ );
223343
+ try {
223344
+ console.debug(`[RedocFetchTool] Downloading image from: ${url2}`);
223345
+ const response = await fetch(url2, {
223346
+ signal: controller.signal,
223347
+ headers: {
223348
+ "User-Agent": "Mozilla/5.0 (compatible; RDMind/1.0)"
223349
+ }
223350
+ });
223351
+ if (!response.ok) {
223352
+ console.warn(
223353
+ `[RedocFetchTool] Failed to download image: ${response.status} ${response.statusText}`
223354
+ );
223355
+ return null;
223356
+ }
223357
+ const contentType = response.headers.get("content-type") || "";
223358
+ const contentLength = response.headers.get("content-length");
223359
+ if (contentLength) {
223360
+ const sizeMB = parseInt(contentLength) / (1024 * 1024);
223361
+ if (sizeMB > MAX_IMAGE_SIZE_MB) {
223362
+ console.warn(
223363
+ `[RedocFetchTool] Image too large: ${sizeMB.toFixed(2)}MB (max: ${MAX_IMAGE_SIZE_MB}MB)`
223364
+ );
223365
+ return null;
223366
+ }
223367
+ }
223368
+ if (!contentType.startsWith("image/")) {
223369
+ console.warn(
223370
+ `[RedocFetchTool] URL does not return an image: ${contentType}`
223371
+ );
223372
+ return null;
223373
+ }
223374
+ const arrayBuffer = await response.arrayBuffer();
223375
+ const buffer = Buffer.from(arrayBuffer);
223376
+ const actualSizeMB = buffer.length / (1024 * 1024);
223377
+ if (actualSizeMB > MAX_IMAGE_SIZE_MB) {
223378
+ console.warn(
223379
+ `[RedocFetchTool] Downloaded image too large: ${actualSizeMB.toFixed(2)}MB`
223380
+ );
223381
+ return null;
223382
+ }
223383
+ const base64Data = buffer.toString("base64");
223384
+ let mimeType = contentType;
223385
+ if (!mimeType || mimeType === "application/octet-stream") {
223386
+ const urlPath = new URL(url2).pathname;
223387
+ const detectedMime = index_lite_default.getType(urlPath);
223388
+ if (detectedMime) {
223389
+ mimeType = detectedMime;
223390
+ }
223391
+ }
223392
+ console.debug(
223393
+ `[RedocFetchTool] Successfully downloaded image: ${actualSizeMB.toFixed(2)}MB, type: ${mimeType}`
223394
+ );
223395
+ return {
223396
+ data: base64Data,
223397
+ mimeType
223398
+ };
223399
+ } catch (error2) {
223400
+ if (error2 instanceof Error) {
223401
+ console.warn(
223402
+ `[RedocFetchTool] Error downloading image from ${url2}: ${error2.message}`
223403
+ );
223404
+ }
223405
+ return null;
223406
+ } finally {
223407
+ clearTimeout(timeoutId);
223408
+ }
223409
+ }
222319
223410
  getDescription() {
222320
223411
  const displayPrompt = this.params.prompt.length > 100 ? this.params.prompt.substring(0, 97) + "..." : this.params.prompt;
222321
223412
  return `\u83B7\u53D6 Redoc \u6587\u6863\u5E76\u5206\u6790\uFF1A${displayPrompt}`;
@@ -222353,31 +223444,33 @@ var init_redoc_fetch = __esm({
222353
223444
  console.debug(
222354
223445
  `[RedocFetchTool] Processing content with prompt: "${this.params.prompt}"`
222355
223446
  );
223447
+ const { parts, imageCount, successCount } = await this.buildContentWithImages(content, signal);
223448
+ console.debug(
223449
+ `[RedocFetchTool] Content parsed: ${imageCount} images found, ${successCount} downloaded successfully`
223450
+ );
222356
223451
  const geminiClient = this.config.getGeminiClient();
222357
- let processedContent = content;
222358
- try {
222359
- const contentObj = JSON.parse(content);
222360
- if (contentObj.children && Array.isArray(contentObj.children)) {
222361
- processedContent = `\u6587\u6863\u7ED3\u6784\u5316\u5185\u5BB9\uFF08\u5305\u542B ${contentObj.children.length} \u4E2A\u5185\u5BB9\u5757\uFF09\uFF1A
222362
- ${content}`;
222363
- }
222364
- } catch (_e2) {
222365
- processedContent = content;
222366
- }
222367
- const fallbackPrompt = `\u8BF7\u6839\u636E\u7528\u6237\u7684\u95EE\u9898\u5206\u6790\u4EE5\u4E0B\u6587\u6863\u5185\u5BB9\uFF1A
223452
+ const imageInfo = imageCount > 0 ? `
223453
+
223454
+ \u6CE8\u610F\uFF1A\u6587\u6863\u4E2D\u5305\u542B ${imageCount} \u5F20\u56FE\u7247\uFF08\u6210\u529F\u52A0\u8F7D ${successCount} \u5F20\uFF09\uFF0C\u56FE\u7247\u5DF2\u6309\u539F\u59CB\u4F4D\u7F6E\u63D2\u5165\u5230\u6587\u6863\u5185\u5BB9\u4E2D\uFF0C\u8BF7\u7ED3\u5408\u4E0A\u4E0B\u6587\u548C\u56FE\u7247\u5185\u5BB9\u8FDB\u884C\u5206\u6790\u3002` : "";
223455
+ const promptPart = {
223456
+ text: `\u8BF7\u6839\u636E\u7528\u6237\u7684\u95EE\u9898\u5206\u6790\u4EE5\u4E0B\u6587\u6863\u5185\u5BB9\uFF1A
222368
223457
 
222369
223458
  \u7528\u6237\u95EE\u9898\uFF1A${this.params.prompt}
222370
223459
 
222371
223460
  \u6587\u6863\u6765\u6E90\uFF1A${this.params.url}
223461
+ ${imageInfo}
222372
223462
 
222373
- \u6587\u6863\u5185\u5BB9\uFF1A
222374
- ---
222375
- ${processedContent}
222376
- ---
223463
+ \u6587\u6863\u5185\u5BB9\u5982\u4E0B\uFF08\u6587\u672C\u548C\u56FE\u7247\u6309\u539F\u59CB\u987A\u5E8F\u6392\u5217\uFF09\uFF1A
223464
+ ---`
223465
+ };
223466
+ const endPart = {
223467
+ text: `---
222377
223468
 
222378
- \u8BF7\u6839\u636E\u4E0A\u8FF0\u6587\u6863\u5185\u5BB9\u56DE\u7B54\u7528\u6237\u7684\u95EE\u9898\u3002`;
223469
+ \u8BF7\u6839\u636E\u4E0A\u8FF0\u6587\u6863\u5185\u5BB9\uFF08\u5305\u62EC\u6587\u672C\u548C\u56FE\u7247\uFF09\u56DE\u7B54\u7528\u6237\u7684\u95EE\u9898\u3002`
223470
+ };
223471
+ const allParts = [promptPart, ...parts, endPart];
222379
223472
  const result = await geminiClient.generateContent(
222380
- [{ role: "user", parts: [{ text: fallbackPrompt }] }],
223473
+ [{ role: "user", parts: allParts }],
222381
223474
  {},
222382
223475
  signal,
222383
223476
  this.config.getModel() || DEFAULT_QWEN_MODEL
@@ -222386,9 +223479,10 @@ ${processedContent}
222386
223479
  console.debug(
222387
223480
  `[RedocFetchTool] Successfully processed Redoc content from ${this.params.url}`
222388
223481
  );
223482
+ const displayMessage = imageCount > 0 ? `Redoc document from ${this.params.url} processed successfully (${successCount}/${imageCount} images loaded).` : `Redoc document from ${this.params.url} processed successfully.`;
222389
223483
  return {
222390
223484
  llmContent: resultText,
222391
- returnDisplay: `Redoc document from ${this.params.url} processed successfully.`
223485
+ returnDisplay: displayMessage
222392
223486
  };
222393
223487
  } catch (e4) {
222394
223488
  const error2 = e4;
@@ -222406,7 +223500,7 @@ ${processedContent}
222406
223500
  super(
222407
223501
  _RedocFetchTool.Name,
222408
223502
  "RedocFetch",
222409
- "\u4ECE\u5C0F\u7EA2\u4E66 Redoc \u6587\u6863\u83B7\u53D6\u5185\u5BB9\u5E76\u4F7F\u7528 AI \u6A21\u578B\u5904\u7406\n- \u63A5\u53D7 Redoc \u6587\u6863 URL \u548C\u63D0\u793A\u8BCD\u4F5C\u4E3A\u8F93\u5165\n- \u4ECE URL \u4E2D\u63D0\u53D6\u6587\u6863 ID \u5E76\u901A\u8FC7 Redoc API \u83B7\u53D6\u5185\u5BB9\n- \u4F7F\u7528 AI \u6A21\u578B\u5904\u7406\u6587\u6863\u5185\u5BB9\u5E76\u56DE\u7B54\u7528\u6237\u95EE\u9898\n- \u8FD4\u56DE\u6A21\u578B\u5BF9\u5185\u5BB9\u7684\u54CD\u5E94\n- \u9002\u7528\u4E8E\u5404\u79CD\u7C7B\u578B\u7684\u5C0F\u7EA2\u4E66 Redoc \u6587\u6863\uFF08\u6280\u672F\u6587\u6863\u3001\u4EA7\u54C1\u6587\u6863\u3001\u8BBE\u8BA1\u6587\u6863\u7B49\uFF09\n\n\u4F7F\u7528\u8BF4\u660E:\n - \u6B64\u5DE5\u5177\u4E13\u95E8\u9488\u5BF9\u683C\u5F0F\u4E3A https://docs.xiaohongshu.com/doc/{doc_id} \u7684 URL\n - URL \u5FC5\u987B\u5305\u542B\u6709\u6548\u7684\u6587\u6863 ID\uFF0832 \u4F4D\u5341\u516D\u8FDB\u5236\u5B57\u7B26\u4E32\uFF09\n - \u63D0\u793A\u8BCD\u5E94\u8BE5\u6E05\u6670\u63CF\u8FF0\u7528\u6237\u60F3\u4E86\u89E3\u6587\u6863\u7684\u54EA\u4E9B\u65B9\u9762\n - \u6B64\u5DE5\u5177\u4E3A\u53EA\u8BFB\u5DE5\u5177\uFF0C\u4E0D\u4F1A\u4FEE\u6539\u4EFB\u4F55\u6587\u4EF6\n - \u5982\u679C\u5185\u5BB9\u5F88\u5927\uFF0C\u7ED3\u679C\u53EF\u80FD\u4F1A\u88AB\u6458\u8981",
223503
+ "\u4ECE\u5C0F\u7EA2\u4E66 Redoc \u6587\u6863\u83B7\u53D6\u5185\u5BB9\u5E76\u4F7F\u7528 AI \u6A21\u578B\u5904\u7406\n- \u63A5\u53D7 Redoc \u6587\u6863 URL \u548C\u63D0\u793A\u8BCD\u4F5C\u4E3A\u8F93\u5165\n- \u4ECE URL \u4E2D\u63D0\u53D6\u6587\u6863 ID \u5E76\u901A\u8FC7 Redoc API \u83B7\u53D6\u5185\u5BB9\n- \u81EA\u52A8\u63D0\u53D6\u6587\u6863\u4E2D\u7684\u56FE\u7247\u5E76\u4E0B\u8F7D\uFF0C\u652F\u6301\u56FE\u7247\u7406\u89E3\n- \u4F7F\u7528 AI \u6A21\u578B\u5904\u7406\u6587\u6863\u5185\u5BB9\uFF08\u5305\u542B\u6587\u672C\u548C\u56FE\u7247\uFF09\u5E76\u56DE\u7B54\u7528\u6237\u95EE\u9898\n- \u8FD4\u56DE\u6A21\u578B\u5BF9\u5185\u5BB9\u7684\u54CD\u5E94\n- \u9002\u7528\u4E8E\u5404\u79CD\u7C7B\u578B\u7684\u5C0F\u7EA2\u4E66 Redoc \u6587\u6863\uFF08\u6280\u672F\u6587\u6863\u3001\u4EA7\u54C1\u6587\u6863\u3001\u8BBE\u8BA1\u6587\u6863\u7B49\uFF09\n\n\u4F7F\u7528\u8BF4\u660E:\n - \u6B64\u5DE5\u5177\u4E13\u95E8\u9488\u5BF9\u683C\u5F0F\u4E3A https://docs.xiaohongshu.com/doc/{doc_id} \u7684 URL\n - URL \u5FC5\u987B\u5305\u542B\u6709\u6548\u7684\u6587\u6863 ID\uFF0832 \u4F4D\u5341\u516D\u8FDB\u5236\u5B57\u7B26\u4E32\uFF09\n - \u63D0\u793A\u8BCD\u5E94\u8BE5\u6E05\u6670\u63CF\u8FF0\u7528\u6237\u60F3\u4E86\u89E3\u6587\u6863\u7684\u54EA\u4E9B\u65B9\u9762\n - \u6B64\u5DE5\u5177\u4E3A\u53EA\u8BFB\u5DE5\u5177\uFF0C\u4E0D\u4F1A\u4FEE\u6539\u4EFB\u4F55\u6587\u4EF6\n - \u5982\u679C\u6587\u6863\u5305\u542B\u56FE\u7247\uFF0C\u4F1A\u81EA\u52A8\u4E0B\u8F7D\u5E76\u53D1\u9001\u7ED9\u6A21\u578B\u8FDB\u884C\u7406\u89E3\n - \u652F\u6301\u7684\u56FE\u7247\u683C\u5F0F\uFF1APNG\u3001JPEG\u3001GIF\u3001WEBP \u7B49\n - \u5355\u5F20\u56FE\u7247\u6700\u5927 20MB",
222410
223504
  "fetch" /* Fetch */,
222411
223505
  {
222412
223506
  properties: {
@@ -258845,8 +259939,8 @@ var init_git_commit = __esm({
258845
259939
  "packages/core/src/generated/git-commit.ts"() {
258846
259940
  "use strict";
258847
259941
  init_esbuild_shims();
258848
- GIT_COMMIT_INFO = "73d256122";
258849
- CLI_VERSION = "0.2.3-alpha.1";
259942
+ GIT_COMMIT_INFO = "982b367d4";
259943
+ CLI_VERSION = "0.2.3-alpha.3";
258850
259944
  }
258851
259945
  });
258852
259946
 
@@ -359640,7 +360734,7 @@ __name(getPackageJson, "getPackageJson");
359640
360734
  // packages/cli/src/utils/version.ts
359641
360735
  async function getCliVersion() {
359642
360736
  const pkgJson = await getPackageJson();
359643
- return "0.2.3-alpha.1";
360737
+ return "0.2.3-alpha.3";
359644
360738
  }
359645
360739
  __name(getCliVersion, "getCliVersion");
359646
360740
 
@@ -367370,7 +368464,7 @@ var formatDuration = /* @__PURE__ */ __name((milliseconds) => {
367370
368464
 
367371
368465
  // packages/cli/src/generated/git-commit.ts
367372
368466
  init_esbuild_shims();
367373
- var GIT_COMMIT_INFO2 = "73d256122";
368467
+ var GIT_COMMIT_INFO2 = "982b367d4";
367374
368468
 
367375
368469
  // packages/cli/src/utils/systemInfo.ts
367376
368470
  async function getNpmVersion() {
@@ -375839,7 +376933,7 @@ async function buildSystemMessage(config2, sessionId, permissionMode, allowedBui
375839
376933
  model: config2.getModel(),
375840
376934
  permission_mode: permissionMode,
375841
376935
  slash_commands: slashCommands,
375842
- qwen_code_version: config2.getCliVersion() || "unknown",
376936
+ rdmind_version: config2.getCliVersion() || "unknown",
375843
376937
  agents: agentNames
375844
376938
  };
375845
376939
  return systemMessage;
@@ -407172,6 +408266,13 @@ var XHS_SSO_MODELS = [
407172
408266
  baseUrl: "https://runway.devops.xiaohongshu.com/openai/moonshot/v1",
407173
408267
  contextWindow: "256K",
407174
408268
  description: "\u5728 Agent\u3001\u4EE3\u7801\u3001\u89C6\u89C9\u7406\u89E3\u53CA\u4E00\u7CFB\u5217\u901A\u7528\u667A\u80FD\u4EFB\u52A1\u4E0A\u53D6\u5F97\u5F00\u6E90 SoTA \u8868\u73B0"
408269
+ },
408270
+ {
408271
+ id: "claude-opus-4-5@20251101",
408272
+ displayName: "Claude Opus 4.5",
408273
+ baseUrl: "https://runway.devops.rednote.life/openai/google/anthropic/v1",
408274
+ contextWindow: "200K",
408275
+ description: "Anthropic \u6700\u5F3A\u5927\u7684\u6A21\u578B\uFF0C\u64C5\u957F\u590D\u6742\u63A8\u7406\u548C\u4EE3\u7801\u751F\u6210"
407175
408276
  }
407176
408277
  ];
407177
408278
 
@@ -425800,7 +426901,7 @@ var GeminiAgent = class {
425800
426901
  name: APPROVAL_MODE_INFO[mode].name,
425801
426902
  description: APPROVAL_MODE_INFO[mode].description
425802
426903
  }));
425803
- const version2 = "0.2.3-alpha.1";
426904
+ const version2 = "0.2.3-alpha.3";
425804
426905
  return {
425805
426906
  protocolVersion: PROTOCOL_VERSION,
425806
426907
  agentInfo: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rdmind/rdmind",
3
- "version": "0.2.3-alpha.1",
3
+ "version": "0.2.3-alpha.3",
4
4
  "description": "RDMind - AI-powered coding assistant",
5
5
  "type": "module",
6
6
  "main": "cli.js",
@@ -20,7 +20,7 @@
20
20
  "locales"
21
21
  ],
22
22
  "config": {
23
- "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.3-alpha.1"
23
+ "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.2.3-alpha.3"
24
24
  },
25
25
  "publishConfig": {
26
26
  "access": "public"