koishi-plugin-chatluna-google-gemini-adapter 1.3.0 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.cjs CHANGED
@@ -79,7 +79,12 @@ async function langchainMessageToGeminiMessage(messages, plugin, model) {
  role,
  parts: []
  };
- result.parts = typeof message.content === "string" ? [{ text: message.content }] : await processGeminiContentParts(plugin, message.content);
+ const thoughtData = message.additional_kwargs["thought_data"] ?? {};
+ result.parts = typeof message.content === "string" ? [{ text: message.content, ...thoughtData }] : await processGeminiContentParts(
+ plugin,
+ message.content,
+ thoughtData
+ );
  const images = message.additional_kwargs.images;
  if (images) {
  processImageParts(result, images, model);
@@ -129,6 +134,7 @@ function parseJsonArgs(args) {
  }
  __name(parseJsonArgs, "parseJsonArgs");
  function processFunctionMessage(message) {
+ const thoughtData = message.additional_kwargs["thought_data"] ?? {};
  if (message["tool_calls"]) {
  message = message;
  const toolCalls = message.tool_calls;
@@ -140,7 +146,8 @@ function processFunctionMessage(message) {
  name: toolCall.name,
  args: toolCall.args,
  id: toolCall.id
- }
+ },
+ ...thoughtData
  };
  })
  };
@@ -195,11 +202,11 @@ async function processGeminiImageContent(plugin, part) {
  };
  }
  __name(processGeminiImageContent, "processGeminiImageContent");
- async function processGeminiContentParts(plugin, content) {
+ async function processGeminiContentParts(plugin, content, thoughtData) {
  return Promise.all(
  content.map(async (part) => {
  if ((0, import_string.isMessageContentText)(part)) {
- return { text: part.text };
+ return { text: part.text, ...thoughtData };
  }
  if ((0, import_string.isMessageContentImageUrl)(part)) {
  return await processGeminiImageContent(plugin, part);
@@ -764,10 +771,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
  const messageChunk = this._createMessageChunk(
  updatedContent,
  updatedToolCalling,
- this.ctx.chatluna_storage != null ? void 0 : partAsTypeCheck(
- chunk,
- (part) => part["inlineData"] != null
- )
+ chunk
  );
  const generationChunk = new import_outputs.ChatGenerationChunk({
  message: messageChunk,
@@ -868,7 +872,11 @@ ${groundingContent}`
  }
  }
  }
- _createMessageChunk(content, functionCall, imagePart) {
+ _createMessageChunk(content, functionCall, chunk) {
+ const imagePart = this.ctx.chatluna_storage != null ? void 0 : partAsTypeCheck(
+ chunk,
+ (part) => part["inlineData"] != null
+ );
  const messageChunk = new import_messages.AIMessageChunk({
  content: content ?? "",
  tool_call_chunks: [functionCall].filter(Boolean)
@@ -876,7 +884,10 @@ ${groundingContent}`
  messageChunk.additional_kwargs = {
  images: imagePart ? [
  `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
- ] : void 0
+ ] : void 0,
+ thought_data: {
+ thoughtSignature: chunk["thoughtSignature"]
+ }
  };
  return messageChunk;
  }
@@ -969,7 +980,10 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  import_types2.ModelCapabilities.ToolCall
  ]
  };
- if (model.name.includes("gemini-2.5") && !model.name.includes("pro") && !model.name.includes("image")) {
+ const thinkingModel = ["gemini-2.5-pro", "gemini-3.0-pro"];
+ if (thinkingModel.some(
+ (name2) => name2.includes(model.name.toLowerCase())
+ )) {
  if (!model.name.includes("-thinking")) {
  models.push(
  { ...info, name: model.name + "-non-thinking" },
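
Note on the change above (mirrored in lib/index.mjs below): the streaming requester now stores the thoughtSignature that Gemini attaches to a response part into additional_kwargs.thought_data on each AIMessageChunk, and the message converters spread that data back onto the text and functionCall parts of the next request, so the signature survives a full round trip across turns. A minimal TypeScript sketch of that round trip, assuming a response part that carries a top-level thoughtSignature string; the helper names are hypothetical, not part of the adapter's API:

    // Sketch only: captureThoughtData / buildTextPart are illustrative helpers,
    // not exports of the adapter.
    interface ThoughtData {
        thoughtSignature?: string
    }

    // Response side: remember the signature Gemini attached to a streamed part.
    function captureThoughtData(part: { thoughtSignature?: string }): ThoughtData {
        return { thoughtSignature: part.thoughtSignature }
    }

    // Request side: echo the stored signature back on the outgoing part, the way
    // the converters spread additional_kwargs.thought_data onto { text } parts.
    function buildTextPart(text: string, thoughtData: ThoughtData = {}) {
        return { text, ...thoughtData }
    }

    const remembered = captureThoughtData({ thoughtSignature: "sig-from-model" })
    const nextPart = buildTextPart("previous assistant turn", remembered)
    // nextPart: { text: "previous assistant turn", thoughtSignature: "sig-from-model" }
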
package/lib/index.mjs CHANGED
@@ -76,7 +76,12 @@ async function langchainMessageToGeminiMessage(messages, plugin, model) {
  role,
  parts: []
  };
- result.parts = typeof message.content === "string" ? [{ text: message.content }] : await processGeminiContentParts(plugin, message.content);
+ const thoughtData = message.additional_kwargs["thought_data"] ?? {};
+ result.parts = typeof message.content === "string" ? [{ text: message.content, ...thoughtData }] : await processGeminiContentParts(
+ plugin,
+ message.content,
+ thoughtData
+ );
  const images = message.additional_kwargs.images;
  if (images) {
  processImageParts(result, images, model);
@@ -126,6 +131,7 @@ function parseJsonArgs(args) {
  }
  __name(parseJsonArgs, "parseJsonArgs");
  function processFunctionMessage(message) {
+ const thoughtData = message.additional_kwargs["thought_data"] ?? {};
  if (message["tool_calls"]) {
  message = message;
  const toolCalls = message.tool_calls;
@@ -137,7 +143,8 @@ function processFunctionMessage(message) {
  name: toolCall.name,
  args: toolCall.args,
  id: toolCall.id
- }
+ },
+ ...thoughtData
  };
  })
  };
@@ -192,11 +199,11 @@ async function processGeminiImageContent(plugin, part) {
  };
  }
  __name(processGeminiImageContent, "processGeminiImageContent");
- async function processGeminiContentParts(plugin, content) {
+ async function processGeminiContentParts(plugin, content, thoughtData) {
  return Promise.all(
  content.map(async (part) => {
  if (isMessageContentText(part)) {
- return { text: part.text };
+ return { text: part.text, ...thoughtData };
  }
  if (isMessageContentImageUrl(part)) {
  return await processGeminiImageContent(plugin, part);
@@ -761,10 +768,7 @@ var GeminiRequester = class extends ModelRequester {
  const messageChunk = this._createMessageChunk(
  updatedContent,
  updatedToolCalling,
- this.ctx.chatluna_storage != null ? void 0 : partAsTypeCheck(
- chunk,
- (part) => part["inlineData"] != null
- )
+ chunk
  );
  const generationChunk = new ChatGenerationChunk({
  message: messageChunk,
@@ -865,7 +869,11 @@ ${groundingContent}`
  }
  }
  }
- _createMessageChunk(content, functionCall, imagePart) {
+ _createMessageChunk(content, functionCall, chunk) {
+ const imagePart = this.ctx.chatluna_storage != null ? void 0 : partAsTypeCheck(
+ chunk,
+ (part) => part["inlineData"] != null
+ );
  const messageChunk = new AIMessageChunk({
  content: content ?? "",
  tool_call_chunks: [functionCall].filter(Boolean)
@@ -873,7 +881,10 @@ ${groundingContent}`
  messageChunk.additional_kwargs = {
  images: imagePart ? [
  `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
- ] : void 0
+ ] : void 0,
+ thought_data: {
+ thoughtSignature: chunk["thoughtSignature"]
+ }
  };
  return messageChunk;
  }
@@ -966,7 +977,10 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  ModelCapabilities.ToolCall
  ]
  };
- if (model.name.includes("gemini-2.5") && !model.name.includes("pro") && !model.name.includes("image")) {
+ const thinkingModel = ["gemini-2.5-pro", "gemini-3.0-pro"];
+ if (thinkingModel.some(
+ (name2) => name2.includes(model.name.toLowerCase())
+ )) {
  if (!model.name.includes("-thinking")) {
  models.push(
  { ...info, name: model.name + "-non-thinking" },
package/lib/types.d.ts CHANGED
@@ -2,7 +2,10 @@ export interface ChatCompletionResponseMessage {
  role: string;
  parts?: ChatPart[];
  }
- export type ChatPart = ChatMessagePart | ChatInlineDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart | ChatUploadDataPart | ChatUsageMetadataPart;
+ export type BaseChatPart = {
+ thoughtSignature?: string;
+ };
+ export type ChatPart = (ChatMessagePart & BaseChatPart) | ChatInlineDataPart | (ChatFunctionCallingPart & BaseChatPart) | ChatFunctionResponsePart | ChatUploadDataPart | ChatUsageMetadataPart;
  export type ChatMessagePart = {
  text: string;
  thought?: boolean;
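
The widened ChatPart union above intersects an optional thoughtSignature onto text and function-call parts. A hypothetical value that type-checks under the new union, using only the fields visible in this diff:

    // Illustrative value only; ChatPart and BaseChatPart are declared in lib/types.d.ts.
    const textPart: { text: string; thought?: boolean } & { thoughtSignature?: string } = {
        text: "final answer",
        thoughtSignature: "opaque-signature-from-gemini"
    }
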
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.3.0",
+ "version": "1.3.1",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",