koishi-plugin-chatluna-google-gemini-adapter 1.2.5 → 1.2.7

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
package/lib/index.cjs CHANGED
@@ -76,7 +76,7 @@ var import_stream = require("koishi-plugin-chatluna/utils/stream");
  var import_messages = require("@langchain/core/messages");
  var import_zod_to_json_schema = require("zod-to-json-schema");
  async function langchainMessageToGeminiMessage(messages, model) {
- const mappedMessage = await Promise.all(
+ const mappedMessages = await Promise.all(
  messages.map(async (rawMessage) => {
  const role = messageTypeToGeminiRole(rawMessage.getType());
  if (role === "function" || rawMessage.additional_kwargs?.function_call != null) {
@@ -90,15 +90,15 @@ async function langchainMessageToGeminiMessage(messages, model) {
  name: rawMessage.name,
  content: (() => {
  try {
- const result3 = JSON.parse(
+ const result2 = JSON.parse(
  rawMessage.content
  );
- if (typeof result3 === "string") {
+ if (typeof result2 === "string") {
  return {
- response: result3
+ response: result2
  };
  } else {
- return result3;
+ return result2;
  }
  } catch (e) {
  return {
@@ -112,15 +112,15 @@ async function langchainMessageToGeminiMessage(messages, model) {
  name: rawMessage.additional_kwargs.function_call.name,
  args: (() => {
  try {
- const result3 = JSON.parse(
+ const result2 = JSON.parse(
  rawMessage.additional_kwargs.function_call.arguments
  );
- if (typeof result3 === "string") {
+ if (typeof result2 === "string") {
  return {
- input: result3
+ input: result2
  };
  } else {
- return result3;
+ return result2;
  }
  } catch (e) {
  return {
@@ -134,7 +134,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  };
  }
  const images = rawMessage.additional_kwargs.images;
- const result2 = {
+ const result = {
  role,
  parts: [
  {
@@ -146,7 +146,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  for (const image of images) {
  const mineType = image.split(";")?.[0]?.split(":")?.[1];
  const data = image.replace(/^data:image\/\w+;base64,/, "");
- result2.parts.push({
+ result.parts.push({
  inline_data: {
  // base64 image match type
  data,
@@ -154,7 +154,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  }
  });
  }
- result2.parts = result2.parts.filter((uncheckedPart) => {
+ result.parts = result.parts.filter((uncheckedPart) => {
  const part = partAsTypeCheck(
  uncheckedPart,
  (part2) => part2["text"] != null
@@ -162,54 +162,39 @@ async function langchainMessageToGeminiMessage(messages, model) {
  return part == null || part.text.length > 0;
  });
  }
- return result2;
+ return result;
  })
  );
- const result = [];
- for (let i = 0; i < mappedMessage.length; i++) {
- const message = mappedMessage[i];
- if (message.role !== "system") {
- result.push(message);
- continue;
- }
- result.push({
- role: "user",
- parts: message.parts
- });
- const nextMessage = mappedMessage?.[i + 1];
- if (nextMessage?.role === "model") {
- continue;
- }
- if (nextMessage?.role === "user" || nextMessage?.role === "system") {
- result.push({
- role: "model",
- parts: [{ text: "Okay, what do I need to do?" }]
- });
- }
- if (nextMessage?.role === "system") {
- result.push({
- role: "user",
- parts: [
- {
- text: "Continue what I said to you last message. Follow these instructions."
- }
- ]
- });
+ return mappedMessages;
+ }
+ __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");
+ function extractSystemMessages(messages) {
+ let lastSystemMessage;
+ for (let i = messages.length - 1; i >= 0; i--) {
+ if (messages[i].role === "system") {
+ lastSystemMessage = messages[i];
+ break;
  }
  }
- if (result[result.length - 1].role === "model") {
- result.push({
- role: "user",
- parts: [
- {
- text: "Continue what I said to you last message. Follow these instructions."
- }
- ]
- });
+ if (lastSystemMessage == null) {
+ return [void 0, messages];
  }
- return result;
+ const systemMessages = messages.slice(
+ 0,
+ messages.indexOf(lastSystemMessage)
+ );
+ return [
+ {
+ role: "user",
+ parts: systemMessages.reduce((acc, cur) => {
+ acc.push(...cur.parts);
+ return acc;
+ }, [])
+ },
+ messages.slice(messages.indexOf(lastSystemMessage))
+ ];
  }
- __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");
+ __name(extractSystemMessages, "extractSystemMessages");
  function partAsType(part) {
  return part;
  }
@@ -355,16 +340,18 @@ var GeminiRequester = class extends import_api.ModelRequester {
  let model = params.model;
  let enabledThinking = null;
  if (model.includes("-thinking") && model.includes("gemini-2.5")) {
- enabledThinking = !model.includes("-no-thinking");
- model = model.replace("-no-thinking", "").replace("-thinking", "");
+ enabledThinking = !model.includes("-non-thinking");
+ model = model.replace("-nom-thinking", "").replace("-thinking", "");
  }
+ const geminiMessages = await langchainMessageToGeminiMessage(
+ params.input,
+ model
+ );
+ const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
  const response = await this._post(
  `models/${model}:streamGenerateContent?alt=sse`,
  {
- contents: await langchainMessageToGeminiMessage(
- params.input,
- model
- ),
+ contents: modelMessages,
  safetySettings: [
  {
  category: "HARM_CATEGORY_HARASSMENT",
@@ -397,10 +384,11 @@ var GeminiRequester = class extends import_api.ModelRequester {
  "gemini-2.0-flash-exp"
  ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
  thinkingConfig: enabledThinking != null || this._pluginConfig.includeThoughts ? {
- thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? -1 : 0,
+ thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? -1 : -1,
  includeThoughts: this._pluginConfig.includeThoughts
  } : void 0
  },
+ systemInstruction: systemInstruction != null ? systemInstruction : void 0,
  tools: params.tools != null || this._pluginConfig.googleSearch || this._pluginConfig.codeExecution || this._pluginConfig.urlContext ? formatToolsToGeminiAITools(
  params.tools ?? [],
  this._pluginConfig,
@@ -413,7 +401,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
  );
  let errorCount = 0;
  let groundingContent = "";
- let currentGroudingIndex = 0;
+ let currentGroundingIndex = 0;
  await (0, import_sse.checkResponse)(response);
  const readableStream = new ReadableStream({
  async start(controller) {
@@ -444,7 +432,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
  controller.enqueue(part);
  }
  for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
- groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ groundingContent += `[^${currentGroundingIndex++}]: [${source.web.title}](${source.web.uri})
  `;
  }
  }
@@ -711,10 +699,10 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  functionCall: !model.name.includes("vision"),
  supportMode: ["all"]
  };
- if (model.name.includes("gemini-2.5")) {
+ if (model.name.includes("gemini-2.5") && !model.name.includes("pro")) {
  if (!model.name.includes("-thinking")) {
  models.push(
- { ...info, name: model.name + "-no-thinking" },
+ { ...info, name: model.name + "-nonthinking" },
  { ...info, name: model.name + "-thinking" },
  info
  );
@@ -814,7 +802,7 @@ var Config3 = import_koishi.Schema.intersect([
  googleSearch: import_koishi.Schema.boolean().default(false),
  codeExecution: import_koishi.Schema.boolean().default(false),
  urlContext: import_koishi.Schema.boolean().default(false),
- thinkingBudget: import_koishi.Schema.number().min(-1).max(24576).step(16).default(-1),
+ thinkingBudget: import_koishi.Schema.number().min(128).max(24576).step(16).default(128),
  includeThoughts: import_koishi.Schema.boolean().default(false),
  imageGeneration: import_koishi.Schema.boolean().default(false),
  groundingContentDisplay: import_koishi.Schema.boolean().default(false),
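
The functional core of this release is visible in the hunks above: 1.2.5 rewrote system messages into synthetic user/model turns (inserting "Okay, what do I need to do?" model replies and "Continue what I said..." user turns), while 1.2.7 returns the mapped messages untouched and splits system content out with the new extractSystemMessages helper, whose first tuple element feeds Gemini's native systemInstruction request field. A minimal, self-contained TypeScript sketch of that split, mirroring the compiled output above (GeminiMessage and GeminiPart are simplified stand-ins for the package's ChatCompletionResponseMessage types):

```ts
// Sketch of extractSystemMessages as shipped in 1.2.7 (simplified types).
interface GeminiPart {
    text?: string;
}
interface GeminiMessage {
    role: string;
    parts: GeminiPart[];
}

function extractSystemMessages(
    messages: GeminiMessage[]
): [GeminiMessage | undefined, GeminiMessage[]] {
    // Find the LAST system message, scanning from the end.
    let lastIndex = -1;
    for (let i = messages.length - 1; i >= 0; i--) {
        if (messages[i].role === "system") {
            lastIndex = i;
            break;
        }
    }
    // No system message: nothing to extract.
    if (lastIndex === -1) return [undefined, messages];
    // Everything BEFORE the last system message is merged into a single
    // user-role instruction block; the slice starting AT the last system
    // message is returned as the remaining conversation.
    const merged: GeminiMessage = {
        role: "user",
        parts: messages.slice(0, lastIndex).flatMap((m) => m.parts)
    };
    return [merged, messages.slice(lastIndex)];
}
```

Note the slice boundaries, which follow the compiled code exactly: the merged instruction excludes the last system message itself, and that message stays at the head of the second tuple element. The request body then sends the first element as systemInstruction and the second as contents.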
package/lib/index.mjs CHANGED
@@ -59,7 +59,7 @@ import {
  } from "@langchain/core/messages";
  import { zodToJsonSchema } from "zod-to-json-schema";
  async function langchainMessageToGeminiMessage(messages, model) {
- const mappedMessage = await Promise.all(
+ const mappedMessages = await Promise.all(
  messages.map(async (rawMessage) => {
  const role = messageTypeToGeminiRole(rawMessage.getType());
  if (role === "function" || rawMessage.additional_kwargs?.function_call != null) {
@@ -73,15 +73,15 @@ async function langchainMessageToGeminiMessage(messages, model) {
  name: rawMessage.name,
  content: (() => {
  try {
- const result3 = JSON.parse(
+ const result2 = JSON.parse(
  rawMessage.content
  );
- if (typeof result3 === "string") {
+ if (typeof result2 === "string") {
  return {
- response: result3
+ response: result2
  };
  } else {
- return result3;
+ return result2;
  }
  } catch (e) {
  return {
@@ -95,15 +95,15 @@ async function langchainMessageToGeminiMessage(messages, model) {
  name: rawMessage.additional_kwargs.function_call.name,
  args: (() => {
  try {
- const result3 = JSON.parse(
+ const result2 = JSON.parse(
  rawMessage.additional_kwargs.function_call.arguments
  );
- if (typeof result3 === "string") {
+ if (typeof result2 === "string") {
  return {
- input: result3
+ input: result2
  };
  } else {
- return result3;
+ return result2;
  }
  } catch (e) {
  return {
@@ -117,7 +117,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  };
  }
  const images = rawMessage.additional_kwargs.images;
- const result2 = {
+ const result = {
  role,
  parts: [
  {
@@ -129,7 +129,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  for (const image of images) {
  const mineType = image.split(";")?.[0]?.split(":")?.[1];
  const data = image.replace(/^data:image\/\w+;base64,/, "");
- result2.parts.push({
+ result.parts.push({
  inline_data: {
  // base64 image match type
  data,
@@ -137,7 +137,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  }
  });
  }
- result2.parts = result2.parts.filter((uncheckedPart) => {
+ result.parts = result.parts.filter((uncheckedPart) => {
  const part = partAsTypeCheck(
  uncheckedPart,
  (part2) => part2["text"] != null
@@ -145,54 +145,39 @@ async function langchainMessageToGeminiMessage(messages, model) {
  return part == null || part.text.length > 0;
  });
  }
- return result2;
+ return result;
  })
  );
- const result = [];
- for (let i = 0; i < mappedMessage.length; i++) {
- const message = mappedMessage[i];
- if (message.role !== "system") {
- result.push(message);
- continue;
- }
- result.push({
- role: "user",
- parts: message.parts
- });
- const nextMessage = mappedMessage?.[i + 1];
- if (nextMessage?.role === "model") {
- continue;
- }
- if (nextMessage?.role === "user" || nextMessage?.role === "system") {
- result.push({
- role: "model",
- parts: [{ text: "Okay, what do I need to do?" }]
- });
- }
- if (nextMessage?.role === "system") {
- result.push({
- role: "user",
- parts: [
- {
- text: "Continue what I said to you last message. Follow these instructions."
- }
- ]
- });
+ return mappedMessages;
+ }
+ __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");
+ function extractSystemMessages(messages) {
+ let lastSystemMessage;
+ for (let i = messages.length - 1; i >= 0; i--) {
+ if (messages[i].role === "system") {
+ lastSystemMessage = messages[i];
+ break;
  }
  }
- if (result[result.length - 1].role === "model") {
- result.push({
- role: "user",
- parts: [
- {
- text: "Continue what I said to you last message. Follow these instructions."
- }
- ]
- });
+ if (lastSystemMessage == null) {
+ return [void 0, messages];
  }
- return result;
+ const systemMessages = messages.slice(
+ 0,
+ messages.indexOf(lastSystemMessage)
+ );
+ return [
+ {
+ role: "user",
+ parts: systemMessages.reduce((acc, cur) => {
+ acc.push(...cur.parts);
+ return acc;
+ }, [])
+ },
+ messages.slice(messages.indexOf(lastSystemMessage))
+ ];
  }
- __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");
+ __name(extractSystemMessages, "extractSystemMessages");
  function partAsType(part) {
  return part;
  }
@@ -338,16 +323,18 @@ var GeminiRequester = class extends ModelRequester {
  let model = params.model;
  let enabledThinking = null;
  if (model.includes("-thinking") && model.includes("gemini-2.5")) {
- enabledThinking = !model.includes("-no-thinking");
- model = model.replace("-no-thinking", "").replace("-thinking", "");
+ enabledThinking = !model.includes("-non-thinking");
+ model = model.replace("-nom-thinking", "").replace("-thinking", "");
  }
+ const geminiMessages = await langchainMessageToGeminiMessage(
+ params.input,
+ model
+ );
+ const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
  const response = await this._post(
  `models/${model}:streamGenerateContent?alt=sse`,
  {
- contents: await langchainMessageToGeminiMessage(
- params.input,
- model
- ),
+ contents: modelMessages,
  safetySettings: [
  {
  category: "HARM_CATEGORY_HARASSMENT",
@@ -380,10 +367,11 @@ var GeminiRequester = class extends ModelRequester {
  "gemini-2.0-flash-exp"
  ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
  thinkingConfig: enabledThinking != null || this._pluginConfig.includeThoughts ? {
- thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? -1 : 0,
+ thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? -1 : -1,
  includeThoughts: this._pluginConfig.includeThoughts
  } : void 0
  },
+ systemInstruction: systemInstruction != null ? systemInstruction : void 0,
  tools: params.tools != null || this._pluginConfig.googleSearch || this._pluginConfig.codeExecution || this._pluginConfig.urlContext ? formatToolsToGeminiAITools(
  params.tools ?? [],
  this._pluginConfig,
@@ -396,7 +384,7 @@ var GeminiRequester = class extends ModelRequester {
  );
  let errorCount = 0;
  let groundingContent = "";
- let currentGroudingIndex = 0;
+ let currentGroundingIndex = 0;
  await checkResponse(response);
  const readableStream = new ReadableStream({
  async start(controller) {
@@ -427,7 +415,7 @@ var GeminiRequester = class extends ModelRequester {
  controller.enqueue(part);
  }
  for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
- groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ groundingContent += `[^${currentGroundingIndex++}]: [${source.web.title}](${source.web.uri})
  `;
  }
  }
@@ -694,10 +682,10 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  functionCall: !model.name.includes("vision"),
  supportMode: ["all"]
  };
- if (model.name.includes("gemini-2.5")) {
+ if (model.name.includes("gemini-2.5") && !model.name.includes("pro")) {
  if (!model.name.includes("-thinking")) {
  models.push(
- { ...info, name: model.name + "-no-thinking" },
+ { ...info, name: model.name + "-nonthinking" },
  { ...info, name: model.name + "-thinking" },
  info
  );
@@ -797,7 +785,7 @@ var Config3 = Schema.intersect([
  googleSearch: Schema.boolean().default(false),
  codeExecution: Schema.boolean().default(false),
  urlContext: Schema.boolean().default(false),
- thinkingBudget: Schema.number().min(-1).max(24576).step(16).default(-1),
+ thinkingBudget: Schema.number().min(128).max(24576).step(16).default(128),
  includeThoughts: Schema.boolean().default(false),
  imageGeneration: Schema.boolean().default(false),
  groundingContentDisplay: Schema.boolean().default(false),
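
Both builds also change how the gemini-2.5 thinking suffixes are parsed. The sketch below is a standalone reproduction of the published 1.2.7 branch; parseThinkingSuffix is an illustrative name (in the build this logic is inline in GeminiRequester), but the string literals are copied verbatim from the diff. As shipped, three spellings appear: the check uses "-non-thinking", the replace uses "-nom-thinking", and the client (further down) registers model names with "-nonthinking".

```ts
// Standalone reproduction of the 1.2.7 suffix handling shown above.
// The string literals match the published build exactly.
function parseThinkingSuffix(model: string): {
    model: string;
    enabledThinking: boolean | null;
} {
    let enabledThinking: boolean | null = null;
    if (model.includes("-thinking") && model.includes("gemini-2.5")) {
        enabledThinking = !model.includes("-non-thinking");
        model = model.replace("-nom-thinking", "").replace("-thinking", "");
    }
    return { model, enabledThinking };
}

// The plain thinking suffix resolves cleanly:
// -> { model: "gemini-2.5-flash", enabledThinking: true }
console.log(parseThinkingSuffix("gemini-2.5-flash-thinking"));

// The "-nonthinking" names the client registers contain no "-thinking"
// substring, so the branch is skipped and enabledThinking stays null,
// which omits thinkingConfig unless includeThoughts is set:
// -> { model: "gemini-2.5-flash-nonthinking", enabledThinking: null }
console.log(parseThinkingSuffix("gemini-2.5-flash-nonthinking"));
```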
package/lib/utils.d.ts CHANGED
@@ -3,6 +3,7 @@ import { StructuredTool } from '@langchain/core/tools';
  import { ChatCompletionFunction, ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatPart } from './types';
  import { Config } from '.';
  export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
+ export declare function extractSystemMessages(messages: ChatCompletionResponseMessage[]): [ChatCompletionResponseMessage, ChatCompletionResponseMessage[]];
  export declare function partAsType<T extends ChatPart>(part: ChatPart): T;
  export declare function partAsTypeCheck<T extends ChatPart>(part: ChatPart, check: (part: ChatPart & unknown) => boolean): T | undefined;
  export declare function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
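
For TypeScript consumers, the added declaration exports extractSystemMessages alongside the existing helpers. A hedged usage sketch follows; the import path is an assumption based on the shipped lib/ layout, and the helpers may instead be re-exported from another entry point. Note also that the declared tuple types the first element as non-optional, while the implementation returns void 0 when no system message is present, so a guard is still prudent:

```ts
import { BaseMessage } from "@langchain/core/messages";
// Assumed import path, inferred from the shipped lib/utils.d.ts.
import {
    extractSystemMessages,
    langchainMessageToGeminiMessage
} from "koishi-plugin-chatluna-google-gemini-adapter/lib/utils";

async function buildRequestPayload(messages: BaseMessage[], model: string) {
    const geminiMessages = await langchainMessageToGeminiMessage(messages, model);
    const [systemInstruction, contents] = extractSystemMessages(geminiMessages);
    return {
        contents,
        // Guard: the implementation can yield undefined here even though
        // the declaration does not say so.
        systemInstruction: systemInstruction ?? undefined
    };
}
```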
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.2.5",
+ "version": "1.2.7",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -72,7 +72,7 @@
  },
  "peerDependencies": {
  "koishi": "^4.18.7",
- "koishi-plugin-chatluna": "^1.3.0-alpha.4"
+ "koishi-plugin-chatluna": "^1.3.0-alpha.7"
  },
  "koishi": {
  "description": {