koishi-plugin-chatluna-google-gemini-adapter 1.1.0-beta.2 → 1.1.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.cjs CHANGED
@@ -56,7 +56,6 @@ var import_error2 = require("koishi-plugin-chatluna/utils/error");
56
56
  // src/requester.ts
57
57
  var import_messages2 = require("@langchain/core/messages");
58
58
  var import_outputs = require("@langchain/core/outputs");
59
- var import_json = require("@streamparser/json");
60
59
  var import_api = require("koishi-plugin-chatluna/llm-core/platform/api");
61
60
  var import_error = require("koishi-plugin-chatluna/utils/error");
62
61
  var import_sse = require("koishi-plugin-chatluna/utils/sse");
@@ -243,14 +242,25 @@ function formatToolsToGeminiAITools(tools, config, model) {
243
242
  }
244
243
  const functions = tools.map(formatToolToGeminiAITool);
245
244
  const result = [];
246
- if (functions.length > 0 && !config.googleSearch) {
245
+ const unsupportedModels = [
246
+ "gemini-1.0",
247
+ "gemini-2.0-flash-lite",
248
+ "gemini-1.5-flash"
249
+ ];
250
+ let googleSearch = config.googleSearch;
251
+ if (functions.length > 0 && !googleSearch) {
247
252
  result.push({
248
253
  functionDeclarations: functions
249
254
  });
250
- } else if (functions.length > 0 && config.googleSearch) {
255
+ } else if (functions.length > 0 && googleSearch) {
251
256
  logger.warn("Google search is enabled, tool calling will be disabled.");
257
+ } else if (unsupportedModels.some((model2) => model.includes(model2)) && googleSearch) {
258
+ logger.warn(
259
+ `The model ${model} does not support Google search. Google search will be disabled.`
260
+ );
261
+ googleSearch = false;
252
262
  }
253
- if (config.googleSearch) {
263
+ if (googleSearch) {
254
264
  if (model.includes("gemini-2")) {
255
265
  result.push({
256
266
  google_search: {}
@@ -336,7 +346,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
336
346
  async *completionStream(params) {
337
347
  try {
338
348
  const response = await this._post(
339
- `models/${params.model}:streamGenerateContent`,
349
+ `models/${params.model}:streamGenerateContent?alt=sse`,
340
350
  {
341
351
  contents: await langchainMessageToGeminiMessage(
342
352
  params.input,
@@ -345,23 +355,23 @@ var GeminiRequester = class extends import_api.ModelRequester {
345
355
  safetySettings: [
346
356
  {
347
357
  category: "HARM_CATEGORY_HARASSMENT",
348
- threshold: "BLOCK_NONE"
358
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
349
359
  },
350
360
  {
351
361
  category: "HARM_CATEGORY_HATE_SPEECH",
352
- threshold: "BLOCK_NONE"
362
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
353
363
  },
354
364
  {
355
365
  category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
356
- threshold: "BLOCK_NONE"
366
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
357
367
  },
358
368
  {
359
369
  category: "HARM_CATEGORY_DANGEROUS_CONTENT",
360
- threshold: "BLOCK_NONE"
370
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
361
371
  },
362
372
  {
363
373
  category: "HARM_CATEGORY_CIVIC_INTEGRITY",
364
- threshold: "BLOCK_NONE"
374
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
365
375
  }
366
376
  ],
367
377
  generationConfig: {
@@ -382,44 +392,41 @@ var GeminiRequester = class extends import_api.ModelRequester {
382
392
  }
383
393
  );
384
394
  let errorCount = 0;
385
- const stream = new TransformStream();
386
- const iterable = (0, import_stream.readableStreamToAsyncIterable)(
387
- stream.readable
388
- );
389
- const jsonParser = new import_json.JSONParser();
390
- const writable = stream.writable.getWriter();
391
395
  let groundingContent = "";
392
396
  let currentGroudingIndex = 0;
393
- jsonParser.onEnd = async () => {
394
- await writable.close();
395
- };
396
- jsonParser.onValue = async ({ value }) => {
397
- const transformValue = value;
398
- if (!transformValue.candidates) {
399
- return;
400
- }
401
- for (const candidate of transformValue.candidates) {
402
- const parts = candidate.content?.parts;
403
- if (parts == null || parts.length < 1) {
404
- throw new Error(JSON.stringify(value));
397
+ await (0, import_sse.checkResponse)(response);
398
+ const readableStream = new ReadableStream({
399
+ async start(controller) {
400
+ for await (const chunk of (0, import_sse.sseIterable)(response)) {
401
+ controller.enqueue(chunk.data);
405
402
  }
406
- for (const part of parts) {
407
- await writable.write(part);
403
+ controller.close();
404
+ }
405
+ });
406
+ const transformToChatPartStream = new TransformStream({
407
+ async transform(chunk, controller) {
408
+ const parsedValue = JSON.parse(chunk);
409
+ const transformValue = parsedValue;
410
+ if (!transformValue.candidates) {
411
+ return;
408
412
  }
409
- for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
410
- groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
413
+ for (const candidate of transformValue.candidates) {
414
+ const parts = candidate.content?.parts;
415
+ if (parts == null || parts.length < 1) {
416
+ throw new Error(chunk);
417
+ }
418
+ for (const part of parts) {
419
+ controller.enqueue(part);
420
+ }
421
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
422
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
411
423
  `;
424
+ }
412
425
  }
413
426
  }
414
- };
415
- await (0, import_sse.checkResponse)(response);
416
- (0, import_sse.sse)(
417
- response,
418
- async (rawData) => {
419
- jsonParser.write(rawData);
420
- return true;
421
- },
422
- 0
427
+ });
428
+ const iterable = (0, import_stream.readableStreamToAsyncIterable)(
429
+ readableStream.pipeThrough(transformToChatPartStream)
423
430
  );
424
431
  let reasoningContent = "";
425
432
  let content = "";
@@ -602,10 +609,15 @@ ${groundingContent}`
602
609
  }
603
610
  _concatUrl(url) {
604
611
  const apiEndPoint = this._config.apiEndpoint;
612
+ let baseURL;
605
613
  if (apiEndPoint.endsWith("/")) {
606
- return apiEndPoint + url + `?key=${this._config.apiKey}`;
614
+ baseURL = new URL(apiEndPoint + url);
615
+ } else {
616
+ baseURL = new URL(apiEndPoint + "/" + url);
607
617
  }
608
- return apiEndPoint + "/" + url + `?key=${this._config.apiKey}`;
618
+ const searchParams = baseURL.searchParams;
619
+ searchParams.set("key", this._config.apiKey);
620
+ return baseURL.toString();
609
621
  }
610
622
  _buildHeaders() {
611
623
  return {
package/lib/index.mjs CHANGED
@@ -40,7 +40,6 @@ import {
40
40
  // src/requester.ts
41
41
  import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
42
42
  import { ChatGenerationChunk } from "@langchain/core/outputs";
43
- import { JSONParser } from "@streamparser/json";
44
43
  import {
45
44
  ModelRequester
46
45
  } from "koishi-plugin-chatluna/llm-core/platform/api";
@@ -48,7 +47,7 @@ import {
48
47
  ChatLunaError,
49
48
  ChatLunaErrorCode
50
49
  } from "koishi-plugin-chatluna/utils/error";
51
- import { checkResponse, sse } from "koishi-plugin-chatluna/utils/sse";
50
+ import { checkResponse, sseIterable } from "koishi-plugin-chatluna/utils/sse";
52
51
  import { readableStreamToAsyncIterable } from "koishi-plugin-chatluna/utils/stream";
53
52
 
54
53
  // src/utils.ts
@@ -237,14 +236,25 @@ function formatToolsToGeminiAITools(tools, config, model) {
237
236
  }
238
237
  const functions = tools.map(formatToolToGeminiAITool);
239
238
  const result = [];
240
- if (functions.length > 0 && !config.googleSearch) {
239
+ const unsupportedModels = [
240
+ "gemini-1.0",
241
+ "gemini-2.0-flash-lite",
242
+ "gemini-1.5-flash"
243
+ ];
244
+ let googleSearch = config.googleSearch;
245
+ if (functions.length > 0 && !googleSearch) {
241
246
  result.push({
242
247
  functionDeclarations: functions
243
248
  });
244
- } else if (functions.length > 0 && config.googleSearch) {
249
+ } else if (functions.length > 0 && googleSearch) {
245
250
  logger.warn("Google search is enabled, tool calling will be disabled.");
251
+ } else if (unsupportedModels.some((model2) => model.includes(model2)) && googleSearch) {
252
+ logger.warn(
253
+ `The model ${model} does not support Google search. Google search will be disabled.`
254
+ );
255
+ googleSearch = false;
246
256
  }
247
- if (config.googleSearch) {
257
+ if (googleSearch) {
248
258
  if (model.includes("gemini-2")) {
249
259
  result.push({
250
260
  google_search: {}
@@ -330,7 +340,7 @@ var GeminiRequester = class extends ModelRequester {
330
340
  async *completionStream(params) {
331
341
  try {
332
342
  const response = await this._post(
333
- `models/${params.model}:streamGenerateContent`,
343
+ `models/${params.model}:streamGenerateContent?alt=sse`,
334
344
  {
335
345
  contents: await langchainMessageToGeminiMessage(
336
346
  params.input,
@@ -339,23 +349,23 @@ var GeminiRequester = class extends ModelRequester {
339
349
  safetySettings: [
340
350
  {
341
351
  category: "HARM_CATEGORY_HARASSMENT",
342
- threshold: "BLOCK_NONE"
352
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
343
353
  },
344
354
  {
345
355
  category: "HARM_CATEGORY_HATE_SPEECH",
346
- threshold: "BLOCK_NONE"
356
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
347
357
  },
348
358
  {
349
359
  category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
350
- threshold: "BLOCK_NONE"
360
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
351
361
  },
352
362
  {
353
363
  category: "HARM_CATEGORY_DANGEROUS_CONTENT",
354
- threshold: "BLOCK_NONE"
364
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
355
365
  },
356
366
  {
357
367
  category: "HARM_CATEGORY_CIVIC_INTEGRITY",
358
- threshold: "BLOCK_NONE"
368
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
359
369
  }
360
370
  ],
361
371
  generationConfig: {
@@ -376,44 +386,41 @@ var GeminiRequester = class extends ModelRequester {
376
386
  }
377
387
  );
378
388
  let errorCount = 0;
379
- const stream = new TransformStream();
380
- const iterable = readableStreamToAsyncIterable(
381
- stream.readable
382
- );
383
- const jsonParser = new JSONParser();
384
- const writable = stream.writable.getWriter();
385
389
  let groundingContent = "";
386
390
  let currentGroudingIndex = 0;
387
- jsonParser.onEnd = async () => {
388
- await writable.close();
389
- };
390
- jsonParser.onValue = async ({ value }) => {
391
- const transformValue = value;
392
- if (!transformValue.candidates) {
393
- return;
394
- }
395
- for (const candidate of transformValue.candidates) {
396
- const parts = candidate.content?.parts;
397
- if (parts == null || parts.length < 1) {
398
- throw new Error(JSON.stringify(value));
391
+ await checkResponse(response);
392
+ const readableStream = new ReadableStream({
393
+ async start(controller) {
394
+ for await (const chunk of sseIterable(response)) {
395
+ controller.enqueue(chunk.data);
399
396
  }
400
- for (const part of parts) {
401
- await writable.write(part);
397
+ controller.close();
398
+ }
399
+ });
400
+ const transformToChatPartStream = new TransformStream({
401
+ async transform(chunk, controller) {
402
+ const parsedValue = JSON.parse(chunk);
403
+ const transformValue = parsedValue;
404
+ if (!transformValue.candidates) {
405
+ return;
402
406
  }
403
- for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
404
- groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
407
+ for (const candidate of transformValue.candidates) {
408
+ const parts = candidate.content?.parts;
409
+ if (parts == null || parts.length < 1) {
410
+ throw new Error(chunk);
411
+ }
412
+ for (const part of parts) {
413
+ controller.enqueue(part);
414
+ }
415
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
416
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
405
417
  `;
418
+ }
406
419
  }
407
420
  }
408
- };
409
- await checkResponse(response);
410
- sse(
411
- response,
412
- async (rawData) => {
413
- jsonParser.write(rawData);
414
- return true;
415
- },
416
- 0
421
+ });
422
+ const iterable = readableStreamToAsyncIterable(
423
+ readableStream.pipeThrough(transformToChatPartStream)
417
424
  );
418
425
  let reasoningContent = "";
419
426
  let content = "";
@@ -596,10 +603,15 @@ ${groundingContent}`
596
603
  }
597
604
  _concatUrl(url) {
598
605
  const apiEndPoint = this._config.apiEndpoint;
606
+ let baseURL;
599
607
  if (apiEndPoint.endsWith("/")) {
600
- return apiEndPoint + url + `?key=${this._config.apiKey}`;
608
+ baseURL = new URL(apiEndPoint + url);
609
+ } else {
610
+ baseURL = new URL(apiEndPoint + "/" + url);
601
611
  }
602
- return apiEndPoint + "/" + url + `?key=${this._config.apiKey}`;
612
+ const searchParams = baseURL.searchParams;
613
+ searchParams.set("key", this._config.apiKey);
614
+ return baseURL.toString();
603
615
  }
604
616
  _buildHeaders() {
605
617
  return {
package/lib/utils.d.ts CHANGED
@@ -7,4 +7,4 @@ export declare function partAsType<T extends ChatPart>(part: ChatPart): T;
7
7
  export declare function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
8
8
  export declare function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
9
9
  export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
10
- export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | ChatMessageChunk;
10
+ export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | ChatMessageChunk;
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
3
3
  "description": "google-gemini adapter for chatluna",
4
- "version": "1.1.0-beta.2",
4
+ "version": "1.1.0-beta.3",
5
5
  "main": "lib/index.cjs",
6
6
  "module": "lib/index.mjs",
7
7
  "typings": "lib/index.d.ts",
@@ -63,7 +63,6 @@
63
63
  ],
64
64
  "dependencies": {
65
65
  "@langchain/core": "^0.3.18",
66
- "@streamparser/json": "^0.0.21",
67
66
  "zod": "^3.24.0-canary.20241107T043915",
68
67
  "zod-to-json-schema": "^3.23.5"
69
68
  },
@@ -73,7 +72,7 @@
73
72
  },
74
73
  "peerDependencies": {
75
74
  "koishi": "^4.18.4",
76
- "koishi-plugin-chatluna": "^1.1.0-beta.14"
75
+ "koishi-plugin-chatluna": "^1.1.0-beta.16"
77
76
  },
78
77
  "koishi": {
79
78
  "description": {