koishi-plugin-chatluna-google-gemini-adapter 1.1.0-beta.1 → 1.1.0-beta.3

This diff shows the contents of package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as published.
@@ -0,0 +1,18 @@
+ import { Context } from 'koishi';
+ import { PlatformModelAndEmbeddingsClient } from 'koishi-plugin-chatluna/llm-core/platform/client';
+ import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
+ import { ChatHubBaseEmbeddings, ChatLunaChatModel } from 'koishi-plugin-chatluna/llm-core/platform/model';
+ import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
+ import { Config } from '.';
+ import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
+ export declare class GeminiClient extends PlatformModelAndEmbeddingsClient {
+ private _config;
+ platform: string;
+ private _requester;
+ private _models;
+ constructor(ctx: Context, _config: Config, clientConfig: ClientConfig, plugin: ChatLunaPlugin);
+ init(): Promise<void>;
+ refreshModels(): Promise<ModelInfo[]>;
+ getModels(): Promise<ModelInfo[]>;
+ protected _createModel(model: string): ChatLunaChatModel | ChatHubBaseEmbeddings;
+ }
package/lib/index.cjs CHANGED
@@ -56,7 +56,6 @@ var import_error2 = require("koishi-plugin-chatluna/utils/error");
  // src/requester.ts
  var import_messages2 = require("@langchain/core/messages");
  var import_outputs = require("@langchain/core/outputs");
- var import_json = require("@streamparser/json");
  var import_api = require("koishi-plugin-chatluna/llm-core/platform/api");
  var import_error = require("koishi-plugin-chatluna/utils/error");
  var import_sse = require("koishi-plugin-chatluna/utils/sse");
@@ -243,14 +242,25 @@ function formatToolsToGeminiAITools(tools, config, model) {
  }
  const functions = tools.map(formatToolToGeminiAITool);
  const result = [];
- if (functions.length > 0 && !config.googleSearch) {
+ const unsupportedModels = [
+ "gemini-1.0",
+ "gemini-2.0-flash-lite",
+ "gemini-1.5-flash"
+ ];
+ let googleSearch = config.googleSearch;
+ if (functions.length > 0 && !googleSearch) {
  result.push({
  functionDeclarations: functions
  });
- } else if (functions.length > 0 && config.googleSearch) {
+ } else if (functions.length > 0 && googleSearch) {
  logger.warn("Google search is enabled, tool calling will be disabled.");
+ } else if (unsupportedModels.some((model2) => model.includes(model2)) && googleSearch) {
+ logger.warn(
+ `The model ${model} does not support Google search. Google search will be disabled.`
+ );
+ googleSearch = false;
  }
- if (config.googleSearch) {
+ if (googleSearch) {
  if (model.includes("gemini-2")) {
  result.push({
  google_search: {}
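
The new gating makes function declarations and Google-search grounding mutually exclusive, and drops search entirely for model families that do not support it. A minimal standalone sketch of that logic (the helper name and signature are illustrative, not part of the package):

```ts
// Model families the diff lists as unable to use search grounding.
const UNSUPPORTED_SEARCH_MODELS = [
    'gemini-1.0',
    'gemini-2.0-flash-lite',
    'gemini-1.5-flash'
]

// Hypothetical helper mirroring the hunk above.
function resolveGeminiTools(
    model: string,
    functionDeclarations: object[],
    googleSearch: boolean
): Record<string, unknown>[] {
    const result: Record<string, unknown>[] = []
    // Tool calling and search grounding are mutually exclusive here.
    if (functionDeclarations.length > 0 && !googleSearch) {
        result.push({ functionDeclarations })
    } else if (
        googleSearch &&
        UNSUPPORTED_SEARCH_MODELS.some((m) => model.includes(m))
    ) {
        googleSearch = false // no search grounding for this model family
    }
    if (googleSearch && model.includes('gemini-2')) {
        // gemini-2.x models take the google_search tool; the diff elides
        // the branch for older models.
        result.push({ google_search: {} })
    }
    return result
}
```
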
@@ -336,7 +346,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
  async *completionStream(params) {
  try {
  const response = await this._post(
- `models/${params.model}:streamGenerateContent`,
+ `models/${params.model}:streamGenerateContent?alt=sse`,
  {
  contents: await langchainMessageToGeminiMessage(
  params.input,
@@ -345,23 +355,23 @@ var GeminiRequester = class extends import_api.ModelRequester {
  safetySettings: [
  {
  category: "HARM_CATEGORY_HARASSMENT",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_HATE_SPEECH",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_DANGEROUS_CONTENT",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_CIVIC_INTEGRITY",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  }
  ],
  generationConfig: {
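
All five harm categories now pick their threshold from the same model check: per this hunk, gemini-2.0 models accept the newer `OFF` value while everything else keeps `BLOCK_NONE`. A compact equivalent (hypothetical helper, not in the package) that makes the repeated ternary explicit:

```ts
type HarmThreshold = 'OFF' | 'BLOCK_NONE'

// gemini-2.0 accepts "OFF"; older models still expect "BLOCK_NONE".
function safetyThreshold(model: string): HarmThreshold {
    return model.includes('gemini-2.0') ? 'OFF' : 'BLOCK_NONE'
}

const HARM_CATEGORIES = [
    'HARM_CATEGORY_HARASSMENT',
    'HARM_CATEGORY_HATE_SPEECH',
    'HARM_CATEGORY_SEXUALLY_EXPLICIT',
    'HARM_CATEGORY_DANGEROUS_CONTENT',
    'HARM_CATEGORY_CIVIC_INTEGRITY'
] as const

// Builds the same safetySettings array as the hunk above.
function buildSafetySettings(model: string) {
    return HARM_CATEGORIES.map((category) => ({
        category,
        threshold: safetyThreshold(model)
    }))
}
```
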
@@ -382,43 +392,41 @@ var GeminiRequester = class extends import_api.ModelRequester {
  }
  );
  let errorCount = 0;
- const stream = new TransformStream();
- const iterable = (0, import_stream.readableStreamToAsyncIterable)(
- stream.readable
- );
- const jsonParser = new import_json.JSONParser();
- const writable = stream.writable.getWriter();
  let groundingContent = "";
  let currentGroudingIndex = 0;
- jsonParser.onEnd = async () => {
- await writable.close();
- };
- jsonParser.onValue = async ({ value }) => {
- const transformValue = value;
- if (!transformValue.candidates) {
- return;
- }
- for (const candidate of transformValue.candidates) {
- const parts = candidate.content?.parts;
- if (parts == null || parts.length < 1) {
- throw new Error(JSON.stringify(value));
+ await (0, import_sse.checkResponse)(response);
+ const readableStream = new ReadableStream({
+ async start(controller) {
+ for await (const chunk of (0, import_sse.sseIterable)(response)) {
+ controller.enqueue(chunk.data);
  }
- for (const part of parts) {
- await writable.write(part);
+ controller.close();
+ }
+ });
+ const transformToChatPartStream = new TransformStream({
+ async transform(chunk, controller) {
+ const parsedValue = JSON.parse(chunk);
+ const transformValue = parsedValue;
+ if (!transformValue.candidates) {
+ return;
  }
- for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
- groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ for (const candidate of transformValue.candidates) {
+ const parts = candidate.content?.parts;
+ if (parts == null || parts.length < 1) {
+ throw new Error(chunk);
+ }
+ for (const part of parts) {
+ controller.enqueue(part);
+ }
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
  `;
+ }
  }
  }
- };
- (0, import_sse.sse)(
- response,
- async (rawData) => {
- jsonParser.write(rawData);
- return true;
- },
- 0
+ });
+ const iterable = (0, import_stream.readableStreamToAsyncIterable)(
+ readableStream.pipeThrough(transformToChatPartStream)
  );
  let reasoningContent = "";
  let content = "";
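
This rework swaps the incremental `@streamparser/json` pipeline for `alt=sse` server-sent events: each SSE `data:` payload is now a complete JSON document, so it can be parsed with a plain `JSON.parse` and fanned out into content parts through a `TransformStream`. A self-contained sketch of the same pipeline, with a stand-in SSE source in place of the plugin's `sseIterable` (all names illustrative):

```ts
interface GeminiPart { text?: string }
interface GeminiChunk {
    candidates?: { content?: { parts?: GeminiPart[] } }[]
}

// Stand-in for sseIterable(response): with ?alt=sse, each event's data
// field carries one complete JSON document.
async function* fakeSseSource(): AsyncGenerator<{ data: string }> {
    yield { data: '{"candidates":[{"content":{"parts":[{"text":"Hel"}]}}]}' }
    yield { data: '{"candidates":[{"content":{"parts":[{"text":"lo"}]}}]}' }
}

function toPartStream(
    events: AsyncIterable<{ data: string }>
): ReadableStream<GeminiPart> {
    // Bridge the async iterator into a web ReadableStream of raw strings.
    const readable = new ReadableStream<string>({
        async start(controller) {
            for await (const chunk of events) controller.enqueue(chunk.data)
            controller.close()
        }
    })
    // Parse each JSON chunk and emit its content parts one by one.
    const toParts = new TransformStream<string, GeminiPart>({
        transform(chunk, controller) {
            const value = JSON.parse(chunk) as GeminiChunk
            for (const candidate of value.candidates ?? []) {
                for (const part of candidate.content?.parts ?? []) {
                    controller.enqueue(part)
                }
            }
        }
    })
    return readable.pipeThrough(toParts)
}

// Consume parts as they arrive (Node 18+ web streams).
async function demo(): Promise<void> {
    const reader = toPartStream(fakeSseSource()).getReader()
    for (;;) {
        const { done, value } = await reader.read()
        if (done) break
        process.stdout.write(value.text ?? '')
    }
    process.stdout.write('\n') // prints "Hello"
}

void demo()
```
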
@@ -601,10 +609,15 @@ ${groundingContent}`
  }
  _concatUrl(url) {
  const apiEndPoint = this._config.apiEndpoint;
+ let baseURL;
  if (apiEndPoint.endsWith("/")) {
- return apiEndPoint + url + `?key=${this._config.apiKey}`;
+ baseURL = new URL(apiEndPoint + url);
+ } else {
+ baseURL = new URL(apiEndPoint + "/" + url);
  }
- return apiEndPoint + "/" + url + `?key=${this._config.apiKey}`;
+ const searchParams = baseURL.searchParams;
+ searchParams.set("key", this._config.apiKey);
+ return baseURL.toString();
  }
  _buildHeaders() {
  return {
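
The old string concatenation appended `?key=` unconditionally, which breaks once the streaming path carries its own `?alt=sse` query (yielding `...?alt=sse?key=...`). The URL API makes `key` a proper second parameter. A standalone rewrite of the same logic (function name illustrative):

```ts
// Builds the request URL the way the new _concatUrl does: join endpoint
// and path, then attach the API key via URLSearchParams so any existing
// query string (e.g. ?alt=sse) is preserved.
function concatUrl(apiEndpoint: string, path: string, apiKey: string): string {
    const base = apiEndpoint.endsWith('/')
        ? new URL(apiEndpoint + path)
        : new URL(apiEndpoint + '/' + path)
    base.searchParams.set('key', apiKey)
    return base.toString()
}

// e.g. concatUrl('https://generativelanguage.googleapis.com/v1beta',
//                'models/gemini-2.0-flash:streamGenerateContent?alt=sse', 'K')
// -> '...:streamGenerateContent?alt=sse&key=K'
```
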
package/lib/index.d.ts CHANGED
@@ -1,136 +1,15 @@
- import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
- import { Context, Logger, Schema } from 'koishi';
- import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from '@langchain/core/messages';
- import { StructuredTool } from '@langchain/core/tools';
- import { ChatGenerationChunk } from '@langchain/core/outputs';
- import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
- import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
- import { PlatformModelAndEmbeddingsClient } from 'koishi-plugin-chatluna/llm-core/platform/client';
- import { ChatHubBaseEmbeddings, ChatLunaChatModel } from 'koishi-plugin-chatluna/llm-core/platform/model';
- import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
- export let logger: Logger;
- export function apply(ctx: Context, config: Config): void;
- export interface Config extends ChatLunaPlugin.Config {
- apiKeys: [string, string][];
- maxTokens: number;
- temperature: number;
- googleSearch: boolean;
- searchThreshold: number;
- groundingContentDisplay: boolean;
- }
- export const Config: Schema<Config>;
- export const inject: string[];
- export const name = "chatluna-google-gemini-adapter";
- export interface ChatCompletionResponseMessage {
- role: string;
- parts?: ChatPart[];
- }
- export type ChatPart = ChatMessagePart | ChatUploadDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart;
- export type ChatMessagePart = {
- text: string;
- thought?: boolean;
- };
- export type ChatUploadDataPart = {
- inline_data: {
- mime_type: string;
- data?: string;
- };
- };
- export type ChatFunctionCallingPart = {
- functionCall: {
- name: string;
- args?: any;
- };
- };
- export type ChatFunctionResponsePart = {
- functionResponse: {
- name: string;
- response: any;
- };
- };
- export interface ChatResponse {
- candidates: {
- content: ChatCompletionResponseMessage;
- groundingMetadata: {
- searchEntryPoint: {
- renderedContent: string;
- };
- groundingChunks: {
- web: {
- uri: string;
- title: string;
- };
- }[];
- groundingSupports: {
- segment: {
- endIndex: number;
- text: string;
- };
- groundingChunkIndices: number[];
- confidenceScores: number[];
- }[];
- webSearchQueries: string[];
- };
- finishReason: string;
- index: number;
- safetyRatings: {
- category: string;
- probability: string;
- }[];
- }[];
- promptFeedback: {
- safetyRatings: {
- category: string;
- probability: string;
- }[];
- };
- }
- export interface ChatCompletionFunction {
- name: string;
- description?: string;
- parameters?: {
- [key: string]: any;
- };
- }
- export interface ChatCompletionMessageFunctionCall {
- name: string;
- args?: any;
- }
- export interface CreateEmbeddingResponse {
- embeddings: {
- values: number[];
- }[];
- }
- export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
- export function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
- export function partAsType<T extends ChatPart>(part: ChatPart): T;
- export function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
- export function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
- export function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
- export function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | ChatMessageChunk;
- export class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
- private _config;
- private _plugin;
- private _pluginConfig;
- constructor(_config: ClientConfig, _plugin: ChatLunaPlugin, _pluginConfig: Config);
- completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
- embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
- getModels(): Promise<string[]>;
- private _post;
- private _get;
- private _concatUrl;
- private _buildHeaders;
- init(): Promise<void>;
- dispose(): Promise<void>;
- }
- export class GeminiClient extends PlatformModelAndEmbeddingsClient {
- private _config;
- platform: string;
- private _requester;
- private _models;
- constructor(ctx: Context, _config: Config, clientConfig: ClientConfig, plugin: ChatLunaPlugin);
- init(): Promise<void>;
- refreshModels(): Promise<ModelInfo[]>;
- getModels(): Promise<ModelInfo[]>;
- protected _createModel(model: string): ChatLunaChatModel | ChatHubBaseEmbeddings;
- }
+ import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
+ import { Context, Logger, Schema } from 'koishi';
+ export declare let logger: Logger;
+ export declare function apply(ctx: Context, config: Config): void;
+ export interface Config extends ChatLunaPlugin.Config {
+ apiKeys: [string, string][];
+ maxTokens: number;
+ temperature: number;
+ googleSearch: boolean;
+ searchThreshold: number;
+ groundingContentDisplay: boolean;
+ }
+ export declare const Config: Schema<Config>;
+ export declare const inject: string[];
+ export declare const name = "chatluna-google-gemini-adapter";
package/lib/index.mjs CHANGED
@@ -40,7 +40,6 @@ import {
  // src/requester.ts
  import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
  import { ChatGenerationChunk } from "@langchain/core/outputs";
- import { JSONParser } from "@streamparser/json";
  import {
  ModelRequester
  } from "koishi-plugin-chatluna/llm-core/platform/api";
@@ -48,7 +47,7 @@ import {
  ChatLunaError,
  ChatLunaErrorCode
  } from "koishi-plugin-chatluna/utils/error";
- import { sse } from "koishi-plugin-chatluna/utils/sse";
+ import { checkResponse, sseIterable } from "koishi-plugin-chatluna/utils/sse";
  import { readableStreamToAsyncIterable } from "koishi-plugin-chatluna/utils/stream";

  // src/utils.ts
@@ -237,14 +236,25 @@ function formatToolsToGeminiAITools(tools, config, model) {
  }
  const functions = tools.map(formatToolToGeminiAITool);
  const result = [];
- if (functions.length > 0 && !config.googleSearch) {
+ const unsupportedModels = [
+ "gemini-1.0",
+ "gemini-2.0-flash-lite",
+ "gemini-1.5-flash"
+ ];
+ let googleSearch = config.googleSearch;
+ if (functions.length > 0 && !googleSearch) {
  result.push({
  functionDeclarations: functions
  });
- } else if (functions.length > 0 && config.googleSearch) {
+ } else if (functions.length > 0 && googleSearch) {
  logger.warn("Google search is enabled, tool calling will be disabled.");
+ } else if (unsupportedModels.some((model2) => model.includes(model2)) && googleSearch) {
+ logger.warn(
+ `The model ${model} does not support Google search. Google search will be disabled.`
+ );
+ googleSearch = false;
  }
- if (config.googleSearch) {
+ if (googleSearch) {
  if (model.includes("gemini-2")) {
  result.push({
  google_search: {}
@@ -330,7 +340,7 @@ var GeminiRequester = class extends ModelRequester {
  async *completionStream(params) {
  try {
  const response = await this._post(
- `models/${params.model}:streamGenerateContent`,
+ `models/${params.model}:streamGenerateContent?alt=sse`,
  {
  contents: await langchainMessageToGeminiMessage(
  params.input,
@@ -339,23 +349,23 @@ var GeminiRequester = class extends ModelRequester {
  safetySettings: [
  {
  category: "HARM_CATEGORY_HARASSMENT",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_HATE_SPEECH",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_DANGEROUS_CONTENT",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  },
  {
  category: "HARM_CATEGORY_CIVIC_INTEGRITY",
- threshold: "BLOCK_NONE"
+ threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
  }
  ],
  generationConfig: {
@@ -376,43 +386,41 @@ var GeminiRequester = class extends ModelRequester {
  }
  );
  let errorCount = 0;
- const stream = new TransformStream();
- const iterable = readableStreamToAsyncIterable(
- stream.readable
- );
- const jsonParser = new JSONParser();
- const writable = stream.writable.getWriter();
  let groundingContent = "";
  let currentGroudingIndex = 0;
- jsonParser.onEnd = async () => {
- await writable.close();
- };
- jsonParser.onValue = async ({ value }) => {
- const transformValue = value;
- if (!transformValue.candidates) {
- return;
- }
- for (const candidate of transformValue.candidates) {
- const parts = candidate.content?.parts;
- if (parts == null || parts.length < 1) {
- throw new Error(JSON.stringify(value));
+ await checkResponse(response);
+ const readableStream = new ReadableStream({
+ async start(controller) {
+ for await (const chunk of sseIterable(response)) {
+ controller.enqueue(chunk.data);
  }
- for (const part of parts) {
- await writable.write(part);
+ controller.close();
+ }
+ });
+ const transformToChatPartStream = new TransformStream({
+ async transform(chunk, controller) {
+ const parsedValue = JSON.parse(chunk);
+ const transformValue = parsedValue;
+ if (!transformValue.candidates) {
+ return;
  }
- for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
- groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ for (const candidate of transformValue.candidates) {
+ const parts = candidate.content?.parts;
+ if (parts == null || parts.length < 1) {
+ throw new Error(chunk);
+ }
+ for (const part of parts) {
+ controller.enqueue(part);
+ }
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
  `;
+ }
  }
  }
- };
- sse(
- response,
- async (rawData) => {
- jsonParser.write(rawData);
- return true;
- },
- 0
+ });
+ const iterable = readableStreamToAsyncIterable(
+ readableStream.pipeThrough(transformToChatPartStream)
  );
  let reasoningContent = "";
  let content = "";
@@ -595,10 +603,15 @@ ${groundingContent}`
  }
  _concatUrl(url) {
  const apiEndPoint = this._config.apiEndpoint;
+ let baseURL;
  if (apiEndPoint.endsWith("/")) {
- return apiEndPoint + url + `?key=${this._config.apiKey}`;
+ baseURL = new URL(apiEndPoint + url);
+ } else {
+ baseURL = new URL(apiEndPoint + "/" + url);
  }
- return apiEndPoint + "/" + url + `?key=${this._config.apiKey}`;
+ const searchParams = baseURL.searchParams;
+ searchParams.set("key", this._config.apiKey);
+ return baseURL.toString();
  }
  _buildHeaders() {
  return {
@@ -0,0 +1,20 @@
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
+ import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
+ import { Config } from '.';
+ import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
+ export declare class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
+ private _config;
+ private _plugin;
+ private _pluginConfig;
+ constructor(_config: ClientConfig, _plugin: ChatLunaPlugin, _pluginConfig: Config);
+ completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
+ embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
+ getModels(): Promise<string[]>;
+ private _post;
+ private _get;
+ private _concatUrl;
+ private _buildHeaders;
+ init(): Promise<void>;
+ dispose(): Promise<void>;
+ }
package/lib/types.d.ts ADDED
@@ -0,0 +1,81 @@
+ export interface ChatCompletionResponseMessage {
+ role: string;
+ parts?: ChatPart[];
+ }
+ export type ChatPart = ChatMessagePart | ChatUploadDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart;
+ export type ChatMessagePart = {
+ text: string;
+ thought?: boolean;
+ };
+ export type ChatUploadDataPart = {
+ inline_data: {
+ mime_type: string;
+ data?: string;
+ };
+ };
+ export type ChatFunctionCallingPart = {
+ functionCall: {
+ name: string;
+ args?: any;
+ };
+ };
+ export type ChatFunctionResponsePart = {
+ functionResponse: {
+ name: string;
+ response: any;
+ };
+ };
+ export interface ChatResponse {
+ candidates: {
+ content: ChatCompletionResponseMessage;
+ groundingMetadata: {
+ searchEntryPoint: {
+ renderedContent: string;
+ };
+ groundingChunks: {
+ web: {
+ uri: string;
+ title: string;
+ };
+ }[];
+ groundingSupports: {
+ segment: {
+ endIndex: number;
+ text: string;
+ };
+ groundingChunkIndices: number[];
+ confidenceScores: number[];
+ }[];
+ webSearchQueries: string[];
+ };
+ finishReason: string;
+ index: number;
+ safetyRatings: {
+ category: string;
+ probability: string;
+ }[];
+ }[];
+ promptFeedback: {
+ safetyRatings: {
+ category: string;
+ probability: string;
+ }[];
+ };
+ }
+ export interface ChatCompletionFunction {
+ name: string;
+ description?: string;
+ parameters?: {
+ [key: string]: any;
+ };
+ }
+ export interface ChatCompletionMessageFunctionCall {
+ name: string;
+ args?: any;
+ }
+ export interface CreateEmbeddingResponse {
+ embeddings: {
+ values: number[];
+ }[];
+ }
+ export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
package/lib/utils.d.ts ADDED
@@ -0,0 +1,10 @@
+ import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from '@langchain/core/messages';
+ import { StructuredTool } from '@langchain/core/tools';
+ import { ChatCompletionFunction, ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatPart } from './types';
+ import { Config } from '.';
+ export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
+ export declare function partAsType<T extends ChatPart>(part: ChatPart): T;
+ export declare function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
+ export declare function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
+ export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
+ export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | ChatMessageChunk;
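
`partAsType` is declared as a bare cast over the `ChatPart` union from `types.d.ts`, so callers are expected to know which variant they hold. A minimal sketch (two of the four variants; guard and function names are illustrative) of how a consumer can narrow the union safely first:

```ts
// Reduced copies of two ChatPart variants from types.d.ts.
type ChatMessagePart = { text: string; thought?: boolean }
type ChatFunctionCallingPart = {
    functionCall: { name: string; args?: unknown }
}
type ChatPart = ChatMessagePart | ChatFunctionCallingPart

// Each variant is identified by its unique top-level key.
function isFunctionCall(part: ChatPart): part is ChatFunctionCallingPart {
    return 'functionCall' in part
}

function render(part: ChatPart): string {
    return isFunctionCall(part)
        ? `call ${part.functionCall.name}`
        : part.text
}

console.log(render({ text: 'hi' }))                       // "hi"
console.log(render({ functionCall: { name: 'search' } })) // "call search"
```
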
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.1.0-beta.1",
+ "version": "1.1.0-beta.3",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -63,7 +63,6 @@
  ],
  "dependencies": {
  "@langchain/core": "^0.3.18",
- "@streamparser/json": "^0.0.21",
  "zod": "^3.24.0-canary.20241107T043915",
  "zod-to-json-schema": "^3.23.5"
  },
@@ -73,7 +72,7 @@
  },
  "peerDependencies": {
  "koishi": "^4.18.4",
- "koishi-plugin-chatluna": "^1.1.0-beta.1"
+ "koishi-plugin-chatluna": "^1.1.0-beta.16"
  },
  "koishi": {
  "description": {