koishi-plugin-chatluna-google-gemini-adapter 1.2.0 → 1.2.1

This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
package/lib/index.cjs CHANGED
@@ -589,9 +589,14 @@ ${groundingContent}`
  "error when listing gemini models, Result:" + JSON.stringify(data)
  );
  }
- return data.models.map((model) => model.name).filter(
- (model) => model.includes("gemini") || model.includes("gemma") || model.includes("embedding")
- );
+ return data.models.filter(
+ (model) => model.name.includes("gemini") || model.name.includes("gemma") || model.name.includes("embedding")
+ ).map((model) => {
+ return {
+ ...model,
+ name: model.name.replace("models/", "")
+ };
+ });
  } catch (e) {
  const error = new Error(
  "error when listing gemini models, Result: " + JSON.stringify(data)
@@ -672,36 +677,24 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  }
  async refreshModels() {
  try {
- let rawModels = await this._requester.getModels();
+ const rawModels = await this._requester.getModels();
  if (!rawModels.length) {
  throw new import_error2.ChatLunaError(
  import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR,
  new Error("No model found")
  );
  }
- rawModels = rawModels.map((model) => model.replace("models/", ""));
  const models = [];
  for (const model of rawModels) {
  const info = {
- name: model,
- maxTokens: ((model2) => {
- if (model2.includes("gemini-1.5-pro")) {
- return 1048576;
- }
- if (model2.includes("gemini-1.5-flash") || model2.includes("gemini-2.0-pro") || model2.includes("gemini-2.5-pro")) {
- return 2097152;
- }
- if (model2.includes("gemini-1.0-pro")) {
- return 30720;
- }
- return 1048576;
- })(model),
- type: model.includes("embedding") ? import_types.ModelType.embeddings : import_types.ModelType.llm,
- functionCall: !model.includes("vision"),
+ name: model.name,
+ maxTokens: model.inputTokenLimit,
+ type: model.name.includes("embedding") ? import_types.ModelType.embeddings : import_types.ModelType.llm,
+ functionCall: !model.name.includes("vision"),
  supportMode: ["all"]
  };
- if (model.includes("gemini-2.5")) {
- if (!model.includes("-thinking")) {
+ if (model.name.includes("gemini-2.5")) {
+ if (!model.name.includes("-thinking")) {
  models.push(
  { ...info, name: model + "-no-thinking" },
  { ...info, name: model + "-thinking" },
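Net effect of this hunk: maxTokens no longer comes from a hardcoded table keyed on name substrings (the removed table gave gemini-1.5-pro 1,048,576 tokens and gemini-1.5-flash 2,097,152, apparently swapped relative to Google's published limits) but is read from the inputTokenLimit that the ListModels API reports per model. A minimal before/after sketch in TypeScript:

// Before (sketch of the removed IIFE): guess the context window from the name.
const legacyMaxTokens = (name: string): number => {
    if (name.includes('gemini-1.5-pro')) return 1048576
    if (
        name.includes('gemini-1.5-flash') ||
        name.includes('gemini-2.0-pro') ||
        name.includes('gemini-2.5-pro')
    ) {
        return 2097152
    }
    if (name.includes('gemini-1.0-pro')) return 30720
    return 1048576
}

// After (sketch): take the per-model limit straight from the API metadata.
const maxTokens = (model: { inputTokenLimit: number }): number =>
    model.inputTokenLimit

One caveat: the unchanged context lines in the gemini-2.5 branch still compute model + "-no-thinking", and model is now an object rather than a string, so those pushed names would stringify as "[object Object]-no-thinking"; a follow-up release would presumably switch them to model.name.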
@@ -720,7 +713,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  }
  }
  async getModels() {
- if (this._models) {
+ if (this._models && Object.keys(this._models).length > 0) {
  return Object.values(this._models);
  }
  const models = await this.refreshModels();
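The guard change in getModels matters because _models is presumably initialized to an empty object, which is truthy in JavaScript, so the old check could return an empty model list forever instead of falling through to refreshModels(). A minimal sketch of the pitfall, with hypothetical names:

// Hypothetical cache shape; only the truthiness pitfall is illustrated.
type ModelInfo = { name: string }

class ModelCache {
    private _models: Record<string, ModelInfo> = {}

    cached(): ModelInfo[] | undefined {
        // `if (this._models)` alone is always true for an object, so the
        // emptiness check is what actually gates a refresh.
        if (this._models && Object.keys(this._models).length > 0) {
            return Object.values(this._models)
        }
        return undefined // caller falls through to refreshModels()
    }
}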
package/lib/index.mjs CHANGED
@@ -572,9 +572,14 @@ ${groundingContent}`
  "error when listing gemini models, Result:" + JSON.stringify(data)
  );
  }
- return data.models.map((model) => model.name).filter(
- (model) => model.includes("gemini") || model.includes("gemma") || model.includes("embedding")
- );
+ return data.models.filter(
+ (model) => model.name.includes("gemini") || model.name.includes("gemma") || model.name.includes("embedding")
+ ).map((model) => {
+ return {
+ ...model,
+ name: model.name.replace("models/", "")
+ };
+ });
  } catch (e) {
  const error = new Error(
  "error when listing gemini models, Result: " + JSON.stringify(data)
@@ -655,36 +660,24 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  }
  async refreshModels() {
  try {
- let rawModels = await this._requester.getModels();
+ const rawModels = await this._requester.getModels();
  if (!rawModels.length) {
  throw new ChatLunaError2(
  ChatLunaErrorCode2.MODEL_INIT_ERROR,
  new Error("No model found")
  );
  }
- rawModels = rawModels.map((model) => model.replace("models/", ""));
  const models = [];
  for (const model of rawModels) {
  const info = {
- name: model,
- maxTokens: ((model2) => {
- if (model2.includes("gemini-1.5-pro")) {
- return 1048576;
- }
- if (model2.includes("gemini-1.5-flash") || model2.includes("gemini-2.0-pro") || model2.includes("gemini-2.5-pro")) {
- return 2097152;
- }
- if (model2.includes("gemini-1.0-pro")) {
- return 30720;
- }
- return 1048576;
- })(model),
- type: model.includes("embedding") ? ModelType.embeddings : ModelType.llm,
- functionCall: !model.includes("vision"),
+ name: model.name,
+ maxTokens: model.inputTokenLimit,
+ type: model.name.includes("embedding") ? ModelType.embeddings : ModelType.llm,
+ functionCall: !model.name.includes("vision"),
  supportMode: ["all"]
  };
- if (model.includes("gemini-2.5")) {
- if (!model.includes("-thinking")) {
+ if (model.name.includes("gemini-2.5")) {
+ if (!model.name.includes("-thinking")) {
  models.push(
  { ...info, name: model + "-no-thinking" },
  { ...info, name: model + "-thinking" },
@@ -703,7 +696,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  }
  }
  async getModels() {
- if (this._models) {
+ if (this._models && Object.keys(this._models).length > 0) {
  return Object.values(this._models);
  }
  const models = await this.refreshModels();
package/lib/requester.d.ts CHANGED
@@ -2,6 +2,7 @@ import { ChatGenerationChunk } from '@langchain/core/outputs';
  import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
  import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
  import { Config } from '.';
+ import { GeminiModelInfo } from './types';
  import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
  export declare class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
  private _config;
@@ -10,7 +11,7 @@ export declare class GeminiRequester extends ModelRequester implements Embedding
  constructor(_config: ClientConfig, _plugin: ChatLunaPlugin, _pluginConfig: Config);
  completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
  embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
- getModels(): Promise<string[]>;
+ getModels(): Promise<GeminiModelInfo[]>;
  private _post;
  private _get;
  private _concatUrl;
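For downstream callers this signature change is the visible break in 1.2.1: getModels() now resolves to model objects rather than name strings. A hypothetical caller:

// Before 1.2.1: const names: string[] = await requester.getModels();
// From 1.2.1 (requester: GeminiRequester):
const models = await requester.getModels() // GeminiModelInfo[]
const names = models.map((model) => model.name)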
package/lib/types.d.ts CHANGED
@@ -85,3 +85,16 @@ export interface CreateEmbeddingResponse {
  }[];
  }
  export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
+ export interface GeminiModelInfo {
+ name: string;
+ version: string;
+ displayName: string;
+ description: string;
+ inputTokenLimit: number;
+ outputTokenLimit: number;
+ supportedGenerationMethods: string[];
+ temperature: number;
+ topP: number;
+ topK: number;
+ maxTemperature: number;
+ }
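The new interface mirrors one entry of the Gemini API's ListModels response. For orientation, an illustrative value (the numbers are examples, not authoritative limits; getModels() has already stripped the "models/" prefix from name):

const example: GeminiModelInfo = {
    name: 'gemini-1.5-flash',
    version: '001',
    displayName: 'Gemini 1.5 Flash',
    description: 'Fast and versatile multimodal model',
    inputTokenLimit: 1048576,
    outputTokenLimit: 8192,
    supportedGenerationMethods: ['generateContent', 'countTokens'],
    temperature: 1,
    topP: 0.95,
    topK: 40,
    maxTemperature: 2
}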
package/lib/utils.d.ts CHANGED
@@ -8,4 +8,4 @@ export declare function partAsTypeCheck<T extends ChatPart>(part: ChatPart, chec
  export declare function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
  export declare function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
  export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
- export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | ChatMessageChunk;
+ export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | ChatMessageChunk;
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.2.0",
+ "version": "1.2.1",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -72,7 +72,7 @@
  },
  "peerDependencies": {
  "koishi": "^4.18.7",
- "koishi-plugin-chatluna": "^1.2.0"
+ "koishi-plugin-chatluna": "^1.3.0-alpha.1"
  },
  "koishi": {
  "description": {