koishi-plugin-chatluna-google-gemini-adapter 1.3.15 → 1.3.16

This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
package/lib/index.cjs CHANGED
@@ -348,11 +348,11 @@ function prepareModelConfig(params, pluginConfig) {
   if (model.includes("gemini-3")) {
     enabledThinking = true;
     thinkingBudget = void 0;
-    const match = model.match(/-(low|medium|high|tiny)-thinking/);
+    const match = model.match(/-(low|medium|high|tiny|minimal)-thinking/);
     if (match && match[1]) {
       const level = match[1];
       model = model.replace(`-${level}-thinking`, "");
-      if (level === "tiny") {
+      if (level === "minimal" && model.includes("3-pro")) {
         thinkingLevel = void 0;
         thinkingBudget = 128;
       } else {
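
The regex gains a `minimal` level, and the fixed 128-token budget path, previously taken for any `tiny` suffix, is now taken only for `minimal` on a `3-pro` model; `tiny` (still accepted by the regex) and all other levels fall through to the `else` branch. A minimal standalone sketch of the resulting mapping follows — `parseThinkingSuffix` is a hypothetical helper, and it assumes the `else` branch (not shown in the hunk) forwards the parsed level as `thinkingLevel`:

```js
// Standalone sketch, for illustration only: parseThinkingSuffix is a
// hypothetical helper; the final branch assumes the hunk's unshown
// `else` forwards the parsed level as thinkingLevel.
function parseThinkingSuffix(model) {
  const match = model.match(/-(low|medium|high|tiny|minimal)-thinking/);
  if (!match) return { model };
  const level = match[1];
  model = model.replace(`-${level}-thinking`, "");
  // Only "minimal" on a 3-pro model becomes a fixed 128-token budget.
  if (level === "minimal" && model.includes("3-pro")) {
    return { model, thinkingBudget: 128 };
  }
  return { model, thinkingLevel: level };
}

parseThinkingSuffix("gemini-3-pro-minimal-thinking");
// => { model: "gemini-3-pro", thinkingBudget: 128 }
parseThinkingSuffix("gemini-3-flash-minimal-thinking");
// => { model: "gemini-3-flash", thinkingLevel: "minimal" }
```

So `-minimal-thinking` on a non-pro model behaves like the other named levels rather than triggering the hard budget.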
@@ -1028,68 +1028,73 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
   }
   async refreshModels(config) {
     const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
-    const thinkingLevelModel = ["gemini-3-pro"];
+    const thinkingLevelModel = ["gemini-3-pro", "gemini-3-flash"];
     const imageResolutionModel = ["gemini-3-pro-image"];
+    const includesAny = /* @__PURE__ */ __name((needles, haystack) => needles.some((name2) => haystack.includes(name2)), "includesAny");
+    const pushExpanded = /* @__PURE__ */ __name((out, base, suffixes) => {
+      for (const suffix of suffixes) {
+        out.push({ ...base, name: base.name + suffix });
+      }
+      out.push(base);
+    }, "pushExpanded");
+    const models = [];
+    let rawModels = [];
     try {
-      const rawModels = await this._requester.getModels(config);
-      if (!rawModels.length) {
+      rawModels = await this._requester.getModels(config);
+      if (rawModels.length === 0) {
         throw new import_error2.ChatLunaError(
           import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR,
           new Error("No model found")
         );
       }
-      const models = [];
-      for (const model of rawModels) {
-        const modelNameLower = model.name.toLowerCase();
-        const info = {
-          name: model.name,
-          maxTokens: model.inputTokenLimit,
-          type: model.name.includes("embedding") ? import_types2.ModelType.embeddings : import_types2.ModelType.llm,
-          capabilities: [
-            import_types2.ModelCapabilities.ImageInput,
-            import_types2.ModelCapabilities.ToolCall
-          ]
-        };
-        if (imageResolutionModel.some(
-          (name2) => modelNameLower.includes(name2)
-        )) {
-          models.push(
-            { ...info, name: model.name + "-2K" },
-            { ...info, name: model.name + "-4K" },
-            info
-          );
-        } else if (thinkingModel.some(
-          (name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
-        )) {
-          if (!model.name.includes("-thinking")) {
-            models.push(
-              { ...info, name: model.name + "-non-thinking" },
-              { ...info, name: model.name + "-thinking" },
-              info
-            );
-          } else {
-            models.push(info);
-          }
-        } else if (thinkingLevelModel.some(
-          (name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
-        )) {
-          models.push(
-            { ...info, name: model.name + "-low-thinking" },
-            { ...info, name: model.name + "-high-thinking" },
-            { ...info, name: model.name + "-tiny-thinking" },
-            info
-          );
-        } else {
-          models.push(info);
-        }
-      }
-      return models;
     } catch (e) {
       if (e instanceof import_error2.ChatLunaError) {
         throw e;
       }
       throw new import_error2.ChatLunaError(import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR, e);
     }
+    for (const model of rawModels) {
+      const modelNameLower = model.name.toLowerCase();
+      const baseInfo = {
+        name: model.name,
+        maxTokens: model.inputTokenLimit,
+        type: modelNameLower.includes("embedding") ? import_types2.ModelType.embeddings : import_types2.ModelType.llm,
+        capabilities: modelNameLower.includes("embedding") ? [] : [import_types2.ModelCapabilities.ImageInput, import_types2.ModelCapabilities.ToolCall]
+      };
+      const isImageResolution = includesAny(
+        imageResolutionModel,
+        modelNameLower
+      );
+      const isThinking = includesAny(thinkingModel, modelNameLower) && !modelNameLower.includes("image");
+      const isThinkingLevel = includesAny(thinkingLevelModel, modelNameLower) && !modelNameLower.includes("image");
+      if (isImageResolution) {
+        pushExpanded(models, baseInfo, ["-2K", "-4K"]);
+        continue;
+      }
+      if (isThinking) {
+        if (modelNameLower.includes("-thinking")) {
+          models.push(baseInfo);
+        } else {
+          pushExpanded(models, baseInfo, [
+            "-non-thinking",
+            "-thinking"
+          ]);
+        }
+        continue;
+      }
+      if (isThinkingLevel) {
+        const suffixes = modelNameLower.includes("3-pro") ? ["-low-thinking", "-high-thinking", "-minimal-thinking"] : [
+          "-low-thinking",
+          "-high-thinking",
+          "-minimal-thinking",
+          "-medium-thinking"
+        ];
+        pushExpanded(models, baseInfo, suffixes);
+        continue;
+      }
+      models.push(baseInfo);
+    }
+    return models;
   }
   _createModel(model) {
     const info = this._modelInfos[model];
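
Beyond adding `gemini-3-flash` to the thinking-level set, the hunk restructures `refreshModels`: the expansion loop moves out of the `try` block so only the network fetch is guarded, embedding models now get an empty `capabilities` array instead of image/tool capabilities, and the repeated `.some()` checks and `models.push(...)` calls are factored into `includesAny` and `pushExpanded`. A short sketch of the helpers and their output (helper bodies copied from the hunk minus the bundler's `__name` wrappers; the model object and the `models/...` name are illustrative):

```js
// Helpers as in the hunk, minus the bundler's __name wrappers; the model
// object is trimmed to a name, and the "models/..." name is illustrative.
const includesAny = (needles, haystack) =>
  needles.some((name) => haystack.includes(name));

const pushExpanded = (out, base, suffixes) => {
  for (const suffix of suffixes) {
    out.push({ ...base, name: base.name + suffix });
  }
  out.push(base); // the unsuffixed base model is always listed as well
};

includesAny(["gemini-3-pro", "gemini-3-flash"], "models/gemini-3-flash"); // true

const models = [];
pushExpanded(models, { name: "gemini-3-flash" }, [
  "-low-thinking",
  "-high-thinking",
  "-minimal-thinking",
  "-medium-thinking"
]);
console.log(models.map((m) => m.name));
// ["gemini-3-flash-low-thinking", "gemini-3-flash-high-thinking",
//  "gemini-3-flash-minimal-thinking", "gemini-3-flash-medium-thinking",
//  "gemini-3-flash"]
```

Note the asymmetry in the suffix lists: `gemini-3-pro` gets only low/high/minimal variants, while other thinking-level models (here `gemini-3-flash`) additionally get `-medium-thinking`.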
package/lib/index.mjs CHANGED
@@ -345,11 +345,11 @@ function prepareModelConfig(params, pluginConfig) {
   if (model.includes("gemini-3")) {
     enabledThinking = true;
     thinkingBudget = void 0;
-    const match = model.match(/-(low|medium|high|tiny)-thinking/);
+    const match = model.match(/-(low|medium|high|tiny|minimal)-thinking/);
     if (match && match[1]) {
       const level = match[1];
       model = model.replace(`-${level}-thinking`, "");
-      if (level === "tiny") {
+      if (level === "minimal" && model.includes("3-pro")) {
         thinkingLevel = void 0;
         thinkingBudget = 128;
       } else {
@@ -1025,68 +1025,73 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
   }
   async refreshModels(config) {
     const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
-    const thinkingLevelModel = ["gemini-3-pro"];
+    const thinkingLevelModel = ["gemini-3-pro", "gemini-3-flash"];
     const imageResolutionModel = ["gemini-3-pro-image"];
+    const includesAny = /* @__PURE__ */ __name((needles, haystack) => needles.some((name2) => haystack.includes(name2)), "includesAny");
+    const pushExpanded = /* @__PURE__ */ __name((out, base, suffixes) => {
+      for (const suffix of suffixes) {
+        out.push({ ...base, name: base.name + suffix });
+      }
+      out.push(base);
+    }, "pushExpanded");
+    const models = [];
+    let rawModels = [];
     try {
-      const rawModels = await this._requester.getModels(config);
-      if (!rawModels.length) {
+      rawModels = await this._requester.getModels(config);
+      if (rawModels.length === 0) {
        throw new ChatLunaError2(
          ChatLunaErrorCode2.MODEL_INIT_ERROR,
          new Error("No model found")
        );
      }
-      const models = [];
-      for (const model of rawModels) {
-        const modelNameLower = model.name.toLowerCase();
-        const info = {
-          name: model.name,
-          maxTokens: model.inputTokenLimit,
-          type: model.name.includes("embedding") ? ModelType.embeddings : ModelType.llm,
-          capabilities: [
-            ModelCapabilities.ImageInput,
-            ModelCapabilities.ToolCall
-          ]
-        };
-        if (imageResolutionModel.some(
-          (name2) => modelNameLower.includes(name2)
-        )) {
-          models.push(
-            { ...info, name: model.name + "-2K" },
-            { ...info, name: model.name + "-4K" },
-            info
-          );
-        } else if (thinkingModel.some(
-          (name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
-        )) {
-          if (!model.name.includes("-thinking")) {
-            models.push(
-              { ...info, name: model.name + "-non-thinking" },
-              { ...info, name: model.name + "-thinking" },
-              info
-            );
-          } else {
-            models.push(info);
-          }
-        } else if (thinkingLevelModel.some(
-          (name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
-        )) {
-          models.push(
-            { ...info, name: model.name + "-low-thinking" },
-            { ...info, name: model.name + "-high-thinking" },
-            { ...info, name: model.name + "-tiny-thinking" },
-            info
-          );
-        } else {
-          models.push(info);
-        }
-      }
-      return models;
     } catch (e) {
       if (e instanceof ChatLunaError2) {
         throw e;
       }
       throw new ChatLunaError2(ChatLunaErrorCode2.MODEL_INIT_ERROR, e);
     }
+    for (const model of rawModels) {
+      const modelNameLower = model.name.toLowerCase();
+      const baseInfo = {
+        name: model.name,
+        maxTokens: model.inputTokenLimit,
+        type: modelNameLower.includes("embedding") ? ModelType.embeddings : ModelType.llm,
+        capabilities: modelNameLower.includes("embedding") ? [] : [ModelCapabilities.ImageInput, ModelCapabilities.ToolCall]
+      };
+      const isImageResolution = includesAny(
+        imageResolutionModel,
+        modelNameLower
+      );
+      const isThinking = includesAny(thinkingModel, modelNameLower) && !modelNameLower.includes("image");
+      const isThinkingLevel = includesAny(thinkingLevelModel, modelNameLower) && !modelNameLower.includes("image");
+      if (isImageResolution) {
+        pushExpanded(models, baseInfo, ["-2K", "-4K"]);
+        continue;
+      }
+      if (isThinking) {
+        if (modelNameLower.includes("-thinking")) {
+          models.push(baseInfo);
+        } else {
+          pushExpanded(models, baseInfo, [
+            "-non-thinking",
+            "-thinking"
+          ]);
+        }
+        continue;
+      }
+      if (isThinkingLevel) {
+        const suffixes = modelNameLower.includes("3-pro") ? ["-low-thinking", "-high-thinking", "-minimal-thinking"] : [
+          "-low-thinking",
+          "-high-thinking",
+          "-minimal-thinking",
+          "-medium-thinking"
+        ];
+        pushExpanded(models, baseInfo, suffixes);
+        continue;
+      }
+      models.push(baseInfo);
+    }
+    return models;
   }
   _createModel(model) {
     const info = this._modelInfos[model];
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.3.15",
+  "version": "1.3.16",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -75,7 +75,7 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.9",
-    "koishi-plugin-chatluna": "^1.3.4",
+    "koishi-plugin-chatluna": "^1.3.6",
     "koishi-plugin-chatluna-storage-service": "^0.0.11"
   },
   "peerDependenciesMeta": {