koishi-plugin-chatluna-google-gemini-adapter 1.0.0-beta.21 → 1.0.0-beta.23

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/lib/index.cjs CHANGED
@@ -580,7 +580,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  requester: this._requester,
  model,
  modelMaxContextSize: info.maxTokens,
- maxTokens: this._config.maxTokens,
+ maxTokenLimit: this._config.maxTokens,
  timeout: this._config.timeout,
  temperature: this._config.temperature,
  maxRetries: this._config.maxRetries,
@@ -637,7 +637,7 @@ var Config = import_koishi.Schema.intersect([
  maxTokens: import_koishi.Schema.number().description(
  "回复的最大 Token 数(16~2097000,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)"
  ).min(16).max(2097e3).step(16).default(8064),
- temperature: import_koishi.Schema.percent().description("回复温度,越高越随机").min(0).max(1).step(0.1).default(0.8)
+ temperature: import_koishi.Schema.percent().description("回复温度,越高越随机").min(0).max(2).step(0.1).default(0.8)
  }).description("模型设置")
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  ]);
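The functional change in the first hunk is a renamed option key: the configured token cap is now handed to the chatluna model layer as `maxTokenLimit` instead of `maxTokens`, while the user-facing `maxTokens` setting keeps its name. This presumably tracks a matching rename in koishi-plugin-chatluna core, which would also explain the peer dependency bump to ^1.0.0-beta.87 in package.json further down. The same change appears again in the ESM build below.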
package/lib/index.mjs CHANGED
@@ -573,7 +573,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  requester: this._requester,
  model,
  modelMaxContextSize: info.maxTokens,
- maxTokens: this._config.maxTokens,
+ maxTokenLimit: this._config.maxTokens,
  timeout: this._config.timeout,
  temperature: this._config.temperature,
  maxRetries: this._config.maxRetries,
@@ -630,7 +630,7 @@ var Config = Schema.intersect([
  maxTokens: Schema.number().description(
  "回复的最大 Token 数(16~2097000,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)"
  ).min(16).max(2097e3).step(16).default(8064),
- temperature: Schema.percent().description("回复温度,越高越随机").min(0).max(1).step(0.1).default(0.8)
+ temperature: Schema.percent().description("回复温度,越高越随机").min(0).max(2).step(0.1).default(0.8)
  }).description("模型设置")
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  ]);
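The second hunk in both builds relaxes the temperature bound in the Config schema from `.max(1)` to `.max(2)`: the 回复温度 option ("reply temperature, higher is more random") can now be set anywhere from 0 to 2 instead of 0 to 1, with the default unchanged at 0.8. A minimal, self-contained sketch of what the new bound accepts, using the same koishi `Schema` API that appears in the compiled output (the variable name and sample values are illustrative only):

    import { Schema } from 'koishi'

    // The temperature field as defined in beta.23; beta.21 ended the chain in .max(1).
    const temperature = Schema.percent().min(0).max(2).step(0.1).default(0.8)

    // Schemastery schemas are callable validators, so the bound can be checked directly.
    console.log(temperature(1.6)) // 1.6: allowed by the new range, rejected under the old .max(1)

    try {
        temperature(2.5)
    } catch (error) {
        console.log(error.message) // values above the new maximum still fail validation
    }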
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.0.0-beta.21",
+ "version": "1.0.0-beta.23",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -59,7 +59,7 @@
  },
  "peerDependencies": {
  "koishi": "^4.17.12",
- "koishi-plugin-chatluna": "^1.0.0-beta.86"
+ "koishi-plugin-chatluna": "^1.0.0-beta.87"
  },
  "koishi": {
  "description": {