koishi-plugin-chatluna-google-gemini-adapter 1.0.0-beta.16 → 1.0.0-beta.19

package/lib/index.cjs CHANGED
@@ -404,24 +404,33 @@ var GeminiRequester = class extends import_api.ModelRequester {
   }
   async embeddings(params) {
     let data;
+    if (typeof params.input === "string") {
+      params.input = [params.input];
+    }
     try {
       const response = await this._post(
-        `models/${params.model}:embedContent`,
+        `models/${params.model}:batchEmbedContents`,
         {
-          model: `models/${params.model}`,
-          content: {
-            parts: [
-              {
-                text: params.input
+          requests: params.input.map((input) => {
+            return {
+              model: `models/${params.model}`,
+              content: {
+                parts: [
+                  {
+                    text: input
+                  }
+                ]
               }
-            ]
-          }
+            };
+          })
         }
       );
       data = await response.text();
       data = JSON.parse(data);
-      if (data.embedding && data.embedding.values?.length > 0) {
-        return data.embedding.values;
+      if (data.embeddings && data.embeddings.length > 0) {
+        return data.embeddings.map((embedding) => {
+          return embedding.values;
+        });
       }
       throw new Error(
         "error when calling gemini embeddings, Result: " + JSON.stringify(data)
@@ -529,7 +538,18 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
     return rawModels.map((model) => model.replace("models/", "")).map((model) => {
       return {
         name: model,
-        maxTokens: model.includes("vision") ? 12288 : 30720,
+        maxTokens: ((model2) => {
+          if (model2.includes("gemini-1.5-pro")) {
+            return 1048576;
+          }
+          if (model2.includes("gemini-1.5-flash")) {
+            return 2097152;
+          }
+          if (model2.includes("gemini-1.0-pro")) {
+            return 30720;
+          }
+          return 30720;
+        })(model),
         type: model.includes("embedding") ? import_types.ModelType.embeddings : import_types.ModelType.llm,
         functionCall: !model.includes("vision"),
         supportMode: ["all"]
@@ -559,6 +579,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
       modelInfo: info,
       requester: this._requester,
       model,
+      modelMaxContextSize: info.maxTokens,
       maxTokens: this._config.maxTokens,
       timeout: this._config.timeout,
       temperature: this._config.temperature,
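Forwarding modelMaxContextSize alongside the user-configured maxTokens lets the chatluna core distinguish the model's full context window from the reply cap. A hypothetical illustration of how a consumer could combine the two values (names are illustrative, not the actual chatluna implementation):

    // Hypothetical helper: clamp the reply budget to whatever the
    // context window can still fit after the prompt.
    function replyBudget(
        modelMaxContextSize: number,
        promptTokens: number,
        configuredMaxTokens: number
    ): number {
        return Math.max(0, Math.min(configuredMaxTokens, modelMaxContextSize - promptTokens));
    }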
@@ -614,8 +635,8 @@ var Config = import_koishi.Schema.intersect([
   }).description("请求设置"),
   import_koishi.Schema.object({
     maxTokens: import_koishi.Schema.number().description(
-      "回复的最大 Token 数(16~32800,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)"
-    ).min(16).max(128e3).step(16).default(1024),
+      "回复的最大 Token 数(16~2097000,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)"
+    ).min(16).max(2097e3).step(16).default(8064),
     temperature: import_koishi.Schema.percent().description("回复温度,越高越随机").min(0).max(1).step(0.1).default(0.8)
   }).description("模型设置")
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
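The config schema widens accordingly: the maxTokens ceiling moves from 128e3 (128,000) to 2097e3 (2,097,000), matching the largest entry in the per-model table, and the default rises from 1024 to 8064; both bounds and the default remain multiples of 16 as step(16) requires. The description string reads, roughly: "maximum reply tokens (16~2097000, must be a multiple of 16); raising it past 512 is only advisable if your model's own limit is 8000 or more". Written out without exponent notation, the schema is equivalent to:

    // Equivalent schema with the bound spelled out (2097e3 === 2097000).
    import { Schema } from 'koishi'

    const maxTokens = Schema.number()
        .min(16)
        .max(2097000)
        .step(16)
        .default(8064)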
package/lib/index.d.ts CHANGED
@@ -73,9 +73,9 @@ export interface ChatCompletionMessageFunctionCall {
     args?: any;
 }
 export interface CreateEmbeddingResponse {
-    embedding: {
+    embeddings: {
         values: number[];
-    };
+    }[];
 }
 export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
 export function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
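CreateEmbeddingResponse now models the batch response: an array of embedding objects instead of a single one, so callers map over it. A minimal usage sketch under the new type (the import path assumes the package's public typings export the interface, as this .d.ts does):

    import type { CreateEmbeddingResponse } from 'koishi-plugin-chatluna-google-gemini-adapter'

    // One vector per input, in request order.
    function toVectors(response: CreateEmbeddingResponse): number[][] {
        return response.embeddings.map((e) => e.values)
    }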
package/lib/index.mjs CHANGED
@@ -397,24 +397,33 @@ var GeminiRequester = class extends ModelRequester {
   }
   async embeddings(params) {
     let data;
+    if (typeof params.input === "string") {
+      params.input = [params.input];
+    }
     try {
       const response = await this._post(
-        `models/${params.model}:embedContent`,
+        `models/${params.model}:batchEmbedContents`,
         {
-          model: `models/${params.model}`,
-          content: {
-            parts: [
-              {
-                text: params.input
+          requests: params.input.map((input) => {
+            return {
+              model: `models/${params.model}`,
+              content: {
+                parts: [
+                  {
+                    text: input
+                  }
+                ]
               }
-            ]
-          }
+            };
+          })
         }
       );
       data = await response.text();
       data = JSON.parse(data);
-      if (data.embedding && data.embedding.values?.length > 0) {
-        return data.embedding.values;
+      if (data.embeddings && data.embeddings.length > 0) {
+        return data.embeddings.map((embedding) => {
+          return embedding.values;
+        });
       }
       throw new Error(
         "error when calling gemini embeddings, Result: " + JSON.stringify(data)
@@ -522,7 +531,18 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
     return rawModels.map((model) => model.replace("models/", "")).map((model) => {
       return {
         name: model,
-        maxTokens: model.includes("vision") ? 12288 : 30720,
+        maxTokens: ((model2) => {
+          if (model2.includes("gemini-1.5-pro")) {
+            return 1048576;
+          }
+          if (model2.includes("gemini-1.5-flash")) {
+            return 2097152;
+          }
+          if (model2.includes("gemini-1.0-pro")) {
+            return 30720;
+          }
+          return 30720;
+        })(model),
         type: model.includes("embedding") ? ModelType.embeddings : ModelType.llm,
         functionCall: !model.includes("vision"),
         supportMode: ["all"]
@@ -552,6 +572,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
       modelInfo: info,
       requester: this._requester,
       model,
+      modelMaxContextSize: info.maxTokens,
       maxTokens: this._config.maxTokens,
       timeout: this._config.timeout,
       temperature: this._config.temperature,
@@ -607,8 +628,8 @@ var Config = Schema.intersect([
   }).description("请求设置"),
   Schema.object({
     maxTokens: Schema.number().description(
-      "回复的最大 Token 数(16~32800,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)"
-    ).min(16).max(128e3).step(16).default(1024),
+      "回复的最大 Token 数(16~2097000,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)"
+    ).min(16).max(2097e3).step(16).default(8064),
     temperature: Schema.percent().description("回复温度,越高越随机").min(0).max(1).step(0.1).default(0.8)
   }).description("模型设置")
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.0.0-beta.16",
+  "version": "1.0.0-beta.19",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -48,18 +48,18 @@
     "adapter"
   ],
   "dependencies": {
-    "@langchain/core": "^0.2.12",
+    "@langchain/core": "^0.2.17",
     "@streamparser/json": "^0.0.21",
     "zod": "^3.24.0-canary.20240701T200529",
     "zod-to-json-schema": "^3.23.1"
   },
   "devDependencies": {
     "atsc": "^2.1.0",
-    "koishi": "^4.17.9"
+    "koishi": "^4.17.10"
   },
   "peerDependencies": {
     "koishi": "^4.17.9",
-    "koishi-plugin-chatluna": "^1.0.0-beta.56"
+    "koishi-plugin-chatluna": "^1.0.0-beta.62"
   },
   "koishi": {
     "description": {