@chatluna/v1-shared-adapter 1.0.12 → 1.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +13 -10
- package/lib/index.mjs +13 -10
- package/package.json +2 -2
package/lib/index.cjs
CHANGED
|
@@ -512,6 +512,19 @@ async function* processStreamResponse(requestContext, iterator) {
|
|
|
512
512
|
);
|
|
513
513
|
}
|
|
514
514
|
const choice = data.choices?.[0];
|
|
515
|
+
if (data.usage) {
|
|
516
|
+
yield new import_outputs.ChatGenerationChunk({
|
|
517
|
+
message: new import_messages2.AIMessageChunk(""),
|
|
518
|
+
text: "",
|
|
519
|
+
generationInfo: {
|
|
520
|
+
tokenUsage: {
|
|
521
|
+
promptTokens: data.usage.prompt_tokens,
|
|
522
|
+
completionTokens: data.usage.completion_tokens,
|
|
523
|
+
totalTokens: data.usage.total_tokens
|
|
524
|
+
}
|
|
525
|
+
}
|
|
526
|
+
});
|
|
527
|
+
}
|
|
515
528
|
if (!choice) continue;
|
|
516
529
|
const { delta } = choice;
|
|
517
530
|
const messageChunk = convertDeltaToMessageChunk(delta, defaultRole);
|
|
@@ -524,16 +537,6 @@ async function* processStreamResponse(requestContext, iterator) {
|
|
|
524
537
|
message: messageChunk,
|
|
525
538
|
text: messageChunk.content
|
|
526
539
|
});
|
|
527
|
-
if (data.usage) {
|
|
528
|
-
yield new import_outputs.ChatGenerationChunk({
|
|
529
|
-
message: new import_messages2.AIMessageChunk(""),
|
|
530
|
-
text: "",
|
|
531
|
-
generationInfo: {
|
|
532
|
-
tokenUsage: data.usage
|
|
533
|
-
}
|
|
534
|
-
});
|
|
535
|
-
continue;
|
|
536
|
-
}
|
|
537
540
|
} catch (e) {
|
|
538
541
|
if (errorCount > 5) {
|
|
539
542
|
requestContext.modelRequester.logger.error(
|
package/lib/index.mjs
CHANGED
|
@@ -482,6 +482,19 @@ async function* processStreamResponse(requestContext, iterator) {
|
|
|
482
482
|
);
|
|
483
483
|
}
|
|
484
484
|
const choice = data.choices?.[0];
|
|
485
|
+
if (data.usage) {
|
|
486
|
+
yield new ChatGenerationChunk({
|
|
487
|
+
message: new AIMessageChunk2(""),
|
|
488
|
+
text: "",
|
|
489
|
+
generationInfo: {
|
|
490
|
+
tokenUsage: {
|
|
491
|
+
promptTokens: data.usage.prompt_tokens,
|
|
492
|
+
completionTokens: data.usage.completion_tokens,
|
|
493
|
+
totalTokens: data.usage.total_tokens
|
|
494
|
+
}
|
|
495
|
+
}
|
|
496
|
+
});
|
|
497
|
+
}
|
|
485
498
|
if (!choice) continue;
|
|
486
499
|
const { delta } = choice;
|
|
487
500
|
const messageChunk = convertDeltaToMessageChunk(delta, defaultRole);
|
|
@@ -494,16 +507,6 @@ async function* processStreamResponse(requestContext, iterator) {
|
|
|
494
507
|
message: messageChunk,
|
|
495
508
|
text: messageChunk.content
|
|
496
509
|
});
|
|
497
|
-
if (data.usage) {
|
|
498
|
-
yield new ChatGenerationChunk({
|
|
499
|
-
message: new AIMessageChunk2(""),
|
|
500
|
-
text: "",
|
|
501
|
-
generationInfo: {
|
|
502
|
-
tokenUsage: data.usage
|
|
503
|
-
}
|
|
504
|
-
});
|
|
505
|
-
continue;
|
|
506
|
-
}
|
|
507
510
|
} catch (e) {
|
|
508
511
|
if (errorCount > 5) {
|
|
509
512
|
requestContext.modelRequester.logger.error(
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@chatluna/v1-shared-adapter",
|
|
3
3
|
"description": "chatluna shared adapter",
|
|
4
|
-
"version": "1.0.12",
|
|
4
|
+
"version": "1.0.14",
|
|
5
5
|
"main": "lib/index.cjs",
|
|
6
6
|
"module": "lib/index.mjs",
|
|
7
7
|
"typings": "lib/index.d.ts",
|
|
@@ -70,6 +70,6 @@
|
|
|
70
70
|
},
|
|
71
71
|
"peerDependencies": {
|
|
72
72
|
"koishi": "^4.18.9",
|
|
73
|
-
"koishi-plugin-chatluna": "^1.3.0-alpha.
|
|
73
|
+
"koishi-plugin-chatluna": "^1.3.0-alpha.60"
|
|
74
74
|
}
|
|
75
75
|
}
|