@kenkaiiii/gg-ai 4.2.90 → 4.2.92
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +283 -211
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +13 -9
- package/dist/index.d.ts +13 -9
- package/dist/index.js +283 -211
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -104,44 +104,62 @@ var EventStream = class {
   }
 };
 var StreamResult = class {
-  events;
   response;
+  buffer = [];
+  done = false;
+  error = null;
   resolveResponse;
   rejectResponse;
-
-  constructor() {
-    this.events = new EventStream();
+  resolveWait = null;
+  constructor(generator) {
     this.response = new Promise((resolve, reject) => {
       this.resolveResponse = resolve;
       this.rejectResponse = reject;
     });
+    this.pump(generator);
   }
-
-
-
-
-
-
-
-
-
-
+  async pump(generator) {
+    try {
+      let next = await generator.next();
+      while (!next.done) {
+        this.buffer.push(next.value);
+        this.resolveWait?.();
+        this.resolveWait = null;
+        next = await generator.next();
+      }
+      this.done = true;
+      this.resolveResponse(next.value);
+      this.resolveWait?.();
+      this.resolveWait = null;
+    } catch (err) {
+      const error = err instanceof Error ? err : new Error(String(err));
+      this.error = error;
+      this.done = true;
+      this.rejectResponse(error);
+      this.resolveWait?.();
+      this.resolveWait = null;
+    }
   }
-  [Symbol.asyncIterator]() {
-
-
+  async *[Symbol.asyncIterator]() {
+    let index = 0;
+    while (true) {
+      while (index < this.buffer.length) {
+        yield this.buffer[index++];
+      }
+      if (this.error) throw this.error;
+      if (this.done) return;
+      await new Promise((r) => {
+        this.resolveWait = r;
+        if (this.buffer.length > index || this.done || this.error) {
+          this.resolveWait = null;
+          r();
+        }
+      });
+    }
   }
   then(onfulfilled, onrejected) {
-    this.drainEvents().catch(() => {
-    });
     return this.response.then(onfulfilled, onrejected);
   }
-  async drainEvents() {
-    if (this.hasConsumer) return;
-    this.hasConsumer = true;
-    for await (const _ of this.events) {
-    }
-  }
 };
 
 // src/providers/anthropic.ts
@@ -383,8 +401,10 @@ function toOpenAIMessages(messages, options) {
       content: parts || textParts || null,
       ...toolCalls?.length ? { tool_calls: toolCalls } : {}
     };
-    if (thinkingParts
-    assistantMsg.reasoning_content = thinkingParts
+    if (thinkingParts) {
+      assistantMsg.reasoning_content = thinkingParts;
+    } else if (options?.provider === "moonshot" && toolCalls?.length) {
+      assistantMsg.reasoning_content = " ";
     }
     out.push(assistantMsg);
     continue;
@@ -451,11 +471,9 @@ function normalizeOpenAIStopReason(reason) {
 
 // src/providers/anthropic.ts
 function streamAnthropic(options) {
-
-  runStream(options, result).catch((err) => result.abort(toError(err)));
-  return result;
+  return new StreamResult(runStream(options));
 }
-async function runStream(options
+async function* runStream(options) {
   const isOAuth = options.apiKey?.startsWith("sk-ant-oat");
   const client = new import_sdk.default({
     ...isOAuth ? { apiKey: null, authToken: options.apiKey } : { apiKey: options.apiKey },
@@ -525,116 +543,185 @@ async function runStream(options, result) {
     ...betaHeaders.length ? { headers: { "anthropic-beta": betaHeaders.join(",") } } : {}
   });
   const contentParts = [];
-
-  let
-
-
-
-
-      result.push({ type: "thinking_delta", text: thinkingDelta });
-  });
-  stream2.on("streamEvent", (event) => {
-    if (event.type === "content_block_start") {
-      if (event.content_block.type === "tool_use") {
-        currentToolId = event.content_block.id;
-        currentToolName = event.content_block.name;
-      }
-      if (event.content_block.type === "server_tool_use") {
-        currentToolId = event.content_block.id;
-        currentToolName = event.content_block.name;
-      }
-    }
-  });
-  stream2.on("inputJson", (delta) => {
-    result.push({
-      type: "toolcall_delta",
-      id: currentToolId,
-      name: currentToolName,
-      argsJson: delta
-    });
-  });
-  stream2.on("contentBlock", (block) => {
-    if (block.type === "text") {
-      contentParts.push({ type: "text", text: block.text });
-    } else if (block.type === "thinking") {
-      contentParts.push({ type: "thinking", text: block.thinking, signature: block.signature });
-    } else if (block.type === "tool_use") {
-      const tc = {
-        type: "tool_call",
-        id: block.id,
-        name: block.name,
-        args: block.input
-      };
-      contentParts.push(tc);
-      result.push({
-        type: "toolcall_done",
-        id: tc.id,
-        name: tc.name,
-        args: tc.args
-      });
-    } else if (block.type === "server_tool_use") {
-      const stc = {
-        type: "server_tool_call",
-        id: block.id,
-        name: block.name,
-        input: block.input
-      };
-      contentParts.push(stc);
-      result.push({
-        type: "server_toolcall",
-        id: stc.id,
-        name: stc.name,
-        input: stc.input
-      });
-    } else {
-      const raw = block;
-      const blockType = raw.type;
-      if (blockType === "web_search_tool_result") {
-        const str = {
-          type: "server_tool_result",
-          toolUseId: raw.tool_use_id,
-          resultType: blockType,
-          data: raw
-        };
-        contentParts.push(str);
-        result.push({
-          type: "server_toolresult",
-          toolUseId: str.toolUseId,
-          resultType: str.resultType,
-          data: str.data
-        });
-      } else {
-        contentParts.push({ type: "raw", data: raw });
-      }
-    }
-  });
+  const blocks = /* @__PURE__ */ new Map();
+  let inputTokens = 0;
+  let outputTokens = 0;
+  let cacheRead;
+  let cacheWrite;
+  let stopReason = null;
   try {
-    const
-
-
-
-
-
-
-
-
-
-
-
-
-    }
-
-
+    for await (const event of stream2) {
+      switch (event.type) {
+        case "message_start": {
+          const usage = event.message.usage;
+          inputTokens = usage.input_tokens;
+          const usageAny = usage;
+          if (usageAny.cache_read_input_tokens != null) {
+            cacheRead = usageAny.cache_read_input_tokens;
+          }
+          if (usageAny.cache_creation_input_tokens != null) {
+            cacheWrite = usageAny.cache_creation_input_tokens;
+          }
+          break;
+        }
+        case "content_block_start": {
+          const block = event.content_block;
+          const idx = event.index;
+          const accum = {
+            type: block.type,
+            text: "",
+            thinking: "",
+            signature: "",
+            toolId: "",
+            toolName: "",
+            argsJson: "",
+            input: void 0,
+            raw: null
+          };
+          if (block.type === "tool_use") {
+            accum.toolId = block.id;
+            accum.toolName = block.name;
+          } else if (block.type === "server_tool_use") {
+            accum.toolId = block.id;
+            accum.toolName = block.name;
+          } else if (block.type === "redacted_thinking") {
+            accum.raw = block;
+          }
+          blocks.set(idx, accum);
+          break;
+        }
+        case "content_block_delta": {
+          const accum = blocks.get(event.index);
+          if (!accum) break;
+          const delta = event.delta;
+          const deltaType = delta.type;
+          if (deltaType === "text_delta") {
+            const text = delta.text;
+            accum.text += text;
+            yield { type: "text_delta", text };
+          } else if (deltaType === "thinking_delta") {
+            const text = delta.thinking;
+            accum.thinking += text;
+            yield { type: "thinking_delta", text };
+          } else if (deltaType === "input_json_delta") {
+            const partialJson = delta.partial_json;
+            accum.argsJson += partialJson;
+            yield {
+              type: "toolcall_delta",
+              id: accum.toolId,
+              name: accum.toolName,
+              argsJson: partialJson
+            };
+          } else if (deltaType === "signature_delta") {
+            accum.signature = delta.signature;
+          }
+          break;
+        }
+        case "content_block_stop": {
+          const accum = blocks.get(event.index);
+          if (!accum) break;
+          if (accum.type === "text") {
+            contentParts.push({ type: "text", text: accum.text });
+          } else if (accum.type === "thinking") {
+            contentParts.push({
+              type: "thinking",
+              text: accum.thinking,
+              signature: accum.signature
+            });
+          } else if (accum.type === "tool_use") {
+            let args = {};
+            try {
+              args = JSON.parse(accum.argsJson);
+            } catch {
+            }
+            const tc = {
+              type: "tool_call",
+              id: accum.toolId,
+              name: accum.toolName,
+              args
+            };
+            contentParts.push(tc);
+            yield {
+              type: "toolcall_done",
+              id: tc.id,
+              name: tc.name,
+              args: tc.args
+            };
+          } else if (accum.type === "server_tool_use") {
+            const stc = {
+              type: "server_tool_call",
+              id: accum.toolId,
+              name: accum.toolName,
+              input: accum.input
+            };
+            contentParts.push(stc);
+            yield {
+              type: "server_toolcall",
+              id: stc.id,
+              name: stc.name,
+              input: stc.input
+            };
+          } else if (accum.type === "redacted_thinking" && accum.raw) {
+            contentParts.push({ type: "raw", data: accum.raw });
+          } else {
+            const msg = stream2.currentMessage;
+            const rawBlock = msg?.content[event.index];
+            if (rawBlock) {
+              const blockType = rawBlock.type;
+              if (blockType === "web_search_tool_result") {
+                const str = {
+                  type: "server_tool_result",
+                  toolUseId: rawBlock.tool_use_id,
+                  resultType: blockType,
+                  data: rawBlock
+                };
+                contentParts.push(str);
+                yield {
+                  type: "server_toolresult",
+                  toolUseId: str.toolUseId,
+                  resultType: str.resultType,
+                  data: str.data
+                };
+              } else {
+                contentParts.push({ type: "raw", data: rawBlock });
+              }
+            }
+          }
+          blocks.delete(event.index);
+          break;
+        }
+        case "message_delta": {
+          const delta = event.delta;
+          if (delta.stop_reason) {
+            stopReason = delta.stop_reason;
+          }
+          const usage = event.usage;
+          if (usage?.output_tokens != null) {
+            outputTokens = usage.output_tokens;
+          }
+          break;
         }
       }
-    }
-    result.push({ type: "done", stopReason });
-    result.complete(response);
+    }
   } catch (err) {
-
-    result.push({ type: "error", error });
-    result.abort(error);
+    throw toError(err);
   }
+  const normalizedStop = normalizeAnthropicStopReason(stopReason);
+  const response = {
+    message: {
+      role: "assistant",
+      content: contentParts.length > 0 ? contentParts : ""
+    },
+    stopReason: normalizedStop,
+    usage: {
+      inputTokens,
+      outputTokens,
+      ...cacheRead != null && { cacheRead },
+      ...cacheWrite != null && { cacheWrite }
+    }
+  };
+  yield { type: "done", stopReason: normalizedStop };
+  return response;
 }
 function toError(err) {
   if (err instanceof import_sdk.default.APIError) {
@@ -652,12 +739,10 @@ function toError(err) {
 // src/providers/openai.ts
 var import_openai = __toESM(require("openai"), 1);
 function streamOpenAI(options) {
-
-  const providerName = options.provider ?? "openai";
-  runStream2(options, result).catch((err) => result.abort(toError2(err, providerName)));
-  return result;
+  return new StreamResult(runStream2(options));
 }
-async function runStream2(options
+async function* runStream2(options) {
+  const providerName = options.provider ?? "openai";
   const client = new import_openai.default({
     apiKey: options.apiKey,
     ...options.baseUrl ? { baseURL: options.baseUrl } : {},
@@ -691,9 +776,14 @@ async function runStream2(options, result) {
   if (usesThinkingParam) {
     params.thinking = options.thinking ? { type: "enabled" } : { type: "disabled" };
   }
-
-
-
+  let stream2;
+  try {
+    stream2 = await client.chat.completions.create(params, {
+      signal: options.signal ?? void 0
+    });
+  } catch (err) {
+    throw toError2(err, providerName);
+  }
   const contentParts = [];
   const toolCallAccum = /* @__PURE__ */ new Map();
   let textAccum = "";
@@ -720,11 +810,11 @@ async function runStream2(options, result) {
     const reasoningContent = delta.reasoning_content;
     if (typeof reasoningContent === "string" && reasoningContent) {
       thinkingAccum += reasoningContent;
-
+      yield { type: "thinking_delta", text: reasoningContent };
     }
     if (delta.content) {
       textAccum += delta.content;
-
+      yield { type: "text_delta", text: delta.content };
     }
     if (delta.tool_calls) {
       for (const tc of delta.tool_calls) {
@@ -741,12 +831,12 @@ async function runStream2(options, result) {
         if (tc.function?.name) accum.name = tc.function.name;
         if (tc.function?.arguments) {
           accum.argsJson += tc.function.arguments;
-
+          yield {
             type: "toolcall_delta",
             id: accum.id,
             name: accum.name,
            argsJson: tc.function.arguments
-          }
+          };
         }
       }
     }
@@ -770,12 +860,12 @@ async function runStream2(options, result) {
       args
     };
     contentParts.push(toolCall);
-
+    yield {
       type: "toolcall_done",
       id: tc.id,
       name: tc.name,
       args
-    }
+    };
   }
   const stopReason = normalizeOpenAIStopReason(finishReason);
   const response = {
@@ -786,8 +876,8 @@ async function runStream2(options, result) {
     stopReason,
     usage: { inputTokens, outputTokens, ...cacheRead > 0 && { cacheRead } }
   };
-
-
+  yield { type: "done", stopReason };
+  return response;
 }
 function toError2(err, provider = "openai") {
   if (err instanceof import_openai.default.APIError) {
@@ -811,11 +901,9 @@ function toError2(err, provider = "openai") {
 var import_node_os = __toESM(require("os"), 1);
 var DEFAULT_BASE_URL = "https://chatgpt.com/backend-api";
 function streamOpenAICodex(options) {
-
-  runStream3(options, result).catch((err) => result.abort(toError3(err)));
-  return result;
+  return new StreamResult(runStream3(options));
 }
-async function runStream3(options
+async function* runStream3(options) {
   const baseUrl = (options.baseUrl || DEFAULT_BASE_URL).replace(/\/+$/, "");
   const url = `${baseUrl}/codex/responses`;
   const { system, input } = toCodexInput(options.messages);
@@ -892,11 +980,11 @@ Hint: Codex models require a ChatGPT Plus ($20/mo) or Pro ($200/mo) subscription
     if (type === "response.output_text.delta") {
       const delta = event.delta;
       textAccum += delta;
-
+      yield { type: "text_delta", text: delta };
     }
     if (type === "response.reasoning_summary_text.delta") {
       const delta = event.delta;
-
+      yield { type: "thinking_delta", text: delta };
     }
     if (type === "response.output_item.added") {
       const item = event.item;
@@ -914,12 +1002,12 @@ Hint: Codex models require a ChatGPT Plus ($20/mo) or Pro ($200/mo) subscription
       for (const [key, tc] of toolCalls) {
         if (key.endsWith(`|${itemId}`)) {
           tc.argsJson += delta;
-
+          yield {
             type: "toolcall_delta",
             id: tc.id,
             name: tc.name,
            argsJson: delta
-          }
+          };
           break;
         }
       }
@@ -947,12 +1035,12 @@ Hint: Codex models require a ChatGPT Plus ($20/mo) or Pro ($200/mo) subscription
          args = JSON.parse(tc.argsJson);
        } catch {
        }
-
+        yield {
          type: "toolcall_done",
          id: tc.id,
          name: tc.name,
          args
-        }
+        };
      }
    }
  }
@@ -992,8 +1080,8 @@ Hint: Codex models require a ChatGPT Plus ($20/mo) or Pro ($200/mo) subscription
     stopReason,
     usage: { inputTokens, outputTokens }
   };
-
-
+  yield { type: "done", stopReason };
+  return streamResponse;
 }
 async function* parseSSE(body) {
   const reader = body.getReader();
@@ -1107,13 +1195,6 @@ function toCodexTools(tools) {
     strict: null
   }));
 }
-function toError3(err) {
-  if (err instanceof ProviderError) return err;
-  if (err instanceof Error) {
-    return new ProviderError("openai", err.message, { cause: err });
-  }
-  return new ProviderError("openai", String(err));
-}
 
 // src/provider-registry.ts
 var ProviderRegistryImpl = class {
@@ -1187,32 +1268,28 @@ function stream(options) {
   return entry.stream(options);
 }
 function streamGLMWithFallback(options) {
-
-  runGLMWithFallback(options, result).catch((err) => {
-    result.abort(err instanceof Error ? err : new Error(String(err)));
-  });
-  return result;
+  return new StreamResult(runGLMWithFallback(options));
 }
-async function runGLMWithFallback(options
-  const
-
+async function* runGLMWithFallback(options) {
+  const coding = streamOpenAI({ ...options, baseUrl: GLM_CODING_BASE_URL });
+  coding.response.catch(() => {
   });
   try {
-    for await (const event of
-
+    for await (const event of coding) {
+      yield event;
     }
-
+    return await coding.response;
   } catch {
-    const
-
+    const regular = streamOpenAI({ ...options, baseUrl: GLM_REGULAR_BASE_URL });
+    regular.response.catch(() => {
     });
     try {
-      for await (const event of
-
+      for await (const event of regular) {
+        yield event;
       }
-
+      return await regular.response;
     } catch (fallbackErr) {
-
+      throw fallbackErr instanceof Error ? fallbackErr : new Error(String(fallbackErr));
     }
   }
 }
@@ -1243,31 +1320,29 @@ function chunkText(text, size) {
   }
   return chunks.length > 0 ? chunks : [""];
 }
-function simulateStream(message, stopReason,
+async function* simulateStream(message, stopReason, signal, cacheUsage) {
   if (signal?.aborted) {
-
-    return;
+    throw new Error("aborted");
   }
   const content = typeof message.content === "string" ? message.content ? [{ type: "text", text: message.content }] : [] : message.content;
   let outputChars = 0;
   for (const part of content) {
     if (signal?.aborted) {
-
-      return;
+      throw new Error("aborted");
     }
     if (part.type === "text") {
       const chunks = chunkText(part.text, DEFAULT_CHUNK_SIZE);
       for (const chunk of chunks) {
-
+        yield { type: "text_delta", text: chunk };
         outputChars += chunk.length;
       }
     } else if (part.type === "thinking") {
-
+      yield { type: "thinking_delta", text: part.text };
       outputChars += part.text.length;
     } else if (part.type === "tool_call") {
       const argsJson = JSON.stringify(part.args);
-
-
+      yield { type: "toolcall_delta", id: part.id, name: part.name, argsJson };
+      yield { type: "toolcall_done", id: part.id, name: part.name, args: part.args };
       outputChars += argsJson.length;
     }
   }
@@ -1278,8 +1353,8 @@ function simulateStream(message, stopReason, result, signal, cacheUsage) {
     ...cacheUsage?.cacheRead ? { cacheRead: cacheUsage.cacheRead } : {},
     ...cacheUsage?.cacheWrite ? { cacheWrite: cacheUsage.cacheWrite } : {}
   };
-
-
+  yield { type: "done", stopReason };
+  return { message, stopReason, usage };
 }
 function computeCacheUsage(current, previous) {
   if (!previous) {
@@ -1351,24 +1426,21 @@ function registerPalsuProvider(config) {
       state.callCount++;
       const ms = modelStates.get(options.model);
       const responseDef = (ms && ms.responses.length > 0 ? ms.responses.shift() : void 0) ?? (responses.length > 0 ? responses.shift() : void 0) ?? ms?.defaultResponse ?? defaultResponse;
-      const result = new StreamResult();
       let cacheUsage;
       if (enableCache) {
        const serialized = JSON.stringify(options.messages);
        cacheUsage = computeCacheUsage(serialized, lastMessagesSerialized);
        lastMessagesSerialized = serialized;
      }
-      const
-
-
-
-
-
-
-
-
-      );
-      return result;
+      const gen = (async function* () {
+        const rawMessage = typeof responseDef === "function" ? responseDef(options.messages, options, state) : responseDef;
+        const message = await Promise.resolve(rawMessage);
+        const hasToolCalls = Array.isArray(message.content) && message.content.some((p) => p.type === "tool_call");
+        const explicitStop = message._stopReason;
+        const stopReason = explicitStop ?? (hasToolCalls ? "tool_use" : "end_turn");
+        return yield* simulateStream(message, stopReason, options.signal, cacheUsage);
+      })();
+      return new StreamResult(gen);
     }
   });
   return handle;
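
Usage note: the change above replaces the push-based StreamResult (EventStream plus result.push/complete/abort) with a StreamResult that pumps an async generator into an internal buffer. A minimal consumption sketch, assuming the public stream helpers return the StreamResult shown in the first hunk; the option values (apiKey, model, messages) and the console handling are illustrative placeholders, not part of the package:

// The same StreamResult can be iterated for streamed events and awaited for
// the final response: pump() buffers yielded events, the async iterator
// replays them, and then() delegates to the internal response promise.
const result = streamAnthropic({ apiKey, model, messages }); // placeholder options
for await (const event of result) {
  if (event.type === "text_delta") process.stdout.write(event.text);
  else if (event.type === "toolcall_done") console.log(event.name, event.args);
  else if (event.type === "done") console.log("stop:", event.stopReason);
}
// Errors thrown by the generator reject the response promise via rejectResponse.
const { message, stopReason, usage } = await result;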