@jsonstudio/llms 0.4.4 → 0.4.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/conversion/codec-registry.js +11 -1
- package/dist/conversion/codecs/anthropic-openai-codec.d.ts +13 -0
- package/dist/conversion/codecs/anthropic-openai-codec.js +18 -473
- package/dist/conversion/codecs/gemini-openai-codec.js +91 -48
- package/dist/conversion/codecs/responses-openai-codec.js +9 -2
- package/dist/conversion/hub/format-adapters/anthropic-format-adapter.js +3 -0
- package/dist/conversion/hub/format-adapters/chat-format-adapter.js +3 -0
- package/dist/conversion/hub/format-adapters/gemini-format-adapter.js +3 -0
- package/dist/conversion/hub/format-adapters/responses-format-adapter.d.ts +19 -0
- package/dist/conversion/hub/format-adapters/responses-format-adapter.js +9 -0
- package/dist/conversion/hub/node-support.js +3 -1
- package/dist/conversion/hub/pipeline/hub-pipeline.js +37 -32
- package/dist/conversion/hub/response/provider-response.js +1 -1
- package/dist/conversion/hub/response/response-mappers.js +1 -1
- package/dist/conversion/hub/response/response-runtime.js +109 -10
- package/dist/conversion/hub/semantic-mappers/anthropic-mapper.js +70 -156
- package/dist/conversion/hub/semantic-mappers/chat-mapper.js +63 -52
- package/dist/conversion/hub/semantic-mappers/gemini-mapper.js +76 -143
- package/dist/conversion/hub/semantic-mappers/responses-mapper.js +40 -160
- package/dist/conversion/hub/standardized-bridge.js +3 -0
- package/dist/conversion/hub/tool-governance/rules.js +2 -2
- package/dist/conversion/index.d.ts +5 -0
- package/dist/conversion/index.js +5 -0
- package/dist/conversion/pipeline/codecs/v2/anthropic-openai-pipeline.d.ts +12 -0
- package/dist/conversion/pipeline/codecs/v2/anthropic-openai-pipeline.js +100 -0
- package/dist/conversion/pipeline/codecs/v2/openai-openai-pipeline.d.ts +15 -0
- package/dist/conversion/pipeline/codecs/v2/openai-openai-pipeline.js +174 -0
- package/dist/conversion/pipeline/codecs/v2/responses-openai-pipeline.d.ts +14 -0
- package/dist/conversion/pipeline/codecs/v2/responses-openai-pipeline.js +166 -0
- package/dist/conversion/pipeline/codecs/v2/shared/openai-chat-helpers.d.ts +13 -0
- package/dist/conversion/pipeline/codecs/v2/shared/openai-chat-helpers.js +66 -0
- package/dist/conversion/pipeline/hooks/adapter-context.d.ts +7 -0
- package/dist/conversion/pipeline/hooks/adapter-context.js +18 -0
- package/dist/conversion/pipeline/hooks/protocol-hooks.d.ts +67 -0
- package/dist/conversion/pipeline/hooks/protocol-hooks.js +1 -0
- package/dist/conversion/pipeline/index.d.ts +35 -0
- package/dist/conversion/pipeline/index.js +103 -0
- package/dist/conversion/pipeline/meta/meta-bag.d.ts +20 -0
- package/dist/conversion/pipeline/meta/meta-bag.js +81 -0
- package/dist/conversion/pipeline/schema/canonical-chat.d.ts +18 -0
- package/dist/conversion/pipeline/schema/canonical-chat.js +1 -0
- package/dist/conversion/pipeline/schema/index.d.ts +1 -0
- package/dist/conversion/pipeline/schema/index.js +1 -0
- package/dist/conversion/responses/responses-openai-bridge.d.ts +48 -0
- package/dist/conversion/responses/responses-openai-bridge.js +157 -1146
- package/dist/conversion/shared/anthropic-message-utils.d.ts +12 -0
- package/dist/conversion/shared/anthropic-message-utils.js +587 -0
- package/dist/conversion/shared/bridge-actions.d.ts +39 -0
- package/dist/conversion/shared/bridge-actions.js +709 -0
- package/dist/conversion/shared/bridge-conversation-store.d.ts +41 -0
- package/dist/conversion/shared/bridge-conversation-store.js +279 -0
- package/dist/conversion/shared/bridge-id-utils.d.ts +7 -0
- package/dist/conversion/shared/bridge-id-utils.js +42 -0
- package/dist/conversion/shared/bridge-instructions.d.ts +1 -0
- package/dist/conversion/shared/bridge-instructions.js +113 -0
- package/dist/conversion/shared/bridge-message-types.d.ts +39 -0
- package/dist/conversion/shared/bridge-message-types.js +1 -0
- package/dist/conversion/shared/bridge-message-utils.d.ts +22 -0
- package/dist/conversion/shared/bridge-message-utils.js +473 -0
- package/dist/conversion/shared/bridge-metadata.d.ts +1 -0
- package/dist/conversion/shared/bridge-metadata.js +1 -0
- package/dist/conversion/shared/bridge-policies.d.ts +18 -0
- package/dist/conversion/shared/bridge-policies.js +276 -0
- package/dist/conversion/shared/bridge-request-adapter.d.ts +28 -0
- package/dist/conversion/shared/bridge-request-adapter.js +430 -0
- package/dist/conversion/shared/chat-output-normalizer.d.ts +4 -0
- package/dist/conversion/shared/chat-output-normalizer.js +56 -0
- package/dist/conversion/shared/chat-request-filters.js +24 -1
- package/dist/conversion/shared/gemini-tool-utils.d.ts +5 -0
- package/dist/conversion/shared/gemini-tool-utils.js +130 -0
- package/dist/conversion/shared/metadata-passthrough.d.ts +11 -0
- package/dist/conversion/shared/metadata-passthrough.js +57 -0
- package/dist/conversion/shared/output-content-normalizer.d.ts +12 -0
- package/dist/conversion/shared/output-content-normalizer.js +119 -0
- package/dist/conversion/shared/reasoning-normalizer.d.ts +21 -0
- package/dist/conversion/shared/reasoning-normalizer.js +368 -0
- package/dist/conversion/shared/reasoning-tool-normalizer.d.ts +12 -0
- package/dist/conversion/shared/reasoning-tool-normalizer.js +132 -0
- package/dist/conversion/shared/reasoning-tool-parser.d.ts +10 -0
- package/dist/conversion/shared/reasoning-tool-parser.js +95 -0
- package/dist/conversion/shared/reasoning-utils.d.ts +2 -0
- package/dist/conversion/shared/reasoning-utils.js +42 -0
- package/dist/conversion/shared/responses-conversation-store.js +5 -11
- package/dist/conversion/shared/responses-message-utils.d.ts +15 -0
- package/dist/conversion/shared/responses-message-utils.js +206 -0
- package/dist/conversion/shared/responses-output-builder.d.ts +15 -0
- package/dist/conversion/shared/responses-output-builder.js +179 -0
- package/dist/conversion/shared/responses-output-utils.d.ts +7 -0
- package/dist/conversion/shared/responses-output-utils.js +108 -0
- package/dist/conversion/shared/responses-request-adapter.d.ts +28 -0
- package/dist/conversion/shared/responses-request-adapter.js +9 -40
- package/dist/conversion/shared/responses-response-utils.d.ts +3 -0
- package/dist/conversion/shared/responses-response-utils.js +209 -0
- package/dist/conversion/shared/responses-tool-utils.d.ts +12 -0
- package/dist/conversion/shared/responses-tool-utils.js +90 -0
- package/dist/conversion/shared/responses-types.d.ts +33 -0
- package/dist/conversion/shared/responses-types.js +1 -0
- package/dist/conversion/shared/tool-call-utils.d.ts +11 -0
- package/dist/conversion/shared/tool-call-utils.js +56 -0
- package/dist/conversion/shared/tool-governor.js +5 -0
- package/dist/conversion/shared/tool-mapping.d.ts +19 -0
- package/dist/conversion/shared/tool-mapping.js +124 -0
- package/dist/conversion/shared/tool-normalizers.d.ts +4 -0
- package/dist/conversion/shared/tool-normalizers.js +84 -0
- package/dist/router/virtual-router/bootstrap.js +18 -3
- package/dist/router/virtual-router/provider-registry.js +4 -2
- package/dist/router/virtual-router/types.d.ts +212 -0
- package/dist/sse/index.d.ts +38 -2
- package/dist/sse/index.js +27 -0
- package/dist/sse/json-to-sse/anthropic-json-to-sse-converter.d.ts +14 -0
- package/dist/sse/json-to-sse/anthropic-json-to-sse-converter.js +106 -73
- package/dist/sse/json-to-sse/chat-json-to-sse-converter.js +6 -2
- package/dist/sse/json-to-sse/gemini-json-to-sse-converter.d.ts +14 -0
- package/dist/sse/json-to-sse/gemini-json-to-sse-converter.js +99 -0
- package/dist/sse/json-to-sse/index.d.ts +7 -0
- package/dist/sse/json-to-sse/index.js +2 -0
- package/dist/sse/json-to-sse/sequencers/anthropic-sequencer.d.ts +13 -0
- package/dist/sse/json-to-sse/sequencers/anthropic-sequencer.js +150 -0
- package/dist/sse/json-to-sse/sequencers/chat-sequencer.d.ts +39 -0
- package/dist/sse/json-to-sse/sequencers/chat-sequencer.js +49 -3
- package/dist/sse/json-to-sse/sequencers/gemini-sequencer.d.ts +10 -0
- package/dist/sse/json-to-sse/sequencers/gemini-sequencer.js +95 -0
- package/dist/sse/json-to-sse/sequencers/responses-sequencer.js +31 -5
- package/dist/sse/registry/sse-codec-registry.d.ts +32 -0
- package/dist/sse/registry/sse-codec-registry.js +30 -1
- package/dist/sse/shared/reasoning-dispatcher.d.ts +10 -0
- package/dist/sse/shared/reasoning-dispatcher.js +25 -0
- package/dist/sse/shared/responses-output-normalizer.d.ts +12 -0
- package/dist/sse/shared/responses-output-normalizer.js +45 -0
- package/dist/sse/shared/serializers/anthropic-event-serializer.d.ts +2 -0
- package/dist/sse/shared/serializers/anthropic-event-serializer.js +9 -0
- package/dist/sse/shared/serializers/gemini-event-serializer.d.ts +2 -0
- package/dist/sse/shared/serializers/gemini-event-serializer.js +5 -0
- package/dist/sse/shared/serializers/index.d.ts +41 -0
- package/dist/sse/shared/serializers/index.js +2 -0
- package/dist/sse/shared/writer.d.ts +127 -0
- package/dist/sse/shared/writer.js +37 -1
- package/dist/sse/sse-to-json/anthropic-sse-to-json-converter.d.ts +11 -0
- package/dist/sse/sse-to-json/anthropic-sse-to-json-converter.js +92 -127
- package/dist/sse/sse-to-json/builders/anthropic-response-builder.d.ts +16 -0
- package/dist/sse/sse-to-json/builders/anthropic-response-builder.js +151 -0
- package/dist/sse/sse-to-json/builders/response-builder.d.ts +165 -0
- package/dist/sse/sse-to-json/builders/response-builder.js +27 -6
- package/dist/sse/sse-to-json/chat-sse-to-json-converter.d.ts +114 -0
- package/dist/sse/sse-to-json/chat-sse-to-json-converter.js +79 -3
- package/dist/sse/sse-to-json/gemini-sse-to-json-converter.d.ts +13 -0
- package/dist/sse/sse-to-json/gemini-sse-to-json-converter.js +160 -0
- package/dist/sse/sse-to-json/index.d.ts +7 -0
- package/dist/sse/sse-to-json/index.js +2 -0
- package/dist/sse/sse-to-json/parsers/sse-parser.js +53 -1
- package/dist/sse/types/anthropic-types.d.ts +170 -0
- package/dist/sse/types/anthropic-types.js +8 -5
- package/dist/sse/types/chat-types.d.ts +10 -0
- package/dist/sse/types/chat-types.js +2 -1
- package/dist/sse/types/core-interfaces.d.ts +1 -1
- package/dist/sse/types/gemini-types.d.ts +116 -0
- package/dist/sse/types/gemini-types.js +5 -0
- package/dist/sse/types/index.d.ts +5 -2
- package/dist/sse/types/index.js +2 -0
- package/package.json +1 -1
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import { PassThrough } from 'node:stream';
|
|
2
|
+
import { DEFAULT_GEMINI_CONVERSION_CONFIG } from '../types/index.js';
|
|
3
|
+
import { ErrorUtils } from '../shared/utils.js';
|
|
4
|
+
import { createGeminiSequencer } from './sequencers/gemini-sequencer.js';
|
|
5
|
+
import { createGeminiStreamWriter } from '../shared/writer.js';
|
|
6
|
+
/**
 * Converts a complete Gemini JSON response into a streaming SSE event flow.
 *
 * `convertResponseToJsonToSse(response, options)` returns an object-mode
 * PassThrough stream augmented with protocol metadata and control helpers
 * (`complete`, `abort`, `getStats`). Events are produced asynchronously by a
 * Gemini sequencer and written through a Gemini stream writer.
 */
export class GeminiJsonToSseConverter {
    // Effective conversion config; constructor overrides are shallow-merged on top.
    config = DEFAULT_GEMINI_CONVERSION_CONFIG;
    // Per-request bookkeeping contexts keyed by requestId; entries are removed
    // in processResponse's finally block.
    contexts = new Map();
    /**
     * @param {object} [config] - Optional partial config shallow-merged over defaults.
     */
    constructor(config) {
        if (config) {
            this.config = { ...this.config, ...config };
        }
    }
    /**
     * Starts the JSON→SSE conversion for one response.
     * @param {object} response - Gemini JSON response payload.
     * @param {object} options - Must include `requestId`; may carry `model`,
     *   `chunkDelayMs`, `reasoningMode`, `reasoningTextPrefix`.
     * @returns {Promise<object>} the PassThrough stream augmented with
     *   protocol/direction/requestId plus `getStats`, `complete`, `abort`.
     */
    async convertResponseToJsonToSse(response, options) {
        const context = this.createContext(response, options);
        this.contexts.set(options.requestId, context);
        const stream = new PassThrough({ objectMode: true });
        const writer = createGeminiStreamWriter(stream, {
            onEvent: () => this.updateStats(context, 'chunk'),
            onError: (error) => this.handleStreamError(context, error, stream)
        });
        // Fire-and-forget: failures are routed into the stream via handleStreamError.
        this.processResponse(response, context, writer, stream).catch((error) => {
            this.handleStreamError(context, error, stream);
        });
        return Object.assign(stream, {
            protocol: 'gemini-chat',
            direction: 'json_to_sse',
            requestId: options.requestId,
            getStats: () => context.eventStats,
            complete: () => writer.complete(),
            abort: (error) => writer.abort(error)
        });
    }
    /**
     * Builds the per-request context (stats counters, timing, original options).
     */
    createContext(response, options) {
        const stats = {
            totalEvents: 0,
            chunkEvents: 0,
            doneEvents: 0,
            errors: 0,
            startTime: Date.now()
        };
        return {
            requestId: options.requestId,
            model: options.model,
            response,
            options,
            startTime: Date.now(),
            eventStats: stats
        };
    }
    /**
     * Sequences the response into Gemini SSE events and writes them out.
     * Always removes the per-request context and closes the stream.
     * @throws wrapped GEMINI_JSON_TO_SSE_FAILED error on any failure.
     */
    async processResponse(response, context, writer, stream) {
        try {
            this.validateResponse(response);
            // Per-request option overrides win over instance config.
            const sequencer = createGeminiSequencer({
                chunkDelayMs: context.options.chunkDelayMs ?? this.config.chunkDelayMs,
                reasoningMode: context.options.reasoningMode ?? this.config.reasoningMode,
                reasoningTextPrefix: context.options.reasoningTextPrefix ?? this.config.reasoningTextPrefix
            });
            const events = sequencer.sequenceResponse(response);
            await writer.writeGeminiEvents(events);
            this.updateStats(context, 'done');
            writer.complete();
        }
        catch (error) {
            writer.abort(error);
            throw this.wrapError('GEMINI_JSON_TO_SSE_FAILED', error, context.requestId);
        }
        finally {
            this.contexts.delete(context.requestId);
            // Don't end a stream that handleStreamError has already destroyed —
            // ending a destroyed writable surfaces a spurious ERR_STREAM_DESTROYED.
            if (!stream.destroyed) {
                stream.end();
            }
        }
    }
    /**
     * Minimal shape check: the payload must be a non-null object.
     * @throws {Error} when the payload is missing or not an object.
     */
    validateResponse(response) {
        if (!response || typeof response !== 'object') {
            throw new Error('Invalid Gemini response payload');
        }
    }
    /**
     * Increments event counters; `kind` is 'chunk' for data events, anything
     * else counts as a done event.
     */
    updateStats(context, kind) {
        context.eventStats.totalEvents += 1;
        if (kind === 'chunk') {
            context.eventStats.chunkEvents += 1;
        }
        else {
            context.eventStats.doneEvents += 1;
        }
    }
    /**
     * Records the error and tears the stream down; destroy failures are ignored
     * deliberately (best-effort cleanup).
     */
    handleStreamError(context, error, stream) {
        context.eventStats.errors += 1;
        try {
            stream.destroy(error);
        }
        catch {
            /* ignore */
        }
    }
    /**
     * Wraps an arbitrary throwable in the project's error shape.
     * Guards against non-Error throwables (the original read `error.message`
     * unconditionally, which crashes on thrown strings/objects).
     */
    wrapError(code, error, requestId) {
        const message = error instanceof Error ? error.message : String(error);
        return ErrorUtils.createError(message, code, { requestId });
    }
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
 * JSON→SSE conversion module exports.
 */
export { ChatJsonToSseConverter } from './chat-json-to-sse-converter.js';
export { ResponsesJsonToSseConverter } from './responses-json-to-sse-converter.js';
export { GeminiJsonToSseConverter } from './gemini-json-to-sse-converter.js';
export type { ChatJsonToSseOptions, ChatJsonToSseContext, ChatEventStats, ResponsesJsonToSseOptions, ResponsesJsonToSseContext, ResponsesEventStats, GeminiJsonToSseOptions, GeminiJsonToSseContext, GeminiEventStats } from '../types/index.js';
|
|
@@ -5,3 +5,5 @@
|
|
|
5
5
|
export { ChatJsonToSseConverter } from './chat-json-to-sse-converter.js';
// Responses protocol converter (refactored version)
export { ResponsesJsonToSseConverter } from './responses-json-to-sse-converter.js';
// Gemini protocol converter
export { GeminiJsonToSseConverter } from './gemini-json-to-sse-converter.js';
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { AnthropicMessageResponse, AnthropicSseEvent } from '../../types/index.js';
|
|
2
|
+
import type { ChatReasoningMode } from '../../types/chat-types.js';
|
|
3
|
+
/** Options controlling how an Anthropic response is chunked into SSE events. */
export interface AnthropicSequencerConfig {
    /** Maximum characters emitted per text/thinking delta event. */
    chunkSize: number;
    /** Delay between delta events, in milliseconds (only honored when enableDelay is true). */
    chunkDelayMs: number;
    /** Whether to pause between delta events. */
    enableDelay: boolean;
    /** How reasoning ("thinking") text is routed — NOTE(review): confirm modes against chat-types. */
    reasoningMode?: ChatReasoningMode;
    /** Optional prefix applied when reasoning text is folded into regular content. */
    reasoningTextPrefix?: string;
}
/** Built-in defaults used when createAnthropicSequencer is called without overrides. */
export declare const DEFAULT_ANTHROPIC_SEQUENCER_CONFIG: AnthropicSequencerConfig;
/**
 * Creates a sequencer that replays a complete Anthropic message response as an
 * ordered async stream of Anthropic SSE events.
 */
export declare function createAnthropicSequencer(config?: Partial<AnthropicSequencerConfig>): {
    sequenceResponse(response: AnthropicMessageResponse, requestId: string): AsyncGenerator<AnthropicSseEvent>;
};
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
import { dispatchReasoning } from '../../shared/reasoning-dispatcher.js';
|
|
2
|
+
// Default sequencing options for the Anthropic JSON→SSE sequencer.
export const DEFAULT_ANTHROPIC_SEQUENCER_CONFIG = {
    chunkSize: 1024, // max characters per delta event
    chunkDelayMs: 0, // pause between delta events (only honored when enableDelay is true)
    enableDelay: false, // disabled by default: emit deltas back-to-back
    reasoningMode: 'channel', // reasoning routed via dispatchReasoning's channel output
    reasoningTextPrefix: undefined // optional prefix when reasoning is folded into content
};
|
|
9
|
+
/**
 * Builds one Anthropic SSE event envelope. The event `type` is mirrored into
 * both the SSE `event` name and the payload's `type` field; the payload is a
 * shallow copy of `data` (an explicit `type` key in `data` wins).
 */
function createEvent(type, data) {
    const payload = Object.assign({ type }, data);
    const envelope = {
        type,
        event: type,
        timestamp: Date.now(),
        protocol: 'anthropic-messages',
        direction: 'json_to_sse',
        data: payload
    };
    return envelope;
}
|
|
19
|
+
/**
 * Awaits the configured inter-chunk delay; resolves immediately when delays
 * are disabled or the delay is zero/unset.
 */
async function maybeDelay(config) {
    const { enableDelay, chunkDelayMs } = config;
    if (enableDelay && chunkDelayMs) {
        await new Promise((resolve) => {
            setTimeout(resolve, chunkDelayMs);
        });
    }
}
|
|
24
|
+
/**
 * Splits `input` into fixed-size slices. Falsy input or a non-positive size
 * yields the input wrapped as-is in a single-element array; a non-empty input
 * always yields at least one chunk.
 */
function chunkText(input, size) {
    if (!input || size <= 0) {
        return [input];
    }
    const pieces = [];
    let offset = 0;
    while (offset < input.length) {
        pieces.push(input.slice(offset, offset + size));
        offset += size;
    }
    return pieces.length ? pieces : [''];
}
|
|
33
|
+
/**
 * Normalizes tool-call input into a string: strings pass through, everything
 * else is JSON-serialized (nullish becomes '{}'), and unserializable values
 * (e.g. cyclic objects) fall back to String coercion.
 */
function normalizeToolInput(input) {
    if (typeof input === 'string')
        return input;
    const value = input ?? {};
    try {
        return JSON.stringify(value);
    }
    catch {
        return String(input ?? '');
    }
}
|
|
44
|
+
/**
 * Creates a sequencer that replays a complete Anthropic message response as an
 * ordered async stream of Anthropic SSE events:
 * message_start → (content_block_start / *_delta / content_block_stop per
 * block) → message_delta (stop_reason + usage) → message_stop.
 *
 * @param config - Partial overrides merged over DEFAULT_ANTHROPIC_SEQUENCER_CONFIG.
 * @returns an object exposing `sequenceResponse(response, requestId)`.
 */
export function createAnthropicSequencer(config) {
    const finalConfig = {
        ...DEFAULT_ANTHROPIC_SEQUENCER_CONFIG,
        ...config
    };
    return {
        async *sequenceResponse(response, requestId) {
            // Envelope first; synthesizes id/role when the response omits them.
            yield createEvent('message_start', {
                message: {
                    id: response.id || `msg_${requestId}`,
                    type: 'message',
                    role: response.role || 'assistant',
                    model: response.model
                }
            });
            // Output content-block index; only advances when a block actually
            // emits a start/stop pair (malformed blocks are skipped entirely).
            let index = 0;
            for (const block of response.content || []) {
                if (!block || typeof block !== 'object')
                    continue;
                if (block.type === 'text') {
                    // Plain text: split into fixed-size chunks, one text_delta each.
                    yield createEvent('content_block_start', { index, content_block: { type: 'text' } });
                    for (const chunk of chunkText(block.text ?? '', finalConfig.chunkSize)) {
                        if (!chunk)
                            continue;
                        yield createEvent('content_block_delta', {
                            index,
                            delta: { type: 'text_delta', text: chunk }
                        });
                        await maybeDelay(finalConfig);
                    }
                    yield createEvent('content_block_stop', { index });
                    index += 1;
                }
                else if (block.type === 'thinking') {
                    // NOTE(review): reads `block.text`; Anthropic thinking blocks
                    // commonly carry a `thinking` field instead — confirm the
                    // upstream block shape produced by this package.
                    const decision = dispatchReasoning(block.text, {
                        mode: finalConfig.reasoningMode,
                        prefix: finalConfig.reasoningTextPrefix
                    });
                    // The dispatcher may split reasoning into text appended to the
                    // normal content stream and/or text for the thinking channel.
                    if (decision.appendToContent) {
                        yield createEvent('content_block_start', { index, content_block: { type: 'text' } });
                        for (const chunk of chunkText(decision.appendToContent, finalConfig.chunkSize)) {
                            if (!chunk)
                                continue;
                            yield createEvent('content_block_delta', {
                                index,
                                delta: { type: 'text_delta', text: chunk }
                            });
                            await maybeDelay(finalConfig);
                        }
                        yield createEvent('content_block_stop', { index });
                        index += 1;
                    }
                    if (decision.channel) {
                        yield createEvent('content_block_start', { index, content_block: { type: 'thinking' } });
                        for (const chunk of chunkText(decision.channel, finalConfig.chunkSize)) {
                            if (!chunk)
                                continue;
                            yield createEvent('content_block_delta', {
                                index,
                                delta: { type: 'thinking_delta', text: chunk }
                            });
                            await maybeDelay(finalConfig);
                        }
                        yield createEvent('content_block_stop', { index });
                        index += 1;
                    }
                }
                else if (block.type === 'tool_use') {
                    // Tool calls: the start event carries the full input, and the
                    // arguments are also replayed as one input_json_delta.
                    const id = block.id || `call_${requestId}_${index}`;
                    yield createEvent('content_block_start', {
                        index,
                        content_block: { type: 'tool_use', id, name: block.name, input: block.input }
                    });
                    const payload = normalizeToolInput(block.input ?? {});
                    if (payload) {
                        yield createEvent('content_block_delta', {
                            index,
                            delta: { type: 'input_json_delta', partial_json: payload }
                        });
                    }
                    yield createEvent('content_block_stop', { index });
                    index += 1;
                }
                else if (block.type === 'tool_result') {
                    // Tool results: start/stop pair with no deltas.
                    yield createEvent('content_block_start', {
                        index,
                        content_block: {
                            type: 'tool_result',
                            tool_use_id: block.tool_use_id,
                            content: block.content,
                            is_error: block.is_error
                        }
                    });
                    yield createEvent('content_block_stop', { index });
                    index += 1;
                }
            }
            // Trailing metadata; stop_reason defaults to end_turn.
            yield createEvent('message_delta', {
                delta: {
                    stop_reason: response.stop_reason ?? 'end_turn',
                    usage: response.usage
                }
            });
            yield createEvent('message_stop', {});
        }
    };
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
/**
 * Chat protocol event sequencer.
 * Enforces strict event ordering: role → content OR tool_calls (name → args.delta*) → finish_reason → done
 */
import { ChatCompletionResponse, ChatReasoningMode, ChatSseEvent } from '../../types/index.js';
import { ChatEventGeneratorContext, ChatEventGeneratorConfig } from '../event-generators/chat.js';
/** Sequencer-specific options layered over the chat event-generator config. */
export interface ChatSequencerConfig extends ChatEventGeneratorConfig {
    /** When true, stamp emitted events with sequence numbers. */
    includeSequenceNumbers: boolean;
    /** When true, insert artificial delays between events. */
    enableDelay: boolean;
    /** When true, validate event ordering while sequencing. */
    validateOrder: boolean;
    /** How reasoning text is routed — NOTE(review): confirm modes against chat-types. */
    reasoningMode?: ChatReasoningMode;
    /** Optional prefix applied when reasoning is folded into content. */
    reasoningTextPrefix?: string;
}
export declare const DEFAULT_CHAT_SEQUENCER_CONFIG: ChatSequencerConfig;
/**
 * Main sequencer: converts a Chat response into an ordered SSE event stream.
 */
export declare function sequenceChatResponse(response: ChatCompletionResponse, context: ChatEventGeneratorContext, config?: ChatSequencerConfig): AsyncGenerator<ChatSseEvent>;
/**
 * Sequences a Chat request (used for request→SSE conversion).
 */
export declare function sequenceChatRequest(request: any, context: ChatEventGeneratorContext, config?: ChatSequencerConfig): AsyncGenerator<ChatSseEvent>;
/**
 * Creates a Chat event sequencer factory.
 */
export declare function createChatSequencer(config?: Partial<ChatSequencerConfig>): {
    /**
     * Sequence a response.
     */
    sequenceResponse(response: ChatCompletionResponse, model: string, requestId: string): AsyncGenerator<ChatSseEvent, void, any>;
    /**
     * Sequence a request.
     */
    sequenceRequest(request: any, model: string, requestId: string): AsyncGenerator<ChatSseEvent, void, any>;
    /**
     * Get the current configuration.
     */
    getConfig(): ChatSequencerConfig;
};
|
|
@@ -3,12 +3,16 @@
|
|
|
3
3
|
* 负责严格的事件时序组合:role → content OR tool_calls(name → args.delta*) → finish_reason → done
|
|
4
4
|
*/
|
|
5
5
|
import { buildRoleDelta, buildReasoningDeltas, buildContentDeltas, buildToolCallStart, buildToolCallArgsDeltas, buildFinishEvent, buildDoneEvent, buildErrorEvent, DEFAULT_CHAT_EVENT_GENERATOR_CONFIG, createDefaultContext } from '../event-generators/chat.js';
|
|
6
|
+
import { normalizeMessageReasoningTools } from '../../../conversion/shared/reasoning-tool-normalizer.js';
|
|
7
|
+
import { normalizeChatMessageContent } from '../../../conversion/shared/chat-output-normalizer.js';
|
|
8
|
+
import { dispatchReasoning } from '../../shared/reasoning-dispatcher.js';
|
|
6
9
|
// Default configuration: event-generator defaults plus sequencer-specific flags.
export const DEFAULT_CHAT_SEQUENCER_CONFIG = {
    ...DEFAULT_CHAT_EVENT_GENERATOR_CONFIG,
    includeSequenceNumbers: true, // stamp events with sequence numbers
    enableDelay: false, // no artificial delay between events by default
    validateOrder: true, // validate event ordering while sequencing
    reasoningMode: 'channel' // reasoning routed via dispatchReasoning's channel output
};
|
|
13
17
|
/**
|
|
14
18
|
* 验证消息顺序的合法性
|
|
@@ -40,6 +44,26 @@ function hasMeaningfulContent(content) {
|
|
|
40
44
|
return Object.keys(content).length > 0;
|
|
41
45
|
return false;
|
|
42
46
|
}
|
|
47
|
+
/**
 * Appends trimmed reasoning text to a chat message's content in place.
 * - Blank reasoning is a no-op.
 * - An optional prefix is joined with a single space unless it already ends
 *   with whitespace.
 * - String/empty content becomes (or is extended with) a string, separated by
 *   a blank line unless the existing text already ends with a newline.
 * - Array content gets a new `{ type: 'text' }` part; any other content value
 *   is wrapped into a two-element array alongside the new text part.
 */
function appendReasoningToContent(message, reasoningText, prefix) {
    const body = reasoningText.trim();
    if (!body) {
        return;
    }
    let formatted = body;
    if (typeof prefix === 'string' && prefix.length) {
        const needsGap = !(prefix.endsWith(' ') || prefix.endsWith('\n'));
        formatted = `${prefix}${needsGap ? ' ' : ''}${body}`;
    }
    const existing = message.content;
    if (Array.isArray(existing)) {
        existing.push({ type: 'text', text: formatted });
    }
    else if (typeof existing === 'string' && existing.length) {
        const gap = existing.endsWith('\n') ? '' : '\n\n';
        message.content = `${existing}${gap}${formatted}`;
    }
    else if (existing == null || typeof existing === 'string') {
        message.content = formatted;
    }
    else {
        message.content = [existing, { type: 'text', text: formatted }];
    }
}
|
|
43
67
|
/**
|
|
44
68
|
* 异步生成器:为事件添加序列号和延迟
|
|
45
69
|
*/
|
|
@@ -62,9 +86,31 @@ async function* sequenceMessageContent(message, context, config) {
|
|
|
62
86
|
for (const roleEvent of buildRoleDelta(message.role, context, config)) {
|
|
63
87
|
yield roleEvent;
|
|
64
88
|
}
|
|
89
|
+
const contentNormalization = normalizeChatMessageContent(message.content);
|
|
90
|
+
if (contentNormalization.contentText !== undefined) {
|
|
91
|
+
message.content = contentNormalization.contentText;
|
|
92
|
+
}
|
|
93
|
+
const normalization = normalizeMessageReasoningTools(message, {
|
|
94
|
+
idPrefix: `chat_seq_reasoning_${context.choiceIndex + 1}`
|
|
95
|
+
});
|
|
96
|
+
const reasoningText = normalization.cleanedReasoning
|
|
97
|
+
?? contentNormalization.reasoningText
|
|
98
|
+
?? (typeof message?.reasoning_content === 'string'
|
|
99
|
+
? message.reasoning_content
|
|
100
|
+
: typeof message?.reasoning === 'string'
|
|
101
|
+
? message.reasoning
|
|
102
|
+
: undefined);
|
|
103
|
+
const reasoningDispatch = dispatchReasoning(reasoningText, {
|
|
104
|
+
mode: config.reasoningMode,
|
|
105
|
+
prefix: config.reasoningTextPrefix
|
|
106
|
+
});
|
|
107
|
+
if (reasoningDispatch.appendToContent) {
|
|
108
|
+
appendReasoningToContent(message, reasoningDispatch.appendToContent);
|
|
109
|
+
}
|
|
110
|
+
const reasoningForChannel = reasoningDispatch.channel;
|
|
65
111
|
// 2. 处理reasoning(如果有)
|
|
66
|
-
if (
|
|
67
|
-
yield* withSequencing(buildReasoningDeltas(
|
|
112
|
+
if (reasoningForChannel) {
|
|
113
|
+
yield* withSequencing(buildReasoningDeltas(reasoningForChannel, context, config), config);
|
|
68
114
|
}
|
|
69
115
|
// 3. 处理content和tool_calls
|
|
70
116
|
if (hasMeaningfulContent(message.content)) {
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { GeminiResponse, GeminiSseEvent } from '../../types/index.js';
|
|
2
|
+
import type { ChatReasoningMode } from '../../types/chat-types.js';
|
|
3
|
+
/** Options controlling Gemini JSON→SSE sequencing. */
export interface GeminiSequencerConfig {
    /** Delay between emitted part events, in milliseconds (0 = none). */
    chunkDelayMs: number;
    /** How reasoning text on a part is routed — NOTE(review): confirm modes against chat-types. */
    reasoningMode?: ChatReasoningMode;
    /** Optional prefix applied when reasoning is folded into regular text content. */
    reasoningTextPrefix?: string;
}
/**
 * Creates a sequencer that replays a complete Gemini response as an ordered
 * async stream of Gemini SSE events (per-part data events, then one done event).
 */
export declare function createGeminiSequencer(config?: Partial<GeminiSequencerConfig>): {
    sequenceResponse(response: GeminiResponse): AsyncGenerator<GeminiSseEvent>;
};
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import { dispatchReasoning } from '../../shared/reasoning-dispatcher.js';
|
|
2
|
+
// Default Gemini sequencer options.
const DEFAULT_CONFIG = {
    chunkDelayMs: 0, // no pause between part events by default
    reasoningMode: 'channel', // reasoning routed via dispatchReasoning's channel output
    reasoningTextPrefix: undefined // optional prefix when reasoning is folded into text
};
|
|
7
|
+
/**
 * Wraps a payload in the Gemini SSE event envelope used by this converter.
 * `type` doubles as the SSE event name; `sequenceNumber` is a fixed 0
 * placeholder (ordering is implied by emission order).
 */
function createEvent(type, data) {
    const envelope = { type, event: type };
    envelope.protocol = 'gemini-chat';
    envelope.direction = 'json_to_sse';
    envelope.timestamp = Date.now();
    envelope.data = data;
    envelope.sequenceNumber = 0;
    return envelope;
}
|
|
18
|
+
/**
 * Awaits the configured inter-chunk delay; resolves immediately when the
 * delay is zero or unset.
 */
async function maybeDelay(config) {
    const delayMs = config.chunkDelayMs;
    if (delayMs) {
        await new Promise((resolve) => {
            setTimeout(resolve, delayMs);
        });
    }
}
|
|
23
|
+
/**
 * Extracts the candidate's content role; returns `fallback` (default 'model')
 * when the role is missing, not a string, or blank.
 */
function getCandidateRole(candidate, fallback = 'model') {
    const role = candidate?.content?.role;
    const usable = typeof role === 'string' && role.trim().length > 0;
    return usable ? role : fallback;
}
|
|
30
|
+
/**
 * Returns the candidate's content parts with falsy entries removed; any
 * missing/non-array parts value yields an empty array.
 */
function getCandidateParts(candidate) {
    const parts = candidate?.content?.parts;
    if (!Array.isArray(parts)) {
        return [];
    }
    const present = [];
    for (const part of parts) {
        if (part) {
            present.push(part);
        }
    }
    return present;
}
|
|
37
|
+
/**
 * Creates a sequencer that replays a complete Gemini response as an ordered
 * async stream of SSE events: one `gemini.data` event per (normalized) part,
 * followed by a single `gemini.done` summary event carrying usage and
 * per-candidate finish metadata.
 *
 * @param {Partial<GeminiSequencerConfig>} [config] - Overrides merged over DEFAULT_CONFIG.
 */
export function createGeminiSequencer(config) {
    const finalConfig = { ...DEFAULT_CONFIG, ...config };
    return {
        async *sequenceResponse(response) {
            const candidates = Array.isArray(response.candidates) ? response.candidates : [];
            for (const [candidateIndex, rawCandidate] of candidates.entries()) {
                const candidate = rawCandidate || {};
                const role = getCandidateRole(candidate);
                const parts = getCandidateParts(candidate);
                for (const [partIndex, part] of parts.entries()) {
                    // A reasoning-bearing part may expand into 0..2 normalized parts.
                    for (const normalizedPart of normalizeReasoningPart(part, finalConfig)) {
                        yield createEvent('gemini.data', {
                            kind: 'part',
                            candidateIndex,
                            partIndex,
                            role,
                            part: normalizedPart
                        });
                        await maybeDelay(finalConfig);
                    }
                }
            }
            // Terminal summary event.
            yield createEvent('gemini.done', {
                kind: 'done',
                usageMetadata: response.usageMetadata,
                promptFeedback: response.promptFeedback,
                modelVersion: response.modelVersion,
                candidates: candidates.map((candidate, index) => ({
                    index,
                    finishReason: candidate?.finishReason,
                    safetyRatings: candidate?.safetyRatings
                }))
            });
        }
    };
}
|
|
75
|
+
/**
 * Splits a Gemini candidate part that carries string `reasoning` according to
 * the configured reasoning mode (via dispatchReasoning): reasoning may become
 * a plain `{ text }` part, a `{ reasoning }` part, both, or neither. Parts
 * without string reasoning (including non-object parts) pass through as a
 * single-element array.
 */
function normalizeReasoningPart(part, config) {
    const isObject = Boolean(part) && typeof part === 'object';
    const reasoning = isObject && typeof part.reasoning === 'string' ? part.reasoning : undefined;
    if (!reasoning) {
        return [part];
    }
    const decision = dispatchReasoning(reasoning, {
        mode: config.reasoningMode,
        prefix: config.reasoningTextPrefix
    });
    const out = [];
    if (decision.appendToContent) {
        out.push({ text: decision.appendToContent });
    }
    if (decision.channel) {
        out.push({ reasoning: decision.channel });
    }
    return out;
}
|
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
* 负责将Responses响应数据转换为有序的SSE事件流
|
|
4
4
|
*/
|
|
5
5
|
import { buildResponseStartEvents, buildResponseDoneEvent, buildOutputItemStartEvent, buildOutputItemDoneEvent, buildContentPartStartEvent, buildContentPartDeltas, buildContentPartDoneEvent, buildOutputTextDoneEvent, buildFunctionCallArgsDeltas, buildFunctionCallDoneEvent, buildReasoningStartEvent, buildReasoningDeltas, buildReasoningDoneEvent, buildRequiredActionEvent, buildResponseCompletedEvent, buildErrorEvent, DEFAULT_RESPONSES_EVENT_GENERATOR_CONFIG, createDefaultResponsesContext } from '../event-generators/responses.js';
|
|
6
|
+
import { expandResponsesMessageItem } from '../../shared/responses-output-normalizer.js';
|
|
6
7
|
// Default configuration
|
|
7
8
|
export const DEFAULT_RESPONSES_SEQUENCER_CONFIG = {
|
|
8
9
|
...DEFAULT_RESPONSES_EVENT_GENERATOR_CONFIG,
|
|
@@ -13,6 +14,23 @@ export const DEFAULT_RESPONSES_SEQUENCER_CONFIG = {
|
|
|
13
14
|
maxContentParts: 100,
|
|
14
15
|
submittedToolOutputs: undefined
|
|
15
16
|
};
|
|
17
|
+
/**
 * Normalizes a Responses API `output` array before sequencing.
 * Items of type `message` are expanded through `expandResponsesMessageItem`
 * (which may yield several items per message); every other item — including
 * null/primitive entries — is kept as-is. A non-array input yields [].
 *
 * @param {unknown} output - raw `response.output` value
 * @param {string} requestId - request id forwarded to the expander
 * @returns {Array} flattened, normalized output items
 */
function normalizeResponseOutput(output, requestId) {
    if (!Array.isArray(output)) {
        return [];
    }
    return output.flatMap((item, index) => {
        const isMessage = item && typeof item === 'object' && item.type === 'message';
        if (!isMessage) {
            return [item];
        }
        // Message items may fan out into multiple synthetic output items.
        return expandResponsesMessageItem(item, {
            requestId,
            outputIndex: index
        });
    });
}
|
|
16
34
|
/**
|
|
17
35
|
* Validate the response format
|
|
18
36
|
*/
|
|
@@ -158,14 +176,15 @@ export async function* sequenceResponse(response, context, config = DEFAULT_RESP
|
|
|
158
176
|
context.outputIndexCounter = i;
|
|
159
177
|
yield* sequenceFunctionCallOutputItem(submittedOutputs[i], context, config);
|
|
160
178
|
}
|
|
179
|
+
const normalizedOutput = normalizeResponseOutput(response.output, context.requestId);
|
|
161
180
|
const outputOffset = submittedOutputs.length;
|
|
162
181
|
// 3. 序列化所有输出项
|
|
163
|
-
for (let outputIndex = 0; outputIndex <
|
|
164
|
-
const item =
|
|
182
|
+
for (let outputIndex = 0; outputIndex < normalizedOutput.length; outputIndex++) {
|
|
183
|
+
const item = normalizedOutput[outputIndex];
|
|
165
184
|
context.outputIndexCounter = outputOffset + outputIndex;
|
|
166
185
|
yield* sequenceOutputItem(item, context, config);
|
|
167
186
|
// 输出项间添加小延迟(如果启用)
|
|
168
|
-
if (config.enableDelay && config.chunkDelayMs > 0 && outputIndex <
|
|
187
|
+
if (config.enableDelay && config.chunkDelayMs > 0 && outputIndex < normalizedOutput.length - 1) {
|
|
169
188
|
await new Promise(resolve => setTimeout(resolve, config.chunkDelayMs * 2));
|
|
170
189
|
}
|
|
171
190
|
}
|
|
@@ -203,9 +222,9 @@ export async function* sequenceRequest(request, context, config = DEFAULT_RESPON
|
|
|
203
222
|
}, context, config);
|
|
204
223
|
// 回显输入消息作为输出项
|
|
205
224
|
if (request.input && Array.isArray(request.input)) {
|
|
225
|
+
let syntheticIndex = 0;
|
|
206
226
|
for (let inputIndex = 0; inputIndex < request.input.length; inputIndex++) {
|
|
207
227
|
const inputItem = request.input[inputIndex];
|
|
208
|
-
context.outputIndexCounter = inputIndex;
|
|
209
228
|
// 将输入转换为输出项格式
|
|
210
229
|
const outputItem = {
|
|
211
230
|
id: `${context.requestId}-input-${inputIndex}`,
|
|
@@ -214,7 +233,14 @@ export async function* sequenceRequest(request, context, config = DEFAULT_RESPON
|
|
|
214
233
|
role: inputItem.role,
|
|
215
234
|
content: inputItem.content
|
|
216
235
|
};
|
|
217
|
-
|
|
236
|
+
const expandedItems = expandResponsesMessageItem(outputItem, {
|
|
237
|
+
requestId: context.requestId,
|
|
238
|
+
outputIndex: syntheticIndex
|
|
239
|
+
});
|
|
240
|
+
for (const expanded of expandedItems) {
|
|
241
|
+
context.outputIndexCounter = syntheticIndex++;
|
|
242
|
+
yield* sequenceOutputItem(expanded, context, config);
|
|
243
|
+
}
|
|
218
244
|
}
|
|
219
245
|
}
|
|
220
246
|
const syntheticResponse = {
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import type { ResponsesFunctionCallOutputItem } from '../types/index.js';
/** Wire protocols the SSE codec registry can translate to/from. */
export type SseProtocol = 'openai-chat' | 'openai-responses' | 'anthropic-messages' | 'gemini-chat';
/** Opaque SSE stream produced by a codec. NOTE(review): intentionally `any` — shape varies per converter. */
export type SseStreamLike = any;
/** Opaque SSE stream accepted by a codec. NOTE(review): intentionally `any` — shape varies per converter. */
export type SseStreamInput = any;
/** Context passed when converting a JSON payload into an SSE stream. */
export interface JsonToSseContext {
    requestId: string;
    model?: string;
    direction?: 'request' | 'response';
    /** Previously submitted tool outputs to resume a function-call round trip. */
    resumeToolOutputs?: ResponsesFunctionCallOutputItem[];
}
/** Context passed when collapsing an SSE stream back into a JSON payload. */
export interface SseToJsonContext {
    requestId: string;
    model?: string;
    direction?: 'request' | 'response';
}
/** Context for the optional stream-normalization hook. */
export interface NormalizeSseContext {
    requestId: string;
    protocol: SseProtocol;
}
/** A bidirectional JSON ⇄ SSE converter for one protocol. */
export interface SseCodec {
    protocol: SseProtocol;
    convertJsonToSse(payload: unknown, context: JsonToSseContext): Promise<SseStreamLike>;
    convertSseToJson(stream: SseStreamInput, context: SseToJsonContext): Promise<unknown>;
    /** Optional pass that normalizes a raw stream before further processing. */
    normalize?(stream: SseStreamInput, context: NormalizeSseContext): Promise<SseStreamInput>;
}
/** Registry of codecs keyed by protocol; `get` resolves a codec or fails for unknown protocols. */
export declare class SseCodecRegistry {
    private readonly codecs;
    register(codec: SseCodec): void;
    get(protocol: SseProtocol): SseCodec;
    list(): SseCodec[];
}
/** Shared default registry, pre-populated with the built-in codecs. */
export declare const defaultSseCodecRegistry: SseCodecRegistry;
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { chatConverters, responsesConverters, anthropicConverters } from '../index.js';
|
|
1
|
+
import { chatConverters, responsesConverters, anthropicConverters, geminiConverters } from '../index.js';
|
|
2
2
|
export class SseCodecRegistry {
|
|
3
3
|
codecs = new Map();
|
|
4
4
|
register(codec) {
|
|
@@ -27,11 +27,17 @@ function resolveModelId(payload, fallback) {
|
|
|
27
27
|
if (typeof record.model === 'string') {
|
|
28
28
|
return record.model;
|
|
29
29
|
}
|
|
30
|
+
if (record.modelVersion && typeof record.modelVersion === 'string') {
|
|
31
|
+
return record.modelVersion;
|
|
32
|
+
}
|
|
30
33
|
if (record.response && typeof record.response === 'object') {
|
|
31
34
|
const inner = record.response;
|
|
32
35
|
if (typeof inner.model === 'string') {
|
|
33
36
|
return inner.model;
|
|
34
37
|
}
|
|
38
|
+
if (typeof inner.modelVersion === 'string') {
|
|
39
|
+
return inner.modelVersion;
|
|
40
|
+
}
|
|
35
41
|
}
|
|
36
42
|
return 'unknown';
|
|
37
43
|
}
|
|
@@ -101,6 +107,29 @@ function createAnthropicCodec() {
|
|
|
101
107
|
}
|
|
102
108
|
};
|
|
103
109
|
}
|
|
110
|
+
/**
 * Builds the SSE codec for the `gemini-chat` protocol.
 * Delegates JSON→SSE and SSE→JSON conversion to the shared Gemini converters;
 * `normalize` is an identity pass (Gemini streams need no pre-normalization).
 *
 * @returns {{protocol: string, convertJsonToSse: Function, convertSseToJson: Function, normalize: Function}}
 */
function createGeminiCodec() {
    const convertJsonToSse = async (payload, context) => {
        // Prefer the model named in the payload itself, falling back to the context.
        const model = resolveModelId(payload, context.model);
        return geminiConverters.jsonToSse
            .convertResponseToJsonToSse(payload, {
            requestId: context.requestId,
            model
        });
    };
    const convertSseToJson = async (stream, context) => geminiConverters.sseToJson.convertSseToJson(stream, {
        requestId: context.requestId,
        model: context.model
    });
    // Identity normalization: the stream is forwarded untouched.
    const normalize = async (stream) => stream;
    return {
        protocol: 'gemini-chat',
        convertJsonToSse,
        convertSseToJson,
        normalize
    };
}
|
|
104
132
|
// Populate the shared default registry with the built-in codecs so callers
// can resolve a codec by its protocol identifier.
defaultSseCodecRegistry.register(createChatCodec());
defaultSseCodecRegistry.register(createResponsesCodec());
defaultSseCodecRegistry.register(createAnthropicCodec());
defaultSseCodecRegistry.register(createGeminiCodec());
|