graphlit-client 1.0.20250531005 → 1.0.20250610001
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +64 -0
- package/README.md +441 -53
- package/dist/client.d.ts +126 -1
- package/dist/client.js +1616 -1569
- package/dist/generated/graphql-documents.d.ts +1 -0
- package/dist/generated/graphql-documents.js +372 -310
- package/dist/generated/graphql-types.d.ts +207 -23
- package/dist/generated/graphql-types.js +299 -246
- package/dist/model-mapping.d.ts +18 -0
- package/dist/model-mapping.js +95 -0
- package/dist/stream-helpers.d.ts +106 -0
- package/dist/stream-helpers.js +237 -0
- package/dist/streaming/chunk-buffer.d.ts +25 -0
- package/dist/streaming/chunk-buffer.js +170 -0
- package/dist/streaming/llm-formatters.d.ts +64 -0
- package/dist/streaming/llm-formatters.js +187 -0
- package/dist/streaming/providers.d.ts +18 -0
- package/dist/streaming/providers.js +353 -0
- package/dist/streaming/ui-event-adapter.d.ts +52 -0
- package/dist/streaming/ui-event-adapter.js +288 -0
- package/dist/types/agent.d.ts +39 -0
- package/dist/types/agent.js +1 -0
- package/dist/types/streaming.d.ts +58 -0
- package/dist/types/streaming.js +7 -0
- package/dist/types/ui-events.d.ts +38 -0
- package/dist/types/ui-events.js +1 -0
- package/package.json +32 -5
@@ -0,0 +1,64 @@
|
|
1
|
+
import { ConversationMessage } from "../generated/graphql-types.js";
|
2
|
+
/**
|
3
|
+
* OpenAI message format
|
4
|
+
*/
|
5
|
+
export interface OpenAIMessage {
|
6
|
+
role: "system" | "user" | "assistant" | "tool";
|
7
|
+
content?: string;
|
8
|
+
tool_calls?: Array<{
|
9
|
+
id: string;
|
10
|
+
type: "function";
|
11
|
+
function: {
|
12
|
+
name: string;
|
13
|
+
arguments: string;
|
14
|
+
};
|
15
|
+
}>;
|
16
|
+
tool_call_id?: string;
|
17
|
+
}
|
18
|
+
/**
|
19
|
+
* Anthropic message format
|
20
|
+
*/
|
21
|
+
export interface AnthropicMessage {
|
22
|
+
role: "user" | "assistant";
|
23
|
+
content: string | Array<{
|
24
|
+
type: "text" | "tool_use" | "tool_result";
|
25
|
+
text?: string;
|
26
|
+
id?: string;
|
27
|
+
name?: string;
|
28
|
+
input?: unknown;
|
29
|
+
tool_use_id?: string;
|
30
|
+
content?: string;
|
31
|
+
}>;
|
32
|
+
}
|
33
|
+
/**
|
34
|
+
* Google message format
|
35
|
+
*/
|
36
|
+
export interface GoogleMessage {
|
37
|
+
role: "user" | "model";
|
38
|
+
parts: Array<{
|
39
|
+
text?: string;
|
40
|
+
functionCall?: {
|
41
|
+
name: string;
|
42
|
+
args: unknown;
|
43
|
+
};
|
44
|
+
functionResponse?: {
|
45
|
+
name: string;
|
46
|
+
response: unknown;
|
47
|
+
};
|
48
|
+
}>;
|
49
|
+
}
|
50
|
+
/**
|
51
|
+
* Format GraphQL conversation messages for OpenAI SDK
|
52
|
+
*/
|
53
|
+
export declare function formatMessagesForOpenAI(messages: ConversationMessage[]): OpenAIMessage[];
|
54
|
+
/**
|
55
|
+
* Format GraphQL conversation messages for Anthropic SDK
|
56
|
+
*/
|
57
|
+
export declare function formatMessagesForAnthropic(messages: ConversationMessage[]): {
|
58
|
+
system?: string;
|
59
|
+
messages: AnthropicMessage[];
|
60
|
+
};
|
61
|
+
/**
|
62
|
+
* Format GraphQL conversation messages for Google SDK
|
63
|
+
*/
|
64
|
+
export declare function formatMessagesForGoogle(messages: ConversationMessage[]): GoogleMessage[];
|
@@ -0,0 +1,187 @@
|
|
1
|
+
import { ConversationRoleTypes, } from "../generated/graphql-types.js";
|
2
|
+
/**
|
3
|
+
* Format GraphQL conversation messages for OpenAI SDK
|
4
|
+
*/
|
5
|
+
/**
 * Format GraphQL conversation messages for the OpenAI SDK.
 *
 * Messages without a role, and messages carrying neither text nor tool
 * calls, are dropped. Assistant messages keep their tool calls in OpenAI's
 * `tool_calls` shape; tool-role messages become `role: "tool"` replies
 * keyed by `tool_call_id`.
 */
export function formatMessagesForOpenAI(messages) {
    const result = [];
    for (const message of messages) {
        if (!message.role) {
            continue;
        }
        const text = message.message?.trim() || "";
        const rawToolCalls = message.toolCalls;
        const hasToolCalls = !!(rawToolCalls && rawToolCalls.length > 0);
        // Tool-call-only assistant messages are valid; empty ones are not.
        if (!text && !hasToolCalls) {
            continue;
        }
        if (message.role === ConversationRoleTypes.System) {
            result.push({ role: "system", content: text });
        }
        else if (message.role === ConversationRoleTypes.Assistant) {
            const entry = { role: "assistant" };
            // Only attach content when there is actual text.
            if (text) {
                entry.content = text;
            }
            if (hasToolCalls) {
                entry.tool_calls = rawToolCalls
                    .filter((tc) => tc !== null)
                    .map((tc) => ({
                    id: tc.id,
                    type: "function",
                    function: {
                        name: tc.name,
                        arguments: tc.arguments,
                    },
                }));
            }
            result.push(entry);
        }
        else if (message.role === ConversationRoleTypes.Tool) {
            result.push({
                role: "tool",
                content: text,
                tool_call_id: message.toolCallId || "",
            });
        }
        else {
            // Everything else is treated as a user message.
            result.push({ role: "user", content: text });
        }
    }
    return result;
}
|
65
|
+
/**
|
66
|
+
* Format GraphQL conversation messages for Anthropic SDK
|
67
|
+
*/
|
68
|
+
/**
 * Format GraphQL conversation messages for the Anthropic SDK.
 *
 * The system prompt is returned separately (Anthropic takes it as its own
 * request parameter). Assistant tool calls become `tool_use` content
 * blocks; tool-role messages become user messages containing a
 * `tool_result` block, as Anthropic requires.
 */
export function formatMessagesForAnthropic(messages) {
    let system;
    const formatted = [];
    for (const message of messages) {
        if (!message.role) {
            continue;
        }
        const text = message.message?.trim() || "";
        const rawToolCalls = message.toolCalls;
        const hasToolCalls = !!(rawToolCalls && rawToolCalls.length > 0);
        // Tool-call-only assistant messages are valid; empty ones are not.
        if (!text && !hasToolCalls) {
            continue;
        }
        if (message.role === ConversationRoleTypes.System) {
            system = text;
        }
        else if (message.role === ConversationRoleTypes.Assistant) {
            const blocks = [];
            if (text) {
                blocks.push({ type: "text", text: text });
            }
            if (hasToolCalls) {
                for (const tc of rawToolCalls) {
                    if (!tc) {
                        continue;
                    }
                    blocks.push({
                        type: "tool_use",
                        id: tc.id,
                        name: tc.name,
                        input: tc.arguments ? JSON.parse(tc.arguments) : {},
                    });
                }
            }
            formatted.push({ role: "assistant", content: blocks });
        }
        else if (message.role === ConversationRoleTypes.Tool) {
            // Anthropic expects tool responses as user messages with
            // tool_result content blocks.
            formatted.push({
                role: "user",
                content: [
                    {
                        type: "tool_result",
                        tool_use_id: message.toolCallId || "",
                        content: text,
                    },
                ],
            });
        }
        else {
            formatted.push({ role: "user", content: text });
        }
    }
    return { system: system, messages: formatted };
}
|
135
|
+
/**
|
136
|
+
* Format GraphQL conversation messages for Google SDK
|
137
|
+
*/
|
138
|
+
/**
 * Format GraphQL conversation messages for the Google SDK.
 *
 * Assistant tool calls become `functionCall` parts; system prompts are
 * emitted as user turns (Google handles system prompts differently,
 * usually as part of the first user message). Messages carrying neither
 * text nor tool calls are dropped.
 *
 * Fix: previously any message without text was skipped, which silently
 * dropped assistant messages that carried only tool calls — inconsistent
 * with formatMessagesForOpenAI/formatMessagesForAnthropic, and it broke
 * replaying multi-turn tool conversations.
 */
export function formatMessagesForGoogle(messages) {
    const formattedMessages = [];
    for (const message of messages) {
        if (!message.role) {
            continue;
        }
        const trimmedMessage = message.message?.trim() || "";
        const hasToolCalls = message.toolCalls && message.toolCalls.length > 0;
        // Allow messages with tool calls even if they have no text content.
        if (!trimmedMessage && !hasToolCalls) {
            continue;
        }
        switch (message.role) {
            case ConversationRoleTypes.System:
                // Google handles system prompts differently, usually as part
                // of the first user message.
                formattedMessages.push({
                    role: "user",
                    parts: [{ text: trimmedMessage }],
                });
                break;
            case ConversationRoleTypes.Assistant: {
                const parts = [];
                // Add text content, if any.
                if (trimmedMessage) {
                    parts.push({ text: trimmedMessage });
                }
                // Add function calls if present.
                if (hasToolCalls) {
                    for (const toolCall of message.toolCalls) {
                        if (toolCall) {
                            parts.push({
                                functionCall: {
                                    name: toolCall.name,
                                    args: toolCall.arguments
                                        ? JSON.parse(toolCall.arguments)
                                        : {},
                                },
                            });
                        }
                    }
                }
                formattedMessages.push({
                    role: "model",
                    parts,
                });
                break;
            }
            default: // User (and tool-result) messages pass through as user text.
                formattedMessages.push({
                    role: "user",
                    parts: [{ text: trimmedMessage }],
                });
                break;
        }
    }
    return formattedMessages;
}
|
@@ -0,0 +1,18 @@
|
|
1
|
+
import { ConversationToolCall, Specification, ToolDefinitionInput } from "../generated/graphql-types.js";
|
2
|
+
import { StreamEvent } from "../client.js";
|
3
|
+
import { OpenAIMessage, AnthropicMessage, GoogleMessage } from "./llm-formatters.js";
|
4
|
+
/**
|
5
|
+
* Stream with OpenAI SDK
|
6
|
+
*/
|
7
|
+
export declare function streamWithOpenAI(specification: Specification, messages: OpenAIMessage[], tools: ToolDefinitionInput[] | undefined, openaiClient: any, // OpenAI client instance
|
8
|
+
onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[]) => void): Promise<void>;
|
9
|
+
/**
|
10
|
+
* Stream with Anthropic SDK
|
11
|
+
*/
|
12
|
+
export declare function streamWithAnthropic(specification: Specification, messages: AnthropicMessage[], systemPrompt: string | undefined, tools: ToolDefinitionInput[] | undefined, anthropicClient: any, // Anthropic client instance
|
13
|
+
onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[]) => void): Promise<void>;
|
14
|
+
/**
|
15
|
+
* Stream with Google SDK
|
16
|
+
*/
|
17
|
+
export declare function streamWithGoogle(specification: Specification, messages: GoogleMessage[], systemPrompt: string | undefined, tools: ToolDefinitionInput[] | undefined, googleClient: any, // Google GenerativeAI client instance
|
18
|
+
onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[]) => void): Promise<void>;
|
@@ -0,0 +1,353 @@
|
|
1
|
+
import { getModelName } from "../model-mapping.js";
|
2
|
+
/**
|
3
|
+
* Stream with OpenAI SDK
|
4
|
+
*/
|
5
|
+
/**
 * Stream a chat completion through the OpenAI SDK.
 *
 * Emits low-level events (token, tool_call_start/delta/complete, error)
 * via `onEvent`, then invokes `onComplete` with the accumulated message
 * text and tool calls once the stream ends.
 *
 * Fix: OpenAI streams tool-call deltas keyed by `index`; assigning
 * `toolCalls[index]` can leave `undefined` holes if indices arrive
 * non-contiguously, and the completion loop / `onComplete` would then
 * crash dereferencing a hole. Holes are now filtered out.
 *
 * @param specification Graphlit specification (model name, temperature, limits).
 * @param messages Messages already formatted for OpenAI.
 * @param tools Optional tool definitions; `schema` is a JSON-schema string.
 * @param openaiClient OpenAI client instance.
 * @param onEvent Receives streaming events as they arrive.
 * @param onComplete Called with (fullMessage, toolCalls) on success.
 * @throws Re-throws any streaming error after emitting an "error" event.
 */
export async function streamWithOpenAI(specification, messages, tools, openaiClient, // OpenAI client instance
onEvent, onComplete) {
    let fullMessage = "";
    let toolCalls = [];
    try {
        const modelName = getModelName(specification);
        if (!modelName) {
            throw new Error(`No model name found for OpenAI specification: ${specification.name}`);
        }
        const streamConfig = {
            model: modelName,
            messages,
            stream: true,
            temperature: specification.openAI?.temperature,
            //top_p: specification.openAI?.probability,
            max_completion_tokens: specification.openAI?.completionTokenLimit,
        };
        // Add tools if provided
        if (tools && tools.length > 0) {
            streamConfig.tools = tools.map((tool) => ({
                type: "function",
                function: {
                    name: tool.name,
                    description: tool.description,
                    parameters: tool.schema ? JSON.parse(tool.schema) : {},
                },
            }));
        }
        const stream = await openaiClient.chat.completions.create(streamConfig);
        for await (const chunk of stream) {
            const delta = chunk.choices[0]?.delta;
            if (delta?.content) {
                fullMessage += delta.content;
                onEvent({
                    type: "token",
                    token: delta.content,
                });
            }
            // Tool calls stream incrementally, keyed by index.
            if (delta?.tool_calls) {
                for (const toolCallDelta of delta.tool_calls) {
                    const index = toolCallDelta.index;
                    if (!toolCalls[index]) {
                        toolCalls[index] = {
                            id: toolCallDelta.id || `tool_${Date.now()}_${index}`,
                            name: "",
                            arguments: "",
                        };
                        onEvent({
                            type: "tool_call_start",
                            toolCall: {
                                id: toolCalls[index].id,
                                name: toolCallDelta.function?.name || "",
                            },
                        });
                    }
                    if (toolCallDelta.function?.name) {
                        toolCalls[index].name = toolCallDelta.function.name;
                    }
                    if (toolCallDelta.function?.arguments) {
                        toolCalls[index].arguments += toolCallDelta.function.arguments;
                        onEvent({
                            type: "tool_call_delta",
                            toolCallId: toolCalls[index].id,
                            argumentDelta: toolCallDelta.function.arguments,
                        });
                    }
                }
            }
        }
        // Drop holes left by non-contiguous indices so downstream consumers
        // never see undefined entries.
        toolCalls = toolCalls.filter((tc) => tc !== undefined);
        // Emit complete events for tool calls
        for (const toolCall of toolCalls) {
            onEvent({
                type: "tool_call_complete",
                toolCall: {
                    id: toolCall.id,
                    name: toolCall.name,
                    arguments: toolCall.arguments,
                },
            });
        }
        onComplete(fullMessage, toolCalls);
    }
    catch (error) {
        onEvent({
            type: "error",
            error: error instanceof Error ? error.message : "OpenAI streaming failed",
        });
        throw error;
    }
}
|
96
|
+
/**
|
97
|
+
* Stream with Anthropic SDK
|
98
|
+
*/
|
99
|
+
/**
 * Stream a message through the Anthropic SDK.
 *
 * Emits low-level events (token, tool_call_start/delta/complete, error)
 * via `onEvent`, then invokes `onComplete` with the accumulated message
 * text and tool calls once the stream ends.
 *
 * Fix: `content_block_stop` previously emitted a `tool_call_complete` for
 * the most recent tool call on EVERY block stop — so when a text block
 * followed a tool_use block, a stale duplicate completion was re-emitted.
 * We now track whether the currently-streaming block is a tool_use block
 * and only complete on those.
 *
 * @param specification Graphlit specification (model name, temperature, limits).
 * @param messages Messages already formatted for Anthropic.
 * @param systemPrompt Optional system prompt (Anthropic takes it separately).
 * @param tools Optional tool definitions; `schema` is a JSON-schema string.
 * @param anthropicClient Anthropic client instance.
 * @param onEvent Receives streaming events as they arrive.
 * @param onComplete Called with (fullMessage, toolCalls) on success.
 * @throws Re-throws any streaming error after emitting an "error" event.
 */
export async function streamWithAnthropic(specification, messages, systemPrompt, tools, anthropicClient, // Anthropic client instance
onEvent, onComplete) {
    let fullMessage = "";
    let toolCalls = [];
    // True while the content block currently streaming is a tool_use block.
    let currentBlockIsTool = false;
    try {
        const modelName = getModelName(specification);
        if (!modelName) {
            throw new Error(`No model name found for Anthropic specification: ${specification.name}`);
        }
        const streamConfig = {
            model: modelName,
            messages,
            stream: true,
            temperature: specification.anthropic?.temperature,
            //top_p: specification.anthropic?.probability,
            max_tokens: specification.anthropic?.completionTokenLimit,
        };
        if (systemPrompt) {
            streamConfig.system = systemPrompt;
        }
        // Add tools if provided
        if (tools && tools.length > 0) {
            streamConfig.tools = tools.map((tool) => ({
                name: tool.name,
                description: tool.description,
                input_schema: tool.schema ? JSON.parse(tool.schema) : {},
            }));
        }
        const stream = await anthropicClient.messages.create(streamConfig);
        for await (const chunk of stream) {
            if (chunk.type === "content_block_start") {
                currentBlockIsTool = chunk.content_block.type === "tool_use";
                if (currentBlockIsTool) {
                    const toolCall = {
                        id: chunk.content_block.id,
                        name: chunk.content_block.name,
                        arguments: "",
                    };
                    toolCalls.push(toolCall);
                    onEvent({
                        type: "tool_call_start",
                        toolCall: {
                            id: toolCall.id,
                            name: toolCall.name,
                        },
                    });
                }
            }
            else if (chunk.type === "content_block_delta") {
                if (chunk.delta.type === "text_delta") {
                    fullMessage += chunk.delta.text;
                    onEvent({
                        type: "token",
                        token: chunk.delta.text,
                    });
                }
                else if (chunk.delta.type === "input_json_delta") {
                    // Append partial JSON to the most recent tool call.
                    const currentTool = toolCalls[toolCalls.length - 1];
                    if (currentTool) {
                        currentTool.arguments += chunk.delta.partial_json;
                        onEvent({
                            type: "tool_call_delta",
                            toolCallId: currentTool.id,
                            argumentDelta: chunk.delta.partial_json,
                        });
                    }
                }
            }
            else if (chunk.type === "content_block_stop") {
                // Only emit a completion when the block that just ended was
                // actually a tool_use block.
                if (currentBlockIsTool) {
                    const currentTool = toolCalls[toolCalls.length - 1];
                    if (currentTool) {
                        onEvent({
                            type: "tool_call_complete",
                            toolCall: {
                                id: currentTool.id,
                                name: currentTool.name,
                                arguments: currentTool.arguments,
                            },
                        });
                    }
                }
                currentBlockIsTool = false;
            }
        }
        onComplete(fullMessage, toolCalls);
    }
    catch (error) {
        onEvent({
            type: "error",
            error: error instanceof Error ? error.message : "Anthropic streaming failed",
        });
        throw error;
    }
}
|
192
|
+
/**
|
193
|
+
* Stream with Google SDK
|
194
|
+
*/
|
195
|
+
/**
 * Stream a completion through the Google Generative AI SDK.
 *
 * Emits low-level events (token, tool_call_start/delta/complete, error)
 * via `onEvent`, then invokes `onComplete` with the accumulated message
 * text and tool calls once the stream ends.
 *
 * Fixes: (1) a `streamConfig` object (with Anthropic-shaped `input_schema`
 * tools and a `system` field) was previously built but never passed to the
 * SDK — dead, misleading code, removed; (2) as a consequence the
 * `systemPrompt` parameter was silently dropped — it is now forwarded to
 * the SDK via `systemInstruction`.
 *
 * @param specification Graphlit specification (model name, temperature, limits).
 * @param messages Messages already formatted for Google (role/parts shape).
 * @param systemPrompt Optional system prompt, forwarded as systemInstruction.
 * @param tools Optional tool definitions; `schema` is a JSON-schema string.
 * @param googleClient Google GenerativeAI client instance.
 * @param onEvent Receives streaming events as they arrive.
 * @param onComplete Called with (fullMessage, toolCalls) on success.
 * @throws Re-throws any streaming error after emitting an "error" event.
 */
export async function streamWithGoogle(specification, messages, systemPrompt, tools, googleClient, // Google GenerativeAI client instance
onEvent, onComplete) {
    let fullMessage = "";
    let toolCalls = [];
    try {
        const modelName = getModelName(specification);
        if (!modelName) {
            throw new Error(`No model name found for Google specification: ${specification.name}`);
        }
        // Configure tools for Google - expects a single array of function declarations
        const googleTools = tools && tools.length > 0 ? [{
                functionDeclarations: tools.map((tool) => ({
                    name: tool.name,
                    description: tool.description,
                    parameters: tool.schema ? JSON.parse(tool.schema) : {},
                })),
            }] : undefined;
        const model = googleClient.getGenerativeModel({
            model: modelName,
            generationConfig: {
                temperature: specification.google?.temperature ?? 0.1,
                maxOutputTokens: specification.google?.completionTokenLimit ?? 4096,
            },
            // Forward the system prompt; it was previously captured but never sent.
            ...(systemPrompt ? { systemInstruction: systemPrompt } : {}),
            tools: googleTools,
        });
        // Convert messages to Google chat format
        const history = messages.slice(0, -1); // All but last message
        const prompt = messages[messages.length - 1]?.parts[0]?.text || "";
        const chat = model.startChat({ history });
        const result = await chat.sendMessageStream(prompt);
        for await (const chunk of result.stream) {
            const text = chunk.text();
            if (text) {
                fullMessage += text;
                onEvent({
                    type: "token",
                    token: text,
                });
            }
            // Google streams function calls as parts of the candidates;
            // check each chunk for them.
            try {
                const candidate = chunk.candidates?.[0];
                if (candidate?.content?.parts) {
                    for (const part of candidate.content.parts) {
                        if (part.functionCall) {
                            const toolCall = {
                                id: `google_tool_${Date.now()}_${toolCalls.length}`,
                                name: part.functionCall.name,
                                arguments: JSON.stringify(part.functionCall.args || {}),
                            };
                            toolCalls.push(toolCall);
                            // Google delivers complete calls, so emit the full
                            // start/delta/complete sequence at once.
                            onEvent({
                                type: "tool_call_start",
                                toolCall: {
                                    id: toolCall.id,
                                    name: toolCall.name,
                                },
                            });
                            onEvent({
                                type: "tool_call_delta",
                                toolCallId: toolCall.id,
                                argumentDelta: toolCall.arguments,
                            });
                            onEvent({
                                type: "tool_call_complete",
                                toolCall: {
                                    id: toolCall.id,
                                    name: toolCall.name,
                                    arguments: toolCall.arguments,
                                },
                            });
                        }
                    }
                }
            }
            catch (error) {
                // Best-effort: ignore malformed candidate payloads.
            }
        }
        // Google might also return function calls or additional text in the
        // final response that were not surfaced during streaming.
        try {
            const response = await result.response;
            const candidate = response.candidates?.[0];
            if (candidate?.content?.parts) {
                for (const part of candidate.content.parts) {
                    // Check for any final text we might have missed.
                    if (part.text) {
                        const finalText = part.text;
                        // Only add if it's not already included in fullMessage
                        if (!fullMessage.endsWith(finalText)) {
                            fullMessage += finalText;
                            onEvent({
                                type: "token",
                                token: finalText,
                            });
                        }
                    }
                    // Check for function calls not already recorded.
                    if (part.functionCall && !toolCalls.some(tc => tc.name === part.functionCall.name)) {
                        const toolCall = {
                            id: `google_tool_${Date.now()}_${toolCalls.length}`,
                            name: part.functionCall.name,
                            arguments: JSON.stringify(part.functionCall.args || {}),
                        };
                        toolCalls.push(toolCall);
                        // Emit events for function calls found in final response
                        onEvent({
                            type: "tool_call_start",
                            toolCall: {
                                id: toolCall.id,
                                name: toolCall.name,
                            },
                        });
                        onEvent({
                            type: "tool_call_complete",
                            toolCall: {
                                id: toolCall.id,
                                name: toolCall.name,
                                arguments: toolCall.arguments,
                            },
                        });
                    }
                }
            }
        }
        catch (error) {
            // Best-effort: ignore failures while reading the final response.
        }
        onComplete(fullMessage, toolCalls);
    }
    catch (error) {
        onEvent({
            type: "error",
            error: error instanceof Error ? error.message : "Google streaming failed",
        });
        throw error;
    }
}
|
@@ -0,0 +1,52 @@
|
|
1
|
+
import { AgentStreamEvent } from "../types/ui-events.js";
|
2
|
+
import { StreamEvent } from "../client.js";
|
3
|
+
import { ChunkingStrategy } from "./chunk-buffer.js";
|
4
|
+
/**
|
5
|
+
* Adapter that transforms low-level streaming events into high-level UI events
|
6
|
+
* using GraphQL types for type safety
|
7
|
+
*/
|
8
|
+
export declare class UIEventAdapter {
|
9
|
+
private onEvent;
|
10
|
+
private conversationId;
|
11
|
+
private model?;
|
12
|
+
private currentMessage;
|
13
|
+
private isStreaming;
|
14
|
+
private activeToolCalls;
|
15
|
+
private lastUpdateTime;
|
16
|
+
private updateTimer?;
|
17
|
+
private showTokenStream;
|
18
|
+
private chunkBuffer?;
|
19
|
+
private smoothingDelay;
|
20
|
+
private chunkQueue;
|
21
|
+
constructor(onEvent: (event: AgentStreamEvent) => void, conversationId: string, options?: {
|
22
|
+
showTokenStream?: boolean;
|
23
|
+
smoothingEnabled?: boolean;
|
24
|
+
chunkingStrategy?: ChunkingStrategy;
|
25
|
+
smoothingDelay?: number;
|
26
|
+
});
|
27
|
+
/**
|
28
|
+
* Process a raw streaming event and emit appropriate UI events
|
29
|
+
*/
|
30
|
+
handleEvent(event: StreamEvent): void;
|
31
|
+
/**
|
32
|
+
* Set tool execution result directly (for tool handlers)
|
33
|
+
*/
|
34
|
+
setToolResult(toolCallId: string, result: unknown, error?: string): void;
|
35
|
+
private handleStart;
|
36
|
+
private handleToken;
|
37
|
+
private handleMessage;
|
38
|
+
private handleToolCallStart;
|
39
|
+
private handleToolCallDelta;
|
40
|
+
private handleToolCallComplete;
|
41
|
+
private handleComplete;
|
42
|
+
private handleError;
|
43
|
+
private scheduleMessageUpdate;
|
44
|
+
private scheduleChunkEmission;
|
45
|
+
private emitNextChunk;
|
46
|
+
private emitMessageUpdate;
|
47
|
+
private emitUIEvent;
|
48
|
+
/**
|
49
|
+
* Clean up any pending timers
|
50
|
+
*/
|
51
|
+
dispose(): void;
|
52
|
+
}
|