@nocobase/plugin-ai 2.1.0-beta.25 → 2.1.0-beta.26
This diff shows the changes between two publicly released versions of the package, as published to their respective public registries, and is provided for informational purposes only.
- package/dist/client/{559.c119db3f985a695f.js → 559.39872901b9053629.js} +1 -1
- package/dist/client/index.js +3 -3
- package/dist/client/llm-providers/xai/ModelSettings.d.ts +10 -0
- package/dist/client/llm-providers/xai/index.d.ts +10 -0
- package/dist/externalVersion.js +15 -15
- package/dist/node_modules/@langchain/xai/LICENSE +21 -0
- package/dist/node_modules/@langchain/xai/dist/_virtual/rolldown_runtime.cjs +25 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/completions.cjs +568 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/completions.d.cts +619 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/completions.d.ts +619 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/completions.js +566 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/index.cjs +2 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/index.d.ts +3 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/index.js +2 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/responses-types.d.cts +1178 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/responses-types.d.ts +1178 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/responses.cjs +233 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.cts +70 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.ts +70 -0
- package/dist/node_modules/@langchain/xai/dist/chat_models/responses.js +232 -0
- package/dist/node_modules/@langchain/xai/dist/converters/responses.cjs +168 -0
- package/dist/node_modules/@langchain/xai/dist/converters/responses.js +164 -0
- package/dist/node_modules/@langchain/xai/dist/index.cjs +7 -0
- package/dist/node_modules/@langchain/xai/dist/index.d.cts +5 -0
- package/dist/node_modules/@langchain/xai/dist/index.d.ts +6 -0
- package/dist/node_modules/@langchain/xai/dist/index.js +6 -0
- package/dist/node_modules/@langchain/xai/dist/live_search.cjs +54 -0
- package/dist/node_modules/@langchain/xai/dist/live_search.d.cts +145 -0
- package/dist/node_modules/@langchain/xai/dist/live_search.d.ts +145 -0
- package/dist/node_modules/@langchain/xai/dist/live_search.js +51 -0
- package/dist/node_modules/@langchain/xai/dist/profiles.cjs +289 -0
- package/dist/node_modules/@langchain/xai/dist/profiles.js +288 -0
- package/dist/node_modules/@langchain/xai/dist/tools/code_execution.cjs +52 -0
- package/dist/node_modules/@langchain/xai/dist/tools/code_execution.d.cts +64 -0
- package/dist/node_modules/@langchain/xai/dist/tools/code_execution.d.ts +64 -0
- package/dist/node_modules/@langchain/xai/dist/tools/code_execution.js +50 -0
- package/dist/node_modules/@langchain/xai/dist/tools/collections_search.cjs +60 -0
- package/dist/node_modules/@langchain/xai/dist/tools/collections_search.d.cts +90 -0
- package/dist/node_modules/@langchain/xai/dist/tools/collections_search.d.ts +90 -0
- package/dist/node_modules/@langchain/xai/dist/tools/collections_search.js +58 -0
- package/dist/node_modules/@langchain/xai/dist/tools/index.cjs +18 -0
- package/dist/node_modules/@langchain/xai/dist/tools/index.d.cts +18 -0
- package/dist/node_modules/@langchain/xai/dist/tools/index.d.ts +18 -0
- package/dist/node_modules/@langchain/xai/dist/tools/index.js +18 -0
- package/dist/node_modules/@langchain/xai/dist/tools/live_search.cjs +94 -0
- package/dist/node_modules/@langchain/xai/dist/tools/live_search.d.cts +149 -0
- package/dist/node_modules/@langchain/xai/dist/tools/live_search.d.ts +149 -0
- package/dist/node_modules/@langchain/xai/dist/tools/live_search.js +91 -0
- package/dist/node_modules/@langchain/xai/dist/tools/web_search.cjs +57 -0
- package/dist/node_modules/@langchain/xai/dist/tools/web_search.d.cts +104 -0
- package/dist/node_modules/@langchain/xai/dist/tools/web_search.d.ts +104 -0
- package/dist/node_modules/@langchain/xai/dist/tools/web_search.js +55 -0
- package/dist/node_modules/@langchain/xai/dist/tools/x_search.cjs +63 -0
- package/dist/node_modules/@langchain/xai/dist/tools/x_search.d.cts +145 -0
- package/dist/node_modules/@langchain/xai/dist/tools/x_search.d.ts +145 -0
- package/dist/node_modules/@langchain/xai/dist/tools/x_search.js +61 -0
- package/dist/node_modules/@langchain/xai/package.json +1 -0
- package/dist/node_modules/fast-glob/package.json +1 -1
- package/dist/node_modules/flexsearch/package.json +1 -1
- package/dist/node_modules/fs-extra/package.json +1 -1
- package/dist/node_modules/jsonrepair/package.json +1 -1
- package/dist/node_modules/nodejs-snowflake/package.json +1 -1
- package/dist/node_modules/openai/package.json +1 -1
- package/dist/node_modules/zod/package.json +1 -1
- package/dist/server/ai-employees/ai-employee.js +11 -7
- package/dist/server/llm-providers/xai.d.ts +17 -0
- package/dist/server/llm-providers/xai.js +88 -0
- package/dist/server/plugin.js +3 -0
- package/dist/server/workflow/nodes/employee/files.js +7 -4
- package/dist/server/workflow/nodes/employee/index.js +4 -3
- package/dist/server/workflow/nodes/employee/types.d.ts +1 -1
- package/package.json +3 -2
package/dist/node_modules/@langchain/xai/dist/chat_models/responses.cjs
@@ -0,0 +1,233 @@
+const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
+const require_responses = require('../converters/responses.cjs');
+const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
+const __langchain_core_language_models_chat_models = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/chat_models"));
+
+//#region src/chat_models/responses.ts
+/**
+ * xAI Responses API chat model integration.
+ *
+ * This class provides access to xAI's Responses API, which offers enhanced
+ * capabilities including built-in tools, reasoning, and search.
+ *
+ * @example
+ * ```typescript
+ * import { ChatXAIResponses } from "@langchain/xai";
+ *
+ * const llm = new ChatXAIResponses({
+ *   model: "grok-3",
+ *   temperature: 0.7,
+ * });
+ *
+ * const result = await llm.invoke("What is the capital of France?");
+ * console.log(result.content);
+ * ```
+ */
+var ChatXAIResponses = class extends __langchain_core_language_models_chat_models.BaseChatModel {
+  static lc_name() {
+    return "ChatXAIResponses";
+  }
+  lc_serializable = true;
+  lc_namespace = [
+    "langchain",
+    "chat_models",
+    "xai"
+  ];
+  get lc_secrets() {
+    return { apiKey: "XAI_API_KEY" };
+  }
+  get lc_aliases() {
+    return { apiKey: "xai_api_key" };
+  }
+  apiKey;
+  model;
+  streaming;
+  temperature;
+  topP;
+  maxOutputTokens;
+  store;
+  user;
+  baseURL;
+  searchParameters;
+  reasoning;
+  tools;
+  constructor(fields) {
+    super(fields ?? {});
+    const apiKey = fields?.apiKey ?? (0, __langchain_core_utils_env.getEnvironmentVariable)("XAI_API_KEY");
+    if (!apiKey) throw new Error(`xAI API key not found. Please set the XAI_API_KEY environment variable or provide the key in the "apiKey" field.`);
+    this.apiKey = apiKey;
+    this.model = fields?.model ?? "grok-3";
+    this.streaming = fields?.streaming ?? false;
+    this.temperature = fields?.temperature;
+    this.topP = fields?.topP;
+    this.maxOutputTokens = fields?.maxOutputTokens;
+    this.store = fields?.store;
+    this.user = fields?.user;
+    this.baseURL = fields?.baseURL ?? "https://api.x.ai/v1";
+    this.searchParameters = fields?.searchParameters;
+    this.reasoning = fields?.reasoning;
+    this.tools = fields?.tools;
+  }
+  _llmType() {
+    return "xai-responses";
+  }
+  getLsParams(options) {
+    const params = super.getLsParams(options);
+    params.ls_provider = "xai";
+    params.ls_model_name = this.model;
+    params.ls_model_type = "chat";
+    params.ls_temperature = this.temperature;
+    params.ls_max_tokens = this.maxOutputTokens;
+    return params;
+  }
+  toJSON() {
+    const result = super.toJSON();
+    if ("kwargs" in result && typeof result.kwargs === "object" && result.kwargs != null) delete result.kwargs.apiKey;
+    return result;
+  }
+  invocationParams(options) {
+    return {
+      model: this.model,
+      temperature: this.temperature,
+      top_p: this.topP,
+      max_output_tokens: this.maxOutputTokens,
+      store: this.store,
+      user: this.user,
+      stream: this.streaming,
+      previous_response_id: options?.previous_response_id,
+      include: options?.include,
+      text: options?.text,
+      search_parameters: options?.search_parameters ?? this.searchParameters,
+      reasoning: options?.reasoning ?? this.reasoning,
+      tools: options?.tools ?? this.tools,
+      tool_choice: options?.tool_choice,
+      parallel_tool_calls: options?.parallel_tool_calls
+    };
+  }
+  async _makeRequest(request) {
+    const url = `${this.baseURL}/responses`;
+    const headers = {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${this.apiKey}`
+    };
+    if (request.stream) return this._makeStreamingRequest(url, headers, request);
+    const response = await this.caller.call(async () => {
+      const res = await fetch(url, {
+        method: "POST",
+        headers,
+        body: JSON.stringify(request)
+      });
+      if (!res.ok) {
+        const errorBody = await res.text();
+        throw new Error(`xAI API error: ${res.status} ${res.statusText} - ${errorBody}`);
+      }
+      return res.json();
+    });
+    return response;
+  }
+  /**
+   * Makes a streaming request to the xAI Responses API.
+   */
+  async *_makeStreamingRequest(url, headers, request) {
+    const response = await this.caller.call(async () => {
+      const res = await fetch(url, {
+        method: "POST",
+        headers,
+        body: JSON.stringify(request)
+      });
+      if (!res.ok) {
+        const errorBody = await res.text();
+        throw new Error(`xAI API error: ${res.status} ${res.statusText} - ${errorBody}`);
+      }
+      return res;
+    });
+    const reader = response.body?.getReader();
+    if (!reader) throw new Error("No response body");
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (trimmed.startsWith("data: ")) try {
+            const data = JSON.parse(trimmed.slice(6));
+            yield data;
+          } catch {}
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+  async _generate(messages, options, runManager) {
+    options.signal?.throwIfAborted();
+    const invocationParams = this.invocationParams(options);
+    const input = require_responses.convertMessagesToResponsesInput(messages);
+    if (invocationParams.stream) {
+      const stream = this._streamResponseChunks(messages, options, runManager);
+      let finalChunk;
+      for await (const chunk of stream) {
+        chunk.message.response_metadata = {
+          ...chunk.generationInfo,
+          ...chunk.message.response_metadata
+        };
+        finalChunk = finalChunk?.concat(chunk) ?? chunk;
+      }
+      return {
+        generations: finalChunk ? [finalChunk] : [],
+        llmOutput: { estimatedTokenUsage: (finalChunk?.message)?.usage_metadata }
+      };
+    }
+    const response = await this._makeRequest({
+      input,
+      ...invocationParams,
+      stream: false
+    });
+    const aiMessage = require_responses.convertResponseToAIMessage(response);
+    const text = require_responses.extractTextFromOutput(response.output);
+    return {
+      generations: [{
+        text,
+        message: aiMessage
+      }],
+      llmOutput: {
+        id: response.id,
+        estimatedTokenUsage: response.usage ? {
+          promptTokens: response.usage.input_tokens,
+          completionTokens: response.usage.output_tokens,
+          totalTokens: response.usage.total_tokens
+        } : void 0
+      }
+    };
+  }
+  async *_streamResponseChunks(messages, options, runManager) {
+    const invocationParams = this.invocationParams(options);
+    const input = require_responses.convertMessagesToResponsesInput(messages);
+    const streamIterable = await this._makeRequest({
+      input,
+      ...invocationParams,
+      stream: true
+    });
+    for await (const event of streamIterable) {
+      if (options.signal?.aborted) return;
+      const chunk = require_responses.convertStreamEventToChunk(event);
+      if (chunk) {
+        yield chunk;
+        await runManager?.handleLLMNewToken(chunk.text || "", {
+          prompt: 0,
+          completion: 0
+        });
+      }
+    }
+  }
+};
+
+//#endregion
+exports.ChatXAIResponses = ChatXAIResponses;
+//# sourceMappingURL=responses.cjs.map
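The vendored class exposes the standard LangChain chat-model surface, so the usual calls apply. A minimal sketch of non-streaming and streaming use, based on the class's own JSDoc example plus the inherited `stream()` helper from `BaseChatModel`, and assuming `XAI_API_KEY` is set (the constructor throws otherwise):

```typescript
import { ChatXAIResponses } from "@langchain/xai";

// Defaults from the constructor: model "grok-3",
// baseURL "https://api.x.ai/v1", streaming false.
const llm = new ChatXAIResponses({ model: "grok-3", temperature: 0.7 });

// Non-streaming: one POST to {baseURL}/responses; answer in result.content.
const result = await llm.invoke("What is the capital of France?");
console.log(result.content);

// Streaming: the class parses the SSE body line by line and yields chunks.
for await (const chunk of await llm.stream("Tell me a short joke.")) {
  process.stdout.write(String(chunk.content));
}
```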
package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.cts
@@ -0,0 +1,70 @@
+import { ChatXAIResponsesCallOptions, ChatXAIResponsesInput, ChatXAIResponsesInvocationParams, XAIResponse, XAIResponsesCreateParams, XAIResponsesCreateParamsNonStreaming, XAIResponsesCreateParamsStreaming, XAIResponsesReasoning, XAIResponsesSearchParameters, XAIResponsesStreamEvent, XAIResponsesTool } from "./responses-types.cjs";
+import { BaseChatModel, LangSmithParams } from "@langchain/core/language_models/chat_models";
+import { Serialized } from "@langchain/core/load/serializable";
+import { BaseMessage } from "@langchain/core/messages";
+import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
+import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
+
+//#region src/chat_models/responses.d.ts
+
+/**
+ * xAI Responses API chat model integration.
+ *
+ * This class provides access to xAI's Responses API, which offers enhanced
+ * capabilities including built-in tools, reasoning, and search.
+ *
+ * @example
+ * ```typescript
+ * import { ChatXAIResponses } from "@langchain/xai";
+ *
+ * const llm = new ChatXAIResponses({
+ *   model: "grok-3",
+ *   temperature: 0.7,
+ * });
+ *
+ * const result = await llm.invoke("What is the capital of France?");
+ * console.log(result.content);
+ * ```
+ */
+declare class ChatXAIResponses<CallOptions extends ChatXAIResponsesCallOptions = ChatXAIResponsesCallOptions> extends BaseChatModel<CallOptions> {
+  static lc_name(): string;
+  lc_serializable: boolean;
+  lc_namespace: string[];
+  get lc_secrets(): {
+    [key: string]: string;
+  } | undefined;
+  get lc_aliases(): {
+    [key: string]: string;
+  } | undefined;
+  apiKey: string;
+  model: string;
+  streaming: boolean;
+  temperature?: number;
+  topP?: number;
+  maxOutputTokens?: number;
+  store?: boolean;
+  user?: string;
+  baseURL: string;
+  searchParameters?: XAIResponsesSearchParameters;
+  reasoning?: XAIResponsesReasoning;
+  tools?: XAIResponsesTool[];
+  constructor(fields?: ChatXAIResponsesInput);
+  _llmType(): string;
+  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
+  toJSON(): Serialized;
+  invocationParams(options?: this["ParsedCallOptions"]): ChatXAIResponsesInvocationParams;
+  /**
+   * Makes a request to the xAI Responses API.
+   */
+  protected _makeRequest(request: XAIResponsesCreateParamsNonStreaming): Promise<XAIResponse>;
+  protected _makeRequest(request: XAIResponsesCreateParamsStreaming): Promise<AsyncIterable<XAIResponsesStreamEvent>>;
+  /**
+   * Makes a streaming request to the xAI Responses API.
+   */
+  protected _makeStreamingRequest(url: string, headers: Record<string, string>, request: XAIResponsesCreateParams): AsyncIterable<XAIResponsesStreamEvent>;
+  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+}
+//#endregion
+export { ChatXAIResponses };
+//# sourceMappingURL=responses.d.cts.map
package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.ts
@@ -0,0 +1,70 @@
+import { ChatXAIResponsesCallOptions, ChatXAIResponsesInput, ChatXAIResponsesInvocationParams, XAIResponse, XAIResponsesCreateParams, XAIResponsesCreateParamsNonStreaming, XAIResponsesCreateParamsStreaming, XAIResponsesReasoning, XAIResponsesSearchParameters, XAIResponsesStreamEvent, XAIResponsesTool } from "./responses-types.js";
+import { BaseChatModel, LangSmithParams } from "@langchain/core/language_models/chat_models";
+import { BaseMessage } from "@langchain/core/messages";
+import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
+import { Serialized } from "@langchain/core/load/serializable";
+import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
+
+//#region src/chat_models/responses.d.ts
+
+/**
+ * xAI Responses API chat model integration.
+ *
+ * This class provides access to xAI's Responses API, which offers enhanced
+ * capabilities including built-in tools, reasoning, and search.
+ *
+ * @example
+ * ```typescript
+ * import { ChatXAIResponses } from "@langchain/xai";
+ *
+ * const llm = new ChatXAIResponses({
+ *   model: "grok-3",
+ *   temperature: 0.7,
+ * });
+ *
+ * const result = await llm.invoke("What is the capital of France?");
+ * console.log(result.content);
+ * ```
+ */
+declare class ChatXAIResponses<CallOptions extends ChatXAIResponsesCallOptions = ChatXAIResponsesCallOptions> extends BaseChatModel<CallOptions> {
+  static lc_name(): string;
+  lc_serializable: boolean;
+  lc_namespace: string[];
+  get lc_secrets(): {
+    [key: string]: string;
+  } | undefined;
+  get lc_aliases(): {
+    [key: string]: string;
+  } | undefined;
+  apiKey: string;
+  model: string;
+  streaming: boolean;
+  temperature?: number;
+  topP?: number;
+  maxOutputTokens?: number;
+  store?: boolean;
+  user?: string;
+  baseURL: string;
+  searchParameters?: XAIResponsesSearchParameters;
+  reasoning?: XAIResponsesReasoning;
+  tools?: XAIResponsesTool[];
+  constructor(fields?: ChatXAIResponsesInput);
+  _llmType(): string;
+  getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
+  toJSON(): Serialized;
+  invocationParams(options?: this["ParsedCallOptions"]): ChatXAIResponsesInvocationParams;
+  /**
+   * Makes a request to the xAI Responses API.
+   */
+  protected _makeRequest(request: XAIResponsesCreateParamsNonStreaming): Promise<XAIResponse>;
+  protected _makeRequest(request: XAIResponsesCreateParamsStreaming): Promise<AsyncIterable<XAIResponsesStreamEvent>>;
+  /**
+   * Makes a streaming request to the xAI Responses API.
+   */
+  protected _makeStreamingRequest(url: string, headers: Record<string, string>, request: XAIResponsesCreateParams): AsyncIterable<XAIResponsesStreamEvent>;
+  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+}
+//#endregion
+export { ChatXAIResponses };
+//# sourceMappingURL=responses.d.ts.map
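As the declarations above show, most request knobs can also be passed per call; `invocationParams` merges them over the constructor defaults and adds request-only fields such as `previous_response_id`. A hedged sketch follows; the exact shapes of the tool, search, and reasoning options live in `responses-types.d.ts` (not reproduced in this diff), so the object literals and the response id below are assumptions for illustration:

```typescript
import { ChatXAIResponses } from "@langchain/xai";

const llm = new ChatXAIResponses({ model: "grok-3" });

// Per-call options override constructor-level defaults
// (search_parameters, reasoning, tools) for this request only.
const result = await llm.invoke("Follow up on my last question.", {
  previous_response_id: "resp_123",  // hypothetical id from a prior response
  reasoning: { effort: "low" },      // assumed shape; see responses-types.d.ts
  parallel_tool_calls: false,
});
```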
package/dist/node_modules/@langchain/xai/dist/chat_models/responses.js
@@ -0,0 +1,232 @@
+import { convertMessagesToResponsesInput, convertResponseToAIMessage, convertStreamEventToChunk, extractTextFromOutput } from "../converters/responses.js";
+import { getEnvironmentVariable } from "@langchain/core/utils/env";
+import { BaseChatModel } from "@langchain/core/language_models/chat_models";
+
+//#region src/chat_models/responses.ts
+/**
+ * xAI Responses API chat model integration.
+ *
+ * This class provides access to xAI's Responses API, which offers enhanced
+ * capabilities including built-in tools, reasoning, and search.
+ *
+ * @example
+ * ```typescript
+ * import { ChatXAIResponses } from "@langchain/xai";
+ *
+ * const llm = new ChatXAIResponses({
+ *   model: "grok-3",
+ *   temperature: 0.7,
+ * });
+ *
+ * const result = await llm.invoke("What is the capital of France?");
+ * console.log(result.content);
+ * ```
+ */
+var ChatXAIResponses = class extends BaseChatModel {
+  static lc_name() {
+    return "ChatXAIResponses";
+  }
+  lc_serializable = true;
+  lc_namespace = [
+    "langchain",
+    "chat_models",
+    "xai"
+  ];
+  get lc_secrets() {
+    return { apiKey: "XAI_API_KEY" };
+  }
+  get lc_aliases() {
+    return { apiKey: "xai_api_key" };
+  }
+  apiKey;
+  model;
+  streaming;
+  temperature;
+  topP;
+  maxOutputTokens;
+  store;
+  user;
+  baseURL;
+  searchParameters;
+  reasoning;
+  tools;
+  constructor(fields) {
+    super(fields ?? {});
+    const apiKey = fields?.apiKey ?? getEnvironmentVariable("XAI_API_KEY");
+    if (!apiKey) throw new Error(`xAI API key not found. Please set the XAI_API_KEY environment variable or provide the key in the "apiKey" field.`);
+    this.apiKey = apiKey;
+    this.model = fields?.model ?? "grok-3";
+    this.streaming = fields?.streaming ?? false;
+    this.temperature = fields?.temperature;
+    this.topP = fields?.topP;
+    this.maxOutputTokens = fields?.maxOutputTokens;
+    this.store = fields?.store;
+    this.user = fields?.user;
+    this.baseURL = fields?.baseURL ?? "https://api.x.ai/v1";
+    this.searchParameters = fields?.searchParameters;
+    this.reasoning = fields?.reasoning;
+    this.tools = fields?.tools;
+  }
+  _llmType() {
+    return "xai-responses";
+  }
+  getLsParams(options) {
+    const params = super.getLsParams(options);
+    params.ls_provider = "xai";
+    params.ls_model_name = this.model;
+    params.ls_model_type = "chat";
+    params.ls_temperature = this.temperature;
+    params.ls_max_tokens = this.maxOutputTokens;
+    return params;
+  }
+  toJSON() {
+    const result = super.toJSON();
+    if ("kwargs" in result && typeof result.kwargs === "object" && result.kwargs != null) delete result.kwargs.apiKey;
+    return result;
+  }
+  invocationParams(options) {
+    return {
+      model: this.model,
+      temperature: this.temperature,
+      top_p: this.topP,
+      max_output_tokens: this.maxOutputTokens,
+      store: this.store,
+      user: this.user,
+      stream: this.streaming,
+      previous_response_id: options?.previous_response_id,
+      include: options?.include,
+      text: options?.text,
+      search_parameters: options?.search_parameters ?? this.searchParameters,
+      reasoning: options?.reasoning ?? this.reasoning,
+      tools: options?.tools ?? this.tools,
+      tool_choice: options?.tool_choice,
+      parallel_tool_calls: options?.parallel_tool_calls
+    };
+  }
+  async _makeRequest(request) {
+    const url = `${this.baseURL}/responses`;
+    const headers = {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${this.apiKey}`
+    };
+    if (request.stream) return this._makeStreamingRequest(url, headers, request);
+    const response = await this.caller.call(async () => {
+      const res = await fetch(url, {
+        method: "POST",
+        headers,
+        body: JSON.stringify(request)
+      });
+      if (!res.ok) {
+        const errorBody = await res.text();
+        throw new Error(`xAI API error: ${res.status} ${res.statusText} - ${errorBody}`);
+      }
+      return res.json();
+    });
+    return response;
+  }
+  /**
+   * Makes a streaming request to the xAI Responses API.
+   */
+  async *_makeStreamingRequest(url, headers, request) {
+    const response = await this.caller.call(async () => {
+      const res = await fetch(url, {
+        method: "POST",
+        headers,
+        body: JSON.stringify(request)
+      });
+      if (!res.ok) {
+        const errorBody = await res.text();
+        throw new Error(`xAI API error: ${res.status} ${res.statusText} - ${errorBody}`);
+      }
+      return res;
+    });
+    const reader = response.body?.getReader();
+    if (!reader) throw new Error("No response body");
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (trimmed.startsWith("data: ")) try {
+            const data = JSON.parse(trimmed.slice(6));
+            yield data;
+          } catch {}
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+  async _generate(messages, options, runManager) {
+    options.signal?.throwIfAborted();
+    const invocationParams = this.invocationParams(options);
+    const input = convertMessagesToResponsesInput(messages);
+    if (invocationParams.stream) {
+      const stream = this._streamResponseChunks(messages, options, runManager);
+      let finalChunk;
+      for await (const chunk of stream) {
+        chunk.message.response_metadata = {
+          ...chunk.generationInfo,
+          ...chunk.message.response_metadata
+        };
+        finalChunk = finalChunk?.concat(chunk) ?? chunk;
+      }
+      return {
+        generations: finalChunk ? [finalChunk] : [],
+        llmOutput: { estimatedTokenUsage: (finalChunk?.message)?.usage_metadata }
+      };
+    }
+    const response = await this._makeRequest({
+      input,
+      ...invocationParams,
+      stream: false
+    });
+    const aiMessage = convertResponseToAIMessage(response);
+    const text = extractTextFromOutput(response.output);
+    return {
+      generations: [{
+        text,
+        message: aiMessage
+      }],
+      llmOutput: {
+        id: response.id,
+        estimatedTokenUsage: response.usage ? {
+          promptTokens: response.usage.input_tokens,
+          completionTokens: response.usage.output_tokens,
+          totalTokens: response.usage.total_tokens
+        } : void 0
+      }
+    };
+  }
+  async *_streamResponseChunks(messages, options, runManager) {
+    const invocationParams = this.invocationParams(options);
+    const input = convertMessagesToResponsesInput(messages);
+    const streamIterable = await this._makeRequest({
+      input,
+      ...invocationParams,
+      stream: true
+    });
+    for await (const event of streamIterable) {
+      if (options.signal?.aborted) return;
+      const chunk = convertStreamEventToChunk(event);
+      if (chunk) {
+        yield chunk;
+        await runManager?.handleLLMNewToken(chunk.text || "", {
+          prompt: 0,
+          completion: 0
+        });
+      }
+    }
+  }
+};
+
+//#endregion
+export { ChatXAIResponses };
+//# sourceMappingURL=responses.js.map
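For reference, `_makeRequest` amounts to a plain `fetch` against the Responses endpoint. A stripped-down sketch of the same call outside LangChain, with field names taken from `invocationParams` above; the shape of the `input` items is produced by `convertMessagesToResponsesInput` (not shown in this diff), so the literal below is an assumption:

```typescript
// Minimal reproduction of the HTTP call the class makes internally.
const res = await fetch("https://api.x.ai/v1/responses", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.XAI_API_KEY}`,
  },
  body: JSON.stringify({
    model: "grok-3",
    input: [{ role: "user", content: "Hello" }], // assumed input item shape
    stream: false,
  }),
});
if (!res.ok) throw new Error(`xAI API error: ${res.status} ${res.statusText}`);
// Non-streaming responses carry output plus
// usage.{input_tokens, output_tokens, total_tokens}.
const data = await res.json();
```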