@nocobase/plugin-ai 2.1.0-beta.24 → 2.1.0-beta.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. package/dist/ai/ai-employees/dara.js +1 -0
  2. package/dist/ai/tools/sub-agents/shared.js +3 -1
  3. package/dist/client/343.6f36d97dd122c5b6.js +10 -0
  4. package/dist/client/{559.133d286a0a0a1d93.js → 559.39872901b9053629.js} +1 -1
  5. package/dist/client/646.afa699c92cd556f3.js +10 -0
  6. package/dist/client/ai-employees/types.d.ts +2 -0
  7. package/dist/client/components/skill-settings.d.ts +2 -0
  8. package/dist/client/index.js +3 -3
  9. package/dist/client/llm-providers/mimo/ModelSettings.d.ts +10 -0
  10. package/dist/client/llm-providers/mimo/index.d.ts +10 -0
  11. package/dist/client/llm-providers/xai/ModelSettings.d.ts +10 -0
  12. package/dist/client/llm-providers/xai/index.d.ts +10 -0
  13. package/dist/collections/ai-employees.d.ts +7 -0
  14. package/dist/collections/ai-employees.js +13 -0
  15. package/dist/externalVersion.js +15 -15
  16. package/dist/locale/en-US.json +2 -0
  17. package/dist/locale/zh-CN.json +2 -0
  18. package/dist/node_modules/@langchain/xai/LICENSE +21 -0
  19. package/dist/node_modules/@langchain/xai/dist/_virtual/rolldown_runtime.cjs +25 -0
  20. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.cjs +568 -0
  21. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.d.cts +619 -0
  22. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.d.ts +619 -0
  23. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.js +566 -0
  24. package/dist/node_modules/@langchain/xai/dist/chat_models/index.cjs +2 -0
  25. package/dist/node_modules/@langchain/xai/dist/chat_models/index.d.ts +3 -0
  26. package/dist/node_modules/@langchain/xai/dist/chat_models/index.js +2 -0
  27. package/dist/node_modules/@langchain/xai/dist/chat_models/responses-types.d.cts +1178 -0
  28. package/dist/node_modules/@langchain/xai/dist/chat_models/responses-types.d.ts +1178 -0
  29. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.cjs +233 -0
  30. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.cts +70 -0
  31. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.ts +70 -0
  32. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.js +232 -0
  33. package/dist/node_modules/@langchain/xai/dist/converters/responses.cjs +168 -0
  34. package/dist/node_modules/@langchain/xai/dist/converters/responses.js +164 -0
  35. package/dist/node_modules/@langchain/xai/dist/index.cjs +7 -0
  36. package/dist/node_modules/@langchain/xai/dist/index.d.cts +5 -0
  37. package/dist/node_modules/@langchain/xai/dist/index.d.ts +6 -0
  38. package/dist/node_modules/@langchain/xai/dist/index.js +6 -0
  39. package/dist/node_modules/@langchain/xai/dist/live_search.cjs +54 -0
  40. package/dist/node_modules/@langchain/xai/dist/live_search.d.cts +145 -0
  41. package/dist/node_modules/@langchain/xai/dist/live_search.d.ts +145 -0
  42. package/dist/node_modules/@langchain/xai/dist/live_search.js +51 -0
  43. package/dist/node_modules/@langchain/xai/dist/profiles.cjs +289 -0
  44. package/dist/node_modules/@langchain/xai/dist/profiles.js +288 -0
  45. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.cjs +52 -0
  46. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.d.cts +64 -0
  47. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.d.ts +64 -0
  48. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.js +50 -0
  49. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.cjs +60 -0
  50. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.d.cts +90 -0
  51. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.d.ts +90 -0
  52. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.js +58 -0
  53. package/dist/node_modules/@langchain/xai/dist/tools/index.cjs +18 -0
  54. package/dist/node_modules/@langchain/xai/dist/tools/index.d.cts +18 -0
  55. package/dist/node_modules/@langchain/xai/dist/tools/index.d.ts +18 -0
  56. package/dist/node_modules/@langchain/xai/dist/tools/index.js +18 -0
  57. package/dist/node_modules/@langchain/xai/dist/tools/live_search.cjs +94 -0
  58. package/dist/node_modules/@langchain/xai/dist/tools/live_search.d.cts +149 -0
  59. package/dist/node_modules/@langchain/xai/dist/tools/live_search.d.ts +149 -0
  60. package/dist/node_modules/@langchain/xai/dist/tools/live_search.js +91 -0
  61. package/dist/node_modules/@langchain/xai/dist/tools/web_search.cjs +57 -0
  62. package/dist/node_modules/@langchain/xai/dist/tools/web_search.d.cts +104 -0
  63. package/dist/node_modules/@langchain/xai/dist/tools/web_search.d.ts +104 -0
  64. package/dist/node_modules/@langchain/xai/dist/tools/web_search.js +55 -0
  65. package/dist/node_modules/@langchain/xai/dist/tools/x_search.cjs +63 -0
  66. package/dist/node_modules/@langchain/xai/dist/tools/x_search.d.cts +145 -0
  67. package/dist/node_modules/@langchain/xai/dist/tools/x_search.d.ts +145 -0
  68. package/dist/node_modules/@langchain/xai/dist/tools/x_search.js +61 -0
  69. package/dist/node_modules/@langchain/xai/package.json +1 -0
  70. package/dist/node_modules/fast-glob/package.json +1 -1
  71. package/dist/node_modules/flexsearch/package.json +1 -1
  72. package/dist/node_modules/fs-extra/package.json +1 -1
  73. package/dist/node_modules/jsonrepair/package.json +1 -1
  74. package/dist/node_modules/nodejs-snowflake/package.json +1 -1
  75. package/dist/node_modules/openai/package.json +1 -1
  76. package/dist/node_modules/zod/package.json +1 -1
  77. package/dist/server/ai-employees/ai-employee.js +20 -10
  78. package/dist/server/llm-providers/common/reasoning.js +2 -4
  79. package/dist/server/llm-providers/mimo.d.ts +37 -0
  80. package/dist/server/llm-providers/mimo.js +156 -0
  81. package/dist/server/llm-providers/xai.d.ts +17 -0
  82. package/dist/server/llm-providers/xai.js +88 -0
  83. package/dist/server/migrations/20260428175558-update-ai-employee-category.d.ts +14 -0
  84. package/dist/server/migrations/20260428175558-update-ai-employee-category.js +55 -0
  85. package/dist/server/migrations/20260429175132-ai-employee-deprecated-orin.d.ts +14 -0
  86. package/dist/server/migrations/20260429175132-ai-employee-deprecated-orin.js +53 -0
  87. package/dist/server/plugin.js +5 -0
  88. package/dist/server/resource/aiEmployees.js +10 -1
  89. package/dist/server/workflow/nodes/employee/files.js +7 -4
  90. package/dist/server/workflow/nodes/employee/index.js +136 -132
  91. package/dist/server/workflow/nodes/employee/types.d.ts +1 -1
  92. package/package.json +3 -2
  93. package/dist/client/343.83f7d96664e4e038.js +0 -10
  94. package/dist/client/646.cba98d80e9e6ea74.js +0 -10
@@ -0,0 +1,619 @@
1
+ import { XAISearchParameters, XAISearchParametersPayload } from "../live_search.js";
2
+ import { XAILiveSearchTool } from "../tools/live_search.js";
3
+ import { ChatOpenAICompletions, OpenAIClient, OpenAICoreRequestOptions } from "@langchain/openai";
4
+ import { BaseChatModelCallOptions, BaseChatModelParams, BindToolsInput, LangSmithParams } from "@langchain/core/language_models/chat_models";
5
+ import { AIMessageChunk, BaseMessage, UsageMetadata } from "@langchain/core/messages";
6
+ import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
7
+ import { ModelProfile } from "@langchain/core/language_models/profile";
8
+ import { Serialized } from "@langchain/core/load/serializable";
9
+ import { Runnable } from "@langchain/core/runnables";
10
+ import { InteropZodType } from "@langchain/core/utils/types";
11
+
12
+ //#region src/chat_models/completions.d.ts
13
+ type OpenAIToolChoice = OpenAIClient.ChatCompletionToolChoiceOption | "any" | string;
14
+ /**
15
+ * Union type for all xAI built-in server-side tools.
16
+ */
17
+ type XAIBuiltInTool = XAILiveSearchTool;
18
+ /**
19
+ * Tool type that includes both standard tools and xAI built-in tools.
20
+ */
21
+ type ChatXAIToolType = BindToolsInput | OpenAIClient.ChatCompletionTool | XAIBuiltInTool;
22
+ /**
23
+ * xAI-specific invocation parameters that extend the OpenAI completion params
24
+ * with xAI's search_parameters field.
25
+ */
26
+ type ChatXAICompletionsInvocationParams = Omit<OpenAIClient.Chat.Completions.ChatCompletionCreateParams, "messages"> & {
27
+ /**
28
+ * Search parameters for xAI's Live Search API.
29
+ * When present, enables the model to search the web for real-time information.
30
+ */
31
+ search_parameters?: XAISearchParametersPayload;
32
+ };
33
+ /**
34
+ * xAI-specific additional kwargs that may be present on AI messages.
35
+ * Includes xAI-specific fields like reasoning_content.
36
+ */
37
+ interface XAIAdditionalKwargs {
38
+ /**
39
+ * The reasoning content from xAI models that support chain-of-thought reasoning.
40
+ * This contains the model's internal reasoning process.
41
+ */
42
+ reasoning_content?: string;
43
+ /**
44
+ * Tool calls made by the model.
45
+ */
46
+ tool_calls?: OpenAIClient.ChatCompletionMessageToolCall[];
47
+ /**
48
+ * Additional properties that may be present.
49
+ */
50
+ [key: string]: unknown;
51
+ }
52
+ /**
53
+ * xAI-specific response metadata that may include usage information.
54
+ */
55
+ interface XAIResponseMetadata {
56
+ /**
57
+ * Token usage information.
58
+ */
59
+ usage?: UsageMetadata;
60
+ /**
61
+ * Additional metadata properties.
62
+ */
63
+ [key: string]: unknown;
64
+ }
65
+ /**
66
+ * Checks if a tool is an xAI built-in tool (like live_search).
67
+ * Built-in tools are executed server-side by the xAI API.
68
+ *
69
+ * @param tool - The tool to check
70
+ * @returns true if the tool is an xAI built-in tool
71
+ */
72
+ declare function isXAIBuiltInTool(tool: ChatXAIToolType): tool is XAIBuiltInTool;
73
+ interface ChatXAICallOptions extends BaseChatModelCallOptions {
74
+ headers?: Record<string, string>;
75
+ /**
76
+ * A list of tools the model may call.
77
+ * Can include standard function tools and xAI built-in tools like `{ type: "live_search" }`.
78
+ *
79
+ * @example
80
+ * ```typescript
81
+ * // Using built-in live_search tool
82
+ * const llm = new ChatXAI().bindTools([{ type: "live_search" }]);
83
+ * const result = await llm.invoke("What happened in tech news today?");
84
+ * ```
85
+ */
86
+ tools?: ChatXAIToolType[];
87
+ tool_choice?: OpenAIToolChoice | string | "auto" | "any";
88
+ /**
89
+ * Search parameters for xAI's Live Search API.
90
+ * Enables the model to search the web for real-time information.
91
+ *
92
+ * @note This is an alternative to using `tools: [{ type: "live_search" }]`.
93
+ * The Live Search API parameters approach may be deprecated in favor of
94
+ * the tool-based approach.
95
+ *
96
+ * @example
97
+ * ```typescript
98
+ * const result = await llm.invoke("What's the latest news?", {
99
+ * searchParameters: {
100
+ * mode: "auto",
101
+ * max_search_results: 5,
102
+ * }
103
+ * });
104
+ * ```
105
+ */
106
+ searchParameters?: XAISearchParameters;
107
+ }
108
+ interface ChatXAIInput extends BaseChatModelParams {
109
+ /**
110
+ * The xAI API key to use for requests.
111
+ * @default process.env.XAI_API_KEY
112
+ */
113
+ apiKey?: string;
114
+ /**
115
+ * The name of the model to use.
116
+ * @default "grok-beta"
117
+ */
118
+ model?: string;
119
+ /**
120
+ * Up to 4 sequences where the API will stop generating further tokens. The
121
+ * returned text will not contain the stop sequence.
122
+ * Alias for `stopSequences`
123
+ */
124
+ stop?: Array<string>;
125
+ /**
126
+ * Up to 4 sequences where the API will stop generating further tokens. The
127
+ * returned text will not contain the stop sequence.
128
+ */
129
+ stopSequences?: Array<string>;
130
+ /**
131
+ * Whether or not to stream responses.
132
+ */
133
+ streaming?: boolean;
134
+ /**
135
+ * The temperature to use for sampling.
136
+ * @default 0.7
137
+ */
138
+ temperature?: number;
139
+ /**
140
+ * The maximum number of tokens that the model can process in a single response.
141
+ * This limit ensures computational efficiency and resource management.
142
+ */
143
+ maxTokens?: number;
144
+ /**
145
+ * Default search parameters for xAI's Live Search API.
146
+ * When set, these parameters will be applied to all requests unless
147
+ * overridden in the call options.
148
+ *
149
+ * @example
150
+ * ```typescript
151
+ * const llm = new ChatXAI({
152
+ * model: "grok-beta",
153
+ * searchParameters: {
154
+ * mode: "auto",
155
+ * max_search_results: 5,
156
+ * }
157
+ * });
158
+ * ```
159
+ */
160
+ searchParameters?: XAISearchParameters;
161
+ /**
162
+ * The base URL for the xAI API.
163
+ * @default "https://api.x.ai/v1"
164
+ */
165
+ baseURL?: string;
166
+ }
167
+ /**
168
+ * xAI chat model integration.
169
+ *
170
+ * The xAI API is compatible with the OpenAI API, with some limitations.
171
+ *
172
+ * Setup:
173
+ * Install `@langchain/xai` and set an environment variable named `XAI_API_KEY`.
174
+ *
175
+ * ```bash
176
+ * npm install @langchain/xai
177
+ * export XAI_API_KEY="your-api-key"
178
+ * ```
179
+ *
180
+ * ## [Constructor args](https://api.js.langchain.com/classes/_langchain_xai.ChatXAI.html#constructor)
181
+ *
182
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/_langchain_xai.ChatXAICallOptions.html)
183
+ *
184
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
185
+ * They can also be passed via `.withConfig`, or the second arg in `.bindTools`, like shown in the examples below:
186
+ *
187
+ * ```typescript
188
+ * // When calling `.withConfig`, call options should be passed via the first argument
189
+ * const llmWithArgsBound = llm.withConfig({
190
+ * stop: ["\n"],
191
+ * tools: [...],
192
+ * });
193
+ *
194
+ * // When calling `.bindTools`, call options should be passed via the second argument
195
+ * const llmWithTools = llm.bindTools(
196
+ * [...],
197
+ * {
198
+ * tool_choice: "auto",
199
+ * }
200
+ * );
201
+ * ```
202
+ *
203
+ * ## Examples
204
+ *
205
+ * <details open>
206
+ * <summary><strong>Instantiate</strong></summary>
207
+ *
208
+ * ```typescript
209
+ * import { ChatXAI } from '@langchain/xai';
210
+ *
211
+ * const llm = new ChatXAI({
212
+ * model: "grok-beta",
213
+ * temperature: 0,
214
+ * // other params...
215
+ * });
216
+ * ```
217
+ * </details>
218
+ *
219
+ * <br />
220
+ *
221
+ * <details>
222
+ * <summary><strong>Invoking</strong></summary>
223
+ *
224
+ * ```typescript
225
+ * const input = `Translate "I love programming" into French.`;
226
+ *
227
+ * // Models also accept a list of chat messages or a formatted prompt
228
+ * const result = await llm.invoke(input);
229
+ * console.log(result);
230
+ * ```
231
+ *
232
+ * ```txt
233
+ * AIMessage {
234
+ * "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.",
235
+ * "additional_kwargs": {},
236
+ * "response_metadata": {
237
+ * "tokenUsage": {
238
+ * "completionTokens": 82,
239
+ * "promptTokens": 20,
240
+ * "totalTokens": 102
241
+ * },
242
+ * "finish_reason": "stop"
243
+ * },
244
+ * "tool_calls": [],
245
+ * "invalid_tool_calls": []
246
+ * }
247
+ * ```
248
+ * </details>
249
+ *
250
+ * <br />
251
+ *
252
+ * <details>
253
+ * <summary><strong>Streaming Chunks</strong></summary>
254
+ *
255
+ * ```typescript
256
+ * for await (const chunk of await llm.stream(input)) {
257
+ * console.log(chunk);
258
+ * }
259
+ * ```
260
+ *
261
+ * ```txt
262
+ * AIMessageChunk {
263
+ * "content": "",
264
+ * "additional_kwargs": {},
265
+ * "response_metadata": {
266
+ * "finishReason": null
267
+ * },
268
+ * "tool_calls": [],
269
+ * "tool_call_chunks": [],
270
+ * "invalid_tool_calls": []
271
+ * }
272
+ * AIMessageChunk {
273
+ * "content": "The",
274
+ * "additional_kwargs": {},
275
+ * "response_metadata": {
276
+ * "finishReason": null
277
+ * },
278
+ * "tool_calls": [],
279
+ * "tool_call_chunks": [],
280
+ * "invalid_tool_calls": []
281
+ * }
282
+ * AIMessageChunk {
283
+ * "content": " French",
284
+ * "additional_kwargs": {},
285
+ * "response_metadata": {
286
+ * "finishReason": null
287
+ * },
288
+ * "tool_calls": [],
289
+ * "tool_call_chunks": [],
290
+ * "invalid_tool_calls": []
291
+ * }
292
+ * AIMessageChunk {
293
+ * "content": " translation",
294
+ * "additional_kwargs": {},
295
+ * "response_metadata": {
296
+ * "finishReason": null
297
+ * },
298
+ * "tool_calls": [],
299
+ * "tool_call_chunks": [],
300
+ * "invalid_tool_calls": []
301
+ * }
302
+ * AIMessageChunk {
303
+ * "content": " of",
304
+ * "additional_kwargs": {},
305
+ * "response_metadata": {
306
+ * "finishReason": null
307
+ * },
308
+ * "tool_calls": [],
309
+ * "tool_call_chunks": [],
310
+ * "invalid_tool_calls": []
311
+ * }
312
+ * AIMessageChunk {
313
+ * "content": " \"",
314
+ * "additional_kwargs": {},
315
+ * "response_metadata": {
316
+ * "finishReason": null
317
+ * },
318
+ * "tool_calls": [],
319
+ * "tool_call_chunks": [],
320
+ * "invalid_tool_calls": []
321
+ * }
322
+ * AIMessageChunk {
323
+ * "content": "I",
324
+ * "additional_kwargs": {},
325
+ * "response_metadata": {
326
+ * "finishReason": null
327
+ * },
328
+ * "tool_calls": [],
329
+ * "tool_call_chunks": [],
330
+ * "invalid_tool_calls": []
331
+ * }
332
+ * AIMessageChunk {
333
+ * "content": " love",
334
+ * "additional_kwargs": {},
335
+ * "response_metadata": {
336
+ * "finishReason": null
337
+ * },
338
+ * "tool_calls": [],
339
+ * "tool_call_chunks": [],
340
+ * "invalid_tool_calls": []
341
+ * }
342
+ * ...
343
+ * AIMessageChunk {
344
+ * "content": ".",
345
+ * "additional_kwargs": {},
346
+ * "response_metadata": {
347
+ * "finishReason": null
348
+ * },
349
+ * "tool_calls": [],
350
+ * "tool_call_chunks": [],
351
+ * "invalid_tool_calls": []
352
+ * }
353
+ * AIMessageChunk {
354
+ * "content": "",
355
+ * "additional_kwargs": {},
356
+ * "response_metadata": {
357
+ * "finishReason": "stop"
358
+ * },
359
+ * "tool_calls": [],
360
+ * "tool_call_chunks": [],
361
+ * "invalid_tool_calls": []
362
+ * }
363
+ * ```
364
+ * </details>
365
+ *
366
+ * <br />
367
+ *
368
+ * <details>
369
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
370
+ *
371
+ * ```typescript
372
+ * import { AIMessageChunk } from '@langchain/core/messages';
373
+ * import { concat } from '@langchain/core/utils/stream';
374
+ *
375
+ * const stream = await llm.stream(input);
376
+ * let full: AIMessageChunk | undefined;
377
+ * for await (const chunk of stream) {
378
+ * full = !full ? chunk : concat(full, chunk);
379
+ * }
380
+ * console.log(full);
381
+ * ```
382
+ *
383
+ * ```txt
384
+ * AIMessageChunk {
385
+ * "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.",
386
+ * "additional_kwargs": {},
387
+ * "response_metadata": {
388
+ * "finishReason": "stop"
389
+ * },
390
+ * "tool_calls": [],
391
+ * "tool_call_chunks": [],
392
+ * "invalid_tool_calls": []
393
+ * }
394
+ * ```
395
+ * </details>
396
+ *
397
+ * <br />
398
+ *
399
+ * <details>
400
+ * <summary><strong>Bind tools</strong></summary>
401
+ *
402
+ * ```typescript
403
+ * import { z } from 'zod';
404
+ *
405
+ * const llmForToolCalling = new ChatXAI({
406
+ * model: "grok-beta",
407
+ * temperature: 0,
408
+ * // other params...
409
+ * });
410
+ *
411
+ * const GetWeather = {
412
+ * name: "GetWeather",
413
+ * description: "Get the current weather in a given location",
414
+ * schema: z.object({
415
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
416
+ * }),
417
+ * }
418
+ *
419
+ * const GetPopulation = {
420
+ * name: "GetPopulation",
421
+ * description: "Get the current population in a given location",
422
+ * schema: z.object({
423
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
424
+ * }),
425
+ * }
426
+ *
427
+ * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);
428
+ * const aiMsg = await llmWithTools.invoke(
429
+ * "Which city is hotter today and which is bigger: LA or NY?"
430
+ * );
431
+ * console.log(aiMsg.tool_calls);
432
+ * ```
433
+ *
434
+ * ```txt
435
+ * [
436
+ * {
437
+ * name: 'GetWeather',
438
+ * args: { location: 'Los Angeles, CA' },
439
+ * type: 'tool_call',
440
+ * id: 'call_cd34'
441
+ * },
442
+ * {
443
+ * name: 'GetWeather',
444
+ * args: { location: 'New York, NY' },
445
+ * type: 'tool_call',
446
+ * id: 'call_68rf'
447
+ * },
448
+ * {
449
+ * name: 'GetPopulation',
450
+ * args: { location: 'Los Angeles, CA' },
451
+ * type: 'tool_call',
452
+ * id: 'call_f81z'
453
+ * },
454
+ * {
455
+ * name: 'GetPopulation',
456
+ * args: { location: 'New York, NY' },
457
+ * type: 'tool_call',
458
+ * id: 'call_8byt'
459
+ * }
460
+ * ]
461
+ * ```
462
+ * </details>
463
+ *
464
+ * <br />
465
+ *
466
+ * <details>
467
+ * <summary><strong>Structured Output</strong></summary>
468
+ *
469
+ * ```typescript
470
+ * import { z } from 'zod';
471
+ *
472
+ * const Joke = z.object({
473
+ * setup: z.string().describe("The setup of the joke"),
474
+ * punchline: z.string().describe("The punchline to the joke"),
475
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
476
+ * }).describe('Joke to tell user.');
477
+ *
478
+ * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: "Joke" });
479
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
480
+ * console.log(jokeResult);
481
+ * ```
482
+ *
483
+ * ```txt
484
+ * {
485
+ * setup: "Why don't cats play poker in the wild?",
486
+ * punchline: 'Because there are too many cheetahs.'
487
+ * }
488
+ * ```
489
+ * </details>
490
+ *
491
+ * <br />
492
+ *
493
+ * <details>
494
+ * <summary><strong>Server Tool Calling (Live Search)</strong></summary>
495
+ *
496
+ * xAI supports server-side tools that are executed by the API rather than
497
+ * requiring client-side execution. The `live_search` tool enables the model
498
+ * to search the web for real-time information.
499
+ *
500
+ * ```typescript
501
+ * // Method 1: Using the built-in live_search tool
502
+ * const llm = new ChatXAI({
503
+ * model: "grok-beta",
504
+ * temperature: 0,
505
+ * });
506
+ *
507
+ * const llmWithSearch = llm.bindTools([{ type: "live_search" }]);
508
+ * const result = await llmWithSearch.invoke("What happened in tech news today?");
509
+ * console.log(result.content);
510
+ * // The model will search the web and include real-time information in its response
511
+ * ```
512
+ *
513
+ * ```typescript
514
+ * // Method 2: Using searchParameters for more control
515
+ * const llm = new ChatXAI({
516
+ * model: "grok-beta",
517
+ * searchParameters: {
518
+ * mode: "auto", // "auto" | "on" | "off"
519
+ * max_search_results: 5,
520
+ * from_date: "2024-01-01", // ISO date string
521
+ * return_citations: true,
522
+ * }
523
+ * });
524
+ *
525
+ * const result = await llm.invoke("What are the latest AI developments?");
526
+ * ```
527
+ *
528
+ * ```typescript
529
+ * // Method 3: Override search parameters per request
530
+ * const result = await llm.invoke("Find recent news about SpaceX", {
531
+ * searchParameters: {
532
+ * mode: "on",
533
+ * max_search_results: 10,
534
+ * sources: [
535
+ * { type: "web", allowed_websites: ["spacex.com", "nasa.gov"] },
536
+ * ],
537
+ * }
538
+ * });
539
+ * ```
540
+ * </details>
541
+ *
542
+ * <br />
543
+ */
544
+ declare class ChatXAI extends ChatOpenAICompletions<ChatXAICallOptions> {
545
+ static lc_name(): string;
546
+ _llmType(): string;
547
+ get lc_secrets(): {
548
+ [key: string]: string;
549
+ } | undefined;
550
+ lc_serializable: boolean;
551
+ lc_namespace: string[];
552
+ /**
553
+ * Default search parameters for the Live Search API.
554
+ */
555
+ searchParameters?: XAISearchParameters;
556
+ constructor(fields?: Partial<ChatXAIInput>);
557
+ toJSON(): Serialized;
558
+ getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
559
+ /**
560
+ * Get the effective search parameters, merging defaults with call options.
561
+ * @param options Call options that may contain search parameters
562
+ * @returns Merged search parameters or undefined if none are configured
563
+ */
564
+ protected _getEffectiveSearchParameters(options?: this["ParsedCallOptions"]): XAISearchParameters | undefined;
565
+ /**
566
+ * Check if any built-in tools (like live_search) are in the tools list.
567
+ * @param tools List of tools to check
568
+ * @returns true if any built-in tools are present
569
+ */
570
+ protected _hasBuiltInTools(tools?: ChatXAIToolType[]): boolean;
571
+ /**
572
+ * Formats tools to xAI/OpenAI format, preserving provider-specific definitions.
573
+ *
574
+ * @param tools The tools to format
575
+ * @returns The formatted tools
576
+ */
577
+ formatStructuredToolToXAI(tools: ChatXAIToolType[]): (OpenAIClient.ChatCompletionTool | XAIBuiltInTool)[] | undefined;
578
+ bindTools(tools: ChatXAIToolType[], kwargs?: Partial<ChatXAICallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, ChatXAICallOptions>;
579
+ /** @internal */
580
+ invocationParams(options?: this["ParsedCallOptions"], extra?: {
581
+ streaming?: boolean;
582
+ }): ChatXAICompletionsInvocationParams;
583
+ completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
584
+ completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
585
+ protected _convertCompletionsDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: "function" | "user" | "system" | "developer" | "assistant" | "tool"): AIMessageChunk;
586
+ protected _convertCompletionsMessageToBaseMessage(message: OpenAIClient.ChatCompletionMessage & {
587
+ reasoning_content?: string;
588
+ }, rawResponse: OpenAIClient.ChatCompletion): AIMessageChunk;
589
+ /**
590
+ * Return profiling information for the model.
591
+ *
592
+ * Provides information about the model's capabilities and constraints,
593
+ * including token limits, multimodal support, and advanced features like
594
+ * tool calling and structured output.
595
+ *
596
+ * @returns {ModelProfile} An object describing the model's capabilities and constraints
597
+ *
598
+ * @example
599
+ * ```typescript
600
+ * const model = new ChatXAI({ model: "grok-beta" });
601
+ * const profile = model.profile;
602
+ * console.log(profile.maxInputTokens); // 128000
603
+ * console.log(profile.imageInputs); // true
604
+ * ```
605
+ */
606
+ get profile(): ModelProfile;
607
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
608
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
609
+ raw: BaseMessage;
610
+ parsed: RunOutput;
611
+ }>;
612
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
613
+ raw: BaseMessage;
614
+ parsed: RunOutput;
615
+ }>;
616
+ }
617
+ //#endregion
618
+ export { ChatXAI, ChatXAICallOptions, ChatXAICompletionsInvocationParams, ChatXAIInput, OpenAIToolChoice, XAIAdditionalKwargs, XAIBuiltInTool, XAIResponseMetadata, isXAIBuiltInTool };
619
+ //# sourceMappingURL=completions.d.ts.map