langchain 0.0.137 → 0.0.139

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/chat_models/minimax.cjs +1 -0
  2. package/chat_models/minimax.d.ts +1 -0
  3. package/chat_models/minimax.js +1 -0
  4. package/dist/agents/initialize.cjs +11 -0
  5. package/dist/agents/initialize.d.ts +4 -0
  6. package/dist/agents/initialize.js +11 -0
  7. package/dist/agents/xml/index.cjs +119 -0
  8. package/dist/agents/xml/index.d.ts +51 -0
  9. package/dist/agents/xml/index.js +114 -0
  10. package/dist/agents/xml/prompt.cjs +23 -0
  11. package/dist/agents/xml/prompt.d.ts +1 -0
  12. package/dist/agents/xml/prompt.js +20 -0
  13. package/dist/callbacks/base.d.ts +12 -4
  14. package/dist/callbacks/handlers/run_collector.cjs +50 -0
  15. package/dist/callbacks/handlers/run_collector.d.ts +26 -0
  16. package/dist/callbacks/handlers/run_collector.js +46 -0
  17. package/dist/callbacks/handlers/tracer.cjs +33 -20
  18. package/dist/callbacks/handlers/tracer.d.ts +7 -3
  19. package/dist/callbacks/handlers/tracer.js +33 -20
  20. package/dist/callbacks/handlers/tracer_langchain.cjs +1 -0
  21. package/dist/callbacks/handlers/tracer_langchain.d.ts +2 -1
  22. package/dist/callbacks/handlers/tracer_langchain.js +1 -0
  23. package/dist/callbacks/index.cjs +3 -1
  24. package/dist/callbacks/index.d.ts +1 -0
  25. package/dist/callbacks/index.js +1 -0
  26. package/dist/callbacks/manager.cjs +29 -14
  27. package/dist/callbacks/manager.d.ts +9 -4
  28. package/dist/callbacks/manager.js +29 -14
  29. package/dist/chains/openai_functions/extraction.cjs +2 -2
  30. package/dist/chains/openai_functions/extraction.d.ts +5 -4
  31. package/dist/chains/openai_functions/extraction.js +2 -2
  32. package/dist/chains/openai_functions/openapi.d.ts +2 -1
  33. package/dist/chains/openai_functions/structured_output.d.ts +4 -3
  34. package/dist/chains/openai_functions/tagging.cjs +2 -2
  35. package/dist/chains/openai_functions/tagging.d.ts +5 -4
  36. package/dist/chains/openai_functions/tagging.js +2 -2
  37. package/dist/chat_models/anthropic.cjs +7 -5
  38. package/dist/chat_models/anthropic.d.ts +17 -12
  39. package/dist/chat_models/anthropic.js +4 -2
  40. package/dist/chat_models/minimax.cjs +547 -0
  41. package/dist/chat_models/minimax.d.ts +364 -0
  42. package/dist/chat_models/minimax.js +543 -0
  43. package/dist/chat_models/ollama.cjs +136 -0
  44. package/dist/chat_models/ollama.d.ts +34 -0
  45. package/dist/chat_models/ollama.js +136 -0
  46. package/dist/embeddings/minimax.cjs +152 -0
  47. package/dist/embeddings/minimax.d.ts +104 -0
  48. package/dist/embeddings/minimax.js +148 -0
  49. package/dist/experimental/chat_models/anthropic_functions.cjs +129 -0
  50. package/dist/experimental/chat_models/anthropic_functions.d.ts +20 -0
  51. package/dist/experimental/chat_models/anthropic_functions.js +125 -0
  52. package/dist/llms/ollama.cjs +136 -0
  53. package/dist/llms/ollama.d.ts +34 -0
  54. package/dist/llms/ollama.js +136 -0
  55. package/dist/load/import_constants.cjs +1 -0
  56. package/dist/load/import_constants.js +1 -0
  57. package/dist/load/import_map.cjs +4 -2
  58. package/dist/load/import_map.d.ts +2 -0
  59. package/dist/load/import_map.js +2 -0
  60. package/dist/schema/output_parser.cjs +1 -1
  61. package/dist/schema/output_parser.js +1 -1
  62. package/dist/schema/runnable.cjs +54 -15
  63. package/dist/schema/runnable.d.ts +9 -3
  64. package/dist/schema/runnable.js +55 -16
  65. package/dist/sql_db.cjs +3 -1
  66. package/dist/sql_db.js +3 -1
  67. package/dist/util/ollama.d.ts +34 -0
  68. package/dist/vectorstores/redis.cjs +17 -2
  69. package/dist/vectorstores/redis.d.ts +10 -1
  70. package/dist/vectorstores/redis.js +17 -2
  71. package/dist/vectorstores/zep.cjs +2 -1
  72. package/dist/vectorstores/zep.js +3 -2
  73. package/embeddings/minimax.cjs +1 -0
  74. package/embeddings/minimax.d.ts +1 -0
  75. package/embeddings/minimax.js +1 -0
  76. package/experimental/chat_models/anthropic_functions.cjs +1 -0
  77. package/experimental/chat_models/anthropic_functions.d.ts +1 -0
  78. package/experimental/chat_models/anthropic_functions.js +1 -0
  79. package/package.json +34 -5
@@ -0,0 +1,364 @@
1
+ import { BaseChatModel, BaseChatModelParams } from "./base.js";
2
+ import { BaseMessage, ChatResult } from "../schema/index.js";
3
+ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
4
+ import { StructuredTool } from "../tools/index.js";
5
+ import { BaseLanguageModelCallOptions } from "../base_language/index.js";
6
+ /**
7
+ * Type representing the sender_type of a message in the Minimax chat model.
8
+ */
9
+ export type MinimaxMessageRole = "BOT" | "USER" | "FUNCTION";
10
+ /**
11
+ * Interface representing a message in the Minimax chat model.
12
+ */
13
+ interface MinimaxChatCompletionRequestMessage {
14
+ sender_type: MinimaxMessageRole;
15
+ sender_name?: string;
16
+ text: string;
17
+ }
18
+ export interface MinimaxChatCompletionRequestFunctions {
19
+ /**
20
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
21
+ * @type {string}
22
+ * @memberof MinimaxChatCompletionRequestFunctions
23
+ */
24
+ name: string;
25
+ /**
26
+ * The description of what the function does.
27
+ * @type {string}
28
+ * @memberof MinimaxChatCompletionRequestFunctions
29
+ */
30
+ description?: string;
31
+ /**
32
+ * The parameters the function accepts, described as a JSON Schema object.
33
+ * @type {{ [key: string]: any; }}
34
+ * @memberof MinimaxChatCompletionRequestFunctions
35
+ */
36
+ parameters?: {
37
+ [key: string]: any;
38
+ };
39
+ }
40
+ /**
41
+ * Interface representing a request for a chat completion.
42
+ */
43
+ interface MinimaxChatCompletionRequest {
44
+ model: string;
45
+ messages: MinimaxChatCompletionRequestMessage[];
46
+ stream?: boolean;
47
+ prompt?: string;
48
+ temperature?: number;
49
+ top_p?: number;
50
+ tokens_to_generate?: number;
51
+ skip_info_mask?: boolean;
52
+ mask_sensitive_info?: boolean;
53
+ beam_width?: number;
54
+ use_standard_sse?: boolean;
55
+ role_meta?: RoleMeta;
56
+ bot_setting?: BotSetting[];
57
+ reply_constraints?: ReplyConstraints;
58
+ sample_messages?: MinimaxChatCompletionRequestMessage[];
59
+ /**
60
+ * A list of functions the model may generate JSON inputs for.
61
+ * @type {Array<MinimaxChatCompletionRequestFunctions>}
62
+ */
63
+ functions?: Array<MinimaxChatCompletionRequestFunctions>;
64
+ plugins?: string[];
65
+ }
66
+ interface RoleMeta {
67
+ role_meta: string;
68
+ bot_name: string;
69
+ }
70
+ interface RawGlyph {
71
+ type: "raw";
72
+ raw_glyph: string;
73
+ }
74
+ interface JsonGlyph {
75
+ type: "json_value";
76
+ json_properties: any;
77
+ }
78
+ type ReplyConstraintsGlyph = RawGlyph | JsonGlyph;
79
+ interface ReplyConstraints {
80
+ sender_type: string;
81
+ sender_name: string;
82
+ glyph?: ReplyConstraintsGlyph;
83
+ }
84
+ interface BotSetting {
85
+ content: string;
86
+ bot_name: string;
87
+ }
88
+ export declare interface ConfigurationParameters {
89
+ basePath?: string;
90
+ headers?: Record<string, string>;
91
+ }
92
+ /**
93
+ * Interface defining the input to the ChatMinimax class.
94
+ */
95
+ declare interface MinimaxChatInputBase {
96
+ /** Model name to use
97
+ * @default "abab5.5-chat"
98
+ */
99
+ modelName: string;
100
+ /** Whether to stream the results or not. Defaults to false. */
101
+ streaming?: boolean;
102
+ prefixMessages?: MinimaxChatCompletionRequestMessage[];
103
+ /**
104
+ * Group ID to use when making requests. Defaults to the value of
105
+ * `MINIMAX_GROUP_ID` environment variable.
106
+ */
107
+ minimaxGroupId?: string;
108
+ /**
109
+ * Secret key to use when making requests. Defaults to the value of
110
+ * `MINIMAX_API_KEY` environment variable.
111
+ */
112
+ minimaxApiKey?: string;
113
+ /** Amount of randomness injected into the response. Ranges
114
+ * from 0 to 1 (0 is not included). Use temp closer to 0 for analytical /
115
+ * multiple choice, and temp closer to 1 for creative
116
+ * and generative tasks. Defaults to 0.95.
117
+ */
118
+ temperature?: number;
119
+ /**
120
+ * The smaller the value, the more deterministic the sampling result;
121
+ * the larger the value, the more random the result.
122
+ */
123
+ topP?: number;
124
+ /**
125
+ * Enable Chatcompletion pro
126
+ */
127
+ proVersion?: boolean;
128
+ /**
129
+ * Pay attention to the maximum number of tokens generated,
130
+ * this parameter does not affect the generation effect of the model itself,
131
+ * but only realizes the function by truncating the tokens exceeding the limit.
132
+ * It is necessary to ensure that the number of tokens of the input context plus this value is less than 6144 or 16384,
133
+ * otherwise the request will fail.
134
+ */
135
+ tokensToGenerate?: number;
136
+ }
137
+ declare interface MinimaxChatInputNormal {
138
+ /**
139
+ * Dialogue setting, characters, or functionality setting.
140
+ */
141
+ prompt?: string;
142
+ /**
143
+ * Sensitize text information in the output that may involve privacy issues,
144
+ * currently including but not limited to emails, domain names,
145
+ * links, ID numbers, home addresses, etc. Defaults to false, i.e. masking is enabled.
146
+ */
147
+ skipInfoMask?: boolean;
148
+ /**
149
+ * Whether to use the standard SSE format, when set to true,
150
+ * the streaming results will be separated by two line breaks.
151
+ * This parameter only takes effect when stream is set to true.
152
+ */
153
+ useStandardSse?: boolean;
154
+ /**
155
+ * If it is true, this indicates that the current request is set to continuation mode,
156
+ * and the response is a continuation of the last sentence in the incoming messages;
157
+ * at this time, the last sender is not limited to USER, it can also be BOT.
158
+ * Assuming the last sentence of incoming messages is {"sender_type": "USER", "text": "天生我材"},
159
+ * the completion of the reply may be "It must be useful."
160
+ */
161
+ continueLastMessage?: boolean;
162
+ /**
163
+ * How many results to generate; the default is 1 and the maximum is not more than 4.
164
+ * Because beamWidth generates multiple results, it will consume more tokens.
165
+ */
166
+ beamWidth?: number;
167
+ /**
168
+ * Dialogue Metadata
169
+ */
170
+ roleMeta?: RoleMeta;
171
+ }
172
+ declare interface MinimaxChatInputPro extends MinimaxChatInputBase {
173
+ /**
174
+ * For the text information in the output that may involve privacy issues,
175
+ * masking currently includes but is not limited to emails, domains, links, ID numbers, home addresses, etc.,
176
+ * with the default being true, that is, code masking is enabled.
177
+ */
178
+ maskSensitiveInfo?: boolean;
179
+ /**
180
+ * Default bot name
181
+ */
182
+ defaultBotName?: string;
183
+ /**
184
+ * Default user name
185
+ */
186
+ defaultUserName?: string;
187
+ /**
188
+ * Setting for each robot, only available for pro version.
189
+ */
190
+ botSetting?: BotSetting[];
191
+ replyConstraints?: ReplyConstraints;
192
+ }
193
+ type MinimaxChatInput = MinimaxChatInputNormal & MinimaxChatInputPro;
194
+ export interface ChatMinimaxCallOptions extends BaseLanguageModelCallOptions {
195
+ functions?: MinimaxChatCompletionRequestFunctions[];
196
+ tools?: StructuredTool[];
197
+ defaultUserName?: string;
198
+ defaultBotName?: string;
199
+ plugins?: string[];
200
+ botSetting?: BotSetting[];
201
+ replyConstraints?: ReplyConstraints;
202
+ sampleMessages?: BaseMessage[];
203
+ }
204
+ /**
205
+ * Wrapper around Minimax large language models that use the Chat endpoint.
206
+ *
207
+ * To use you should have the `MINIMAX_GROUP_ID` and `MINIMAX_API_KEY`
208
+ * environment variable set.
209
+ */
210
+ export declare class ChatMinimax extends BaseChatModel<ChatMinimaxCallOptions> implements MinimaxChatInput {
211
+ static lc_name(): string;
212
+ get callKeys(): (keyof ChatMinimaxCallOptions)[];
213
+ get lc_secrets(): {
214
+ [key: string]: string;
215
+ } | undefined;
216
+ lc_serializable: boolean;
217
+ minimaxGroupId?: string;
218
+ minimaxApiKey?: string;
219
+ streaming: boolean;
220
+ prompt?: string;
221
+ modelName: string;
222
+ defaultBotName?: string;
223
+ defaultUserName?: string;
224
+ prefixMessages?: MinimaxChatCompletionRequestMessage[];
225
+ apiUrl: string;
226
+ basePath?: string;
227
+ headers?: Record<string, string>;
228
+ temperature?: number;
229
+ topP?: number;
230
+ tokensToGenerate?: number;
231
+ skipInfoMask?: boolean;
232
+ proVersion?: boolean;
233
+ beamWidth?: number;
234
+ botSetting?: BotSetting[];
235
+ continueLastMessage?: boolean;
236
+ maskSensitiveInfo?: boolean;
237
+ roleMeta?: RoleMeta;
238
+ useStandardSse?: boolean;
239
+ replyConstraints?: ReplyConstraints;
240
+ constructor(fields?: Partial<MinimaxChatInput> & BaseChatModelParams & {
241
+ configuration?: ConfigurationParameters;
242
+ });
243
+ fallbackBotName(options?: this["ParsedCallOptions"]): string;
244
+ defaultReplyConstraints(options?: this["ParsedCallOptions"]): ReplyConstraints;
245
+ /**
246
+ * Get the parameters used to invoke the model
247
+ */
248
+ invocationParams(options?: this["ParsedCallOptions"]): Omit<MinimaxChatCompletionRequest, "messages">;
249
+ /**
250
+ * Get the identifying parameters for the model
251
+ */
252
+ identifyingParams(): {
253
+ prompt?: string | undefined;
254
+ stream?: boolean | undefined;
255
+ functions?: MinimaxChatCompletionRequestFunctions[] | undefined;
256
+ model: string;
257
+ temperature?: number | undefined;
258
+ top_p?: number | undefined;
259
+ plugins?: string[] | undefined;
260
+ tokens_to_generate?: number | undefined;
261
+ skip_info_mask?: boolean | undefined;
262
+ mask_sensitive_info?: boolean | undefined;
263
+ beam_width?: number | undefined;
264
+ use_standard_sse?: boolean | undefined;
265
+ role_meta?: RoleMeta | undefined;
266
+ bot_setting?: BotSetting[] | undefined;
267
+ reply_constraints?: ReplyConstraints | undefined;
268
+ sample_messages?: MinimaxChatCompletionRequestMessage[] | undefined;
269
+ };
270
+ /**
271
+ * Convert a list of messages to the format expected by the model.
272
+ * @param messages
273
+ * @param options
274
+ */
275
+ messageToMinimaxMessage(messages?: BaseMessage[], options?: this["ParsedCallOptions"]): MinimaxChatCompletionRequestMessage[] | undefined;
276
+ /** @ignore */
277
+ _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
278
+ /** @ignore */
279
+ completionWithRetry(request: MinimaxChatCompletionRequest, stream: boolean, signal?: AbortSignal, onmessage?: (event: MessageEvent) => void): Promise<ChatCompletionResponse>;
280
+ _llmType(): string;
281
+ /** @ignore */
282
+ _combineLLMOutput(): never[];
283
+ private botSettingFallback;
284
+ }
285
+ /** --- Response Model --- */
286
+ /**
287
+ * Interface representing a message returned in the Minimax chat model.
288
+ */
289
+ interface ChatCompletionResponseMessage {
290
+ sender_type: MinimaxMessageRole;
291
+ sender_name?: string;
292
+ text: string;
293
+ function_call?: ChatCompletionResponseMessageFunctionCall;
294
+ }
295
+ /**
296
+ * Interface representing the usage of tokens in a chat completion.
297
+ */
298
+ interface TokenUsage {
299
+ total_tokens?: number;
300
+ }
301
+ interface BaseResp {
302
+ status_code?: number;
303
+ status_msg?: string;
304
+ }
305
+ /**
306
+ * The name and arguments of a function that should be called, as generated by the model.
307
+ * @export
308
+ * @interface ChatCompletionResponseMessageFunctionCall
309
+ */
310
+ export interface ChatCompletionResponseMessageFunctionCall {
311
+ /**
312
+ * The name of the function to call.
313
+ * @type {string}
314
+ * @memberof ChatCompletionResponseMessageFunctionCall
315
+ */
316
+ name?: string;
317
+ /**
318
+ * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
319
+ * @type {string}
320
+ * @memberof ChatCompletionResponseMessageFunctionCall
321
+ */
322
+ arguments?: string;
323
+ }
324
+ /**
325
+ *
326
+ * @export
327
+ * @interface ChatCompletionResponseChoicesPro
328
+ */
329
+ export interface ChatCompletionResponseChoicesPro {
330
+ /**
331
+ *
332
+ * @type {ChatCompletionResponseMessage[]}
333
+ * @memberof ChatCompletionResponseChoicesPro
334
+ */
335
+ messages?: ChatCompletionResponseMessage[];
336
+ /**
337
+ *
338
+ * @type {string}
339
+ * @memberof ChatCompletionResponseChoicesPro
340
+ */
341
+ finish_reason?: string;
342
+ }
343
+ interface ChatCompletionResponseChoices {
344
+ delta?: string;
345
+ text?: string;
346
+ index?: number;
347
+ finish_reason?: string;
348
+ }
349
+ /**
350
+ * Interface representing a response from a chat completion.
351
+ */
352
+ interface ChatCompletionResponse {
353
+ model: string;
354
+ created: number;
355
+ reply: string;
356
+ input_sensitive?: boolean;
357
+ input_sensitive_type?: number;
358
+ output_sensitive?: boolean;
359
+ output_sensitive_type?: number;
360
+ usage?: TokenUsage;
361
+ base_resp?: BaseResp;
362
+ choices: Array<ChatCompletionResponseChoicesPro & ChatCompletionResponseChoices>;
363
+ }
364
+ export {};