langchain 0.0.197-rc.1 → 0.0.197

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (52)
  1. package/dist/chains/openai_moderation.cjs +2 -2
  2. package/dist/chains/openai_moderation.d.ts +1 -1
  3. package/dist/chains/openai_moderation.js +1 -1
  4. package/dist/chat_models/anthropic.cjs +351 -15
  5. package/dist/chat_models/anthropic.d.ts +157 -1
  6. package/dist/chat_models/anthropic.js +348 -1
  7. package/dist/chat_models/cloudflare_workersai.cjs +5 -0
  8. package/dist/chat_models/cloudflare_workersai.d.ts +3 -0
  9. package/dist/chat_models/cloudflare_workersai.js +5 -0
  10. package/dist/chat_models/fireworks.d.ts +1 -1
  11. package/dist/chat_models/iflytek_xinghuo/common.d.ts +1 -1
  12. package/dist/chat_models/minimax.d.ts +1 -1
  13. package/dist/chat_models/openai.cjs +698 -4
  14. package/dist/chat_models/openai.d.ts +137 -4
  15. package/dist/chat_models/openai.js +695 -2
  16. package/dist/document_loaders/fs/openai_whisper_audio.cjs +2 -2
  17. package/dist/document_loaders/fs/openai_whisper_audio.d.ts +1 -1
  18. package/dist/document_loaders/fs/openai_whisper_audio.js +1 -1
  19. package/dist/embeddings/openai.cjs +240 -2
  20. package/dist/embeddings/openai.d.ts +82 -1
  21. package/dist/embeddings/openai.js +239 -1
  22. package/dist/experimental/openai_assistant/index.cjs +3 -3
  23. package/dist/experimental/openai_assistant/index.d.ts +1 -1
  24. package/dist/experimental/openai_assistant/index.js +1 -1
  25. package/dist/experimental/openai_assistant/schema.d.ts +1 -1
  26. package/dist/experimental/openai_files/index.cjs +2 -2
  27. package/dist/experimental/openai_files/index.d.ts +1 -1
  28. package/dist/experimental/openai_files/index.js +1 -1
  29. package/dist/llms/fireworks.d.ts +1 -1
  30. package/dist/llms/openai-chat.cjs +445 -3
  31. package/dist/llms/openai-chat.d.ts +123 -4
  32. package/dist/llms/openai-chat.js +443 -2
  33. package/dist/llms/openai.cjs +530 -6
  34. package/dist/llms/openai.d.ts +123 -4
  35. package/dist/llms/openai.js +525 -2
  36. package/dist/schema/index.d.ts +1 -1
  37. package/dist/tools/convert_to_openai.cjs +38 -4
  38. package/dist/tools/convert_to_openai.d.ts +11 -1
  39. package/dist/tools/convert_to_openai.js +35 -1
  40. package/dist/types/openai-types.d.ts +133 -1
  41. package/dist/util/env.cjs +9 -70
  42. package/dist/util/env.d.ts +1 -21
  43. package/dist/util/env.js +1 -62
  44. package/dist/util/openai-format-fndef.cjs +81 -0
  45. package/dist/util/openai-format-fndef.d.ts +44 -0
  46. package/dist/util/openai-format-fndef.js +77 -0
  47. package/dist/util/openai.cjs +18 -2
  48. package/dist/util/openai.d.ts +1 -1
  49. package/dist/util/openai.js +17 -1
  50. package/dist/util/openapi.d.ts +2 -2
  51. package/dist/util/prompt-layer.d.ts +1 -1
  52. package/package.json +3 -6
package/dist/chat_models/anthropic.js
@@ -1 +1,348 @@
- export * from "@langchain/anthropic";
+ import { Anthropic, AI_PROMPT, HUMAN_PROMPT, } from "@anthropic-ai/sdk";
+ import { AIMessage, AIMessageChunk, ChatMessage, } from "@langchain/core/messages";
+ import { ChatGenerationChunk, } from "@langchain/core/outputs";
+ import { getEnvironmentVariable } from "@langchain/core/utils/env";
+ import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
+ export { AI_PROMPT, HUMAN_PROMPT };
+ /**
+  * Extracts the custom role of a generic chat message.
+  * @param message The chat message from which to extract the custom role.
+  * @returns The custom role of the chat message.
+  */
+ function extractGenericMessageCustomRole(message) {
+     if (message.role !== AI_PROMPT &&
+         message.role !== HUMAN_PROMPT &&
+         message.role !== "") {
+         console.warn(`Unknown message role: ${message.role}`);
+     }
+     return message.role;
+ }
+ /**
+  * Gets the Anthropic prompt from a base message.
+  * @param message The base message from which to get the Anthropic prompt.
+  * @returns The Anthropic prompt from the base message.
+  */
+ function getAnthropicPromptFromMessage(message) {
+     const type = message._getType();
+     switch (type) {
+         case "ai":
+             return AI_PROMPT;
+         case "human":
+             return HUMAN_PROMPT;
+         case "system":
+             return "";
+         case "generic": {
+             if (!ChatMessage.isInstance(message))
+                 throw new Error("Invalid generic chat message");
+             return extractGenericMessageCustomRole(message);
+         }
+         default:
+             throw new Error(`Unknown message type: ${type}`);
+     }
+ }
+ export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
+ /**
+  * Wrapper around Anthropic large language models.
+  *
+  * To use, you should have the `@anthropic-ai/sdk` package installed, with the
+  * `ANTHROPIC_API_KEY` environment variable set.
+  *
+  * @remarks
+  * Any parameters that are valid to be passed to {@link
+  * https://console.anthropic.com/docs/api/reference |
+  * `anthropic.complete`} can be passed through {@link invocationKwargs},
+  * even if not explicitly available on this class.
+  * @example
+  * ```typescript
+  * const model = new ChatAnthropic({
+  *   temperature: 0.9,
+  *   anthropicApiKey: 'YOUR-API-KEY',
+  * });
+  * const res = await model.invoke('Hello!');
+  * console.log(res);
+  * ```
+  */
+ export class ChatAnthropic extends BaseChatModel {
+     static lc_name() {
+         return "ChatAnthropic";
+     }
+     get lc_secrets() {
+         return {
+             anthropicApiKey: "ANTHROPIC_API_KEY",
+         };
+     }
+     get lc_aliases() {
+         return {
+             modelName: "model",
+         };
+     }
+     constructor(fields) {
+         super(fields ?? {});
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+         Object.defineProperty(this, "anthropicApiKey", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "apiUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "temperature", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 1
+         });
+         Object.defineProperty(this, "topK", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: -1
+         });
+         Object.defineProperty(this, "topP", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: -1
+         });
+         Object.defineProperty(this, "maxTokensToSample", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 2048
+         });
+         Object.defineProperty(this, "modelName", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "claude-2"
+         });
+         Object.defineProperty(this, "invocationKwargs", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "stopSequences", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "streaming", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: false
+         });
+         Object.defineProperty(this, "clientOptions", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         // Used for non-streaming requests
+         Object.defineProperty(this, "batchClient", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         // Used for streaming requests
+         Object.defineProperty(this, "streamingClient", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.anthropicApiKey =
+             fields?.anthropicApiKey ?? getEnvironmentVariable("ANTHROPIC_API_KEY");
+         if (!this.anthropicApiKey) {
+             throw new Error("Anthropic API key not found");
+         }
+         // Support overriding the default API URL (i.e., https://api.anthropic.com)
+         this.apiUrl = fields?.anthropicApiUrl;
+         this.modelName = fields?.modelName ?? this.modelName;
+         this.invocationKwargs = fields?.invocationKwargs ?? {};
+         this.temperature = fields?.temperature ?? this.temperature;
+         this.topK = fields?.topK ?? this.topK;
+         this.topP = fields?.topP ?? this.topP;
+         this.maxTokensToSample =
+             fields?.maxTokensToSample ?? this.maxTokensToSample;
+         this.stopSequences = fields?.stopSequences ?? this.stopSequences;
+         this.streaming = fields?.streaming ?? false;
+         this.clientOptions = fields?.clientOptions ?? {};
+     }
+     /**
+      * Get the parameters used to invoke the model
+      */
+     invocationParams(options) {
+         return {
+             model: this.modelName,
+             temperature: this.temperature,
+             top_k: this.topK,
+             top_p: this.topP,
+             stop_sequences: options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
+                 this.stopSequences ??
+                 DEFAULT_STOP_SEQUENCES,
+             max_tokens_to_sample: this.maxTokensToSample,
+             stream: this.streaming,
+             ...this.invocationKwargs,
+         };
+     }
+     /** @ignore */
+     _identifyingParams() {
+         return {
+             model_name: this.modelName,
+             ...this.invocationParams(),
+         };
+     }
+     /**
+      * Get the identifying parameters for the model
+      */
+     identifyingParams() {
+         return {
+             model_name: this.modelName,
+             ...this.invocationParams(),
+         };
+     }
+     async *_streamResponseChunks(messages, options, runManager) {
+         const params = this.invocationParams(options);
+         const stream = await this.createStreamWithRetry({
+             ...params,
+             prompt: this.formatMessagesAsPrompt(messages),
+         });
+         let modelSent = false;
+         let stopReasonSent = false;
+         for await (const data of stream) {
+             if (options.signal?.aborted) {
+                 stream.controller.abort();
+                 throw new Error("AbortError: User aborted the request.");
+             }
+             const additional_kwargs = {};
+             if (data.model && !modelSent) {
+                 additional_kwargs.model = data.model;
+                 modelSent = true;
+             }
+             else if (data.stop_reason && !stopReasonSent) {
+                 additional_kwargs.stop_reason = data.stop_reason;
+                 stopReasonSent = true;
+             }
+             const delta = data.completion ?? "";
+             yield new ChatGenerationChunk({
+                 message: new AIMessageChunk({
+                     content: delta,
+                     additional_kwargs,
+                 }),
+                 text: delta,
+             });
+             await runManager?.handleLLMNewToken(delta);
+             if (data.stop_reason) {
+                 break;
+             }
+         }
+     }
+     /**
+      * Formats messages as a prompt for the model.
+      * @param messages The base messages to format as a prompt.
+      * @returns The formatted prompt.
+      */
+     formatMessagesAsPrompt(messages) {
+         return (messages
+             .map((message) => {
+             const messagePrompt = getAnthropicPromptFromMessage(message);
+             return `${messagePrompt} ${message.content}`;
+         })
+             .join("") + AI_PROMPT);
+     }
+     /** @ignore */
+     async _generate(messages, options, runManager) {
+         if (this.stopSequences && options.stop) {
+             throw new Error(`"stopSequence" parameter found in input and default params`);
+         }
+         const params = this.invocationParams(options);
+         let response;
+         if (params.stream) {
+             response = {
+                 completion: "",
+                 model: "",
+                 stop_reason: "",
+             };
+             const stream = await this._streamResponseChunks(messages, options, runManager);
+             for await (const chunk of stream) {
+                 response.completion += chunk.message.content;
+                 response.model =
+                     chunk.message.additional_kwargs.model ?? response.model;
+                 response.stop_reason =
+                     chunk.message.additional_kwargs.stop_reason ??
+                         response.stop_reason;
+             }
+         }
+         else {
+             response = await this.completionWithRetry({
+                 ...params,
+                 prompt: this.formatMessagesAsPrompt(messages),
+             }, { signal: options.signal });
+         }
+         const generations = (response.completion ?? "")
+             .split(AI_PROMPT)
+             .map((message) => ({
+             text: message,
+             message: new AIMessage(message),
+         }));
+         return {
+             generations,
+         };
+     }
+     /**
+      * Creates a streaming request with retry.
+      * @param request The parameters for creating a completion.
+      * @returns A streaming request.
+      */
+     async createStreamWithRetry(request) {
+         if (!this.streamingClient) {
+             const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
+             this.streamingClient = new Anthropic({
+                 ...this.clientOptions,
+                 ...options,
+                 apiKey: this.anthropicApiKey,
+                 maxRetries: 0,
+             });
+         }
+         const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
+         return this.caller.call(makeCompletionRequest);
+     }
+     /** @ignore */
+     async completionWithRetry(request, options) {
+         if (!this.anthropicApiKey) {
+             throw new Error("Missing Anthropic API key.");
+         }
+         if (!this.batchClient) {
+             const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
+             this.batchClient = new Anthropic({
+                 ...this.clientOptions,
+                 ...options,
+                 apiKey: this.anthropicApiKey,
+                 maxRetries: 0,
+             });
+         }
+         const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
+         return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
+     }
+     _llmType() {
+         return "anthropic";
+     }
+     /** @ignore */
+     _combineLLMOutput() {
+         return [];
+     }
+ }
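
For orientation, the restored `ChatAnthropic` class above is exercised roughly as follows. This is a minimal sketch against langchain 0.0.197, not part of the diff; it assumes a valid `ANTHROPIC_API_KEY` in the environment.

```typescript
import { ChatAnthropic } from "langchain/chat_models/anthropic";
import { HumanMessage } from "langchain/schema";

// Non-streaming call: _generate formats the messages into an
// "\n\nHuman: ... \n\nAssistant:" prompt and calls the completions API.
const model = new ChatAnthropic({
  modelName: "claude-2", // the class default shown in the diff
  temperature: 0.9,
  maxTokensToSample: 256,
});
const res = await model.invoke([new HumanMessage("Hello!")]);
console.log(res.content);

// Streaming call: with streaming enabled, tokens are produced by
// _streamResponseChunks and surfaced chunk by chunk via .stream().
const streamingModel = new ChatAnthropic({ streaming: true });
for await (const chunk of await streamingModel.stream("Tell me a joke.")) {
  process.stdout.write(String(chunk.content));
}
```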
package/dist/chat_models/cloudflare_workersai.cjs
@@ -82,6 +82,11 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
              this.baseUrl = this.baseUrl.slice(0, -1);
          }
      }
+     get lc_secrets() {
+         return {
+             cloudflareApiToken: "CLOUDFLARE_API_TOKEN",
+         };
+     }
      _llmType() {
          return "cloudflare";
      }
package/dist/chat_models/cloudflare_workersai.d.ts
@@ -38,6 +38,9 @@ export declare class ChatCloudflareWorkersAI extends SimpleChatModel implements
      baseUrl: string;
      streaming: boolean;
      constructor(fields?: CloudflareWorkersAIInput & BaseChatModelParams);
+     get lc_secrets(): {
+         [key: string]: string;
+     } | undefined;
      _llmType(): string;
      /** Get the identifying parameters for this LLM. */
      get identifyingParams(): {
package/dist/chat_models/cloudflare_workersai.js
@@ -79,6 +79,11 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
              this.baseUrl = this.baseUrl.slice(0, -1);
          }
      }
+     get lc_secrets() {
+         return {
+             cloudflareApiToken: "CLOUDFLARE_API_TOKEN",
+         };
+     }
      _llmType() {
          return "cloudflare";
      }
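
The `lc_secrets` getter added in the three Cloudflare hunks above tells LangChain's serializer which constructor fields hold secrets, so serialized or traced instances carry an environment-variable reference instead of the raw token. A minimal sketch of the effect, assuming the class is marked serializable and that `toJSON()` behaves as it does for other LangChain models (the field names below are real inputs of this class; the token value is hypothetical):

```typescript
import { ChatCloudflareWorkersAI } from "langchain/chat_models/cloudflare_workersai";

// Hypothetical credentials for illustration only.
const model = new ChatCloudflareWorkersAI({
  model: "@cf/meta/llama-2-7b-chat-int8",
  cloudflareAccountId: "my-account-id",
  cloudflareApiToken: "top-secret-token",
});

// Because lc_secrets maps cloudflareApiToken -> "CLOUDFLARE_API_TOKEN",
// the serialized form should contain a placeholder such as
// { "lc": 1, "type": "secret", "id": ["CLOUDFLARE_API_TOKEN"] }
// rather than the literal "top-secret-token".
console.log(JSON.stringify(model.toJSON(), null, 2));
```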
package/dist/chat_models/fireworks.d.ts
@@ -1,4 +1,4 @@
- import type { OpenAIClient } from "@langchain/openai";
+ import type { OpenAI as OpenAIClient } from "openai";
  import type { ChatOpenAICallOptions, OpenAIChatInput } from "./openai.js";
  import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
  import type { BaseChatModelParams } from "./base.js";
package/dist/chat_models/iflytek_xinghuo/common.d.ts
@@ -131,8 +131,8 @@ export declare abstract class BaseChatIflytekXinghuo extends BaseChatModel imple
       * Get the identifying parameters for the model
       */
      identifyingParams(): {
-         temperature?: number | undefined;
          max_tokens?: number | undefined;
+         temperature?: number | undefined;
          top_k?: number | undefined;
          chat_id?: string | undefined;
          streaming: boolean;
package/dist/chat_models/minimax.d.ts
@@ -258,8 +258,8 @@ export declare class ChatMinimax extends BaseChatModel<ChatMinimaxCallOptions> i
          prompt?: string | undefined;
          stream?: boolean | undefined;
          functions?: OpenAIClient.Chat.Completions.ChatCompletionCreateParams.Function[] | undefined;
-         temperature?: number | undefined;
          model: string;
+         temperature?: number | undefined;
          top_p?: number | undefined;
          plugins?: string[] | undefined;
          tokens_to_generate?: number | undefined;