langchain 0.0.140 → 0.0.142

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dist/chains/openai_moderation.cjs +5 -13
  2. package/dist/chains/openai_moderation.d.ts +5 -5
  3. package/dist/chains/openai_moderation.js +6 -11
  4. package/dist/chat_models/anthropic.d.ts +2 -2
  5. package/dist/chat_models/openai.cjs +99 -215
  6. package/dist/chat_models/openai.d.ts +20 -60
  7. package/dist/chat_models/openai.js +101 -214
  8. package/dist/document_loaders/web/github.cjs +4 -0
  9. package/dist/document_loaders/web/github.js +4 -0
  10. package/dist/embeddings/openai.cjs +32 -22
  11. package/dist/embeddings/openai.d.ts +3 -3
  12. package/dist/embeddings/openai.js +34 -21
  13. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  14. package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
  15. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  16. package/dist/llms/openai-chat.cjs +69 -187
  17. package/dist/llms/openai-chat.d.ts +19 -71
  18. package/dist/llms/openai-chat.js +71 -186
  19. package/dist/llms/openai.cjs +92 -166
  20. package/dist/llms/openai.d.ts +25 -71
  21. package/dist/llms/openai.js +94 -165
  22. package/dist/load/import_map.cjs +3 -2
  23. package/dist/load/import_map.d.ts +1 -0
  24. package/dist/load/import_map.js +1 -0
  25. package/dist/prompts/chat.cjs +21 -9
  26. package/dist/prompts/chat.d.ts +3 -3
  27. package/dist/prompts/chat.js +22 -10
  28. package/dist/schema/index.d.ts +2 -2
  29. package/dist/schema/runnable.cjs +3 -0
  30. package/dist/schema/runnable.d.ts +1 -0
  31. package/dist/schema/runnable.js +3 -0
  32. package/dist/tools/convert_to_openai.d.ts +2 -2
  33. package/dist/types/openai-types.d.ts +27 -4
  34. package/dist/util/async_caller.cjs +10 -7
  35. package/dist/util/async_caller.js +10 -7
  36. package/dist/util/azure.cjs +4 -4
  37. package/dist/util/azure.d.ts +3 -3
  38. package/dist/util/azure.js +4 -4
  39. package/dist/util/openai.cjs +21 -0
  40. package/dist/util/openai.d.ts +1 -0
  41. package/dist/util/openai.js +17 -0
  42. package/dist/util/prompt-layer.cjs +1 -2
  43. package/dist/util/prompt-layer.d.ts +2 -2
  44. package/dist/util/prompt-layer.js +1 -2
  45. package/package.json +10 -2
  46. package/schema/document.cjs +1 -0
  47. package/schema/document.d.ts +1 -0
  48. package/schema/document.js +1 -0
package/dist/types/openai-types.d.ts CHANGED
@@ -1,5 +1,4 @@
- import { AxiosRequestConfig } from "axios";
- import { ChatCompletionRequestMessage } from "openai";
+ import type { OpenAI as OpenAIClient } from "openai";
  import { BaseLanguageModelCallOptions } from "../base_language/index.js";
  export declare interface OpenAIBaseInput {
  /** Sampling temperature to use */
@@ -42,11 +41,23 @@ export declare interface OpenAIBaseInput {
  */
  openAIApiKey?: string;
  }
+ export type OpenAICoreRequestOptions<Req extends object = Record<string, unknown>> = {
+ path?: string;
+ query?: Req | undefined;
+ body?: Req | undefined;
+ headers?: Record<string, string | null | undefined> | undefined;
+ maxRetries?: number;
+ stream?: boolean | undefined;
+ timeout?: number;
+ httpAgent?: any;
+ signal?: AbortSignal | undefined | null;
+ idempotencyKey?: string;
+ };
  export interface OpenAICallOptions extends BaseLanguageModelCallOptions {
  /**
  * Additional options to pass to the underlying axios request.
  */
- options?: AxiosRequestConfig;
+ options?: OpenAICoreRequestOptions;
  }
  /**
  * Input to OpenAI class.
@@ -57,9 +68,21 @@ export declare interface OpenAIInput extends OpenAIBaseInput {
  /** Batch size to use when passing multiple documents to generate */
  batchSize: number;
  }
+ /**
+ * @deprecated Use "baseURL", "defaultHeaders", and "defaultParams" instead.
+ */
+ export interface LegacyOpenAIInput {
+ /** @deprecated Use baseURL instead */
+ basePath?: string;
+ /** @deprecated Use defaultHeaders and defaultQuery instead */
+ baseOptions?: {
+ headers?: Record<string, string>;
+ params?: Record<string, string>;
+ };
+ }
  export interface OpenAIChatInput extends OpenAIBaseInput {
  /** ChatGPT messages to pass as a prefix to the prompt */
- prefixMessages?: ChatCompletionRequestMessage[];
+ prefixMessages?: OpenAIClient.Chat.CreateChatCompletionRequestMessage[];
  }
  export declare interface AzureOpenAIInput {
  /**
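Note: the `options` field in `OpenAICallOptions` now carries openai v4 request options (the `OpenAICoreRequestOptions` type above) instead of `AxiosRequestConfig`. A minimal sketch of passing per-call options, assuming the usual `model.call(prompt, callOptions)` signature; the header value is illustrative only:

```ts
import { OpenAI } from "langchain/llms/openai";

async function run() {
  const model = new OpenAI({ temperature: 0 });
  // `options` is typed as OpenAICoreRequestOptions, so fields like `timeout`
  // and `headers` map onto the openai v4 client's request options.
  return model.call("Say hello", {
    options: {
      timeout: 10_000,
      headers: { "x-example-header": "demo" }, // hypothetical header
    },
  });
}
```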
package/dist/util/async_caller.cjs CHANGED
@@ -9,6 +9,7 @@ const p_queue_1 = __importDefault(require("p-queue"));
  const STATUS_NO_RETRY = [
  400,
  401,
+ 402,
  403,
  404,
  405,
@@ -69,24 +70,26 @@ class AsyncCaller {
  onFailedAttempt(error) {
  if (error.message.startsWith("Cancel") ||
  error.message.startsWith("TimeoutError") ||
- error.message.startsWith("AbortError")) {
+ error.name === "TimeoutError" ||
+ error.message.startsWith("AbortError") ||
+ error.name === "AbortError") {
  throw error;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  if (error?.code === "ECONNABORTED") {
  throw error;
  }
+ const status =
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const status = error?.response?.status;
+ error?.response?.status ?? error?.status;
  if (status && STATUS_NO_RETRY.includes(+status)) {
  throw error;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const data = error?.response?.data;
- if (data?.error?.code === "insufficient_quota") {
- const error = new Error(data?.error?.message);
- error.name = "InsufficientQuotaError";
- throw error;
+ if (error?.error?.code === "insufficient_quota") {
+ const err = new Error(error?.message);
+ err.name = "InsufficientQuotaError";
+ throw err;
  }
  },
  retries: this.maxRetries,
package/dist/util/async_caller.js CHANGED
@@ -3,6 +3,7 @@ import PQueueMod from "p-queue";
  const STATUS_NO_RETRY = [
  400,
  401,
+ 402,
  403,
  404,
  405,
@@ -63,24 +64,26 @@ export class AsyncCaller {
  onFailedAttempt(error) {
  if (error.message.startsWith("Cancel") ||
  error.message.startsWith("TimeoutError") ||
- error.message.startsWith("AbortError")) {
+ error.name === "TimeoutError" ||
+ error.message.startsWith("AbortError") ||
+ error.name === "AbortError") {
  throw error;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  if (error?.code === "ECONNABORTED") {
  throw error;
  }
+ const status =
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const status = error?.response?.status;
+ error?.response?.status ?? error?.status;
  if (status && STATUS_NO_RETRY.includes(+status)) {
  throw error;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const data = error?.response?.data;
- if (data?.error?.code === "insufficient_quota") {
- const error = new Error(data?.error?.message);
- error.name = "InsufficientQuotaError";
- throw error;
+ if (error?.error?.code === "insufficient_quota") {
+ const err = new Error(error?.message);
+ err.name = "InsufficientQuotaError";
+ throw err;
  }
  },
  retries: this.maxRetries,
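Note: the retry filter now matches errors by `name` ("TimeoutError", "AbortError") as well as by message, reads the HTTP status from either `error.response.status` (axios style) or `error.status` (openai v4 `APIError` style), and checks `error.error.code` for `insufficient_quota`; HTTP 402 also joins the no-retry list. A rough usage sketch, assuming `AsyncCaller` keeps its usual constructor and `call()` shape; the deep import path and helper function are illustrative:

```ts
import { AsyncCaller } from "langchain/dist/util/async_caller.js"; // internal path, for illustration

const caller = new AsyncCaller({ maxConcurrency: 2, maxRetries: 6 });

// Transient failures are retried with backoff; errors named "TimeoutError" or
// "AbortError", statuses in STATUS_NO_RETRY (now including 402), and
// insufficient_quota errors are rethrown immediately instead of retried.
const result = await caller.call(async () => doFlakyOpenAIRequest()); // hypothetical helper
```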
package/dist/util/azure.cjs CHANGED
@@ -11,12 +11,12 @@ exports.getEndpoint = void 0;
  * @property {string} config.azureOpenAIApiInstanceName - The instance name of Azure OpenAI.
  * @property {string} config.azureOpenAIApiKey - The API Key for Azure OpenAI.
  * @property {string} config.azureOpenAIBasePath - The base path for Azure OpenAI.
- * @property {string} config.basePath - Some other custom base path URL.
+ * @property {string} config.baseURL - Some other custom base path URL.
  *
  * The function operates as follows:
  * - If both `azureOpenAIBasePath` and `azureOpenAIApiDeploymentName` (plus `azureOpenAIApiKey`) are provided, it returns an URL combining these two parameters (`${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`).
  * - If `azureOpenAIApiKey` is provided, it checks for `azureOpenAIApiInstanceName` and `azureOpenAIApiDeploymentName` and throws an error if any of these is missing. If both are provided, it generates an URL incorporating these parameters.
- * - If none of the above conditions are met, return any custom `basePath`.
+ * - If none of the above conditions are met, return any custom `baseURL`.
  * - The function returns the generated URL as a string, or undefined if no custom paths are specified.
  *
  * @throws Will throw an error if the necessary parameters for generating the URL are missing.
@@ -24,7 +24,7 @@ exports.getEndpoint = void 0;
  * @returns {string | undefined} The generated (Azure) OpenAI endpoint URL.
  */
  function getEndpoint(config) {
- const { azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName, azureOpenAIApiKey, azureOpenAIBasePath, basePath, } = config;
+ const { azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName, azureOpenAIApiKey, azureOpenAIBasePath, baseURL, } = config;
  if (azureOpenAIApiKey &&
  azureOpenAIBasePath &&
  azureOpenAIApiDeploymentName) {
@@ -39,6 +39,6 @@ function getEndpoint(config) {
  }
  return `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}`;
  }
- return basePath;
+ return baseURL;
  }
  exports.getEndpoint = getEndpoint;
package/dist/util/azure.d.ts CHANGED
@@ -3,7 +3,7 @@ export interface OpenAIEndpointConfig {
  azureOpenAIApiInstanceName?: string;
  azureOpenAIApiKey?: string;
  azureOpenAIBasePath?: string;
- basePath?: string;
+ baseURL?: string;
  }
  /**
  * This function generates an endpoint URL for (Azure) OpenAI
@@ -15,12 +15,12 @@ export interface OpenAIEndpointConfig {
  * @property {string} config.azureOpenAIApiInstanceName - The instance name of Azure OpenAI.
  * @property {string} config.azureOpenAIApiKey - The API Key for Azure OpenAI.
  * @property {string} config.azureOpenAIBasePath - The base path for Azure OpenAI.
- * @property {string} config.basePath - Some other custom base path URL.
+ * @property {string} config.baseURL - Some other custom base path URL.
  *
  * The function operates as follows:
  * - If both `azureOpenAIBasePath` and `azureOpenAIApiDeploymentName` (plus `azureOpenAIApiKey`) are provided, it returns an URL combining these two parameters (`${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`).
  * - If `azureOpenAIApiKey` is provided, it checks for `azureOpenAIApiInstanceName` and `azureOpenAIApiDeploymentName` and throws an error if any of these is missing. If both are provided, it generates an URL incorporating these parameters.
- * - If none of the above conditions are met, return any custom `basePath`.
+ * - If none of the above conditions are met, return any custom `baseURL`.
  * - The function returns the generated URL as a string, or undefined if no custom paths are specified.
  *
  * @throws Will throw an error if the necessary parameters for generating the URL are missing.
package/dist/util/azure.js CHANGED
@@ -8,12 +8,12 @@
  * @property {string} config.azureOpenAIApiInstanceName - The instance name of Azure OpenAI.
  * @property {string} config.azureOpenAIApiKey - The API Key for Azure OpenAI.
  * @property {string} config.azureOpenAIBasePath - The base path for Azure OpenAI.
- * @property {string} config.basePath - Some other custom base path URL.
+ * @property {string} config.baseURL - Some other custom base path URL.
  *
  * The function operates as follows:
  * - If both `azureOpenAIBasePath` and `azureOpenAIApiDeploymentName` (plus `azureOpenAIApiKey`) are provided, it returns an URL combining these two parameters (`${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`).
  * - If `azureOpenAIApiKey` is provided, it checks for `azureOpenAIApiInstanceName` and `azureOpenAIApiDeploymentName` and throws an error if any of these is missing. If both are provided, it generates an URL incorporating these parameters.
- * - If none of the above conditions are met, return any custom `basePath`.
+ * - If none of the above conditions are met, return any custom `baseURL`.
  * - The function returns the generated URL as a string, or undefined if no custom paths are specified.
  *
  * @throws Will throw an error if the necessary parameters for generating the URL are missing.
@@ -21,7 +21,7 @@
  * @returns {string | undefined} The generated (Azure) OpenAI endpoint URL.
  */
  export function getEndpoint(config) {
- const { azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName, azureOpenAIApiKey, azureOpenAIBasePath, basePath, } = config;
+ const { azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName, azureOpenAIApiKey, azureOpenAIBasePath, baseURL, } = config;
  if (azureOpenAIApiKey &&
  azureOpenAIBasePath &&
  azureOpenAIApiDeploymentName) {
@@ -36,5 +36,5 @@ export function getEndpoint(config) {
  }
  return `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}`;
  }
- return basePath;
+ return baseURL;
  }
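Note: the JSDoc above describes the endpoint resolution order; a quick sketch of the expected behavior (the internal import path and all values are illustrative only):

```ts
import { getEndpoint } from "langchain/dist/util/azure.js"; // internal util, for illustration

// Azure parameters present: instance + deployment build the Azure endpoint.
getEndpoint({
  azureOpenAIApiKey: "azure-key",
  azureOpenAIApiInstanceName: "my-instance",
  azureOpenAIApiDeploymentName: "my-deployment",
});
// => "https://my-instance.openai.azure.com/openai/deployments/my-deployment"

// No Azure parameters: any custom baseURL (formerly basePath) is returned as-is.
getEndpoint({ baseURL: "https://oai-proxy.example.com/v1" });
// => "https://oai-proxy.example.com/v1"
```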
package/dist/util/openai.cjs ADDED
@@ -0,0 +1,21 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.wrapOpenAIClientError = void 0;
+ const openai_1 = require("openai");
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ function wrapOpenAIClientError(e) {
+ let error;
+ if (e.constructor.name === openai_1.APIConnectionTimeoutError.name) {
+ error = new Error(e.message);
+ error.name = "TimeoutError";
+ }
+ else if (e.constructor.name === openai_1.APIUserAbortError.name) {
+ error = new Error(e.message);
+ error.name = "AbortError";
+ }
+ else {
+ error = e;
+ }
+ return error;
+ }
+ exports.wrapOpenAIClientError = wrapOpenAIClientError;
package/dist/util/openai.d.ts ADDED
@@ -0,0 +1 @@
+ export declare function wrapOpenAIClientError(e: any): any;
package/dist/util/openai.js ADDED
@@ -0,0 +1,17 @@
+ import { APIConnectionTimeoutError, APIUserAbortError } from "openai";
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ export function wrapOpenAIClientError(e) {
+ let error;
+ if (e.constructor.name === APIConnectionTimeoutError.name) {
+ error = new Error(e.message);
+ error.name = "TimeoutError";
+ }
+ else if (e.constructor.name === APIUserAbortError.name) {
+ error = new Error(e.message);
+ error.name = "AbortError";
+ }
+ else {
+ error = e;
+ }
+ return error;
+ }
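Note: this helper renames openai v4 `APIConnectionTimeoutError` / `APIUserAbortError` instances to `TimeoutError` / `AbortError`, the names that `AsyncCaller.onFailedAttempt` (above) refuses to retry. A sketch of the likely call pattern inside the OpenAI wrappers; `completionWithRetry`, the deep import paths, and the exact request type are assumptions, not shown in this diff:

```ts
import type { OpenAI as OpenAIClient } from "openai";
import { AsyncCaller } from "langchain/dist/util/async_caller.js"; // internal paths, for illustration
import { wrapOpenAIClientError } from "langchain/dist/util/openai.js";

async function completionWithRetry(
  client: OpenAIClient,
  request: OpenAIClient.CompletionCreateParamsNonStreaming,
  caller: AsyncCaller
) {
  return caller.call(async () => {
    try {
      return await client.completions.create(request);
    } catch (e) {
      // Timeouts and user aborts get renamed so the retry logic above rethrows
      // them immediately rather than retrying.
      throw wrapOpenAIClientError(e);
    }
  });
}
```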
package/dist/util/prompt-layer.cjs CHANGED
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.promptLayerTrackRequest = void 0;
- const promptLayerTrackRequest = async (callerFunc, functionName, prompt, kwargs, plTags,
+ const promptLayerTrackRequest = async (callerFunc, functionName, kwargs, plTags,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  requestResponse, startTime, endTime, apiKey) => {
  // https://github.com/MagnivOrg/promptlayer-js-helper
@@ -14,7 +14,6 @@ requestResponse, startTime, endTime, apiKey) => {
  body: JSON.stringify({
  function_name: functionName,
  provider: "langchain",
- args: prompt,
  kwargs,
  tags: plTags,
  request_response: requestResponse,
package/dist/util/prompt-layer.d.ts CHANGED
@@ -1,3 +1,3 @@
- import { CreateCompletionRequestPrompt, CreateCompletionRequest } from "openai";
+ import type { OpenAI as OpenAIClient } from "openai";
  import { AsyncCaller } from "../util/async_caller.js";
- export declare const promptLayerTrackRequest: (callerFunc: AsyncCaller, functionName: string, prompt: CreateCompletionRequestPrompt, kwargs: CreateCompletionRequest, plTags: string[] | undefined, requestResponse: any, startTime: number, endTime: number, apiKey: string | undefined) => Promise<any>;
+ export declare const promptLayerTrackRequest: (callerFunc: AsyncCaller, functionName: string, kwargs: OpenAIClient.CompletionCreateParams | OpenAIClient.Chat.CompletionCreateParams, plTags: string[] | undefined, requestResponse: any, startTime: number, endTime: number, apiKey: string | undefined) => Promise<any>;
package/dist/util/prompt-layer.js CHANGED
@@ -1,4 +1,4 @@
- export const promptLayerTrackRequest = async (callerFunc, functionName, prompt, kwargs, plTags,
+ export const promptLayerTrackRequest = async (callerFunc, functionName, kwargs, plTags,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  requestResponse, startTime, endTime, apiKey) => {
  // https://github.com/MagnivOrg/promptlayer-js-helper
@@ -11,7 +11,6 @@ requestResponse, startTime, endTime, apiKey) => {
  body: JSON.stringify({
  function_name: functionName,
  provider: "langchain",
- args: prompt,
  kwargs,
  tags: plTags,
  request_response: requestResponse,
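Note: the separate `prompt` argument is gone; the prompt now travels inside `kwargs`, which is typed against the openai v4 `CompletionCreateParams`. A hedged sketch of a call with the updated signature; every concrete value below is a placeholder:

```ts
// caller, response, startTime and endTime come from the surrounding wrapper;
// the literals are placeholders.
const requestId = await promptLayerTrackRequest(
  caller,                                         // AsyncCaller instance
  "openai.Completion.create",                     // function_name (hypothetical label)
  { model: "text-davinci-003", prompt: "Hello" }, // kwargs: full request params, prompt included
  ["my-tag"],                                     // plTags
  response,                                       // raw request_response
  startTime,
  endTime,
  process.env.PROMPTLAYER_API_KEY
);
```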
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langchain",
- "version": "0.0.140",
+ "version": "0.0.142",
  "description": "Typescript bindings for langchain",
  "type": "module",
  "engines": {
@@ -403,6 +403,9 @@
  "schema.cjs",
  "schema.js",
  "schema.d.ts",
+ "schema/document.cjs",
+ "schema/document.js",
+ "schema/document.d.ts",
  "schema/output_parser.cjs",
  "schema/output_parser.js",
  "schema/output_parser.d.ts",
@@ -1057,7 +1060,7 @@
  "langsmith": "~0.0.31",
  "ml-distance": "^4.0.0",
  "object-hash": "^3.0.0",
- "openai": "^3.3.0",
+ "openai": "^4.4.0",
  "openapi-types": "^12.1.3",
  "p-queue": "^6.6.2",
  "p-retry": "4",
@@ -1754,6 +1757,11 @@
  "import": "./schema.js",
  "require": "./schema.cjs"
  },
+ "./schema/document": {
+ "types": "./schema/document.d.ts",
+ "import": "./schema/document.js",
+ "require": "./schema/document.cjs"
+ },
  "./schema/output_parser": {
  "types": "./schema/output_parser.d.ts",
  "import": "./schema/output_parser.js",
package/schema/document.cjs ADDED
@@ -0,0 +1 @@
+ module.exports = require('../dist/schema/document.cjs');
package/schema/document.d.ts ADDED
@@ -0,0 +1 @@
+ export * from '../dist/schema/document.js'
package/schema/document.js ADDED
@@ -0,0 +1 @@
+ export * from '../dist/schema/document.js'