langchain 0.0.194 → 0.0.196

This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
Files changed (241)
  1. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +1 -1
  2. package/dist/base_language/count_tokens.cjs +5 -70
  3. package/dist/base_language/count_tokens.d.ts +1 -10
  4. package/dist/base_language/count_tokens.js +1 -65
  5. package/dist/base_language/index.cjs +6 -196
  6. package/dist/base_language/index.d.ts +1 -111
  7. package/dist/base_language/index.js +1 -191
  8. package/dist/cache/base.cjs +15 -37
  9. package/dist/cache/base.d.ts +1 -20
  10. package/dist/cache/base.js +1 -33
  11. package/dist/cache/index.cjs +2 -46
  12. package/dist/cache/index.d.ts +1 -29
  13. package/dist/cache/index.js +1 -45
  14. package/dist/callbacks/base.cjs +3 -139
  15. package/dist/callbacks/base.d.ts +1 -266
  16. package/dist/callbacks/base.js +1 -126
  17. package/dist/callbacks/handlers/console.cjs +14 -221
  18. package/dist/callbacks/handlers/console.d.ts +1 -117
  19. package/dist/callbacks/handlers/console.js +1 -217
  20. package/dist/callbacks/handlers/initialize.cjs +15 -30
  21. package/dist/callbacks/handlers/initialize.d.ts +1 -16
  22. package/dist/callbacks/handlers/initialize.js +1 -27
  23. package/dist/callbacks/handlers/log_stream.cjs +15 -293
  24. package/dist/callbacks/handlers/log_stream.d.ts +1 -100
  25. package/dist/callbacks/handlers/log_stream.js +1 -289
  26. package/dist/callbacks/handlers/run_collector.cjs +15 -48
  27. package/dist/callbacks/handlers/run_collector.d.ts +1 -26
  28. package/dist/callbacks/handlers/run_collector.js +1 -46
  29. package/dist/callbacks/handlers/tracer.cjs +15 -375
  30. package/dist/callbacks/handlers/tracer.d.ts +1 -70
  31. package/dist/callbacks/handlers/tracer.js +1 -373
  32. package/dist/callbacks/handlers/tracer_langchain.cjs +15 -104
  33. package/dist/callbacks/handlers/tracer_langchain.d.ts +1 -41
  34. package/dist/callbacks/handlers/tracer_langchain.js +1 -102
  35. package/dist/callbacks/handlers/tracer_langchain_v1.cjs +15 -197
  36. package/dist/callbacks/handlers/tracer_langchain_v1.d.ts +1 -57
  37. package/dist/callbacks/handlers/tracer_langchain_v1.js +1 -195
  38. package/dist/callbacks/manager.cjs +15 -676
  39. package/dist/callbacks/manager.d.ts +1 -180
  40. package/dist/callbacks/manager.js +1 -666
  41. package/dist/callbacks/promises.cjs +14 -42
  42. package/dist/callbacks/promises.d.ts +1 -11
  43. package/dist/callbacks/promises.js +1 -37
  44. package/dist/chains/graph_qa/prompts.d.ts +1 -1
  45. package/dist/chains/openai_functions/structured_output.cjs +2 -2
  46. package/dist/chains/openai_functions/structured_output.d.ts +1 -1
  47. package/dist/chains/openai_functions/structured_output.js +1 -1
  48. package/dist/chat_models/baiduwenxin.d.ts +1 -1
  49. package/dist/chat_models/base.cjs +15 -296
  50. package/dist/chat_models/base.d.ts +1 -122
  51. package/dist/chat_models/base.js +1 -292
  52. package/dist/chat_models/bedrock/web.d.ts +1 -1
  53. package/dist/chat_models/fireworks.d.ts +1 -1
  54. package/dist/document.cjs +2 -24
  55. package/dist/document.d.ts +1 -12
  56. package/dist/document.js +1 -23
  57. package/dist/document_loaders/web/azure_blob_storage_file.d.ts +1 -1
  58. package/dist/document_loaders/web/s3.d.ts +1 -1
  59. package/dist/embeddings/base.cjs +15 -22
  60. package/dist/embeddings/base.d.ts +1 -33
  61. package/dist/embeddings/base.js +1 -20
  62. package/dist/embeddings/cache_backed.cjs +2 -2
  63. package/dist/embeddings/cache_backed.js +1 -1
  64. package/dist/embeddings/hf.cjs +1 -2
  65. package/dist/embeddings/hf.js +1 -2
  66. package/dist/evaluation/agents/trajectory.d.ts +1 -1
  67. package/dist/evaluation/criteria/prompt.d.ts +2 -2
  68. package/dist/evaluation/qa/prompt.d.ts +2 -2
  69. package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +1 -1
  70. package/dist/experimental/openai_assistant/index.cjs +2 -1
  71. package/dist/experimental/openai_assistant/index.d.ts +2 -1
  72. package/dist/experimental/openai_assistant/index.js +2 -1
  73. package/dist/experimental/openai_files/index.cjs +88 -0
  74. package/dist/experimental/openai_files/index.d.ts +79 -0
  75. package/dist/experimental/openai_files/index.js +84 -0
  76. package/dist/experimental/plan_and_execute/prompt.d.ts +1 -1
  77. package/dist/llms/base.cjs +15 -278
  78. package/dist/llms/base.d.ts +1 -115
  79. package/dist/llms/base.js +1 -275
  80. package/dist/llms/bedrock/web.d.ts +1 -1
  81. package/dist/llms/fireworks.d.ts +1 -1
  82. package/dist/load/import_constants.cjs +1 -0
  83. package/dist/load/import_constants.js +1 -0
  84. package/dist/load/import_map.cjs +4 -2
  85. package/dist/load/import_map.d.ts +2 -0
  86. package/dist/load/import_map.js +2 -0
  87. package/dist/load/index.cjs +7 -148
  88. package/dist/load/index.js +7 -148
  89. package/dist/load/map_keys.cjs +0 -24
  90. package/dist/load/map_keys.d.ts +0 -6
  91. package/dist/load/map_keys.js +1 -17
  92. package/dist/load/serializable.cjs +15 -178
  93. package/dist/load/serializable.d.ts +1 -66
  94. package/dist/load/serializable.js +1 -175
  95. package/dist/memory/base.cjs +17 -92
  96. package/dist/memory/base.d.ts +2 -68
  97. package/dist/memory/base.js +2 -87
  98. package/dist/output_parsers/openai_functions.cjs +2 -2
  99. package/dist/output_parsers/openai_functions.d.ts +1 -1
  100. package/dist/output_parsers/openai_functions.js +1 -1
  101. package/dist/output_parsers/regex.d.ts +1 -1
  102. package/dist/output_parsers/structured.d.ts +1 -1
  103. package/dist/prompts/base.cjs +8 -183
  104. package/dist/prompts/base.d.ts +3 -132
  105. package/dist/prompts/base.js +3 -178
  106. package/dist/prompts/chat.cjs +15 -477
  107. package/dist/prompts/chat.d.ts +1 -219
  108. package/dist/prompts/chat.js +1 -466
  109. package/dist/prompts/few_shot.cjs +15 -353
  110. package/dist/prompts/few_shot.d.ts +1 -192
  111. package/dist/prompts/few_shot.js +1 -350
  112. package/dist/prompts/index.cjs +3 -2
  113. package/dist/prompts/index.d.ts +2 -1
  114. package/dist/prompts/index.js +2 -1
  115. package/dist/prompts/pipeline.cjs +15 -142
  116. package/dist/prompts/pipeline.d.ts +1 -98
  117. package/dist/prompts/pipeline.js +1 -140
  118. package/dist/prompts/prompt.cjs +15 -146
  119. package/dist/prompts/prompt.d.ts +1 -92
  120. package/dist/prompts/prompt.js +1 -144
  121. package/dist/prompts/selectors/LengthBasedExampleSelector.cjs +15 -148
  122. package/dist/prompts/selectors/LengthBasedExampleSelector.d.ts +1 -89
  123. package/dist/prompts/selectors/LengthBasedExampleSelector.js +1 -146
  124. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.cjs +15 -137
  125. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.d.ts +1 -91
  126. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.js +1 -135
  127. package/dist/prompts/selectors/conditional.cjs +15 -74
  128. package/dist/prompts/selectors/conditional.d.ts +1 -63
  129. package/dist/prompts/selectors/conditional.js +1 -69
  130. package/dist/prompts/serde.cjs +15 -0
  131. package/dist/prompts/serde.d.ts +1 -43
  132. package/dist/prompts/serde.js +1 -1
  133. package/dist/prompts/template.cjs +14 -88
  134. package/dist/prompts/template.d.ts +1 -36
  135. package/dist/prompts/template.js +1 -83
  136. package/dist/retrievers/chaindesk.cjs +9 -1
  137. package/dist/retrievers/chaindesk.d.ts +3 -1
  138. package/dist/retrievers/chaindesk.js +9 -1
  139. package/dist/retrievers/self_query/vectara.cjs +138 -0
  140. package/dist/retrievers/self_query/vectara.d.ts +41 -0
  141. package/dist/retrievers/self_query/vectara.js +134 -0
  142. package/dist/{util/@cfworker/json-schema → runnables}/index.cjs +1 -1
  143. package/dist/runnables/index.d.ts +1 -0
  144. package/dist/runnables/index.js +1 -0
  145. package/dist/schema/document.cjs +3 -34
  146. package/dist/schema/document.d.ts +2 -29
  147. package/dist/schema/document.js +2 -32
  148. package/dist/schema/index.cjs +37 -612
  149. package/dist/schema/index.d.ts +11 -311
  150. package/dist/schema/index.js +8 -583
  151. package/dist/schema/output_parser.cjs +15 -309
  152. package/dist/schema/output_parser.d.ts +1 -173
  153. package/dist/schema/output_parser.js +1 -301
  154. package/dist/schema/retriever.cjs +15 -77
  155. package/dist/schema/retriever.d.ts +1 -43
  156. package/dist/schema/retriever.js +1 -75
  157. package/dist/schema/runnable/base.cjs +10 -1072
  158. package/dist/schema/runnable/base.d.ts +1 -356
  159. package/dist/schema/runnable/base.js +1 -1060
  160. package/dist/schema/runnable/branch.cjs +2 -131
  161. package/dist/schema/runnable/branch.d.ts +1 -94
  162. package/dist/schema/runnable/branch.js +1 -130
  163. package/dist/schema/runnable/config.cjs +0 -6
  164. package/dist/schema/runnable/config.d.ts +1 -3
  165. package/dist/schema/runnable/config.js +1 -4
  166. package/dist/schema/runnable/index.cjs +15 -16
  167. package/dist/schema/runnable/index.d.ts +1 -5
  168. package/dist/schema/runnable/index.js +1 -4
  169. package/dist/schema/runnable/passthrough.cjs +3 -113
  170. package/dist/schema/runnable/passthrough.d.ts +1 -72
  171. package/dist/schema/runnable/passthrough.js +1 -111
  172. package/dist/schema/runnable/router.cjs +2 -71
  173. package/dist/schema/runnable/router.d.ts +1 -29
  174. package/dist/schema/runnable/router.js +1 -70
  175. package/dist/schema/storage.cjs +15 -8
  176. package/dist/schema/storage.d.ts +1 -57
  177. package/dist/schema/storage.js +1 -6
  178. package/dist/tools/bingserpapi.d.ts +1 -1
  179. package/dist/tools/searchapi.d.ts +1 -1
  180. package/dist/tools/serpapi.d.ts +1 -1
  181. package/dist/tools/serper.d.ts +1 -1
  182. package/dist/util/async_caller.cjs +14 -128
  183. package/dist/util/async_caller.d.ts +1 -45
  184. package/dist/util/async_caller.js +1 -124
  185. package/dist/vectorstores/vectara.cjs +77 -7
  186. package/dist/vectorstores/vectara.d.ts +9 -3
  187. package/dist/vectorstores/vectara.js +54 -7
  188. package/experimental/openai_files.cjs +1 -0
  189. package/experimental/openai_files.d.ts +1 -0
  190. package/experimental/openai_files.js +1 -0
  191. package/package.json +27 -5
  192. package/retrievers/self_query/vectara.cjs +1 -0
  193. package/retrievers/self_query/vectara.d.ts +1 -0
  194. package/retrievers/self_query/vectara.js +1 -0
  195. package/runnables.cjs +1 -0
  196. package/runnables.d.ts +1 -0
  197. package/runnables.js +1 -0
  198. package/dist/util/@cfworker/json-schema/index.d.ts +0 -1
  199. package/dist/util/@cfworker/json-schema/index.js +0 -1
  200. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.cjs +0 -43
  201. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.d.ts +0 -1
  202. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.js +0 -39
  203. package/dist/util/@cfworker/json-schema/src/dereference.cjs +0 -169
  204. package/dist/util/@cfworker/json-schema/src/dereference.d.ts +0 -12
  205. package/dist/util/@cfworker/json-schema/src/dereference.js +0 -165
  206. package/dist/util/@cfworker/json-schema/src/format.cjs +0 -139
  207. package/dist/util/@cfworker/json-schema/src/format.d.ts +0 -2
  208. package/dist/util/@cfworker/json-schema/src/format.js +0 -136
  209. package/dist/util/@cfworker/json-schema/src/index.cjs +0 -24
  210. package/dist/util/@cfworker/json-schema/src/index.d.ts +0 -8
  211. package/dist/util/@cfworker/json-schema/src/index.js +0 -8
  212. package/dist/util/@cfworker/json-schema/src/pointer.cjs +0 -11
  213. package/dist/util/@cfworker/json-schema/src/pointer.d.ts +0 -2
  214. package/dist/util/@cfworker/json-schema/src/pointer.js +0 -6
  215. package/dist/util/@cfworker/json-schema/src/types.cjs +0 -2
  216. package/dist/util/@cfworker/json-schema/src/types.d.ts +0 -72
  217. package/dist/util/@cfworker/json-schema/src/types.js +0 -1
  218. package/dist/util/@cfworker/json-schema/src/ucs2-length.cjs +0 -28
  219. package/dist/util/@cfworker/json-schema/src/ucs2-length.d.ts +0 -6
  220. package/dist/util/@cfworker/json-schema/src/ucs2-length.js +0 -24
  221. package/dist/util/@cfworker/json-schema/src/validate.cjs +0 -808
  222. package/dist/util/@cfworker/json-schema/src/validate.d.ts +0 -3
  223. package/dist/util/@cfworker/json-schema/src/validate.js +0 -804
  224. package/dist/util/@cfworker/json-schema/src/validator.cjs +0 -44
  225. package/dist/util/@cfworker/json-schema/src/validator.d.ts +0 -10
  226. package/dist/util/@cfworker/json-schema/src/validator.js +0 -40
  227. package/dist/util/fast-json-patch/index.cjs +0 -49
  228. package/dist/util/fast-json-patch/index.d.ts +0 -22
  229. package/dist/util/fast-json-patch/index.js +0 -16
  230. package/dist/util/fast-json-patch/src/core.cjs +0 -469
  231. package/dist/util/fast-json-patch/src/core.d.ts +0 -111
  232. package/dist/util/fast-json-patch/src/core.js +0 -459
  233. package/dist/util/fast-json-patch/src/duplex.cjs +0 -237
  234. package/dist/util/fast-json-patch/src/duplex.d.ts +0 -23
  235. package/dist/util/fast-json-patch/src/duplex.js +0 -230
  236. package/dist/util/fast-json-patch/src/helpers.cjs +0 -194
  237. package/dist/util/fast-json-patch/src/helpers.d.ts +0 -36
  238. package/dist/util/fast-json-patch/src/helpers.js +0 -181
  239. package/dist/util/js-sha1/hash.cjs +0 -358
  240. package/dist/util/js-sha1/hash.d.ts +0 -1
  241. package/dist/util/js-sha1/hash.js +0 -355
package/dist/experimental/openai_files/index.cjs
@@ -0,0 +1,88 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OpenAIFiles = void 0;
+ const openai_1 = require("openai");
+ const serializable_js_1 = require("../../load/serializable.cjs");
+ class OpenAIFiles extends serializable_js_1.Serializable {
+     constructor(fields) {
+         super(fields);
+         Object.defineProperty(this, "lc_namespace", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: ["langchain", "experimental"]
+         });
+         Object.defineProperty(this, "oaiClient", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.oaiClient = fields?.client ?? new openai_1.OpenAI(fields?.clientOptions);
+     }
+     /**
+      * Upload file
+      * Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB.
+      *
+      * @note The size of individual files can be a maximum of 512 MB. See the Assistants Tools guide to learn more about the types of files supported. The Fine-tuning API only supports .jsonl files.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/create}
+      * @param {OpenAIClient.FileCreateParams['file']} file
+      * @param {OpenAIClient.FileCreateParams['purpose']} purpose
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObject>}
+      */
+     async createFile({ file, purpose, options, }) {
+         return this.oaiClient.files.create({ file, purpose }, options);
+     }
+     /**
+      * Delete a file.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/delete}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileDeleted>}
+      */
+     async deleteFile({ fileId, options, }) {
+         return this.oaiClient.files.del(fileId, options);
+     }
+     /**
+      * List files
+      * Returns a list of files that belong to the user's organization.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/list}
+      * @param {OpenAIClient.Files.FileListParams | undefined} query
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObjectsPage>}
+      */
+     async listFiles(props) {
+         return this.oaiClient.files.list(props?.query, props?.options);
+     }
+     /**
+      * Retrieve file
+      * Returns information about a specific file.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/retrieve}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObject>}
+      */
+     async retrieveFile({ fileId, options, }) {
+         return this.oaiClient.files.retrieve(fileId, options);
+     }
+     /**
+      * Retrieve file content
+      * Returns the contents of the specified file.
+      *
+      * @note You can't retrieve the contents of a file that was uploaded with the "purpose": "assistants" API.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/retrieve-contents}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<string>}
+      */
+     async retrieveFileContent({ fileId, options, }) {
+         return this.oaiClient.files.retrieveContent(fileId, options);
+     }
+ }
+ exports.OpenAIFiles = OpenAIFiles;
package/dist/experimental/openai_files/index.d.ts
@@ -0,0 +1,79 @@
+ import { ClientOptions, OpenAI as OpenAIClient } from "openai";
+ import { Serializable } from "../../load/serializable.js";
+ export type OpenAIFilesInput = {
+     client?: OpenAIClient;
+     clientOptions?: ClientOptions;
+ };
+ export declare class OpenAIFiles extends Serializable {
+     lc_namespace: string[];
+     private oaiClient;
+     constructor(fields?: OpenAIFilesInput);
+     /**
+      * Upload file
+      * Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB.
+      *
+      * @note The size of individual files can be a maximum of 512 MB. See the Assistants Tools guide to learn more about the types of files supported. The Fine-tuning API only supports .jsonl files.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/create}
+      * @param {OpenAIClient.FileCreateParams['file']} file
+      * @param {OpenAIClient.FileCreateParams['purpose']} purpose
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObject>}
+      */
+     createFile({ file, purpose, options, }: OpenAIClient.FileCreateParams & {
+         options?: OpenAIClient.RequestOptions;
+     }): Promise<OpenAIClient.Files.FileObject>;
+     /**
+      * Delete a file.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/delete}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileDeleted>}
+      */
+     deleteFile({ fileId, options, }: {
+         fileId: string;
+         options?: OpenAIClient.RequestOptions;
+     }): Promise<OpenAIClient.Files.FileDeleted>;
+     /**
+      * List files
+      * Returns a list of files that belong to the user's organization.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/list}
+      * @param {OpenAIClient.Files.FileListParams | undefined} query
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObjectsPage>}
+      */
+     listFiles(props?: {
+         query?: OpenAIClient.Files.FileListParams;
+         options?: OpenAIClient.RequestOptions;
+     }): Promise<OpenAIClient.Files.FileObjectsPage>;
+     /**
+      * Retrieve file
+      * Returns information about a specific file.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/retrieve}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObject>}
+      */
+     retrieveFile({ fileId, options, }: {
+         fileId: string;
+         options?: OpenAIClient.RequestOptions;
+     }): Promise<OpenAIClient.Files.FileObject>;
+     /**
+      * Retrieve file content
+      * Returns the contents of the specified file.
+      *
+      * @note You can't retrieve the contents of a file that was uploaded with the "purpose": "assistants" API.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/retrieve-contents}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<string>}
+      */
+     retrieveFileContent({ fileId, options, }: {
+         fileId: string;
+         options?: OpenAIClient.RequestOptions;
+     }): Promise<string>;
+ }
package/dist/experimental/openai_files/index.js
@@ -0,0 +1,84 @@
+ import { OpenAI as OpenAIClient } from "openai";
+ import { Serializable } from "../../load/serializable.js";
+ export class OpenAIFiles extends Serializable {
+     constructor(fields) {
+         super(fields);
+         Object.defineProperty(this, "lc_namespace", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: ["langchain", "experimental"]
+         });
+         Object.defineProperty(this, "oaiClient", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.oaiClient = fields?.client ?? new OpenAIClient(fields?.clientOptions);
+     }
+     /**
+      * Upload file
+      * Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB.
+      *
+      * @note The size of individual files can be a maximum of 512 MB. See the Assistants Tools guide to learn more about the types of files supported. The Fine-tuning API only supports .jsonl files.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/create}
+      * @param {OpenAIClient.FileCreateParams['file']} file
+      * @param {OpenAIClient.FileCreateParams['purpose']} purpose
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObject>}
+      */
+     async createFile({ file, purpose, options, }) {
+         return this.oaiClient.files.create({ file, purpose }, options);
+     }
+     /**
+      * Delete a file.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/delete}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileDeleted>}
+      */
+     async deleteFile({ fileId, options, }) {
+         return this.oaiClient.files.del(fileId, options);
+     }
+     /**
+      * List files
+      * Returns a list of files that belong to the user's organization.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/list}
+      * @param {OpenAIClient.Files.FileListParams | undefined} query
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObjectsPage>}
+      */
+     async listFiles(props) {
+         return this.oaiClient.files.list(props?.query, props?.options);
+     }
+     /**
+      * Retrieve file
+      * Returns information about a specific file.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/retrieve}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<OpenAIClient.Files.FileObject>}
+      */
+     async retrieveFile({ fileId, options, }) {
+         return this.oaiClient.files.retrieve(fileId, options);
+     }
+     /**
+      * Retrieve file content
+      * Returns the contents of the specified file.
+      *
+      * @note You can't retrieve the contents of a file that was uploaded with the "purpose": "assistants" API.
+      *
+      * @link {https://platform.openai.com/docs/api-reference/files/retrieve-contents}
+      * @param {string} fileId
+      * @param {OpenAIClient.RequestOptions | undefined} options
+      * @returns {Promise<string>}
+      */
+     async retrieveFileContent({ fileId, options, }) {
+         return this.oaiClient.files.retrieveContent(fileId, options);
+     }
+ }
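The three hunks above add the same experimental `OpenAIFiles` wrapper in CommonJS, declaration, and ESM form: a thin `Serializable` class around the OpenAI files API, built from an existing client or from `ClientOptions`. Going by the signatures in the new `index.d.ts`, a minimal usage sketch could look like the following. The `langchain/experimental/openai_files` import path is assumed from the new top-level entrypoint files added in this release, and `./train.jsonl` is an illustrative local path:

```ts
import { OpenAIFiles } from "langchain/experimental/openai_files";
import * as fs from "node:fs";

// The default client reads OPENAI_API_KEY from the environment.
const files = new OpenAIFiles();

// Upload a fine-tuning dataset; per the JSDoc above, the Fine-tuning API
// only accepts .jsonl files.
const created = await files.createFile({
  file: fs.createReadStream("./train.jsonl"), // illustrative path
  purpose: "fine-tune",
});

// List the organization's files and print a few fields.
const page = await files.listFiles();
for (const f of page.data) {
  console.log(f.id, f.filename, f.purpose);
}

// Metadata and raw contents for the file we just uploaded.
const meta = await files.retrieveFile({ fileId: created.id });
const contents = await files.retrieveFileContent({ fileId: created.id });
console.log(meta.bytes, contents.length);

// Clean up.
await files.deleteFile({ fileId: created.id });
```

Each method is a direct pass-through to the underlying `openai` client, so the return types (`FileObject`, `FileDeleted`, `FileObjectsPage`, `string`) come straight from that SDK.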
package/dist/experimental/plan_and_execute/prompt.d.ts
@@ -10,4 +10,4 @@ export declare const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = "Previo
   * @param tools the tools available to the `planner`
   * @returns
   */
- export declare const getPlannerChatPrompt: (tools: Tool[] | DynamicStructuredTool[]) => Promise<ChatPromptTemplate<import("../../schema/index.js").InputValues<string>, any>>;
+ export declare const getPlannerChatPrompt: (tools: Tool[] | DynamicStructuredTool[]) => Promise<ChatPromptTemplate<import("langchain-core/schema").InputValues<string>, any>>;
package/dist/llms/base.cjs
@@ -1,280 +1,17 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.LLM = exports.BaseLLM = void 0;
- const index_js_1 = require("../schema/index.cjs");
- const manager_js_1 = require("../callbacks/manager.cjs");
- const index_js_2 = require("../base_language/index.cjs");
- const base_js_1 = require("../memory/base.cjs");
- /**
-  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
-  */
- class BaseLLM extends index_js_2.BaseLanguageModel {
-     constructor({ concurrency, ...rest }) {
-         super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest);
-         Object.defineProperty(this, "lc_namespace", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: ["langchain", "llms", this._llmType()]
-         });
-     }
-     /**
-      * This method takes an input and options, and returns a string. It
-      * converts the input to a prompt value and generates a result based on
-      * the prompt.
-      * @param input Input for the LLM.
-      * @param options Options for the LLM call.
-      * @returns A string result based on the prompt.
-      */
-     async invoke(input, options) {
-         const promptValue = BaseLLM._convertInputToPromptValue(input);
-         const result = await this.generatePrompt([promptValue], options, options?.callbacks);
-         return result.generations[0][0].text;
-     }
-     // eslint-disable-next-line require-yield
-     async *_streamResponseChunks(_input, _options, _runManager) {
-         throw new Error("Not implemented.");
-     }
-     _separateRunnableConfigFromCallOptions(options) {
-         const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options);
-         if (callOptions?.timeout && !callOptions.signal) {
-             callOptions.signal = AbortSignal.timeout(callOptions.timeout);
-         }
-         return [runnableConfig, callOptions];
-     }
-     async *_streamIterator(input, options) {
-         // Subclass check required to avoid double callbacks with default implementation
-         if (this._streamResponseChunks === BaseLLM.prototype._streamResponseChunks) {
-             yield this.invoke(input, options);
-         }
-         else {
-             const prompt = BaseLLM._convertInputToPromptValue(input);
-             const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
-             const callbackManager_ = await manager_js_1.CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
-             const extra = {
-                 options: callOptions,
-                 invocation_params: this?.invocationParams(callOptions),
-                 batch_size: 1,
-             };
-             const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
-             let generation = new index_js_1.GenerationChunk({
-                 text: "",
-             });
-             try {
-                 for await (const chunk of this._streamResponseChunks(input.toString(), callOptions, runManagers?.[0])) {
-                     if (!generation) {
-                         generation = chunk;
-                     }
-                     else {
-                         generation = generation.concat(chunk);
-                     }
-                     if (typeof chunk.text === "string") {
-                         yield chunk.text;
-                     }
-                 }
-             }
-             catch (err) {
-                 await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-                 throw err;
-             }
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
-                 generations: [[generation]],
-             })));
-         }
-     }
-     /**
-      * This method takes prompt values, options, and callbacks, and generates
-      * a result based on the prompts.
-      * @param promptValues Prompt values for the LLM.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns An LLMResult based on the prompts.
-      */
-     async generatePrompt(promptValues, options, callbacks) {
-         const prompts = promptValues.map((promptValue) => promptValue.toString());
-         return this.generate(prompts, options, callbacks);
-     }
-     /**
-      * Get the parameters used to invoke the model
-      */
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     invocationParams(_options) {
-         return {};
-     }
-     _flattenLLMResult(llmResult) {
-         const llmResults = [];
-         for (let i = 0; i < llmResult.generations.length; i += 1) {
-             const genList = llmResult.generations[i];
-             if (i === 0) {
-                 llmResults.push({
-                     generations: [genList],
-                     llmOutput: llmResult.llmOutput,
-                 });
-             }
-             else {
-                 const llmOutput = llmResult.llmOutput
-                     ? { ...llmResult.llmOutput, tokenUsage: {} }
-                     : undefined;
-                 llmResults.push({
-                     generations: [genList],
-                     llmOutput,
-                 });
-             }
-         }
-         return llmResults;
-     }
-     /** @ignore */
-     async _generateUncached(prompts, parsedOptions, handledOptions) {
-         const callbackManager_ = await manager_js_1.CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
-         const extra = {
-             options: parsedOptions,
-             invocation_params: this?.invocationParams(parsedOptions),
-             batch_size: prompts.length,
-         };
-         const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, undefined, undefined, extra, undefined, undefined, handledOptions?.runName);
-         let output;
-         try {
-             output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
-         }
-         catch (err) {
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-             throw err;
-         }
-         const flattenedOutputs = this._flattenLLMResult(output);
-         await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
-         const runIds = runManagers?.map((manager) => manager.runId) || undefined;
-         // This defines RUN_KEY as a non-enumerable property on the output object
-         // so that it is not serialized when the output is stringified, and so that
-         // it isnt included when listing the keys of the output object.
-         Object.defineProperty(output, index_js_1.RUN_KEY, {
-             value: runIds ? { runIds } : undefined,
-             configurable: true,
-         });
-         return output;
-     }
-     /**
-      * Run the LLM on the given prompts and input, handling caching.
-      */
-     async generate(prompts, options, callbacks) {
-         if (!Array.isArray(prompts)) {
-             throw new Error("Argument 'prompts' is expected to be a string[]");
-         }
-         let parsedOptions;
-         if (Array.isArray(options)) {
-             parsedOptions = { stop: options };
-         }
-         else {
-             parsedOptions = options;
-         }
-         const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
-         runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
-         if (!this.cache) {
-             return this._generateUncached(prompts, callOptions, runnableConfig);
-         }
-         const { cache } = this;
-         const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
-         const missingPromptIndices = [];
-         const generations = await Promise.all(prompts.map(async (prompt, index) => {
-             const result = await cache.lookup(prompt, llmStringKey);
-             if (!result) {
-                 missingPromptIndices.push(index);
-             }
-             return result;
-         }));
-         let llmOutput = {};
-         if (missingPromptIndices.length > 0) {
-             const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig);
-             await Promise.all(results.generations.map(async (generation, index) => {
-                 const promptIndex = missingPromptIndices[index];
-                 generations[promptIndex] = generation;
-                 return cache.update(prompts[promptIndex], llmStringKey, generation);
-             }));
-             llmOutput = results.llmOutput ?? {};
-         }
-         return { generations, llmOutput };
-     }
-     /**
-      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
-      */
-     async call(prompt, options, callbacks) {
-         const { generations } = await this.generate([prompt], options, callbacks);
-         return generations[0][0].text;
-     }
-     /**
-      * This method is similar to `call`, but it's used for making predictions
-      * based on the input text.
-      * @param text Input text for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A prediction based on the input text.
-      */
-     async predict(text, options, callbacks) {
-         return this.call(text, options, callbacks);
-     }
-     /**
-      * This method takes a list of messages, options, and
-      * returns a predicted message.
-      * @param messages A list of messages for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A predicted message based on the list of messages.
-      */
-     async predictMessages(messages, options, callbacks) {
-         const text = (0, base_js_1.getBufferString)(messages);
-         const prediction = await this.call(text, options, callbacks);
-         return new index_js_1.AIMessage(prediction);
-     }
-     /**
-      * Get the identifying parameters of the LLM.
-      */
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     _identifyingParams() {
-         return {};
-     }
-     /**
-      * @deprecated
-      * Return a json-like object representing this LLM.
-      */
-     serialize() {
-         return {
-             ...this._identifyingParams(),
-             _type: this._llmType(),
-             _model: this._modelType(),
-         };
-     }
-     _modelType() {
-         return "base_llm";
-     }
-     /**
-      * @deprecated
-      * Load an LLM from a json-like object describing it.
-      */
-     static async deserialize(data) {
-         const { _type, _model, ...rest } = data;
-         if (_model && _model !== "base_llm") {
-             throw new Error(`Cannot load LLM with model ${_model}`);
-         }
-         const Cls = {
-             openai: (await import("./openai.js")).OpenAI,
-         }[_type];
-         if (Cls === undefined) {
-             throw new Error(`Cannot load LLM with type ${_type}`);
-         }
-         return new Cls(rest);
-     }
- }
- exports.BaseLLM = BaseLLM;
- /**
-  * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
-  *
-  * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
-  *
-  * @augments BaseLLM
-  */
- class LLM extends BaseLLM {
-     async _generate(prompts, options, runManager) {
-         const generations = await Promise.all(prompts.map((prompt, promptIndex) => this._call(prompt, { ...options, promptIndex }, runManager).then((text) => [{ text }])));
-         return { generations };
-     }
- }
- exports.LLM = LLM;
+ __exportStar(require("langchain-core/llm"), exports);
package/dist/llms/base.d.ts
@@ -1,115 +1 @@
- import { BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
- import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
- import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
- import { RunnableConfig } from "../schema/runnable/config.js";
- export type SerializedLLM = {
-     _model: string;
-     _type: string;
- } & Record<string, any>;
- export interface BaseLLMParams extends BaseLanguageModelParams {
-     /**
-      * @deprecated Use `maxConcurrency` instead
-      */
-     concurrency?: number;
- }
- export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
- }
- /**
-  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
-  */
- export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
-     ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig & "timeout">;
-     lc_namespace: string[];
-     constructor({ concurrency, ...rest }: BaseLLMParams);
-     /**
-      * This method takes an input and options, and returns a string. It
-      * converts the input to a prompt value and generates a result based on
-      * the prompt.
-      * @param input Input for the LLM.
-      * @param options Options for the LLM call.
-      * @returns A string result based on the prompt.
-      */
-     invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<string>;
-     _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
-     protected _separateRunnableConfigFromCallOptions(options?: Partial<CallOptions>): [RunnableConfig, this["ParsedCallOptions"]];
-     _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator<string>;
-     /**
-      * This method takes prompt values, options, and callbacks, and generates
-      * a result based on the prompts.
-      * @param promptValues Prompt values for the LLM.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns An LLMResult based on the prompts.
-      */
-     generatePrompt(promptValues: BasePromptValue[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
-     /**
-      * Run the LLM on the given prompts and input.
-      */
-     abstract _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
-     /**
-      * Get the parameters used to invoke the model
-      */
-     invocationParams(_options?: this["ParsedCallOptions"]): any;
-     _flattenLLMResult(llmResult: LLMResult): LLMResult[];
-     /** @ignore */
-     _generateUncached(prompts: string[], parsedOptions: this["ParsedCallOptions"], handledOptions: BaseCallbackConfig): Promise<LLMResult>;
-     /**
-      * Run the LLM on the given prompts and input, handling caching.
-      */
-     generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
-     /**
-      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
-      */
-     call(prompt: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
-     /**
-      * This method is similar to `call`, but it's used for making predictions
-      * based on the input text.
-      * @param text Input text for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A prediction based on the input text.
-      */
-     predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
-     /**
-      * This method takes a list of messages, options, and
-      * returns a predicted message.
-      * @param messages A list of messages for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A predicted message based on the list of messages.
-      */
-     predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
-     /**
-      * Get the identifying parameters of the LLM.
-      */
-     _identifyingParams(): Record<string, any>;
-     /**
-      * Return the string type key uniquely identifying this class of LLM.
-      */
-     abstract _llmType(): string;
-     /**
-      * @deprecated
-      * Return a json-like object representing this LLM.
-      */
-     serialize(): SerializedLLM;
-     _modelType(): string;
-     /**
-      * @deprecated
-      * Load an LLM from a json-like object describing it.
-      */
-     static deserialize(data: SerializedLLM): Promise<BaseLLM>;
- }
- /**
-  * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
-  *
-  * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
-  *
-  * @augments BaseLLM
-  */
- export declare abstract class LLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLLM<CallOptions> {
-     /**
-      * Run the LLM on the given prompt and input.
-      */
-     abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
-     _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
- }
+ export * from "langchain-core/llm";
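The last two hunks gut `package/dist/llms/base` entirely: the compiled `BaseLLM`/`LLM` implementations are replaced by a star re-export of `langchain-core/llm`, so the public import path stays the same while the implementation moves out of this package. Assuming the re-exported module surfaces the same `LLM` and `BaseLLMParams` names as the removed declarations above, a custom model still only needs `_llmType` and `_call`; a minimal sketch:

```ts
import { LLM, BaseLLMParams } from "langchain/llms/base";

// Toy deterministic model, e.g. for tests: echoes its prompt back.
class EchoLLM extends LLM {
  constructor(fields: BaseLLMParams = {}) {
    super(fields);
  }

  _llmType(): string {
    return "echo";
  }

  // The only abstract method LLM adds; BaseLLM's _generate fans each prompt out to it.
  async _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string> {
    return `echo: ${prompt}`;
  }
}

// Top-level await assumes an ESM context.
const model = new EchoLLM();
console.log(await model.call("hello world")); // -> "echo: hello world"
```

Because the `.cjs` build re-exports through `__exportStar`, CommonJS consumers requiring `langchain/llms/base` should see the same names as before.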