langchain 0.0.195 → 0.0.197-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (236)
  1. package/LICENSE +21 -0
  2. package/dist/agents/openai/index.cjs +6 -2
  3. package/dist/agents/openai/index.js +6 -2
  4. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +1 -1
  5. package/dist/base_language/count_tokens.cjs +5 -70
  6. package/dist/base_language/count_tokens.d.ts +1 -10
  7. package/dist/base_language/count_tokens.js +1 -65
  8. package/dist/base_language/index.cjs +6 -196
  9. package/dist/base_language/index.d.ts +1 -111
  10. package/dist/base_language/index.js +1 -191
  11. package/dist/cache/base.cjs +15 -37
  12. package/dist/cache/base.d.ts +1 -20
  13. package/dist/cache/base.js +1 -33
  14. package/dist/cache/index.cjs +2 -46
  15. package/dist/cache/index.d.ts +1 -29
  16. package/dist/cache/index.js +1 -45
  17. package/dist/callbacks/base.cjs +3 -139
  18. package/dist/callbacks/base.d.ts +1 -266
  19. package/dist/callbacks/base.js +1 -126
  20. package/dist/callbacks/handlers/console.cjs +14 -221
  21. package/dist/callbacks/handlers/console.d.ts +1 -117
  22. package/dist/callbacks/handlers/console.js +1 -217
  23. package/dist/callbacks/handlers/initialize.cjs +15 -30
  24. package/dist/callbacks/handlers/initialize.d.ts +1 -16
  25. package/dist/callbacks/handlers/initialize.js +1 -27
  26. package/dist/callbacks/handlers/log_stream.cjs +15 -293
  27. package/dist/callbacks/handlers/log_stream.d.ts +1 -100
  28. package/dist/callbacks/handlers/log_stream.js +1 -289
  29. package/dist/callbacks/handlers/run_collector.cjs +15 -48
  30. package/dist/callbacks/handlers/run_collector.d.ts +1 -26
  31. package/dist/callbacks/handlers/run_collector.js +1 -46
  32. package/dist/callbacks/handlers/tracer.cjs +15 -375
  33. package/dist/callbacks/handlers/tracer.d.ts +1 -70
  34. package/dist/callbacks/handlers/tracer.js +1 -373
  35. package/dist/callbacks/handlers/tracer_langchain.cjs +15 -104
  36. package/dist/callbacks/handlers/tracer_langchain.d.ts +1 -41
  37. package/dist/callbacks/handlers/tracer_langchain.js +1 -102
  38. package/dist/callbacks/handlers/tracer_langchain_v1.cjs +15 -197
  39. package/dist/callbacks/handlers/tracer_langchain_v1.d.ts +1 -57
  40. package/dist/callbacks/handlers/tracer_langchain_v1.js +1 -195
  41. package/dist/callbacks/manager.cjs +15 -676
  42. package/dist/callbacks/manager.d.ts +1 -180
  43. package/dist/callbacks/manager.js +1 -666
  44. package/dist/callbacks/promises.cjs +14 -42
  45. package/dist/callbacks/promises.d.ts +1 -11
  46. package/dist/callbacks/promises.js +1 -37
  47. package/dist/chains/graph_qa/prompts.d.ts +1 -1
  48. package/dist/chains/openai_functions/structured_output.cjs +2 -2
  49. package/dist/chains/openai_functions/structured_output.d.ts +1 -1
  50. package/dist/chains/openai_functions/structured_output.js +1 -1
  51. package/dist/chat_models/anthropic.cjs +15 -348
  52. package/dist/chat_models/anthropic.d.ts +1 -156
  53. package/dist/chat_models/anthropic.js +1 -346
  54. package/dist/chat_models/baiduwenxin.d.ts +1 -1
  55. package/dist/chat_models/base.cjs +15 -296
  56. package/dist/chat_models/base.d.ts +1 -122
  57. package/dist/chat_models/base.js +1 -292
  58. package/dist/chat_models/bedrock/web.cjs +21 -1
  59. package/dist/chat_models/bedrock/web.d.ts +2 -2
  60. package/dist/chat_models/bedrock/web.js +21 -1
  61. package/dist/chat_models/fireworks.d.ts +1 -1
  62. package/dist/document.cjs +2 -24
  63. package/dist/document.d.ts +1 -12
  64. package/dist/document.js +1 -23
  65. package/dist/document_loaders/web/azure_blob_storage_file.d.ts +1 -1
  66. package/dist/document_loaders/web/github.cjs +105 -0
  67. package/dist/document_loaders/web/github.d.ts +26 -0
  68. package/dist/document_loaders/web/github.js +105 -0
  69. package/dist/document_loaders/web/s3.d.ts +1 -1
  70. package/dist/embeddings/base.cjs +15 -22
  71. package/dist/embeddings/base.d.ts +1 -33
  72. package/dist/embeddings/base.js +1 -20
  73. package/dist/embeddings/cache_backed.cjs +2 -2
  74. package/dist/embeddings/cache_backed.js +1 -1
  75. package/dist/evaluation/agents/trajectory.d.ts +1 -1
  76. package/dist/evaluation/criteria/prompt.d.ts +2 -2
  77. package/dist/evaluation/qa/prompt.d.ts +2 -2
  78. package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +1 -1
  79. package/dist/experimental/plan_and_execute/prompt.d.ts +1 -1
  80. package/dist/llms/base.cjs +15 -278
  81. package/dist/llms/base.d.ts +1 -115
  82. package/dist/llms/base.js +1 -275
  83. package/dist/llms/bedrock/web.cjs +21 -1
  84. package/dist/llms/bedrock/web.d.ts +2 -2
  85. package/dist/llms/bedrock/web.js +21 -1
  86. package/dist/llms/fireworks.d.ts +1 -1
  87. package/dist/load/import_map.cjs +2 -1
  88. package/dist/load/import_map.d.ts +1 -0
  89. package/dist/load/import_map.js +1 -0
  90. package/dist/load/index.cjs +7 -148
  91. package/dist/load/index.js +7 -148
  92. package/dist/load/map_keys.cjs +0 -24
  93. package/dist/load/map_keys.d.ts +0 -6
  94. package/dist/load/map_keys.js +1 -17
  95. package/dist/load/serializable.cjs +15 -178
  96. package/dist/load/serializable.d.ts +1 -66
  97. package/dist/load/serializable.js +1 -175
  98. package/dist/memory/base.cjs +17 -92
  99. package/dist/memory/base.d.ts +2 -68
  100. package/dist/memory/base.js +2 -87
  101. package/dist/output_parsers/list.cjs +4 -122
  102. package/dist/output_parsers/list.d.ts +1 -57
  103. package/dist/output_parsers/list.js +1 -119
  104. package/dist/output_parsers/openai_functions.cjs +2 -2
  105. package/dist/output_parsers/openai_functions.d.ts +1 -1
  106. package/dist/output_parsers/openai_functions.js +1 -1
  107. package/dist/output_parsers/regex.d.ts +1 -1
  108. package/dist/output_parsers/structured.d.ts +1 -1
  109. package/dist/prompts/base.cjs +8 -183
  110. package/dist/prompts/base.d.ts +3 -132
  111. package/dist/prompts/base.js +3 -178
  112. package/dist/prompts/chat.cjs +13 -477
  113. package/dist/prompts/chat.d.ts +2 -219
  114. package/dist/prompts/chat.js +2 -466
  115. package/dist/prompts/few_shot.cjs +3 -352
  116. package/dist/prompts/few_shot.d.ts +1 -192
  117. package/dist/prompts/few_shot.js +1 -350
  118. package/dist/prompts/index.cjs +3 -2
  119. package/dist/prompts/index.d.ts +2 -1
  120. package/dist/prompts/index.js +2 -1
  121. package/dist/prompts/pipeline.cjs +2 -141
  122. package/dist/prompts/pipeline.d.ts +1 -98
  123. package/dist/prompts/pipeline.js +1 -140
  124. package/dist/prompts/prompt.cjs +2 -145
  125. package/dist/prompts/prompt.d.ts +1 -92
  126. package/dist/prompts/prompt.js +1 -144
  127. package/dist/prompts/selectors/LengthBasedExampleSelector.cjs +2 -147
  128. package/dist/prompts/selectors/LengthBasedExampleSelector.d.ts +1 -89
  129. package/dist/prompts/selectors/LengthBasedExampleSelector.js +1 -146
  130. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.cjs +15 -137
  131. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.d.ts +1 -91
  132. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.js +1 -135
  133. package/dist/prompts/selectors/conditional.cjs +5 -73
  134. package/dist/prompts/selectors/conditional.d.ts +1 -63
  135. package/dist/prompts/selectors/conditional.js +1 -69
  136. package/dist/prompts/serde.d.ts +1 -43
  137. package/dist/prompts/template.cjs +8 -88
  138. package/dist/prompts/template.d.ts +1 -36
  139. package/dist/prompts/template.js +1 -83
  140. package/dist/{util/@cfworker/json-schema → runnables}/index.cjs +1 -1
  141. package/dist/runnables/index.d.ts +1 -0
  142. package/dist/runnables/index.js +1 -0
  143. package/dist/schema/document.cjs +3 -34
  144. package/dist/schema/document.d.ts +2 -29
  145. package/dist/schema/document.js +2 -32
  146. package/dist/schema/index.cjs +37 -612
  147. package/dist/schema/index.d.ts +11 -311
  148. package/dist/schema/index.js +8 -583
  149. package/dist/schema/output_parser.cjs +15 -309
  150. package/dist/schema/output_parser.d.ts +1 -173
  151. package/dist/schema/output_parser.js +1 -301
  152. package/dist/schema/retriever.cjs +15 -77
  153. package/dist/schema/retriever.d.ts +1 -43
  154. package/dist/schema/retriever.js +1 -75
  155. package/dist/schema/runnable/base.cjs +10 -1072
  156. package/dist/schema/runnable/base.d.ts +1 -356
  157. package/dist/schema/runnable/base.js +1 -1060
  158. package/dist/schema/runnable/branch.cjs +2 -131
  159. package/dist/schema/runnable/branch.d.ts +1 -94
  160. package/dist/schema/runnable/branch.js +1 -130
  161. package/dist/schema/runnable/config.cjs +0 -6
  162. package/dist/schema/runnable/config.d.ts +1 -3
  163. package/dist/schema/runnable/config.js +1 -4
  164. package/dist/schema/runnable/index.cjs +15 -16
  165. package/dist/schema/runnable/index.d.ts +1 -5
  166. package/dist/schema/runnable/index.js +1 -4
  167. package/dist/schema/runnable/passthrough.cjs +3 -113
  168. package/dist/schema/runnable/passthrough.d.ts +1 -72
  169. package/dist/schema/runnable/passthrough.js +1 -111
  170. package/dist/schema/runnable/router.cjs +2 -71
  171. package/dist/schema/runnable/router.d.ts +1 -29
  172. package/dist/schema/runnable/router.js +1 -70
  173. package/dist/schema/storage.cjs +15 -8
  174. package/dist/schema/storage.d.ts +1 -57
  175. package/dist/schema/storage.js +1 -6
  176. package/dist/tools/bingserpapi.d.ts +1 -1
  177. package/dist/tools/searchapi.d.ts +1 -1
  178. package/dist/tools/serpapi.d.ts +1 -1
  179. package/dist/tools/serper.d.ts +1 -1
  180. package/dist/util/async_caller.cjs +14 -128
  181. package/dist/util/async_caller.d.ts +1 -45
  182. package/dist/util/async_caller.js +1 -124
  183. package/dist/vectorstores/momento_vector_index.cjs +39 -0
  184. package/dist/vectorstores/momento_vector_index.d.ts +17 -1
  185. package/dist/vectorstores/momento_vector_index.js +40 -1
  186. package/dist/vectorstores/mongodb_atlas.cjs +22 -2
  187. package/dist/vectorstores/mongodb_atlas.d.ts +13 -0
  188. package/dist/vectorstores/mongodb_atlas.js +22 -2
  189. package/package.json +18 -11
  190. package/runnables.cjs +1 -0
  191. package/runnables.d.ts +1 -0
  192. package/runnables.js +1 -0
  193. package/dist/util/@cfworker/json-schema/index.d.ts +0 -1
  194. package/dist/util/@cfworker/json-schema/index.js +0 -1
  195. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.cjs +0 -43
  196. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.d.ts +0 -1
  197. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.js +0 -39
  198. package/dist/util/@cfworker/json-schema/src/dereference.cjs +0 -169
  199. package/dist/util/@cfworker/json-schema/src/dereference.d.ts +0 -12
  200. package/dist/util/@cfworker/json-schema/src/dereference.js +0 -165
  201. package/dist/util/@cfworker/json-schema/src/format.cjs +0 -139
  202. package/dist/util/@cfworker/json-schema/src/format.d.ts +0 -2
  203. package/dist/util/@cfworker/json-schema/src/format.js +0 -136
  204. package/dist/util/@cfworker/json-schema/src/index.cjs +0 -24
  205. package/dist/util/@cfworker/json-schema/src/index.d.ts +0 -8
  206. package/dist/util/@cfworker/json-schema/src/index.js +0 -8
  207. package/dist/util/@cfworker/json-schema/src/pointer.cjs +0 -11
  208. package/dist/util/@cfworker/json-schema/src/pointer.d.ts +0 -2
  209. package/dist/util/@cfworker/json-schema/src/pointer.js +0 -6
  210. package/dist/util/@cfworker/json-schema/src/types.cjs +0 -2
  211. package/dist/util/@cfworker/json-schema/src/types.d.ts +0 -72
  212. package/dist/util/@cfworker/json-schema/src/types.js +0 -1
  213. package/dist/util/@cfworker/json-schema/src/ucs2-length.cjs +0 -28
  214. package/dist/util/@cfworker/json-schema/src/ucs2-length.d.ts +0 -6
  215. package/dist/util/@cfworker/json-schema/src/ucs2-length.js +0 -24
  216. package/dist/util/@cfworker/json-schema/src/validate.cjs +0 -808
  217. package/dist/util/@cfworker/json-schema/src/validate.d.ts +0 -3
  218. package/dist/util/@cfworker/json-schema/src/validate.js +0 -804
  219. package/dist/util/@cfworker/json-schema/src/validator.cjs +0 -44
  220. package/dist/util/@cfworker/json-schema/src/validator.d.ts +0 -10
  221. package/dist/util/@cfworker/json-schema/src/validator.js +0 -40
  222. package/dist/util/fast-json-patch/index.cjs +0 -49
  223. package/dist/util/fast-json-patch/index.d.ts +0 -22
  224. package/dist/util/fast-json-patch/index.js +0 -16
  225. package/dist/util/fast-json-patch/src/core.cjs +0 -469
  226. package/dist/util/fast-json-patch/src/core.d.ts +0 -111
  227. package/dist/util/fast-json-patch/src/core.js +0 -459
  228. package/dist/util/fast-json-patch/src/duplex.cjs +0 -237
  229. package/dist/util/fast-json-patch/src/duplex.d.ts +0 -23
  230. package/dist/util/fast-json-patch/src/duplex.js +0 -230
  231. package/dist/util/fast-json-patch/src/helpers.cjs +0 -194
  232. package/dist/util/fast-json-patch/src/helpers.d.ts +0 -36
  233. package/dist/util/fast-json-patch/src/helpers.js +0 -181
  234. package/dist/util/js-sha1/hash.cjs +0 -358
  235. package/dist/util/js-sha1/hash.d.ts +0 -1
  236. package/dist/util/js-sha1/hash.js +0 -355
package/dist/llms/base.cjs CHANGED
@@ -1,280 +1,17 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+       desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.LLM = exports.BaseLLM = void 0;
- const index_js_1 = require("../schema/index.cjs");
- const manager_js_1 = require("../callbacks/manager.cjs");
- const index_js_2 = require("../base_language/index.cjs");
- const base_js_1 = require("../memory/base.cjs");
- /**
-  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
-  */
- class BaseLLM extends index_js_2.BaseLanguageModel {
-     constructor({ concurrency, ...rest }) {
-         super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest);
-         Object.defineProperty(this, "lc_namespace", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: ["langchain", "llms", this._llmType()]
-         });
-     }
-     /**
-      * This method takes an input and options, and returns a string. It
-      * converts the input to a prompt value and generates a result based on
-      * the prompt.
-      * @param input Input for the LLM.
-      * @param options Options for the LLM call.
-      * @returns A string result based on the prompt.
-      */
-     async invoke(input, options) {
-         const promptValue = BaseLLM._convertInputToPromptValue(input);
-         const result = await this.generatePrompt([promptValue], options, options?.callbacks);
-         return result.generations[0][0].text;
-     }
-     // eslint-disable-next-line require-yield
-     async *_streamResponseChunks(_input, _options, _runManager) {
-         throw new Error("Not implemented.");
-     }
-     _separateRunnableConfigFromCallOptions(options) {
-         const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options);
-         if (callOptions?.timeout && !callOptions.signal) {
-             callOptions.signal = AbortSignal.timeout(callOptions.timeout);
-         }
-         return [runnableConfig, callOptions];
-     }
-     async *_streamIterator(input, options) {
-         // Subclass check required to avoid double callbacks with default implementation
-         if (this._streamResponseChunks === BaseLLM.prototype._streamResponseChunks) {
-             yield this.invoke(input, options);
-         }
-         else {
-             const prompt = BaseLLM._convertInputToPromptValue(input);
-             const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
-             const callbackManager_ = await manager_js_1.CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
-             const extra = {
-                 options: callOptions,
-                 invocation_params: this?.invocationParams(callOptions),
-                 batch_size: 1,
-             };
-             const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
-             let generation = new index_js_1.GenerationChunk({
-                 text: "",
-             });
-             try {
-                 for await (const chunk of this._streamResponseChunks(input.toString(), callOptions, runManagers?.[0])) {
-                     if (!generation) {
-                         generation = chunk;
-                     }
-                     else {
-                         generation = generation.concat(chunk);
-                     }
-                     if (typeof chunk.text === "string") {
-                         yield chunk.text;
-                     }
-                 }
-             }
-             catch (err) {
-                 await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-                 throw err;
-             }
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
-                 generations: [[generation]],
-             })));
-         }
-     }
-     /**
-      * This method takes prompt values, options, and callbacks, and generates
-      * a result based on the prompts.
-      * @param promptValues Prompt values for the LLM.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns An LLMResult based on the prompts.
-      */
-     async generatePrompt(promptValues, options, callbacks) {
-         const prompts = promptValues.map((promptValue) => promptValue.toString());
-         return this.generate(prompts, options, callbacks);
-     }
-     /**
-      * Get the parameters used to invoke the model
-      */
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     invocationParams(_options) {
-         return {};
-     }
-     _flattenLLMResult(llmResult) {
-         const llmResults = [];
-         for (let i = 0; i < llmResult.generations.length; i += 1) {
-             const genList = llmResult.generations[i];
-             if (i === 0) {
-                 llmResults.push({
-                     generations: [genList],
-                     llmOutput: llmResult.llmOutput,
-                 });
-             }
-             else {
-                 const llmOutput = llmResult.llmOutput
-                     ? { ...llmResult.llmOutput, tokenUsage: {} }
-                     : undefined;
-                 llmResults.push({
-                     generations: [genList],
-                     llmOutput,
-                 });
-             }
-         }
-         return llmResults;
-     }
-     /** @ignore */
-     async _generateUncached(prompts, parsedOptions, handledOptions) {
-         const callbackManager_ = await manager_js_1.CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
-         const extra = {
-             options: parsedOptions,
-             invocation_params: this?.invocationParams(parsedOptions),
-             batch_size: prompts.length,
-         };
-         const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, undefined, undefined, extra, undefined, undefined, handledOptions?.runName);
-         let output;
-         try {
-             output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
-         }
-         catch (err) {
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-             throw err;
-         }
-         const flattenedOutputs = this._flattenLLMResult(output);
-         await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
-         const runIds = runManagers?.map((manager) => manager.runId) || undefined;
-         // This defines RUN_KEY as a non-enumerable property on the output object
-         // so that it is not serialized when the output is stringified, and so that
-         // it isnt included when listing the keys of the output object.
-         Object.defineProperty(output, index_js_1.RUN_KEY, {
-             value: runIds ? { runIds } : undefined,
-             configurable: true,
-         });
-         return output;
-     }
-     /**
-      * Run the LLM on the given prompts and input, handling caching.
-      */
-     async generate(prompts, options, callbacks) {
-         if (!Array.isArray(prompts)) {
-             throw new Error("Argument 'prompts' is expected to be a string[]");
-         }
-         let parsedOptions;
-         if (Array.isArray(options)) {
-             parsedOptions = { stop: options };
-         }
-         else {
-             parsedOptions = options;
-         }
-         const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
-         runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
-         if (!this.cache) {
-             return this._generateUncached(prompts, callOptions, runnableConfig);
-         }
-         const { cache } = this;
-         const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
-         const missingPromptIndices = [];
-         const generations = await Promise.all(prompts.map(async (prompt, index) => {
-             const result = await cache.lookup(prompt, llmStringKey);
-             if (!result) {
-                 missingPromptIndices.push(index);
-             }
-             return result;
-         }));
-         let llmOutput = {};
-         if (missingPromptIndices.length > 0) {
-             const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig);
-             await Promise.all(results.generations.map(async (generation, index) => {
-                 const promptIndex = missingPromptIndices[index];
-                 generations[promptIndex] = generation;
-                 return cache.update(prompts[promptIndex], llmStringKey, generation);
-             }));
-             llmOutput = results.llmOutput ?? {};
-         }
-         return { generations, llmOutput };
-     }
-     /**
-      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
-      */
-     async call(prompt, options, callbacks) {
-         const { generations } = await this.generate([prompt], options, callbacks);
-         return generations[0][0].text;
-     }
-     /**
-      * This method is similar to `call`, but it's used for making predictions
-      * based on the input text.
-      * @param text Input text for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A prediction based on the input text.
-      */
-     async predict(text, options, callbacks) {
-         return this.call(text, options, callbacks);
-     }
-     /**
-      * This method takes a list of messages, options, and callbacks, and
-      * returns a predicted message.
-      * @param messages A list of messages for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A predicted message based on the list of messages.
-      */
-     async predictMessages(messages, options, callbacks) {
-         const text = (0, base_js_1.getBufferString)(messages);
-         const prediction = await this.call(text, options, callbacks);
-         return new index_js_1.AIMessage(prediction);
-     }
-     /**
-      * Get the identifying parameters of the LLM.
-      */
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     _identifyingParams() {
-         return {};
-     }
-     /**
-      * @deprecated
-      * Return a json-like object representing this LLM.
-      */
-     serialize() {
-         return {
-             ...this._identifyingParams(),
-             _type: this._llmType(),
-             _model: this._modelType(),
-         };
-     }
-     _modelType() {
-         return "base_llm";
-     }
-     /**
-      * @deprecated
-      * Load an LLM from a json-like object describing it.
-      */
-     static async deserialize(data) {
-         const { _type, _model, ...rest } = data;
-         if (_model && _model !== "base_llm") {
-             throw new Error(`Cannot load LLM with model ${_model}`);
-         }
-         const Cls = {
-             openai: (await import("./openai.js")).OpenAI,
-         }[_type];
-         if (Cls === undefined) {
-             throw new Error(`Cannot load LLM with type ${_type}`);
-         }
-         return new Cls(rest);
-     }
- }
- exports.BaseLLM = BaseLLM;
- /**
-  * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
-  *
-  * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
-  *
-  * @augments BaseLLM
-  */
- class LLM extends BaseLLM {
-     async _generate(prompts, options, runManager) {
-         const generations = await Promise.all(prompts.map((prompt, promptIndex) => this._call(prompt, { ...options, promptIndex }, runManager).then((text) => [{ text }])));
-         return { generations };
-     }
- }
- exports.LLM = LLM;
+ __exportStar(require("langchain-core/language_models/llms"), exports);
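
The replacement base.cjs above is the stock interop the TypeScript compiler emits for a bare re-export module: __exportStar defines a live getter on exports for every named export of the required module except "default", so existing require() consumers keep resolving the same bindings. As a rough illustration only (the "langchain/llms/base" entrypoint mapping and the export names are assumptions inferred from the deleted code and the new re-export target, not confirmed by this diff), both imports below should yield the same classes:

// Hypothetical consumer sketch, not part of the package.
import { BaseLLM, LLM } from "langchain/llms/base";
import { BaseLLM as CoreBaseLLM, LLM as CoreLLM } from "langchain-core/language_models/llms";

// The shim only forwards bindings, so class identity should be preserved.
console.assert(BaseLLM === CoreBaseLLM && LLM === CoreLLM);
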
package/dist/llms/base.d.ts CHANGED
@@ -1,115 +1 @@
- import { BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
- import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
- import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
- import { RunnableConfig } from "../schema/runnable/config.js";
- export type SerializedLLM = {
-     _model: string;
-     _type: string;
- } & Record<string, any>;
- export interface BaseLLMParams extends BaseLanguageModelParams {
-     /**
-      * @deprecated Use `maxConcurrency` instead
-      */
-     concurrency?: number;
- }
- export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
- }
- /**
-  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
-  */
- export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
-     ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig & "timeout">;
-     lc_namespace: string[];
-     constructor({ concurrency, ...rest }: BaseLLMParams);
-     /**
-      * This method takes an input and options, and returns a string. It
-      * converts the input to a prompt value and generates a result based on
-      * the prompt.
-      * @param input Input for the LLM.
-      * @param options Options for the LLM call.
-      * @returns A string result based on the prompt.
-      */
-     invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<string>;
-     _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
-     protected _separateRunnableConfigFromCallOptions(options?: Partial<CallOptions>): [RunnableConfig, this["ParsedCallOptions"]];
-     _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator<string>;
-     /**
-      * This method takes prompt values, options, and callbacks, and generates
-      * a result based on the prompts.
-      * @param promptValues Prompt values for the LLM.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns An LLMResult based on the prompts.
-      */
-     generatePrompt(promptValues: BasePromptValue[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
-     /**
-      * Run the LLM on the given prompts and input.
-      */
-     abstract _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
-     /**
-      * Get the parameters used to invoke the model
-      */
-     invocationParams(_options?: this["ParsedCallOptions"]): any;
-     _flattenLLMResult(llmResult: LLMResult): LLMResult[];
-     /** @ignore */
-     _generateUncached(prompts: string[], parsedOptions: this["ParsedCallOptions"], handledOptions: BaseCallbackConfig): Promise<LLMResult>;
-     /**
-      * Run the LLM on the given prompts and input, handling caching.
-      */
-     generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
-     /**
-      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
-      */
-     call(prompt: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
-     /**
-      * This method is similar to `call`, but it's used for making predictions
-      * based on the input text.
-      * @param text Input text for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A prediction based on the input text.
-      */
-     predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
-     /**
-      * This method takes a list of messages, options, and callbacks, and
-      * returns a predicted message.
-      * @param messages A list of messages for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A predicted message based on the list of messages.
-      */
-     predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
-     /**
-      * Get the identifying parameters of the LLM.
-      */
-     _identifyingParams(): Record<string, any>;
-     /**
-      * Return the string type key uniquely identifying this class of LLM.
-      */
-     abstract _llmType(): string;
-     /**
-      * @deprecated
-      * Return a json-like object representing this LLM.
-      */
-     serialize(): SerializedLLM;
-     _modelType(): string;
-     /**
-      * @deprecated
-      * Load an LLM from a json-like object describing it.
-      */
-     static deserialize(data: SerializedLLM): Promise<BaseLLM>;
- }
- /**
-  * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
-  *
-  * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
-  *
-  * @augments BaseLLM
-  */
- export declare abstract class LLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLLM<CallOptions> {
-     /**
-      * Run the LLM on the given prompt and input.
-      */
-     abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
-     _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
- }
+ export * from "langchain-core/language_models/llms";
package/dist/llms/base.js CHANGED
@@ -1,275 +1 @@
- import { AIMessage, GenerationChunk, RUN_KEY, } from "../schema/index.js";
- import { CallbackManager, } from "../callbacks/manager.js";
- import { BaseLanguageModel, } from "../base_language/index.js";
- import { getBufferString } from "../memory/base.js";
- /**
-  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
-  */
- export class BaseLLM extends BaseLanguageModel {
-     constructor({ concurrency, ...rest }) {
-         super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest);
-         Object.defineProperty(this, "lc_namespace", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: ["langchain", "llms", this._llmType()]
-         });
-     }
-     /**
-      * This method takes an input and options, and returns a string. It
-      * converts the input to a prompt value and generates a result based on
-      * the prompt.
-      * @param input Input for the LLM.
-      * @param options Options for the LLM call.
-      * @returns A string result based on the prompt.
-      */
-     async invoke(input, options) {
-         const promptValue = BaseLLM._convertInputToPromptValue(input);
-         const result = await this.generatePrompt([promptValue], options, options?.callbacks);
-         return result.generations[0][0].text;
-     }
-     // eslint-disable-next-line require-yield
-     async *_streamResponseChunks(_input, _options, _runManager) {
-         throw new Error("Not implemented.");
-     }
-     _separateRunnableConfigFromCallOptions(options) {
-         const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options);
-         if (callOptions?.timeout && !callOptions.signal) {
-             callOptions.signal = AbortSignal.timeout(callOptions.timeout);
-         }
-         return [runnableConfig, callOptions];
-     }
-     async *_streamIterator(input, options) {
-         // Subclass check required to avoid double callbacks with default implementation
-         if (this._streamResponseChunks === BaseLLM.prototype._streamResponseChunks) {
-             yield this.invoke(input, options);
-         }
-         else {
-             const prompt = BaseLLM._convertInputToPromptValue(input);
-             const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
-             const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
-             const extra = {
-                 options: callOptions,
-                 invocation_params: this?.invocationParams(callOptions),
-                 batch_size: 1,
-             };
-             const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
-             let generation = new GenerationChunk({
-                 text: "",
-             });
-             try {
-                 for await (const chunk of this._streamResponseChunks(input.toString(), callOptions, runManagers?.[0])) {
-                     if (!generation) {
-                         generation = chunk;
-                     }
-                     else {
-                         generation = generation.concat(chunk);
-                     }
-                     if (typeof chunk.text === "string") {
-                         yield chunk.text;
-                     }
-                 }
-             }
-             catch (err) {
-                 await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-                 throw err;
-             }
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
-                 generations: [[generation]],
-             })));
-         }
-     }
-     /**
-      * This method takes prompt values, options, and callbacks, and generates
-      * a result based on the prompts.
-      * @param promptValues Prompt values for the LLM.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns An LLMResult based on the prompts.
-      */
-     async generatePrompt(promptValues, options, callbacks) {
-         const prompts = promptValues.map((promptValue) => promptValue.toString());
-         return this.generate(prompts, options, callbacks);
-     }
-     /**
-      * Get the parameters used to invoke the model
-      */
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     invocationParams(_options) {
-         return {};
-     }
-     _flattenLLMResult(llmResult) {
-         const llmResults = [];
-         for (let i = 0; i < llmResult.generations.length; i += 1) {
-             const genList = llmResult.generations[i];
-             if (i === 0) {
-                 llmResults.push({
-                     generations: [genList],
-                     llmOutput: llmResult.llmOutput,
-                 });
-             }
-             else {
-                 const llmOutput = llmResult.llmOutput
-                     ? { ...llmResult.llmOutput, tokenUsage: {} }
-                     : undefined;
-                 llmResults.push({
-                     generations: [genList],
-                     llmOutput,
-                 });
-             }
-         }
-         return llmResults;
-     }
-     /** @ignore */
-     async _generateUncached(prompts, parsedOptions, handledOptions) {
-         const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
-         const extra = {
-             options: parsedOptions,
-             invocation_params: this?.invocationParams(parsedOptions),
-             batch_size: prompts.length,
-         };
-         const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, undefined, undefined, extra, undefined, undefined, handledOptions?.runName);
-         let output;
-         try {
-             output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
-         }
-         catch (err) {
-             await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
-             throw err;
-         }
-         const flattenedOutputs = this._flattenLLMResult(output);
-         await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
-         const runIds = runManagers?.map((manager) => manager.runId) || undefined;
-         // This defines RUN_KEY as a non-enumerable property on the output object
-         // so that it is not serialized when the output is stringified, and so that
-         // it isnt included when listing the keys of the output object.
-         Object.defineProperty(output, RUN_KEY, {
-             value: runIds ? { runIds } : undefined,
-             configurable: true,
-         });
-         return output;
-     }
-     /**
-      * Run the LLM on the given prompts and input, handling caching.
-      */
-     async generate(prompts, options, callbacks) {
-         if (!Array.isArray(prompts)) {
-             throw new Error("Argument 'prompts' is expected to be a string[]");
-         }
-         let parsedOptions;
-         if (Array.isArray(options)) {
-             parsedOptions = { stop: options };
-         }
-         else {
-             parsedOptions = options;
-         }
-         const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
-         runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
-         if (!this.cache) {
-             return this._generateUncached(prompts, callOptions, runnableConfig);
-         }
-         const { cache } = this;
-         const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
-         const missingPromptIndices = [];
-         const generations = await Promise.all(prompts.map(async (prompt, index) => {
-             const result = await cache.lookup(prompt, llmStringKey);
-             if (!result) {
-                 missingPromptIndices.push(index);
-             }
-             return result;
-         }));
-         let llmOutput = {};
-         if (missingPromptIndices.length > 0) {
-             const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig);
-             await Promise.all(results.generations.map(async (generation, index) => {
-                 const promptIndex = missingPromptIndices[index];
-                 generations[promptIndex] = generation;
-                 return cache.update(prompts[promptIndex], llmStringKey, generation);
-             }));
-             llmOutput = results.llmOutput ?? {};
-         }
-         return { generations, llmOutput };
-     }
-     /**
-      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
-      */
-     async call(prompt, options, callbacks) {
-         const { generations } = await this.generate([prompt], options, callbacks);
-         return generations[0][0].text;
-     }
-     /**
-      * This method is similar to `call`, but it's used for making predictions
-      * based on the input text.
-      * @param text Input text for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A prediction based on the input text.
-      */
-     async predict(text, options, callbacks) {
-         return this.call(text, options, callbacks);
-     }
-     /**
-      * This method takes a list of messages, options, and callbacks, and
-      * returns a predicted message.
-      * @param messages A list of messages for the prediction.
-      * @param options Options for the LLM call.
-      * @param callbacks Callbacks for the LLM call.
-      * @returns A predicted message based on the list of messages.
-      */
-     async predictMessages(messages, options, callbacks) {
-         const text = getBufferString(messages);
-         const prediction = await this.call(text, options, callbacks);
-         return new AIMessage(prediction);
-     }
-     /**
-      * Get the identifying parameters of the LLM.
-      */
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     _identifyingParams() {
-         return {};
-     }
-     /**
-      * @deprecated
-      * Return a json-like object representing this LLM.
-      */
-     serialize() {
-         return {
-             ...this._identifyingParams(),
-             _type: this._llmType(),
-             _model: this._modelType(),
-         };
-     }
-     _modelType() {
-         return "base_llm";
-     }
-     /**
-      * @deprecated
-      * Load an LLM from a json-like object describing it.
-      */
-     static async deserialize(data) {
-         const { _type, _model, ...rest } = data;
-         if (_model && _model !== "base_llm") {
-             throw new Error(`Cannot load LLM with model ${_model}`);
-         }
-         const Cls = {
-             openai: (await import("./openai.js")).OpenAI,
-         }[_type];
-         if (Cls === undefined) {
-             throw new Error(`Cannot load LLM with type ${_type}`);
-         }
-         return new Cls(rest);
-     }
- }
- /**
-  * LLM class that provides a simpler interface to subclass than {@link BaseLLM}.
-  *
-  * Requires only implementing a simpler {@link _call} method instead of {@link _generate}.
-  *
-  * @augments BaseLLM
-  */
- export class LLM extends BaseLLM {
-     async _generate(prompts, options, runManager) {
-         const generations = await Promise.all(prompts.map((prompt, promptIndex) => this._call(prompt, { ...options, promptIndex }, runManager).then((text) => [{ text }])));
-         return { generations };
-     }
- }
+ export * from "langchain-core/language_models/llms";
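
Taken together, all three builds of dist/llms/base reduce to pass-throughs to langchain-core, so code that subclasses LLM through the old entrypoint should compile and run unchanged. A minimal sketch, assuming the "langchain/llms/base" entrypoint resolves to the shims above; the required members (_llmType and _call) come from the deleted declarations in base.d.ts, and EchoLLM is a hypothetical example class:

// Hypothetical consumer code, not part of this diff.
import { LLM } from "langchain/llms/base";

class EchoLLM extends LLM {
  _llmType(): string {
    return "echo";
  }

  // LLM subclasses implement only _call; _generate is inherited
  // (see the deleted base.js above for the fan-out over prompts).
  async _call(prompt: string): Promise<string> {
    return prompt;
  }
}

const llm = new EchoLLM({});
// await llm.invoke("hello") resolves to "hello".
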