langchain 0.0.195 → 0.0.196

This diff shows the publicly available contents of the two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
Files changed (216)
  1. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +1 -1
  2. package/dist/base_language/count_tokens.cjs +5 -70
  3. package/dist/base_language/count_tokens.d.ts +1 -10
  4. package/dist/base_language/count_tokens.js +1 -65
  5. package/dist/base_language/index.cjs +6 -196
  6. package/dist/base_language/index.d.ts +1 -111
  7. package/dist/base_language/index.js +1 -191
  8. package/dist/cache/base.cjs +15 -37
  9. package/dist/cache/base.d.ts +1 -20
  10. package/dist/cache/base.js +1 -33
  11. package/dist/cache/index.cjs +2 -46
  12. package/dist/cache/index.d.ts +1 -29
  13. package/dist/cache/index.js +1 -45
  14. package/dist/callbacks/base.cjs +3 -139
  15. package/dist/callbacks/base.d.ts +1 -266
  16. package/dist/callbacks/base.js +1 -126
  17. package/dist/callbacks/handlers/console.cjs +14 -221
  18. package/dist/callbacks/handlers/console.d.ts +1 -117
  19. package/dist/callbacks/handlers/console.js +1 -217
  20. package/dist/callbacks/handlers/initialize.cjs +15 -30
  21. package/dist/callbacks/handlers/initialize.d.ts +1 -16
  22. package/dist/callbacks/handlers/initialize.js +1 -27
  23. package/dist/callbacks/handlers/log_stream.cjs +15 -293
  24. package/dist/callbacks/handlers/log_stream.d.ts +1 -100
  25. package/dist/callbacks/handlers/log_stream.js +1 -289
  26. package/dist/callbacks/handlers/run_collector.cjs +15 -48
  27. package/dist/callbacks/handlers/run_collector.d.ts +1 -26
  28. package/dist/callbacks/handlers/run_collector.js +1 -46
  29. package/dist/callbacks/handlers/tracer.cjs +15 -375
  30. package/dist/callbacks/handlers/tracer.d.ts +1 -70
  31. package/dist/callbacks/handlers/tracer.js +1 -373
  32. package/dist/callbacks/handlers/tracer_langchain.cjs +15 -104
  33. package/dist/callbacks/handlers/tracer_langchain.d.ts +1 -41
  34. package/dist/callbacks/handlers/tracer_langchain.js +1 -102
  35. package/dist/callbacks/handlers/tracer_langchain_v1.cjs +15 -197
  36. package/dist/callbacks/handlers/tracer_langchain_v1.d.ts +1 -57
  37. package/dist/callbacks/handlers/tracer_langchain_v1.js +1 -195
  38. package/dist/callbacks/manager.cjs +15 -676
  39. package/dist/callbacks/manager.d.ts +1 -180
  40. package/dist/callbacks/manager.js +1 -666
  41. package/dist/callbacks/promises.cjs +14 -42
  42. package/dist/callbacks/promises.d.ts +1 -11
  43. package/dist/callbacks/promises.js +1 -37
  44. package/dist/chains/graph_qa/prompts.d.ts +1 -1
  45. package/dist/chains/openai_functions/structured_output.cjs +2 -2
  46. package/dist/chains/openai_functions/structured_output.d.ts +1 -1
  47. package/dist/chains/openai_functions/structured_output.js +1 -1
  48. package/dist/chat_models/baiduwenxin.d.ts +1 -1
  49. package/dist/chat_models/base.cjs +15 -296
  50. package/dist/chat_models/base.d.ts +1 -122
  51. package/dist/chat_models/base.js +1 -292
  52. package/dist/chat_models/bedrock/web.d.ts +1 -1
  53. package/dist/chat_models/fireworks.d.ts +1 -1
  54. package/dist/document.cjs +2 -24
  55. package/dist/document.d.ts +1 -12
  56. package/dist/document.js +1 -23
  57. package/dist/document_loaders/web/azure_blob_storage_file.d.ts +1 -1
  58. package/dist/document_loaders/web/s3.d.ts +1 -1
  59. package/dist/embeddings/base.cjs +15 -22
  60. package/dist/embeddings/base.d.ts +1 -33
  61. package/dist/embeddings/base.js +1 -20
  62. package/dist/embeddings/cache_backed.cjs +2 -2
  63. package/dist/embeddings/cache_backed.js +1 -1
  64. package/dist/evaluation/agents/trajectory.d.ts +1 -1
  65. package/dist/evaluation/criteria/prompt.d.ts +2 -2
  66. package/dist/evaluation/qa/prompt.d.ts +2 -2
  67. package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +1 -1
  68. package/dist/experimental/plan_and_execute/prompt.d.ts +1 -1
  69. package/dist/llms/base.cjs +15 -278
  70. package/dist/llms/base.d.ts +1 -115
  71. package/dist/llms/base.js +1 -275
  72. package/dist/llms/bedrock/web.d.ts +1 -1
  73. package/dist/llms/fireworks.d.ts +1 -1
  74. package/dist/load/import_map.cjs +2 -1
  75. package/dist/load/import_map.d.ts +1 -0
  76. package/dist/load/import_map.js +1 -0
  77. package/dist/load/index.cjs +7 -148
  78. package/dist/load/index.js +7 -148
  79. package/dist/load/map_keys.cjs +0 -24
  80. package/dist/load/map_keys.d.ts +0 -6
  81. package/dist/load/map_keys.js +1 -17
  82. package/dist/load/serializable.cjs +15 -178
  83. package/dist/load/serializable.d.ts +1 -66
  84. package/dist/load/serializable.js +1 -175
  85. package/dist/memory/base.cjs +17 -92
  86. package/dist/memory/base.d.ts +2 -68
  87. package/dist/memory/base.js +2 -87
  88. package/dist/output_parsers/openai_functions.cjs +2 -2
  89. package/dist/output_parsers/openai_functions.d.ts +1 -1
  90. package/dist/output_parsers/openai_functions.js +1 -1
  91. package/dist/output_parsers/regex.d.ts +1 -1
  92. package/dist/output_parsers/structured.d.ts +1 -1
  93. package/dist/prompts/base.cjs +8 -183
  94. package/dist/prompts/base.d.ts +3 -132
  95. package/dist/prompts/base.js +3 -178
  96. package/dist/prompts/chat.cjs +15 -477
  97. package/dist/prompts/chat.d.ts +1 -219
  98. package/dist/prompts/chat.js +1 -466
  99. package/dist/prompts/few_shot.cjs +15 -353
  100. package/dist/prompts/few_shot.d.ts +1 -192
  101. package/dist/prompts/few_shot.js +1 -350
  102. package/dist/prompts/index.cjs +3 -2
  103. package/dist/prompts/index.d.ts +2 -1
  104. package/dist/prompts/index.js +2 -1
  105. package/dist/prompts/pipeline.cjs +15 -142
  106. package/dist/prompts/pipeline.d.ts +1 -98
  107. package/dist/prompts/pipeline.js +1 -140
  108. package/dist/prompts/prompt.cjs +15 -146
  109. package/dist/prompts/prompt.d.ts +1 -92
  110. package/dist/prompts/prompt.js +1 -144
  111. package/dist/prompts/selectors/LengthBasedExampleSelector.cjs +15 -148
  112. package/dist/prompts/selectors/LengthBasedExampleSelector.d.ts +1 -89
  113. package/dist/prompts/selectors/LengthBasedExampleSelector.js +1 -146
  114. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.cjs +15 -137
  115. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.d.ts +1 -91
  116. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.js +1 -135
  117. package/dist/prompts/selectors/conditional.cjs +15 -74
  118. package/dist/prompts/selectors/conditional.d.ts +1 -63
  119. package/dist/prompts/selectors/conditional.js +1 -69
  120. package/dist/prompts/serde.cjs +15 -0
  121. package/dist/prompts/serde.d.ts +1 -43
  122. package/dist/prompts/serde.js +1 -1
  123. package/dist/prompts/template.cjs +14 -88
  124. package/dist/prompts/template.d.ts +1 -36
  125. package/dist/prompts/template.js +1 -83
  126. package/dist/{util/@cfworker/json-schema → runnables}/index.cjs +1 -1
  127. package/dist/runnables/index.d.ts +1 -0
  128. package/dist/runnables/index.js +1 -0
  129. package/dist/schema/document.cjs +3 -34
  130. package/dist/schema/document.d.ts +2 -29
  131. package/dist/schema/document.js +2 -32
  132. package/dist/schema/index.cjs +37 -612
  133. package/dist/schema/index.d.ts +11 -311
  134. package/dist/schema/index.js +8 -583
  135. package/dist/schema/output_parser.cjs +15 -309
  136. package/dist/schema/output_parser.d.ts +1 -173
  137. package/dist/schema/output_parser.js +1 -301
  138. package/dist/schema/retriever.cjs +15 -77
  139. package/dist/schema/retriever.d.ts +1 -43
  140. package/dist/schema/retriever.js +1 -75
  141. package/dist/schema/runnable/base.cjs +10 -1072
  142. package/dist/schema/runnable/base.d.ts +1 -356
  143. package/dist/schema/runnable/base.js +1 -1060
  144. package/dist/schema/runnable/branch.cjs +2 -131
  145. package/dist/schema/runnable/branch.d.ts +1 -94
  146. package/dist/schema/runnable/branch.js +1 -130
  147. package/dist/schema/runnable/config.cjs +0 -6
  148. package/dist/schema/runnable/config.d.ts +1 -3
  149. package/dist/schema/runnable/config.js +1 -4
  150. package/dist/schema/runnable/index.cjs +15 -16
  151. package/dist/schema/runnable/index.d.ts +1 -5
  152. package/dist/schema/runnable/index.js +1 -4
  153. package/dist/schema/runnable/passthrough.cjs +3 -113
  154. package/dist/schema/runnable/passthrough.d.ts +1 -72
  155. package/dist/schema/runnable/passthrough.js +1 -111
  156. package/dist/schema/runnable/router.cjs +2 -71
  157. package/dist/schema/runnable/router.d.ts +1 -29
  158. package/dist/schema/runnable/router.js +1 -70
  159. package/dist/schema/storage.cjs +15 -8
  160. package/dist/schema/storage.d.ts +1 -57
  161. package/dist/schema/storage.js +1 -6
  162. package/dist/tools/bingserpapi.d.ts +1 -1
  163. package/dist/tools/searchapi.d.ts +1 -1
  164. package/dist/tools/serpapi.d.ts +1 -1
  165. package/dist/tools/serper.d.ts +1 -1
  166. package/dist/util/async_caller.cjs +14 -128
  167. package/dist/util/async_caller.d.ts +1 -45
  168. package/dist/util/async_caller.js +1 -124
  169. package/package.json +11 -5
  170. package/runnables.cjs +1 -0
  171. package/runnables.d.ts +1 -0
  172. package/runnables.js +1 -0
  173. package/dist/util/@cfworker/json-schema/index.d.ts +0 -1
  174. package/dist/util/@cfworker/json-schema/index.js +0 -1
  175. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.cjs +0 -43
  176. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.d.ts +0 -1
  177. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.js +0 -39
  178. package/dist/util/@cfworker/json-schema/src/dereference.cjs +0 -169
  179. package/dist/util/@cfworker/json-schema/src/dereference.d.ts +0 -12
  180. package/dist/util/@cfworker/json-schema/src/dereference.js +0 -165
  181. package/dist/util/@cfworker/json-schema/src/format.cjs +0 -139
  182. package/dist/util/@cfworker/json-schema/src/format.d.ts +0 -2
  183. package/dist/util/@cfworker/json-schema/src/format.js +0 -136
  184. package/dist/util/@cfworker/json-schema/src/index.cjs +0 -24
  185. package/dist/util/@cfworker/json-schema/src/index.d.ts +0 -8
  186. package/dist/util/@cfworker/json-schema/src/index.js +0 -8
  187. package/dist/util/@cfworker/json-schema/src/pointer.cjs +0 -11
  188. package/dist/util/@cfworker/json-schema/src/pointer.d.ts +0 -2
  189. package/dist/util/@cfworker/json-schema/src/pointer.js +0 -6
  190. package/dist/util/@cfworker/json-schema/src/types.cjs +0 -2
  191. package/dist/util/@cfworker/json-schema/src/types.d.ts +0 -72
  192. package/dist/util/@cfworker/json-schema/src/types.js +0 -1
  193. package/dist/util/@cfworker/json-schema/src/ucs2-length.cjs +0 -28
  194. package/dist/util/@cfworker/json-schema/src/ucs2-length.d.ts +0 -6
  195. package/dist/util/@cfworker/json-schema/src/ucs2-length.js +0 -24
  196. package/dist/util/@cfworker/json-schema/src/validate.cjs +0 -808
  197. package/dist/util/@cfworker/json-schema/src/validate.d.ts +0 -3
  198. package/dist/util/@cfworker/json-schema/src/validate.js +0 -804
  199. package/dist/util/@cfworker/json-schema/src/validator.cjs +0 -44
  200. package/dist/util/@cfworker/json-schema/src/validator.d.ts +0 -10
  201. package/dist/util/@cfworker/json-schema/src/validator.js +0 -40
  202. package/dist/util/fast-json-patch/index.cjs +0 -49
  203. package/dist/util/fast-json-patch/index.d.ts +0 -22
  204. package/dist/util/fast-json-patch/index.js +0 -16
  205. package/dist/util/fast-json-patch/src/core.cjs +0 -469
  206. package/dist/util/fast-json-patch/src/core.d.ts +0 -111
  207. package/dist/util/fast-json-patch/src/core.js +0 -459
  208. package/dist/util/fast-json-patch/src/duplex.cjs +0 -237
  209. package/dist/util/fast-json-patch/src/duplex.d.ts +0 -23
  210. package/dist/util/fast-json-patch/src/duplex.js +0 -230
  211. package/dist/util/fast-json-patch/src/helpers.cjs +0 -194
  212. package/dist/util/fast-json-patch/src/helpers.d.ts +0 -36
  213. package/dist/util/fast-json-patch/src/helpers.js +0 -181
  214. package/dist/util/js-sha1/hash.cjs +0 -358
  215. package/dist/util/js-sha1/hash.d.ts +0 -1
  216. package/dist/util/js-sha1/hash.js +0 -355
package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts
@@ -33,7 +33,7 @@ export declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {
      * Retrieves the messages from the chat history.
      * @returns Promise that resolves with the messages from the chat history.
      */
-    getMessages(): Promise<import("../../../schema/index.js").BaseMessage[]>;
+    getMessages(): Promise<import("langchain-core/schema/messages").BaseMessage[]>;
     /**
      * Loads memory variables from the input values.
      * @param _values Input values.
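
Note: only the declared import path for BaseMessage moves here (into the langchain-core package); the method's runtime behavior is unchanged. A minimal consumer sketch, assuming the "langchain/agents/toolkits" entrypoint and constructor options from the docs of this era (neither is part of this diff):

    import { OpenAIAgentTokenBufferMemory } from "langchain/agents/toolkits";
    import { ChatOpenAI } from "langchain/chat_models/openai";

    // Hypothetical setup; option names follow the conversational retrieval
    // agent docs of this era and may differ in your version.
    const memory = new OpenAIAgentTokenBufferMemory({
      llm: new ChatOpenAI({}),
      memoryKey: "chat_history",
    });

    // Same call before and after this release; only the declared element
    // type now resolves from langchain-core.
    const messages = await memory.getMessages();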
package/dist/base_language/count_tokens.cjs
@@ -1,72 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.calculateMaxTokens = exports.getModelContextSize = exports.getEmbeddingContextSize = exports.getModelNameForTiktoken = void 0;
-const tiktoken_js_1 = require("../util/tiktoken.cjs");
-// https://www.npmjs.com/package/js-tiktoken
-const getModelNameForTiktoken = (modelName) => {
-    if (modelName.startsWith("gpt-3.5-turbo-16k")) {
-        return "gpt-3.5-turbo-16k";
-    }
-    if (modelName.startsWith("gpt-3.5-turbo-")) {
-        return "gpt-3.5-turbo";
-    }
-    if (modelName.startsWith("gpt-4-32k")) {
-        return "gpt-4-32k";
-    }
-    if (modelName.startsWith("gpt-4-")) {
-        return "gpt-4";
-    }
-    return modelName;
-};
-exports.getModelNameForTiktoken = getModelNameForTiktoken;
-const getEmbeddingContextSize = (modelName) => {
-    switch (modelName) {
-        case "text-embedding-ada-002":
-            return 8191;
-        default:
-            return 2046;
-    }
-};
-exports.getEmbeddingContextSize = getEmbeddingContextSize;
-const getModelContextSize = (modelName) => {
-    switch ((0, exports.getModelNameForTiktoken)(modelName)) {
-        case "gpt-3.5-turbo-16k":
-            return 16384;
-        case "gpt-3.5-turbo":
-            return 4096;
-        case "gpt-4-32k":
-            return 32768;
-        case "gpt-4":
-            return 8192;
-        case "text-davinci-003":
-            return 4097;
-        case "text-curie-001":
-            return 2048;
-        case "text-babbage-001":
-            return 2048;
-        case "text-ada-001":
-            return 2048;
-        case "code-davinci-002":
-            return 8000;
-        case "code-cushman-001":
-            return 2048;
-        default:
-            return 4097;
-    }
-};
-exports.getModelContextSize = getModelContextSize;
-const calculateMaxTokens = async ({ prompt, modelName, }) => {
-    let numTokens;
-    try {
-        numTokens = (await (0, tiktoken_js_1.encodingForModel)((0, exports.getModelNameForTiktoken)(modelName))).encode(prompt).length;
-    }
-    catch (error) {
-        console.warn("Failed to calculate number of tokens, falling back to approximate count");
-        // fallback to approximate calculation if tiktoken is not available
-        // each token is ~4 characters: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them#
-        numTokens = Math.ceil(prompt.length / 4);
-    }
-    const maxTokens = (0, exports.getModelContextSize)(modelName);
-    return maxTokens - numTokens;
-};
-exports.calculateMaxTokens = calculateMaxTokens;
+exports.getEmbeddingContextSize = exports.getModelContextSize = exports.calculateMaxTokens = void 0;
+var language_model_1 = require("langchain-core/schema/language_model");
+Object.defineProperty(exports, "calculateMaxTokens", { enumerable: true, get: function () { return language_model_1.calculateMaxTokens; } });
+Object.defineProperty(exports, "getModelContextSize", { enumerable: true, get: function () { return language_model_1.getModelContextSize; } });
+Object.defineProperty(exports, "getEmbeddingContextSize", { enumerable: true, get: function () { return language_model_1.getEmbeddingContextSize; } });
package/dist/base_language/count_tokens.d.ts
@@ -1,10 +1 @@
-import { type TiktokenModel } from "js-tiktoken/lite";
-export declare const getModelNameForTiktoken: (modelName: string) => TiktokenModel;
-export declare const getEmbeddingContextSize: (modelName?: string) => number;
-export declare const getModelContextSize: (modelName: string) => number;
-interface CalculateMaxTokenProps {
-    prompt: string;
-    modelName: TiktokenModel;
-}
-export declare const calculateMaxTokens: ({ prompt, modelName, }: CalculateMaxTokenProps) => Promise<number>;
-export {};
+export { calculateMaxTokens, getModelContextSize, getEmbeddingContextSize, } from "langchain-core/schema/language_model";
package/dist/base_language/count_tokens.js
@@ -1,65 +1 @@
-import { encodingForModel } from "../util/tiktoken.js";
-// https://www.npmjs.com/package/js-tiktoken
-export const getModelNameForTiktoken = (modelName) => {
-    if (modelName.startsWith("gpt-3.5-turbo-16k")) {
-        return "gpt-3.5-turbo-16k";
-    }
-    if (modelName.startsWith("gpt-3.5-turbo-")) {
-        return "gpt-3.5-turbo";
-    }
-    if (modelName.startsWith("gpt-4-32k")) {
-        return "gpt-4-32k";
-    }
-    if (modelName.startsWith("gpt-4-")) {
-        return "gpt-4";
-    }
-    return modelName;
-};
-export const getEmbeddingContextSize = (modelName) => {
-    switch (modelName) {
-        case "text-embedding-ada-002":
-            return 8191;
-        default:
-            return 2046;
-    }
-};
-export const getModelContextSize = (modelName) => {
-    switch (getModelNameForTiktoken(modelName)) {
-        case "gpt-3.5-turbo-16k":
-            return 16384;
-        case "gpt-3.5-turbo":
-            return 4096;
-        case "gpt-4-32k":
-            return 32768;
-        case "gpt-4":
-            return 8192;
-        case "text-davinci-003":
-            return 4097;
-        case "text-curie-001":
-            return 2048;
-        case "text-babbage-001":
-            return 2048;
-        case "text-ada-001":
-            return 2048;
-        case "code-davinci-002":
-            return 8000;
-        case "code-cushman-001":
-            return 2048;
-        default:
-            return 4097;
-    }
-};
-export const calculateMaxTokens = async ({ prompt, modelName, }) => {
-    let numTokens;
-    try {
-        numTokens = (await encodingForModel(getModelNameForTiktoken(modelName))).encode(prompt).length;
-    }
-    catch (error) {
-        console.warn("Failed to calculate number of tokens, falling back to approximate count");
-        // fallback to approximate calculation if tiktoken is not available
-        // each token is ~4 characters: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them#
-        numTokens = Math.ceil(prompt.length / 4);
-    }
-    const maxTokens = getModelContextSize(modelName);
-    return maxTokens - numTokens;
-};
+export { calculateMaxTokens, getModelContextSize, getEmbeddingContextSize, } from "langchain-core/schema/language_model";
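
Note: all three builds of count_tokens collapse into one-line re-exports, so the helpers keep the behavior of the removed implementation above. A usage sketch against the public "langchain/base_language" entrypoint:

    import {
      calculateMaxTokens,
      getModelContextSize,
    } from "langchain/base_language";

    getModelContextSize("gpt-4");             // 8192
    getModelContextSize("gpt-3.5-turbo-16k"); // 16384 (matched by prefix)

    // Remaining budget = model context size - tokens in the prompt. Per the
    // removed code, if tiktoken cannot load, the token count is approximated
    // as ceil(prompt.length / 4).
    const remaining = await calculateMaxTokens({
      prompt: "Hello world",
      modelName: "gpt-4",
    });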
package/dist/base_language/index.cjs
@@ -1,204 +1,14 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.getModelContextSize = exports.calculateMaxTokens = exports.BaseLanguageModel = exports.BaseLangChain = void 0;
-const index_js_1 = require("../schema/index.cjs");
-const async_caller_js_1 = require("../util/async_caller.cjs");
-const count_tokens_js_1 = require("./count_tokens.cjs");
-const tiktoken_js_1 = require("../util/tiktoken.cjs");
-const index_js_2 = require("../schema/runnable/index.cjs");
-const base_js_1 = require("../prompts/base.cjs");
-const chat_js_1 = require("../prompts/chat.cjs");
-const index_js_3 = require("../cache/index.cjs");
-const getVerbosity = () => false;
-/**
- * Base class for language models, chains, tools.
- */
-class BaseLangChain extends index_js_2.Runnable {
-    get lc_attributes() {
-        return {
-            callbacks: undefined,
-            verbose: undefined,
-        };
-    }
-    constructor(params) {
-        super(params);
-        /**
-         * Whether to print out response text.
-         */
-        Object.defineProperty(this, "verbose", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "callbacks", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "tags", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "metadata", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.verbose = params.verbose ?? getVerbosity();
-        this.callbacks = params.callbacks;
-        this.tags = params.tags ?? [];
-        this.metadata = params.metadata ?? {};
-    }
-}
-exports.BaseLangChain = BaseLangChain;
-/**
- * Base class for language models.
- */
-class BaseLanguageModel extends BaseLangChain {
-    /**
-     * Keys that the language model accepts as call options.
-     */
-    get callKeys() {
-        return ["stop", "timeout", "signal", "tags", "metadata", "callbacks"];
-    }
-    constructor({ callbacks, callbackManager, ...params }) {
-        super({
-            callbacks: callbacks ?? callbackManager,
-            ...params,
-        });
-        /**
-         * The async caller should be used by subclasses to make any async calls,
-         * which will thus benefit from the concurrency and retry logic.
-         */
-        Object.defineProperty(this, "caller", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "cache", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "_encoding", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        if (typeof params.cache === "object") {
-            this.cache = params.cache;
-        }
-        else if (params.cache) {
-            this.cache = index_js_3.InMemoryCache.global();
-        }
-        else {
-            this.cache = undefined;
-        }
-        this.caller = new async_caller_js_1.AsyncCaller(params ?? {});
-    }
-    async getNumTokens(content) {
-        // TODO: Figure out correct value.
-        if (typeof content !== "string") {
-            return 0;
-        }
-        // fallback to approximate calculation if tiktoken is not available
-        let numTokens = Math.ceil(content.length / 4);
-        if (!this._encoding) {
-            try {
-                this._encoding = await (0, tiktoken_js_1.encodingForModel)("modelName" in this
-                    ? (0, count_tokens_js_1.getModelNameForTiktoken)(this.modelName)
-                    : "gpt2");
-            }
-            catch (error) {
-                console.warn("Failed to calculate number of tokens, falling back to approximate count", error);
-            }
-        }
-        if (this._encoding) {
-            numTokens = this._encoding.encode(content).length;
-        }
-        return numTokens;
-    }
-    static _convertInputToPromptValue(input) {
-        if (typeof input === "string") {
-            return new base_js_1.StringPromptValue(input);
-        }
-        else if (Array.isArray(input)) {
-            return new chat_js_1.ChatPromptValue(input.map(index_js_1.coerceMessageLikeToMessage));
-        }
-        else {
-            return input;
-        }
-    }
-    /**
-     * Get the identifying parameters of the LLM.
-     */
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    _identifyingParams() {
-        return {};
-    }
-    /**
-     * Create a unique cache key for a specific call to a specific language model.
-     * @param callOptions Call options for the model
-     * @returns A unique cache key.
-     */
-    _getSerializedCacheKeyParametersForCall(callOptions) {
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        const params = {
-            ...this._identifyingParams(),
-            ...callOptions,
-            _type: this._llmType(),
-            _model: this._modelType(),
-        };
-        const filteredEntries = Object.entries(params).filter(([_, value]) => value !== undefined);
-        const serializedEntries = filteredEntries
-            .map(([key, value]) => `${key}:${JSON.stringify(value)}`)
-            .sort()
-            .join(",");
-        return serializedEntries;
-    }
-    /**
-     * @deprecated
-     * Return a json-like object representing this LLM.
-     */
-    serialize() {
-        return {
-            ...this._identifyingParams(),
-            _type: this._llmType(),
-            _model: this._modelType(),
-        };
-    }
-    /**
-     * @deprecated
-     * Load an LLM from a json-like object describing it.
-     */
-    static async deserialize(data) {
-        const { _type, _model, ...rest } = data;
-        if (_model && _model !== "base_chat_model") {
-            throw new Error(`Cannot load LLM with model ${_model}`);
-        }
-        const Cls = {
-            openai: (await import("../chat_models/openai.js")).ChatOpenAI,
-        }[_type];
-        if (Cls === undefined) {
-            throw new Error(`Cannot load LLM with type ${_type}`);
-        }
-        return new Cls(rest);
-    }
-}
-exports.BaseLanguageModel = BaseLanguageModel;
+var language_model_1 = require("langchain-core/schema/language_model");
+Object.defineProperty(exports, "BaseLangChain", { enumerable: true, get: function () { return language_model_1.BaseLangChain; } });
+Object.defineProperty(exports, "BaseLanguageModel", { enumerable: true, get: function () { return language_model_1.BaseLanguageModel; } });
 /*
  * Export utility functions for token calculations:
  * - calculateMaxTokens: Calculate max tokens for a given model and prompt (the model context size - tokens in prompt).
  * - getModelContextSize: Get the context size for a specific model.
  */
-var count_tokens_js_2 = require("./count_tokens.cjs");
-Object.defineProperty(exports, "calculateMaxTokens", { enumerable: true, get: function () { return count_tokens_js_2.calculateMaxTokens; } });
-Object.defineProperty(exports, "getModelContextSize", { enumerable: true, get: function () { return count_tokens_js_2.getModelContextSize; } });
+var count_tokens_js_1 = require("./count_tokens.cjs");
+Object.defineProperty(exports, "calculateMaxTokens", { enumerable: true, get: function () { return count_tokens_js_1.calculateMaxTokens; } });
+Object.defineProperty(exports, "getModelContextSize", { enumerable: true, get: function () { return count_tokens_js_1.getModelContextSize; } });
package/dist/base_language/index.d.ts
@@ -1,112 +1,2 @@
-import type { OpenAI as OpenAIClient } from "openai";
-import { BaseCache, BaseMessage, BaseMessageLike, BasePromptValue, LLMResult, MessageContent } from "../schema/index.js";
-import { BaseCallbackConfig, CallbackManager, Callbacks } from "../callbacks/manager.js";
-import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js";
-import { Runnable } from "../schema/runnable/index.js";
-import { RunnableConfig } from "../schema/runnable/config.js";
-export type SerializedLLM = {
-    _model: string;
-    _type: string;
-} & Record<string, any>;
-export interface BaseLangChainParams {
-    verbose?: boolean;
-    callbacks?: Callbacks;
-    tags?: string[];
-    metadata?: Record<string, unknown>;
-}
-/**
- * Base class for language models, chains, tools.
- */
-export declare abstract class BaseLangChain<RunInput, RunOutput, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable<RunInput, RunOutput, CallOptions> implements BaseLangChainParams {
-    /**
-     * Whether to print out response text.
-     */
-    verbose: boolean;
-    callbacks?: Callbacks;
-    tags?: string[];
-    metadata?: Record<string, unknown>;
-    get lc_attributes(): {
-        [key: string]: undefined;
-    } | undefined;
-    constructor(params: BaseLangChainParams);
-}
-/**
- * Base interface for language model parameters.
- * A subclass of {@link BaseLanguageModel} should have a constructor that
- * takes in a parameter that extends this interface.
- */
-export interface BaseLanguageModelParams extends AsyncCallerParams, BaseLangChainParams {
-    /**
-     * @deprecated Use `callbacks` instead
-     */
-    callbackManager?: CallbackManager;
-    cache?: BaseCache | boolean;
-}
-export interface BaseLanguageModelCallOptions extends BaseCallbackConfig {
-    /**
-     * Stop tokens to use for this call.
-     * If not provided, the default stop tokens for the model will be used.
-     */
-    stop?: string[];
-    /**
-     * Timeout for this call in milliseconds.
-     */
-    timeout?: number;
-    /**
-     * Abort signal for this call.
-     * If provided, the call will be aborted when the signal is aborted.
-     * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
-     */
-    signal?: AbortSignal;
-}
-export interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {
-    function_call?: OpenAIClient.Chat.ChatCompletionFunctionCallOption;
-    functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];
-}
-export type BaseLanguageModelInput = BasePromptValue | string | BaseMessageLike[];
-/**
- * Base class for language models.
- */
-export declare abstract class BaseLanguageModel<RunOutput = any, CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseLangChain<BaseLanguageModelInput, RunOutput, CallOptions> implements BaseLanguageModelParams {
-    CallOptions: CallOptions;
-    /**
-     * Keys that the language model accepts as call options.
-     */
-    get callKeys(): string[];
-    /**
-     * The async caller should be used by subclasses to make any async calls,
-     * which will thus benefit from the concurrency and retry logic.
-     */
-    caller: AsyncCaller;
-    cache?: BaseCache;
-    constructor({ callbacks, callbackManager, ...params }: BaseLanguageModelParams);
-    abstract generatePrompt(promptValues: BasePromptValue[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
-    abstract predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
-    abstract predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
-    abstract _modelType(): string;
-    abstract _llmType(): string;
-    private _encoding?;
-    getNumTokens(content: MessageContent): Promise<number>;
-    protected static _convertInputToPromptValue(input: BaseLanguageModelInput): BasePromptValue;
-    /**
-     * Get the identifying parameters of the LLM.
-     */
-    _identifyingParams(): Record<string, any>;
-    /**
-     * Create a unique cache key for a specific call to a specific language model.
-     * @param callOptions Call options for the model
-     * @returns A unique cache key.
-     */
-    protected _getSerializedCacheKeyParametersForCall(callOptions: CallOptions): string;
-    /**
-     * @deprecated
-     * Return a json-like object representing this LLM.
-     */
-    serialize(): SerializedLLM;
-    /**
-     * @deprecated
-     * Load an LLM from a json-like object describing it.
-     */
-    static deserialize(data: SerializedLLM): Promise<BaseLanguageModel>;
-}
+export { type SerializedLLM, type BaseLangChainParams, BaseLangChain, type BaseLanguageModelParams, type BaseLanguageModelCallOptions, type BaseFunctionCallOptions, type BaseLanguageModelInput, BaseLanguageModel, } from "langchain-core/schema/language_model";
 export { calculateMaxTokens, getModelContextSize } from "./count_tokens.js";
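
Note: the declaration file keeps every public name, re-exporting interfaces and aliases with inline "type" modifiers, so type-only consumers should compile unchanged. A sketch (the helper itself is hypothetical):

    import type {
      BaseLanguageModelCallOptions,
      BaseLanguageModelInput,
    } from "langchain/base_language";

    // Typed purely against the re-exported declarations.
    function describeCall(
      input: BaseLanguageModelInput,
      options?: BaseLanguageModelCallOptions
    ): string {
      return `input=${typeof input}, stop=${options?.stop?.join("|") ?? "none"}`;
    }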
package/dist/base_language/index.js
@@ -1,194 +1,4 @@
-import { coerceMessageLikeToMessage, } from "../schema/index.js";
-import { AsyncCaller } from "../util/async_caller.js";
-import { getModelNameForTiktoken } from "./count_tokens.js";
-import { encodingForModel } from "../util/tiktoken.js";
-import { Runnable } from "../schema/runnable/index.js";
-import { StringPromptValue } from "../prompts/base.js";
-import { ChatPromptValue } from "../prompts/chat.js";
-import { InMemoryCache } from "../cache/index.js";
-const getVerbosity = () => false;
-/**
- * Base class for language models, chains, tools.
- */
-export class BaseLangChain extends Runnable {
-    get lc_attributes() {
-        return {
-            callbacks: undefined,
-            verbose: undefined,
-        };
-    }
-    constructor(params) {
-        super(params);
-        /**
-         * Whether to print out response text.
-         */
-        Object.defineProperty(this, "verbose", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "callbacks", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "tags", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "metadata", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.verbose = params.verbose ?? getVerbosity();
-        this.callbacks = params.callbacks;
-        this.tags = params.tags ?? [];
-        this.metadata = params.metadata ?? {};
-    }
-}
-/**
- * Base class for language models.
- */
-export class BaseLanguageModel extends BaseLangChain {
-    /**
-     * Keys that the language model accepts as call options.
-     */
-    get callKeys() {
-        return ["stop", "timeout", "signal", "tags", "metadata", "callbacks"];
-    }
-    constructor({ callbacks, callbackManager, ...params }) {
-        super({
-            callbacks: callbacks ?? callbackManager,
-            ...params,
-        });
-        /**
-         * The async caller should be used by subclasses to make any async calls,
-         * which will thus benefit from the concurrency and retry logic.
-         */
-        Object.defineProperty(this, "caller", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "cache", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "_encoding", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        if (typeof params.cache === "object") {
-            this.cache = params.cache;
-        }
-        else if (params.cache) {
-            this.cache = InMemoryCache.global();
-        }
-        else {
-            this.cache = undefined;
-        }
-        this.caller = new AsyncCaller(params ?? {});
-    }
-    async getNumTokens(content) {
-        // TODO: Figure out correct value.
-        if (typeof content !== "string") {
-            return 0;
-        }
-        // fallback to approximate calculation if tiktoken is not available
-        let numTokens = Math.ceil(content.length / 4);
-        if (!this._encoding) {
-            try {
-                this._encoding = await encodingForModel("modelName" in this
-                    ? getModelNameForTiktoken(this.modelName)
-                    : "gpt2");
-            }
-            catch (error) {
-                console.warn("Failed to calculate number of tokens, falling back to approximate count", error);
-            }
-        }
-        if (this._encoding) {
-            numTokens = this._encoding.encode(content).length;
-        }
-        return numTokens;
-    }
-    static _convertInputToPromptValue(input) {
-        if (typeof input === "string") {
-            return new StringPromptValue(input);
-        }
-        else if (Array.isArray(input)) {
-            return new ChatPromptValue(input.map(coerceMessageLikeToMessage));
-        }
-        else {
-            return input;
-        }
-    }
-    /**
-     * Get the identifying parameters of the LLM.
-     */
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    _identifyingParams() {
-        return {};
-    }
-    /**
-     * Create a unique cache key for a specific call to a specific language model.
-     * @param callOptions Call options for the model
-     * @returns A unique cache key.
-     */
-    _getSerializedCacheKeyParametersForCall(callOptions) {
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        const params = {
-            ...this._identifyingParams(),
-            ...callOptions,
-            _type: this._llmType(),
-            _model: this._modelType(),
-        };
-        const filteredEntries = Object.entries(params).filter(([_, value]) => value !== undefined);
-        const serializedEntries = filteredEntries
-            .map(([key, value]) => `${key}:${JSON.stringify(value)}`)
-            .sort()
-            .join(",");
-        return serializedEntries;
-    }
-    /**
-     * @deprecated
-     * Return a json-like object representing this LLM.
-     */
-    serialize() {
-        return {
-            ...this._identifyingParams(),
-            _type: this._llmType(),
-            _model: this._modelType(),
-        };
-    }
-    /**
-     * @deprecated
-     * Load an LLM from a json-like object describing it.
-     */
-    static async deserialize(data) {
-        const { _type, _model, ...rest } = data;
-        if (_model && _model !== "base_chat_model") {
-            throw new Error(`Cannot load LLM with model ${_model}`);
-        }
-        const Cls = {
-            openai: (await import("../chat_models/openai.js")).ChatOpenAI,
-        }[_type];
-        if (Cls === undefined) {
-            throw new Error(`Cannot load LLM with type ${_type}`);
-        }
-        return new Cls(rest);
-    }
-}
+export { BaseLangChain, BaseLanguageModel, } from "langchain-core/schema/language_model";
 /*
  * Export utility functions for token calculations:
  * - calculateMaxTokens: Calculate max tokens for a given model and prompt (the model context size - tokens in prompt).
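
Note: because each shim re-exports the same class objects from langchain-core rather than redefining them, identity-sensitive checks such as instanceof should keep working across the old and new import paths, assuming both specifiers resolve to a single installed copy of langchain-core. A quick sketch using the package names exactly as they appear in this diff:

    import { BaseLanguageModel as FromShim } from "langchain/base_language";
    import { BaseLanguageModel as FromCore } from "langchain-core/schema/language_model";

    // One class definition, two paths to it.
    console.log(FromShim === FromCore); // expected: true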