langchain 0.0.195 → 0.0.197-rc.0

This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (236)
  1. package/LICENSE +21 -0
  2. package/dist/agents/openai/index.cjs +6 -2
  3. package/dist/agents/openai/index.js +6 -2
  4. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +1 -1
  5. package/dist/base_language/count_tokens.cjs +5 -70
  6. package/dist/base_language/count_tokens.d.ts +1 -10
  7. package/dist/base_language/count_tokens.js +1 -65
  8. package/dist/base_language/index.cjs +6 -196
  9. package/dist/base_language/index.d.ts +1 -111
  10. package/dist/base_language/index.js +1 -191
  11. package/dist/cache/base.cjs +15 -37
  12. package/dist/cache/base.d.ts +1 -20
  13. package/dist/cache/base.js +1 -33
  14. package/dist/cache/index.cjs +2 -46
  15. package/dist/cache/index.d.ts +1 -29
  16. package/dist/cache/index.js +1 -45
  17. package/dist/callbacks/base.cjs +3 -139
  18. package/dist/callbacks/base.d.ts +1 -266
  19. package/dist/callbacks/base.js +1 -126
  20. package/dist/callbacks/handlers/console.cjs +14 -221
  21. package/dist/callbacks/handlers/console.d.ts +1 -117
  22. package/dist/callbacks/handlers/console.js +1 -217
  23. package/dist/callbacks/handlers/initialize.cjs +15 -30
  24. package/dist/callbacks/handlers/initialize.d.ts +1 -16
  25. package/dist/callbacks/handlers/initialize.js +1 -27
  26. package/dist/callbacks/handlers/log_stream.cjs +15 -293
  27. package/dist/callbacks/handlers/log_stream.d.ts +1 -100
  28. package/dist/callbacks/handlers/log_stream.js +1 -289
  29. package/dist/callbacks/handlers/run_collector.cjs +15 -48
  30. package/dist/callbacks/handlers/run_collector.d.ts +1 -26
  31. package/dist/callbacks/handlers/run_collector.js +1 -46
  32. package/dist/callbacks/handlers/tracer.cjs +15 -375
  33. package/dist/callbacks/handlers/tracer.d.ts +1 -70
  34. package/dist/callbacks/handlers/tracer.js +1 -373
  35. package/dist/callbacks/handlers/tracer_langchain.cjs +15 -104
  36. package/dist/callbacks/handlers/tracer_langchain.d.ts +1 -41
  37. package/dist/callbacks/handlers/tracer_langchain.js +1 -102
  38. package/dist/callbacks/handlers/tracer_langchain_v1.cjs +15 -197
  39. package/dist/callbacks/handlers/tracer_langchain_v1.d.ts +1 -57
  40. package/dist/callbacks/handlers/tracer_langchain_v1.js +1 -195
  41. package/dist/callbacks/manager.cjs +15 -676
  42. package/dist/callbacks/manager.d.ts +1 -180
  43. package/dist/callbacks/manager.js +1 -666
  44. package/dist/callbacks/promises.cjs +14 -42
  45. package/dist/callbacks/promises.d.ts +1 -11
  46. package/dist/callbacks/promises.js +1 -37
  47. package/dist/chains/graph_qa/prompts.d.ts +1 -1
  48. package/dist/chains/openai_functions/structured_output.cjs +2 -2
  49. package/dist/chains/openai_functions/structured_output.d.ts +1 -1
  50. package/dist/chains/openai_functions/structured_output.js +1 -1
  51. package/dist/chat_models/anthropic.cjs +15 -348
  52. package/dist/chat_models/anthropic.d.ts +1 -156
  53. package/dist/chat_models/anthropic.js +1 -346
  54. package/dist/chat_models/baiduwenxin.d.ts +1 -1
  55. package/dist/chat_models/base.cjs +15 -296
  56. package/dist/chat_models/base.d.ts +1 -122
  57. package/dist/chat_models/base.js +1 -292
  58. package/dist/chat_models/bedrock/web.cjs +21 -1
  59. package/dist/chat_models/bedrock/web.d.ts +2 -2
  60. package/dist/chat_models/bedrock/web.js +21 -1
  61. package/dist/chat_models/fireworks.d.ts +1 -1
  62. package/dist/document.cjs +2 -24
  63. package/dist/document.d.ts +1 -12
  64. package/dist/document.js +1 -23
  65. package/dist/document_loaders/web/azure_blob_storage_file.d.ts +1 -1
  66. package/dist/document_loaders/web/github.cjs +105 -0
  67. package/dist/document_loaders/web/github.d.ts +26 -0
  68. package/dist/document_loaders/web/github.js +105 -0
  69. package/dist/document_loaders/web/s3.d.ts +1 -1
  70. package/dist/embeddings/base.cjs +15 -22
  71. package/dist/embeddings/base.d.ts +1 -33
  72. package/dist/embeddings/base.js +1 -20
  73. package/dist/embeddings/cache_backed.cjs +2 -2
  74. package/dist/embeddings/cache_backed.js +1 -1
  75. package/dist/evaluation/agents/trajectory.d.ts +1 -1
  76. package/dist/evaluation/criteria/prompt.d.ts +2 -2
  77. package/dist/evaluation/qa/prompt.d.ts +2 -2
  78. package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +1 -1
  79. package/dist/experimental/plan_and_execute/prompt.d.ts +1 -1
  80. package/dist/llms/base.cjs +15 -278
  81. package/dist/llms/base.d.ts +1 -115
  82. package/dist/llms/base.js +1 -275
  83. package/dist/llms/bedrock/web.cjs +21 -1
  84. package/dist/llms/bedrock/web.d.ts +2 -2
  85. package/dist/llms/bedrock/web.js +21 -1
  86. package/dist/llms/fireworks.d.ts +1 -1
  87. package/dist/load/import_map.cjs +2 -1
  88. package/dist/load/import_map.d.ts +1 -0
  89. package/dist/load/import_map.js +1 -0
  90. package/dist/load/index.cjs +7 -148
  91. package/dist/load/index.js +7 -148
  92. package/dist/load/map_keys.cjs +0 -24
  93. package/dist/load/map_keys.d.ts +0 -6
  94. package/dist/load/map_keys.js +1 -17
  95. package/dist/load/serializable.cjs +15 -178
  96. package/dist/load/serializable.d.ts +1 -66
  97. package/dist/load/serializable.js +1 -175
  98. package/dist/memory/base.cjs +17 -92
  99. package/dist/memory/base.d.ts +2 -68
  100. package/dist/memory/base.js +2 -87
  101. package/dist/output_parsers/list.cjs +4 -122
  102. package/dist/output_parsers/list.d.ts +1 -57
  103. package/dist/output_parsers/list.js +1 -119
  104. package/dist/output_parsers/openai_functions.cjs +2 -2
  105. package/dist/output_parsers/openai_functions.d.ts +1 -1
  106. package/dist/output_parsers/openai_functions.js +1 -1
  107. package/dist/output_parsers/regex.d.ts +1 -1
  108. package/dist/output_parsers/structured.d.ts +1 -1
  109. package/dist/prompts/base.cjs +8 -183
  110. package/dist/prompts/base.d.ts +3 -132
  111. package/dist/prompts/base.js +3 -178
  112. package/dist/prompts/chat.cjs +13 -477
  113. package/dist/prompts/chat.d.ts +2 -219
  114. package/dist/prompts/chat.js +2 -466
  115. package/dist/prompts/few_shot.cjs +3 -352
  116. package/dist/prompts/few_shot.d.ts +1 -192
  117. package/dist/prompts/few_shot.js +1 -350
  118. package/dist/prompts/index.cjs +3 -2
  119. package/dist/prompts/index.d.ts +2 -1
  120. package/dist/prompts/index.js +2 -1
  121. package/dist/prompts/pipeline.cjs +2 -141
  122. package/dist/prompts/pipeline.d.ts +1 -98
  123. package/dist/prompts/pipeline.js +1 -140
  124. package/dist/prompts/prompt.cjs +2 -145
  125. package/dist/prompts/prompt.d.ts +1 -92
  126. package/dist/prompts/prompt.js +1 -144
  127. package/dist/prompts/selectors/LengthBasedExampleSelector.cjs +2 -147
  128. package/dist/prompts/selectors/LengthBasedExampleSelector.d.ts +1 -89
  129. package/dist/prompts/selectors/LengthBasedExampleSelector.js +1 -146
  130. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.cjs +15 -137
  131. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.d.ts +1 -91
  132. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.js +1 -135
  133. package/dist/prompts/selectors/conditional.cjs +5 -73
  134. package/dist/prompts/selectors/conditional.d.ts +1 -63
  135. package/dist/prompts/selectors/conditional.js +1 -69
  136. package/dist/prompts/serde.d.ts +1 -43
  137. package/dist/prompts/template.cjs +8 -88
  138. package/dist/prompts/template.d.ts +1 -36
  139. package/dist/prompts/template.js +1 -83
  140. package/dist/{util/@cfworker/json-schema → runnables}/index.cjs +1 -1
  141. package/dist/runnables/index.d.ts +1 -0
  142. package/dist/runnables/index.js +1 -0
  143. package/dist/schema/document.cjs +3 -34
  144. package/dist/schema/document.d.ts +2 -29
  145. package/dist/schema/document.js +2 -32
  146. package/dist/schema/index.cjs +37 -612
  147. package/dist/schema/index.d.ts +11 -311
  148. package/dist/schema/index.js +8 -583
  149. package/dist/schema/output_parser.cjs +15 -309
  150. package/dist/schema/output_parser.d.ts +1 -173
  151. package/dist/schema/output_parser.js +1 -301
  152. package/dist/schema/retriever.cjs +15 -77
  153. package/dist/schema/retriever.d.ts +1 -43
  154. package/dist/schema/retriever.js +1 -75
  155. package/dist/schema/runnable/base.cjs +10 -1072
  156. package/dist/schema/runnable/base.d.ts +1 -356
  157. package/dist/schema/runnable/base.js +1 -1060
  158. package/dist/schema/runnable/branch.cjs +2 -131
  159. package/dist/schema/runnable/branch.d.ts +1 -94
  160. package/dist/schema/runnable/branch.js +1 -130
  161. package/dist/schema/runnable/config.cjs +0 -6
  162. package/dist/schema/runnable/config.d.ts +1 -3
  163. package/dist/schema/runnable/config.js +1 -4
  164. package/dist/schema/runnable/index.cjs +15 -16
  165. package/dist/schema/runnable/index.d.ts +1 -5
  166. package/dist/schema/runnable/index.js +1 -4
  167. package/dist/schema/runnable/passthrough.cjs +3 -113
  168. package/dist/schema/runnable/passthrough.d.ts +1 -72
  169. package/dist/schema/runnable/passthrough.js +1 -111
  170. package/dist/schema/runnable/router.cjs +2 -71
  171. package/dist/schema/runnable/router.d.ts +1 -29
  172. package/dist/schema/runnable/router.js +1 -70
  173. package/dist/schema/storage.cjs +15 -8
  174. package/dist/schema/storage.d.ts +1 -57
  175. package/dist/schema/storage.js +1 -6
  176. package/dist/tools/bingserpapi.d.ts +1 -1
  177. package/dist/tools/searchapi.d.ts +1 -1
  178. package/dist/tools/serpapi.d.ts +1 -1
  179. package/dist/tools/serper.d.ts +1 -1
  180. package/dist/util/async_caller.cjs +14 -128
  181. package/dist/util/async_caller.d.ts +1 -45
  182. package/dist/util/async_caller.js +1 -124
  183. package/dist/vectorstores/momento_vector_index.cjs +39 -0
  184. package/dist/vectorstores/momento_vector_index.d.ts +17 -1
  185. package/dist/vectorstores/momento_vector_index.js +40 -1
  186. package/dist/vectorstores/mongodb_atlas.cjs +22 -2
  187. package/dist/vectorstores/mongodb_atlas.d.ts +13 -0
  188. package/dist/vectorstores/mongodb_atlas.js +22 -2
  189. package/package.json +18 -11
  190. package/runnables.cjs +1 -0
  191. package/runnables.d.ts +1 -0
  192. package/runnables.js +1 -0
  193. package/dist/util/@cfworker/json-schema/index.d.ts +0 -1
  194. package/dist/util/@cfworker/json-schema/index.js +0 -1
  195. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.cjs +0 -43
  196. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.d.ts +0 -1
  197. package/dist/util/@cfworker/json-schema/src/deep-compare-strict.js +0 -39
  198. package/dist/util/@cfworker/json-schema/src/dereference.cjs +0 -169
  199. package/dist/util/@cfworker/json-schema/src/dereference.d.ts +0 -12
  200. package/dist/util/@cfworker/json-schema/src/dereference.js +0 -165
  201. package/dist/util/@cfworker/json-schema/src/format.cjs +0 -139
  202. package/dist/util/@cfworker/json-schema/src/format.d.ts +0 -2
  203. package/dist/util/@cfworker/json-schema/src/format.js +0 -136
  204. package/dist/util/@cfworker/json-schema/src/index.cjs +0 -24
  205. package/dist/util/@cfworker/json-schema/src/index.d.ts +0 -8
  206. package/dist/util/@cfworker/json-schema/src/index.js +0 -8
  207. package/dist/util/@cfworker/json-schema/src/pointer.cjs +0 -11
  208. package/dist/util/@cfworker/json-schema/src/pointer.d.ts +0 -2
  209. package/dist/util/@cfworker/json-schema/src/pointer.js +0 -6
  210. package/dist/util/@cfworker/json-schema/src/types.cjs +0 -2
  211. package/dist/util/@cfworker/json-schema/src/types.d.ts +0 -72
  212. package/dist/util/@cfworker/json-schema/src/types.js +0 -1
  213. package/dist/util/@cfworker/json-schema/src/ucs2-length.cjs +0 -28
  214. package/dist/util/@cfworker/json-schema/src/ucs2-length.d.ts +0 -6
  215. package/dist/util/@cfworker/json-schema/src/ucs2-length.js +0 -24
  216. package/dist/util/@cfworker/json-schema/src/validate.cjs +0 -808
  217. package/dist/util/@cfworker/json-schema/src/validate.d.ts +0 -3
  218. package/dist/util/@cfworker/json-schema/src/validate.js +0 -804
  219. package/dist/util/@cfworker/json-schema/src/validator.cjs +0 -44
  220. package/dist/util/@cfworker/json-schema/src/validator.d.ts +0 -10
  221. package/dist/util/@cfworker/json-schema/src/validator.js +0 -40
  222. package/dist/util/fast-json-patch/index.cjs +0 -49
  223. package/dist/util/fast-json-patch/index.d.ts +0 -22
  224. package/dist/util/fast-json-patch/index.js +0 -16
  225. package/dist/util/fast-json-patch/src/core.cjs +0 -469
  226. package/dist/util/fast-json-patch/src/core.d.ts +0 -111
  227. package/dist/util/fast-json-patch/src/core.js +0 -459
  228. package/dist/util/fast-json-patch/src/duplex.cjs +0 -237
  229. package/dist/util/fast-json-patch/src/duplex.d.ts +0 -23
  230. package/dist/util/fast-json-patch/src/duplex.js +0 -230
  231. package/dist/util/fast-json-patch/src/helpers.cjs +0 -194
  232. package/dist/util/fast-json-patch/src/helpers.d.ts +0 -36
  233. package/dist/util/fast-json-patch/src/helpers.js +0 -181
  234. package/dist/util/js-sha1/hash.cjs +0 -358
  235. package/dist/util/js-sha1/hash.d.ts +0 -1
  236. package/dist/util/js-sha1/hash.js +0 -355
package/dist/chat_models/anthropic.js
@@ -1,346 +1 @@
- import { Anthropic, AI_PROMPT, HUMAN_PROMPT, } from "@anthropic-ai/sdk";
- import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, } from "../schema/index.js";
- import { getEnvironmentVariable } from "../util/env.js";
- import { BaseChatModel } from "./base.js";
- /**
- * Extracts the custom role of a generic chat message.
- * @param message The chat message from which to extract the custom role.
- * @returns The custom role of the chat message.
- */
- function extractGenericMessageCustomRole(message) {
- if (message.role !== AI_PROMPT &&
- message.role !== HUMAN_PROMPT &&
- message.role !== "") {
- console.warn(`Unknown message role: ${message.role}`);
- }
- return message.role;
- }
- /**
- * Gets the Anthropic prompt from a base message.
- * @param message The base message from which to get the Anthropic prompt.
- * @returns The Anthropic prompt from the base message.
- */
- function getAnthropicPromptFromMessage(message) {
- const type = message._getType();
- switch (type) {
- case "ai":
- return AI_PROMPT;
- case "human":
- return HUMAN_PROMPT;
- case "system":
- return "";
- case "generic": {
- if (!ChatMessage.isInstance(message))
- throw new Error("Invalid generic chat message");
- return extractGenericMessageCustomRole(message);
- }
- default:
- throw new Error(`Unknown message type: ${type}`);
- }
- }
- export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
- /**
- * Wrapper around Anthropic large language models.
- *
- * To use you should have the `@anthropic-ai/sdk` package installed, with the
- * `ANTHROPIC_API_KEY` environment variable set.
- *
- * @remarks
- * Any parameters that are valid to be passed to {@link
- * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} can be passed through {@link invocationKwargs},
- * even if not explicitly available on this class.
- * @example
- * ```typescript
- * const model = new ChatAnthropic({
- * temperature: 0.9,
- * anthropicApiKey: 'YOUR-API-KEY',
- * });
- * const res = await model.invoke({ input: 'Hello!' });
- * console.log(res);
- * ```
- */
- export class ChatAnthropic extends BaseChatModel {
- static lc_name() {
- return "ChatAnthropic";
- }
- get lc_secrets() {
- return {
- anthropicApiKey: "ANTHROPIC_API_KEY",
- };
- }
- get lc_aliases() {
- return {
- modelName: "model",
- };
- }
- constructor(fields) {
- super(fields ?? {});
- Object.defineProperty(this, "lc_serializable", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: true
- });
- Object.defineProperty(this, "anthropicApiKey", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "apiUrl", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "temperature", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 1
- });
- Object.defineProperty(this, "topK", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: -1
- });
- Object.defineProperty(this, "topP", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: -1
- });
- Object.defineProperty(this, "maxTokensToSample", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 2048
- });
- Object.defineProperty(this, "modelName", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "claude-2"
- });
- Object.defineProperty(this, "invocationKwargs", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "stopSequences", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "streaming", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: false
- });
- Object.defineProperty(this, "clientOptions", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- // Used for non-streaming requests
- Object.defineProperty(this, "batchClient", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- // Used for streaming requests
- Object.defineProperty(this, "streamingClient", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.anthropicApiKey =
- fields?.anthropicApiKey ?? getEnvironmentVariable("ANTHROPIC_API_KEY");
- if (!this.anthropicApiKey) {
- throw new Error("Anthropic API key not found");
- }
- // Support overriding the default API URL (i.e., https://api.anthropic.com)
- this.apiUrl = fields?.anthropicApiUrl;
- this.modelName = fields?.modelName ?? this.modelName;
- this.invocationKwargs = fields?.invocationKwargs ?? {};
- this.temperature = fields?.temperature ?? this.temperature;
- this.topK = fields?.topK ?? this.topK;
- this.topP = fields?.topP ?? this.topP;
- this.maxTokensToSample =
- fields?.maxTokensToSample ?? this.maxTokensToSample;
- this.stopSequences = fields?.stopSequences ?? this.stopSequences;
- this.streaming = fields?.streaming ?? false;
- this.clientOptions = fields?.clientOptions ?? {};
- }
- /**
- * Get the parameters used to invoke the model
- */
- invocationParams(options) {
- return {
- model: this.modelName,
- temperature: this.temperature,
- top_k: this.topK,
- top_p: this.topP,
- stop_sequences: options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
- this.stopSequences ??
- DEFAULT_STOP_SEQUENCES,
- max_tokens_to_sample: this.maxTokensToSample,
- stream: this.streaming,
- ...this.invocationKwargs,
- };
- }
- /** @ignore */
- _identifyingParams() {
- return {
- model_name: this.modelName,
- ...this.invocationParams(),
- };
- }
- /**
- * Get the identifying parameters for the model
- */
- identifyingParams() {
- return {
- model_name: this.modelName,
- ...this.invocationParams(),
- };
- }
- async *_streamResponseChunks(messages, options, runManager) {
- const params = this.invocationParams(options);
- const stream = await this.createStreamWithRetry({
- ...params,
- prompt: this.formatMessagesAsPrompt(messages),
- });
- let modelSent = false;
- let stopReasonSent = false;
- for await (const data of stream) {
- if (options.signal?.aborted) {
- stream.controller.abort();
- throw new Error("AbortError: User aborted the request.");
- }
- const additional_kwargs = {};
- if (data.model && !modelSent) {
- additional_kwargs.model = data.model;
- modelSent = true;
- }
- else if (data.stop_reason && !stopReasonSent) {
- additional_kwargs.stop_reason = data.stop_reason;
- stopReasonSent = true;
- }
- const delta = data.completion ?? "";
- yield new ChatGenerationChunk({
- message: new AIMessageChunk({
- content: delta,
- additional_kwargs,
- }),
- text: delta,
- });
- await runManager?.handleLLMNewToken(delta);
- if (data.stop_reason) {
- break;
- }
- }
- }
- /**
- * Formats messages as a prompt for the model.
- * @param messages The base messages to format as a prompt.
- * @returns The formatted prompt.
- */
- formatMessagesAsPrompt(messages) {
- return (messages
- .map((message) => {
- const messagePrompt = getAnthropicPromptFromMessage(message);
- return `${messagePrompt} ${message.content}`;
- })
- .join("") + AI_PROMPT);
- }
- /** @ignore */
- async _generate(messages, options, runManager) {
- if (this.stopSequences && options.stop) {
- throw new Error(`"stopSequence" parameter found in input and default params`);
- }
- const params = this.invocationParams(options);
- let response;
- if (params.stream) {
- response = {
- completion: "",
- model: "",
- stop_reason: "",
- };
- const stream = await this._streamResponseChunks(messages, options, runManager);
- for await (const chunk of stream) {
- response.completion += chunk.message.content;
- response.model =
- chunk.message.additional_kwargs.model ?? response.model;
- response.stop_reason =
- chunk.message.additional_kwargs.stop_reason ??
- response.stop_reason;
- }
- }
- else {
- response = await this.completionWithRetry({
- ...params,
- prompt: this.formatMessagesAsPrompt(messages),
- }, { signal: options.signal });
- }
- const generations = (response.completion ?? "")
- .split(AI_PROMPT)
- .map((message) => ({
- text: message,
- message: new AIMessage(message),
- }));
- return {
- generations,
- };
- }
- /**
- * Creates a streaming request with retry.
- * @param request The parameters for creating a completion.
- * @returns A streaming request.
- */
- async createStreamWithRetry(request) {
- if (!this.streamingClient) {
- const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
- this.streamingClient = new Anthropic({
- ...this.clientOptions,
- ...options,
- apiKey: this.anthropicApiKey,
- maxRetries: 0,
- });
- }
- const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
- return this.caller.call(makeCompletionRequest);
- }
- /** @ignore */
- async completionWithRetry(request, options) {
- if (!this.anthropicApiKey) {
- throw new Error("Missing Anthropic API key.");
- }
- if (!this.batchClient) {
- const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
- this.batchClient = new Anthropic({
- ...this.clientOptions,
- ...options,
- apiKey: this.anthropicApiKey,
- maxRetries: 0,
- });
- }
- const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
- return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
- }
- _llmType() {
- return "anthropic";
- }
- /** @ignore */
- _combineLLMOutput() {
- return [];
- }
- }
+ export * from "@langchain/anthropic";
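With this change the entire `ChatAnthropic` implementation above collapses into a re-export, so the class is now provided by the standalone `@langchain/anthropic` package. A minimal sketch of consumer code, assuming `@langchain/anthropic` is resolvable in the project; the existing `langchain/chat_models/anthropic` entrypoint should continue to resolve because it simply re-exports the new package:

```typescript
// The long-standing deep import keeps working; it is now a thin
// pass-through to @langchain/anthropic.
import { ChatAnthropic } from "langchain/chat_models/anthropic";

const model = new ChatAnthropic({
  temperature: 0.9,
  anthropicApiKey: "YOUR-API-KEY", // or set ANTHROPIC_API_KEY in the environment
});

const res = await model.invoke("Hello!");
console.log(res.content);
```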
package/dist/chat_models/baiduwenxin.d.ts
@@ -131,8 +131,8 @@ export declare class ChatBaiduWenxin extends BaseChatModel implements BaiduWenxi
   * Get the identifying parameters for the model
   */
  identifyingParams(): {
- system?: string | undefined;
  stream?: boolean | undefined;
+ system?: string | undefined;
  temperature?: number | undefined;
  top_p?: number | undefined;
  user_id?: string | undefined;
package/dist/chat_models/base.cjs
@@ -1,298 +1,17 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.SimpleChatModel = exports.BaseChatModel = exports.createChatMessageChunkEncoderStream = void 0;
- const index_js_1 = require("../schema/index.cjs");
- const index_js_2 = require("../base_language/index.cjs");
- const manager_js_1 = require("../callbacks/manager.cjs");
- /**
- * Creates a transform stream for encoding chat message chunks.
- * @deprecated Use {@link BytesOutputParser} instead
- * @returns A TransformStream instance that encodes chat message chunks.
- */
- function createChatMessageChunkEncoderStream() {
- const textEncoder = new TextEncoder();
- return new TransformStream({
- transform(chunk, controller) {
- controller.enqueue(textEncoder.encode(typeof chunk.content === "string"
- ? chunk.content
- : JSON.stringify(chunk.content)));
- },
- });
- }
- exports.createChatMessageChunkEncoderStream = createChatMessageChunkEncoderStream;
- /**
- * Base class for chat models. It extends the BaseLanguageModel class and
- * provides methods for generating chat based on input messages.
- */
- class BaseChatModel extends index_js_2.BaseLanguageModel {
- constructor(fields) {
- super(fields);
- Object.defineProperty(this, "lc_namespace", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: ["langchain", "chat_models", this._llmType()]
- });
- }
- _separateRunnableConfigFromCallOptions(options) {
- const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options);
- if (callOptions?.timeout && !callOptions.signal) {
- callOptions.signal = AbortSignal.timeout(callOptions.timeout);
- }
- return [runnableConfig, callOptions];
- }
- /**
- * Invokes the chat model with a single input.
- * @param input The input for the language model.
- * @param options The call options.
- * @returns A Promise that resolves to a BaseMessageChunk.
- */
- async invoke(input, options) {
- const promptValue = BaseChatModel._convertInputToPromptValue(input);
- const result = await this.generatePrompt([promptValue], options, options?.callbacks);
- const chatGeneration = result.generations[0][0];
- // TODO: Remove cast after figuring out inheritance
- return chatGeneration.message;
- }
- // eslint-disable-next-line require-yield
- async *_streamResponseChunks(_messages, _options, _runManager) {
- throw new Error("Not implemented.");
- }
- async *_streamIterator(input, options) {
- // Subclass check required to avoid double callbacks with default implementation
- if (this._streamResponseChunks ===
- BaseChatModel.prototype._streamResponseChunks) {
- yield this.invoke(input, options);
- }
- else {
- const prompt = BaseChatModel._convertInputToPromptValue(input);
- const messages = prompt.toChatMessages();
- const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(options);
- const callbackManager_ = await manager_js_1.CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
- const extra = {
- options: callOptions,
- invocation_params: this?.invocationParams(callOptions),
- batch_size: 1,
- };
- const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), [messages], undefined, undefined, extra, undefined, undefined, runnableConfig.runName);
- let generationChunk;
- try {
- for await (const chunk of this._streamResponseChunks(messages, callOptions, runManagers?.[0])) {
- yield chunk.message;
- if (!generationChunk) {
- generationChunk = chunk;
- }
- else {
- generationChunk = generationChunk.concat(chunk);
- }
- }
- }
- catch (err) {
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
- throw err;
- }
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({
- // TODO: Remove cast after figuring out inheritance
- generations: [[generationChunk]],
- })));
- }
- }
- /** @ignore */
- async _generateUncached(messages, parsedOptions, handledOptions) {
- const baseMessages = messages.map((messageList) => messageList.map(index_js_1.coerceMessageLikeToMessage));
- // create callback manager and start run
- const callbackManager_ = await manager_js_1.CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
- const extra = {
- options: parsedOptions,
- invocation_params: this?.invocationParams(parsedOptions),
- batch_size: 1,
- };
- const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, undefined, undefined, extra, undefined, undefined, handledOptions.runName);
- // generate results
- const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
- // handle results
- const generations = [];
- const llmOutputs = [];
- await Promise.all(results.map(async (pResult, i) => {
- if (pResult.status === "fulfilled") {
- const result = pResult.value;
- generations[i] = result.generations;
- llmOutputs[i] = result.llmOutput;
- return runManagers?.[i]?.handleLLMEnd({
- generations: [result.generations],
- llmOutput: result.llmOutput,
- });
- }
- else {
- // status === "rejected"
- await runManagers?.[i]?.handleLLMError(pResult.reason);
- return Promise.reject(pResult.reason);
- }
- }));
- // create combined output
- const output = {
- generations,
- llmOutput: llmOutputs.length
- ? this._combineLLMOutput?.(...llmOutputs)
- : undefined,
- };
- Object.defineProperty(output, index_js_1.RUN_KEY, {
- value: runManagers
- ? { runIds: runManagers?.map((manager) => manager.runId) }
- : undefined,
- configurable: true,
- });
- return output;
- }
- /**
- * Generates chat based on the input messages.
- * @param messages An array of arrays of BaseMessage instances.
- * @param options The call options or an array of stop sequences.
- * @param callbacks The callbacks for the language model.
- * @returns A Promise that resolves to an LLMResult.
- */
- async generate(messages, options, callbacks) {
- // parse call options
- let parsedOptions;
- if (Array.isArray(options)) {
- parsedOptions = { stop: options };
- }
- else {
- parsedOptions = options;
- }
- const baseMessages = messages.map((messageList) => messageList.map(index_js_1.coerceMessageLikeToMessage));
- const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptions(parsedOptions);
- runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
- if (!this.cache) {
- return this._generateUncached(baseMessages, callOptions, runnableConfig);
- }
- const { cache } = this;
- const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
- const missingPromptIndices = [];
- const generations = await Promise.all(baseMessages.map(async (baseMessage, index) => {
- // Join all content into one string for the prompt index
- const prompt = BaseChatModel._convertInputToPromptValue(baseMessage).toString();
- const result = await cache.lookup(prompt, llmStringKey);
- if (!result) {
- missingPromptIndices.push(index);
- }
- return result;
- }));
- let llmOutput = {};
- if (missingPromptIndices.length > 0) {
- const results = await this._generateUncached(missingPromptIndices.map((i) => baseMessages[i]), callOptions, runnableConfig);
- await Promise.all(results.generations.map(async (generation, index) => {
- const promptIndex = missingPromptIndices[index];
- generations[promptIndex] = generation;
- // Join all content into one string for the prompt index
- const prompt = BaseChatModel._convertInputToPromptValue(baseMessages[promptIndex]).toString();
- return cache.update(prompt, llmStringKey, generation);
- }));
- llmOutput = results.llmOutput ?? {};
- }
- return { generations, llmOutput };
- }
- /**
- * Get the parameters used to invoke the model
- */
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- invocationParams(_options) {
- return {};
- }
- _modelType() {
- return "base_chat_model";
- }
- /**
- * @deprecated
- * Return a json-like object representing this LLM.
- */
- serialize() {
- return {
- ...this.invocationParams(),
- _type: this._llmType(),
- _model: this._modelType(),
- };
- }
- /**
- * Generates a prompt based on the input prompt values.
- * @param promptValues An array of BasePromptValue instances.
- * @param options The call options or an array of stop sequences.
- * @param callbacks The callbacks for the language model.
- * @returns A Promise that resolves to an LLMResult.
- */
- async generatePrompt(promptValues, options, callbacks) {
- const promptMessages = promptValues.map((promptValue) => promptValue.toChatMessages());
- return this.generate(promptMessages, options, callbacks);
- }
- /**
- * Makes a single call to the chat model.
- * @param messages An array of BaseMessage instances.
- * @param options The call options or an array of stop sequences.
- * @param callbacks The callbacks for the language model.
- * @returns A Promise that resolves to a BaseMessage.
- */
- async call(messages, options, callbacks) {
- const result = await this.generate([messages.map(index_js_1.coerceMessageLikeToMessage)], options, callbacks);
- const generations = result.generations;
- return generations[0][0].message;
- }
- /**
- * Makes a single call to the chat model with a prompt value.
- * @param promptValue The value of the prompt.
- * @param options The call options or an array of stop sequences.
- * @param callbacks The callbacks for the language model.
- * @returns A Promise that resolves to a BaseMessage.
- */
- async callPrompt(promptValue, options, callbacks) {
- const promptMessages = promptValue.toChatMessages();
- return this.call(promptMessages, options, callbacks);
- }
- /**
- * Predicts the next message based on the input messages.
- * @param messages An array of BaseMessage instances.
- * @param options The call options or an array of stop sequences.
- * @param callbacks The callbacks for the language model.
- * @returns A Promise that resolves to a BaseMessage.
- */
- async predictMessages(messages, options, callbacks) {
- return this.call(messages, options, callbacks);
- }
- /**
- * Predicts the next message based on a text input.
- * @param text The text input.
- * @param options The call options or an array of stop sequences.
- * @param callbacks The callbacks for the language model.
- * @returns A Promise that resolves to a string.
- */
- async predict(text, options, callbacks) {
- const message = new index_js_1.HumanMessage(text);
- const result = await this.call([message], options, callbacks);
- if (typeof result.content !== "string") {
- throw new Error("Cannot use predict when output is not a string.");
- }
- return result.content;
- }
- }
- exports.BaseChatModel = BaseChatModel;
- /**
- * An abstract class that extends BaseChatModel and provides a simple
- * implementation of _generate.
- */
- class SimpleChatModel extends BaseChatModel {
- async _generate(messages, options, runManager) {
- const text = await this._call(messages, options, runManager);
- const message = new index_js_1.AIMessage(text);
- if (typeof message.content !== "string") {
- throw new Error("Cannot generate with a simple chat model when output is not a string.");
- }
- return {
- generations: [
- {
- text: message.content,
- message,
- },
- ],
- };
- }
- }
- exports.SimpleChatModel = SimpleChatModel;
+ __exportStar(require("langchain-core/language_models/chat_models"), exports);
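The `__createBinding`/`__exportStar` preamble added to `base.cjs` is TypeScript's standard CommonJS emit for a star re-export, so the roughly 300 removed lines reduce to a single statement in source form. A sketch of the implied source, with the module specifier taken verbatim from the compiled output above:

```typescript
// Presumed source behind the new base.cjs / base.js output:
// chat_models/base becomes a pass-through, re-exporting the chat model
// base classes (BaseChatModel, SimpleChatModel, and related helpers)
// from the core package instead of defining them locally.
export * from "langchain-core/language_models/chat_models";
```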