langchain 0.0.152 → 0.0.154

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/chat_models/fireworks.cjs +1 -0
  2. package/chat_models/fireworks.d.ts +1 -0
  3. package/chat_models/fireworks.js +1 -0
  4. package/dist/agents/executor.cjs +9 -2
  5. package/dist/agents/executor.js +9 -2
  6. package/dist/base_language/count_tokens.cjs +1 -1
  7. package/dist/base_language/count_tokens.js +1 -1
  8. package/dist/base_language/index.cjs +36 -0
  9. package/dist/base_language/index.d.ts +9 -1
  10. package/dist/base_language/index.js +36 -0
  11. package/dist/cache/base.cjs +24 -1
  12. package/dist/cache/base.d.ts +9 -0
  13. package/dist/cache/base.js +21 -0
  14. package/dist/cache/cloudflare_kv.cjs +2 -5
  15. package/dist/cache/cloudflare_kv.js +3 -6
  16. package/dist/cache/ioredis.cjs +16 -6
  17. package/dist/cache/ioredis.d.ts +5 -2
  18. package/dist/cache/ioredis.js +17 -7
  19. package/dist/cache/momento.cjs +6 -2
  20. package/dist/cache/momento.js +7 -3
  21. package/dist/cache/redis.cjs +3 -5
  22. package/dist/cache/redis.js +4 -6
  23. package/dist/cache/upstash_redis.cjs +2 -5
  24. package/dist/cache/upstash_redis.js +3 -6
  25. package/dist/chains/openai_functions/structured_output.d.ts +2 -2
  26. package/dist/chat_models/base.cjs +64 -20
  27. package/dist/chat_models/base.d.ts +8 -1
  28. package/dist/chat_models/base.js +64 -20
  29. package/dist/chat_models/fireworks.cjs +81 -0
  30. package/dist/chat_models/fireworks.d.ts +33 -0
  31. package/dist/chat_models/fireworks.js +77 -0
  32. package/dist/chat_models/ollama.cjs +22 -5
  33. package/dist/chat_models/ollama.d.ts +1 -2
  34. package/dist/chat_models/ollama.js +22 -5
  35. package/dist/chat_models/openai.d.ts +2 -2
  36. package/dist/llms/base.cjs +10 -26
  37. package/dist/llms/base.d.ts +4 -4
  38. package/dist/llms/base.js +4 -20
  39. package/dist/llms/fireworks.cjs +92 -0
  40. package/dist/llms/fireworks.d.ts +33 -0
  41. package/dist/llms/fireworks.js +88 -0
  42. package/dist/llms/ollama.cjs +24 -8
  43. package/dist/llms/ollama.d.ts +1 -2
  44. package/dist/llms/ollama.js +24 -8
  45. package/dist/llms/openai-chat.cjs +1 -5
  46. package/dist/llms/openai-chat.d.ts +1 -1
  47. package/dist/llms/openai-chat.js +1 -5
  48. package/dist/llms/openai.cjs +1 -1
  49. package/dist/llms/openai.d.ts +2 -2
  50. package/dist/llms/openai.js +1 -1
  51. package/dist/load/import_map.cjs +4 -2
  52. package/dist/load/import_map.d.ts +2 -0
  53. package/dist/load/import_map.js +2 -0
  54. package/dist/schema/index.cjs +50 -1
  55. package/dist/schema/index.d.ts +5 -0
  56. package/dist/schema/index.js +48 -0
  57. package/dist/schema/output_parser.cjs +38 -6
  58. package/dist/schema/output_parser.d.ts +20 -5
  59. package/dist/schema/output_parser.js +38 -6
  60. package/dist/schema/runnable/base.cjs +65 -10
  61. package/dist/schema/runnable/base.d.ts +17 -3
  62. package/dist/schema/runnable/base.js +65 -10
  63. package/dist/stores/message/utils.cjs +2 -50
  64. package/dist/stores/message/utils.d.ts +0 -14
  65. package/dist/stores/message/utils.js +2 -49
  66. package/dist/util/ollama.cjs +2 -2
  67. package/dist/util/ollama.d.ts +6 -0
  68. package/dist/util/ollama.js +2 -2
  69. package/llms/fireworks.cjs +1 -0
  70. package/llms/fireworks.d.ts +1 -0
  71. package/llms/fireworks.js +1 -0
  72. package/package.json +17 -1
package/dist/llms/base.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { BaseCache, BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
+ import { BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
  import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
  import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
  import { RunnableConfig } from "../schema/runnable/config.js";
@@ -11,7 +11,6 @@ export interface BaseLLMParams extends BaseLanguageModelParams {
  * @deprecated Use `maxConcurrency` instead
  */
  concurrency?: number;
- cache?: BaseCache | boolean;
  }
  export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
  }
@@ -21,8 +20,7 @@ export interface BaseLLMCallOptions extends BaseLanguageModelCallOptions {
  export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
  ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig & "timeout">;
  lc_namespace: string[];
- cache?: BaseCache;
- constructor({ cache, concurrency, ...rest }: BaseLLMParams);
+ constructor({ concurrency, ...rest }: BaseLLMParams);
  /**
  * This method takes an input and options, and returns a string. It
  * converts the input to a prompt value and generates a result based on
@@ -90,11 +88,13 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
  */
  abstract _llmType(): string;
  /**
+ * @deprecated
  * Return a json-like object representing this LLM.
  */
  serialize(): SerializedLLM;
  _modelType(): string;
  /**
+ * @deprecated
  * Load an LLM from a json-like object describing it.
  */
  static deserialize(data: SerializedLLM): Promise<BaseLLM>;
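The `cache` field and constructor option disappear from `BaseLLM` here; together with the `base_language/index.*` additions in the file list, this reads as the caching machinery moving up to `BaseLanguageModel` so chat models can share it. A minimal sketch, assuming the option is unchanged from a caller's perspective:

```ts
import { OpenAI } from "langchain/llms/openai";

// `cache: true` selects the shared in-memory cache; passing a BaseCache
// instance (Redis, Momento, ...) also works.
const model = new OpenAI({ temperature: 0, cache: true });

// The second identical call should be answered from the cache rather
// than by another round trip to the API.
const first = await model.call("Tell me a joke");
const second = await model.call("Tell me a joke");
console.log(first === second); // true on a cache hit
```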
package/dist/llms/base.js CHANGED
@@ -1,4 +1,3 @@
- import { InMemoryCache } from "../cache/index.js";
  import { AIMessage, GenerationChunk, RUN_KEY, } from "../schema/index.js";
  import { BaseLanguageModel, } from "../base_language/index.js";
  import { CallbackManager, } from "../callbacks/manager.js";
@@ -7,7 +6,7 @@ import { getBufferString } from "../memory/base.js";
  * LLM Wrapper. Provides an {@link call} (an {@link generate}) function that takes in a prompt (or prompts) and returns a string.
  */
  export class BaseLLM extends BaseLanguageModel {
- constructor({ cache, concurrency, ...rest }) {
+ constructor({ concurrency, ...rest }) {
  super(concurrency ? { maxConcurrency: concurrency, ...rest } : rest);
  Object.defineProperty(this, "lc_namespace", {
  enumerable: true,
@@ -15,21 +14,6 @@ export class BaseLLM extends BaseLanguageModel {
  writable: true,
  value: ["langchain", "llms", this._llmType()]
  });
- Object.defineProperty(this, "cache", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- if (typeof cache === "object") {
- this.cache = cache;
- }
- else if (cache) {
- this.cache = InMemoryCache.global();
- }
- else {
- this.cache = undefined;
- }
  }
  /**
  * This method takes an input and options, and returns a string. It
@@ -183,9 +167,7 @@ export class BaseLLM extends BaseLanguageModel {
  return this._generateUncached(prompts, callOptions, runnableConfig);
  }
  const { cache } = this;
- const params = this.serialize();
- params.stop = callOptions.stop ?? params.stop;
- const llmStringKey = `${Object.entries(params).sort()}`;
+ const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
  const missingPromptIndices = [];
  const generations = await Promise.all(prompts.map(async (prompt, index) => {
  const result = await cache.lookup(prompt, llmStringKey);
@@ -245,6 +227,7 @@ export class BaseLLM extends BaseLanguageModel {
  return {};
  }
  /**
+ * @deprecated
  * Return a json-like object representing this LLM.
  */
  serialize() {
@@ -258,6 +241,7 @@ export class BaseLLM extends BaseLanguageModel {
  return "base_llm";
  }
  /**
+ * @deprecated
  * Load an LLM from a json-like object describing it.
  */
  static async deserialize(data) {
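The cache key was previously derived from the now-deprecated `serialize()` output; it now comes from `_getSerializedCacheKeyParametersForCall`, which this diff does not show (it presumably lands with the `base_language/index.*` additions). A hypothetical sketch of the idea, with an invented helper name rather than the library's actual implementation:

```ts
// Hypothetical: build a stable cache key from the model's invocation
// parameters plus per-call overrides such as `stop`, so two calls with
// the same effective parameters map to the same cache entry.
function cacheKeyForCall(
  params: Record<string, unknown>,
  callOptions: { stop?: string[] }
): string {
  const merged = { ...params, stop: callOptions.stop ?? params.stop };
  // Sorting the entries makes the key independent of property order.
  return JSON.stringify(Object.entries(merged).sort());
}
```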
package/dist/llms/fireworks.cjs ADDED
@@ -0,0 +1,92 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.Fireworks = void 0;
+ const env_js_1 = require("../util/env.cjs");
+ const openai_js_1 = require("./openai.cjs");
+ /**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+ class Fireworks extends openai_js_1.OpenAI {
+ static lc_name() {
+ return "Fireworks";
+ }
+ _llmType() {
+ return "fireworks";
+ }
+ get lc_secrets() {
+ return {
+ fireworksApiKey: "FIREWORKS_API_KEY",
+ };
+ }
+ constructor(fields) {
+ const fireworksApiKey = fields?.fireworksApiKey || (0, env_js_1.getEnvironmentVariable)("FIREWORKS_API_KEY");
+ if (!fireworksApiKey) {
+ throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+ }
+ super({
+ ...fields,
+ openAIApiKey: fireworksApiKey,
+ modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
+ configuration: {
+ baseURL: "https://api.fireworks.ai/inference/v1",
+ },
+ });
+ Object.defineProperty(this, "lc_serializable", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
+ Object.defineProperty(this, "fireworksApiKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.fireworksApiKey = fireworksApiKey;
+ }
+ toJSON() {
+ const result = super.toJSON();
+ if ("kwargs" in result &&
+ typeof result.kwargs === "object" &&
+ result.kwargs != null) {
+ delete result.kwargs.openai_api_key;
+ delete result.kwargs.configuration;
+ }
+ return result;
+ }
+ /**
+ * Calls the Fireworks API with retry logic in case of failures.
+ * @param request The request to send to the Fireworks API.
+ * @param options Optional configuration for the API call.
+ * @returns The response from the Fireworks API.
+ */
+ async completionWithRetry(request, options) {
+ // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
+ if (Array.isArray(request.prompt)) {
+ if (request.prompt.length > 1) {
+ throw new Error("Multiple prompts are not supported by Fireworks");
+ }
+ const prompt = request.prompt[0];
+ if (typeof prompt !== "string") {
+ throw new Error("Only string prompts are supported by Fireworks");
+ }
+ request.prompt = prompt;
+ }
+ delete request.frequency_penalty;
+ delete request.presence_penalty;
+ delete request.best_of;
+ delete request.logit_bias;
+ if (request.stream === true) {
+ return super.completionWithRetry(request, options);
+ }
+ return super.completionWithRetry(request, options);
+ }
+ }
+ exports.Fireworks = Fireworks;
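The shim files `package/llms/fireworks.*` in the list above expose this class through a `langchain/llms/fireworks` entrypoint. A minimal usage sketch (the model name shown is the default the constructor falls back to):

```ts
import { Fireworks } from "langchain/llms/fireworks";

// Reads FIREWORKS_API_KEY from the environment when the field is omitted.
const model = new Fireworks({
  modelName: "accounts/fireworks/models/llama-v2-13b",
  temperature: 0.7,
});

console.log(await model.call("Say hello in one short sentence."));
```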
package/dist/llms/fireworks.d.ts ADDED
@@ -0,0 +1,33 @@
+ import type { OpenAI as OpenAIClient } from "openai";
+ import type { BaseLLMParams } from "./base.js";
+ import type { OpenAICallOptions, OpenAIInput } from "./openai.js";
+ import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
+ import { OpenAI } from "./openai.js";
+ type FireworksUnsupportedArgs = "frequencyPenalty" | "presencePenalty" | "bestOf" | "logitBias";
+ type FireworksUnsupportedCallOptions = "functions" | "function_call" | "tools";
+ export type FireworksCallOptions = Partial<Omit<OpenAICallOptions, FireworksUnsupportedCallOptions>>;
+ /**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+ export declare class Fireworks extends OpenAI<FireworksCallOptions> {
+ static lc_name(): string;
+ _llmType(): string;
+ get lc_secrets(): {
+ [key: string]: string;
+ } | undefined;
+ lc_serializable: boolean;
+ fireworksApiKey?: string;
+ constructor(fields?: Partial<Omit<OpenAIInput, "openAIApiKey" | FireworksUnsupportedArgs>> & BaseLLMParams & {
+ fireworksApiKey?: string;
+ });
+ toJSON(): import("../load/serializable.js").Serialized;
+ completionWithRetry(request: OpenAIClient.CompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Completion>>;
+ completionWithRetry(request: OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Completions.Completion>;
+ }
+ export {};
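Note the two `FireworksUnsupported*` unions: constructor fields like `frequencyPenalty` are stripped from `OpenAIInput`, and function-calling options are stripped from `OpenAICallOptions`, so unsupported usage fails at compile time rather than at the API. A sketch, assuming a TypeScript consumer:

```ts
import { Fireworks } from "langchain/llms/fireworks";

const model = new Fireworks();

// OK: plain call options still type-check and pass through.
await model.call("Hello", { stop: ["\n"] });

// Would NOT compile: "functions" is excluded from FireworksCallOptions.
// await model.call("Hello", { functions: [] });

// Would NOT compile: "bestOf" is excluded from the constructor fields.
// new Fireworks({ bestOf: 3 });
```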
package/dist/llms/fireworks.js ADDED
@@ -0,0 +1,88 @@
+ import { getEnvironmentVariable } from "../util/env.js";
+ import { OpenAI } from "./openai.js";
+ /**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+ export class Fireworks extends OpenAI {
+ static lc_name() {
+ return "Fireworks";
+ }
+ _llmType() {
+ return "fireworks";
+ }
+ get lc_secrets() {
+ return {
+ fireworksApiKey: "FIREWORKS_API_KEY",
+ };
+ }
+ constructor(fields) {
+ const fireworksApiKey = fields?.fireworksApiKey || getEnvironmentVariable("FIREWORKS_API_KEY");
+ if (!fireworksApiKey) {
+ throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+ }
+ super({
+ ...fields,
+ openAIApiKey: fireworksApiKey,
+ modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
+ configuration: {
+ baseURL: "https://api.fireworks.ai/inference/v1",
+ },
+ });
+ Object.defineProperty(this, "lc_serializable", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
+ Object.defineProperty(this, "fireworksApiKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.fireworksApiKey = fireworksApiKey;
+ }
+ toJSON() {
+ const result = super.toJSON();
+ if ("kwargs" in result &&
+ typeof result.kwargs === "object" &&
+ result.kwargs != null) {
+ delete result.kwargs.openai_api_key;
+ delete result.kwargs.configuration;
+ }
+ return result;
+ }
+ /**
+ * Calls the Fireworks API with retry logic in case of failures.
+ * @param request The request to send to the Fireworks API.
+ * @param options Optional configuration for the API call.
+ * @returns The response from the Fireworks API.
+ */
+ async completionWithRetry(request, options) {
+ // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
+ if (Array.isArray(request.prompt)) {
+ if (request.prompt.length > 1) {
+ throw new Error("Multiple prompts are not supported by Fireworks");
+ }
+ const prompt = request.prompt[0];
+ if (typeof prompt !== "string") {
+ throw new Error("Only string prompts are supported by Fireworks");
+ }
+ request.prompt = prompt;
+ }
+ delete request.frequency_penalty;
+ delete request.presence_penalty;
+ delete request.best_of;
+ delete request.logit_bias;
+ if (request.stream === true) {
+ return super.completionWithRetry(request, options);
+ }
+ return super.completionWithRetry(request, options);
+ }
+ }
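`completionWithRetry` rejects array prompts with more than one element, so batching several prompts into a single `generate` call is expected to throw (assuming the default batching groups them into one completion request); issue one prompt per request instead:

```ts
import { Fireworks } from "langchain/llms/fireworks";

const model = new Fireworks();

// Fine: one prompt per request.
await model.generate(["Summarize unified diffs in one line."]);

// Expected to throw "Multiple prompts are not supported by Fireworks",
// since both prompts would land in a single completion request.
try {
  await model.generate(["prompt one", "prompt two"]);
} catch (e) {
  console.error(e);
}
```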
package/dist/llms/ollama.cjs CHANGED
@@ -290,14 +290,30 @@ class Ollama extends base_js_1.LLM {
  async *_streamResponseChunks(prompt, options, runManager) {
  const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
  for await (const chunk of stream) {
- yield new index_js_1.GenerationChunk({
- text: chunk.response,
- generationInfo: {
- ...chunk,
- response: undefined,
- },
- });
- await runManager?.handleLLMNewToken(chunk.response ?? "");
+ if (!chunk.done) {
+ yield new index_js_1.GenerationChunk({
+ text: chunk.response,
+ generationInfo: {
+ ...chunk,
+ response: undefined,
+ },
+ });
+ await runManager?.handleLLMNewToken(chunk.response ?? "");
+ }
+ else {
+ yield new index_js_1.GenerationChunk({
+ text: "",
+ generationInfo: {
+ model: chunk.model,
+ total_duration: chunk.total_duration,
+ load_duration: chunk.load_duration,
+ prompt_eval_count: chunk.prompt_eval_count,
+ prompt_eval_duration: chunk.prompt_eval_duration,
+ eval_count: chunk.eval_count,
+ eval_duration: chunk.eval_duration,
+ },
+ });
+ }
  }
  }
  /** @ignore */
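Ollama's final stream chunk (`done: true`) carries no `response` text, only timing and token statistics; before this change it was emitted as if it were a token. A consumption sketch using the internal `_streamResponseChunks` iterator, shown only to illustrate the new chunk shape:

```ts
import { Ollama } from "langchain/llms/ollama";

const model = new Ollama({ baseUrl: "http://localhost:11434", model: "llama2" });

for await (const chunk of model._streamResponseChunks("Why is the sky blue?", {})) {
  if (chunk.text) {
    process.stdout.write(chunk.text); // ordinary tokens
  } else {
    // Final chunk: empty text, run statistics in generationInfo.
    console.log("\nstats:", chunk.generationInfo); // eval_count, total_duration, ...
  }
}
```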
@@ -6,8 +6,7 @@ import { GenerationChunk } from "../schema/index.js";
6
6
  * Class that represents the Ollama language model. It extends the base
7
7
  * LLM class and implements the OllamaInput interface.
8
8
  */
9
- export declare class Ollama extends LLM implements OllamaInput {
10
- CallOptions: OllamaCallOptions;
9
+ export declare class Ollama extends LLM<OllamaCallOptions> implements OllamaInput {
11
10
  static lc_name(): string;
12
11
  lc_serializable: boolean;
13
12
  model: string;
package/dist/llms/ollama.js CHANGED
@@ -287,14 +287,30 @@ export class Ollama extends LLM {
  async *_streamResponseChunks(prompt, options, runManager) {
  const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
  for await (const chunk of stream) {
- yield new GenerationChunk({
- text: chunk.response,
- generationInfo: {
- ...chunk,
- response: undefined,
- },
- });
- await runManager?.handleLLMNewToken(chunk.response ?? "");
+ if (!chunk.done) {
+ yield new GenerationChunk({
+ text: chunk.response,
+ generationInfo: {
+ ...chunk,
+ response: undefined,
+ },
+ });
+ await runManager?.handleLLMNewToken(chunk.response ?? "");
+ }
+ else {
+ yield new GenerationChunk({
+ text: "",
+ generationInfo: {
+ model: chunk.model,
+ total_duration: chunk.total_duration,
+ load_duration: chunk.load_duration,
+ prompt_eval_count: chunk.prompt_eval_count,
+ prompt_eval_duration: chunk.prompt_eval_duration,
+ eval_count: chunk.eval_count,
+ eval_duration: chunk.eval_duration,
+ },
+ });
+ }
  }
  }
  /** @ignore */
package/dist/llms/openai-chat.cjs CHANGED
@@ -35,11 +35,7 @@ class OpenAIChat extends base_js_1.LLM {
  return "OpenAIChat";
  }
  get callKeys() {
- return [
- ...super.callKeys,
- "options",
- "promptIndex",
- ];
+ return [...super.callKeys, "options", "promptIndex"];
  }
  get lc_secrets() {
  return {
package/dist/llms/openai-chat.d.ts CHANGED
@@ -36,7 +36,7 @@ export interface OpenAIChatCallOptions extends OpenAICallOptions {
  */
  export declare class OpenAIChat extends LLM<OpenAIChatCallOptions> implements OpenAIChatInput, AzureOpenAIInput {
  static lc_name(): string;
- get callKeys(): (keyof OpenAIChatCallOptions)[];
+ get callKeys(): string[];
  lc_serializable: boolean;
  get lc_secrets(): {
  [key: string]: string;
package/dist/llms/openai-chat.js CHANGED
@@ -32,11 +32,7 @@ export class OpenAIChat extends LLM {
  return "OpenAIChat";
  }
  get callKeys() {
- return [
- ...super.callKeys,
- "options",
- "promptIndex",
- ];
+ return [...super.callKeys, "options", "promptIndex"];
  }
  get lc_secrets() {
  return {
package/dist/llms/openai.cjs CHANGED
@@ -59,7 +59,7 @@ class OpenAI extends base_js_1.BaseLLM {
  if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
  fields?.modelName?.startsWith("gpt-4")) &&
  !fields?.modelName?.includes("-instruct")) {
- // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
+ // eslint-disable-next-line no-constructor-return
  return new openai_chat_js_1.OpenAIChat(fields, configuration);
  }
  super(fields ?? {});
package/dist/llms/openai.d.ts CHANGED
@@ -22,9 +22,9 @@ export { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
  * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
  * if not explicitly available on this class.
  */
- export declare class OpenAI extends BaseLLM<OpenAICallOptions> implements OpenAIInput, AzureOpenAIInput {
+ export declare class OpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions> extends BaseLLM<CallOptions> implements OpenAIInput, AzureOpenAIInput {
  static lc_name(): string;
- get callKeys(): (keyof OpenAICallOptions)[];
+ get callKeys(): string[];
  lc_serializable: boolean;
  get lc_secrets(): {
  [key: string]: string;
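Making `OpenAI` generic over its call options is what allows `Fireworks` to declare `extends OpenAI<FireworksCallOptions>`; any OpenAI-compatible wrapper can narrow its options the same way. A hypothetical subclass as a sketch:

```ts
import { OpenAI, OpenAICallOptions } from "langchain/llms/openai";

// Hypothetical provider wrapper: accept OpenAI call options minus the
// function-calling ones, mirroring the Fireworks pattern.
type MyProviderCallOptions = Partial<
  Omit<OpenAICallOptions, "functions" | "function_call">
>;

class MyProviderLLM extends OpenAI<MyProviderCallOptions> {
  _llmType() {
    return "my-provider";
  }
}
```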
package/dist/llms/openai.js CHANGED
@@ -56,7 +56,7 @@ export class OpenAI extends BaseLLM {
  if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
  fields?.modelName?.startsWith("gpt-4")) &&
  !fields?.modelName?.includes("-instruct")) {
- // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
+ // eslint-disable-next-line no-constructor-return
  return new OpenAIChat(fields, configuration);
  }
  super(fields ?? {});
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
24
24
  return result;
25
25
  };
26
26
  Object.defineProperty(exports, "__esModule", { value: true });
27
- exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
28
- exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = void 0;
27
+ exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fireworks = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
28
+ exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = void 0;
29
29
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
30
30
  exports.agents = __importStar(require("../agents/index.cjs"));
31
31
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -44,6 +44,7 @@ exports.llms__openai = __importStar(require("../llms/openai.cjs"));
44
44
  exports.llms__ai21 = __importStar(require("../llms/ai21.cjs"));
45
45
  exports.llms__aleph_alpha = __importStar(require("../llms/aleph_alpha.cjs"));
46
46
  exports.llms__ollama = __importStar(require("../llms/ollama.cjs"));
47
+ exports.llms__fireworks = __importStar(require("../llms/fireworks.cjs"));
47
48
  exports.prompts = __importStar(require("../prompts/index.cjs"));
48
49
  exports.vectorstores__base = __importStar(require("../vectorstores/base.cjs"));
49
50
  exports.vectorstores__memory = __importStar(require("../vectorstores/memory.cjs"));
@@ -60,6 +61,7 @@ exports.document_transformers__openai_functions = __importStar(require("../docum
60
61
  exports.chat_models__base = __importStar(require("../chat_models/base.cjs"));
61
62
  exports.chat_models__openai = __importStar(require("../chat_models/openai.cjs"));
62
63
  exports.chat_models__anthropic = __importStar(require("../chat_models/anthropic.cjs"));
64
+ exports.chat_models__fireworks = __importStar(require("../chat_models/fireworks.cjs"));
63
65
  exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
64
66
  exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
65
67
  exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));
@@ -16,6 +16,7 @@ export * as llms__openai from "../llms/openai.js";
16
16
  export * as llms__ai21 from "../llms/ai21.js";
17
17
  export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
18
18
  export * as llms__ollama from "../llms/ollama.js";
19
+ export * as llms__fireworks from "../llms/fireworks.js";
19
20
  export * as prompts from "../prompts/index.js";
20
21
  export * as vectorstores__base from "../vectorstores/base.js";
21
22
  export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -32,6 +33,7 @@ export * as document_transformers__openai_functions from "../document_transforme
32
33
  export * as chat_models__base from "../chat_models/base.js";
33
34
  export * as chat_models__openai from "../chat_models/openai.js";
34
35
  export * as chat_models__anthropic from "../chat_models/anthropic.js";
36
+ export * as chat_models__fireworks from "../chat_models/fireworks.js";
35
37
  export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
36
38
  export * as chat_models__ollama from "../chat_models/ollama.js";
37
39
  export * as chat_models__minimax from "../chat_models/minimax.js";
@@ -17,6 +17,7 @@ export * as llms__openai from "../llms/openai.js";
17
17
  export * as llms__ai21 from "../llms/ai21.js";
18
18
  export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
19
19
  export * as llms__ollama from "../llms/ollama.js";
20
+ export * as llms__fireworks from "../llms/fireworks.js";
20
21
  export * as prompts from "../prompts/index.js";
21
22
  export * as vectorstores__base from "../vectorstores/base.js";
22
23
  export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -33,6 +34,7 @@ export * as document_transformers__openai_functions from "../document_transforme
33
34
  export * as chat_models__base from "../chat_models/base.js";
34
35
  export * as chat_models__openai from "../chat_models/openai.js";
35
36
  export * as chat_models__anthropic from "../chat_models/anthropic.js";
37
+ export * as chat_models__fireworks from "../chat_models/fireworks.js";
36
38
  export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
37
39
  export * as chat_models__ollama from "../chat_models/ollama.js";
38
40
  export * as chat_models__minimax from "../chat_models/minimax.js";
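These import-map entries back the new subpath entrypoints. Assuming the chat model follows the library's `Chat*` naming convention for its class name, consumers can now write:

```ts
import { Fireworks } from "langchain/llms/fireworks";
import { ChatFireworks } from "langchain/chat_models/fireworks";
```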
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessage = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
3
+ exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.mapStoredMessageToChatMessage = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessage = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
4
4
  const serializable_js_1 = require("../load/serializable.cjs");
5
5
  exports.RUN_KEY = "__run";
6
6
  /**
@@ -405,6 +405,55 @@ class ChatGenerationChunk extends GenerationChunk {
405
405
  }
406
406
  }
407
407
  exports.ChatGenerationChunk = ChatGenerationChunk;
408
+ /**
409
+ * Maps messages from an older format (V1) to the current `StoredMessage`
410
+ * format. If the message is already in the `StoredMessage` format, it is
411
+ * returned as is. Otherwise, it transforms the V1 message into a
412
+ * `StoredMessage`. This function is important for maintaining
413
+ * compatibility with older message formats.
414
+ */
415
+ function mapV1MessageToStoredMessage(message) {
416
+ // TODO: Remove this mapper when we deprecate the old message format.
417
+ if (message.data !== undefined) {
418
+ return message;
419
+ }
420
+ else {
421
+ const v1Message = message;
422
+ return {
423
+ type: v1Message.type,
424
+ data: {
425
+ content: v1Message.text,
426
+ role: v1Message.role,
427
+ name: undefined,
428
+ },
429
+ };
430
+ }
431
+ }
432
+ function mapStoredMessageToChatMessage(message) {
433
+ const storedMessage = mapV1MessageToStoredMessage(message);
434
+ switch (storedMessage.type) {
435
+ case "human":
436
+ return new HumanMessage(storedMessage.data);
437
+ case "ai":
438
+ return new AIMessage(storedMessage.data);
439
+ case "system":
440
+ return new SystemMessage(storedMessage.data);
441
+ case "function":
442
+ if (storedMessage.data.name === undefined) {
443
+ throw new Error("Name must be defined for function messages");
444
+ }
445
+ return new FunctionMessage(storedMessage.data);
446
+ case "chat": {
447
+ if (storedMessage.data.role === undefined) {
448
+ throw new Error("Role must be defined for chat messages");
449
+ }
450
+ return new ChatMessage(storedMessage.data);
451
+ }
452
+ default:
453
+ throw new Error(`Got unexpected type: ${storedMessage.type}`);
454
+ }
455
+ }
456
+ exports.mapStoredMessageToChatMessage = mapStoredMessageToChatMessage;
408
457
  /**
409
458
  * Base PromptValue class. All prompt values should extend this class.
410
459
  */
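`mapStoredMessageToChatMessage` is now a public export of `langchain/schema` (note the large deletions in `stores/message/utils.*` in the file list, where this logic previously lived). A usage sketch:

```ts
import { mapStoredMessageToChatMessage, StoredMessage } from "langchain/schema";

const stored: StoredMessage = {
  type: "ai",
  data: { content: "Hello!", role: undefined, name: undefined },
};

// Revives a serialized message into the matching BaseMessage subclass;
// old V1-format messages are upgraded by mapV1MessageToStoredMessage first.
const message = mapStoredMessageToChatMessage(stored);
console.log(message._getType()); // "ai"
```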
package/dist/schema/index.d.ts CHANGED
@@ -59,6 +59,10 @@ export interface StoredMessage {
  type: string;
  data: StoredMessageData;
  }
+ export interface StoredGeneration {
+ text: string;
+ message?: StoredMessage;
+ }
  export type MessageType = "human" | "ai" | "generic" | "system" | "function";
  export interface BaseMessageFields {
  content: string;
@@ -239,6 +243,7 @@ export declare class ChatGenerationChunk extends GenerationChunk implements Chat
  constructor(fields: ChatGenerationChunkFields);
  concat(chunk: ChatGenerationChunk): ChatGenerationChunk;
  }
+ export declare function mapStoredMessageToChatMessage(message: StoredMessage): HumanMessage | AIMessage | SystemMessage | FunctionMessage | ChatMessage;
  export interface ChatResult {
  generations: ChatGeneration[];
  llmOutput?: Record<string, any>;
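`StoredGeneration` pairs with the `cache/base.*` additions in the file list: caches can now persist a chat generation's message alongside its text. A sketch of the shape being round-tripped:

```ts
import type { StoredGeneration } from "langchain/schema";

// What a cache entry for a chat generation can now carry: the raw text
// plus the serialized message, so the message class can be revived on lookup.
const entry: StoredGeneration = {
  text: "Hello!",
  message: {
    type: "ai",
    data: { content: "Hello!", role: undefined, name: undefined },
  },
};
```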