langchain 0.0.152 → 0.0.153
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/chat_models/fireworks.cjs +1 -0
- package/chat_models/fireworks.d.ts +1 -0
- package/chat_models/fireworks.js +1 -0
- package/dist/agents/executor.cjs +9 -2
- package/dist/agents/executor.js +9 -2
- package/dist/base_language/count_tokens.cjs +1 -1
- package/dist/base_language/count_tokens.js +1 -1
- package/dist/chains/openai_functions/structured_output.d.ts +2 -2
- package/dist/chat_models/base.d.ts +1 -1
- package/dist/chat_models/fireworks.cjs +81 -0
- package/dist/chat_models/fireworks.d.ts +33 -0
- package/dist/chat_models/fireworks.js +77 -0
- package/dist/chat_models/ollama.cjs +22 -5
- package/dist/chat_models/ollama.d.ts +1 -2
- package/dist/chat_models/ollama.js +22 -5
- package/dist/chat_models/openai.d.ts +2 -2
- package/dist/llms/fireworks.cjs +92 -0
- package/dist/llms/fireworks.d.ts +33 -0
- package/dist/llms/fireworks.js +88 -0
- package/dist/llms/ollama.cjs +24 -8
- package/dist/llms/ollama.d.ts +1 -2
- package/dist/llms/ollama.js +24 -8
- package/dist/llms/openai-chat.cjs +1 -5
- package/dist/llms/openai-chat.d.ts +1 -1
- package/dist/llms/openai-chat.js +1 -5
- package/dist/llms/openai.cjs +1 -1
- package/dist/llms/openai.d.ts +2 -2
- package/dist/llms/openai.js +1 -1
- package/dist/load/import_map.cjs +4 -2
- package/dist/load/import_map.d.ts +2 -0
- package/dist/load/import_map.js +2 -0
- package/dist/schema/output_parser.cjs +38 -6
- package/dist/schema/output_parser.d.ts +20 -5
- package/dist/schema/output_parser.js +38 -6
- package/dist/schema/runnable/base.cjs +65 -10
- package/dist/schema/runnable/base.d.ts +17 -3
- package/dist/schema/runnable/base.js +65 -10
- package/dist/util/ollama.cjs +2 -2
- package/dist/util/ollama.d.ts +6 -0
- package/dist/util/ollama.js +2 -2
- package/llms/fireworks.cjs +1 -0
- package/llms/fireworks.d.ts +1 -0
- package/llms/fireworks.js +1 -0
- package/package.json +17 -1
package/chat_models/fireworks.cjs
ADDED
@@ -0,0 +1 @@
+module.exports = require('../dist/chat_models/fireworks.cjs');

package/chat_models/fireworks.d.ts
ADDED
@@ -0,0 +1 @@
+export * from '../dist/chat_models/fireworks.js'

package/chat_models/fireworks.js
ADDED
@@ -0,0 +1 @@
+export * from '../dist/chat_models/fireworks.js'
package/dist/agents/executor.cjs
CHANGED
@@ -147,8 +147,15 @@ class AgentExecutor extends base_js_1.BaseChain {
         // eslint-disable-next-line no-instanceof/no-instanceof
         if (e instanceof output_parser_js_1.OutputParserException) {
             let observation;
+            let text = e.message;
             if (this.handleParsingErrors === true) {
-                observation = "Invalid or incomplete response";
+                if (e.sendToLLM) {
+                    observation = e.observation;
+                    text = e.llmOutput ?? "";
+                }
+                else {
+                    observation = "Invalid or incomplete response";
+                }
             }
             else if (typeof this.handleParsingErrors === "string") {
                 observation = this.handleParsingErrors;
@@ -162,7 +169,7 @@ class AgentExecutor extends base_js_1.BaseChain {
                 output = {
                     tool: "_Exception",
                     toolInput: observation,
-                    log: e.message,
+                    log: text,
                 };
             }
             else {
package/dist/agents/executor.js
CHANGED
@@ -143,8 +143,15 @@ export class AgentExecutor extends BaseChain {
         // eslint-disable-next-line no-instanceof/no-instanceof
         if (e instanceof OutputParserException) {
             let observation;
+            let text = e.message;
             if (this.handleParsingErrors === true) {
-                observation = "Invalid or incomplete response";
+                if (e.sendToLLM) {
+                    observation = e.observation;
+                    text = e.llmOutput ?? "";
+                }
+                else {
+                    observation = "Invalid or incomplete response";
+                }
             }
             else if (typeof this.handleParsingErrors === "string") {
                 observation = this.handleParsingErrors;
@@ -158,7 +165,7 @@ export class AgentExecutor extends BaseChain {
                 output = {
                     tool: "_Exception",
                     toolInput: observation,
-                    log: e.message,
+                    log: text,
                 };
             }
             else {
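The executor now lets a parser hand the model's raw output back to the agent loop instead of a fixed message. A minimal sketch of triggering that path, assuming the `OutputParserException` constructor in this release takes `(message, llmOutput, observation, sendToLLM)` (the `schema/output_parser` changes listed above add those fields):

```ts
import { OutputParserException } from "langchain/schema/output_parser";

// Thrown from a custom agent output parser. With an AgentExecutor
// configured with handleParsingErrors: true, sendToLLM routes the
// observation back to the model and uses llmOutput as the step's log.
function failParse(rawOutput: string): never {
  throw new OutputParserException(
    `Could not parse agent output: ${rawOutput}`, // message (default log text)
    rawOutput, // llmOutput, becomes `text` in the diff above
    "Malformed output. Respond with a valid action, formatted as JSON.", // observation
    true // sendToLLM
  );
}
```

Without `sendToLLM`, behavior is unchanged: the executor still substitutes "Invalid or incomplete response".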
package/dist/base_language/count_tokens.cjs
CHANGED
@@ -58,7 +58,7 @@ exports.getModelContextSize = getModelContextSize;
 const calculateMaxTokens = async ({ prompt, modelName, }) => {
     let numTokens;
     try {
-        numTokens = (await (0, tiktoken_js_1.encodingForModel)(modelName)).encode(prompt).length;
+        numTokens = (await (0, tiktoken_js_1.encodingForModel)((0, exports.getModelNameForTiktoken)(modelName))).encode(prompt).length;
     }
     catch (error) {
         console.warn("Failed to calculate number of tokens, falling back to approximate count");
package/dist/base_language/count_tokens.js
CHANGED
@@ -52,7 +52,7 @@ export const getModelContextSize = (modelName) => {
 export const calculateMaxTokens = async ({ prompt, modelName, }) => {
     let numTokens;
     try {
-        numTokens = (await encodingForModel(modelName)).encode(prompt).length;
+        numTokens = (await encodingForModel(getModelNameForTiktoken(modelName))).encode(prompt).length;
     }
     catch (error) {
         console.warn("Failed to calculate number of tokens, falling back to approximate count");
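The fix routes the model name through `getModelNameForTiktoken` before asking tiktoken for an encoding, so snapshot-suffixed names no longer trip the approximate fallback. A minimal sketch, assuming the `langchain/base_language/count_tokens` entrypoint exports `calculateMaxTokens` as in this package:

```ts
import { calculateMaxTokens } from "langchain/base_language/count_tokens";

// Previously a dated snapshot name was passed to tiktoken's
// encodingForModel verbatim, which threw and triggered the approximate
// count; it is now normalized first (e.g. "gpt-3.5-turbo-0613" maps
// to "gpt-3.5-turbo").
const remaining = await calculateMaxTokens({
  prompt: "Hello, world!",
  modelName: "gpt-3.5-turbo-0613",
});
console.log(remaining); // context size minus the exact prompt token count
```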
package/dist/chains/openai_functions/structured_output.d.ts
CHANGED
@@ -46,5 +46,5 @@ export declare class FunctionCallStructuredOutputParser<T extends z.AnyZodObject
  * as well as an additional required "outputSchema" JSON Schema object.
  * @returns OpenAPIChain
  */
-export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput): LLMChain<any,
-export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput, "outputSchema">): LLMChain<any,
+export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput): LLMChain<any, BaseChatModel<BaseFunctionCallOptions> | ChatOpenAI<BaseFunctionCallOptions>>;
+export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput, "outputSchema">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions> | ChatOpenAI<BaseFunctionCallOptions>>;
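The declaration change only widens the returned LLMChain's model type parameter to accommodate the now-generic chat model classes; runtime behavior is unchanged. For context, a hedged usage sketch (field names follow `StructuredOutputChainInput` as published in this release):

```ts
import { z } from "zod";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { PromptTemplate } from "langchain/prompts";
import { createStructuredOutputChainFromZod } from "langchain/chains/openai_functions";

// Builds an LLMChain whose output is parsed against the Zod schema.
const chain = createStructuredOutputChainFromZod(
  z.object({ name: z.string(), age: z.number() }),
  {
    prompt: PromptTemplate.fromTemplate("Extract person details from: {input}"),
    llm: new ChatOpenAI({ modelName: "gpt-4" }),
  }
);

const person = await chain.run("John Doe is 30 years old.");
```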
package/dist/chat_models/base.d.ts
CHANGED
@@ -109,7 +109,7 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
  * An abstract class that extends BaseChatModel and provides a simple
  * implementation of _generate.
  */
-export declare abstract class SimpleChatModel extends BaseChatModel {
+export declare abstract class SimpleChatModel<CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions> extends BaseChatModel<CallOptions> {
     abstract _call(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
     _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
 }
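`SimpleChatModel` is now generic over its call options, which is what lets `ChatOllama` below declare `extends SimpleChatModel<OllamaCallOptions>` instead of a loose `CallOptions` property. A minimal sketch of a custom subclass; `EchoChatModel` and its `uppercase` option are hypothetical:

```ts
import {
  SimpleChatModel,
  BaseChatModelCallOptions,
} from "langchain/chat_models/base";
import { BaseMessage } from "langchain/schema";

interface EchoCallOptions extends BaseChatModelCallOptions {
  uppercase?: boolean; // hypothetical per-call option
}

// The typed options flow into _call via this["ParsedCallOptions"].
class EchoChatModel extends SimpleChatModel<EchoCallOptions> {
  _llmType() {
    return "echo";
  }

  async _call(
    messages: BaseMessage[],
    options: this["ParsedCallOptions"]
  ): Promise<string> {
    const last = String(messages[messages.length - 1].content);
    return options.uppercase ? last.toUpperCase() : last;
  }
}
```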
package/dist/chat_models/fireworks.cjs
ADDED
@@ -0,0 +1,81 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ChatFireworks = void 0;
+const openai_js_1 = require("./openai.cjs");
+const env_js_1 = require("../util/env.cjs");
+/**
+ * Wrapper around Fireworks API for large language models fine-tuned for chat
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+class ChatFireworks extends openai_js_1.ChatOpenAI {
+    static lc_name() {
+        return "ChatFireworks";
+    }
+    _llmType() {
+        return "fireworks";
+    }
+    get lc_secrets() {
+        return {
+            fireworksApiKey: "FIREWORKS_API_KEY",
+        };
+    }
+    constructor(fields) {
+        const fireworksApiKey = fields?.fireworksApiKey || (0, env_js_1.getEnvironmentVariable)("FIREWORKS_API_KEY");
+        if (!fireworksApiKey) {
+            throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+        }
+        super({
+            ...fields,
+            modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b-chat",
+            openAIApiKey: fireworksApiKey,
+            configuration: {
+                baseURL: "https://api.fireworks.ai/inference/v1",
+            },
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "fireworksApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.fireworksApiKey = fireworksApiKey;
+    }
+    toJSON() {
+        const result = super.toJSON();
+        if ("kwargs" in result &&
+            typeof result.kwargs === "object" &&
+            result.kwargs != null) {
+            delete result.kwargs.openai_api_key;
+            delete result.kwargs.configuration;
+        }
+        return result;
+    }
+    /**
+     * Calls the Fireworks API with retry logic in case of failures.
+     * @param request The request to send to the Fireworks API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the Fireworks API.
+     */
+    async completionWithRetry(request, options) {
+        delete request.frequency_penalty;
+        delete request.presence_penalty;
+        delete request.logit_bias;
+        delete request.functions;
+        if (request.stream === true) {
+            return super.completionWithRetry(request, options);
+        }
+        return super.completionWithRetry(request, options);
+    }
+}
+exports.ChatFireworks = ChatFireworks;
package/dist/chat_models/fireworks.d.ts
ADDED
@@ -0,0 +1,33 @@
+import type { OpenAI as OpenAIClient } from "openai";
+import type { ChatOpenAICallOptions, OpenAIChatInput } from "./openai.js";
+import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
+import type { BaseChatModelParams } from "./base.js";
+import { ChatOpenAI } from "./openai.js";
+type FireworksUnsupportedArgs = "frequencyPenalty" | "presencePenalty" | "logitBias" | "functions";
+type FireworksUnsupportedCallOptions = "functions" | "function_call" | "tools";
+export type ChatFireworksCallOptions = Partial<Omit<ChatOpenAICallOptions, FireworksUnsupportedCallOptions>>;
+/**
+ * Wrapper around Fireworks API for large language models fine-tuned for chat
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export declare class ChatFireworks extends ChatOpenAI<ChatFireworksCallOptions> {
+    static lc_name(): string;
+    _llmType(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    lc_serializable: boolean;
+    fireworksApiKey?: string;
+    constructor(fields?: Partial<Omit<OpenAIChatInput, "openAIApiKey" | FireworksUnsupportedArgs>> & BaseChatModelParams & {
+        fireworksApiKey?: string;
+    });
+    toJSON(): import("../load/serializable.js").Serialized;
+    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
+    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
+}
+export {};
package/dist/chat_models/fireworks.js
ADDED
@@ -0,0 +1,77 @@
+import { ChatOpenAI } from "./openai.js";
+import { getEnvironmentVariable } from "../util/env.js";
+/**
+ * Wrapper around Fireworks API for large language models fine-tuned for chat
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export class ChatFireworks extends ChatOpenAI {
+    static lc_name() {
+        return "ChatFireworks";
+    }
+    _llmType() {
+        return "fireworks";
+    }
+    get lc_secrets() {
+        return {
+            fireworksApiKey: "FIREWORKS_API_KEY",
+        };
+    }
+    constructor(fields) {
+        const fireworksApiKey = fields?.fireworksApiKey || getEnvironmentVariable("FIREWORKS_API_KEY");
+        if (!fireworksApiKey) {
+            throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+        }
+        super({
+            ...fields,
+            modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b-chat",
+            openAIApiKey: fireworksApiKey,
+            configuration: {
+                baseURL: "https://api.fireworks.ai/inference/v1",
+            },
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "fireworksApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.fireworksApiKey = fireworksApiKey;
+    }
+    toJSON() {
+        const result = super.toJSON();
+        if ("kwargs" in result &&
+            typeof result.kwargs === "object" &&
+            result.kwargs != null) {
+            delete result.kwargs.openai_api_key;
+            delete result.kwargs.configuration;
+        }
+        return result;
+    }
+    /**
+     * Calls the Fireworks API with retry logic in case of failures.
+     * @param request The request to send to the Fireworks API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the Fireworks API.
+     */
+    async completionWithRetry(request, options) {
+        delete request.frequency_penalty;
+        delete request.presence_penalty;
+        delete request.logit_bias;
+        delete request.functions;
+        if (request.stream === true) {
+            return super.completionWithRetry(request, options);
+        }
+        return super.completionWithRetry(request, options);
+    }
+}
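Together with the entrypoint stubs at the top of this diff, the new class is importable from `langchain/chat_models/fireworks`. A minimal usage sketch:

```ts
import { ChatFireworks } from "langchain/chat_models/fireworks";
import { HumanMessage } from "langchain/schema";

// Relies on the FIREWORKS_API_KEY environment variable (or pass
// fireworksApiKey explicitly); the model name below is the default
// shown in the constructor above.
const model = new ChatFireworks({
  modelName: "accounts/fireworks/models/llama-v2-13b-chat",
  temperature: 0.9,
});

const response = await model.call([new HumanMessage("Hello!")]);
console.log(response.content);
```

Note that `frequencyPenalty`, `presencePenalty`, `logitBias`, and OpenAI function calling are stripped from each request, matching the Fireworks compatibility limits linked in the class docs.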
package/dist/chat_models/ollama.cjs
CHANGED
@@ -303,11 +303,28 @@ class ChatOllama extends base_js_1.SimpleChatModel {
             prompt: this._formatMessagesAsPrompt(input),
         }, options));
         for await (const chunk of stream) {
-            yield new index_js_1.ChatGenerationChunk({
-                text: chunk.response,
-                message: new index_js_1.AIMessageChunk({ content: chunk.response }),
-            });
-            await runManager?.handleLLMNewToken(chunk.response ?? "");
+            if (!chunk.done) {
+                yield new index_js_1.ChatGenerationChunk({
+                    text: chunk.response,
+                    message: new index_js_1.AIMessageChunk({ content: chunk.response }),
+                });
+                await runManager?.handleLLMNewToken(chunk.response ?? "");
+            }
+            else {
+                yield new index_js_1.ChatGenerationChunk({
+                    text: "",
+                    message: new index_js_1.AIMessageChunk({ content: "" }),
+                    generationInfo: {
+                        model: chunk.model,
+                        total_duration: chunk.total_duration,
+                        load_duration: chunk.load_duration,
+                        prompt_eval_count: chunk.prompt_eval_count,
+                        prompt_eval_duration: chunk.prompt_eval_duration,
+                        eval_count: chunk.eval_count,
+                        eval_duration: chunk.eval_duration,
+                    },
+                });
+            }
         }
     }
     _formatMessagesAsPrompt(messages) {
package/dist/chat_models/ollama.d.ts
CHANGED
@@ -14,8 +14,7 @@ export interface OllamaCallOptions extends BaseLanguageModelCallOptions {
  * models in a chat-like fashion. It extends the SimpleChatModel class and
  * implements the OllamaInput interface.
  */
-export declare class ChatOllama extends SimpleChatModel implements OllamaInput {
-    CallOptions: OllamaCallOptions;
+export declare class ChatOllama extends SimpleChatModel<OllamaCallOptions> implements OllamaInput {
     static lc_name(): string;
     lc_serializable: boolean;
     model: string;
package/dist/chat_models/ollama.js
CHANGED
@@ -300,11 +300,28 @@ export class ChatOllama extends SimpleChatModel {
             prompt: this._formatMessagesAsPrompt(input),
         }, options));
         for await (const chunk of stream) {
-            yield new ChatGenerationChunk({
-                text: chunk.response,
-                message: new AIMessageChunk({ content: chunk.response }),
-            });
-            await runManager?.handleLLMNewToken(chunk.response ?? "");
+            if (!chunk.done) {
+                yield new ChatGenerationChunk({
+                    text: chunk.response,
+                    message: new AIMessageChunk({ content: chunk.response }),
+                });
+                await runManager?.handleLLMNewToken(chunk.response ?? "");
+            }
+            else {
+                yield new ChatGenerationChunk({
+                    text: "",
+                    message: new AIMessageChunk({ content: "" }),
+                    generationInfo: {
+                        model: chunk.model,
+                        total_duration: chunk.total_duration,
+                        load_duration: chunk.load_duration,
+                        prompt_eval_count: chunk.prompt_eval_count,
+                        prompt_eval_duration: chunk.prompt_eval_duration,
+                        eval_count: chunk.eval_count,
+                        eval_duration: chunk.eval_duration,
+                    },
+                });
+            }
         }
     }
     _formatMessagesAsPrompt(messages) {
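With this change, intermediate chunks carry the generated tokens while the final `done` chunk carries Ollama's run statistics in `generationInfo`. A hedged sketch, assuming a local Ollama server and the Runnable `.stream()` interface available on chat models in this release:

```ts
import { ChatOllama } from "langchain/chat_models/ollama";
import { HumanMessage } from "langchain/schema";

const model = new ChatOllama({
  baseUrl: "http://localhost:11434",
  model: "llama2",
});

const stream = await model.stream([new HumanMessage("Why is the sky blue?")]);
for await (const chunk of stream) {
  // Token chunks have content; the final chunk is empty but its
  // generation info (eval_count, durations, ...) reaches callbacks.
  process.stdout.write(String(chunk.content));
}
```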
package/dist/chat_models/openai.d.ts
CHANGED
@@ -37,9 +37,9 @@ export interface ChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCa
  * `openai.createChatCompletion`} can be passed through {@link modelKwargs}, even
  * if not explicitly available on this class.
  */
-export declare class ChatOpenAI extends BaseChatModel<
+export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions> extends BaseChatModel<CallOptions> implements OpenAIChatInput, AzureOpenAIInput {
     static lc_name(): string;
-    get callKeys():
+    get callKeys(): string[];
     lc_serializable: boolean;
     get lc_secrets(): {
         [key: string]: string;
package/dist/llms/fireworks.cjs
ADDED
@@ -0,0 +1,92 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.Fireworks = void 0;
+const env_js_1 = require("../util/env.cjs");
+const openai_js_1 = require("./openai.cjs");
+/**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+class Fireworks extends openai_js_1.OpenAI {
+    static lc_name() {
+        return "Fireworks";
+    }
+    _llmType() {
+        return "fireworks";
+    }
+    get lc_secrets() {
+        return {
+            fireworksApiKey: "FIREWORKS_API_KEY",
+        };
+    }
+    constructor(fields) {
+        const fireworksApiKey = fields?.fireworksApiKey || (0, env_js_1.getEnvironmentVariable)("FIREWORKS_API_KEY");
+        if (!fireworksApiKey) {
+            throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+        }
+        super({
+            ...fields,
+            openAIApiKey: fireworksApiKey,
+            modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
+            configuration: {
+                baseURL: "https://api.fireworks.ai/inference/v1",
+            },
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "fireworksApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.fireworksApiKey = fireworksApiKey;
+    }
+    toJSON() {
+        const result = super.toJSON();
+        if ("kwargs" in result &&
+            typeof result.kwargs === "object" &&
+            result.kwargs != null) {
+            delete result.kwargs.openai_api_key;
+            delete result.kwargs.configuration;
+        }
+        return result;
+    }
+    /**
+     * Calls the Fireworks API with retry logic in case of failures.
+     * @param request The request to send to the Fireworks API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the Fireworks API.
+     */
+    async completionWithRetry(request, options) {
+        // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
+        if (Array.isArray(request.prompt)) {
+            if (request.prompt.length > 1) {
+                throw new Error("Multiple prompts are not supported by Fireworks");
+            }
+            const prompt = request.prompt[0];
+            if (typeof prompt !== "string") {
+                throw new Error("Only string prompts are supported by Fireworks");
+            }
+            request.prompt = prompt;
+        }
+        delete request.frequency_penalty;
+        delete request.presence_penalty;
+        delete request.best_of;
+        delete request.logit_bias;
+        if (request.stream === true) {
+            return super.completionWithRetry(request, options);
+        }
+        return super.completionWithRetry(request, options);
+    }
+}
+exports.Fireworks = Fireworks;
package/dist/llms/fireworks.d.ts
ADDED
@@ -0,0 +1,33 @@
+import type { OpenAI as OpenAIClient } from "openai";
+import type { BaseLLMParams } from "./base.js";
+import type { OpenAICallOptions, OpenAIInput } from "./openai.js";
+import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
+import { OpenAI } from "./openai.js";
+type FireworksUnsupportedArgs = "frequencyPenalty" | "presencePenalty" | "bestOf" | "logitBias";
+type FireworksUnsupportedCallOptions = "functions" | "function_call" | "tools";
+export type FireworksCallOptions = Partial<Omit<OpenAICallOptions, FireworksUnsupportedCallOptions>>;
+/**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export declare class Fireworks extends OpenAI<FireworksCallOptions> {
+    static lc_name(): string;
+    _llmType(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    lc_serializable: boolean;
+    fireworksApiKey?: string;
+    constructor(fields?: Partial<Omit<OpenAIInput, "openAIApiKey" | FireworksUnsupportedArgs>> & BaseLLMParams & {
+        fireworksApiKey?: string;
+    });
+    toJSON(): import("../load/serializable.js").Serialized;
+    completionWithRetry(request: OpenAIClient.CompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Completion>>;
+    completionWithRetry(request: OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Completions.Completion>;
+}
+export {};
package/dist/llms/fireworks.js
ADDED
@@ -0,0 +1,88 @@
+import { getEnvironmentVariable } from "../util/env.js";
+import { OpenAI } from "./openai.js";
+/**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export class Fireworks extends OpenAI {
+    static lc_name() {
+        return "Fireworks";
+    }
+    _llmType() {
+        return "fireworks";
+    }
+    get lc_secrets() {
+        return {
+            fireworksApiKey: "FIREWORKS_API_KEY",
+        };
+    }
+    constructor(fields) {
+        const fireworksApiKey = fields?.fireworksApiKey || getEnvironmentVariable("FIREWORKS_API_KEY");
+        if (!fireworksApiKey) {
+            throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+        }
+        super({
+            ...fields,
+            openAIApiKey: fireworksApiKey,
+            modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
+            configuration: {
+                baseURL: "https://api.fireworks.ai/inference/v1",
+            },
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "fireworksApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.fireworksApiKey = fireworksApiKey;
+    }
+    toJSON() {
+        const result = super.toJSON();
+        if ("kwargs" in result &&
+            typeof result.kwargs === "object" &&
+            result.kwargs != null) {
+            delete result.kwargs.openai_api_key;
+            delete result.kwargs.configuration;
+        }
+        return result;
+    }
+    /**
+     * Calls the Fireworks API with retry logic in case of failures.
+     * @param request The request to send to the Fireworks API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the Fireworks API.
+     */
+    async completionWithRetry(request, options) {
+        // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
+        if (Array.isArray(request.prompt)) {
+            if (request.prompt.length > 1) {
+                throw new Error("Multiple prompts are not supported by Fireworks");
+            }
+            const prompt = request.prompt[0];
+            if (typeof prompt !== "string") {
+                throw new Error("Only string prompts are supported by Fireworks");
+            }
+            request.prompt = prompt;
+        }
+        delete request.frequency_penalty;
+        delete request.presence_penalty;
+        delete request.best_of;
+        delete request.logit_bias;
+        if (request.stream === true) {
+            return super.completionWithRetry(request, options);
+        }
+        return super.completionWithRetry(request, options);
+    }
+}
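The completion-style counterpart is importable from `langchain/llms/fireworks` via the stubs listed at the top of this diff. A minimal usage sketch:

```ts
import { Fireworks } from "langchain/llms/fireworks";

// Uses FIREWORKS_API_KEY from the environment; the model name is the
// default from the constructor above. frequencyPenalty, presencePenalty,
// bestOf, and logitBias are stripped before each request, and only a
// single string prompt is accepted.
const llm = new Fireworks({
  modelName: "accounts/fireworks/models/llama-v2-13b",
  maxTokens: 256,
});

const text = await llm.call("Write a haiku about package diffs.");
console.log(text);
```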
package/dist/llms/ollama.cjs
CHANGED
@@ -290,14 +290,30 @@ class Ollama extends base_js_1.LLM {
     async *_streamResponseChunks(prompt, options, runManager) {
         const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
         for await (const chunk of stream) {
-            yield new index_js_1.GenerationChunk({
-                text: chunk.response,
-                generationInfo: {
-                    ...chunk,
-                    response: undefined,
-                },
-            });
-            await runManager?.handleLLMNewToken(chunk.response ?? "");
+            if (!chunk.done) {
+                yield new index_js_1.GenerationChunk({
+                    text: chunk.response,
+                    generationInfo: {
+                        ...chunk,
+                        response: undefined,
+                    },
+                });
+                await runManager?.handleLLMNewToken(chunk.response ?? "");
+            }
+            else {
+                yield new index_js_1.GenerationChunk({
+                    text: "",
+                    generationInfo: {
+                        model: chunk.model,
+                        total_duration: chunk.total_duration,
+                        load_duration: chunk.load_duration,
+                        prompt_eval_count: chunk.prompt_eval_count,
+                        prompt_eval_duration: chunk.prompt_eval_duration,
+                        eval_count: chunk.eval_count,
+                        eval_duration: chunk.eval_duration,
+                    },
+                });
+            }
         }
     }
     /** @ignore */
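Same pattern for the completion-style Ollama model: non-final chunks stream text (with the raw Ollama fields in `generationInfo`), and the final chunk reports run statistics instead of text. A hedged sketch assuming a local Ollama server:

```ts
import { Ollama } from "langchain/llms/ollama";

const model = new Ollama({
  baseUrl: "http://localhost:11434",
  model: "llama2",
});

// On an LLM, .stream() yields plain string chunks; the final done
// chunk contributes an empty string while its eval/duration stats
// flow to callbacks via generationInfo.
const stream = await model.stream("Why is the sky blue?");
for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```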
package/dist/llms/ollama.d.ts
CHANGED
@@ -6,8 +6,7 @@ import { GenerationChunk } from "../schema/index.js";
  * Class that represents the Ollama language model. It extends the base
  * LLM class and implements the OllamaInput interface.
  */
-export declare class Ollama extends LLM implements OllamaInput {
-    CallOptions: OllamaCallOptions;
+export declare class Ollama extends LLM<OllamaCallOptions> implements OllamaInput {
     static lc_name(): string;
     lc_serializable: boolean;
     model: string;