langchain 0.0.151 → 0.0.153
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cache/cloudflare_kv.cjs +1 -0
- package/cache/cloudflare_kv.d.ts +1 -0
- package/cache/cloudflare_kv.js +1 -0
- package/chat_models/fireworks.cjs +1 -0
- package/chat_models/fireworks.d.ts +1 -0
- package/chat_models/fireworks.js +1 -0
- package/dist/agents/chat/index.cjs +1 -1
- package/dist/agents/chat/index.js +1 -1
- package/dist/agents/chat_convo/index.cjs +1 -1
- package/dist/agents/chat_convo/index.js +1 -1
- package/dist/agents/executor.cjs +9 -2
- package/dist/agents/executor.js +9 -2
- package/dist/agents/openai/index.cjs +1 -1
- package/dist/agents/openai/index.js +1 -1
- package/dist/agents/structured_chat/index.cjs +1 -1
- package/dist/agents/structured_chat/index.js +1 -1
- package/dist/agents/xml/index.cjs +1 -1
- package/dist/agents/xml/index.js +1 -1
- package/dist/base_language/count_tokens.cjs +2 -1
- package/dist/base_language/count_tokens.js +2 -1
- package/dist/base_language/index.cjs +5 -3
- package/dist/base_language/index.d.ts +1 -1
- package/dist/base_language/index.js +4 -3
- package/dist/cache/cloudflare_kv.cjs +61 -0
- package/dist/cache/cloudflare_kv.d.ts +29 -0
- package/dist/cache/cloudflare_kv.js +57 -0
- package/dist/chains/openai_functions/openapi.cjs +1 -1
- package/dist/chains/openai_functions/openapi.js +1 -1
- package/dist/chains/openai_functions/structured_output.d.ts +2 -2
- package/dist/chains/question_answering/map_reduce_prompts.cjs +2 -3
- package/dist/chains/question_answering/map_reduce_prompts.js +2 -3
- package/dist/chains/question_answering/refine_prompts.cjs +2 -2
- package/dist/chains/question_answering/refine_prompts.js +2 -2
- package/dist/chains/question_answering/stuff_prompts.cjs +1 -2
- package/dist/chains/question_answering/stuff_prompts.js +1 -2
- package/dist/chat_models/base.d.ts +1 -1
- package/dist/chat_models/fireworks.cjs +81 -0
- package/dist/chat_models/fireworks.d.ts +33 -0
- package/dist/chat_models/fireworks.js +77 -0
- package/dist/chat_models/ollama.cjs +25 -12
- package/dist/chat_models/ollama.d.ts +2 -3
- package/dist/chat_models/ollama.js +25 -12
- package/dist/chat_models/openai.d.ts +2 -2
- package/dist/document_loaders/web/pdf.cjs +87 -0
- package/dist/document_loaders/web/pdf.d.ts +17 -0
- package/dist/document_loaders/web/pdf.js +83 -0
- package/dist/evaluation/agents/prompt.cjs +2 -3
- package/dist/evaluation/agents/prompt.js +2 -3
- package/dist/experimental/plan_and_execute/prompt.cjs +1 -1
- package/dist/experimental/plan_and_execute/prompt.js +1 -1
- package/dist/llms/fireworks.cjs +92 -0
- package/dist/llms/fireworks.d.ts +33 -0
- package/dist/llms/fireworks.js +88 -0
- package/dist/llms/llama_cpp.cjs +10 -4
- package/dist/llms/llama_cpp.d.ts +2 -1
- package/dist/llms/llama_cpp.js +10 -4
- package/dist/llms/ollama.cjs +29 -14
- package/dist/llms/ollama.d.ts +3 -4
- package/dist/llms/ollama.js +29 -14
- package/dist/llms/openai-chat.cjs +1 -5
- package/dist/llms/openai-chat.d.ts +1 -1
- package/dist/llms/openai-chat.js +1 -5
- package/dist/llms/openai.cjs +3 -4
- package/dist/llms/openai.d.ts +2 -2
- package/dist/llms/openai.js +3 -4
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/load/import_map.cjs +4 -2
- package/dist/load/import_map.d.ts +2 -0
- package/dist/load/import_map.js +2 -0
- package/dist/prompts/chat.cjs +12 -1
- package/dist/prompts/chat.d.ts +8 -0
- package/dist/prompts/chat.js +12 -1
- package/dist/schema/output_parser.cjs +38 -6
- package/dist/schema/output_parser.d.ts +20 -5
- package/dist/schema/output_parser.js +38 -6
- package/dist/schema/runnable/base.cjs +65 -10
- package/dist/schema/runnable/base.d.ts +17 -3
- package/dist/schema/runnable/base.js +65 -10
- package/dist/stores/message/cloudflare_d1.cjs +134 -0
- package/dist/stores/message/cloudflare_d1.d.ts +49 -0
- package/dist/stores/message/cloudflare_d1.js +130 -0
- package/dist/types/openai-types.d.ts +2 -0
- package/dist/util/ollama.cjs +2 -2
- package/dist/util/ollama.d.ts +6 -0
- package/dist/util/ollama.js +2 -2
- package/document_loaders/web/pdf.cjs +1 -0
- package/document_loaders/web/pdf.d.ts +1 -0
- package/document_loaders/web/pdf.js +1 -0
- package/llms/fireworks.cjs +1 -0
- package/llms/fireworks.d.ts +1 -0
- package/llms/fireworks.js +1 -0
- package/package.json +46 -1
- package/stores/message/cloudflare_d1.cjs +1 -0
- package/stores/message/cloudflare_d1.d.ts +1 -0
- package/stores/message/cloudflare_d1.js +1 -0
package/dist/chat_models/fireworks.d.ts (new file)
@@ -0,0 +1,33 @@
+import type { OpenAI as OpenAIClient } from "openai";
+import type { ChatOpenAICallOptions, OpenAIChatInput } from "./openai.js";
+import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
+import type { BaseChatModelParams } from "./base.js";
+import { ChatOpenAI } from "./openai.js";
+type FireworksUnsupportedArgs = "frequencyPenalty" | "presencePenalty" | "logitBias" | "functions";
+type FireworksUnsupportedCallOptions = "functions" | "function_call" | "tools";
+export type ChatFireworksCallOptions = Partial<Omit<ChatOpenAICallOptions, FireworksUnsupportedCallOptions>>;
+/**
+ * Wrapper around Fireworks API for large language models fine-tuned for chat
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export declare class ChatFireworks extends ChatOpenAI<ChatFireworksCallOptions> {
+    static lc_name(): string;
+    _llmType(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    lc_serializable: boolean;
+    fireworksApiKey?: string;
+    constructor(fields?: Partial<Omit<OpenAIChatInput, "openAIApiKey" | FireworksUnsupportedArgs>> & BaseChatModelParams & {
+        fireworksApiKey?: string;
+    });
+    toJSON(): import("../load/serializable.js").Serialized;
+    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
+    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
+}
+export {};

package/dist/chat_models/fireworks.js (new file)
@@ -0,0 +1,77 @@
+import { ChatOpenAI } from "./openai.js";
+import { getEnvironmentVariable } from "../util/env.js";
+/**
+ * Wrapper around Fireworks API for large language models fine-tuned for chat
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export class ChatFireworks extends ChatOpenAI {
+    static lc_name() {
+        return "ChatFireworks";
+    }
+    _llmType() {
+        return "fireworks";
+    }
+    get lc_secrets() {
+        return {
+            fireworksApiKey: "FIREWORKS_API_KEY",
+        };
+    }
+    constructor(fields) {
+        const fireworksApiKey = fields?.fireworksApiKey || getEnvironmentVariable("FIREWORKS_API_KEY");
+        if (!fireworksApiKey) {
+            throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+        }
+        super({
+            ...fields,
+            modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b-chat",
+            openAIApiKey: fireworksApiKey,
+            configuration: {
+                baseURL: "https://api.fireworks.ai/inference/v1",
+            },
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "fireworksApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.fireworksApiKey = fireworksApiKey;
+    }
+    toJSON() {
+        const result = super.toJSON();
+        if ("kwargs" in result &&
+            typeof result.kwargs === "object" &&
+            result.kwargs != null) {
+            delete result.kwargs.openai_api_key;
+            delete result.kwargs.configuration;
+        }
+        return result;
+    }
+    /**
+     * Calls the Fireworks API with retry logic in case of failures.
+     * @param request The request to send to the Fireworks API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the Fireworks API.
+     */
+    async completionWithRetry(request, options) {
+        delete request.frequency_penalty;
+        delete request.presence_penalty;
+        delete request.logit_bias;
+        delete request.functions;
+        if (request.stream === true) {
+            return super.completionWithRetry(request, options);
+        }
+        return super.completionWithRetry(request, options);
+    }
+}

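A minimal usage sketch of the new entrypoint (the prompt is illustrative; assumes `FIREWORKS_API_KEY` is set or a `fireworksApiKey` is passed):

```ts
import { ChatFireworks } from "langchain/chat_models/fireworks";
import { HumanMessage } from "langchain/schema";

// Defaults to "accounts/fireworks/models/llama-v2-13b-chat" and reads
// FIREWORKS_API_KEY from the environment when no key is passed in.
const model = new ChatFireworks({ temperature: 0.7 });

const response = await model.call([new HumanMessage("Hello, Fireworks!")]);
console.log(response.content);
```
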
package/dist/chat_models/ollama.cjs
@@ -303,11 +303,28 @@ class ChatOllama extends base_js_1.SimpleChatModel {
             prompt: this._formatMessagesAsPrompt(input),
         }, options));
         for await (const chunk of stream) {
-            yield new index_js_1.ChatGenerationChunk({
-                text: chunk.response,
-                message: new index_js_1.AIMessageChunk({ content: chunk.response }),
-            });
-            await runManager?.handleLLMNewToken(chunk.response ?? "");
+            if (!chunk.done) {
+                yield new index_js_1.ChatGenerationChunk({
+                    text: chunk.response,
+                    message: new index_js_1.AIMessageChunk({ content: chunk.response }),
+                });
+                await runManager?.handleLLMNewToken(chunk.response ?? "");
+            }
+            else {
+                yield new index_js_1.ChatGenerationChunk({
+                    text: "",
+                    message: new index_js_1.AIMessageChunk({ content: "" }),
+                    generationInfo: {
+                        model: chunk.model,
+                        total_duration: chunk.total_duration,
+                        load_duration: chunk.load_duration,
+                        prompt_eval_count: chunk.prompt_eval_count,
+                        prompt_eval_duration: chunk.prompt_eval_duration,
+                        eval_count: chunk.eval_count,
+                        eval_duration: chunk.eval_duration,
+                    },
+                });
+            }
         }
     }
     _formatMessagesAsPrompt(messages) {

@@ -336,14 +353,10 @@ class ChatOllama extends base_js_1.SimpleChatModel {
         return formattedMessages;
     }
     /** @ignore */
-    async _call(messages, options) {
-        const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, {
-            ...this.invocationParams(options),
-            prompt: this._formatMessagesAsPrompt(messages),
-        }, options));
+    async _call(messages, options, runManager) {
         const chunks = [];
-        for await (const chunk of stream) {
-            chunks.push(chunk.response);
+        for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
+            chunks.push(chunk.message.content);
         }
         return chunks.join("");
     }

package/dist/chat_models/ollama.d.ts
@@ -14,8 +14,7 @@ export interface OllamaCallOptions extends BaseLanguageModelCallOptions {
  * models in a chat-like fashion. It extends the SimpleChatModel class and
  * implements the OllamaInput interface.
  */
-export declare class ChatOllama extends SimpleChatModel implements OllamaInput {
-    CallOptions: OllamaCallOptions;
+export declare class ChatOllama extends SimpleChatModel<OllamaCallOptions> implements OllamaInput {
     static lc_name(): string;
     lc_serializable: boolean;
     model: string;

@@ -97,5 +96,5 @@ export declare class ChatOllama extends SimpleChatModel implements OllamaInput {
     _streamResponseChunks(input: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     protected _formatMessagesAsPrompt(messages: BaseMessage[]): string;
     /** @ignore */
-    _call(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<string>;
+    _call(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
 }

package/dist/chat_models/ollama.js
@@ -300,11 +300,28 @@ export class ChatOllama extends SimpleChatModel {
             prompt: this._formatMessagesAsPrompt(input),
         }, options));
         for await (const chunk of stream) {
-            yield new ChatGenerationChunk({
-                text: chunk.response,
-                message: new AIMessageChunk({ content: chunk.response }),
-            });
-            await runManager?.handleLLMNewToken(chunk.response ?? "");
+            if (!chunk.done) {
+                yield new ChatGenerationChunk({
+                    text: chunk.response,
+                    message: new AIMessageChunk({ content: chunk.response }),
+                });
+                await runManager?.handleLLMNewToken(chunk.response ?? "");
+            }
+            else {
+                yield new ChatGenerationChunk({
+                    text: "",
+                    message: new AIMessageChunk({ content: "" }),
+                    generationInfo: {
+                        model: chunk.model,
+                        total_duration: chunk.total_duration,
+                        load_duration: chunk.load_duration,
+                        prompt_eval_count: chunk.prompt_eval_count,
+                        prompt_eval_duration: chunk.prompt_eval_duration,
+                        eval_count: chunk.eval_count,
+                        eval_duration: chunk.eval_duration,
+                    },
+                });
+            }
         }
     }
     _formatMessagesAsPrompt(messages) {

@@ -333,14 +350,10 @@ export class ChatOllama extends SimpleChatModel {
         return formattedMessages;
     }
     /** @ignore */
-    async _call(messages, options) {
-        const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, {
-            ...this.invocationParams(options),
-            prompt: this._formatMessagesAsPrompt(messages),
-        }, options));
+    async _call(messages, options, runManager) {
         const chunks = [];
-        for await (const chunk of stream) {
-            chunks.push(chunk.response);
+        for await (const chunk of this._streamResponseChunks(messages, options, runManager)) {
+            chunks.push(chunk.message.content);
         }
         return chunks.join("");
     }

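The practical effect of the Ollama changes: `_call` now delegates to `_streamResponseChunks`, so `handleLLMNewToken` callbacks fire on plain `call()` invocations, and the final `done` chunk surfaces Ollama's timing stats via `generationInfo`. A sketch of observing tokens (the local model name is an assumption):

```ts
import { ChatOllama } from "langchain/chat_models/ollama";
import { HumanMessage } from "langchain/schema";

const model = new ChatOllama({
  baseUrl: "http://localhost:11434", // Ollama's default endpoint
  model: "llama2", // assumes this model has been pulled locally
});

// Token callbacks now fire even without using the streaming API directly.
const response = await model.call(
  [new HumanMessage("Why is the sky blue?")],
  undefined,
  [
    {
      handleLLMNewToken(token: string) {
        process.stdout.write(token);
      },
    },
  ]
);
console.log(); // newline after the streamed tokens
```
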
package/dist/chat_models/openai.d.ts
@@ -37,9 +37,9 @@ export interface ChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCa
  * `openai.createChatCompletion`} can be passed through {@link modelKwargs}, even
  * if not explicitly available on this class.
  */
-export declare class ChatOpenAI extends BaseChatModel<
+export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions> extends BaseChatModel<CallOptions> implements OpenAIChatInput, AzureOpenAIInput {
     static lc_name(): string;
-    get callKeys():
+    get callKeys(): string[];
     lc_serializable: boolean;
     get lc_secrets(): {
         [key: string]: string;

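Making `ChatOpenAI` generic over its call options is what lets OpenAI-compatible subclasses such as `ChatFireworks` narrow the per-call surface. A sketch of the pattern with a hypothetical provider:

```ts
import { ChatOpenAI, ChatOpenAICallOptions } from "langchain/chat_models/openai";

// Hypothetical provider without function-calling support: drop those
// fields from the per-call options type.
type ChatMyProviderCallOptions = Partial<
  Omit<ChatOpenAICallOptions, "functions" | "function_call">
>;

class ChatMyProvider extends ChatOpenAI<ChatMyProviderCallOptions> {
  static lc_name() {
    return "ChatMyProvider";
  }
}
```
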
package/dist/document_loaders/web/pdf.cjs (new file)
@@ -0,0 +1,87 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.WebPDFLoader = void 0;
+const pdf_js_1 = require("pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js");
+const document_js_1 = require("../../document.cjs");
+const base_js_1 = require("../base.cjs");
+/**
+ * A document loader for loading data from PDFs.
+ */
+class WebPDFLoader extends base_js_1.BaseDocumentLoader {
+    constructor(blob, { splitPages = true } = {}) {
+        super();
+        Object.defineProperty(this, "blob", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "splitPages", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        this.blob = blob;
+        this.splitPages = splitPages ?? this.splitPages;
+    }
+    /**
+     * Loads the contents of the PDF as documents.
+     * @returns An array of Documents representing the retrieved data.
+     */
+    async load() {
+        const parsedPdf = await (0, pdf_js_1.getDocument)({
+            data: new Uint8Array(await this.blob.arrayBuffer()),
+            useWorkerFetch: false,
+            isEvalSupported: false,
+            useSystemFonts: true,
+        }).promise;
+        const meta = await parsedPdf.getMetadata().catch(() => null);
+        const documents = [];
+        for (let i = 1; i <= parsedPdf.numPages; i += 1) {
+            const page = await parsedPdf.getPage(i);
+            const content = await page.getTextContent();
+            if (content.items.length === 0) {
+                continue;
+            }
+            const text = content.items
+                .map((item) => item.str)
+                .join("\n");
+            documents.push(new document_js_1.Document({
+                pageContent: text,
+                metadata: {
+                    pdf: {
+                        version: pdf_js_1.version,
+                        info: meta?.info,
+                        metadata: meta?.metadata,
+                        totalPages: parsedPdf.numPages,
+                    },
+                    loc: {
+                        pageNumber: i,
+                    },
+                },
+            }));
+        }
+        if (this.splitPages) {
+            return documents;
+        }
+        if (documents.length === 0) {
+            return [];
+        }
+        return [
+            new document_js_1.Document({
+                pageContent: documents.map((doc) => doc.pageContent).join("\n\n"),
+                metadata: {
+                    pdf: {
+                        version: pdf_js_1.version,
+                        info: meta?.info,
+                        metadata: meta?.metadata,
+                        totalPages: parsedPdf.numPages,
+                    },
+                },
+            }),
+        ];
+        return documents;
+    }
+}
+exports.WebPDFLoader = WebPDFLoader;

package/dist/document_loaders/web/pdf.d.ts (new file)
@@ -0,0 +1,17 @@
+import { Document } from "../../document.js";
+import { BaseDocumentLoader } from "../base.js";
+/**
+ * A document loader for loading data from PDFs.
+ */
+export declare class WebPDFLoader extends BaseDocumentLoader {
+    protected blob: Blob;
+    protected splitPages: boolean;
+    constructor(blob: Blob, { splitPages }?: {
+        splitPages?: boolean | undefined;
+    });
+    /**
+     * Loads the contents of the PDF as documents.
+     * @returns An array of Documents representing the retrieved data.
+     */
+    load(): Promise<Document[]>;
+}

package/dist/document_loaders/web/pdf.js (new file)
@@ -0,0 +1,83 @@
+import { getDocument, version, } from "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js";
+import { Document } from "../../document.js";
+import { BaseDocumentLoader } from "../base.js";
+/**
+ * A document loader for loading data from PDFs.
+ */
+export class WebPDFLoader extends BaseDocumentLoader {
+    constructor(blob, { splitPages = true } = {}) {
+        super();
+        Object.defineProperty(this, "blob", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "splitPages", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        this.blob = blob;
+        this.splitPages = splitPages ?? this.splitPages;
+    }
+    /**
+     * Loads the contents of the PDF as documents.
+     * @returns An array of Documents representing the retrieved data.
+     */
+    async load() {
+        const parsedPdf = await getDocument({
+            data: new Uint8Array(await this.blob.arrayBuffer()),
+            useWorkerFetch: false,
+            isEvalSupported: false,
+            useSystemFonts: true,
+        }).promise;
+        const meta = await parsedPdf.getMetadata().catch(() => null);
+        const documents = [];
+        for (let i = 1; i <= parsedPdf.numPages; i += 1) {
+            const page = await parsedPdf.getPage(i);
+            const content = await page.getTextContent();
+            if (content.items.length === 0) {
+                continue;
+            }
+            const text = content.items
+                .map((item) => item.str)
+                .join("\n");
+            documents.push(new Document({
+                pageContent: text,
+                metadata: {
+                    pdf: {
+                        version,
+                        info: meta?.info,
+                        metadata: meta?.metadata,
+                        totalPages: parsedPdf.numPages,
+                    },
+                    loc: {
+                        pageNumber: i,
+                    },
+                },
+            }));
+        }
+        if (this.splitPages) {
+            return documents;
+        }
+        if (documents.length === 0) {
+            return [];
+        }
+        return [
+            new Document({
+                pageContent: documents.map((doc) => doc.pageContent).join("\n\n"),
+                metadata: {
+                    pdf: {
+                        version,
+                        info: meta?.info,
+                        metadata: meta?.metadata,
+                        totalPages: parsedPdf.numPages,
+                    },
+                },
+            }),
+        ];
+        return documents;
+    }
+}

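A minimal usage sketch of the new loader (the URL is a placeholder; any `Blob` source works, e.g. a `fetch` response or a file input):

```ts
import { WebPDFLoader } from "langchain/document_loaders/web/pdf";

const response = await fetch("https://example.com/sample.pdf"); // placeholder URL
const blob = await response.blob();

// splitPages: false concatenates all pages into a single Document.
const loader = new WebPDFLoader(blob, { splitPages: false });
const docs = await loader.load();
console.log(docs.length, docs[0]?.metadata.pdf.totalPages);
```
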
package/dist/evaluation/agents/prompt.cjs
@@ -90,8 +90,7 @@ The model did not use the appropriate tools to answer the question.\
 Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
 
 Score: 2`;
-exports.EVAL_CHAT_PROMPT =
-/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
+exports.EVAL_CHAT_PROMPT = index_js_1.ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),

@@ -124,7 +123,7 @@ i. Is the final answer helpful?
 iv. Does the AI language model use too many steps to answer the question?
 v. Are the appropriate tools used to answer the question?`;
 exports.TOOL_FREE_EVAL_CHAT_PROMPT =
-/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),

package/dist/evaluation/agents/prompt.js
@@ -87,8 +87,7 @@ The model did not use the appropriate tools to answer the question.\
 Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
 
 Score: 2`;
-export const EVAL_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+export const EVAL_CHAT_PROMPT = /* #__PURE__ */ ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),

@@ -121,7 +120,7 @@ i. Is the final answer helpful?
 iv. Does the AI language model use too many steps to answer the question?
 v. Are the appropriate tools used to answer the question?`;
 export const TOOL_FREE_EVAL_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),

package/dist/experimental/plan_and_execute/prompt.cjs
@@ -14,7 +14,7 @@ exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
 exports.PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
     /* #__PURE__ */ chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
 ]);

package/dist/experimental/plan_and_execute/prompt.js
@@ -11,7 +11,7 @@ export const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
 export const PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
     /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(`{input}`),
 ]);

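These prompt modules all move from `fromPromptMessages` to `fromMessages` (added to `prompts/chat` in this release); user code can make the same one-line rename:

```ts
import {
  ChatPromptTemplate,
  HumanMessagePromptTemplate,
  SystemMessagePromptTemplate,
} from "langchain/prompts";

// Previously: ChatPromptTemplate.fromPromptMessages([...])
const prompt = ChatPromptTemplate.fromMessages([
  SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant."),
  HumanMessagePromptTemplate.fromTemplate("{input}"),
]);

const messages = await prompt.formatMessages({ input: "Hi there" });
```
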
package/dist/llms/fireworks.cjs (new file)
@@ -0,0 +1,92 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.Fireworks = void 0;
+const env_js_1 = require("../util/env.cjs");
+const openai_js_1 = require("./openai.cjs");
+/**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+class Fireworks extends openai_js_1.OpenAI {
+    static lc_name() {
+        return "Fireworks";
+    }
+    _llmType() {
+        return "fireworks";
+    }
+    get lc_secrets() {
+        return {
+            fireworksApiKey: "FIREWORKS_API_KEY",
+        };
+    }
+    constructor(fields) {
+        const fireworksApiKey = fields?.fireworksApiKey || (0, env_js_1.getEnvironmentVariable)("FIREWORKS_API_KEY");
+        if (!fireworksApiKey) {
+            throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+        }
+        super({
+            ...fields,
+            openAIApiKey: fireworksApiKey,
+            modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
+            configuration: {
+                baseURL: "https://api.fireworks.ai/inference/v1",
+            },
+        });
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "fireworksApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.fireworksApiKey = fireworksApiKey;
+    }
+    toJSON() {
+        const result = super.toJSON();
+        if ("kwargs" in result &&
+            typeof result.kwargs === "object" &&
+            result.kwargs != null) {
+            delete result.kwargs.openai_api_key;
+            delete result.kwargs.configuration;
+        }
+        return result;
+    }
+    /**
+     * Calls the Fireworks API with retry logic in case of failures.
+     * @param request The request to send to the Fireworks API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the Fireworks API.
+     */
+    async completionWithRetry(request, options) {
+        // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
+        if (Array.isArray(request.prompt)) {
+            if (request.prompt.length > 1) {
+                throw new Error("Multiple prompts are not supported by Fireworks");
+            }
+            const prompt = request.prompt[0];
+            if (typeof prompt !== "string") {
+                throw new Error("Only string prompts are supported by Fireworks");
+            }
+            request.prompt = prompt;
+        }
+        delete request.frequency_penalty;
+        delete request.presence_penalty;
+        delete request.best_of;
+        delete request.logit_bias;
+        if (request.stream === true) {
+            return super.completionWithRetry(request, options);
+        }
+        return super.completionWithRetry(request, options);
+    }
+}
+exports.Fireworks = Fireworks;

package/dist/llms/fireworks.d.ts (new file)
@@ -0,0 +1,33 @@
+import type { OpenAI as OpenAIClient } from "openai";
+import type { BaseLLMParams } from "./base.js";
+import type { OpenAICallOptions, OpenAIInput } from "./openai.js";
+import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
+import { OpenAI } from "./openai.js";
+type FireworksUnsupportedArgs = "frequencyPenalty" | "presencePenalty" | "bestOf" | "logitBias";
+type FireworksUnsupportedCallOptions = "functions" | "function_call" | "tools";
+export type FireworksCallOptions = Partial<Omit<OpenAICallOptions, FireworksUnsupportedCallOptions>>;
+/**
+ * Wrapper around Fireworks API for large language models
+ *
+ * Fireworks API is compatible to the OpenAI API with some limitations described in
+ * https://readme.fireworks.ai/docs/openai-compatibility.
+ *
+ * To use, you should have the `openai` package installed and
+ * the `FIREWORKS_API_KEY` environment variable set.
+ */
+export declare class Fireworks extends OpenAI<FireworksCallOptions> {
+    static lc_name(): string;
+    _llmType(): string;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    lc_serializable: boolean;
+    fireworksApiKey?: string;
+    constructor(fields?: Partial<Omit<OpenAIInput, "openAIApiKey" | FireworksUnsupportedArgs>> & BaseLLMParams & {
+        fireworksApiKey?: string;
+    });
+    toJSON(): import("../load/serializable.js").Serialized;
+    completionWithRetry(request: OpenAIClient.CompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Completion>>;
+    completionWithRetry(request: OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Completions.Completion>;
+}
+export {};
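A minimal sketch of the completions-style counterpart (temperature and prompt are illustrative). Note from the implementation above that Fireworks rejects batched or non-string prompts, and that `frequency_penalty`, `presence_penalty`, `best_of`, and `logit_bias` are stripped before the request is sent:

```ts
import { Fireworks } from "langchain/llms/fireworks";

// Defaults to "accounts/fireworks/models/llama-v2-13b" and reads
// FIREWORKS_API_KEY from the environment when no key is passed in.
const llm = new Fireworks({ temperature: 0.2, maxTokens: 256 });

// Pass a single string prompt; batched prompts throw.
const text = await llm.call("List three uses of an LLM.");
console.log(text);
```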