langchain 0.0.144 → 0.0.146
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- package/dist/chains/api/api_chain.d.ts +1 -1
- package/dist/chains/openai_functions/extraction.d.ts +1 -1
- package/dist/chains/openai_functions/openapi.d.ts +1 -1
- package/dist/chains/openai_functions/structured_output.d.ts +1 -1
- package/dist/chains/openai_functions/tagging.d.ts +1 -1
- package/dist/chat_models/anthropic.d.ts +2 -2
- package/dist/embeddings/ollama.cjs +114 -0
- package/dist/embeddings/ollama.d.ts +34 -0
- package/dist/embeddings/ollama.js +110 -0
- package/dist/hub.cjs +14 -0
- package/dist/hub.d.ts +14 -0
- package/dist/hub.js +14 -0
- package/dist/load/import_constants.cjs +1 -0
- package/dist/load/import_constants.js +1 -0
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/schema/index.cjs +5 -22
- package/dist/schema/index.d.ts +1 -5
- package/dist/schema/index.js +5 -22
- package/dist/types/googlevertexai-types.d.ts +1 -1
- package/dist/vectorstores/voy.cjs +158 -0
- package/dist/vectorstores/voy.d.ts +73 -0
- package/dist/vectorstores/voy.js +154 -0
- package/embeddings/ollama.cjs +1 -0
- package/embeddings/ollama.d.ts +1 -0
- package/embeddings/ollama.js +1 -0
- package/package.json +32 -76
- package/vectorstores/voy.cjs +1 -0
- package/vectorstores/voy.d.ts +1 -0
- package/vectorstores/voy.js +1 -0
- package/chat_models.cjs +0 -1
- package/chat_models.d.ts +0 -1
- package/chat_models.js +0 -1
- package/dist/chat_models/index.cjs +0 -11
- package/dist/chat_models/index.d.ts +0 -3
- package/dist/chat_models/index.js +0 -4
- package/dist/document_loaders/index.cjs +0 -40
- package/dist/document_loaders/index.d.ts +0 -18
- package/dist/document_loaders/index.js +0 -18
- package/dist/embeddings/index.cjs +0 -12
- package/dist/embeddings/index.d.ts +0 -4
- package/dist/embeddings/index.js +0 -5
- package/dist/index.cjs +0 -12
- package/dist/index.d.ts +0 -3
- package/dist/index.js +0 -4
- package/dist/llms/index.cjs +0 -18
- package/dist/llms/index.d.ts +0 -6
- package/dist/llms/index.js +0 -7
- package/dist/retrievers/index.cjs +0 -14
- package/dist/retrievers/index.d.ts +0 -5
- package/dist/retrievers/index.js +0 -6
- package/dist/vectorstores/index.cjs +0 -17
- package/dist/vectorstores/index.d.ts +0 -6
- package/dist/vectorstores/index.js +0 -7
- package/document_loaders.cjs +0 -1
- package/document_loaders.d.ts +0 -1
- package/document_loaders.js +0 -1
- package/embeddings.cjs +0 -1
- package/embeddings.d.ts +0 -1
- package/embeddings.js +0 -1
- package/llms.cjs +0 -1
- package/llms.d.ts +0 -1
- package/llms.js +0 -1
- package/retrievers.cjs +0 -1
- package/retrievers.d.ts +0 -1
- package/retrievers.js +0 -1
- package/vectorstores.cjs +0 -1
- package/vectorstores.d.ts +0 -1
- package/vectorstores.js +0 -1

package/dist/chains/api/api_chain.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { LLMChain } from "../llm_chain.js";
 import { BaseLanguageModel } from "../../base_language/index.js";
 import { CallbackManagerForChainRun } from "../../callbacks/manager.js";
 import { ChainValues } from "../../schema/index.js";
-import { BasePromptTemplate } from "../../
+import { BasePromptTemplate } from "../../prompts/base.js";
 /**
  * Interface that extends ChainInputs and defines additional input
  * parameters specific to an APIChain.

package/dist/chains/openai_functions/extraction.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { z } from "zod";
 import { FunctionParameters } from "../../output_parsers/openai_functions.js";
 import { LLMChain } from "../llm_chain.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Function that creates an extraction chain using the provided JSON schema.

package/dist/chains/openai_functions/openapi.d.ts
CHANGED
@@ -3,7 +3,7 @@ import { BaseChain } from "../base.js";
 import { LLMChainInput } from "../llm_chain.js";
 import { BasePromptTemplate } from "../../prompts/base.js";
 import { SequentialChain } from "../sequential_chain.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the options for creating an OpenAPI chain.

package/dist/chains/openai_functions/structured_output.d.ts
CHANGED
@@ -7,7 +7,7 @@ import { BasePromptTemplate } from "../../prompts/index.js";
 import { BaseLLMOutputParser } from "../../schema/output_parser.js";
 import { OutputFunctionsParser } from "../../output_parsers/openai_functions.js";
 import { ChatGeneration } from "../../schema/index.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the input for creating a structured output chain. It

package/dist/chains/openai_functions/tagging.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { z } from "zod";
 import { PromptTemplate } from "../../prompts/prompt.js";
 import { FunctionParameters } from "../../output_parsers/openai_functions.js";
 import { LLMChain, LLMChainInput } from "../llm_chain.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the options for creating a tagging chain.

package/dist/chat_models/anthropic.d.ts
CHANGED
@@ -103,9 +103,9 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
         model: (string & {}) | "claude-2" | "claude-instant-1";
         temperature?: number | undefined;
         top_p?: number | undefined;
+        top_k?: number | undefined;
         max_tokens_to_sample: number;
         stop_sequences?: string[] | undefined;
-        top_k?: number | undefined;
         model_name: string;
     };
     /**
@@ -117,9 +117,9 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
         model: (string & {}) | "claude-2" | "claude-instant-1";
         temperature?: number | undefined;
         top_p?: number | undefined;
+        top_k?: number | undefined;
         max_tokens_to_sample: number;
         stop_sequences?: string[] | undefined;
-        top_k?: number | undefined;
         model_name: string;
     };
     _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
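
Functionally this is only a reordering of the generated declaration: `top_k` now sits with the other sampling parameters instead of after `stop_sequences`. As a rough usage sketch (assuming the usual camelCased `ChatAnthropic` constructor fields, which surface as the snake_cased invocation params above):

import { ChatAnthropic } from "langchain/chat_models/anthropic";

const chat = new ChatAnthropic({
  modelName: "claude-2",
  temperature: 0.7,
  topP: 0.9,
  topK: 40, // emitted as the `top_k` field shown in the declaration above
  maxTokensToSample: 256,
});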

package/dist/embeddings/ollama.cjs
ADDED
@@ -0,0 +1,114 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OllamaEmbeddings = void 0;
+const base_js_1 = require("./base.cjs");
+class OllamaEmbeddings extends base_js_1.Embeddings {
+    constructor(params) {
+        super(params || {});
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "llama2"
+        });
+        Object.defineProperty(this, "baseUrl", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "http://localhost:11434"
+        });
+        Object.defineProperty(this, "requestOptions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        if (params?.model) {
+            this.model = params.model;
+        }
+        if (params?.baseUrl) {
+            this.baseUrl = params.baseUrl;
+        }
+        if (params?.requestOptions) {
+            this.requestOptions = this._convertOptions(params.requestOptions);
+        }
+    }
+    /** convert camelCased Ollama request options like "useMMap" to
+     * the snake_cased equivalent which the ollama API actually uses.
+     * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
+     */
+    _convertOptions(requestOptions) {
+        const snakeCasedOptions = {};
+        const mapping = {
+            embeddingOnly: "embedding_only",
+            f16KV: "f16_kv",
+            frequencyPenalty: "frequency_penalty",
+            logitsAll: "logits_all",
+            lowVram: "low_vram",
+            mainGpu: "main_gpu",
+            mirostat: "mirostat",
+            mirostatEta: "mirostat_eta",
+            mirostatTau: "mirostat_tau",
+            numBatch: "num_batch",
+            numCtx: "num_ctx",
+            numGpu: "num_gpu",
+            numGqa: "num_gqa",
+            numKeep: "num_keep",
+            numThread: "num_thread",
+            penalizeNewline: "penalize_newline",
+            presencePenalty: "presence_penalty",
+            repeatLastN: "repeat_last_n",
+            repeatPenalty: "repeat_penalty",
+            ropeFrequencyBase: "rope_frequency_base",
+            ropeFrequencyScale: "rope_frequency_scale",
+            temperature: "temperature",
+            stop: "stop",
+            tfsZ: "tfs_z",
+            topK: "top_k",
+            topP: "top_p",
+            typicalP: "typical_p",
+            useMLock: "use_mlock",
+            useMMap: "use_mmap",
+            vocabOnly: "vocab_only",
+        };
+        for (const [key, value] of Object.entries(requestOptions)) {
+            const snakeCasedOption = mapping[key];
+            if (snakeCasedOption) {
+                snakeCasedOptions[snakeCasedOption] = value;
+            }
+        }
+        return snakeCasedOptions;
+    }
+    async _request(prompt) {
+        const { model, baseUrl, requestOptions } = this;
+        const response = await fetch(`${baseUrl}/api/embeddings`, {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+                prompt,
+                model,
+                options: requestOptions,
+            }),
+        });
+        if (!response.ok) {
+            throw new Error(`Request to Ollama server failed: ${response.status} ${response.statusText}`);
+        }
+        const json = await response.json();
+        return json.embedding;
+    }
+    async _embed(strings) {
+        const embeddings = [];
+        for await (const prompt of strings) {
+            const embedding = await this.caller.call(() => this._request(prompt));
+            embeddings.push(embedding);
+        }
+        return embeddings;
+    }
+    async embedDocuments(documents) {
+        return this._embed(documents);
+    }
+    async embedQuery(document) {
+        return (await this.embedDocuments([document]))[0];
+    }
+}
+exports.OllamaEmbeddings = OllamaEmbeddings;
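
Note that `_convertOptions` is a pure key-rename over the mapping above: any option not listed in the table is silently dropped rather than passed through. A small sketch of the expected behavior (hypothetical values):

import { OllamaEmbeddings } from "langchain/embeddings/ollama";

const embedder = new OllamaEmbeddings({
  requestOptions: { useMMap: true, numCtx: 2048, topK: 10 },
});
// embedder.requestOptions is now:
// { use_mmap: true, num_ctx: 2048, top_k: 10 }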

package/dist/embeddings/ollama.d.ts
ADDED
@@ -0,0 +1,34 @@
+import { OllamaInput, OllamaRequestParams } from "../util/ollama.js";
+import { Embeddings, EmbeddingsParams } from "./base.js";
+type CamelCasedRequestOptions = Omit<OllamaInput, "baseUrl" | "model">;
+/**
+ * Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
+ * defines additional parameters specific to the OllamaEmbeddings class.
+ */
+interface OllamaEmbeddingsParams extends EmbeddingsParams {
+    /** The Ollama model to use, e.g: "llama2:13b" */
+    model?: string;
+    /** Base URL of the Ollama server, defaults to "http://localhost:11434" */
+    baseUrl?: string;
+    /** Advanced Ollama API request parameters in camelCase, see
+     * https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
+     * for details of the available parameters.
+     */
+    requestOptions?: CamelCasedRequestOptions;
+}
+export declare class OllamaEmbeddings extends Embeddings {
+    model: string;
+    baseUrl: string;
+    requestOptions?: OllamaRequestParams["options"];
+    constructor(params?: OllamaEmbeddingsParams);
+    /** convert camelCased Ollama request options like "useMMap" to
+     * the snake_cased equivalent which the ollama API actually uses.
+     * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
+     */
+    _convertOptions(requestOptions: CamelCasedRequestOptions): Record<string, unknown>;
+    _request(prompt: string): Promise<number[]>;
+    _embed(strings: string[]): Promise<number[][]>;
+    embedDocuments(documents: string[]): Promise<number[][]>;
+    embedQuery(document: string): Promise<number[]>;
+}
+export {};
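
A minimal usage sketch of the new entrypoint (it assumes a local Ollama server at the default address with the "llama2" model pulled, and must run inside an async context):

import { OllamaEmbeddings } from "langchain/embeddings/ollama";

const embeddings = new OllamaEmbeddings({
  model: "llama2", // e.g. "llama2:13b" for a larger variant
  baseUrl: "http://localhost:11434",
  requestOptions: { useMMap: true },
});

// One POST to /api/embeddings per input string, routed through this.caller
const vectors = await embeddings.embedDocuments(["Hello world", "Goodbye world"]);
const queryVector = await embeddings.embedQuery("Hello");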

package/dist/embeddings/ollama.js
ADDED
@@ -0,0 +1,110 @@
+import { Embeddings } from "./base.js";
+export class OllamaEmbeddings extends Embeddings {
+    constructor(params) {
+        super(params || {});
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "llama2"
+        });
+        Object.defineProperty(this, "baseUrl", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "http://localhost:11434"
+        });
+        Object.defineProperty(this, "requestOptions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        if (params?.model) {
+            this.model = params.model;
+        }
+        if (params?.baseUrl) {
+            this.baseUrl = params.baseUrl;
+        }
+        if (params?.requestOptions) {
+            this.requestOptions = this._convertOptions(params.requestOptions);
+        }
+    }
+    /** convert camelCased Ollama request options like "useMMap" to
+     * the snake_cased equivalent which the ollama API actually uses.
+     * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
+     */
+    _convertOptions(requestOptions) {
+        const snakeCasedOptions = {};
+        const mapping = {
+            embeddingOnly: "embedding_only",
+            f16KV: "f16_kv",
+            frequencyPenalty: "frequency_penalty",
+            logitsAll: "logits_all",
+            lowVram: "low_vram",
+            mainGpu: "main_gpu",
+            mirostat: "mirostat",
+            mirostatEta: "mirostat_eta",
+            mirostatTau: "mirostat_tau",
+            numBatch: "num_batch",
+            numCtx: "num_ctx",
+            numGpu: "num_gpu",
+            numGqa: "num_gqa",
+            numKeep: "num_keep",
+            numThread: "num_thread",
+            penalizeNewline: "penalize_newline",
+            presencePenalty: "presence_penalty",
+            repeatLastN: "repeat_last_n",
+            repeatPenalty: "repeat_penalty",
+            ropeFrequencyBase: "rope_frequency_base",
+            ropeFrequencyScale: "rope_frequency_scale",
+            temperature: "temperature",
+            stop: "stop",
+            tfsZ: "tfs_z",
+            topK: "top_k",
+            topP: "top_p",
+            typicalP: "typical_p",
+            useMLock: "use_mlock",
+            useMMap: "use_mmap",
+            vocabOnly: "vocab_only",
+        };
+        for (const [key, value] of Object.entries(requestOptions)) {
+            const snakeCasedOption = mapping[key];
+            if (snakeCasedOption) {
+                snakeCasedOptions[snakeCasedOption] = value;
+            }
+        }
+        return snakeCasedOptions;
+    }
+    async _request(prompt) {
+        const { model, baseUrl, requestOptions } = this;
+        const response = await fetch(`${baseUrl}/api/embeddings`, {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+                prompt,
+                model,
+                options: requestOptions,
+            }),
+        });
+        if (!response.ok) {
+            throw new Error(`Request to Ollama server failed: ${response.status} ${response.statusText}`);
+        }
+        const json = await response.json();
+        return json.embedding;
+    }
+    async _embed(strings) {
+        const embeddings = [];
+        for await (const prompt of strings) {
+            const embedding = await this.caller.call(() => this._request(prompt));
+            embeddings.push(embedding);
+        }
+        return embeddings;
+    }
+    async embedDocuments(documents) {
+        return this._embed(documents);
+    }
+    async embedQuery(document) {
+        return (await this.embedDocuments([document]))[0];
+    }
+}

package/dist/hub.cjs
CHANGED
@@ -3,11 +3,25 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.pull = exports.push = void 0;
 const langchainhub_1 = require("langchainhub");
 const index_js_1 = require("./load/index.cjs");
+/**
+ * Push a prompt to the hub.
+ * If the specified repo doesn't already exist, it will be created.
+ * @param repoFullName The full name of the repo.
+ * @param runnable The prompt to push.
+ * @param options
+ * @returns The URL of the newly pushed prompt in the hub.
+ */
 async function push(repoFullName, runnable, options) {
     const client = new langchainhub_1.Client(options);
     return client.push(repoFullName, JSON.stringify(runnable), options);
 }
 exports.push = push;
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options
+ * @returns
+ */
 async function pull(ownerRepoCommit, options) {
     const client = new langchainhub_1.Client(options);
     const result = await client.pull(ownerRepoCommit);

package/dist/hub.d.ts
CHANGED
@@ -1,4 +1,18 @@
 import { ClientConfiguration, HubPushOptions } from "langchainhub";
 import { Runnable } from "./schema/runnable.js";
+/**
+ * Push a prompt to the hub.
+ * If the specified repo doesn't already exist, it will be created.
+ * @param repoFullName The full name of the repo.
+ * @param runnable The prompt to push.
+ * @param options
+ * @returns The URL of the newly pushed prompt in the hub.
+ */
 export declare function push(repoFullName: string, runnable: Runnable, options?: HubPushOptions & ClientConfiguration): Promise<string>;
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options
+ * @returns
+ */
 export declare function pull<T extends Runnable>(ownerRepoCommit: string, options?: ClientConfiguration): Promise<T>;
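
A short usage sketch of the two helpers (the repo handle and prompt below are hypothetical placeholders; run inside an async context):

import { push, pull } from "langchain/hub";
import { PromptTemplate } from "langchain/prompts";

const prompt = PromptTemplate.fromTemplate("Tell me a joke about {topic}");
// Creates the repo if it doesn't exist; resolves to the prompt's hub URL
const url = await push("my-handle/joke-prompt", prompt);
// "my-handle/joke-prompt:<commit>" would pin a specific commit
const pulled = await pull<PromptTemplate>("my-handle/joke-prompt");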

package/dist/hub.js
CHANGED
@@ -1,9 +1,23 @@
 import { Client } from "langchainhub";
 import { load } from "./load/index.js";
+/**
+ * Push a prompt to the hub.
+ * If the specified repo doesn't already exist, it will be created.
+ * @param repoFullName The full name of the repo.
+ * @param runnable The prompt to push.
+ * @param options
+ * @returns The URL of the newly pushed prompt in the hub.
+ */
 export async function push(repoFullName, runnable, options) {
     const client = new Client(options);
     return client.push(repoFullName, JSON.stringify(runnable), options);
 }
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options
+ * @returns
+ */
 export async function pull(ownerRepoCommit, options) {
     const client = new Client(options);
     const result = await client.pull(ownerRepoCommit);

package/dist/load/import_constants.cjs
CHANGED
@@ -55,6 +55,7 @@ exports.optionalImportEntrypoints = [
     "langchain/vectorstores/singlestore",
     "langchain/vectorstores/tigris",
     "langchain/vectorstores/usearch",
+    "langchain/vectorstores/voy",
     "langchain/vectorstores/zep",
     "langchain/memory/zep",
     "langchain/document_loaders/web/apify_dataset",

package/dist/load/import_constants.js
CHANGED
@@ -52,6 +52,7 @@ export const optionalImportEntrypoints = [
     "langchain/vectorstores/singlestore",
     "langchain/vectorstores/tigris",
     "langchain/vectorstores/usearch",
+    "langchain/vectorstores/voy",
     "langchain/vectorstores/zep",
     "langchain/memory/zep",
     "langchain/document_loaders/web/apify_dataset",

package/dist/load/import_map.cjs
CHANGED
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = void 0;
+exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = void 0;
 exports.load__serializable = __importStar(require("../load/serializable.cjs"));
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -36,6 +36,7 @@ exports.chains__openai_functions = __importStar(require("../chains/openai_functi
 exports.embeddings__base = __importStar(require("../embeddings/base.cjs"));
 exports.embeddings__cache_backed = __importStar(require("../embeddings/cache_backed.cjs"));
 exports.embeddings__fake = __importStar(require("../embeddings/fake.cjs"));
+exports.embeddings__ollama = __importStar(require("../embeddings/ollama.cjs"));
 exports.embeddings__openai = __importStar(require("../embeddings/openai.cjs"));
 exports.embeddings__minimax = __importStar(require("../embeddings/minimax.cjs"));
 exports.llms__base = __importStar(require("../llms/base.cjs"));

package/dist/load/import_map.d.ts
CHANGED
@@ -8,6 +8,7 @@ export * as chains__openai_functions from "../chains/openai_functions/index.js";
 export * as embeddings__base from "../embeddings/base.js";
 export * as embeddings__cache_backed from "../embeddings/cache_backed.js";
 export * as embeddings__fake from "../embeddings/fake.js";
+export * as embeddings__ollama from "../embeddings/ollama.js";
 export * as embeddings__openai from "../embeddings/openai.js";
 export * as embeddings__minimax from "../embeddings/minimax.js";
 export * as llms__base from "../llms/base.js";

package/dist/load/import_map.js
CHANGED
@@ -9,6 +9,7 @@ export * as chains__openai_functions from "../chains/openai_functions/index.js";
 export * as embeddings__base from "../embeddings/base.js";
 export * as embeddings__cache_backed from "../embeddings/cache_backed.js";
 export * as embeddings__fake from "../embeddings/fake.js";
+export * as embeddings__ollama from "../embeddings/ollama.js";
 export * as embeddings__openai from "../embeddings/openai.js";
 export * as embeddings__minimax from "../embeddings/minimax.js";
 export * as llms__base from "../llms/base.js";

package/dist/schema/index.cjs
CHANGED
@@ -333,35 +333,18 @@ function coerceMessageLikeToMessage(messageLike) {
     else if (isBaseMessage(messageLike)) {
         return messageLike;
     }
-    let role;
-    let content;
-    let name;
-    if (Array.isArray(messageLike)) {
-        [role, content] = messageLike;
-        name = "";
-    }
-    else {
-        role = messageLike.role;
-        content = messageLike.content;
-        name = messageLike.name;
-    }
-    if (role === "human" || role === "user") {
+    const [type, content] = messageLike;
+    if (type === "human" || type === "user") {
         return new HumanMessage({ content });
     }
-    else if (role === "ai" || role === "assistant") {
+    else if (type === "ai" || type === "assistant") {
         return new AIMessage({ content });
     }
-    else if (role === "system") {
+    else if (type === "system") {
         return new SystemMessage({ content });
     }
-    else if (role === "function") {
-        if (!name) {
-            throw new Error(`Unable to coerce function message from object: no "name" field provided.`);
-        }
-        return new FunctionMessage({ content, name });
-    }
     else {
-
+        throw new Error(`Unable to coerce message from array: only human, AI, or system message coercion is currently supported.`);
     }
 }
 exports.coerceMessageLikeToMessage = coerceMessageLikeToMessage;
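
Runtime behavior after the change, per the code above: string-role tuples for human/user, ai/assistant, and system still coerce, while the removed object and "function" forms now fall through to the thrown Error. A sketch (hypothetical inputs):

import { coerceMessageLikeToMessage } from "langchain/schema";

coerceMessageLikeToMessage(["human", "hi"]);        // -> HumanMessage
coerceMessageLikeToMessage(["assistant", "yo"]);    // -> AIMessage
coerceMessageLikeToMessage(["system", "be terse"]); // -> SystemMessage
coerceMessageLikeToMessage(["function", "x"]);      // -> throws Error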

package/dist/schema/index.d.ts
CHANGED
@@ -210,11 +210,7 @@ export declare class ChatMessage extends BaseMessage implements ChatMessageField
     _getType(): MessageType;
     static isInstance(message: BaseMessage): message is ChatMessage;
 }
-export type BaseMessageLike = BaseMessage | {
-    role: MessageType | "user" | "assistant" | (string & Record<never, never>);
-    content: string;
-    name?: string;
-} | [
+export type BaseMessageLike = BaseMessage | [
     MessageType | "user" | "assistant" | (string & Record<never, never>),
     string
 ] | string;
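
At the type level, `BaseMessageLike` now accepts only three shapes; the `{ role, content, name }` object form was dropped along with the runtime branch that consumed it. A sketch:

import { HumanMessage, type BaseMessageLike } from "langchain/schema";

const stillValid: BaseMessageLike[] = [
  "a bare string",                   // coerced to a human message
  ["ai", "a [type, content] tuple"],
  new HumanMessage("a BaseMessage instance"),
];
// No longer type-checks: { role: "user", content: "hi" }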

package/dist/schema/index.js
CHANGED
@@ -317,35 +317,18 @@ export function coerceMessageLikeToMessage(messageLike) {
     else if (isBaseMessage(messageLike)) {
         return messageLike;
     }
-    let role;
-    let content;
-    let name;
-    if (Array.isArray(messageLike)) {
-        [role, content] = messageLike;
-        name = "";
-    }
-    else {
-        role = messageLike.role;
-        content = messageLike.content;
-        name = messageLike.name;
-    }
-    if (role === "human" || role === "user") {
+    const [type, content] = messageLike;
+    if (type === "human" || type === "user") {
         return new HumanMessage({ content });
     }
-    else if (role === "ai" || role === "assistant") {
+    else if (type === "ai" || type === "assistant") {
         return new AIMessage({ content });
     }
-    else if (role === "system") {
+    else if (type === "system") {
         return new SystemMessage({ content });
     }
-    else if (role === "function") {
-        if (!name) {
-            throw new Error(`Unable to coerce function message from object: no "name" field provided.`);
-        }
-        return new FunctionMessage({ content, name });
-    }
     else {
-
+        throw new Error(`Unable to coerce message from array: only human, AI, or system message coercion is currently supported.`);
     }
 }
 /**