langchain 0.0.146 → 0.0.148
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/chat_models/googlevertexai/web.cjs +1 -0
- package/chat_models/googlevertexai/web.d.ts +1 -0
- package/chat_models/googlevertexai/web.js +1 -0
- package/chat_models/googlevertexai.cjs +1 -1
- package/chat_models/googlevertexai.d.ts +1 -1
- package/chat_models/googlevertexai.js +1 -1
- package/dist/base_language/index.cjs +2 -2
- package/dist/base_language/index.d.ts +2 -1
- package/dist/base_language/index.js +1 -1
- package/dist/chains/base.d.ts +1 -1
- package/dist/chains/constitutional_ai/constitutional_principle.cjs +272 -1
- package/dist/chains/constitutional_ai/constitutional_principle.js +272 -1
- package/dist/chains/openai_functions/openapi.cjs +32 -27
- package/dist/chains/openai_functions/openapi.d.ts +9 -0
- package/dist/chains/openai_functions/openapi.js +31 -27
- package/dist/chat_models/base.d.ts +1 -1
- package/dist/chat_models/{googlevertexai.cjs → googlevertexai/common.cjs} +14 -26
- package/dist/chat_models/{googlevertexai.d.ts → googlevertexai/common.d.ts} +13 -22
- package/dist/chat_models/{googlevertexai.js → googlevertexai/common.js} +12 -24
- package/dist/chat_models/googlevertexai/index.cjs +36 -0
- package/dist/chat_models/googlevertexai/index.d.ts +21 -0
- package/dist/chat_models/googlevertexai/index.js +31 -0
- package/dist/chat_models/googlevertexai/web.cjs +33 -0
- package/dist/chat_models/googlevertexai/web.d.ts +19 -0
- package/dist/chat_models/googlevertexai/web.js +28 -0
- package/dist/chat_models/openai.cjs +1 -1
- package/dist/chat_models/openai.js +1 -1
- package/dist/document_loaders/web/notionapi.cjs +93 -70
- package/dist/document_loaders/web/notionapi.d.ts +33 -1
- package/dist/document_loaders/web/notionapi.js +89 -71
- package/dist/embeddings/googlevertexai.cjs +5 -1
- package/dist/embeddings/googlevertexai.d.ts +2 -1
- package/dist/embeddings/googlevertexai.js +5 -1
- package/dist/evaluation/agents/index.cjs +17 -0
- package/dist/evaluation/agents/index.d.ts +1 -0
- package/dist/evaluation/agents/index.js +1 -0
- package/dist/evaluation/agents/prompt.cjs +132 -0
- package/dist/evaluation/agents/prompt.d.ts +6 -0
- package/dist/evaluation/agents/prompt.js +129 -0
- package/dist/evaluation/agents/trajectory.cjs +189 -0
- package/dist/evaluation/agents/trajectory.d.ts +54 -0
- package/dist/evaluation/agents/trajectory.js +184 -0
- package/dist/evaluation/base.cjs +274 -0
- package/dist/evaluation/base.d.ts +232 -0
- package/dist/evaluation/base.js +263 -0
- package/dist/evaluation/comparison/index.cjs +17 -0
- package/dist/evaluation/comparison/index.d.ts +1 -0
- package/dist/evaluation/comparison/index.js +1 -0
- package/dist/evaluation/comparison/pairwise.cjs +244 -0
- package/dist/evaluation/comparison/pairwise.d.ts +50 -0
- package/dist/evaluation/comparison/pairwise.js +238 -0
- package/dist/evaluation/comparison/prompt.cjs +74 -0
- package/dist/evaluation/comparison/prompt.d.ts +21 -0
- package/dist/evaluation/comparison/prompt.js +71 -0
- package/dist/evaluation/criteria/criteria.cjs +259 -0
- package/dist/evaluation/criteria/criteria.d.ts +73 -0
- package/dist/evaluation/criteria/criteria.js +253 -0
- package/dist/evaluation/criteria/index.cjs +17 -0
- package/dist/evaluation/criteria/index.d.ts +1 -0
- package/dist/evaluation/criteria/index.js +1 -0
- package/dist/evaluation/criteria/prompt.cjs +36 -0
- package/dist/evaluation/criteria/prompt.d.ts +12 -0
- package/dist/evaluation/criteria/prompt.js +33 -0
- package/dist/evaluation/embedding_distance/base.cjs +163 -0
- package/dist/evaluation/embedding_distance/base.d.ts +78 -0
- package/dist/evaluation/embedding_distance/base.js +156 -0
- package/dist/evaluation/embedding_distance/index.cjs +17 -0
- package/dist/evaluation/embedding_distance/index.d.ts +1 -0
- package/dist/evaluation/embedding_distance/index.js +1 -0
- package/dist/evaluation/index.cjs +6 -0
- package/dist/evaluation/index.d.ts +6 -0
- package/dist/evaluation/index.js +6 -0
- package/dist/evaluation/loader.cjs +60 -0
- package/dist/evaluation/loader.d.ts +27 -0
- package/dist/evaluation/loader.js +56 -0
- package/dist/evaluation/types.cjs +2 -0
- package/dist/evaluation/types.d.ts +35 -0
- package/dist/evaluation/types.js +1 -0
- package/dist/experimental/llms/bittensor.cjs +141 -0
- package/dist/experimental/llms/bittensor.d.ts +33 -0
- package/dist/experimental/llms/bittensor.js +137 -0
- package/dist/experimental/multimodal_embeddings/googlevertexai.cjs +5 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts +2 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.js +5 -1
- package/dist/hub.d.ts +1 -1
- package/dist/llms/base.d.ts +1 -1
- package/dist/llms/{googlevertexai.js → googlevertexai/common.cjs} +21 -17
- package/dist/llms/{googlevertexai.d.ts → googlevertexai/common.d.ts} +13 -23
- package/dist/llms/{googlevertexai.cjs → googlevertexai/common.js} +17 -21
- package/dist/llms/googlevertexai/index.cjs +34 -0
- package/dist/llms/googlevertexai/index.d.ts +26 -0
- package/dist/llms/googlevertexai/index.js +30 -0
- package/dist/llms/googlevertexai/web.cjs +31 -0
- package/dist/llms/googlevertexai/web.d.ts +24 -0
- package/dist/llms/googlevertexai/web.js +27 -0
- package/dist/llms/openai-chat.cjs +1 -1
- package/dist/llms/openai-chat.js +1 -1
- package/dist/llms/openai.cjs +1 -1
- package/dist/llms/openai.js +1 -1
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +2 -1
- package/dist/load/import_map.js +2 -1
- package/dist/load/index.cjs +2 -1
- package/dist/load/index.js +2 -1
- package/dist/load/serializable.cjs +23 -4
- package/dist/load/serializable.js +23 -4
- package/dist/prompts/base.cjs +2 -2
- package/dist/prompts/base.d.ts +1 -1
- package/dist/prompts/base.js +1 -1
- package/dist/prompts/chat.cjs +2 -2
- package/dist/prompts/chat.d.ts +1 -1
- package/dist/prompts/chat.js +1 -1
- package/dist/retrievers/multi_query.cjs +140 -0
- package/dist/retrievers/multi_query.d.ts +33 -0
- package/dist/retrievers/multi_query.js +136 -0
- package/dist/schema/document.cjs +2 -2
- package/dist/schema/document.d.ts +1 -1
- package/dist/schema/document.js +1 -1
- package/dist/schema/output_parser.cjs +2 -2
- package/dist/schema/output_parser.d.ts +2 -1
- package/dist/schema/output_parser.js +1 -1
- package/dist/schema/retriever.cjs +2 -2
- package/dist/schema/retriever.d.ts +2 -1
- package/dist/schema/retriever.js +1 -1
- package/dist/schema/runnable/config.cjs +8 -0
- package/dist/schema/runnable/config.d.ts +3 -0
- package/dist/schema/runnable/config.js +4 -0
- package/dist/schema/{runnable.cjs → runnable/index.cjs} +290 -101
- package/dist/schema/{runnable.d.ts → runnable/index.d.ts} +127 -41
- package/dist/schema/{runnable.js → runnable/index.js} +284 -99
- package/dist/tools/base.d.ts +1 -1
- package/dist/types/googlevertexai-types.d.ts +11 -4
- package/dist/util/async_caller.cjs +35 -25
- package/dist/util/async_caller.d.ts +8 -0
- package/dist/util/async_caller.js +35 -25
- package/dist/util/googlevertexai-connection.cjs +14 -15
- package/dist/util/googlevertexai-connection.d.ts +7 -7
- package/dist/util/googlevertexai-connection.js +14 -15
- package/dist/util/googlevertexai-webauth.cjs +56 -0
- package/dist/util/googlevertexai-webauth.d.ts +25 -0
- package/dist/util/googlevertexai-webauth.js +52 -0
- package/dist/vectorstores/googlevertexai.cjs +9 -8
- package/dist/vectorstores/googlevertexai.d.ts +8 -7
- package/dist/vectorstores/googlevertexai.js +9 -8
- package/dist/vectorstores/pinecone.cjs +30 -22
- package/dist/vectorstores/pinecone.d.ts +3 -1
- package/dist/vectorstores/pinecone.js +30 -22
- package/dist/vectorstores/vectara.cjs +20 -23
- package/dist/vectorstores/vectara.d.ts +9 -2
- package/dist/vectorstores/vectara.js +20 -23
- package/experimental/llms/bittensor.cjs +1 -0
- package/experimental/llms/bittensor.d.ts +1 -0
- package/experimental/llms/bittensor.js +1 -0
- package/llms/googlevertexai/web.cjs +1 -0
- package/llms/googlevertexai/web.d.ts +1 -0
- package/llms/googlevertexai/web.js +1 -0
- package/llms/googlevertexai.cjs +1 -1
- package/llms/googlevertexai.d.ts +1 -1
- package/llms/googlevertexai.js +1 -1
- package/package.json +40 -3
- package/retrievers/multi_query.cjs +1 -0
- package/retrievers/multi_query.d.ts +1 -0
- package/retrievers/multi_query.js +1 -0
- package/schema/runnable.cjs +1 -1
- package/schema/runnable.d.ts +1 -1
- package/schema/runnable.js +1 -1
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import { CriteriaEvalChain, LabeledCriteriaEvalChain, } from "./criteria/index.js";
|
|
2
|
+
import { ChatOpenAI } from "../chat_models/openai.js";
|
|
3
|
+
import { LabeledPairwiseStringEvalChain, PairwiseStringEvalChain, } from "./comparison/index.js";
|
|
4
|
+
import { EmbeddingDistanceEvalChain, PairwiseEmbeddingDistanceEvalChain, } from "./embedding_distance/index.js";
|
|
5
|
+
import { TrajectoryEvalChain } from "./agents/index.js";
|
|
6
|
+
import { BaseChatModel } from "../chat_models/base.js";
|
|
7
|
+
/**
 * Load the requested evaluation chain specified by a string.
 * @param type The type of evaluator to load.
 * @param options
 *  - llm The language model to use for the evaluator.
 *  - criteria The criteria to use for the evaluator.
 *  - agentTools A list of tools available to the agent, for TrajectoryEvalChain.
 */
export async function loadEvaluator(type, options) {
    const { llm, chainOptions, criteria, agentTools } = options || {};
    // Fall back to a deterministic GPT-4 judge when no model was supplied.
    const judge = llm ??
        new ChatOpenAI({
            modelName: "gpt-4",
            temperature: 0.0,
        });
    switch (type) {
        case "criteria":
            return await CriteriaEvalChain.fromLLM(judge, criteria, chainOptions);
        case "labeled_criteria":
            return await LabeledCriteriaEvalChain.fromLLM(judge, criteria, chainOptions);
        case "pairwise_string":
            return await PairwiseStringEvalChain.fromLLM(judge, criteria, chainOptions);
        case "labeled_pairwise_string":
            return await LabeledPairwiseStringEvalChain.fromLLM(judge, criteria, chainOptions);
        case "trajectory": {
            // Trajectory grading inspects chat-message structure, so a plain
            // LLM is rejected up front.
            // eslint-disable-next-line no-instanceof/no-instanceof
            if (!(judge instanceof BaseChatModel)) {
                throw new Error("LLM must be an instance of a base chat model.");
            }
            return await TrajectoryEvalChain.fromLLM(judge, agentTools, chainOptions);
        }
        case "embedding_distance":
            return new EmbeddingDistanceEvalChain({
                embedding: options?.embedding,
                distanceMetric: options?.distanceMetric,
            });
        case "pairwise_embedding_distance":
            return new PairwiseEmbeddingDistanceEvalChain({});
        default:
            throw new Error(`Unknown type: ${type}`);
    }
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import { AgentTrajectoryEvaluator, LLMPairwiseStringEvaluator, LLMStringEvaluator, PairwiseStringEvaluator, StringEvaluator } from "./base.js";
/**
 * Maps each evaluator-type string accepted by the evaluator loader to the
 * evaluator category it resolves to.
 */
export interface EvaluatorType {
    /**
     * The criteria evaluator, which evaluates a model based on a
     * custom set of criteria without any reference labels.
     * */
    criteria: LLMStringEvaluator;
    /**
     * The labeled criteria evaluator, which evaluates a model based on a
     * custom set of criteria, with a reference label.
     * */
    labeled_criteria: LLMStringEvaluator;
    /**
     * The pairwise string evaluator, which predicts the preferred prediction from
     * between two models.
     */
    pairwise_string: LLMPairwiseStringEvaluator;
    /**
     * The labeled pairwise string evaluator, which predicts the preferred prediction
     * from between two models based on a ground truth reference label.
     * */
    labeled_pairwise_string: LLMPairwiseStringEvaluator;
    /**
     * The agent trajectory evaluator, which grades the agent's intermediate steps.
     */
    trajectory: AgentTrajectoryEvaluator;
    /**
     * Compare a prediction to a reference label using embedding distance.
     * */
    embedding_distance: StringEvaluator;
    /**
     * Compare two predictions using embedding distance.
     * */
    pairwise_embedding_distance: PairwiseStringEvaluator;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Type-only module: the runtime output is intentionally empty. The bare
// export marks this file as an ES module so bundlers treat it correctly.
export {};
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.NIBittensorLLM = void 0;
|
|
4
|
+
const base_js_1 = require("../../llms/base.cjs");
|
|
5
|
+
/**
|
|
6
|
+
* Class representing the Neural Internet language model powerd by Bittensor, a decentralized network
|
|
7
|
+
* full of different AI models.
|
|
8
|
+
* To analyze API_KEYS and logs of you usage visit
|
|
9
|
+
* https://api.neuralinternet.ai/api-keys
|
|
10
|
+
* https://api.neuralinternet.ai/logs
|
|
11
|
+
*/
|
|
12
|
+
class NIBittensorLLM extends base_js_1.LLM {
|
|
13
|
+
static lc_name() {
|
|
14
|
+
return "NIBittensorLLM";
|
|
15
|
+
}
|
|
16
|
+
constructor(fields) {
|
|
17
|
+
super(fields ?? {});
|
|
18
|
+
Object.defineProperty(this, "systemPrompt", {
|
|
19
|
+
enumerable: true,
|
|
20
|
+
configurable: true,
|
|
21
|
+
writable: true,
|
|
22
|
+
value: void 0
|
|
23
|
+
});
|
|
24
|
+
Object.defineProperty(this, "topResponses", {
|
|
25
|
+
enumerable: true,
|
|
26
|
+
configurable: true,
|
|
27
|
+
writable: true,
|
|
28
|
+
value: void 0
|
|
29
|
+
});
|
|
30
|
+
this.systemPrompt =
|
|
31
|
+
fields?.systemPrompt ??
|
|
32
|
+
"You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
|
|
33
|
+
this.topResponses = fields?.topResponses;
|
|
34
|
+
}
|
|
35
|
+
_llmType() {
|
|
36
|
+
return "NIBittensorLLM";
|
|
37
|
+
}
|
|
38
|
+
/** Call out to NIBittensorLLM's complete endpoint.
|
|
39
|
+
Args:
|
|
40
|
+
prompt: The prompt to pass into the model.
|
|
41
|
+
|
|
42
|
+
Returns: The string generated by the model.
|
|
43
|
+
|
|
44
|
+
Example:
|
|
45
|
+
let response = niBittensorLLM.call("Tell me a joke.");
|
|
46
|
+
*/
|
|
47
|
+
async _call(prompt) {
|
|
48
|
+
try {
|
|
49
|
+
// Retrieve API KEY
|
|
50
|
+
const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
|
|
51
|
+
if (!apiKeyResponse.ok) {
|
|
52
|
+
throw new Error("Network response was not ok");
|
|
53
|
+
}
|
|
54
|
+
const apiKeysData = await apiKeyResponse.json();
|
|
55
|
+
const apiKey = apiKeysData[0].api_key;
|
|
56
|
+
const headers = {
|
|
57
|
+
"Content-Type": "application/json",
|
|
58
|
+
Authorization: `Bearer ${apiKey}`,
|
|
59
|
+
"Endpoint-Version": "2023-05-19",
|
|
60
|
+
};
|
|
61
|
+
if (this.topResponses !== undefined) {
|
|
62
|
+
this.topResponses = this.topResponses > 100 ? 100 : this.topResponses;
|
|
63
|
+
}
|
|
64
|
+
else {
|
|
65
|
+
this.topResponses = 0;
|
|
66
|
+
}
|
|
67
|
+
const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
|
|
68
|
+
if (!minerResponse.ok) {
|
|
69
|
+
throw new Error("Network response was not ok");
|
|
70
|
+
}
|
|
71
|
+
const uids = await minerResponse.json();
|
|
72
|
+
if (Array.isArray(uids) && uids.length && this.topResponses === 0) {
|
|
73
|
+
for (const uid of uids) {
|
|
74
|
+
try {
|
|
75
|
+
const payload = {
|
|
76
|
+
uids: [uid],
|
|
77
|
+
messages: [
|
|
78
|
+
{ role: "system", content: this.systemPrompt },
|
|
79
|
+
{ role: "user", content: prompt },
|
|
80
|
+
],
|
|
81
|
+
};
|
|
82
|
+
const response = await fetch("https://test.neuralinternet.ai/chat", {
|
|
83
|
+
method: "POST",
|
|
84
|
+
headers,
|
|
85
|
+
body: JSON.stringify(payload),
|
|
86
|
+
});
|
|
87
|
+
if (!response.ok) {
|
|
88
|
+
throw new Error("Network response was not ok");
|
|
89
|
+
}
|
|
90
|
+
const chatData = await response.json();
|
|
91
|
+
if (chatData.choices) {
|
|
92
|
+
return chatData.choices[0].message.content;
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
catch (error) {
|
|
96
|
+
continue;
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
// For top miner based on bittensor response
|
|
101
|
+
if (this.topResponses === 0) {
|
|
102
|
+
this.topResponses = 10;
|
|
103
|
+
}
|
|
104
|
+
const payload = {
|
|
105
|
+
top_n: this.topResponses,
|
|
106
|
+
messages: [
|
|
107
|
+
{ role: "system", content: this.systemPrompt },
|
|
108
|
+
{ role: "user", content: prompt },
|
|
109
|
+
],
|
|
110
|
+
};
|
|
111
|
+
const response = await fetch("https://test.neuralinternet.ai/chat", {
|
|
112
|
+
method: "POST",
|
|
113
|
+
headers,
|
|
114
|
+
body: JSON.stringify(payload),
|
|
115
|
+
});
|
|
116
|
+
if (!response.ok) {
|
|
117
|
+
throw new Error("Network response was not ok");
|
|
118
|
+
}
|
|
119
|
+
const responseData = await response.json();
|
|
120
|
+
if (this.topResponses) {
|
|
121
|
+
return responseData;
|
|
122
|
+
}
|
|
123
|
+
else if (responseData.choices) {
|
|
124
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
125
|
+
const temp = responseData.choices;
|
|
126
|
+
return temp[0].message.content;
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
catch (error) {
|
|
130
|
+
return "Sorry I am unable to provide response now, Please try again later.";
|
|
131
|
+
}
|
|
132
|
+
return "default";
|
|
133
|
+
}
|
|
134
|
+
identifyingParams() {
|
|
135
|
+
return {
|
|
136
|
+
systemPrompt: this.systemPrompt,
|
|
137
|
+
topResponses: this.topResponses,
|
|
138
|
+
};
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
exports.NIBittensorLLM = NIBittensorLLM;
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import { BaseLLMParams, LLM } from "../../llms/base.js";
|
|
2
|
+
export interface BittensorInput extends BaseLLMParams {
|
|
3
|
+
systemPrompt?: string | null | undefined;
|
|
4
|
+
topResponses?: number | undefined;
|
|
5
|
+
}
|
|
6
|
+
/**
|
|
7
|
+
* Class representing the Neural Internet language model powerd by Bittensor, a decentralized network
|
|
8
|
+
* full of different AI models.
|
|
9
|
+
* To analyze API_KEYS and logs of you usage visit
|
|
10
|
+
* https://api.neuralinternet.ai/api-keys
|
|
11
|
+
* https://api.neuralinternet.ai/logs
|
|
12
|
+
*/
|
|
13
|
+
export declare class NIBittensorLLM extends LLM implements BittensorInput {
|
|
14
|
+
static lc_name(): string;
|
|
15
|
+
systemPrompt: string;
|
|
16
|
+
topResponses: number | undefined;
|
|
17
|
+
constructor(fields?: BittensorInput);
|
|
18
|
+
_llmType(): string;
|
|
19
|
+
/** Call out to NIBittensorLLM's complete endpoint.
|
|
20
|
+
Args:
|
|
21
|
+
prompt: The prompt to pass into the model.
|
|
22
|
+
|
|
23
|
+
Returns: The string generated by the model.
|
|
24
|
+
|
|
25
|
+
Example:
|
|
26
|
+
let response = niBittensorLLM.call("Tell me a joke.");
|
|
27
|
+
*/
|
|
28
|
+
_call(prompt: string): Promise<string>;
|
|
29
|
+
identifyingParams(): {
|
|
30
|
+
systemPrompt: string | null | undefined;
|
|
31
|
+
topResponses: number | undefined;
|
|
32
|
+
};
|
|
33
|
+
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import { LLM } from "../../llms/base.js";
|
|
2
|
+
/**
|
|
3
|
+
* Class representing the Neural Internet language model powerd by Bittensor, a decentralized network
|
|
4
|
+
* full of different AI models.
|
|
5
|
+
* To analyze API_KEYS and logs of you usage visit
|
|
6
|
+
* https://api.neuralinternet.ai/api-keys
|
|
7
|
+
* https://api.neuralinternet.ai/logs
|
|
8
|
+
*/
|
|
9
|
+
export class NIBittensorLLM extends LLM {
|
|
10
|
+
static lc_name() {
|
|
11
|
+
return "NIBittensorLLM";
|
|
12
|
+
}
|
|
13
|
+
constructor(fields) {
|
|
14
|
+
super(fields ?? {});
|
|
15
|
+
Object.defineProperty(this, "systemPrompt", {
|
|
16
|
+
enumerable: true,
|
|
17
|
+
configurable: true,
|
|
18
|
+
writable: true,
|
|
19
|
+
value: void 0
|
|
20
|
+
});
|
|
21
|
+
Object.defineProperty(this, "topResponses", {
|
|
22
|
+
enumerable: true,
|
|
23
|
+
configurable: true,
|
|
24
|
+
writable: true,
|
|
25
|
+
value: void 0
|
|
26
|
+
});
|
|
27
|
+
this.systemPrompt =
|
|
28
|
+
fields?.systemPrompt ??
|
|
29
|
+
"You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
|
|
30
|
+
this.topResponses = fields?.topResponses;
|
|
31
|
+
}
|
|
32
|
+
_llmType() {
|
|
33
|
+
return "NIBittensorLLM";
|
|
34
|
+
}
|
|
35
|
+
/** Call out to NIBittensorLLM's complete endpoint.
|
|
36
|
+
Args:
|
|
37
|
+
prompt: The prompt to pass into the model.
|
|
38
|
+
|
|
39
|
+
Returns: The string generated by the model.
|
|
40
|
+
|
|
41
|
+
Example:
|
|
42
|
+
let response = niBittensorLLM.call("Tell me a joke.");
|
|
43
|
+
*/
|
|
44
|
+
async _call(prompt) {
|
|
45
|
+
try {
|
|
46
|
+
// Retrieve API KEY
|
|
47
|
+
const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
|
|
48
|
+
if (!apiKeyResponse.ok) {
|
|
49
|
+
throw new Error("Network response was not ok");
|
|
50
|
+
}
|
|
51
|
+
const apiKeysData = await apiKeyResponse.json();
|
|
52
|
+
const apiKey = apiKeysData[0].api_key;
|
|
53
|
+
const headers = {
|
|
54
|
+
"Content-Type": "application/json",
|
|
55
|
+
Authorization: `Bearer ${apiKey}`,
|
|
56
|
+
"Endpoint-Version": "2023-05-19",
|
|
57
|
+
};
|
|
58
|
+
if (this.topResponses !== undefined) {
|
|
59
|
+
this.topResponses = this.topResponses > 100 ? 100 : this.topResponses;
|
|
60
|
+
}
|
|
61
|
+
else {
|
|
62
|
+
this.topResponses = 0;
|
|
63
|
+
}
|
|
64
|
+
const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
|
|
65
|
+
if (!minerResponse.ok) {
|
|
66
|
+
throw new Error("Network response was not ok");
|
|
67
|
+
}
|
|
68
|
+
const uids = await minerResponse.json();
|
|
69
|
+
if (Array.isArray(uids) && uids.length && this.topResponses === 0) {
|
|
70
|
+
for (const uid of uids) {
|
|
71
|
+
try {
|
|
72
|
+
const payload = {
|
|
73
|
+
uids: [uid],
|
|
74
|
+
messages: [
|
|
75
|
+
{ role: "system", content: this.systemPrompt },
|
|
76
|
+
{ role: "user", content: prompt },
|
|
77
|
+
],
|
|
78
|
+
};
|
|
79
|
+
const response = await fetch("https://test.neuralinternet.ai/chat", {
|
|
80
|
+
method: "POST",
|
|
81
|
+
headers,
|
|
82
|
+
body: JSON.stringify(payload),
|
|
83
|
+
});
|
|
84
|
+
if (!response.ok) {
|
|
85
|
+
throw new Error("Network response was not ok");
|
|
86
|
+
}
|
|
87
|
+
const chatData = await response.json();
|
|
88
|
+
if (chatData.choices) {
|
|
89
|
+
return chatData.choices[0].message.content;
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
catch (error) {
|
|
93
|
+
continue;
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
// For top miner based on bittensor response
|
|
98
|
+
if (this.topResponses === 0) {
|
|
99
|
+
this.topResponses = 10;
|
|
100
|
+
}
|
|
101
|
+
const payload = {
|
|
102
|
+
top_n: this.topResponses,
|
|
103
|
+
messages: [
|
|
104
|
+
{ role: "system", content: this.systemPrompt },
|
|
105
|
+
{ role: "user", content: prompt },
|
|
106
|
+
],
|
|
107
|
+
};
|
|
108
|
+
const response = await fetch("https://test.neuralinternet.ai/chat", {
|
|
109
|
+
method: "POST",
|
|
110
|
+
headers,
|
|
111
|
+
body: JSON.stringify(payload),
|
|
112
|
+
});
|
|
113
|
+
if (!response.ok) {
|
|
114
|
+
throw new Error("Network response was not ok");
|
|
115
|
+
}
|
|
116
|
+
const responseData = await response.json();
|
|
117
|
+
if (this.topResponses) {
|
|
118
|
+
return responseData;
|
|
119
|
+
}
|
|
120
|
+
else if (responseData.choices) {
|
|
121
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
122
|
+
const temp = responseData.choices;
|
|
123
|
+
return temp[0].message.content;
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
catch (error) {
|
|
127
|
+
return "Sorry I am unable to provide response now, Please try again later.";
|
|
128
|
+
}
|
|
129
|
+
return "default";
|
|
130
|
+
}
|
|
131
|
+
identifyingParams() {
|
|
132
|
+
return {
|
|
133
|
+
systemPrompt: this.systemPrompt,
|
|
134
|
+
topResponses: this.topResponses,
|
|
135
|
+
};
|
|
136
|
+
}
|
|
137
|
+
}
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
exports.GoogleVertexAIMultimodalEmbeddings = void 0;
|
|
4
|
+
const google_auth_library_1 = require("google-auth-library");
|
|
4
5
|
const base_js_1 = require("../../embeddings/base.cjs");
|
|
5
6
|
const googlevertexai_connection_js_1 = require("../../util/googlevertexai-connection.cjs");
|
|
6
7
|
/**
|
|
@@ -24,7 +25,10 @@ class GoogleVertexAIMultimodalEmbeddings extends base_js_1.Embeddings {
|
|
|
24
25
|
value: void 0
|
|
25
26
|
});
|
|
26
27
|
this.model = fields?.model ?? this.model;
|
|
27
|
-
this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller
|
|
28
|
+
this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, new google_auth_library_1.GoogleAuth({
|
|
29
|
+
scopes: "https://www.googleapis.com/auth/cloud-platform",
|
|
30
|
+
...fields?.authOptions,
|
|
31
|
+
}));
|
|
28
32
|
}
|
|
29
33
|
/**
|
|
30
34
|
* Converts media (text or image) to an instance that can be used for
|
|
@@ -1,11 +1,12 @@
|
|
|
1
1
|
/// <reference types="node" resolution-mode="require"/>
|
|
2
|
+
import { GoogleAuthOptions } from "google-auth-library";
|
|
2
3
|
import { Embeddings, EmbeddingsParams } from "../../embeddings/base.js";
|
|
3
4
|
import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../../types/googlevertexai-types.js";
|
|
4
5
|
/**
|
|
5
6
|
* Parameters for the GoogleVertexAIMultimodalEmbeddings class, extending
|
|
6
7
|
* both EmbeddingsParams and GoogleVertexAIConnectionParams.
|
|
7
8
|
*/
|
|
8
|
-
export interface GoogleVertexAIMultimodalEmbeddingsParams extends EmbeddingsParams, GoogleVertexAIBaseLLMInput {
|
|
9
|
+
export interface GoogleVertexAIMultimodalEmbeddingsParams extends EmbeddingsParams, GoogleVertexAIBaseLLMInput<GoogleAuthOptions> {
|
|
9
10
|
}
|
|
10
11
|
/**
|
|
11
12
|
* An instance of media (text or image) that can be used for generating
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { GoogleAuth } from "google-auth-library";
|
|
1
2
|
import { Embeddings } from "../../embeddings/base.js";
|
|
2
3
|
import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
|
|
3
4
|
/**
|
|
@@ -21,7 +22,10 @@ export class GoogleVertexAIMultimodalEmbeddings extends Embeddings {
|
|
|
21
22
|
value: void 0
|
|
22
23
|
});
|
|
23
24
|
this.model = fields?.model ?? this.model;
|
|
24
|
-
this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller
|
|
25
|
+
this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, new GoogleAuth({
|
|
26
|
+
scopes: "https://www.googleapis.com/auth/cloud-platform",
|
|
27
|
+
...fields?.authOptions,
|
|
28
|
+
}));
|
|
25
29
|
}
|
|
26
30
|
/**
|
|
27
31
|
* Converts media (text or image) to an instance that can be used for
|
package/dist/hub.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { ClientConfiguration, HubPushOptions } from "langchainhub";
|
|
2
|
-
import { Runnable } from "./schema/runnable.js";
|
|
2
|
+
import { Runnable } from "./schema/runnable/index.js";
|
|
3
3
|
/**
|
|
4
4
|
* Push a prompt to the hub.
|
|
5
5
|
* If the specified repo doesn't already exist, it will be created.
|
package/dist/llms/base.d.ts
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { BaseCache, BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
|
|
2
2
|
import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
|
|
3
3
|
import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
|
|
4
|
-
import { RunnableConfig } from "../schema/runnable.js";
|
|
4
|
+
import { RunnableConfig } from "../schema/runnable/config.js";
|
|
5
5
|
export type SerializedLLM = {
|
|
6
6
|
_model: string;
|
|
7
7
|
_type: string;
|
|
@@ -1,22 +1,26 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.BaseGoogleVertexAI = void 0;
|
|
4
|
+
const base_js_1 = require("../base.cjs");
|
|
3
5
|
/**
|
|
4
|
-
*
|
|
5
|
-
*
|
|
6
|
-
*
|
|
7
|
-
* To use, you will need to have one of the following authentication
|
|
8
|
-
* methods in place:
|
|
9
|
-
* - You are logged into an account permitted to the Google Cloud project
|
|
10
|
-
* using Vertex AI.
|
|
11
|
-
* - You are running this on a machine using a service account permitted to
|
|
12
|
-
* the Google Cloud project using Vertex AI.
|
|
13
|
-
* - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
|
|
14
|
-
* path of a credentials file for a service account permitted to the
|
|
15
|
-
* Google Cloud project using Vertex AI.
|
|
6
|
+
* Base class for Google Vertex AI LLMs.
|
|
7
|
+
* Implemented subclasses must provide a GoogleVertexAILLMConnection
|
|
8
|
+
* with an appropriate auth client.
|
|
16
9
|
*/
|
|
17
|
-
|
|
10
|
+
class BaseGoogleVertexAI extends base_js_1.BaseLLM {
|
|
11
|
+
get lc_aliases() {
|
|
12
|
+
return {
|
|
13
|
+
model: "model_name",
|
|
14
|
+
};
|
|
15
|
+
}
|
|
18
16
|
constructor(fields) {
|
|
19
17
|
super(fields ?? {});
|
|
18
|
+
Object.defineProperty(this, "lc_serializable", {
|
|
19
|
+
enumerable: true,
|
|
20
|
+
configurable: true,
|
|
21
|
+
writable: true,
|
|
22
|
+
value: true
|
|
23
|
+
});
|
|
20
24
|
Object.defineProperty(this, "model", {
|
|
21
25
|
enumerable: true,
|
|
22
26
|
configurable: true,
|
|
@@ -65,10 +69,9 @@ export class GoogleVertexAI extends BaseLLM {
|
|
|
65
69
|
this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
|
|
66
70
|
this.topP = fields?.topP ?? this.topP;
|
|
67
71
|
this.topK = fields?.topK ?? this.topK;
|
|
68
|
-
this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller);
|
|
69
72
|
}
|
|
70
73
|
_llmType() {
|
|
71
|
-
return "
|
|
74
|
+
return "vertexai";
|
|
72
75
|
}
|
|
73
76
|
async _generate(prompts, options) {
|
|
74
77
|
const generations = await Promise.all(prompts.map((prompt) => this._generatePrompt(prompt, options)));
|
|
@@ -129,3 +132,4 @@ export class GoogleVertexAI extends BaseLLM {
|
|
|
129
132
|
return result?.data?.predictions[0];
|
|
130
133
|
}
|
|
131
134
|
}
|
|
135
|
+
exports.BaseGoogleVertexAI = BaseGoogleVertexAI;
|
|
@@ -1,11 +1,8 @@
|
|
|
1
|
-
import { BaseLLM } from "
|
|
2
|
-
import { Generation, LLMResult } from "
|
|
3
|
-
import {
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
*/
|
|
7
|
-
export interface GoogleVertexAITextInput extends GoogleVertexAIBaseLLMInput {
|
|
8
|
-
}
|
|
1
|
+
import { BaseLLM } from "../base.js";
|
|
2
|
+
import { Generation, LLMResult } from "../../schema/index.js";
|
|
3
|
+
import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
|
|
4
|
+
import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../../types/googlevertexai-types.js";
|
|
5
|
+
import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
|
|
9
6
|
/**
|
|
10
7
|
* Interface representing the instance of text input to the Google Vertex
|
|
11
8
|
* AI model.
|
|
@@ -32,27 +29,20 @@ interface TextPrediction extends GoogleVertexAIBasePrediction {
|
|
|
32
29
|
content: string;
|
|
33
30
|
}
|
|
34
31
|
/**
|
|
35
|
-
*
|
|
36
|
-
*
|
|
37
|
-
*
|
|
38
|
-
* To use, you will need to have one of the following authentication
|
|
39
|
-
* methods in place:
|
|
40
|
-
* - You are logged into an account permitted to the Google Cloud project
|
|
41
|
-
* using Vertex AI.
|
|
42
|
-
* - You are running this on a machine using a service account permitted to
|
|
43
|
-
* the Google Cloud project using Vertex AI.
|
|
44
|
-
* - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
|
|
45
|
-
* path of a credentials file for a service account permitted to the
|
|
46
|
-
* Google Cloud project using Vertex AI.
|
|
32
|
+
* Base class for Google Vertex AI LLMs.
|
|
33
|
+
* Implemented subclasses must provide a GoogleVertexAILLMConnection
|
|
34
|
+
* with an appropriate auth client.
|
|
47
35
|
*/
|
|
48
|
-
export declare class
|
|
36
|
+
export declare class BaseGoogleVertexAI<AuthOptions> extends BaseLLM implements GoogleVertexAIBaseLLMInput<AuthOptions> {
|
|
37
|
+
lc_serializable: boolean;
|
|
49
38
|
model: string;
|
|
50
39
|
temperature: number;
|
|
51
40
|
maxOutputTokens: number;
|
|
52
41
|
topP: number;
|
|
53
42
|
topK: number;
|
|
54
|
-
|
|
55
|
-
|
|
43
|
+
protected connection: GoogleVertexAILLMConnection<BaseLanguageModelCallOptions, GoogleVertexAILLMInstance, TextPrediction, AuthOptions>;
|
|
44
|
+
get lc_aliases(): Record<string, string>;
|
|
45
|
+
constructor(fields?: GoogleVertexAIBaseLLMInput<AuthOptions>);
|
|
56
46
|
_llmType(): string;
|
|
57
47
|
_generate(prompts: string[], options: this["ParsedCallOptions"]): Promise<LLMResult>;
|
|
58
48
|
_generatePrompt(prompt: string, options: this["ParsedCallOptions"]): Promise<Generation[]>;
|
|
@@ -1,25 +1,23 @@
|
|
|
1
|
-
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.GoogleVertexAI = void 0;
|
|
4
|
-
const base_js_1 = require("./base.cjs");
|
|
5
|
-
const googlevertexai_connection_js_1 = require("../util/googlevertexai-connection.cjs");
|
|
1
|
+
import { BaseLLM } from "../base.js";
|
|
6
2
|
/**
|
|
7
|
-
*
|
|
8
|
-
*
|
|
9
|
-
*
|
|
10
|
-
* To use, you will need to have one of the following authentication
|
|
11
|
-
* methods in place:
|
|
12
|
-
* - You are logged into an account permitted to the Google Cloud project
|
|
13
|
-
* using Vertex AI.
|
|
14
|
-
* - You are running this on a machine using a service account permitted to
|
|
15
|
-
* the Google Cloud project using Vertex AI.
|
|
16
|
-
* - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
|
|
17
|
-
* path of a credentials file for a service account permitted to the
|
|
18
|
-
* Google Cloud project using Vertex AI.
|
|
3
|
+
* Base class for Google Vertex AI LLMs.
|
|
4
|
+
* Implemented subclasses must provide a GoogleVertexAILLMConnection
|
|
5
|
+
* with an appropriate auth client.
|
|
19
6
|
*/
|
|
20
|
-
class
|
|
7
|
+
export class BaseGoogleVertexAI extends BaseLLM {
|
|
8
|
+
get lc_aliases() {
|
|
9
|
+
return {
|
|
10
|
+
model: "model_name",
|
|
11
|
+
};
|
|
12
|
+
}
|
|
21
13
|
constructor(fields) {
|
|
22
14
|
super(fields ?? {});
|
|
15
|
+
Object.defineProperty(this, "lc_serializable", {
|
|
16
|
+
enumerable: true,
|
|
17
|
+
configurable: true,
|
|
18
|
+
writable: true,
|
|
19
|
+
value: true
|
|
20
|
+
});
|
|
23
21
|
Object.defineProperty(this, "model", {
|
|
24
22
|
enumerable: true,
|
|
25
23
|
configurable: true,
|
|
@@ -68,10 +66,9 @@ class GoogleVertexAI extends base_js_1.BaseLLM {
|
|
|
68
66
|
this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
|
|
69
67
|
this.topP = fields?.topP ?? this.topP;
|
|
70
68
|
this.topK = fields?.topK ?? this.topK;
|
|
71
|
-
this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller);
|
|
72
69
|
}
|
|
73
70
|
_llmType() {
|
|
74
|
-
return "
|
|
71
|
+
return "vertexai";
|
|
75
72
|
}
|
|
76
73
|
async _generate(prompts, options) {
|
|
77
74
|
const generations = await Promise.all(prompts.map((prompt) => this._generatePrompt(prompt, options)));
|
|
@@ -132,4 +129,3 @@ class GoogleVertexAI extends base_js_1.BaseLLM {
|
|
|
132
129
|
return result?.data?.predictions[0];
|
|
133
130
|
}
|
|
134
131
|
}
|
|
135
|
-
exports.GoogleVertexAI = GoogleVertexAI;
|