langchain 0.0.150 → 0.0.152
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cache/cloudflare_kv.cjs +1 -0
- package/cache/cloudflare_kv.d.ts +1 -0
- package/cache/cloudflare_kv.js +1 -0
- package/dist/agents/chat/index.cjs +1 -1
- package/dist/agents/chat/index.js +1 -1
- package/dist/agents/chat_convo/index.cjs +1 -1
- package/dist/agents/chat_convo/index.js +1 -1
- package/dist/agents/openai/index.cjs +1 -1
- package/dist/agents/openai/index.js +1 -1
- package/dist/agents/structured_chat/index.cjs +1 -1
- package/dist/agents/structured_chat/index.js +1 -1
- package/dist/agents/xml/index.cjs +1 -1
- package/dist/agents/xml/index.js +1 -1
- package/dist/base_language/count_tokens.cjs +1 -0
- package/dist/base_language/count_tokens.js +1 -0
- package/dist/base_language/index.cjs +5 -3
- package/dist/base_language/index.d.ts +1 -1
- package/dist/base_language/index.js +4 -3
- package/dist/cache/cloudflare_kv.cjs +61 -0
- package/dist/cache/cloudflare_kv.d.ts +29 -0
- package/dist/cache/cloudflare_kv.js +57 -0
- package/dist/chains/openai_functions/openapi.cjs +1 -1
- package/dist/chains/openai_functions/openapi.js +1 -1
- package/dist/chains/question_answering/map_reduce_prompts.cjs +2 -3
- package/dist/chains/question_answering/map_reduce_prompts.js +2 -3
- package/dist/chains/question_answering/refine_prompts.cjs +2 -2
- package/dist/chains/question_answering/refine_prompts.js +2 -2
- package/dist/chains/question_answering/stuff_prompts.cjs +1 -2
- package/dist/chains/question_answering/stuff_prompts.js +1 -2
- package/dist/chat_models/ollama.cjs +3 -7
- package/dist/chat_models/ollama.d.ts +1 -1
- package/dist/chat_models/ollama.js +3 -7
- package/dist/document_loaders/web/pdf.cjs +87 -0
- package/dist/document_loaders/web/pdf.d.ts +17 -0
- package/dist/document_loaders/web/pdf.js +83 -0
- package/dist/evaluation/agents/prompt.cjs +2 -3
- package/dist/evaluation/agents/prompt.js +2 -3
- package/dist/experimental/chat_models/bittensor.cjs +141 -0
- package/dist/experimental/chat_models/bittensor.d.ts +36 -0
- package/dist/experimental/chat_models/bittensor.js +137 -0
- package/dist/experimental/plan_and_execute/prompt.cjs +1 -1
- package/dist/experimental/plan_and_execute/prompt.js +1 -1
- package/dist/llms/llama_cpp.cjs +10 -4
- package/dist/llms/llama_cpp.d.ts +2 -1
- package/dist/llms/llama_cpp.js +10 -4
- package/dist/llms/ollama.cjs +5 -6
- package/dist/llms/ollama.d.ts +2 -2
- package/dist/llms/ollama.js +5 -6
- package/dist/llms/openai.cjs +3 -3
- package/dist/llms/openai.js +3 -3
- package/dist/load/import_constants.cjs +4 -0
- package/dist/load/import_constants.js +4 -0
- package/dist/load/import_map.cjs +2 -1
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/prompts/chat.cjs +12 -1
- package/dist/prompts/chat.d.ts +8 -0
- package/dist/prompts/chat.js +12 -1
- package/dist/schema/runnable/base.cjs +10 -2
- package/dist/schema/runnable/base.d.ts +2 -0
- package/dist/schema/runnable/base.js +9 -2
- package/dist/schema/runnable/branch.cjs +106 -0
- package/dist/schema/runnable/branch.d.ts +66 -0
- package/dist/schema/runnable/branch.js +102 -0
- package/dist/schema/runnable/index.cjs +12 -16
- package/dist/schema/runnable/index.d.ts +2 -1
- package/dist/schema/runnable/index.js +2 -1
- package/dist/stores/message/cloudflare_d1.cjs +134 -0
- package/dist/stores/message/cloudflare_d1.d.ts +49 -0
- package/dist/stores/message/cloudflare_d1.js +130 -0
- package/dist/types/openai-types.d.ts +2 -0
- package/dist/vectorstores/pgvector.cjs +277 -0
- package/dist/vectorstores/pgvector.d.ts +132 -0
- package/dist/vectorstores/pgvector.js +270 -0
- package/document_loaders/web/pdf.cjs +1 -0
- package/document_loaders/web/pdf.d.ts +1 -0
- package/document_loaders/web/pdf.js +1 -0
- package/experimental/chat_models/bittensor.cjs +1 -0
- package/experimental/chat_models/bittensor.d.ts +1 -0
- package/experimental/chat_models/bittensor.js +1 -0
- package/package.json +46 -1
- package/stores/message/cloudflare_d1.cjs +1 -0
- package/stores/message/cloudflare_d1.d.ts +1 -0
- package/stores/message/cloudflare_d1.js +1 -0
- package/vectorstores/pgvector.cjs +1 -0
- package/vectorstores/pgvector.d.ts +1 -0
- package/vectorstores/pgvector.js +1 -0
package/dist/document_loaders/web/pdf.js
ADDED

@@ -0,0 +1,83 @@
+import { getDocument, version, } from "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js";
+import { Document } from "../../document.js";
+import { BaseDocumentLoader } from "../base.js";
+/**
+ * A document loader for loading data from PDFs.
+ */
+export class WebPDFLoader extends BaseDocumentLoader {
+    constructor(blob, { splitPages = true } = {}) {
+        super();
+        Object.defineProperty(this, "blob", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "splitPages", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        this.blob = blob;
+        this.splitPages = splitPages ?? this.splitPages;
+    }
+    /**
+     * Loads the contents of the PDF as documents.
+     * @returns An array of Documents representing the retrieved data.
+     */
+    async load() {
+        const parsedPdf = await getDocument({
+            data: new Uint8Array(await this.blob.arrayBuffer()),
+            useWorkerFetch: false,
+            isEvalSupported: false,
+            useSystemFonts: true,
+        }).promise;
+        const meta = await parsedPdf.getMetadata().catch(() => null);
+        const documents = [];
+        for (let i = 1; i <= parsedPdf.numPages; i += 1) {
+            const page = await parsedPdf.getPage(i);
+            const content = await page.getTextContent();
+            if (content.items.length === 0) {
+                continue;
+            }
+            const text = content.items
+                .map((item) => item.str)
+                .join("\n");
+            documents.push(new Document({
+                pageContent: text,
+                metadata: {
+                    pdf: {
+                        version,
+                        info: meta?.info,
+                        metadata: meta?.metadata,
+                        totalPages: parsedPdf.numPages,
+                    },
+                    loc: {
+                        pageNumber: i,
+                    },
+                },
+            }));
+        }
+        if (this.splitPages) {
+            return documents;
+        }
+        if (documents.length === 0) {
+            return [];
+        }
+        return [
+            new Document({
+                pageContent: documents.map((doc) => doc.pageContent).join("\n\n"),
+                metadata: {
+                    pdf: {
+                        version,
+                        info: meta?.info,
+                        metadata: meta?.metadata,
+                        totalPages: parsedPdf.numPages,
+                    },
+                },
+            }),
+        ];
+        return documents;
+    }
+}
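The new WebPDFLoader above takes a Blob plus an optional { splitPages } flag and emits one Document per page (or a single concatenated Document when splitPages is false). A minimal usage sketch, assuming the matching "langchain/document_loaders/web/pdf" entrypoint added to package.json in this release and an async context:

import { WebPDFLoader } from "langchain/document_loaders/web/pdf";

// Any Blob source works, e.g. a fetch response or a browser file input (URL is a placeholder).
const blob = await (await fetch("https://example.com/sample.pdf")).blob();

// splitPages defaults to true: one Document per PDF page, with loc.pageNumber metadata.
const loader = new WebPDFLoader(blob, { splitPages: true });
const docs = await loader.load();
console.log(docs.length, docs[0].metadata.loc.pageNumber);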
package/dist/evaluation/agents/prompt.cjs
CHANGED

@@ -90,8 +90,7 @@ The model did not use the appropriate tools to answer the question.\
 Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.

 Score: 2`;
-exports.EVAL_CHAT_PROMPT =
-/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
+exports.EVAL_CHAT_PROMPT = index_js_1.ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
@@ -124,7 +123,7 @@ i. Is the final answer helpful?
 iv. Does the AI language model use too many steps to answer the question?
 v. Are the appropriate tools used to answer the question?`;
 exports.TOOL_FREE_EVAL_CHAT_PROMPT =
-/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
package/dist/evaluation/agents/prompt.js
CHANGED

@@ -87,8 +87,7 @@ The model did not use the appropriate tools to answer the question.\
 Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.

 Score: 2`;
-export const EVAL_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+export const EVAL_CHAT_PROMPT = /* #__PURE__ */ ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
@@ -121,7 +120,7 @@ i. Is the final answer helpful?
 iv. Does the AI language model use too many steps to answer the question?
 v. Are the appropriate tools used to answer the question?`;
 export const TOOL_FREE_EVAL_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
     /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
     /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
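These prompt changes (and the plan-and-execute planner change further down) switch the built-in prompts from the older ChatPromptTemplate.fromPromptMessages to ChatPromptTemplate.fromMessages; the argument shape is unchanged. A minimal sketch of the newer call, assuming the public "langchain/prompts" entrypoint:

import {
  ChatPromptTemplate,
  SystemMessagePromptTemplate,
  HumanMessagePromptTemplate,
} from "langchain/prompts";

// fromMessages is a drop-in replacement for fromPromptMessages.
const prompt = ChatPromptTemplate.fromMessages([
  SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
  HumanMessagePromptTemplate.fromTemplate("{input}"),
]);
const messages = await prompt.formatMessages({ input: "Evaluate this trajectory." });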
package/dist/experimental/chat_models/bittensor.cjs
ADDED

@@ -0,0 +1,141 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.NIBittensorChatModel = void 0;
+const base_js_1 = require("../../chat_models/base.cjs");
+const index_js_1 = require("../../schema/index.cjs");
+/**
+ * Class representing the Neural Internet chat model powerd by Bittensor, a decentralized network
+ * full of different AI models.s
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+class NIBittensorChatModel extends base_js_1.BaseChatModel {
+    static lc_name() {
+        return "NIBittensorLLM";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "systemPrompt", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.systemPrompt =
+            fields?.systemPrompt ??
+                "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+    }
+    _combineLLMOutput() {
+        return [];
+    }
+    _llmType() {
+        return "NIBittensorLLM";
+    }
+    messageToOpenAIRole(message) {
+        const type = message._getType();
+        switch (type) {
+            case "system":
+                return "system";
+            case "ai":
+                return "assistant";
+            case "human":
+                return "user";
+            default:
+                return "user";
+        }
+    }
+    stringToChatMessage(message) {
+        return new index_js_1.ChatMessage(message, "assistant");
+    }
+    /** Call out to NIBittensorChatModel's complete endpoint.
+       Args:
+           messages: The messages to pass into the model.
+
+       Returns: The chat response by the model.
+
+       Example:
+       const chat = new NIBittensorChatModel();
+       const message = new HumanMessage('What is bittensor?');
+       const res = await chat.call([message]);
+     */
+    async _generate(messages) {
+        const processed_messages = messages.map((message) => ({
+            role: this.messageToOpenAIRole(message),
+            content: message.content,
+        }));
+        const generations = [];
+        try {
+            // Retrieve API KEY
+            const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+            if (!apiKeyResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const apiKeysData = await apiKeyResponse.json();
+            const apiKey = apiKeysData[0].api_key;
+            const headers = {
+                "Content-Type": "application/json",
+                Authorization: `Bearer ${apiKey}`,
+                "Endpoint-Version": "2023-05-19",
+            };
+            const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+            if (!minerResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const uids = await minerResponse.json();
+            if (Array.isArray(uids) && uids.length) {
+                for (const uid of uids) {
+                    try {
+                        const payload = {
+                            uids: [uid],
+                            messages: [
+                                { role: "system", content: this.systemPrompt },
+                                ...processed_messages,
+                            ],
+                        };
+                        const response = await fetch("https://test.neuralinternet.ai/chat", {
+                            method: "POST",
+                            headers,
+                            body: JSON.stringify(payload),
+                        });
+                        if (!response.ok) {
+                            throw new Error("Network response was not ok");
+                        }
+                        const chatData = await response.json();
+                        if (chatData.choices) {
+                            const generation = {
+                                text: chatData.choices[0].message.content,
+                                message: this.stringToChatMessage(chatData.choices[0].message.content),
+                            };
+                            generations.push(generation);
+                            return { generations, llmOutput: {} };
+                        }
+                    }
+                    catch (error) {
+                        continue;
+                    }
+                }
+            }
+        }
+        catch (error) {
+            const generation = {
+                text: "Sorry I am unable to provide response now, Please try again later.",
+                message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+            };
+            generations.push(generation);
+            return { generations, llmOutput: {} };
+        }
+        const generation = {
+            text: "Sorry I am unable to provide response now, Please try again later.",
+            message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+        };
+        generations.push(generation);
+        return { generations, llmOutput: {} };
+    }
+    identifyingParams() {
+        return {
+            systemPrompt: this.systemPrompt,
+        };
+    }
+}
+exports.NIBittensorChatModel = NIBittensorChatModel;
package/dist/experimental/chat_models/bittensor.d.ts
ADDED

@@ -0,0 +1,36 @@
+import { BaseChatModel, BaseChatModelParams } from "../../chat_models/base.js";
+import { BaseMessage, ChatResult } from "../../schema/index.js";
+export interface BittensorInput extends BaseChatModelParams {
+    systemPrompt?: string | null | undefined;
+}
+/**
+ * Class representing the Neural Internet chat model powerd by Bittensor, a decentralized network
+ * full of different AI models.s
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+export declare class NIBittensorChatModel extends BaseChatModel implements BittensorInput {
+    static lc_name(): string;
+    systemPrompt: string;
+    constructor(fields?: BittensorInput);
+    _combineLLMOutput(): never[];
+    _llmType(): string;
+    messageToOpenAIRole(message: BaseMessage): "system" | "user" | "assistant";
+    stringToChatMessage(message: string): BaseMessage;
+    /** Call out to NIBittensorChatModel's complete endpoint.
+       Args:
+           messages: The messages to pass into the model.
+
+       Returns: The chat response by the model.
+
+       Example:
+       const chat = new NIBittensorChatModel();
+       const message = new HumanMessage('What is bittensor?');
+       const res = await chat.call([message]);
+     */
+    _generate(messages: BaseMessage[]): Promise<ChatResult>;
+    identifyingParams(): {
+        systemPrompt: string | null | undefined;
+    };
+}
package/dist/experimental/chat_models/bittensor.js
ADDED

@@ -0,0 +1,137 @@
+import { BaseChatModel } from "../../chat_models/base.js";
+import { ChatMessage, } from "../../schema/index.js";
+/**
+ * Class representing the Neural Internet chat model powerd by Bittensor, a decentralized network
+ * full of different AI models.s
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+export class NIBittensorChatModel extends BaseChatModel {
+    static lc_name() {
+        return "NIBittensorLLM";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "systemPrompt", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.systemPrompt =
+            fields?.systemPrompt ??
+                "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+    }
+    _combineLLMOutput() {
+        return [];
+    }
+    _llmType() {
+        return "NIBittensorLLM";
+    }
+    messageToOpenAIRole(message) {
+        const type = message._getType();
+        switch (type) {
+            case "system":
+                return "system";
+            case "ai":
+                return "assistant";
+            case "human":
+                return "user";
+            default:
+                return "user";
+        }
+    }
+    stringToChatMessage(message) {
+        return new ChatMessage(message, "assistant");
+    }
+    /** Call out to NIBittensorChatModel's complete endpoint.
+       Args:
+           messages: The messages to pass into the model.
+
+       Returns: The chat response by the model.
+
+       Example:
+       const chat = new NIBittensorChatModel();
+       const message = new HumanMessage('What is bittensor?');
+       const res = await chat.call([message]);
+     */
+    async _generate(messages) {
+        const processed_messages = messages.map((message) => ({
+            role: this.messageToOpenAIRole(message),
+            content: message.content,
+        }));
+        const generations = [];
+        try {
+            // Retrieve API KEY
+            const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+            if (!apiKeyResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const apiKeysData = await apiKeyResponse.json();
+            const apiKey = apiKeysData[0].api_key;
+            const headers = {
+                "Content-Type": "application/json",
+                Authorization: `Bearer ${apiKey}`,
+                "Endpoint-Version": "2023-05-19",
+            };
+            const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+            if (!minerResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const uids = await minerResponse.json();
+            if (Array.isArray(uids) && uids.length) {
+                for (const uid of uids) {
+                    try {
+                        const payload = {
+                            uids: [uid],
+                            messages: [
+                                { role: "system", content: this.systemPrompt },
+                                ...processed_messages,
+                            ],
+                        };
+                        const response = await fetch("https://test.neuralinternet.ai/chat", {
+                            method: "POST",
+                            headers,
+                            body: JSON.stringify(payload),
+                        });
+                        if (!response.ok) {
+                            throw new Error("Network response was not ok");
+                        }
+                        const chatData = await response.json();
+                        if (chatData.choices) {
+                            const generation = {
+                                text: chatData.choices[0].message.content,
+                                message: this.stringToChatMessage(chatData.choices[0].message.content),
+                            };
+                            generations.push(generation);
+                            return { generations, llmOutput: {} };
+                        }
+                    }
+                    catch (error) {
+                        continue;
+                    }
+                }
+            }
+        }
+        catch (error) {
+            const generation = {
+                text: "Sorry I am unable to provide response now, Please try again later.",
+                message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+            };
+            generations.push(generation);
+            return { generations, llmOutput: {} };
+        }
+        const generation = {
+            text: "Sorry I am unable to provide response now, Please try again later.",
+            message: this.stringToChatMessage("Sorry I am unable to provide response now, Please try again later."),
+        };
+        generations.push(generation);
+        return { generations, llmOutput: {} };
+    }
+    identifyingParams() {
+        return {
+            systemPrompt: this.systemPrompt,
+        };
+    }
+}
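The docstring above already sketches basic usage; expanding it slightly, and assuming the new "langchain/experimental/chat_models/bittensor" entrypoint wired up in this release's package.json:

import { NIBittensorChatModel } from "langchain/experimental/chat_models/bittensor";
import { HumanMessage } from "langchain/schema";

// systemPrompt is the only Bittensor-specific option (see BittensorInput above).
const chat = new NIBittensorChatModel({ systemPrompt: "Answer concisely." });
const res = await chat.call([new HumanMessage("What is Bittensor?")]);
console.log(res.content);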
package/dist/experimental/plan_and_execute/prompt.cjs
CHANGED

@@ -14,7 +14,7 @@ exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
 exports.PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
     /* #__PURE__ */ chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
 ]);
package/dist/experimental/plan_and_execute/prompt.js
CHANGED

@@ -11,7 +11,7 @@ export const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
 export const PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+/* #__PURE__ */ ChatPromptTemplate.fromMessages([
     /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
     /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(`{input}`),
 ]);
package/dist/llms/llama_cpp.cjs
CHANGED
@@ -99,6 +99,12 @@ class LlamaCpp extends base_js_1.LLM {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "_session", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.batchSize = inputs.batchSize;
         this.contextSize = inputs.contextSize;
         this.embedding = inputs.embedding;
@@ -113,19 +119,19 @@ class LlamaCpp extends base_js_1.LLM {
         this.vocabOnly = inputs.vocabOnly;
         this._model = new node_llama_cpp_1.LlamaModel(inputs);
         this._context = new node_llama_cpp_1.LlamaContext({ model: this._model });
+        this._session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
     }
     _llmType() {
         return "llama2_cpp";
     }
     /** @ignore */
     async _call(prompt, options) {
-        const session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
         try {
-            const
-            return
+            const completion = await this._session.prompt(prompt, options);
+            return completion;
         }
         catch (e) {
-            throw new Error("Error getting prompt
+            throw new Error("Error getting prompt completion.");
         }
     }
 }
package/dist/llms/llama_cpp.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { LlamaModel, LlamaContext } from "node-llama-cpp";
+import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
 import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
 /**
  * Note that the modelPath is the only required parameter. For testing you
@@ -65,6 +65,7 @@ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
     modelPath: string;
     _model: LlamaModel;
     _context: LlamaContext;
+    _session: LlamaChatSession;
     static lc_name(): string;
     constructor(inputs: LlamaCppInputs);
     _llmType(): string;
package/dist/llms/llama_cpp.js
CHANGED
@@ -96,6 +96,12 @@ export class LlamaCpp extends LLM {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "_session", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.batchSize = inputs.batchSize;
         this.contextSize = inputs.contextSize;
         this.embedding = inputs.embedding;
@@ -110,19 +116,19 @@ export class LlamaCpp extends LLM {
         this.vocabOnly = inputs.vocabOnly;
         this._model = new LlamaModel(inputs);
         this._context = new LlamaContext({ model: this._model });
+        this._session = new LlamaChatSession({ context: this._context });
     }
     _llmType() {
         return "llama2_cpp";
     }
     /** @ignore */
     async _call(prompt, options) {
-        const session = new LlamaChatSession({ context: this._context });
         try {
-            const
-            return
+            const completion = await this._session.prompt(prompt, options);
+            return completion;
         }
         catch (e) {
-            throw new Error("Error getting prompt
+            throw new Error("Error getting prompt completion.");
         }
     }
 }
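The llama_cpp change above creates one LlamaChatSession in the constructor and reuses it for every _call, rather than building a fresh session per prompt, so conversation state now lives on the model instance. A usage sketch, assuming the "langchain/llms/llama_cpp" entrypoint and a local model file supported by node-llama-cpp (the path below is a placeholder):

import { LlamaCpp } from "langchain/llms/llama_cpp";

// modelPath is the only required input (see the d.ts note above).
const model = new LlamaCpp({ modelPath: "/path/to/local-llama-model.bin" });

// Both calls go through the shared this._session created in the constructor.
console.log(await model.call("Say hello."));
console.log(await model.call("Now say it in French."));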
package/dist/llms/ollama.cjs
CHANGED
@@ -287,8 +287,8 @@ class Ollama extends base_js_1.LLM {
             },
         };
     }
-    async *_streamResponseChunks(
-        const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt
+    async *_streamResponseChunks(prompt, options, runManager) {
+        const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
         for await (const chunk of stream) {
             yield new index_js_1.GenerationChunk({
                 text: chunk.response,
@@ -301,11 +301,10 @@ class Ollama extends base_js_1.LLM {
         }
     }
     /** @ignore */
-    async _call(prompt, options) {
-        const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
+    async _call(prompt, options, runManager) {
         const chunks = [];
-        for await (const chunk of
-            chunks.push(chunk.
+        for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
+            chunks.push(chunk.text);
         }
         return chunks.join("");
     }
package/dist/llms/ollama.d.ts
CHANGED
@@ -79,7 +79,7 @@ export declare class Ollama extends LLM implements OllamaInput {
         vocab_only: boolean | undefined;
     };
     };
-    _streamResponseChunks(
+    _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
     /** @ignore */
-    _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+    _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
 }
package/dist/llms/ollama.js
CHANGED
@@ -284,8 +284,8 @@ export class Ollama extends LLM {
             },
         };
     }
-    async *_streamResponseChunks(
-        const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt
+    async *_streamResponseChunks(prompt, options, runManager) {
+        const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
         for await (const chunk of stream) {
             yield new GenerationChunk({
                 text: chunk.response,
@@ -298,11 +298,10 @@ export class Ollama extends LLM {
         }
     }
     /** @ignore */
-    async _call(prompt, options) {
-        const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
+    async _call(prompt, options, runManager) {
         const chunks = [];
-        for await (const chunk of
-            chunks.push(chunk.
+        for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
+            chunks.push(chunk.text);
        }
         return chunks.join("");
     }
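With this change _call is just a non-streaming wrapper over _streamResponseChunks, and both methods now accept the run manager for callbacks. A usage sketch against a locally running Ollama server; the baseUrl and model values are assumptions to adapt to your setup:

import { Ollama } from "langchain/llms/ollama";

const model = new Ollama({
  baseUrl: "http://localhost:11434", // wherever `ollama serve` is listening
  model: "llama2",                   // any model pulled locally
});

// Internally this iterates _streamResponseChunks and joins the chunk text.
console.log(await model.call("Why is the sky blue?"));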
package/dist/llms/openai.cjs
CHANGED
@@ -56,9 +56,9 @@ class OpenAI extends base_js_1.BaseLLM {
     constructor(fields,
     /** @deprecated */
     configuration) {
-        if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
-            fields?.modelName?.startsWith("gpt-4")
-            fields?.modelName?.
+        if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+            fields?.modelName?.startsWith("gpt-4")) &&
+            !fields?.modelName?.includes("-instruct")) {
             // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
             return new openai_chat_js_1.OpenAIChat(fields, configuration);
         }
package/dist/llms/openai.js
CHANGED
@@ -53,9 +53,9 @@ export class OpenAI extends BaseLLM {
     constructor(fields,
     /** @deprecated */
     configuration) {
-        if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
-            fields?.modelName?.startsWith("gpt-4")
-            fields?.modelName?.
+        if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+            fields?.modelName?.startsWith("gpt-4")) &&
+            !fields?.modelName?.includes("-instruct")) {
             // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
             return new OpenAIChat(fields, configuration);
         }
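The net effect of the openai change: "gpt-3.5-turbo*" and "gpt-4*" model names still make the constructor return an OpenAIChat instance, but names containing "-instruct" are now excluded from that redirect and stay on the completions-style OpenAI class. A sketch of the resulting behaviour (model names are illustrative):

import { OpenAI } from "langchain/llms/openai";

// Chat-style model name: the constructor returns an OpenAIChat under the hood.
const chatBacked = new OpenAI({ modelName: "gpt-3.5-turbo" });

// Contains "-instruct": no redirect, handled by the completions implementation.
const instruct = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });

console.log(await instruct.call("Write a haiku about package diffs."));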