langchain 0.0.168 → 0.0.169
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/chat_models/yandex.cjs +1 -0
- package/chat_models/yandex.d.ts +1 -0
- package/chat_models/yandex.js +1 -0
- package/dist/callbacks/handlers/llmonitor.cjs +21 -17
- package/dist/callbacks/handlers/llmonitor.js +21 -17
- package/dist/chat_models/cloudflare_workersai.cjs +7 -2
- package/dist/chat_models/cloudflare_workersai.d.ts +1 -1
- package/dist/chat_models/cloudflare_workersai.js +7 -2
- package/dist/chat_models/yandex.cjs +117 -0
- package/dist/chat_models/yandex.d.ts +16 -0
- package/dist/chat_models/yandex.js +113 -0
- package/dist/evaluation/comparison/prompt.d.ts +2 -2
- package/dist/experimental/chains/violation_of_expectations/index.cjs +5 -0
- package/dist/experimental/chains/violation_of_expectations/index.d.ts +1 -0
- package/dist/experimental/chains/violation_of_expectations/index.js +1 -0
- package/dist/experimental/chains/violation_of_expectations/types.cjs +49 -0
- package/dist/experimental/chains/violation_of_expectations/types.d.ts +69 -0
- package/dist/experimental/chains/violation_of_expectations/types.js +46 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +328 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.d.ts +148 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +324 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.cjs +49 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.d.ts +5 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.js +46 -0
- package/dist/llms/cloudflare_workersai.cjs +14 -7
- package/dist/llms/cloudflare_workersai.d.ts +1 -1
- package/dist/llms/cloudflare_workersai.js +14 -7
- package/dist/load/import_map.cjs +4 -2
- package/dist/load/import_map.d.ts +2 -0
- package/dist/load/import_map.js +2 -0
- package/dist/retrievers/zep.cjs +29 -3
- package/dist/retrievers/zep.d.ts +14 -0
- package/dist/retrievers/zep.js +29 -3
- package/dist/vectorstores/faiss.cjs +38 -6
- package/dist/vectorstores/faiss.d.ts +14 -2
- package/dist/vectorstores/faiss.js +38 -6
- package/dist/vectorstores/weaviate.cjs +13 -2
- package/dist/vectorstores/weaviate.js +13 -2
- package/experimental/chains/violation_of_expectations.cjs +1 -0
- package/experimental/chains/violation_of_expectations.d.ts +1 -0
- package/experimental/chains/violation_of_expectations.js +1 -0
- package/package.json +25 -9

package/chat_models/yandex.cjs
@@ -0,0 +1 @@
+module.exports = require('../dist/chat_models/yandex.cjs');

package/chat_models/yandex.d.ts
@@ -0,0 +1 @@
+export * from '../dist/chat_models/yandex.js'

package/chat_models/yandex.js
@@ -0,0 +1 @@
+export * from '../dist/chat_models/yandex.js'

package/dist/callbacks/handlers/llmonitor.cjs
@@ -72,13 +72,15 @@ const parseInput = (rawInput) => {
 const parseOutput = (rawOutput) => {
     if (!rawOutput)
         return null;
-    const { text, output, answer } = rawOutput;
+    const { text, output, answer, result } = rawOutput;
     if (text)
         return text;
     if (answer)
         return answer;
     if (output)
         return output;
+    if (result)
+        return result;
     return rawOutput;
 };
 class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
@@ -111,15 +113,14 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
             ...(extraParams?.invocation_params || {}),
             ...(metadata || {}),
         };
-        const
-        const
-        const userProps = params?.userProps || undefined;
+        const { model, model_name, modelName, userId, userProps, ...rest } = params;
+        const name = model || modelName || model_name || llm.id.at(-1);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: (0, exports.convertToLLMonitorMessages)(prompts),
-            extra:
+            extra: rest,
             userId,
             userProps,
             tags,
@@ -131,15 +132,15 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
             ...(extraParams?.invocation_params || {}),
             ...(metadata || {}),
         };
-
-        const userId = params
-        const
+        // Expand them so they're excluded from the "extra" field
+        const { model, model_name, modelName, userId, userProps, ...rest } = params;
+        const name = model || modelName || model_name || llm.id.at(-1);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: (0, exports.convertToLLMonitorMessages)(messages),
-            extra:
+            extra: rest,
             userId,
             userProps,
             tags,
@@ -164,20 +165,19 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
         });
     }
     async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
+        const { agentName, userId, userProps, ...rest } = metadata || {};
         // allow the user to specify an agent name
-        const
-        const name = (metadata?.agentName ?? chainName);
+        const name = agentName || chain.id.at(-1);
         // Attempt to automatically detect if this is an agent or chain
-        const runType =
-            ["AgentExecutor", "PlanAndExecute"].includes(chainName)
+        const runType = agentName || ["AgentExecutor", "PlanAndExecute"].includes(name)
             ? "agent"
             : "chain";
-        // eslint-disable-next-line @typescript-eslint/no-unused-vars
-        const { agentName, ...rest } = metadata || {};
         await this.monitor.trackEvent(runType, "start", {
             runId,
             parentRunId,
             name,
+            userId,
+            userProps,
             input: parseInput(inputs),
             extra: rest,
             tags,
@@ -197,12 +197,16 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
         });
     }
     async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
+        const { toolName, userId, userProps, ...rest } = metadata || {};
+        const name = toolName || tool.id.at(-1);
         await this.monitor.trackEvent("tool", "start", {
             runId,
             parentRunId,
-            name
+            name,
+            userId,
+            userProps,
             input,
-            extra:
+            extra: rest,
             tags,
             runtime: "langchain-js",
         });

package/dist/callbacks/handlers/llmonitor.js
@@ -65,13 +65,15 @@ const parseInput = (rawInput) => {
 const parseOutput = (rawOutput) => {
     if (!rawOutput)
         return null;
-    const { text, output, answer } = rawOutput;
+    const { text, output, answer, result } = rawOutput;
     if (text)
         return text;
     if (answer)
         return answer;
     if (output)
         return output;
+    if (result)
+        return result;
     return rawOutput;
 };
 export class LLMonitorHandler extends BaseCallbackHandler {
@@ -104,15 +106,14 @@ export class LLMonitorHandler extends BaseCallbackHandler {
             ...(extraParams?.invocation_params || {}),
             ...(metadata || {}),
         };
-        const
-        const
-        const userProps = params?.userProps || undefined;
+        const { model, model_name, modelName, userId, userProps, ...rest } = params;
+        const name = model || modelName || model_name || llm.id.at(-1);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: convertToLLMonitorMessages(prompts),
-            extra:
+            extra: rest,
             userId,
             userProps,
             tags,
@@ -124,15 +125,15 @@ export class LLMonitorHandler extends BaseCallbackHandler {
             ...(extraParams?.invocation_params || {}),
             ...(metadata || {}),
         };
-
-        const userId = params
-        const
+        // Expand them so they're excluded from the "extra" field
+        const { model, model_name, modelName, userId, userProps, ...rest } = params;
+        const name = model || modelName || model_name || llm.id.at(-1);
         await this.monitor.trackEvent("llm", "start", {
             runId,
             parentRunId,
             name,
             input: convertToLLMonitorMessages(messages),
-            extra:
+            extra: rest,
             userId,
             userProps,
             tags,
@@ -157,20 +158,19 @@ export class LLMonitorHandler extends BaseCallbackHandler {
         });
     }
     async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
+        const { agentName, userId, userProps, ...rest } = metadata || {};
         // allow the user to specify an agent name
-        const
-        const name = (metadata?.agentName ?? chainName);
+        const name = agentName || chain.id.at(-1);
         // Attempt to automatically detect if this is an agent or chain
-        const runType =
-            ["AgentExecutor", "PlanAndExecute"].includes(chainName)
+        const runType = agentName || ["AgentExecutor", "PlanAndExecute"].includes(name)
             ? "agent"
             : "chain";
-        // eslint-disable-next-line @typescript-eslint/no-unused-vars
-        const { agentName, ...rest } = metadata || {};
         await this.monitor.trackEvent(runType, "start", {
             runId,
             parentRunId,
             name,
+            userId,
+            userProps,
             input: parseInput(inputs),
             extra: rest,
             tags,
@@ -190,12 +190,16 @@ export class LLMonitorHandler extends BaseCallbackHandler {
         });
     }
     async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
+        const { toolName, userId, userProps, ...rest } = metadata || {};
+        const name = toolName || tool.id.at(-1);
        await this.monitor.trackEvent("tool", "start", {
             runId,
             parentRunId,
-            name
+            name,
+            userId,
+            userProps,
             input,
-            extra:
+            extra: rest,
             tags,
             runtime: "langchain-js",
         });
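
Both builds ship the same change: the model name is now pulled from `model`/`modelName`/`model_name`, `handleChainStart` and `handleToolStart` accept `agentName`/`toolName`, and `userId`/`userProps` are emitted on every event instead of being folded into `extra`. A minimal sketch of supplying those fields, assuming the `langchain/callbacks/handlers/llmonitor` entrypoint and an existing chain instance; the metadata values are hypothetical:

import { LLMonitorHandler } from "langchain/callbacks/handlers/llmonitor";

// `chain` is any LangChain chain instance. Fields the handler destructures
// from run metadata are lifted out; anything else (here `region`) lands in
// the event's `extra` payload.
const result = await chain.call(
  { input: "How do I reset my password?" },
  {
    callbacks: [new LLMonitorHandler()],
    metadata: {
      agentName: "support-agent", // used as the run name for chains/agents
      userId: "user_123",         // now forwarded on the event itself
      userProps: { plan: "pro" },
      region: "eu",
    },
  }
);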
package/dist/chat_models/cloudflare_workersai.cjs
@@ -52,7 +52,12 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
         this.cloudflareApiToken =
             fields?.cloudflareApiToken ??
                 (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_API_TOKEN");
-        this.baseUrl =
+        this.baseUrl =
+            fields?.baseUrl ??
+                `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
+        if (this.baseUrl.endsWith("/")) {
+            this.baseUrl = this.baseUrl.slice(0, -1);
+        }
     }
     _llmType() {
         return "cloudflare";
@@ -112,7 +117,7 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
     /** @ignore */
     async _call(messages, options) {
         this.validateEnvironment();
-        const url =
+        const url = `${this.baseUrl}/${this.model}`;
         const headers = {
             Authorization: `Bearer ${this.cloudflareApiToken}`,
             "Content-Type": "application/json",

package/dist/chat_models/cloudflare_workersai.d.ts
@@ -19,7 +19,7 @@ export declare class ChatCloudflareWorkersAI extends SimpleChatModel implements
     model: string;
     cloudflareAccountId?: string;
     cloudflareApiToken?: string;
-    baseUrl
+    baseUrl: string;
     constructor(fields?: CloudflareWorkersAIInput & BaseChatModelParams);
     _llmType(): string;
     /** Get the identifying parameters for this LLM. */

package/dist/chat_models/cloudflare_workersai.js
@@ -49,7 +49,12 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
         this.cloudflareApiToken =
             fields?.cloudflareApiToken ??
                 getEnvironmentVariable("CLOUDFLARE_API_TOKEN");
-        this.baseUrl =
+        this.baseUrl =
+            fields?.baseUrl ??
+                `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
+        if (this.baseUrl.endsWith("/")) {
+            this.baseUrl = this.baseUrl.slice(0, -1);
+        }
     }
     _llmType() {
         return "cloudflare";
@@ -109,7 +114,7 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
     /** @ignore */
     async _call(messages, options) {
         this.validateEnvironment();
-        const url =
+        const url = `${this.baseUrl}/${this.model}`;
         const headers = {
             Authorization: `Bearer ${this.cloudflareApiToken}`,
             "Content-Type": "application/json",
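
The chat model (and, per the file list above, the companion LLM class) gains a `baseUrl` constructor field, typed `baseUrl: string` in the `.d.ts`, defaulting to the Cloudflare accounts endpoint; a trailing slash is stripped and requests target `${baseUrl}/${model}`. A usage sketch, assuming the `langchain/chat_models/cloudflare_workersai` entrypoint; the gateway URL is a placeholder:

import { ChatCloudflareWorkersAI } from "langchain/chat_models/cloudflare_workersai";

const model = new ChatCloudflareWorkersAI({
  model: "@cf/meta/llama-2-7b-chat-int8",
  cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID,
  cloudflareApiToken: process.env.CLOUDFLARE_API_TOKEN,
  // Optional override, e.g. an AI Gateway URL; a trailing "/" is now
  // tolerated because the constructor strips it before appending the model.
  baseUrl: "https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/my-gateway/workers-ai/",
});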
package/dist/chat_models/yandex.cjs
@@ -0,0 +1,117 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ChatYandexGPT = void 0;
+const index_js_1 = require("../schema/index.cjs");
+const env_js_1 = require("../util/env.cjs");
+const base_js_1 = require("./base.cjs");
+const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/chat";
+function _parseChatHistory(history) {
+    const chatHistory = [];
+    let instruction = "";
+    for (const message of history) {
+        if ("content" in message) {
+            if (message._getType() === "human") {
+                chatHistory.push({ role: "user", text: message.content });
+            }
+            else if (message._getType() === "ai") {
+                chatHistory.push({ role: "assistant", text: message.content });
+            }
+            else if (message._getType() === "system") {
+                instruction = message.content;
+            }
+        }
+    }
+    return [chatHistory, instruction];
+}
+class ChatYandexGPT extends base_js_1.BaseChatModel {
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "apiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "iamToken", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.6
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1700
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "general"
+        });
+        const apiKey = fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("YC_API_KEY");
+        const iamToken = fields?.iamToken ?? (0, env_js_1.getEnvironmentVariable)("YC_IAM_TOKEN");
+        if (apiKey === undefined && iamToken === undefined) {
+            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
+        }
+        this.apiKey = apiKey;
+        this.iamToken = iamToken;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.model = fields?.model ?? this.model;
+    }
+    _llmType() {
+        return "yandexgpt";
+    }
+    _combineLLMOutput() {
+        return {};
+    }
+    /** @ignore */
+    async _generate(messages, options, _) {
+        const [messageHistory, instruction] = _parseChatHistory(messages);
+        const headers = { "Content-Type": "application/json", Authorization: "" };
+        if (this.apiKey !== undefined) {
+            headers.Authorization = `Api-Key ${this.apiKey}`;
+        }
+        else {
+            headers.Authorization = `Bearer ${this.iamToken}`;
+        }
+        const bodyData = {
+            model: this.model,
+            generationOptions: {
+                temperature: this.temperature,
+                maxTokens: this.maxTokens,
+            },
+            messages: messageHistory,
+            instructionText: instruction,
+        };
+        const response = await fetch(apiUrl, {
+            method: "POST",
+            headers,
+            body: JSON.stringify(bodyData),
+            signal: options?.signal,
+        });
+        if (!response.ok) {
+            throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
+        }
+        const responseData = await response.json();
+        const { result } = responseData;
+        const { text } = result.message;
+        const totalTokens = result.num_tokens;
+        const generations = [
+            { text, message: new index_js_1.AIMessage(text) },
+        ];
+        return {
+            generations,
+            llmOutput: { totalTokens },
+        };
+    }
+}
+exports.ChatYandexGPT = ChatYandexGPT;
package/dist/chat_models/yandex.d.ts
@@ -0,0 +1,16 @@
+import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
+import { YandexGPTInputs } from "../llms/yandex.js";
+import { BaseMessage, ChatResult } from "../schema/index.js";
+import { BaseChatModel } from "./base.js";
+export declare class ChatYandexGPT extends BaseChatModel {
+    apiKey?: string;
+    iamToken?: string;
+    temperature: number;
+    maxTokens: number;
+    model: string;
+    constructor(fields?: YandexGPTInputs);
+    _llmType(): string;
+    _combineLLMOutput?(): {};
+    /** @ignore */
+    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _?: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
+}
package/dist/chat_models/yandex.js
@@ -0,0 +1,113 @@
+import { AIMessage, } from "../schema/index.js";
+import { getEnvironmentVariable } from "../util/env.js";
+import { BaseChatModel } from "./base.js";
+const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/chat";
+function _parseChatHistory(history) {
+    const chatHistory = [];
+    let instruction = "";
+    for (const message of history) {
+        if ("content" in message) {
+            if (message._getType() === "human") {
+                chatHistory.push({ role: "user", text: message.content });
+            }
+            else if (message._getType() === "ai") {
+                chatHistory.push({ role: "assistant", text: message.content });
+            }
+            else if (message._getType() === "system") {
+                instruction = message.content;
+            }
+        }
+    }
+    return [chatHistory, instruction];
+}
+export class ChatYandexGPT extends BaseChatModel {
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "apiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "iamToken", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.6
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1700
+        });
+        Object.defineProperty(this, "model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "general"
+        });
+        const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
+        const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
+        if (apiKey === undefined && iamToken === undefined) {
+            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
+        }
+        this.apiKey = apiKey;
+        this.iamToken = iamToken;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.model = fields?.model ?? this.model;
+    }
+    _llmType() {
+        return "yandexgpt";
+    }
+    _combineLLMOutput() {
+        return {};
+    }
+    /** @ignore */
+    async _generate(messages, options, _) {
+        const [messageHistory, instruction] = _parseChatHistory(messages);
+        const headers = { "Content-Type": "application/json", Authorization: "" };
+        if (this.apiKey !== undefined) {
+            headers.Authorization = `Api-Key ${this.apiKey}`;
+        }
+        else {
+            headers.Authorization = `Bearer ${this.iamToken}`;
+        }
+        const bodyData = {
+            model: this.model,
+            generationOptions: {
+                temperature: this.temperature,
+                maxTokens: this.maxTokens,
+            },
+            messages: messageHistory,
+            instructionText: instruction,
+        };
+        const response = await fetch(apiUrl, {
+            method: "POST",
+            headers,
+            body: JSON.stringify(bodyData),
+            signal: options?.signal,
+        });
+        if (!response.ok) {
+            throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
+        }
+        const responseData = await response.json();
+        const { result } = responseData;
+        const { text } = result.message;
+        const totalTokens = result.num_tokens;
+        const generations = [
+            { text, message: new AIMessage(text) },
+        ];
+        return {
+            generations,
+            llmOutput: { totalTokens },
+        };
+    }
+}
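
The ESM build mirrors the `.cjs` build above. A minimal usage sketch via the `langchain/chat_models/yandex` entrypoint added at the top of this diff; authentication falls back to the `YC_API_KEY` / `YC_IAM_TOKEN` environment variables as shown in the constructor:

import { ChatYandexGPT } from "langchain/chat_models/yandex";
import { HumanMessage } from "langchain/schema";

const chat = new ChatYandexGPT({
  apiKey: process.env.YC_API_KEY, // sent as `Api-Key ...`; iamToken uses `Bearer ...`
  model: "general",               // defaults match the constructor above
  temperature: 0.6,
  maxTokens: 1700,
});
const response = await chat.call([new HumanMessage("Hello!")]);
console.log(response.content);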
package/dist/evaluation/comparison/prompt.d.ts
@@ -8,14 +8,14 @@
 import { PromptTemplate } from "../../prompts/index.js";
 export declare const PROMPT: PromptTemplate<{
     input: any;
-    criteria: any;
     prediction: any;
+    criteria: any;
     predictionB: any;
 }, any>;
 export declare const PROMPT_WITH_REFERENCES: PromptTemplate<{
     input: any;
+    prediction: any;
     criteria: any;
     reference: any;
-    prediction: any;
     predictionB: any;
 }, any>;
package/dist/experimental/chains/violation_of_expectations/index.cjs
@@ -0,0 +1,5 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ViolationOfExpectationsChain = void 0;
+var violation_of_expectations_chain_js_1 = require("./violation_of_expectations_chain.cjs");
+Object.defineProperty(exports, "ViolationOfExpectationsChain", { enumerable: true, get: function () { return violation_of_expectations_chain_js_1.ViolationOfExpectationsChain; } });
package/dist/experimental/chains/violation_of_expectations/index.d.ts
@@ -0,0 +1 @@
+export { type ViolationOfExpectationsChainInput, ViolationOfExpectationsChain, } from "./violation_of_expectations_chain.js";
package/dist/experimental/chains/violation_of_expectations/index.js
@@ -0,0 +1 @@
+export { ViolationOfExpectationsChain, } from "./violation_of_expectations_chain.js";
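
Both builds re-export the chain through the new `langchain/experimental/chains/violation_of_expectations` entrypoint. A sketch of wiring it up; the `fromLLM(llm, retriever)` factory and the `chat_history` input key are assumptions here, since `violation_of_expectations_chain.js` itself is not shown in this excerpt:

import { ViolationOfExpectationsChain } from "langchain/experimental/chains/violation_of_expectations";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { AIMessage, HumanMessage } from "langchain/schema";

// Hypothetical wiring: a retriever for storing generated insights, plus a chat model.
const vectorStore = await MemoryVectorStore.fromTexts([" "], [{ id: 1 }], new OpenAIEmbeddings());
const chain = ViolationOfExpectationsChain.fromLLM(
  new ChatOpenAI({ modelName: "gpt-4" }),
  vectorStore.asRetriever()
);
const result = await chain.call({
  chat_history: [new HumanMessage("Hi!"), new AIMessage("Hello! How can I help you today?")],
});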
package/dist/experimental/chains/violation_of_expectations/types.cjs
@@ -0,0 +1,49 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.PREDICTION_VIOLATIONS_FUNCTION = exports.PREDICT_NEXT_USER_MESSAGE_FUNCTION = void 0;
+exports.PREDICT_NEXT_USER_MESSAGE_FUNCTION = {
+    name: "predictNextUserMessage",
+    description: "Predicts the next user message, along with insights.",
+    parameters: {
+        type: "object",
+        properties: {
+            userState: {
+                type: "string",
+                description: "Concise reasoning about the users internal mental state.",
+            },
+            predictedUserMessage: {
+                type: "string",
+                description: "Your prediction on how they will respond to the AI's most recent message.",
+            },
+            insights: {
+                type: "array",
+                items: {
+                    type: "string",
+                },
+                description: "A concise list of any additional insights that would be useful to improve prediction.",
+            },
+        },
+        required: ["userState", "predictedUserMessage", "insights"],
+    },
+};
+exports.PREDICTION_VIOLATIONS_FUNCTION = {
+    name: "predictionViolations",
+    description: "Generates violations, errors and differences between the predicted user response, and the actual response.",
+    parameters: {
+        type: "object",
+        properties: {
+            violationExplanation: {
+                type: "string",
+                description: "How was the predication violated?",
+            },
+            explainedPredictionErrors: {
+                type: "array",
+                items: {
+                    type: "string",
+                },
+                description: "Explanations of how the prediction was violated and why",
+            },
+        },
+        required: ["violationExplanation", "explainedPredictionErrors"],
+    },
+};
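
Both constants are plain OpenAI function-calling schemas. A sketch of forcing a model to return the `{ userState, predictedUserMessage, insights }` shape with the first schema; the schema is re-declared locally because only the chain class is re-exported from the public entrypoint:

import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage } from "langchain/schema";

// Same shape as PREDICT_NEXT_USER_MESSAGE_FUNCTION in types.cjs above.
const predictNextUserMessage = {
  name: "predictNextUserMessage",
  description: "Predicts the next user message, along with insights.",
  parameters: {
    type: "object",
    properties: {
      userState: { type: "string", description: "Concise reasoning about the users internal mental state." },
      predictedUserMessage: { type: "string", description: "Your prediction on how they will respond to the AI's most recent message." },
      insights: { type: "array", items: { type: "string" }, description: "A concise list of any additional insights that would be useful to improve prediction." },
    },
    required: ["userState", "predictedUserMessage", "insights"],
  },
};

const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" });
const message = await model.call(
  [new HumanMessage("AI: 'Your order shipped today.' Predict the user's reply.")],
  { functions: [predictNextUserMessage], function_call: { name: "predictNextUserMessage" } }
);
// The forced function call returns its arguments as a JSON string.
const prediction = JSON.parse(message.additional_kwargs.function_call?.arguments ?? "{}");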
package/dist/experimental/chains/violation_of_expectations/types.d.ts
@@ -0,0 +1,69 @@
+import { BaseMessage, HumanMessage } from "../../../schema/index.js";
+/**
+ * Contains the chunk of messages, along with the
+ * users response, which is the next message after the chunk.
+ */
+export type MessageChunkResult = {
+    chunkedMessages: BaseMessage[];
+    /**
+     * User response can be undefined if the last message in
+     * the chat history was from the AI.
+     */
+    userResponse?: HumanMessage;
+};
+export type PredictNextUserMessageResponse = {
+    userState: string;
+    predictedUserMessage: string;
+    insights: Array<string>;
+};
+export type GetPredictionViolationsResponse = {
+    userResponse?: HumanMessage;
+    revisedPrediction: string;
+    explainedPredictionErrors: Array<string>;
+};
+export declare const PREDICT_NEXT_USER_MESSAGE_FUNCTION: {
+    name: string;
+    description: string;
+    parameters: {
+        type: string;
+        properties: {
+            userState: {
+                type: string;
+                description: string;
+            };
+            predictedUserMessage: {
+                type: string;
+                description: string;
+            };
+            insights: {
+                type: string;
+                items: {
+                    type: string;
+                };
+                description: string;
+            };
+        };
+        required: string[];
+    };
+};
+export declare const PREDICTION_VIOLATIONS_FUNCTION: {
+    name: string;
+    description: string;
+    parameters: {
+        type: string;
+        properties: {
+            violationExplanation: {
+                type: string;
+                description: string;
+            };
+            explainedPredictionErrors: {
+                type: string;
+                items: {
+                    type: string;
+                };
+                description: string;
+            };
+        };
+        required: string[];
+    };
+};