langchain 0.0.197-rc.1 → 0.0.198
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the versions exactly as they appear in the public registries.
- package/dist/chains/openai_moderation.cjs +2 -2
- package/dist/chains/openai_moderation.d.ts +1 -1
- package/dist/chains/openai_moderation.js +1 -1
- package/dist/chat_models/anthropic.cjs +351 -15
- package/dist/chat_models/anthropic.d.ts +157 -1
- package/dist/chat_models/anthropic.js +348 -1
- package/dist/chat_models/cloudflare_workersai.cjs +5 -0
- package/dist/chat_models/cloudflare_workersai.d.ts +3 -0
- package/dist/chat_models/cloudflare_workersai.js +5 -0
- package/dist/chat_models/fireworks.d.ts +1 -1
- package/dist/chat_models/iflytek_xinghuo/common.d.ts +1 -1
- package/dist/chat_models/llama_cpp.cjs +24 -0
- package/dist/chat_models/llama_cpp.d.ts +3 -1
- package/dist/chat_models/llama_cpp.js +24 -0
- package/dist/chat_models/minimax.d.ts +1 -1
- package/dist/chat_models/openai.cjs +698 -4
- package/dist/chat_models/openai.d.ts +137 -4
- package/dist/chat_models/openai.js +695 -2
- package/dist/document_loaders/fs/openai_whisper_audio.cjs +2 -2
- package/dist/document_loaders/fs/openai_whisper_audio.d.ts +1 -1
- package/dist/document_loaders/fs/openai_whisper_audio.js +1 -1
- package/dist/document_loaders/fs/pptx.cjs +39 -0
- package/dist/document_loaders/fs/pptx.d.ts +23 -0
- package/dist/document_loaders/fs/pptx.js +35 -0
- package/dist/embeddings/openai.cjs +240 -2
- package/dist/embeddings/openai.d.ts +82 -1
- package/dist/embeddings/openai.js +239 -1
- package/dist/experimental/openai_assistant/index.cjs +35 -3
- package/dist/experimental/openai_assistant/index.d.ts +27 -1
- package/dist/experimental/openai_assistant/index.js +33 -1
- package/dist/experimental/openai_assistant/schema.d.ts +1 -1
- package/dist/experimental/openai_files/index.cjs +2 -2
- package/dist/experimental/openai_files/index.d.ts +1 -1
- package/dist/experimental/openai_files/index.js +1 -1
- package/dist/experimental/tools/pyinterpreter.cjs +248 -0
- package/dist/experimental/tools/pyinterpreter.d.ts +18 -0
- package/dist/experimental/tools/pyinterpreter.js +244 -0
- package/dist/graphs/neo4j_graph.cjs +49 -14
- package/dist/graphs/neo4j_graph.d.ts +30 -0
- package/dist/graphs/neo4j_graph.js +49 -14
- package/dist/llms/fireworks.d.ts +1 -1
- package/dist/llms/hf.cjs +13 -2
- package/dist/llms/hf.d.ts +5 -0
- package/dist/llms/hf.js +13 -2
- package/dist/llms/llama_cpp.cjs +17 -3
- package/dist/llms/llama_cpp.d.ts +4 -1
- package/dist/llms/llama_cpp.js +17 -3
- package/dist/llms/openai-chat.cjs +445 -3
- package/dist/llms/openai-chat.d.ts +123 -4
- package/dist/llms/openai-chat.js +443 -2
- package/dist/llms/openai.cjs +530 -6
- package/dist/llms/openai.d.ts +123 -4
- package/dist/llms/openai.js +525 -2
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/output_parsers/json.cjs +4 -0
- package/dist/output_parsers/json.js +4 -0
- package/dist/schema/index.d.ts +1 -1
- package/dist/tools/convert_to_openai.cjs +38 -4
- package/dist/tools/convert_to_openai.d.ts +11 -1
- package/dist/tools/convert_to_openai.js +35 -1
- package/dist/types/openai-types.d.ts +133 -1
- package/dist/util/env.cjs +9 -70
- package/dist/util/env.d.ts +1 -21
- package/dist/util/env.js +1 -62
- package/dist/util/openai-format-fndef.cjs +81 -0
- package/dist/util/openai-format-fndef.d.ts +44 -0
- package/dist/util/openai-format-fndef.js +77 -0
- package/dist/util/openai.cjs +18 -2
- package/dist/util/openai.d.ts +1 -1
- package/dist/util/openai.js +17 -1
- package/dist/util/openapi.d.ts +2 -2
- package/dist/util/prompt-layer.d.ts +1 -1
- package/dist/vectorstores/clickhouse.cjs +286 -0
- package/dist/vectorstores/clickhouse.d.ts +126 -0
- package/dist/vectorstores/clickhouse.js +259 -0
- package/dist/vectorstores/pgvector.cjs +142 -18
- package/dist/vectorstores/pgvector.d.ts +21 -0
- package/dist/vectorstores/pgvector.js +142 -18
- package/dist/vectorstores/weaviate.cjs +45 -2
- package/dist/vectorstores/weaviate.d.ts +27 -1
- package/dist/vectorstores/weaviate.js +45 -2
- package/document_loaders/fs/pptx.cjs +1 -0
- package/document_loaders/fs/pptx.d.ts +1 -0
- package/document_loaders/fs/pptx.js +1 -0
- package/experimental/tools/pyinterpreter.cjs +1 -0
- package/experimental/tools/pyinterpreter.d.ts +1 -0
- package/experimental/tools/pyinterpreter.js +1 -0
- package/package.json +41 -9
- package/vectorstores/clickhouse.cjs +1 -0
- package/vectorstores/clickhouse.d.ts +1 -0
- package/vectorstores/clickhouse.js +1 -0

package/dist/chat_models/anthropic.js
@@ -1 +1,348 @@
-
+import { Anthropic, AI_PROMPT, HUMAN_PROMPT, } from "@anthropic-ai/sdk";
+import { AIMessage, AIMessageChunk, ChatMessage, } from "@langchain/core/messages";
+import { ChatGenerationChunk, } from "@langchain/core/outputs";
+import { getEnvironmentVariable } from "@langchain/core/utils/env";
+import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
+export { AI_PROMPT, HUMAN_PROMPT };
+/**
+ * Extracts the custom role of a generic chat message.
+ * @param message The chat message from which to extract the custom role.
+ * @returns The custom role of the chat message.
+ */
+function extractGenericMessageCustomRole(message) {
+    if (message.role !== AI_PROMPT &&
+        message.role !== HUMAN_PROMPT &&
+        message.role !== "") {
+        console.warn(`Unknown message role: ${message.role}`);
+    }
+    return message.role;
+}
+/**
+ * Gets the Anthropic prompt from a base message.
+ * @param message The base message from which to get the Anthropic prompt.
+ * @returns The Anthropic prompt from the base message.
+ */
+function getAnthropicPromptFromMessage(message) {
+    const type = message._getType();
+    switch (type) {
+        case "ai":
+            return AI_PROMPT;
+        case "human":
+            return HUMAN_PROMPT;
+        case "system":
+            return "";
+        case "generic": {
+            if (!ChatMessage.isInstance(message))
+                throw new Error("Invalid generic chat message");
+            return extractGenericMessageCustomRole(message);
+        }
+        default:
+            throw new Error(`Unknown message type: ${type}`);
+    }
+}
+export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
+/**
+ * Wrapper around Anthropic large language models.
+ *
+ * To use you should have the `@anthropic-ai/sdk` package installed, with the
+ * `ANTHROPIC_API_KEY` environment variable set.
+ *
+ * @remarks
+ * Any parameters that are valid to be passed to {@link
+ * https://console.anthropic.com/docs/api/reference |
+ * `anthropic.complete`} can be passed through {@link invocationKwargs},
+ * even if not explicitly available on this class.
+ * @example
+ * ```typescript
+ * const model = new ChatAnthropic({
+ *   temperature: 0.9,
+ *   anthropicApiKey: 'YOUR-API-KEY',
+ * });
+ * const res = await model.invoke({ input: 'Hello!' });
+ * console.log(res);
+ * ```
+ */
+export class ChatAnthropic extends BaseChatModel {
+    static lc_name() {
+        return "ChatAnthropic";
+    }
+    get lc_secrets() {
+        return {
+            anthropicApiKey: "ANTHROPIC_API_KEY",
+        };
+    }
+    get lc_aliases() {
+        return {
+            modelName: "model",
+        };
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "anthropicApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "apiUrl", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1
+        });
+        Object.defineProperty(this, "topK", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: -1
+        });
+        Object.defineProperty(this, "topP", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: -1
+        });
+        Object.defineProperty(this, "maxTokensToSample", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 2048
+        });
+        Object.defineProperty(this, "modelName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "claude-2"
+        });
+        Object.defineProperty(this, "invocationKwargs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "stopSequences", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "streaming", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+        Object.defineProperty(this, "clientOptions", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        // Used for non-streaming requests
+        Object.defineProperty(this, "batchClient", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        // Used for streaming requests
+        Object.defineProperty(this, "streamingClient", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.anthropicApiKey =
+            fields?.anthropicApiKey ?? getEnvironmentVariable("ANTHROPIC_API_KEY");
+        if (!this.anthropicApiKey) {
+            throw new Error("Anthropic API key not found");
+        }
+        // Support overriding the default API URL (i.e., https://api.anthropic.com)
+        this.apiUrl = fields?.anthropicApiUrl;
+        this.modelName = fields?.modelName ?? this.modelName;
+        this.invocationKwargs = fields?.invocationKwargs ?? {};
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.topK = fields?.topK ?? this.topK;
+        this.topP = fields?.topP ?? this.topP;
+        this.maxTokensToSample =
+            fields?.maxTokensToSample ?? this.maxTokensToSample;
+        this.stopSequences = fields?.stopSequences ?? this.stopSequences;
+        this.streaming = fields?.streaming ?? false;
+        this.clientOptions = fields?.clientOptions ?? {};
+    }
+    /**
+     * Get the parameters used to invoke the model
+     */
+    invocationParams(options) {
+        return {
+            model: this.modelName,
+            temperature: this.temperature,
+            top_k: this.topK,
+            top_p: this.topP,
+            stop_sequences: options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
+                this.stopSequences ??
+                DEFAULT_STOP_SEQUENCES,
+            max_tokens_to_sample: this.maxTokensToSample,
+            stream: this.streaming,
+            ...this.invocationKwargs,
+        };
+    }
+    /** @ignore */
+    _identifyingParams() {
+        return {
+            model_name: this.modelName,
+            ...this.invocationParams(),
+        };
+    }
+    /**
+     * Get the identifying parameters for the model
+     */
+    identifyingParams() {
+        return {
+            model_name: this.modelName,
+            ...this.invocationParams(),
+        };
+    }
+    async *_streamResponseChunks(messages, options, runManager) {
+        const params = this.invocationParams(options);
+        const stream = await this.createStreamWithRetry({
+            ...params,
+            prompt: this.formatMessagesAsPrompt(messages),
+        });
+        let modelSent = false;
+        let stopReasonSent = false;
+        for await (const data of stream) {
+            if (options.signal?.aborted) {
+                stream.controller.abort();
+                throw new Error("AbortError: User aborted the request.");
+            }
+            const additional_kwargs = {};
+            if (data.model && !modelSent) {
+                additional_kwargs.model = data.model;
+                modelSent = true;
+            }
+            else if (data.stop_reason && !stopReasonSent) {
+                additional_kwargs.stop_reason = data.stop_reason;
+                stopReasonSent = true;
+            }
+            const delta = data.completion ?? "";
+            yield new ChatGenerationChunk({
+                message: new AIMessageChunk({
+                    content: delta,
+                    additional_kwargs,
+                }),
+                text: delta,
+            });
+            await runManager?.handleLLMNewToken(delta);
+            if (data.stop_reason) {
+                break;
+            }
+        }
+    }
+    /**
+     * Formats messages as a prompt for the model.
+     * @param messages The base messages to format as a prompt.
+     * @returns The formatted prompt.
+     */
+    formatMessagesAsPrompt(messages) {
+        return (messages
+            .map((message) => {
+            const messagePrompt = getAnthropicPromptFromMessage(message);
+            return `${messagePrompt} ${message.content}`;
+        })
+            .join("") + AI_PROMPT);
+    }
+    /** @ignore */
+    async _generate(messages, options, runManager) {
+        if (this.stopSequences && options.stop) {
+            throw new Error(`"stopSequence" parameter found in input and default params`);
+        }
+        const params = this.invocationParams(options);
+        let response;
+        if (params.stream) {
+            response = {
+                completion: "",
+                model: "",
+                stop_reason: "",
+            };
+            const stream = await this._streamResponseChunks(messages, options, runManager);
+            for await (const chunk of stream) {
+                response.completion += chunk.message.content;
+                response.model =
+                    chunk.message.additional_kwargs.model ?? response.model;
+                response.stop_reason =
+                    chunk.message.additional_kwargs.stop_reason ??
+                        response.stop_reason;
+            }
+        }
+        else {
+            response = await this.completionWithRetry({
+                ...params,
+                prompt: this.formatMessagesAsPrompt(messages),
+            }, { signal: options.signal });
+        }
+        const generations = (response.completion ?? "")
+            .split(AI_PROMPT)
+            .map((message) => ({
+            text: message,
+            message: new AIMessage(message),
+        }));
+        return {
+            generations,
+        };
+    }
+    /**
+     * Creates a streaming request with retry.
+     * @param request The parameters for creating a completion.
+     * @returns A streaming request.
+     */
+    async createStreamWithRetry(request) {
+        if (!this.streamingClient) {
+            const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
+            this.streamingClient = new Anthropic({
+                ...this.clientOptions,
+                ...options,
+                apiKey: this.anthropicApiKey,
+                maxRetries: 0,
+            });
+        }
+        const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
+        return this.caller.call(makeCompletionRequest);
+    }
+    /** @ignore */
+    async completionWithRetry(request, options) {
+        if (!this.anthropicApiKey) {
+            throw new Error("Missing Anthropic API key.");
+        }
+        if (!this.batchClient) {
+            const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
+            this.batchClient = new Anthropic({
+                ...this.clientOptions,
+                ...options,
+                apiKey: this.anthropicApiKey,
+                maxRetries: 0,
+            });
+        }
+        const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
+        return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
+    }
+    _llmType() {
+        return "anthropic";
+    }
+    /** @ignore */
+    _combineLLMOutput() {
+        return [];
+    }
+}
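
The ChatAnthropic build above implements `_streamResponseChunks`, so the model can be consumed through the base chat model's `.stream()` method as well as `.invoke()`. A minimal usage sketch, assuming the `langchain/chat_models/anthropic` and `langchain/schema` entrypoints and an `ANTHROPIC_API_KEY` in the environment; the prompt is illustrative:

```typescript
import { ChatAnthropic } from "langchain/chat_models/anthropic";
import { HumanMessage } from "langchain/schema";

// Streaming goes through the _streamResponseChunks generator shown above;
// each chunk wraps an AIMessageChunk holding one completion delta.
const model = new ChatAnthropic({ modelName: "claude-2", temperature: 0.9 });
const stream = await model.stream([new HumanMessage("Write a haiku about diffs.")]);
for await (const chunk of stream) {
  console.log(chunk.content);
}
```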

package/dist/chat_models/cloudflare_workersai.cjs
@@ -82,6 +82,11 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
             this.baseUrl = this.baseUrl.slice(0, -1);
         }
     }
+    get lc_secrets() {
+        return {
+            cloudflareApiToken: "CLOUDFLARE_API_TOKEN",
+        };
+    }
     _llmType() {
         return "cloudflare";
     }

package/dist/chat_models/cloudflare_workersai.d.ts
@@ -38,6 +38,9 @@ export declare class ChatCloudflareWorkersAI extends SimpleChatModel implements
     baseUrl: string;
     streaming: boolean;
     constructor(fields?: CloudflareWorkersAIInput & BaseChatModelParams);
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
     _llmType(): string;
     /** Get the identifying parameters for this LLM. */
     get identifyingParams(): {

package/dist/chat_models/cloudflare_workersai.js
@@ -79,6 +79,11 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
             this.baseUrl = this.baseUrl.slice(0, -1);
         }
     }
+    get lc_secrets() {
+        return {
+            cloudflareApiToken: "CLOUDFLARE_API_TOKEN",
+        };
+    }
     _llmType() {
         return "cloudflare";
     }
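
The `lc_secrets` getter added to `ChatCloudflareWorkersAI` in the three builds above declares that the `cloudflareApiToken` field corresponds to the `CLOUDFLARE_API_TOKEN` environment variable, which is the mapping LangChain's serialization layer uses to redact the token rather than write its value out. A hedged construction sketch; the Workers AI model name and account ID are placeholders:

```typescript
import { ChatCloudflareWorkersAI } from "langchain/chat_models/cloudflare_workersai";

// cloudflareApiToken is the field named in lc_secrets, so it is treated as a
// secret (mapped to CLOUDFLARE_API_TOKEN) rather than as ordinary config.
const model = new ChatCloudflareWorkersAI({
  model: "@cf/meta/llama-2-7b-chat-int8", // placeholder model name
  cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID, // placeholder account
  cloudflareApiToken: process.env.CLOUDFLARE_API_TOKEN,
});
const res = await model.invoke("Say hello from Workers AI.");
console.log(res.content);
```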

package/dist/chat_models/fireworks.d.ts
@@ -1,4 +1,4 @@
-import type { OpenAIClient } from "
+import type { OpenAI as OpenAIClient } from "openai";
 import type { ChatOpenAICallOptions, OpenAIChatInput } from "./openai.js";
 import type { OpenAICoreRequestOptions } from "../types/openai-types.js";
 import type { BaseChatModelParams } from "./base.js";

package/dist/chat_models/iflytek_xinghuo/common.d.ts
@@ -131,8 +131,8 @@ export declare abstract class BaseChatIflytekXinghuo extends BaseChatModel imple
      * Get the identifying parameters for the model
      */
     identifyingParams(): {
-        temperature?: number | undefined;
         max_tokens?: number | undefined;
+        temperature?: number | undefined;
         top_k?: number | undefined;
         chat_id?: string | undefined;
         streaming: boolean;

package/dist/chat_models/llama_cpp.cjs
@@ -4,6 +4,7 @@ exports.ChatLlamaCpp = void 0;
 const node_llama_cpp_1 = require("node-llama-cpp");
 const base_js_1 = require("./base.cjs");
 const llama_cpp_js_1 = require("../util/llama_cpp.cjs");
+const index_js_1 = require("../schema/index.cjs");
 /**
  * To use this model you need to have the `node-llama-cpp` module installed.
  * This can be installed using `npm install -S node-llama-cpp` and the minimum

package/dist/chat_models/llama_cpp.cjs
@@ -139,6 +140,29 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
             throw new Error("Error getting prompt completion.");
         }
     }
+    async *_streamResponseChunks(input, _options, runManager) {
+        if (input.length !== 1) {
+            throw new Error("Only one human message should be provided.");
+        }
+        else {
+            const promptOptions = {
+                temperature: this?.temperature,
+                topK: this?.topK,
+                topP: this?.topP,
+            };
+            const stream = await this.caller.call(async () => this._context.evaluate(this._context.encode(`${input[0].content}`), promptOptions));
+            for await (const chunk of stream) {
+                yield new index_js_1.ChatGenerationChunk({
+                    text: this._context.decode([chunk]),
+                    message: new index_js_1.AIMessageChunk({
+                        content: this._context.decode([chunk]),
+                    }),
+                    generationInfo: {},
+                });
+                await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
+            }
+        }
+    }
     // This constructs a new session if we need to adding in any sys messages or previous chats
     _buildSession(messages) {
         let prompt = "";

package/dist/chat_models/llama_cpp.d.ts
@@ -2,7 +2,8 @@ import { LlamaModel, LlamaContext, LlamaChatSession, type ConversationInteractio
 import { SimpleChatModel, BaseChatModelParams } from "./base.js";
 import { LlamaBaseCppInputs } from "../util/llama_cpp.js";
 import { BaseLanguageModelCallOptions } from "../base_language/index.js";
-import
+import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
+import { BaseMessage, ChatGenerationChunk } from "../schema/index.js";
 /**
  * Note that the modelPath is the only required parameter. For testing you
  * can set this in the environment variable `LLAMA_PATH`.

package/dist/chat_models/llama_cpp.d.ts
@@ -63,6 +64,7 @@ export declare class ChatLlamaCpp extends SimpleChatModel<LlamaCppCallOptions> {
     };
     /** @ignore */
     _call(messages: BaseMessage[], _options: this["ParsedCallOptions"]): Promise<string>;
+    _streamResponseChunks(input: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     protected _buildSession(messages: BaseMessage[]): string;
     protected _convertMessagesToInteractions(messages: BaseMessage[]): ConversationInteraction[];
 }

package/dist/chat_models/llama_cpp.js
@@ -1,6 +1,7 @@
 import { LlamaChatSession, } from "node-llama-cpp";
 import { SimpleChatModel } from "./base.js";
 import { createLlamaModel, createLlamaContext, } from "../util/llama_cpp.js";
+import { ChatGenerationChunk, AIMessageChunk, } from "../schema/index.js";
 /**
  * To use this model you need to have the `node-llama-cpp` module installed.
  * This can be installed using `npm install -S node-llama-cpp` and the minimum

package/dist/chat_models/llama_cpp.js
@@ -136,6 +137,29 @@ export class ChatLlamaCpp extends SimpleChatModel {
             throw new Error("Error getting prompt completion.");
         }
     }
+    async *_streamResponseChunks(input, _options, runManager) {
+        if (input.length !== 1) {
+            throw new Error("Only one human message should be provided.");
+        }
+        else {
+            const promptOptions = {
+                temperature: this?.temperature,
+                topK: this?.topK,
+                topP: this?.topP,
+            };
+            const stream = await this.caller.call(async () => this._context.evaluate(this._context.encode(`${input[0].content}`), promptOptions));
+            for await (const chunk of stream) {
+                yield new ChatGenerationChunk({
+                    text: this._context.decode([chunk]),
+                    message: new AIMessageChunk({
+                        content: this._context.decode([chunk]),
+                    }),
+                    generationInfo: {},
+                });
+                await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
+            }
+        }
+    }
     // This constructs a new session if we need to adding in any sys messages or previous chats
     _buildSession(messages) {
         let prompt = "";
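
Together with the `.cjs` and `.d.ts` hunks above, the llama_cpp changes add a `_streamResponseChunks` generator to `ChatLlamaCpp`, making `.stream()` usable with a single human message. A minimal sketch, assuming the `langchain/chat_models/llama_cpp` entrypoint and a local GGUF model; the model path is a placeholder:

```typescript
import { ChatLlamaCpp } from "langchain/chat_models/llama_cpp";
import { HumanMessage } from "langchain/schema";

// _streamResponseChunks accepts exactly one human message, so the input
// array must contain a single entry; each yielded chunk is one decoded token.
const model = new ChatLlamaCpp({
  modelPath: "/path/to/model.gguf", // placeholder local model path
  temperature: 0.7,
});
const stream = await model.stream([new HumanMessage("Tell me a short joke.")]);
for await (const chunk of stream) {
  console.log(chunk.content);
}
```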

package/dist/chat_models/minimax.d.ts
@@ -258,8 +258,8 @@ export declare class ChatMinimax extends BaseChatModel<ChatMinimaxCallOptions> i
         prompt?: string | undefined;
         stream?: boolean | undefined;
         functions?: OpenAIClient.Chat.Completions.ChatCompletionCreateParams.Function[] | undefined;
-        temperature?: number | undefined;
         model: string;
+        temperature?: number | undefined;
         top_p?: number | undefined;
         plugins?: string[] | undefined;
         tokens_to_generate?: number | undefined;