langchain 0.0.197-rc.1 → 0.0.198
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chains/openai_moderation.cjs +2 -2
- package/dist/chains/openai_moderation.d.ts +1 -1
- package/dist/chains/openai_moderation.js +1 -1
- package/dist/chat_models/anthropic.cjs +351 -15
- package/dist/chat_models/anthropic.d.ts +157 -1
- package/dist/chat_models/anthropic.js +348 -1
- package/dist/chat_models/cloudflare_workersai.cjs +5 -0
- package/dist/chat_models/cloudflare_workersai.d.ts +3 -0
- package/dist/chat_models/cloudflare_workersai.js +5 -0
- package/dist/chat_models/fireworks.d.ts +1 -1
- package/dist/chat_models/iflytek_xinghuo/common.d.ts +1 -1
- package/dist/chat_models/llama_cpp.cjs +24 -0
- package/dist/chat_models/llama_cpp.d.ts +3 -1
- package/dist/chat_models/llama_cpp.js +24 -0
- package/dist/chat_models/minimax.d.ts +1 -1
- package/dist/chat_models/openai.cjs +698 -4
- package/dist/chat_models/openai.d.ts +137 -4
- package/dist/chat_models/openai.js +695 -2
- package/dist/document_loaders/fs/openai_whisper_audio.cjs +2 -2
- package/dist/document_loaders/fs/openai_whisper_audio.d.ts +1 -1
- package/dist/document_loaders/fs/openai_whisper_audio.js +1 -1
- package/dist/document_loaders/fs/pptx.cjs +39 -0
- package/dist/document_loaders/fs/pptx.d.ts +23 -0
- package/dist/document_loaders/fs/pptx.js +35 -0
- package/dist/embeddings/openai.cjs +240 -2
- package/dist/embeddings/openai.d.ts +82 -1
- package/dist/embeddings/openai.js +239 -1
- package/dist/experimental/openai_assistant/index.cjs +35 -3
- package/dist/experimental/openai_assistant/index.d.ts +27 -1
- package/dist/experimental/openai_assistant/index.js +33 -1
- package/dist/experimental/openai_assistant/schema.d.ts +1 -1
- package/dist/experimental/openai_files/index.cjs +2 -2
- package/dist/experimental/openai_files/index.d.ts +1 -1
- package/dist/experimental/openai_files/index.js +1 -1
- package/dist/experimental/tools/pyinterpreter.cjs +248 -0
- package/dist/experimental/tools/pyinterpreter.d.ts +18 -0
- package/dist/experimental/tools/pyinterpreter.js +244 -0
- package/dist/graphs/neo4j_graph.cjs +49 -14
- package/dist/graphs/neo4j_graph.d.ts +30 -0
- package/dist/graphs/neo4j_graph.js +49 -14
- package/dist/llms/fireworks.d.ts +1 -1
- package/dist/llms/hf.cjs +13 -2
- package/dist/llms/hf.d.ts +5 -0
- package/dist/llms/hf.js +13 -2
- package/dist/llms/llama_cpp.cjs +17 -3
- package/dist/llms/llama_cpp.d.ts +4 -1
- package/dist/llms/llama_cpp.js +17 -3
- package/dist/llms/openai-chat.cjs +445 -3
- package/dist/llms/openai-chat.d.ts +123 -4
- package/dist/llms/openai-chat.js +443 -2
- package/dist/llms/openai.cjs +530 -6
- package/dist/llms/openai.d.ts +123 -4
- package/dist/llms/openai.js +525 -2
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/output_parsers/json.cjs +4 -0
- package/dist/output_parsers/json.js +4 -0
- package/dist/schema/index.d.ts +1 -1
- package/dist/tools/convert_to_openai.cjs +38 -4
- package/dist/tools/convert_to_openai.d.ts +11 -1
- package/dist/tools/convert_to_openai.js +35 -1
- package/dist/types/openai-types.d.ts +133 -1
- package/dist/util/env.cjs +9 -70
- package/dist/util/env.d.ts +1 -21
- package/dist/util/env.js +1 -62
- package/dist/util/openai-format-fndef.cjs +81 -0
- package/dist/util/openai-format-fndef.d.ts +44 -0
- package/dist/util/openai-format-fndef.js +77 -0
- package/dist/util/openai.cjs +18 -2
- package/dist/util/openai.d.ts +1 -1
- package/dist/util/openai.js +17 -1
- package/dist/util/openapi.d.ts +2 -2
- package/dist/util/prompt-layer.d.ts +1 -1
- package/dist/vectorstores/clickhouse.cjs +286 -0
- package/dist/vectorstores/clickhouse.d.ts +126 -0
- package/dist/vectorstores/clickhouse.js +259 -0
- package/dist/vectorstores/pgvector.cjs +142 -18
- package/dist/vectorstores/pgvector.d.ts +21 -0
- package/dist/vectorstores/pgvector.js +142 -18
- package/dist/vectorstores/weaviate.cjs +45 -2
- package/dist/vectorstores/weaviate.d.ts +27 -1
- package/dist/vectorstores/weaviate.js +45 -2
- package/document_loaders/fs/pptx.cjs +1 -0
- package/document_loaders/fs/pptx.d.ts +1 -0
- package/document_loaders/fs/pptx.js +1 -0
- package/experimental/tools/pyinterpreter.cjs +1 -0
- package/experimental/tools/pyinterpreter.d.ts +1 -0
- package/experimental/tools/pyinterpreter.js +1 -0
- package/package.json +41 -9
- package/vectorstores/clickhouse.cjs +1 -0
- package/vectorstores/clickhouse.d.ts +1 -0
- package/vectorstores/clickhouse.js +1 -0
package/dist/llms/openai.cjs
CHANGED
@@ -1,15 +1,539 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PromptLayerOpenAIChat = exports.OpenAIChat = exports.PromptLayerOpenAI = exports.OpenAI = void 0;
-const openai_1 = require("
-
+const openai_1 = require("openai");
+const count_tokens_js_1 = require("../base_language/count_tokens.cjs");
+const index_js_1 = require("../schema/index.cjs");
+const azure_js_1 = require("../util/azure.cjs");
+const chunk_js_1 = require("../util/chunk.cjs");
 const env_js_1 = require("../util/env.cjs");
 const prompt_layer_js_1 = require("../util/prompt-layer.cjs");
+const base_js_1 = require("./base.cjs");
+const openai_chat_js_1 = require("./openai-chat.cjs");
+const openai_js_1 = require("../util/openai.cjs");
+/**
+ * Wrapper around OpenAI large language models.
+ *
+ * To use you should have the `openai` package installed, with the
+ * `OPENAI_API_KEY` environment variable set.
+ *
+ * To use with Azure you should have the `openai` package installed, with the
+ * `AZURE_OPENAI_API_KEY`,
+ * `AZURE_OPENAI_API_INSTANCE_NAME`,
+ * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+ * and `AZURE_OPENAI_API_VERSION` environment variable set.
+ *
+ * @remarks
+ * Any parameters that are valid to be passed to {@link
+ * https://platform.openai.com/docs/api-reference/completions/create |
+ * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
+ * if not explicitly available on this class.
+ * @example
+ * ```typescript
+ * const model = new OpenAI({
+ *   modelName: "gpt-4",
+ *   temperature: 0.7,
+ *   maxTokens: 1000,
+ *   maxRetries: 5,
+ * });
+ *
+ * const res = await model.call(
+ *   "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:"
+ * );
+ * console.log({ res });
+ * ```
+ */
+class OpenAI extends base_js_1.BaseLLM {
+    static lc_name() {
+        return "OpenAI";
+    }
+    get callKeys() {
+        return [...super.callKeys, "options"];
+    }
+    get lc_secrets() {
+        return {
+            openAIApiKey: "OPENAI_API_KEY",
+            azureOpenAIApiKey: "AZURE_OPENAI_API_KEY",
+            organization: "OPENAI_ORGANIZATION",
+        };
+    }
+    get lc_aliases() {
+        return {
+            modelName: "model",
+            openAIApiKey: "openai_api_key",
+            azureOpenAIApiVersion: "azure_openai_api_version",
+            azureOpenAIApiKey: "azure_openai_api_key",
+            azureOpenAIApiInstanceName: "azure_openai_api_instance_name",
+            azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name",
+        };
+    }
+    constructor(fields,
+    /** @deprecated */
+    configuration) {
+        if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
+            fields?.modelName?.startsWith("gpt-4")) &&
+            !fields?.modelName?.includes("-instruct")) {
+            // eslint-disable-next-line no-constructor-return
+            return new openai_chat_js_1.OpenAIChat(fields, configuration);
+        }
+        super(fields ?? {});
+        Object.defineProperty(this, "lc_serializable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+        Object.defineProperty(this, "temperature", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0.7
+        });
+        Object.defineProperty(this, "maxTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 256
+        });
+        Object.defineProperty(this, "topP", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1
+        });
+        Object.defineProperty(this, "frequencyPenalty", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0
+        });
+        Object.defineProperty(this, "presencePenalty", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 0
+        });
+        Object.defineProperty(this, "n", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1
+        });
+        Object.defineProperty(this, "bestOf", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "logitBias", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "modelName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "gpt-3.5-turbo-instruct"
+        });
+        Object.defineProperty(this, "modelKwargs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "batchSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 20
+        });
+        Object.defineProperty(this, "timeout", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "stop", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "user", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "streaming", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+        Object.defineProperty(this, "openAIApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiVersion", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiInstanceName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiDeploymentName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIBasePath", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "organization", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "client", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "clientConfig", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.openAIApiKey =
+            fields?.openAIApiKey ?? (0, env_js_1.getEnvironmentVariable)("OPENAI_API_KEY");
+        this.azureOpenAIApiKey =
+            fields?.azureOpenAIApiKey ??
+                (0, env_js_1.getEnvironmentVariable)("AZURE_OPENAI_API_KEY");
+        if (!this.azureOpenAIApiKey && !this.openAIApiKey) {
+            throw new Error("OpenAI or Azure OpenAI API key not found");
+        }
+        this.azureOpenAIApiInstanceName =
+            fields?.azureOpenAIApiInstanceName ??
+                (0, env_js_1.getEnvironmentVariable)("AZURE_OPENAI_API_INSTANCE_NAME");
+        this.azureOpenAIApiDeploymentName =
+            (fields?.azureOpenAIApiCompletionsDeploymentName ||
+                fields?.azureOpenAIApiDeploymentName) ??
+                ((0, env_js_1.getEnvironmentVariable)("AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME") ||
+                    (0, env_js_1.getEnvironmentVariable)("AZURE_OPENAI_API_DEPLOYMENT_NAME"));
+        this.azureOpenAIApiVersion =
+            fields?.azureOpenAIApiVersion ??
+                (0, env_js_1.getEnvironmentVariable)("AZURE_OPENAI_API_VERSION");
+        this.azureOpenAIBasePath =
+            fields?.azureOpenAIBasePath ??
+                (0, env_js_1.getEnvironmentVariable)("AZURE_OPENAI_BASE_PATH");
+        this.organization =
+            fields?.configuration?.organization ??
+                (0, env_js_1.getEnvironmentVariable)("OPENAI_ORGANIZATION");
+        this.modelName = fields?.modelName ?? this.modelName;
+        this.modelKwargs = fields?.modelKwargs ?? {};
+        this.batchSize = fields?.batchSize ?? this.batchSize;
+        this.timeout = fields?.timeout;
+        this.temperature = fields?.temperature ?? this.temperature;
+        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
+        this.topP = fields?.topP ?? this.topP;
+        this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
+        this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty;
+        this.n = fields?.n ?? this.n;
+        this.bestOf = fields?.bestOf ?? this.bestOf;
+        this.logitBias = fields?.logitBias;
+        this.stop = fields?.stop;
+        this.user = fields?.user;
+        this.streaming = fields?.streaming ?? false;
+        if (this.streaming && this.bestOf && this.bestOf > 1) {
+            throw new Error("Cannot stream results when bestOf > 1");
+        }
+        if (this.azureOpenAIApiKey) {
+            if (!this.azureOpenAIApiInstanceName && !this.azureOpenAIBasePath) {
+                throw new Error("Azure OpenAI API instance name not found");
+            }
+            if (!this.azureOpenAIApiDeploymentName) {
+                throw new Error("Azure OpenAI API deployment name not found");
+            }
+            if (!this.azureOpenAIApiVersion) {
+                throw new Error("Azure OpenAI API version not found");
+            }
+            this.openAIApiKey = this.openAIApiKey ?? "";
+        }
+        this.clientConfig = {
+            apiKey: this.openAIApiKey,
+            organization: this.organization,
+            baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+            dangerouslyAllowBrowser: true,
+            defaultHeaders: configuration?.baseOptions?.headers ??
+                fields?.configuration?.baseOptions?.headers,
+            defaultQuery: configuration?.baseOptions?.params ??
+                fields?.configuration?.baseOptions?.params,
+            ...configuration,
+            ...fields?.configuration,
+        };
+    }
+    /**
+     * Get the parameters used to invoke the model
+     */
+    invocationParams(options) {
+        return {
+            model: this.modelName,
+            temperature: this.temperature,
+            max_tokens: this.maxTokens,
+            top_p: this.topP,
+            frequency_penalty: this.frequencyPenalty,
+            presence_penalty: this.presencePenalty,
+            n: this.n,
+            best_of: this.bestOf,
+            logit_bias: this.logitBias,
+            stop: options?.stop ?? this.stop,
+            user: this.user,
+            stream: this.streaming,
+            ...this.modelKwargs,
+        };
+    }
+    /** @ignore */
+    _identifyingParams() {
+        return {
+            model_name: this.modelName,
+            ...this.invocationParams(),
+            ...this.clientConfig,
+        };
+    }
+    /**
+     * Get the identifying parameters for the model
+     */
+    identifyingParams() {
+        return this._identifyingParams();
+    }
+    /**
+     * Call out to OpenAI's endpoint with k unique prompts
+     *
+     * @param [prompts] - The prompts to pass into the model.
+     * @param [options] - Optional list of stop words to use when generating.
+     * @param [runManager] - Optional callback manager to use when generating.
+     *
+     * @returns The full LLM output.
+     *
+     * @example
+     * ```ts
+     * import { OpenAI } from "langchain/llms/openai";
+     * const openai = new OpenAI();
+     * const response = await openai.generate(["Tell me a joke."]);
+     * ```
+     */
+    async _generate(prompts, options, runManager) {
+        const subPrompts = (0, chunk_js_1.chunkArray)(prompts, this.batchSize);
+        const choices = [];
+        const tokenUsage = {};
+        const params = this.invocationParams(options);
+        if (params.max_tokens === -1) {
+            if (prompts.length !== 1) {
+                throw new Error("max_tokens set to -1 not supported for multiple inputs");
+            }
+            params.max_tokens = await (0, count_tokens_js_1.calculateMaxTokens)({
+                prompt: prompts[0],
+                // Cast here to allow for other models that may not fit the union
+                modelName: this.modelName,
+            });
+        }
+        for (let i = 0; i < subPrompts.length; i += 1) {
+            const data = params.stream
+                ? await (async () => {
+                    const choices = [];
+                    let response;
+                    const stream = await this.completionWithRetry({
+                        ...params,
+                        stream: true,
+                        prompt: subPrompts[i],
+                    }, options);
+                    for await (const message of stream) {
+                        // on the first message set the response properties
+                        if (!response) {
+                            response = {
+                                id: message.id,
+                                object: message.object,
+                                created: message.created,
+                                model: message.model,
+                            };
+                        }
+                        // on all messages, update choice
+                        for (const part of message.choices) {
+                            if (!choices[part.index]) {
+                                choices[part.index] = part;
+                            }
+                            else {
+                                const choice = choices[part.index];
+                                choice.text += part.text;
+                                choice.finish_reason = part.finish_reason;
+                                choice.logprobs = part.logprobs;
+                            }
+                            void runManager?.handleLLMNewToken(part.text, {
+                                prompt: Math.floor(part.index / this.n),
+                                completion: part.index % this.n,
+                            });
+                        }
+                    }
+                    if (options.signal?.aborted) {
+                        throw new Error("AbortError");
+                    }
+                    return { ...response, choices };
+                })()
+                : await this.completionWithRetry({
+                    ...params,
+                    stream: false,
+                    prompt: subPrompts[i],
+                }, {
+                    signal: options.signal,
+                    ...options.options,
+                });
+            choices.push(...data.choices);
+            const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage
+                ? data.usage
+                : {
+                    completion_tokens: undefined,
+                    prompt_tokens: undefined,
+                    total_tokens: undefined,
+                };
+            if (completionTokens) {
+                tokenUsage.completionTokens =
+                    (tokenUsage.completionTokens ?? 0) + completionTokens;
+            }
+            if (promptTokens) {
+                tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
+            }
+            if (totalTokens) {
+                tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
+            }
+        }
+        const generations = (0, chunk_js_1.chunkArray)(choices, this.n).map((promptChoices) => promptChoices.map((choice) => ({
+            text: choice.text ?? "",
+            generationInfo: {
+                finishReason: choice.finish_reason,
+                logprobs: choice.logprobs,
+            },
+        })));
+        return {
+            generations,
+            llmOutput: { tokenUsage },
+        };
+    }
+    // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation?
+    async *_streamResponseChunks(input, options, runManager) {
+        const params = {
+            ...this.invocationParams(options),
+            prompt: input,
+            stream: true,
+        };
+        const stream = await this.completionWithRetry(params, options);
+        for await (const data of stream) {
+            const choice = data?.choices[0];
+            if (!choice) {
+                continue;
+            }
+            const chunk = new index_js_1.GenerationChunk({
+                text: choice.text,
+                generationInfo: {
+                    finishReason: choice.finish_reason,
+                },
+            });
+            yield chunk;
+            // eslint-disable-next-line no-void
+            void runManager?.handleLLMNewToken(chunk.text ?? "");
+        }
+        if (options.signal?.aborted) {
+            throw new Error("AbortError");
+        }
+    }
+    async completionWithRetry(request, options) {
+        const requestOptions = this._getClientOptions(options);
+        return this.caller.call(async () => {
+            try {
+                const res = await this.client.completions.create(request, requestOptions);
+                return res;
+            }
+            catch (e) {
+                const error = (0, openai_js_1.wrapOpenAIClientError)(e);
+                throw error;
+            }
+        });
+    }
+    /**
+     * Calls the OpenAI API with retry logic in case of failures.
+     * @param request The request to send to the OpenAI API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the OpenAI API.
+     */
+    _getClientOptions(options) {
+        if (!this.client) {
+            const openAIEndpointConfig = {
+                azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
+                azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
+                azureOpenAIApiKey: this.azureOpenAIApiKey,
+                azureOpenAIBasePath: this.azureOpenAIBasePath,
+                baseURL: this.clientConfig.baseURL,
+            };
+            const endpoint = (0, azure_js_1.getEndpoint)(openAIEndpointConfig);
+            const params = {
+                ...this.clientConfig,
+                baseURL: endpoint,
+                timeout: this.timeout,
+                maxRetries: 0,
+            };
+            if (!params.baseURL) {
+                delete params.baseURL;
+            }
+            this.client = new openai_1.OpenAI(params);
+        }
+        const requestOptions = {
+            ...this.clientConfig,
+            ...options,
+        };
+        if (this.azureOpenAIApiKey) {
+            requestOptions.headers = {
+                "api-key": this.azureOpenAIApiKey,
+                ...requestOptions.headers,
+            };
+            requestOptions.query = {
+                "api-version": this.azureOpenAIApiVersion,
+                ...requestOptions.query,
+            };
+        }
+        return requestOptions;
+    }
+    _llmType() {
+        return "openai";
+    }
+}
+exports.OpenAI = OpenAI;
 /**
  * PromptLayer wrapper to OpenAI
  * @augments OpenAI
  */
-class PromptLayerOpenAI extends openai_1.OpenAI {
+class PromptLayerOpenAI extends OpenAI {
     get lc_secrets() {
         return {
             promptLayerApiKey: "PROMPTLAYER_API_KEY",
@@ -77,6 +601,6 @@ class PromptLayerOpenAI extends openai_1.OpenAI {
     }
 }
 exports.PromptLayerOpenAI = PromptLayerOpenAI;
-var
-Object.defineProperty(exports, "OpenAIChat", { enumerable: true, get: function () { return
-Object.defineProperty(exports, "PromptLayerOpenAIChat", { enumerable: true, get: function () { return
+var openai_chat_js_2 = require("./openai-chat.cjs");
+Object.defineProperty(exports, "OpenAIChat", { enumerable: true, get: function () { return openai_chat_js_2.OpenAIChat; } });
+Object.defineProperty(exports, "PromptLayerOpenAIChat", { enumerable: true, get: function () { return openai_chat_js_2.PromptLayerOpenAIChat; } });
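The new implementation above batches prompts with `chunkArray`, accumulates streamed parts per choice index inside `_generate`, and constructs the `openai` v4 client lazily in `_getClientOptions`. A minimal usage sketch consistent with the JSDoc in the diff — the prompts are illustrative, `OPENAI_API_KEY` is assumed to be set in the environment, and the last lines assume the generic `.stream()` method that LLMs expose in this release on top of the new `_streamResponseChunks`:

```typescript
import { OpenAI } from "langchain/llms/openai";

// modelName defaults to "gpt-3.5-turbo-instruct" (see the constructor above);
// chat model names such as "gpt-4" make the constructor return an OpenAIChat
// instance instead.
const model = new OpenAI({ temperature: 0.7, maxTokens: 256 });

// Prompts passed to generate() are chunked into batches of `batchSize` (20),
// and per-batch token usage is summed into llmOutput.tokenUsage.
const res = await model.generate(["Tell me a joke.", "Tell me another joke."]);
console.log(res.generations, res.llmOutput?.tokenUsage);

// Token-by-token streaming flows through the new _streamResponseChunks().
for await (const chunk of await model.stream("Write a haiku about socks.")) {
  process.stdout.write(chunk);
}
```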
package/dist/llms/openai.d.ts
CHANGED
@@ -1,8 +1,127 @@
-import { OpenAI } from "
+import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
-import
-
-
+import { GenerationChunk, LLMResult } from "../schema/index.js";
+import { AzureOpenAIInput, OpenAICallOptions, OpenAICoreRequestOptions, OpenAIInput, LegacyOpenAIInput } from "../types/openai-types.js";
+import { BaseLLM, BaseLLMParams } from "./base.js";
+export type { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
+/**
+ * Wrapper around OpenAI large language models.
+ *
+ * To use you should have the `openai` package installed, with the
+ * `OPENAI_API_KEY` environment variable set.
+ *
+ * To use with Azure you should have the `openai` package installed, with the
+ * `AZURE_OPENAI_API_KEY`,
+ * `AZURE_OPENAI_API_INSTANCE_NAME`,
+ * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+ * and `AZURE_OPENAI_API_VERSION` environment variable set.
+ *
+ * @remarks
+ * Any parameters that are valid to be passed to {@link
+ * https://platform.openai.com/docs/api-reference/completions/create |
+ * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
+ * if not explicitly available on this class.
+ * @example
+ * ```typescript
+ * const model = new OpenAI({
+ *   modelName: "gpt-4",
+ *   temperature: 0.7,
+ *   maxTokens: 1000,
+ *   maxRetries: 5,
+ * });
+ *
+ * const res = await model.call(
+ *   "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:"
+ * );
+ * console.log({ res });
+ * ```
+ */
+export declare class OpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions> extends BaseLLM<CallOptions> implements OpenAIInput, AzureOpenAIInput {
+    static lc_name(): string;
+    get callKeys(): string[];
+    lc_serializable: boolean;
+    get lc_secrets(): {
+        [key: string]: string;
+    } | undefined;
+    get lc_aliases(): Record<string, string>;
+    temperature: number;
+    maxTokens: number;
+    topP: number;
+    frequencyPenalty: number;
+    presencePenalty: number;
+    n: number;
+    bestOf?: number;
+    logitBias?: Record<string, number>;
+    modelName: string;
+    modelKwargs?: OpenAIInput["modelKwargs"];
+    batchSize: number;
+    timeout?: number;
+    stop?: string[];
+    user?: string;
+    streaming: boolean;
+    openAIApiKey?: string;
+    azureOpenAIApiVersion?: string;
+    azureOpenAIApiKey?: string;
+    azureOpenAIApiInstanceName?: string;
+    azureOpenAIApiDeploymentName?: string;
+    azureOpenAIBasePath?: string;
+    organization?: string;
+    private client;
+    private clientConfig;
+    constructor(fields?: Partial<OpenAIInput> & Partial<AzureOpenAIInput> & BaseLLMParams & {
+        configuration?: ClientOptions & LegacyOpenAIInput;
+    },
+    /** @deprecated */
+    configuration?: ClientOptions & LegacyOpenAIInput);
+    /**
+     * Get the parameters used to invoke the model
+     */
+    invocationParams(options?: this["ParsedCallOptions"]): Omit<OpenAIClient.CompletionCreateParams, "prompt">;
+    /** @ignore */
+    _identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, "prompt"> & {
+        model_name: string;
+    } & ClientOptions;
+    /**
+     * Get the identifying parameters for the model
+     */
+    identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, "prompt"> & {
+        model_name: string;
+    } & ClientOptions;
+    /**
+     * Call out to OpenAI's endpoint with k unique prompts
+     *
+     * @param [prompts] - The prompts to pass into the model.
+     * @param [options] - Optional list of stop words to use when generating.
+     * @param [runManager] - Optional callback manager to use when generating.
+     *
+     * @returns The full LLM output.
+     *
+     * @example
+     * ```ts
+     * import { OpenAI } from "langchain/llms/openai";
+     * const openai = new OpenAI();
+     * const response = await openai.generate(["Tell me a joke."]);
+     * ```
+     */
+    _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
+    _streamResponseChunks(input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
+    /**
+     * Calls the OpenAI API with retry logic in case of failures.
+     * @param request The request to send to the OpenAI API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the OpenAI API.
+     */
+    completionWithRetry(request: OpenAIClient.CompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Completion>>;
+    completionWithRetry(request: OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Completions.Completion>;
+    /**
+     * Calls the OpenAI API with retry logic in case of failures.
+     * @param request The request to send to the OpenAI API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the OpenAI API.
+     */
+    private _getClientOptions;
+    _llmType(): string;
+}
 /**
  * PromptLayer wrapper to OpenAI
  * @augments OpenAI
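The paired `completionWithRetry` declarations above overload on the request's `stream` flag: a `CompletionCreateParamsStreaming` request resolves to an `AsyncIterable` of completion chunks, while a `CompletionCreateParamsNonStreaming` request resolves to a single `Completion`. A sketch of how a caller might hit each overload — the model name and prompts are illustrative:

```typescript
import { OpenAI } from "langchain/llms/openai";

const llm = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });

// Non-streaming overload: resolves to one OpenAIClient.Completions.Completion.
const completion = await llm.completionWithRetry({
  model: "gpt-3.5-turbo-instruct",
  prompt: "Say hello.",
  stream: false,
});
console.log(completion.choices[0]?.text);

// Streaming overload: resolves to an AsyncIterable<OpenAIClient.Completion>,
// whose chunks carry partial text in choices[].text.
const stream = await llm.completionWithRetry({
  model: "gpt-3.5-turbo-instruct",
  prompt: "Say hello.",
  stream: true,
});
for await (const part of stream) {
  process.stdout.write(part.choices[0]?.text ?? "");
}
```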