langchain 0.0.69 → 0.0.71
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/mrkl/prompt.cjs +1 -1
- package/dist/agents/mrkl/prompt.d.ts +1 -1
- package/dist/agents/mrkl/prompt.js +1 -1
- package/dist/callbacks/base.cjs +25 -2
- package/dist/callbacks/base.js +2 -2
- package/dist/chains/llm_chain.d.ts +7 -7
- package/dist/chains/question_answering/load.cjs +14 -40
- package/dist/chains/question_answering/load.d.ts +3 -3
- package/dist/chains/question_answering/load.js +16 -42
- package/dist/chains/sql_db/sql_db_chain.cjs +4 -0
- package/dist/chains/sql_db/sql_db_chain.d.ts +3 -1
- package/dist/chains/sql_db/sql_db_chain.js +4 -0
- package/dist/chains/sql_db/sql_db_prompt.cjs +58 -1
- package/dist/chains/sql_db/sql_db_prompt.d.ts +3 -0
- package/dist/chains/sql_db/sql_db_prompt.js +57 -0
- package/dist/chat_models/openai.cjs +89 -4
- package/dist/chat_models/openai.d.ts +18 -53
- package/dist/chat_models/openai.js +90 -5
- package/dist/document_loaders/fs/directory.d.ts +5 -6
- package/dist/document_loaders/fs/pdf.d.ts +1 -1
- package/dist/document_loaders/fs/unstructured.cjs +68 -49
- package/dist/document_loaders/fs/unstructured.d.ts +16 -11
- package/dist/document_loaders/fs/unstructured.js +69 -50
- package/dist/document_loaders/web/s3.cjs +2 -1
- package/dist/document_loaders/web/s3.js +2 -1
- package/dist/embeddings/openai.cjs +86 -6
- package/dist/embeddings/openai.d.ts +7 -2
- package/dist/embeddings/openai.js +86 -6
- package/dist/llms/openai-chat.cjs +97 -6
- package/dist/llms/openai-chat.d.ts +22 -58
- package/dist/llms/openai-chat.js +97 -6
- package/dist/llms/openai.cjs +94 -5
- package/dist/llms/openai.d.ts +18 -59
- package/dist/llms/openai.js +95 -6
- package/dist/memory/base.cjs +1 -1
- package/dist/memory/base.js +1 -1
- package/dist/memory/index.cjs +3 -1
- package/dist/memory/index.d.ts +1 -0
- package/dist/memory/index.js +1 -0
- package/dist/memory/vector_store.cjs +61 -0
- package/dist/memory/vector_store.d.ts +19 -0
- package/dist/memory/vector_store.js +57 -0
- package/dist/output_parsers/combining.cjs +15 -4
- package/dist/output_parsers/combining.d.ts +1 -0
- package/dist/output_parsers/combining.js +15 -4
- package/dist/output_parsers/structured.cjs +10 -5
- package/dist/output_parsers/structured.js +10 -5
- package/dist/retrievers/time_weighted.d.ts +1 -1
- package/dist/tools/webbrowser.cjs +1 -1
- package/dist/tools/webbrowser.js +1 -1
- package/dist/util/sql_utils.cjs +15 -1
- package/dist/util/sql_utils.d.ts +2 -0
- package/dist/util/sql_utils.js +13 -0
- package/dist/vectorstores/chroma.cjs +25 -2
- package/dist/vectorstores/chroma.js +2 -2
- package/dist/vectorstores/milvus.cjs +25 -2
- package/dist/vectorstores/milvus.js +2 -2
- package/dist/vectorstores/myscale.cjs +26 -3
- package/dist/vectorstores/myscale.js +3 -3
- package/dist/vectorstores/opensearch.cjs +25 -2
- package/dist/vectorstores/opensearch.js +2 -2
- package/dist/vectorstores/pinecone.cjs +25 -2
- package/dist/vectorstores/pinecone.js +2 -2
- package/dist/vectorstores/prisma.cjs +1 -1
- package/dist/vectorstores/prisma.js +2 -1
- package/dist/vectorstores/supabase.cjs +13 -1
- package/dist/vectorstores/supabase.d.ts +6 -1
- package/dist/vectorstores/supabase.js +13 -1
- package/dist/vectorstores/weaviate.cjs +25 -2
- package/dist/vectorstores/weaviate.js +2 -2
- package/package.json +5 -5
package/dist/llms/openai-chat.d.ts CHANGED
@@ -1,72 +1,33 @@
 import { ChatCompletionRequestMessage, CreateChatCompletionRequest, ConfigurationParameters, CreateChatCompletionResponse } from "openai";
-import
+import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput } from "../types/open-ai-types.js";
 import type { StreamingAxiosConfiguration } from "../util/axios-types.js";
-import {
+import { BaseLLMParams, LLM } from "./base.js";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
-/**
- * Input to OpenAI class.
- */
-export interface OpenAIChatInput {
-    /** Sampling temperature to use, between 0 and 2, defaults to 1 */
-    temperature: number;
-    /** Total probability mass of tokens to consider at each step, between 0 and 1, defaults to 1 */
-    topP: number;
-    /** Penalizes repeated tokens according to frequency */
-    frequencyPenalty: number;
-    /** Penalizes repeated tokens */
-    presencePenalty: number;
-    /** Number of chat completions to generate for each prompt */
-    n: number;
-    /** Dictionary used to adjust the probability of specific tokens being generated */
-    logitBias?: Record<string, number>;
-    /** Whether to stream the results or not */
-    streaming: boolean;
-    /** Model name to use */
-    modelName: string;
-    /** ChatGPT messages to pass as a prefix to the prompt */
-    prefixMessages?: ChatCompletionRequestMessage[];
-    /** Holds any additional parameters that are valid to pass to {@link
-     * https://platform.openai.com/docs/api-reference/completions/create |
-     * `openai.create`} that are not explicitly specified on this class.
-     */
-    modelKwargs?: Kwargs;
-    /** List of stop words to use when generating */
-    stop?: string[];
-    /**
-     * Timeout to use when making requests to OpenAI.
-     */
-    timeout?: number;
-    /**
-     * Maximum number of tokens to generate in the completion. If not specified,
-     * defaults to the maximum number of tokens allowed by the model.
-     */
-    maxTokens?: number;
-}
-export interface OpenAIChatCallOptions extends BaseLLMCallOptions {
-    /**
-     * List of stop words to use when generating
-     */
-    stop?: string[];
-    /**
-     * Additional options to pass to the underlying axios request.
-     */
-    options?: AxiosRequestConfig;
-}
-type Kwargs = Record<string, any>;
+export { OpenAICallOptions, OpenAIChatInput, AzureOpenAIInput };
 /**
  * Wrapper around OpenAI large language models that use the Chat endpoint.
  *
  * To use you should have the `openai` package installed, with the
  * `OPENAI_API_KEY` environment variable set.
  *
+ * To use with Azure you should have the `openai` package installed, with the
+ * `AZURE_OPENAI_API_KEY`,
+ * `AZURE_OPENAI_API_INSTANCE_NAME`,
+ * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+ * and `AZURE_OPENAI_API_VERSION` environment variable set.
+ *
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://platform.openai.com/docs/api-reference/chat/create |
  * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
  * if not explicitly available on this class.
+ *
+ * @augments BaseLLM
+ * @augments OpenAIInput
+ * @augments AzureOpenAIChatInput
  */
-export declare class OpenAIChat extends LLM implements OpenAIChatInput {
-    CallOptions:
+export declare class OpenAIChat extends LLM implements OpenAIChatInput, AzureOpenAIInput {
+    CallOptions: OpenAICallOptions;
     temperature: number;
     topP: number;
     frequencyPenalty: number;
@@ -76,19 +37,23 @@ export declare class OpenAIChat extends LLM implements OpenAIChatInput {
     maxTokens?: number;
     modelName: string;
     prefixMessages?: ChatCompletionRequestMessage[];
-    modelKwargs?:
+    modelKwargs?: OpenAIChatInput["modelKwargs"];
     timeout?: number;
     stop?: string[];
     streaming: boolean;
+    azureOpenAIApiVersion?: string;
+    azureOpenAIApiKey?: string;
+    azureOpenAIApiInstanceName?: string;
+    azureOpenAIApiDeploymentName?: string;
     private client;
     private clientConfig;
-    constructor(fields?: Partial<OpenAIChatInput> & BaseLLMParams & {
+    constructor(fields?: Partial<OpenAIChatInput> & Partial<AzureOpenAIInput> & BaseLLMParams & {
        openAIApiKey?: string;
    }, configuration?: ConfigurationParameters);
    /**
     * Get the parameters used to invoke the model
     */
-    invocationParams(): Omit<CreateChatCompletionRequest, "messages"
+    invocationParams(): Omit<CreateChatCompletionRequest, "messages">;
    /** @ignore */
    _identifyingParams(): {
        apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
@@ -156,4 +121,3 @@ export declare class PromptLayerOpenAIChat extends OpenAIChat {
    });
    completionWithRetry(request: CreateChatCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateChatCompletionResponse>;
 }
-export {};
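Taken together, this `.d.ts` hunk shows the headline change of the release: the hand-rolled `OpenAIChatInput`/`OpenAIChatCallOptions` declarations move to a shared `../types/open-ai-types.js` module, and `OpenAIChat` now also implements `AzureOpenAIInput`. A minimal usage sketch based only on the constructor signature above — the `langchain/llms` import path and all placeholder values are assumptions, not taken from this diff:

```typescript
// Sketch only: the import path is an assumption for this release; adjust it
// to however the package is exposed in your build.
import { OpenAIChat } from "langchain/llms";

// Each Azure field can also come from the matching AZURE_OPENAI_API_* env var;
// once an Azure key is present, the runtime (see openai-chat.js below) insists
// on the instance name, deployment name and API version as well.
const model = new OpenAIChat({
  azureOpenAIApiKey: "<azure-key>",
  azureOpenAIApiInstanceName: "<instance>",
  azureOpenAIApiDeploymentName: "<deployment>",
  azureOpenAIApiVersion: "2023-03-15-preview", // placeholder version string
  temperature: 0,
});

console.log(await model.call("Hello!"));
```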
package/dist/llms/openai-chat.js CHANGED
@@ -1,3 +1,4 @@
+import { isNode } from "browser-or-node";
 import { Configuration, OpenAIApi, } from "openai";
 import fetchAdapter from "../util/axios-fetch-adapter.js";
 import { LLM } from "./base.js";
@@ -7,11 +8,21 @@ import { LLM } from "./base.js";
  * To use you should have the `openai` package installed, with the
  * `OPENAI_API_KEY` environment variable set.
  *
+ * To use with Azure you should have the `openai` package installed, with the
+ * `AZURE_OPENAI_API_KEY`,
+ * `AZURE_OPENAI_API_INSTANCE_NAME`,
+ * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+ * and `AZURE_OPENAI_API_VERSION` environment variable set.
+ *
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://platform.openai.com/docs/api-reference/chat/create |
  * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
  * if not explicitly available on this class.
+ *
+ * @augments BaseLLM
+ * @augments OpenAIInput
+ * @augments AzureOpenAIChatInput
  */
 export class OpenAIChat extends LLM {
     constructor(fields, configuration) {
@@ -94,6 +105,30 @@ export class OpenAIChat extends LLM {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "azureOpenAIApiVersion", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiInstanceName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiDeploymentName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "client", {
             enumerable: true,
             configurable: true,
@@ -107,11 +142,33 @@ export class OpenAIChat extends LLM {
             value: void 0
         });
         const apiKey = fields?.openAIApiKey ??
-
-
-
-
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.OPENAI_API_KEY
+                : undefined);
+        const azureApiKey = fields?.azureOpenAIApiKey ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_KEY
+                : undefined);
+        if (!azureApiKey && !apiKey) {
+            throw new Error("(Azure) OpenAI API key not found");
         }
+        const azureApiInstanceName = fields?.azureOpenAIApiInstanceName ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_INSTANCE_NAME
+                : undefined);
+        const azureApiDeploymentName = fields?.azureOpenAIApiDeploymentName ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_DEPLOYMENT_NAME
+                : undefined);
+        const azureApiVersion = fields?.azureOpenAIApiVersion ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_VERSION
+                : undefined);
         this.modelName = fields?.modelName ?? this.modelName;
         this.prefixMessages = fields?.prefixMessages ?? this.prefixMessages;
         this.modelKwargs = fields?.modelKwargs ?? {};
@@ -125,9 +182,24 @@ export class OpenAIChat extends LLM {
         this.maxTokens = fields?.maxTokens;
         this.stop = fields?.stop;
         this.streaming = fields?.streaming ?? false;
+        this.azureOpenAIApiVersion = azureApiVersion;
+        this.azureOpenAIApiKey = azureApiKey;
+        this.azureOpenAIApiInstanceName = azureApiInstanceName;
+        this.azureOpenAIApiDeploymentName = azureApiDeploymentName;
         if (this.streaming && this.n > 1) {
             throw new Error("Cannot stream results when n > 1");
         }
+        if (this.azureOpenAIApiKey) {
+            if (!this.azureOpenAIApiInstanceName) {
+                throw new Error("Azure OpenAI API instance name not found");
+            }
+            if (!this.azureOpenAIApiDeploymentName) {
+                throw new Error("Azure OpenAI API deployment name not found");
+            }
+            if (!this.azureOpenAIApiVersion) {
+                throw new Error("Azure OpenAI API version not found");
+            }
+        }
         this.clientConfig = {
             apiKey,
             ...configuration,
@@ -198,6 +270,7 @@ export class OpenAIChat extends LLM {
             messages: this.formatMessages(prompt),
         }, {
             ...options,
+            adapter: fetchAdapter,
             responseType: "stream",
             onmessage: (event) => {
                 if (event.data?.trim?.() === "[DONE]") {
@@ -255,18 +328,36 @@ export class OpenAIChat extends LLM {
     /** @ignore */
     async completionWithRetry(request, options) {
         if (!this.client) {
+            const endpoint = this.azureOpenAIApiKey
+                ? `https://${this.azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${this.azureOpenAIApiDeploymentName}`
+                : this.clientConfig.basePath;
             const clientConfig = new Configuration({
                 ...this.clientConfig,
+                basePath: endpoint,
                 baseOptions: {
                     timeout: this.timeout,
-                    adapter: fetchAdapter,
                     ...this.clientConfig.baseOptions,
                 },
             });
             this.client = new OpenAIApi(clientConfig);
         }
+        const axiosOptions = {
+            adapter: isNode ? undefined : fetchAdapter,
+            ...this.clientConfig.baseOptions,
+            ...options,
+        };
+        if (this.azureOpenAIApiKey) {
+            axiosOptions.headers = {
+                "api-key": this.azureOpenAIApiKey,
+                ...axiosOptions.headers,
+            };
+            axiosOptions.params = {
+                "api-version": this.azureOpenAIApiVersion,
+                ...axiosOptions.params,
+            };
+        }
         return this.caller
-            .call(this.client.createChatCompletion.bind(this.client), request,
+            .call(this.client.createChatCompletion.bind(this.client), request, axiosOptions)
             .then((res) => res.data);
     }
     _llmType() {
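Every new constant in this constructor repeats one pattern: prefer the explicit field, fall back to the matching environment variable, and guard the `process` access so browser bundles without a `process` global don't throw. A condensed, self-contained sketch of that pattern — `readEnv` and `resolveKeys` are hypothetical names, not part of the package:

```typescript
// Hypothetical condensation of the field-then-env fallback used above.
function readEnv(name: string): string | undefined {
  // Guarded so browser bundles, where `process` is undefined, don't throw.
  return typeof process !== "undefined" ? process.env?.[name] : undefined;
}

interface Fields {
  openAIApiKey?: string;
  azureOpenAIApiKey?: string;
}

function resolveKeys(fields?: Fields) {
  const apiKey = fields?.openAIApiKey ?? readEnv("OPENAI_API_KEY");
  const azureApiKey = fields?.azureOpenAIApiKey ?? readEnv("AZURE_OPENAI_API_KEY");
  // Either key form is accepted; having neither is a hard error, as in the diff.
  if (!azureApiKey && !apiKey) {
    throw new Error("(Azure) OpenAI API key not found");
  }
  return { apiKey, azureApiKey };
}
```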
package/dist/llms/openai.cjs CHANGED
@@ -4,6 +4,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PromptLayerOpenAIChat = exports.OpenAIChat = exports.PromptLayerOpenAI = exports.OpenAI = void 0;
+const browser_or_node_1 = require("browser-or-node");
 const openai_1 = require("openai");
 const axios_fetch_adapter_js_1 = __importDefault(require("../util/axios-fetch-adapter.cjs"));
 const chunk_js_1 = require("../util/chunk.cjs");
@@ -16,6 +17,12 @@ const openai_chat_js_1 = require("./openai-chat.cjs");
  * To use you should have the `openai` package installed, with the
  * `OPENAI_API_KEY` environment variable set.
  *
+ * To use with Azure you should have the `openai` package installed, with the
+ * `AZURE_OPENAI_API_KEY`,
+ * `AZURE_OPENAI_API_INSTANCE_NAME`,
+ * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+ * and `AZURE_OPENAI_API_VERSION` environment variable set.
+ *
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://platform.openai.com/docs/api-reference/completions/create |
@@ -25,7 +32,8 @@ const openai_chat_js_1 = require("./openai-chat.cjs");
 class OpenAI extends base_js_1.BaseLLM {
     constructor(fields, configuration) {
         if (fields?.modelName?.startsWith("gpt-3.5-turbo") ||
-            fields?.modelName?.startsWith("gpt-4")
+            fields?.modelName?.startsWith("gpt-4") ||
+            fields?.modelName?.startsWith("gpt-4-32k")) {
             // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
             return new openai_chat_js_1.OpenAIChat(fields, configuration);
         }
@@ -114,6 +122,30 @@ class OpenAI extends base_js_1.BaseLLM {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "azureOpenAIApiVersion", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiKey", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiInstanceName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "azureOpenAIApiDeploymentName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "client", {
             enumerable: true,
             configurable: true,
@@ -131,9 +163,32 @@ class OpenAI extends base_js_1.BaseLLM {
             ? // eslint-disable-next-line no-process-env
                 process.env?.OPENAI_API_KEY
             : undefined);
-
-
+        const azureApiKey = fields?.azureOpenAIApiKey ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_KEY
+                : undefined);
+        if (!azureApiKey && !apiKey) {
+            throw new Error("(Azure) OpenAI API key not found");
         }
+        const azureApiInstanceName = fields?.azureOpenAIApiInstanceName ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_INSTANCE_NAME
+                : undefined);
+        const azureApiDeploymentName = (fields?.azureOpenAIApiCompletionsDeploymentName ||
+            fields?.azureOpenAIApiDeploymentName) ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME ||
+                        // eslint-disable-next-line no-process-env
+                        process.env?.AZURE_OPENAI_API_DEPLOYMENT_NAME
+                : undefined);
+        const azureApiVersion = fields?.azureOpenAIApiVersion ??
+            (typeof process !== "undefined"
+                ? // eslint-disable-next-line no-process-env
+                    process.env?.AZURE_OPENAI_API_VERSION
+                : undefined);
         this.modelName = fields?.modelName ?? this.modelName;
         this.modelKwargs = fields?.modelKwargs ?? {};
         this.batchSize = fields?.batchSize ?? this.batchSize;
@@ -148,12 +203,27 @@ class OpenAI extends base_js_1.BaseLLM {
         this.logitBias = fields?.logitBias;
         this.stop = fields?.stop;
         this.streaming = fields?.streaming ?? false;
+        this.azureOpenAIApiVersion = azureApiVersion;
+        this.azureOpenAIApiKey = azureApiKey;
+        this.azureOpenAIApiInstanceName = azureApiInstanceName;
+        this.azureOpenAIApiDeploymentName = azureApiDeploymentName;
         if (this.streaming && this.n > 1) {
             throw new Error("Cannot stream results when n > 1");
         }
         if (this.streaming && this.bestOf > 1) {
             throw new Error("Cannot stream results when bestOf > 1");
         }
+        if (this.azureOpenAIApiKey) {
+            if (!this.azureOpenAIApiInstanceName) {
+                throw new Error("Azure OpenAI API instance name not found");
+            }
+            if (!this.azureOpenAIApiDeploymentName) {
+                throw new Error("Azure OpenAI API deployment name not found");
+            }
+            if (!this.azureOpenAIApiVersion) {
+                throw new Error("Azure OpenAI API version not found");
+            }
+        }
         this.clientConfig = {
             apiKey,
             ...configuration,
@@ -243,6 +313,7 @@ class OpenAI extends base_js_1.BaseLLM {
             prompt: subPrompts[i],
         }, {
             ...options,
+            adapter: axios_fetch_adapter_js_1.default,
             responseType: "stream",
             onmessage: (event) => {
                 if (event.data?.trim?.() === "[DONE]") {
@@ -312,18 +383,36 @@ class OpenAI extends base_js_1.BaseLLM {
     /** @ignore */
     async completionWithRetry(request, options) {
         if (!this.client) {
+            const endpoint = this.azureOpenAIApiKey
+                ? `https://${this.azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${this.azureOpenAIApiDeploymentName}`
+                : this.clientConfig.basePath;
             const clientConfig = new openai_1.Configuration({
                 ...this.clientConfig,
+                basePath: endpoint,
                 baseOptions: {
                     timeout: this.timeout,
-                    adapter: axios_fetch_adapter_js_1.default,
                     ...this.clientConfig.baseOptions,
                 },
             });
             this.client = new openai_1.OpenAIApi(clientConfig);
         }
+        const axiosOptions = {
+            adapter: browser_or_node_1.isNode ? undefined : axios_fetch_adapter_js_1.default,
+            ...this.clientConfig.baseOptions,
+            ...options,
+        };
+        if (this.azureOpenAIApiKey) {
+            axiosOptions.headers = {
+                "api-key": this.azureOpenAIApiKey,
+                ...axiosOptions.headers,
+            };
+            axiosOptions.params = {
+                "api-version": this.azureOpenAIApiVersion,
+                ...axiosOptions.params,
+            };
+        }
         return this.caller
-            .call(this.client.createCompletion.bind(this.client), request,
+            .call(this.client.createCompletion.bind(this.client), request, axiosOptions)
             .then((res) => res.data);
     }
     _llmType() {
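The `completionWithRetry` changes amount to a different wire format when an Azure key is present: the client's `basePath` points at the deployment-scoped `*.openai.azure.com` URL, authentication moves from the usual `Authorization: Bearer` header to an `api-key` header, and the API version travels as an `api-version` query parameter. A standalone sketch of the request the patched client ends up issuing — the `/completions` suffix (appended by the `openai` SDK) and all values are illustrative assumptions:

```typescript
// Illustrative only: the Azure request shape implied by the diff above.
const instance = "my-instance";           // placeholder
const deployment = "my-deployment";       // placeholder
const apiVersion = "2023-03-15-preview";  // placeholder

// Same template string the diff uses for the Configuration basePath.
const basePath = `https://${instance}.openai.azure.com/openai/deployments/${deployment}`;

const res = await fetch(`${basePath}/completions?api-version=${apiVersion}`, {
  method: "POST",
  headers: {
    "api-key": process.env.AZURE_OPENAI_API_KEY ?? "", // header auth, not Bearer
    "Content-Type": "application/json",
  },
  body: JSON.stringify({ prompt: "Hello", max_tokens: 16 }),
});
console.log(await res.json());
```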
package/dist/llms/openai.d.ts CHANGED
@@ -1,74 +1,29 @@
 import { ConfigurationParameters, CreateCompletionRequest, CreateCompletionResponse } from "openai";
-import
+import { AzureOpenAIInput, OpenAICallOptions, OpenAIInput } from "../types/open-ai-types.js";
 import type { StreamingAxiosConfiguration } from "../util/axios-types.js";
-import { BaseLLM,
+import { BaseLLM, BaseLLMParams } from "./base.js";
 import { LLMResult } from "../schema/index.js";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
-/**
- * Input to OpenAI class.
- */
-export interface OpenAIInput {
-    /** Sampling temperature to use */
-    temperature: number;
-    /**
-     * Maximum number of tokens to generate in the completion. -1 returns as many
-     * tokens as possible given the prompt and the model's maximum context size.
-     */
-    maxTokens: number;
-    /** Total probability mass of tokens to consider at each step */
-    topP: number;
-    /** Penalizes repeated tokens according to frequency */
-    frequencyPenalty: number;
-    /** Penalizes repeated tokens */
-    presencePenalty: number;
-    /** Number of completions to generate for each prompt */
-    n: number;
-    /** Generates `bestOf` completions server side and returns the "best" */
-    bestOf: number;
-    /** Dictionary used to adjust the probability of specific tokens being generated */
-    logitBias?: Record<string, number>;
-    /** Whether to stream the results or not. Enabling disables tokenUsage reporting */
-    streaming: boolean;
-    /** Model name to use */
-    modelName: string;
-    /** Holds any additional parameters that are valid to pass to {@link
-     * https://platform.openai.com/docs/api-reference/completions/create |
-     * `openai.createCompletion`} that are not explicitly specified on this class.
-     */
-    modelKwargs?: Kwargs;
-    /** Batch size to use when passing multiple documents to generate */
-    batchSize: number;
-    /** List of stop words to use when generating */
-    stop?: string[];
-    /**
-     * Timeout to use when making requests to OpenAI.
-     */
-    timeout?: number;
-}
-export interface OpenAICallOptions extends BaseLLMCallOptions {
-    /**
-     * List of stop words to use when generating
-     */
-    stop?: string[];
-    /**
-     * Additional options to pass to the underlying axios request.
-     */
-    options?: AxiosRequestConfig;
-}
-type Kwargs = Record<string, any>;
+export { OpenAICallOptions, AzureOpenAIInput, OpenAIInput };
 /**
  * Wrapper around OpenAI large language models.
  *
  * To use you should have the `openai` package installed, with the
  * `OPENAI_API_KEY` environment variable set.
  *
+ * To use with Azure you should have the `openai` package installed, with the
+ * `AZURE_OPENAI_API_KEY`,
+ * `AZURE_OPENAI_API_INSTANCE_NAME`,
+ * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
+ * and `AZURE_OPENAI_API_VERSION` environment variable set.
+ *
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://platform.openai.com/docs/api-reference/completions/create |
  * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
  * if not explicitly available on this class.
  */
-export declare class OpenAI extends BaseLLM implements OpenAIInput {
+export declare class OpenAI extends BaseLLM implements OpenAIInput, AzureOpenAIInput {
     CallOptions: OpenAICallOptions;
     temperature: number;
     maxTokens: number;
@@ -79,20 +34,24 @@ export declare class OpenAI extends BaseLLM implements OpenAIInput {
     bestOf: number;
     logitBias?: Record<string, number>;
     modelName: string;
-    modelKwargs?:
+    modelKwargs?: OpenAIInput["modelKwargs"];
     batchSize: number;
     timeout?: number;
     stop?: string[];
     streaming: boolean;
+    azureOpenAIApiVersion?: string;
+    azureOpenAIApiKey?: string;
+    azureOpenAIApiInstanceName?: string;
+    azureOpenAIApiDeploymentName?: string;
     private client;
     private clientConfig;
-    constructor(fields?: Partial<OpenAIInput> & BaseLLMParams & {
+    constructor(fields?: Partial<OpenAIInput> & Partial<AzureOpenAIInput> & BaseLLMParams & {
        openAIApiKey?: string;
    }, configuration?: ConfigurationParameters);
    /**
     * Get the parameters used to invoke the model
     */
-    invocationParams(): CreateCompletionRequest
+    invocationParams(): CreateCompletionRequest;
    _identifyingParams(): {
        apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
        organization?: string | undefined;
@@ -184,4 +143,4 @@ export declare class PromptLayerOpenAI extends OpenAI {
    });
    completionWithRetry(request: CreateCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateCompletionResponse>;
 }
-export { OpenAIChat,
+export { OpenAIChat, PromptLayerOpenAIChat } from "./openai-chat.js";
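`openai.d.ts` mirrors the chat model: `OpenAI` now implements `AzureOpenAIInput`, the local `OpenAIInput`/`OpenAICallOptions` declarations give way to the shared `../types/open-ai-types.js` module, and `PromptLayerOpenAIChat` is re-exported alongside `OpenAIChat`. A final environment-driven sketch, with the same assumed import path as before:

```typescript
// Sketch; assumes AZURE_OPENAI_API_KEY, AZURE_OPENAI_API_INSTANCE_NAME,
// AZURE_OPENAI_API_DEPLOYMENT_NAME and AZURE_OPENAI_API_VERSION are exported
// in the shell, so no key fields need to be passed explicitly.
import { OpenAI } from "langchain/llms";

const llm = new OpenAI({ temperature: 0 });

// Chat-style model names still short-circuit to an OpenAIChat instance inside
// the constructor (see the gpt-3.5-turbo / gpt-4 branch in openai.cjs above).
const chat = new OpenAI({ modelName: "gpt-3.5-turbo" });

console.log(await llm.call("Say hi"));
```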