langchain 0.0.140 → 0.0.142
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
- package/dist/chains/openai_moderation.cjs +5 -13
- package/dist/chains/openai_moderation.d.ts +5 -5
- package/dist/chains/openai_moderation.js +6 -11
- package/dist/chat_models/anthropic.d.ts +2 -2
- package/dist/chat_models/openai.cjs +99 -215
- package/dist/chat_models/openai.d.ts +20 -60
- package/dist/chat_models/openai.js +101 -214
- package/dist/document_loaders/web/github.cjs +4 -0
- package/dist/document_loaders/web/github.js +4 -0
- package/dist/embeddings/openai.cjs +32 -22
- package/dist/embeddings/openai.d.ts +3 -3
- package/dist/embeddings/openai.js +34 -21
- package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
- package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
- package/dist/experimental/chat_models/anthropic_functions.js +3 -0
- package/dist/llms/openai-chat.cjs +69 -187
- package/dist/llms/openai-chat.d.ts +19 -71
- package/dist/llms/openai-chat.js +71 -186
- package/dist/llms/openai.cjs +92 -166
- package/dist/llms/openai.d.ts +25 -71
- package/dist/llms/openai.js +94 -165
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/prompts/chat.cjs +21 -9
- package/dist/prompts/chat.d.ts +3 -3
- package/dist/prompts/chat.js +22 -10
- package/dist/schema/index.d.ts +2 -2
- package/dist/schema/runnable.cjs +3 -0
- package/dist/schema/runnable.d.ts +1 -0
- package/dist/schema/runnable.js +3 -0
- package/dist/tools/convert_to_openai.d.ts +2 -2
- package/dist/types/openai-types.d.ts +27 -4
- package/dist/util/async_caller.cjs +10 -7
- package/dist/util/async_caller.js +10 -7
- package/dist/util/azure.cjs +4 -4
- package/dist/util/azure.d.ts +3 -3
- package/dist/util/azure.js +4 -4
- package/dist/util/openai.cjs +21 -0
- package/dist/util/openai.d.ts +1 -0
- package/dist/util/openai.js +17 -0
- package/dist/util/prompt-layer.cjs +1 -2
- package/dist/util/prompt-layer.d.ts +2 -2
- package/dist/util/prompt-layer.js +1 -2
- package/package.json +10 -2
- package/schema/document.cjs +1 -0
- package/schema/document.d.ts +1 -0
- package/schema/document.js +1 -0
package/dist/embeddings/openai.d.ts

@@ -1,5 +1,5 @@
-import {
-import { AzureOpenAIInput } from "../types/openai-types.js";
+import { type ClientOptions } from "openai";
+import { AzureOpenAIInput, LegacyOpenAIInput } from "../types/openai-types.js";
 import { Embeddings, EmbeddingsParams } from "./base.js";
 /**
  * Interface for OpenAIEmbeddings parameters. Extends EmbeddingsParams and
@@ -43,7 +43,7 @@ export declare class OpenAIEmbeddings extends Embeddings implements OpenAIEmbedd
     constructor(fields?: Partial<OpenAIEmbeddingsParams> & Partial<AzureOpenAIInput> & {
         verbose?: boolean;
         openAIApiKey?: string;
-    }, configuration?:
+    }, configuration?: ClientOptions & LegacyOpenAIInput);
    /**
     * Method to generate embeddings for an array of documents. Splits the
     * documents into batches and makes requests to the OpenAI API to generate
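In 0.0.142 the `configuration` argument is typed as the OpenAI v4 SDK's `ClientOptions`, with the old v3-style fields still accepted through `LegacyOpenAIInput`. A minimal usage sketch, assuming the `langchain/embeddings/openai` entry point; the base URL and header values below are purely illustrative:

```ts
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

// First argument: embeddings params; second argument: ClientOptions & LegacyOpenAIInput.
const embeddings = new OpenAIEmbeddings(
  { modelName: "text-embedding-ada-002", openAIApiKey: process.env.OPENAI_API_KEY },
  {
    baseURL: "https://example-proxy.internal/v1", // hypothetical proxy endpoint
    defaultHeaders: { "x-team": "search" },       // illustrative extra header
  }
);

const vector = await embeddings.embedQuery("hello world");
```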
package/dist/embeddings/openai.js

@@ -1,9 +1,9 @@
-import {
-import { getEnvironmentVariable
-import fetchAdapter from "../util/axios-fetch-adapter.js";
+import { OpenAI as OpenAIClient } from "openai";
+import { getEnvironmentVariable } from "../util/env.js";
 import { chunkArray } from "../util/chunk.js";
 import { Embeddings } from "./base.js";
 import { getEndpoint } from "../util/azure.js";
+import { wrapOpenAIClientError } from "../util/openai.js";
 /**
  * Class for generating embeddings using the OpenAI API. Extends the
  * Embeddings class and implements OpenAIEmbeddingsParams and
@@ -120,6 +120,10 @@ export class OpenAIEmbeddings extends Embeddings {
         }
         this.clientConfig = {
             apiKey,
+            baseURL: configuration?.basePath,
+            dangerouslyAllowBrowser: true,
+            defaultHeaders: configuration?.baseOptions?.headers,
+            defaultQuery: configuration?.baseOptions?.params,
             ...configuration,
         };
     }
@@ -142,7 +146,7 @@ export class OpenAIEmbeddings extends Embeddings {
             const batch = batches[i];
             const { data: batchResponse } = batchResponses[i];
             for (let j = 0; j < batch.length; j += 1) {
-                embeddings.push(batchResponse
+                embeddings.push(batchResponse[j].embedding);
             }
         }
         return embeddings;
@@ -158,7 +162,7 @@ export class OpenAIEmbeddings extends Embeddings {
             model: this.modelName,
            input: this.stripNewLines ? text.replace(/\n/g, " ") : text,
        });
-        return data
+        return data[0].embedding;
     }
     /**
      * Private method to make a request to the OpenAI API to generate
@@ -174,31 +178,40 @@ export class OpenAIEmbeddings extends Embeddings {
                 azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                 azureOpenAIApiKey: this.azureOpenAIApiKey,
                 azureOpenAIBasePath: this.azureOpenAIBasePath,
-
+                baseURL: this.clientConfig.baseURL,
             };
             const endpoint = getEndpoint(openAIEndpointConfig);
-            const
+            const params = {
                 ...this.clientConfig,
-
-
-
-
-
-
-            }
-            this.client = new
+                baseURL: endpoint,
+                timeout: this.timeout,
+                maxRetries: 0,
+            };
+            if (!params.baseURL) {
+                delete params.baseURL;
+            }
+            this.client = new OpenAIClient(params);
         }
-        const
+        const requestOptions = {};
         if (this.azureOpenAIApiKey) {
-
+            requestOptions.headers = {
                 "api-key": this.azureOpenAIApiKey,
-                ...
+                ...requestOptions.headers,
             };
-
+            requestOptions.query = {
                 "api-version": this.azureOpenAIApiVersion,
-                ...
+                ...requestOptions.query,
             };
         }
-        return this.caller.call(
+        return this.caller.call(async () => {
+            try {
+                const res = await this.client.embeddings.create(request, requestOptions);
+                return res;
+            }
+            catch (e) {
+                const error = wrapOpenAIClientError(e);
+                throw error;
+            }
+        });
     }
 }
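All embedding requests now go through the instance's caller, with SDK errors normalized by the new `util/openai.js` helper before being rethrown. A sketch of that retry-and-wrap pattern follows; the `wrapOpenAIClientError` body here is a placeholder (the helper's implementation is not part of this diff), and the caller type is only an assumed shape of langchain's AsyncCaller:

```ts
import { OpenAI as OpenAIClient } from "openai";

// Placeholder: the real wrapOpenAIClientError lives in langchain's util/openai.js,
// whose implementation is not shown in this diff.
function wrapOpenAIClientError(e: unknown): Error {
  return e instanceof Error ? e : new Error(String(e));
}

// Assumed minimal shape of langchain's AsyncCaller (handles retries/concurrency).
type Caller = { call<T>(fn: () => Promise<T>): Promise<T> };

async function embedWithRetry(
  client: OpenAIClient,
  caller: Caller,
  request: OpenAIClient.Embeddings.EmbeddingCreateParams
) {
  return caller.call(async () => {
    try {
      // Same v4 SDK call the hunk above introduces.
      return await client.embeddings.create(request);
    } catch (e) {
      // Normalize SDK errors before rethrowing, as in the hunk above.
      throw wrapOpenAIClientError(e);
    }
  });
}
```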
package/dist/experimental/chat_models/anthropic_functions.cjs

@@ -83,6 +83,9 @@ class AnthropicFunctions extends anthropic_js_1.ChatAnthropic {
         if (forced) {
             const parser = new fast_xml_parser_1.XMLParser();
             const result = parser.parse(`${chatGenerationContent}</tool_input>`);
+            if (functionCall === undefined) {
+                throw new Error(`Could not parse called function from model output.`);
+            }
             const responseMessageWithFunctions = new index_js_1.AIMessage({
                 content: "",
                 additional_kwargs: {
package/dist/experimental/chat_models/anthropic_functions.d.ts

@@ -1,4 +1,4 @@
-import type {
+import type { OpenAI as OpenAIClient } from "openai";
 import { BaseChatModelParams } from "../../chat_models/base.js";
 import { CallbackManagerForLLMRun } from "../../callbacks/manager.js";
 import { BaseMessage, ChatResult } from "../../schema/index.js";
@@ -6,8 +6,8 @@ import { ChatAnthropic, type AnthropicInput } from "../../chat_models/anthropic.
 import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
 import { StructuredTool } from "../../tools/base.js";
 export interface ChatAnthropicFunctionsCallOptions extends BaseLanguageModelCallOptions {
-    function_call?:
-    functions?:
+    function_call?: OpenAIClient.Chat.ChatCompletionCreateParams.FunctionCallOption;
+    functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];
     tools?: StructuredTool[];
 }
 export declare class AnthropicFunctions extends ChatAnthropic<ChatAnthropicFunctionsCallOptions> {
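The call options now reuse the OpenAI SDK's function-calling parameter types, so tools can be described with the familiar JSON-schema `functions` shape. A hedged usage sketch; the entry point path is inferred from the file list above, and the weather function is invented for illustration:

```ts
import { AnthropicFunctions } from "langchain/experimental/chat_models/anthropic_functions";
import { HumanMessage } from "langchain/schema";

const model = new AnthropicFunctions({ temperature: 0 });

// `functions` follows OpenAIClient.Chat.ChatCompletionCreateParams.Function[],
// i.e. the same JSON-schema shape used for OpenAI function calling.
const response = await model.call([new HumanMessage("What's the weather in Paris?")], {
  functions: [
    {
      name: "get_current_weather",
      description: "Get the current weather for a city",
      parameters: {
        type: "object",
        properties: { location: { type: "string" } },
        required: ["location"],
      },
    },
  ],
});

// When a function is forced/parsed, the call lands in additional_kwargs (see the hunks above/below).
console.log(response.additional_kwargs.function_call);
```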
package/dist/experimental/chat_models/anthropic_functions.js

@@ -80,6 +80,9 @@ export class AnthropicFunctions extends ChatAnthropic {
         if (forced) {
             const parser = new XMLParser();
             const result = parser.parse(`${chatGenerationContent}</tool_input>`);
+            if (functionCall === undefined) {
+                throw new Error(`Could not parse called function from model output.`);
+            }
             const responseMessageWithFunctions = new AIMessage({
                 content: "",
                 additional_kwargs: {
package/dist/llms/openai-chat.cjs

@@ -1,17 +1,13 @@
 "use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PromptLayerOpenAIChat = exports.OpenAIChat = void 0;
 const openai_1 = require("openai");
 const index_js_1 = require("../schema/index.cjs");
-const axios_fetch_adapter_js_1 = __importDefault(require("../util/axios-fetch-adapter.cjs"));
 const azure_js_1 = require("../util/azure.cjs");
 const env_js_1 = require("../util/env.cjs");
 const prompt_layer_js_1 = require("../util/prompt-layer.cjs");
-const stream_js_1 = require("../util/stream.cjs");
 const base_js_1 = require("./base.cjs");
+const openai_js_1 = require("../util/openai.cjs");
 /**
  * Wrapper around OpenAI large language models that use the Chat endpoint.
 *
@@ -266,6 +262,12 @@ class OpenAIChat extends base_js_1.LLM {
         this.clientConfig = {
             apiKey: this.openAIApiKey,
             organization: this.organization,
+            baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+            dangerouslyAllowBrowser: true,
+            defaultHeaders: configuration?.baseOptions?.headers ??
+                fields?.configuration?.baseOptions?.headers,
+            defaultQuery: configuration?.baseOptions?.params ??
+                fields?.configuration?.baseOptions?.params,
             ...configuration,
             ...fields?.configuration,
         };
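The constructor maps legacy v3-style configuration onto the v4 client config: `basePath` becomes `baseURL`, and `baseOptions.headers` / `baseOptions.params` become `defaultHeaders` / `defaultQuery`. A small sketch of what that means for callers, assuming `OpenAIChat` is re-exported from `langchain/llms/openai`; the proxy URL and header are illustrative:

```ts
import { OpenAIChat } from "langchain/llms/openai";

// Passing the old axios-era shape still works; it is mapped to the new client options...
const legacyStyle = new OpenAIChat({
  modelName: "gpt-3.5-turbo",
  configuration: {
    basePath: "https://example-proxy.internal/v1",            // mapped to baseURL
    baseOptions: { headers: { "x-request-source": "docs" } }, // mapped to defaultHeaders
  },
});

// ...and the equivalent v4 ClientOptions can be passed directly.
const newStyle = new OpenAIChat({
  modelName: "gpt-3.5-turbo",
  configuration: {
    baseURL: "https://example-proxy.internal/v1",
    defaultHeaders: { "x-request-source": "docs" },
  },
});
```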
@@ -319,18 +321,15 @@
         };
         return this.prefixMessages ? [...this.prefixMessages, message] : [message];
     }
-    // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
-    // when we integrate OpenAI's new SDK.
     async *_streamResponseChunks(prompt, options, runManager) {
         const params = {
             ...this.invocationParams(options),
             messages: this.formatMessages(prompt),
             stream: true,
         };
-        const
-        for await (const
-        const
-        const choice = data.choices?.[0];
+        const stream = await this.completionWithRetry(params, options);
+        for await (const data of stream) {
+            const choice = data.choices[0];
             if (!choice) {
                 continue;
             }
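Streaming now iterates the SDK's async chunk stream, and each token callback receives the prompt/completion indices built in the next hunk. A hedged sketch of observing those indices with an object-style callback handler; the handler shape is assumed from the `handleLLMNewToken(text, newTokenIndices)` call shown below:

```ts
import { OpenAIChat } from "langchain/llms/openai";

const model = new OpenAIChat({
  modelName: "gpt-3.5-turbo",
  streaming: true,
  callbacks: [
    {
      // idx carries { prompt, completion } as passed to handleLLMNewToken.
      handleLLMNewToken(token: string, idx: { prompt: number; completion: number }) {
        console.log(`prompt ${idx.prompt} / completion ${idx.completion}:`, token);
      },
    },
  ],
});

const text = await model.call("Tell me a short joke");
```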
@@ -339,201 +338,95 @@
                 text: delta.content ?? "",
             });
             yield generationChunk;
+            const newTokenIndices = {
+                prompt: options.promptIndex ?? 0,
+                completion: choice.index ?? 0,
+            };
             // eslint-disable-next-line no-void
-            void runManager?.handleLLMNewToken(generationChunk.text ?? "");
+            void runManager?.handleLLMNewToken(generationChunk.text ?? "", newTokenIndices);
+        }
+        if (options.signal?.aborted) {
+            throw new Error("AbortError");
         }
     }
-    /**
-
-
-
-
-
-
-
-
-        const writer = stream.writable.getWriter();
-        const iterable = (0, stream_js_1.readableStreamToAsyncIterable)(stream.readable);
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        let err;
-        this.completionWithRetry(request, {
-            ...options,
-            adapter: axios_fetch_adapter_js_1.default,
-            responseType: "stream",
-            onmessage: (event) => {
-                if (done)
-                    return;
-                if (event.data?.trim?.() === "[DONE]") {
-                    done = true;
-                    // eslint-disable-next-line no-void
-                    void writer.close();
+    /** @ignore */
+    async _call(prompt, options, runManager) {
+        const params = this.invocationParams(options);
+        if (params.stream) {
+            const stream = await this._streamResponseChunks(prompt, options, runManager);
+            let finalChunk;
+            for await (const chunk of stream) {
+                if (finalChunk === undefined) {
+                    finalChunk = chunk;
                 }
                 else {
-
-                    if (data.error) {
-                        done = true;
-                        throw data.error;
-                    }
-                    // eslint-disable-next-line no-void
-                    void writer.write(event.data);
+                    finalChunk = finalChunk.concat(chunk);
                 }
-            },
-        }).catch((error) => {
-            if (!done) {
-                err = error;
-                done = true;
-                // eslint-disable-next-line no-void
-                void writer.close();
             }
-
-
-
-
-            if (err) {
-                throw err;
-            }
-            return chunk;
-        },
-        [Symbol.asyncIterator]() {
-            return this;
-        },
-        };
-    }
-    /** @ignore */
-    async _call(prompt, options, runManager) {
-        const params = this.invocationParams(options);
-        const data = params.stream
-            ? await new Promise((resolve, reject) => {
-                let response;
-                let rejected = false;
-                let resolved = false;
-                this.completionWithRetry({
-                    ...params,
-                    messages: this.formatMessages(prompt),
-                }, {
-                    signal: options.signal,
-                    ...options.options,
-                    adapter: axios_fetch_adapter_js_1.default,
-                    responseType: "stream",
-                    onmessage: (event) => {
-                        if (event.data?.trim?.() === "[DONE]") {
-                            if (resolved || rejected) {
-                                return;
-                            }
-                            resolved = true;
-                            resolve(response);
-                        }
-                        else {
-                            const data = JSON.parse(event.data);
-                            if (data?.error) {
-                                if (rejected) {
-                                    return;
-                                }
-                                rejected = true;
-                                reject(data.error);
-                                return;
-                            }
-                            const message = data;
-                            // on the first message set the response properties
-                            if (!response) {
-                                response = {
-                                    id: message.id,
-                                    object: message.object,
-                                    created: message.created,
-                                    model: message.model,
-                                    choices: [],
-                                };
-                            }
-                            // on all messages, update choice
-                            for (const part of message.choices) {
-                                if (part != null) {
-                                    let choice = response.choices.find((c) => c.index === part.index);
-                                    if (!choice) {
-                                        choice = {
-                                            index: part.index,
-                                            finish_reason: part.finish_reason ?? undefined,
-                                        };
-                                        response.choices.push(choice);
-                                    }
-                                    if (!choice.message) {
-                                        choice.message = {
-                                            role: part.delta
-                                                ?.role,
-                                            content: part.delta?.content ?? "",
-                                        };
-                                    }
-                                    choice.message.content += part.delta?.content ?? "";
-                                    // eslint-disable-next-line no-void
-                                    void runManager?.handleLLMNewToken(part.delta?.content ?? "", {
-                                        prompt: options.promptIndex ?? 0,
-                                        completion: part.index,
-                                    });
-                                }
-                            }
-                            // when all messages are finished, resolve
-                            if (!resolved &&
-                                !rejected &&
-                                message.choices.every((c) => c.finish_reason != null)) {
-                                resolved = true;
-                                resolve(response);
-                            }
-                        }
-                    },
-                }).catch((error) => {
-                    if (!rejected) {
-                        rejected = true;
-                        reject(error);
-                    }
-                });
-            })
-            : await this.completionWithRetry({
+            return finalChunk?.text ?? "";
+        }
+        else {
+            const response = await this.completionWithRetry({
                 ...params,
+                stream: false,
                 messages: this.formatMessages(prompt),
             }, {
                 signal: options.signal,
                 ...options.options,
             });
-
+            return response?.choices[0]?.message?.content ?? "";
+        }
     }
-    /** @ignore */
     async completionWithRetry(request, options) {
+        const requestOptions = this._getClientOptions(options);
+        return this.caller.call(async () => {
+            try {
+                const res = await this.client.chat.completions.create(request, requestOptions);
+                return res;
+            }
+            catch (e) {
+                const error = (0, openai_js_1.wrapOpenAIClientError)(e);
+                throw error;
+            }
+        });
+    }
+    /** @ignore */
+    _getClientOptions(options) {
         if (!this.client) {
             const openAIEndpointConfig = {
                 azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
                 azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                 azureOpenAIApiKey: this.azureOpenAIApiKey,
                 azureOpenAIBasePath: this.azureOpenAIBasePath,
-
+                baseURL: this.clientConfig.baseURL,
             };
             const endpoint = (0, azure_js_1.getEndpoint)(openAIEndpointConfig);
-            const
+            const params = {
                 ...this.clientConfig,
-
-
-
-
-
-
-
+                baseURL: endpoint,
+                timeout: this.timeout,
+                maxRetries: 0,
+            };
+            if (!params.baseURL) {
+                delete params.baseURL;
+            }
+            this.client = new openai_1.OpenAI(params);
         }
-        const
-
-            ...this.clientConfig.baseOptions,
+        const requestOptions = {
+            ...this.clientConfig,
             ...options,
         };
         if (this.azureOpenAIApiKey) {
-
+            requestOptions.headers = {
                 "api-key": this.azureOpenAIApiKey,
-                ...
+                ...requestOptions.headers,
             };
-
+            requestOptions.query = {
                 "api-version": this.azureOpenAIApiVersion,
-                ...
+                ...requestOptions.query,
             };
         }
-        return
-            .call(this.client.createChatCompletion.bind(this.client), request, axiosOptions)
-            .then((res) => res.data);
+        return requestOptions;
     }
     _llmType() {
         return "openai";
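`completionWithRetry` now calls the v4 SDK directly and is overloaded: with `stream: true` it resolves to an async iterable of `ChatCompletionChunk`s, otherwise to a complete `ChatCompletion` (see the openai-chat.d.ts hunks below). A sketch of both shapes against a bare v4 client, which is the behavior the wrapper relies on:

```ts
import { OpenAI as OpenAIClient } from "openai";

const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });

// Non-streaming: a single ChatCompletion with fully formed choices.
const completion = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Say hi" }],
  stream: false,
});
console.log(completion.choices[0]?.message?.content);

// Streaming: an async iterable of ChatCompletionChunk objects with delta content,
// which is what _streamResponseChunks iterates over.
const stream = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Say hi" }],
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```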
@@ -584,19 +477,6 @@ class PromptLayerOpenAIChat extends OpenAIChat {
             throw new Error("Missing PromptLayer API key");
         }
     }
-    /**
-     * Makes a call to the OpenAI API with retry logic in case of failures.
-     * @param request The request to be sent to the OpenAI API.
-     * @param options Optional configuration for the Axios request.
-     * @returns The response from the OpenAI API.
-     */
-    async completionWithRetry(request, options) {
-        if (request.stream) {
-            return super.completionWithRetry(request, options);
-        }
-        const response = await super.completionWithRetry(request);
-        return response;
-    }
     async _generate(prompts, options, runManager) {
         let choice;
         const generations = await Promise.all(prompts.map(async (prompt) => {
@@ -607,7 +487,9 @@
             const parsedResp = {
                 text,
             };
-            const promptLayerRespBody = await (0, prompt_layer_js_1.promptLayerTrackRequest)(this.caller, "langchain.PromptLayerOpenAIChat",
+            const promptLayerRespBody = await (0, prompt_layer_js_1.promptLayerTrackRequest)(this.caller, "langchain.PromptLayerOpenAIChat",
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            { ...this._identifyingParams(), prompt }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
             if (this.returnPromptLayerId === true &&
                 promptLayerRespBody.success === true) {
                 choice[0].generationInfo = {
package/dist/llms/openai-chat.d.ts

@@ -1,8 +1,7 @@
-import {
+import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
 import { GenerationChunk, LLMResult } from "../schema/index.js";
-import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput } from "../types/openai-types.js";
-import type { StreamingAxiosConfiguration } from "../util/axios-types.js";
+import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput, OpenAICoreRequestOptions, LegacyOpenAIInput } from "../types/openai-types.js";
 import { BaseLLMParams, LLM } from "./base.js";
 export { AzureOpenAIInput, OpenAIChatInput };
 /**
@@ -51,7 +50,7 @@ export declare class OpenAIChat extends LLM<OpenAIChatCallOptions> implements Op
     logitBias?: Record<string, number>;
     maxTokens?: number;
     modelName: string;
-    prefixMessages?:
+    prefixMessages?: OpenAIClient.Chat.ChatCompletionMessageParam[];
     modelKwargs?: OpenAIChatInput["modelKwargs"];
     timeout?: number;
     stop?: string[];
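`prefixMessages` is now typed with the SDK's `ChatCompletionMessageParam`, i.e. plain role/content message objects. A small illustrative sketch, again assuming the `langchain/llms/openai` entry point:

```ts
import { OpenAIChat } from "langchain/llms/openai";

// Each prefix message follows OpenAIClient.Chat.ChatCompletionMessageParam.
const model = new OpenAIChat({
  modelName: "gpt-3.5-turbo",
  prefixMessages: [
    { role: "system", content: "You are a terse assistant that answers in one sentence." },
  ],
});

const answer = await model.call("What is a monorepo?");
```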
@@ -67,66 +66,24 @@ export declare class OpenAIChat extends LLM<OpenAIChatCallOptions> implements Op
     private client;
     private clientConfig;
     constructor(fields?: Partial<OpenAIChatInput> & Partial<AzureOpenAIInput> & BaseLLMParams & {
-        configuration?:
+        configuration?: ClientOptions & LegacyOpenAIInput;
     },
     /** @deprecated */
-    configuration?:
+    configuration?: ClientOptions & LegacyOpenAIInput);
     /**
      * Get the parameters used to invoke the model
      */
-    invocationParams(options?: this["ParsedCallOptions"]): Omit<
+    invocationParams(options?: this["ParsedCallOptions"]): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages">;
     /** @ignore */
-    _identifyingParams(): {
-        apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
-        organization?: string | undefined;
-        username?: string | undefined;
-        password?: string | undefined;
-        accessToken?: string | Promise<string> | ((name?: string | undefined, scopes?: string[] | undefined) => string) | ((name?: string | undefined, scopes?: string[] | undefined) => Promise<string>) | undefined;
-        basePath?: string | undefined;
-        baseOptions?: any;
-        formDataCtor?: (new () => any) | undefined;
-        function_call?: import("openai").CreateChatCompletionRequestFunctionCall | undefined;
-        stop?: import("openai").CreateChatCompletionRequestStop | undefined;
-        stream?: boolean | null | undefined;
-        user?: string | undefined;
-        functions?: import("openai").ChatCompletionFunctions[] | undefined;
-        model: string;
-        temperature?: number | null | undefined;
-        top_p?: number | null | undefined;
-        n?: number | null | undefined;
-        max_tokens?: number | undefined;
-        presence_penalty?: number | null | undefined;
-        frequency_penalty?: number | null | undefined;
-        logit_bias?: object | null | undefined;
+    _identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> & {
         model_name: string;
-    };
+    } & ClientOptions;
     /**
      * Get the identifying parameters for the model
      */
-    identifyingParams(): {
-        apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
-        organization?: string | undefined;
-        username?: string | undefined;
-        password?: string | undefined;
-        accessToken?: string | Promise<string> | ((name?: string | undefined, scopes?: string[] | undefined) => string) | ((name?: string | undefined, scopes?: string[] | undefined) => Promise<string>) | undefined;
-        basePath?: string | undefined;
-        baseOptions?: any;
-        formDataCtor?: (new () => any) | undefined;
-        function_call?: import("openai").CreateChatCompletionRequestFunctionCall | undefined;
-        stop?: import("openai").CreateChatCompletionRequestStop | undefined;
-        stream?: boolean | null | undefined;
-        user?: string | undefined;
-        functions?: import("openai").ChatCompletionFunctions[] | undefined;
-        model: string;
-        temperature?: number | null | undefined;
-        top_p?: number | null | undefined;
-        n?: number | null | undefined;
-        max_tokens?: number | undefined;
-        presence_penalty?: number | null | undefined;
-        frequency_penalty?: number | null | undefined;
-        logit_bias?: object | null | undefined;
+    identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> & {
         model_name: string;
-    };
+    } & ClientOptions;
     /**
      * Formats the messages for the OpenAI API.
      * @param prompt The prompt to be formatted.
@@ -134,20 +91,18 @@ export declare class OpenAIChat extends LLM<OpenAIChatCallOptions> implements Op
      */
     private formatMessages;
     _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
-    /**
-     * Starts a stream of responses from the OpenAI API.
-     * @param request The request to be sent to the OpenAI API.
-     * @param options Optional configuration for the Axios request.
-     * @returns An iterable object that can be used to iterate over the response chunks.
-     */
-    startStream(request: CreateChatCompletionRequest, options?: StreamingAxiosConfiguration): {
-        next(): Promise<any>;
-        [Symbol.asyncIterator](): any;
-    };
     /** @ignore */
     _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
+    /**
+     * Calls the OpenAI API with retry logic in case of failures.
+     * @param request The request to send to the OpenAI API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the OpenAI API.
+     */
+    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
+    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
     /** @ignore */
-
+    private _getClientOptions;
     _llmType(): string;
 }
 /**
@@ -166,12 +121,5 @@ export declare class PromptLayerOpenAIChat extends OpenAIChat {
         plTags?: string[];
         returnPromptLayerId?: boolean;
     });
-    /**
-     * Makes a call to the OpenAI API with retry logic in case of failures.
-     * @param request The request to be sent to the OpenAI API.
-     * @param options Optional configuration for the Axios request.
-     * @returns The response from the OpenAI API.
-     */
-    completionWithRetry(request: CreateChatCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateChatCompletionResponse>;
     _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
 }