langchain 0.0.140 → 0.0.141
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- package/dist/chains/openai_moderation.cjs +5 -13
- package/dist/chains/openai_moderation.d.ts +5 -5
- package/dist/chains/openai_moderation.js +6 -11
- package/dist/chat_models/anthropic.d.ts +2 -2
- package/dist/chat_models/openai.cjs +99 -215
- package/dist/chat_models/openai.d.ts +20 -60
- package/dist/chat_models/openai.js +101 -214
- package/dist/document_loaders/web/github.cjs +4 -0
- package/dist/document_loaders/web/github.js +4 -0
- package/dist/embeddings/openai.cjs +32 -22
- package/dist/embeddings/openai.d.ts +3 -3
- package/dist/embeddings/openai.js +34 -21
- package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
- package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
- package/dist/experimental/chat_models/anthropic_functions.js +3 -0
- package/dist/llms/openai-chat.cjs +69 -187
- package/dist/llms/openai-chat.d.ts +19 -71
- package/dist/llms/openai-chat.js +71 -186
- package/dist/llms/openai.cjs +92 -166
- package/dist/llms/openai.d.ts +25 -71
- package/dist/llms/openai.js +94 -165
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/schema/index.d.ts +2 -2
- package/dist/tools/convert_to_openai.d.ts +2 -2
- package/dist/types/openai-types.d.ts +27 -4
- package/dist/util/async_caller.cjs +10 -7
- package/dist/util/async_caller.js +10 -7
- package/dist/util/azure.cjs +4 -4
- package/dist/util/azure.d.ts +3 -3
- package/dist/util/azure.js +4 -4
- package/dist/util/openai.cjs +21 -0
- package/dist/util/openai.d.ts +1 -0
- package/dist/util/openai.js +17 -0
- package/dist/util/prompt-layer.cjs +1 -2
- package/dist/util/prompt-layer.d.ts +2 -2
- package/dist/util/prompt-layer.js +1 -2
- package/package.json +10 -2
- package/schema/document.cjs +1 -0
- package/schema/document.d.ts +1 -0
- package/schema/document.js +1 -0
package/dist/llms/openai.js
CHANGED
@@ -1,14 +1,13 @@
-import {
+import { OpenAI as OpenAIClient } from "openai";
 import { calculateMaxTokens } from "../base_language/count_tokens.js";
 import { GenerationChunk } from "../schema/index.js";
-import fetchAdapter from "../util/axios-fetch-adapter.js";
 import { getEndpoint } from "../util/azure.js";
 import { chunkArray } from "../util/chunk.js";
-import { getEnvironmentVariable
+import { getEnvironmentVariable } from "../util/env.js";
 import { promptLayerTrackRequest } from "../util/prompt-layer.js";
-import { readableStreamToAsyncIterable } from "../util/stream.js";
 import { BaseLLM } from "./base.js";
 import { OpenAIChat } from "./openai-chat.js";
+import { wrapOpenAIClientError } from "../util/openai.js";
 /**
  * Wrapper around OpenAI large language models.
  *
@@ -268,6 +267,12 @@ export class OpenAI extends BaseLLM {
         this.clientConfig = {
             apiKey: this.openAIApiKey,
             organization: this.organization,
+            baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+            dangerouslyAllowBrowser: true,
+            defaultHeaders: configuration?.baseOptions?.headers ??
+                fields?.configuration?.baseOptions?.headers,
+            defaultQuery: configuration?.baseOptions?.params ??
+                fields?.configuration?.baseOptions?.params,
             ...configuration,
             ...fields?.configuration,
         };
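The six added fields map the legacy axios-style configuration onto the option names the openai-v4 client constructor expects, while still spreading the raw `configuration` objects last. A minimal usage sketch, assuming the constructor keeps accepting the legacy shape as its second argument (the fallback chain above suggests it does; URL and header values are illustrative):

import { OpenAI } from "langchain/llms/openai";

const model = new OpenAI({ temperature: 0 }, {
    basePath: "https://example-proxy.internal/v1", // becomes baseURL
    baseOptions: {
        headers: { "X-Example-Header": "demo" },   // becomes defaultHeaders
        params: { "api-version": "2023-07-01" },   // becomes defaultQuery
    },
});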
@@ -292,6 +297,7 @@ export class OpenAI extends BaseLLM {
             ...this.modelKwargs,
         };
     }
+    /** @ignore */
     _identifyingParams() {
         return {
             model_name: this.modelName,
@@ -338,94 +344,62 @@ export class OpenAI extends BaseLLM {
         }
         for (let i = 0; i < subPrompts.length; i += 1) {
             const data = params.stream
-                ? await
+                ? await (async () => {
                     const choices = [];
                     let response;
-
-                    let resolved = false;
-                    this.completionWithRetry({
+                    const stream = await this.completionWithRetry({
                         ...params,
+                        stream: true,
                         prompt: subPrompts[i],
-                    },
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    }, options);
+                    for await (const message of stream) {
+                        // on the first message set the response properties
+                        if (!response) {
+                            response = {
+                                id: message.id,
+                                object: message.object,
+                                created: message.created,
+                                model: message.model,
+                            };
+                        }
+                        // on all messages, update choice
+                        for (const part of message.choices) {
+                            if (!choices[part.index]) {
+                                choices[part.index] = part;
                             }
                             else {
-                                const
-
-
-
-                            }
-                            rejected = true;
-                            reject(data.error);
-                            return;
-                        }
-                        const message = data;
-                        // on the first message set the response properties
-                        if (!response) {
-                            response = {
-                                id: message.id,
-                                object: message.object,
-                                created: message.created,
-                                model: message.model,
-                            };
-                        }
-                        // on all messages, update choice
-                        for (const part of message.choices) {
-                            if (part != null && part.index != null) {
-                                if (!choices[part.index])
-                                    choices[part.index] = {};
-                                const choice = choices[part.index];
-                                choice.text = (choice.text ?? "") + (part.text ?? "");
-                                choice.finish_reason = part.finish_reason;
-                                choice.logprobs = part.logprobs;
-                                // eslint-disable-next-line no-void
-                                void runManager?.handleLLMNewToken(part.text ?? "", {
-                                    prompt: Math.floor(part.index / this.n),
-                                    completion: part.index % this.n,
-                                });
-                            }
-                        }
-                        // when all messages are finished, resolve
-                        if (!resolved &&
-                            !rejected &&
-                            choices.every((c) => c.finish_reason != null)) {
-                            resolved = true;
-                            resolve({
-                                ...response,
-                                choices,
-                            });
-                        }
+                                const choice = choices[part.index];
+                                choice.text += part.text;
+                                choice.finish_reason = part.finish_reason;
+                                choice.logprobs = part.logprobs;
                             }
-
-
-
-
-                            reject(error);
+                            void runManager?.handleLLMNewToken(part.text, {
+                                prompt: Math.floor(part.index / this.n),
+                                completion: part.index % this.n,
+                            });
                         }
-                    }
-
+                    }
+                    if (options.signal?.aborted) {
+                        throw new Error("AbortError");
+                    }
+                    return { ...response, choices };
+                })()
                 : await this.completionWithRetry({
                     ...params,
+                    stream: false,
                     prompt: subPrompts[i],
                 }, {
                     signal: options.signal,
                     ...options.options,
                 });
             choices.push(...data.choices);
-            const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage
+            const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage
+                ? data.usage
+                : {
+                    completion_tokens: undefined,
+                    prompt_tokens: undefined,
+                    total_tokens: undefined,
+                };
             if (completionTokens) {
                 tokenUsage.completionTokens =
                     (tokenUsage.completionTokens ?? 0) + completionTokens;
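The rewritten streaming branch drops the old Promise/resolve/reject plumbing in favor of the v4 SDK's async-iterable streams, merging streamed parts into per-index choices as they arrive. The same aggregation pattern, sketched standalone against the openai package (model name and prompt are illustrative):

import { OpenAI as OpenAIClient } from "openai";

const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });
const stream = await client.completions.create({
    model: "text-davinci-003", // illustrative
    prompt: "Write a haiku about diffs.",
    stream: true,
});

// Merge streamed parts into one choice per index, as _generate now does.
const choices: Array<{ text: string; finish_reason: string | null }> = [];
for await (const message of stream) {
    for (const part of message.choices) {
        if (!choices[part.index]) {
            choices[part.index] = { text: part.text, finish_reason: part.finish_reason };
        }
        else {
            choices[part.index].text += part.text;
            choices[part.index].finish_reason = part.finish_reason;
        }
    }
}
console.log(choices[0]?.text);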
@@ -449,18 +423,16 @@ export class OpenAI extends BaseLLM {
             llmOutput: { tokenUsage },
         };
     }
-    // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
-    // when we integrate OpenAI's new SDK.
+    // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation?
     async *_streamResponseChunks(input, options, runManager) {
         const params = {
             ...this.invocationParams(options),
             prompt: input,
             stream: true,
         };
-        const
-        for await (const
-        const
-            const choice = data.choices?.[0];
+        const stream = await this.completionWithRetry(params, options);
+        for await (const data of stream) {
+            const choice = data.choices[0];
             if (!choice) {
                 continue;
             }
@@ -468,103 +440,71 @@ export class OpenAI extends BaseLLM {
                 text: choice.text,
                 generationInfo: {
                     finishReason: choice.finish_reason,
-                    logprobs: choice.logprobs,
                 },
             });
             yield chunk;
             // eslint-disable-next-line no-void
             void runManager?.handleLLMNewToken(chunk.text ?? "");
         }
+        if (options.signal?.aborted) {
+            throw new Error("AbortError");
+        }
     }
-
-
-
-
-
-
-
-
-
-
-            responseType: "stream",
-            onmessage: (event) => {
-                if (done)
-                    return;
-                if (event.data?.trim?.() === "[DONE]") {
-                    done = true;
-                    // eslint-disable-next-line no-void
-                    void writer.close();
-                }
-                else {
-                    const data = JSON.parse(event.data);
-                    if (data.error) {
-                        done = true;
-                        throw data.error;
-                    }
-                    // eslint-disable-next-line no-void
-                    void writer.write(event.data);
-                }
-            },
-        }).catch((error) => {
-            if (!done) {
-                err = error;
-                done = true;
-                // eslint-disable-next-line no-void
-                void writer.close();
+    async completionWithRetry(request, options) {
+        const requestOptions = this._getClientOptions(options);
+        return this.caller.call(async () => {
+            try {
+                const res = await this.client.completions.create(request, requestOptions);
+                return res;
+            }
+            catch (e) {
+                const error = wrapOpenAIClientError(e);
+                throw error;
             }
         });
-        return {
-            async next() {
-                const chunk = await iterable.next();
-                if (err) {
-                    throw err;
-                }
-                return chunk;
-            },
-            [Symbol.asyncIterator]() {
-                return this;
-            },
-        };
     }
-    /**
-
+    /**
+     * Calls the OpenAI API with retry logic in case of failures.
+     * @param request The request to send to the OpenAI API.
+     * @param options Optional configuration for the API call.
+     * @returns The response from the OpenAI API.
+     */
+    _getClientOptions(options) {
         if (!this.client) {
             const openAIEndpointConfig = {
                 azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
                 azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                 azureOpenAIApiKey: this.azureOpenAIApiKey,
                 azureOpenAIBasePath: this.azureOpenAIBasePath,
-
+                baseURL: this.clientConfig.baseURL,
             };
             const endpoint = getEndpoint(openAIEndpointConfig);
-            const
+            const params = {
                 ...this.clientConfig,
-
-
-
-
-
-
-
+                baseURL: endpoint,
+                timeout: this.timeout,
+                maxRetries: 0,
+            };
+            if (!params.baseURL) {
+                delete params.baseURL;
+            }
+            this.client = new OpenAIClient(params);
         }
-        const
-
-            ...this.clientConfig.baseOptions,
+        const requestOptions = {
+            ...this.clientConfig,
             ...options,
         };
         if (this.azureOpenAIApiKey) {
-
+            requestOptions.headers = {
                 "api-key": this.azureOpenAIApiKey,
-                ...
+                ...requestOptions.headers,
             };
-
+            requestOptions.query = {
                 "api-version": this.azureOpenAIApiVersion,
-                ...
+                ...requestOptions.query,
             };
         }
-        return
-            .call(this.client.createCompletion.bind(this.client), request, axiosOptions)
-            .then((res) => res.data);
+        return requestOptions;
    }
    _llmType() {
        return "openai";
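`completionWithRetry` now funnels every SDK failure through `wrapOpenAIClientError` from the newly added `package/dist/util/openai.js`, whose body is not shown in this diff. Since `AsyncCaller` (further down) starts checking `error.name` for "TimeoutError" and "AbortError", a plausible minimal shape for the wrapper is the sketch below; the exact mapping is an assumption, and only the error classes come from the openai package:

import { APIConnectionTimeoutError, APIUserAbortError } from "openai";

// Hypothetical sketch: normalize openai-v4 SDK errors into the generic
// names that AsyncCaller's retry logic inspects.
export function wrapOpenAIClientError(e: Error): Error {
    if (e instanceof APIConnectionTimeoutError) {
        const error = new Error(e.message);
        error.name = "TimeoutError";
        return error;
    }
    if (e instanceof APIUserAbortError) {
        const error = new Error(e.message);
        error.name = "AbortError";
        return error;
    }
    return e;
}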
@@ -615,19 +555,6 @@ export class PromptLayerOpenAI extends OpenAI {
             throw new Error("Missing PromptLayer API key");
         }
     }
-    /**
-     * Calls the OpenAI API with retry logic in case of failures.
-     * @param request The request to send to the OpenAI API.
-     * @param options Optional configuration for the API call.
-     * @returns The response from the OpenAI API.
-     */
-    async completionWithRetry(request, options) {
-        if (request.stream) {
-            return super.completionWithRetry(request, options);
-        }
-        const response = await super.completionWithRetry(request);
-        return response;
-    }
     async _generate(prompts, options, runManager) {
         const requestStartTime = Date.now();
         const generations = await super._generate(prompts, options, runManager);
@@ -637,7 +564,9 @@ export class PromptLayerOpenAI extends OpenAI {
                 text: generations.generations[i][0].text,
                 llm_output: generations.llmOutput,
             };
-            const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerOpenAI",
+            const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerOpenAI",
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            { ...this._identifyingParams(), prompt: prompts[i] }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
             let promptLayerRequestId;
             if (this.returnPromptLayerId === true) {
                 if (promptLayerRespBody && promptLayerRespBody.success === true) {
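With `_generate`, `_streamResponseChunks`, and `completionWithRetry` all on the v4 client, token streaming is consumed the same way end to end. A hedged usage sketch against the public class (the callback shape matches the `handleLLMNewToken` calls above; the model name is illustrative):

import { OpenAI } from "langchain/llms/openai";

const model = new OpenAI({
    modelName: "text-davinci-003", // illustrative
    streaming: true,
    callbacks: [
        {
            handleLLMNewToken(token: string) {
                process.stdout.write(token);
            },
        },
    ],
});

const text = await model.call("Tell me a joke.");
console.log(text);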
package/dist/load/import_map.cjs
CHANGED
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = void 0;
+exports.retrievers__multi_vector = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = void 0;
 exports.load__serializable = __importStar(require("../load/serializable.cjs"));
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -63,6 +63,7 @@ exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
 exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
 exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));
 exports.schema = __importStar(require("../schema/index.cjs"));
+exports.schema__document = __importStar(require("../schema/document.cjs"));
 exports.schema__output_parser = __importStar(require("../schema/output_parser.cjs"));
 exports.schema__query_constructor = __importStar(require("../schema/query_constructor.cjs"));
 exports.schema__retriever = __importStar(require("../schema/retriever.cjs"));
package/dist/load/import_map.d.ts
CHANGED
@@ -35,6 +35,7 @@ export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
 export * as chat_models__ollama from "../chat_models/ollama.js";
 export * as chat_models__minimax from "../chat_models/minimax.js";
 export * as schema from "../schema/index.js";
+export * as schema__document from "../schema/document.js";
 export * as schema__output_parser from "../schema/output_parser.js";
 export * as schema__query_constructor from "../schema/query_constructor.js";
 export * as schema__retriever from "../schema/retriever.js";
package/dist/load/import_map.js
CHANGED
@@ -36,6 +36,7 @@ export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
 export * as chat_models__ollama from "../chat_models/ollama.js";
 export * as chat_models__minimax from "../chat_models/minimax.js";
 export * as schema from "../schema/index.js";
+export * as schema__document from "../schema/document.js";
 export * as schema__output_parser from "../schema/output_parser.js";
 export * as schema__query_constructor from "../schema/query_constructor.js";
 export * as schema__retriever from "../schema/retriever.js";
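All three build artifacts gain the same `schema__document` entry; the double underscore encodes a path separator, so this registers the `langchain/schema/document` entrypoint (backed by the new root-level `package/schema/document.*` shims in the file list) with the serialization import map. A hedged sketch of loading it, since the module's export list is not part of this diff:

// Hypothetical import: the new module's exports are not shown in this diff.
import * as schema_document from "langchain/schema/document";

console.log(Object.keys(schema_document));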
package/dist/schema/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import type { OpenAI as OpenAIClient } from "openai";
 import { Document } from "../document.js";
 import { Serializable } from "../load/serializable.js";
 export declare const RUN_KEY = "__run";
@@ -64,7 +64,7 @@ export interface BaseMessageFields {
     content: string;
     name?: string;
     additional_kwargs?: {
-        function_call?:
+        function_call?: OpenAIClient.Chat.ChatCompletionMessage.FunctionCall;
         [key: string]: unknown;
     };
 }
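`function_call` is now typed against the SDK namespace, i.e. `{ name: string; arguments: string }` in the v4 types. A message carrying a function call can then be written as below (the function name and arguments are illustrative):

import { AIMessage } from "langchain/schema";

const message = new AIMessage({
    content: "",
    additional_kwargs: {
        function_call: {
            name: "get_current_weather", // illustrative
            arguments: JSON.stringify({ location: "Paris" }),
        },
    },
});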
package/dist/tools/convert_to_openai.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import type { OpenAI as OpenAIClient } from "openai";
 import { StructuredTool } from "./base.js";
 /**
  * Formats a `StructuredTool` instance into a format that is compatible
@@ -6,4 +6,4 @@ import { StructuredTool } from "./base.js";
  * function to convert the schema of the `StructuredTool` into a JSON
  * schema, which is then used as the parameters for the OpenAI function.
  */
-export declare function formatToOpenAIFunction(tool: StructuredTool):
+export declare function formatToOpenAIFunction(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionCreateParams.Function;
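Only the declared return type changes here: the converter now yields the v4 `ChatCompletionCreateParams.Function` shape. A hedged usage sketch, assuming the `tools/convert_to_openai` entrypoint is exposed:

import { Calculator } from "langchain/tools/calculator";
import { formatToOpenAIFunction } from "langchain/tools/convert_to_openai";

// Produces the { name, description, parameters } object expected by the
// OpenAI functions API, with the tool's schema converted to JSON schema.
const fn = formatToOpenAIFunction(new Calculator());
console.log(fn.name, fn.parameters);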
package/dist/types/openai-types.d.ts
CHANGED
@@ -1,5 +1,4 @@
-import {
-import { ChatCompletionRequestMessage } from "openai";
+import type { OpenAI as OpenAIClient } from "openai";
 import { BaseLanguageModelCallOptions } from "../base_language/index.js";
 export declare interface OpenAIBaseInput {
     /** Sampling temperature to use */
@@ -42,11 +41,23 @@ export declare interface OpenAIBaseInput {
      */
     openAIApiKey?: string;
 }
+export type OpenAICoreRequestOptions<Req extends object = Record<string, unknown>> = {
+    path?: string;
+    query?: Req | undefined;
+    body?: Req | undefined;
+    headers?: Record<string, string | null | undefined> | undefined;
+    maxRetries?: number;
+    stream?: boolean | undefined;
+    timeout?: number;
+    httpAgent?: any;
+    signal?: AbortSignal | undefined | null;
+    idempotencyKey?: string;
+};
 export interface OpenAICallOptions extends BaseLanguageModelCallOptions {
     /**
      * Additional options to pass to the underlying axios request.
      */
-    options?:
+    options?: OpenAICoreRequestOptions;
 }
 /**
  * Input to OpenAI class.
@@ -57,9 +68,21 @@ export declare interface OpenAIInput extends OpenAIBaseInput {
     /** Batch size to use when passing multiple documents to generate */
     batchSize: number;
 }
+/**
+ * @deprecated Use "baseURL", "defaultHeaders", and "defaultParams" instead.
+ */
+export interface LegacyOpenAIInput {
+    /** @deprecated Use baseURL instead */
+    basePath?: string;
+    /** @deprecated Use defaultHeaders and defaultQuery instead */
+    baseOptions?: {
+        headers?: Record<string, string>;
+        params?: Record<string, string>;
+    };
+}
 export interface OpenAIChatInput extends OpenAIBaseInput {
     /** ChatGPT messages to pass as a prefix to the prompt */
-    prefixMessages?:
+    prefixMessages?: OpenAIClient.Chat.CreateChatCompletionRequestMessage[];
 }
 export declare interface AzureOpenAIInput {
     /**
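`OpenAICoreRequestOptions` mirrors the per-request option bag of the v4 SDK and replaces the axios config as the type of the per-call `options` field (the untouched doc comment above still says "axios", which is now stale). A hedged sketch of passing per-call options:

import { OpenAI } from "langchain/llms/openai";

const model = new OpenAI({ temperature: 0 });

// `options` is forwarded to the underlying client request.
const res = await model.call("Say hi.", {
    options: {
        timeout: 10000,                          // ms, per OpenAICoreRequestOptions
        headers: { "X-Example-Header": "demo" }, // illustrative
    },
});
console.log(res);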
package/dist/util/async_caller.cjs
CHANGED
@@ -9,6 +9,7 @@ const p_queue_1 = __importDefault(require("p-queue"));
 const STATUS_NO_RETRY = [
     400,
     401,
+    402,
     403,
     404,
     405,
@@ -69,24 +70,26 @@ class AsyncCaller {
             onFailedAttempt(error) {
                 if (error.message.startsWith("Cancel") ||
                     error.message.startsWith("TimeoutError") ||
-                    error.
+                    error.name === "TimeoutError" ||
+                    error.message.startsWith("AbortError") ||
+                    error.name === "AbortError") {
                     throw error;
                 }
                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
                 if (error?.code === "ECONNABORTED") {
                     throw error;
                 }
+                const status =
                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-
+                error?.response?.status ?? error?.status;
                 if (status && STATUS_NO_RETRY.includes(+status)) {
                     throw error;
                 }
                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-
-
-
-
-                throw error;
+                if (error?.error?.code === "insufficient_quota") {
+                    const err = new Error(error?.message);
+                    err.name = "InsufficientQuotaError";
+                    throw err;
                 }
             },
             retries: this.maxRetries,
package/dist/util/async_caller.js
CHANGED
@@ -3,6 +3,7 @@ import PQueueMod from "p-queue";
 const STATUS_NO_RETRY = [
     400,
     401,
+    402,
     403,
     404,
     405,
@@ -63,24 +64,26 @@ export class AsyncCaller {
             onFailedAttempt(error) {
                 if (error.message.startsWith("Cancel") ||
                     error.message.startsWith("TimeoutError") ||
-                    error.
+                    error.name === "TimeoutError" ||
+                    error.message.startsWith("AbortError") ||
+                    error.name === "AbortError") {
                     throw error;
                 }
                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
                 if (error?.code === "ECONNABORTED") {
                     throw error;
                 }
+                const status =
                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-
+                error?.response?.status ?? error?.status;
                 if (status && STATUS_NO_RETRY.includes(+status)) {
                     throw error;
                 }
                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-
-
-
-
-                throw error;
+                if (error?.error?.code === "insufficient_quota") {
+                    const err = new Error(error?.message);
+                    err.name = "InsufficientQuotaError";
+                    throw err;
                 }
             },
             retries: this.maxRetries,
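Both builds get the same three changes: HTTP 402 joins the no-retry status list, abort/timeout detection now also checks `error.name` (matching the errors `wrapOpenAIClientError` produces), and an OpenAI `insufficient_quota` failure is rethrown as a non-retried `InsufficientQuotaError`. A standalone sketch of the resulting decision logic; the function name is illustrative, and the tail of the status list past 405 is not shown in the hunks above:

const STATUS_NO_RETRY = [400, 401, 402, 403, 404, 405 /* , ... */];

// Returns the error to rethrow (stop retrying), or undefined to retry.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function classifyFailure(error: any): Error | undefined {
    if (error.message.startsWith("Cancel") ||
        error.message.startsWith("TimeoutError") ||
        error.name === "TimeoutError" ||
        error.message.startsWith("AbortError") ||
        error.name === "AbortError") {
        return error;
    }
    if (error?.code === "ECONNABORTED") {
        return error; // connection aborted: not worth retrying
    }
    const status = error?.response?.status ?? error?.status;
    if (status && STATUS_NO_RETRY.includes(+status)) {
        return error;
    }
    if (error?.error?.code === "insufficient_quota") {
        const err = new Error(error?.message);
        err.name = "InsufficientQuotaError";
        return err;
    }
    return undefined; // transient failure: let the retry loop try again
}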
package/dist/util/azure.cjs
CHANGED
@@ -11,12 +11,12 @@ exports.getEndpoint = void 0;
  * @property {string} config.azureOpenAIApiInstanceName - The instance name of Azure OpenAI.
  * @property {string} config.azureOpenAIApiKey - The API Key for Azure OpenAI.
  * @property {string} config.azureOpenAIBasePath - The base path for Azure OpenAI.
- * @property {string} config.
+ * @property {string} config.baseURL - Some other custom base path URL.
  *
  * The function operates as follows:
  * - If both `azureOpenAIBasePath` and `azureOpenAIApiDeploymentName` (plus `azureOpenAIApiKey`) are provided, it returns an URL combining these two parameters (`${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`).
  * - If `azureOpenAIApiKey` is provided, it checks for `azureOpenAIApiInstanceName` and `azureOpenAIApiDeploymentName` and throws an error if any of these is missing. If both are provided, it generates an URL incorporating these parameters.
- * - If none of the above conditions are met, return any custom `
+ * - If none of the above conditions are met, return any custom `baseURL`.
  * - The function returns the generated URL as a string, or undefined if no custom paths are specified.
  *
  * @throws Will throw an error if the necessary parameters for generating the URL are missing.
@@ -24,7 +24,7 @@ exports.getEndpoint = void 0;
 * @returns {string | undefined} The generated (Azure) OpenAI endpoint URL.
 */
 function getEndpoint(config) {
-    const { azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName, azureOpenAIApiKey, azureOpenAIBasePath,
+    const { azureOpenAIApiDeploymentName, azureOpenAIApiInstanceName, azureOpenAIApiKey, azureOpenAIBasePath, baseURL, } = config;
     if (azureOpenAIApiKey &&
         azureOpenAIBasePath &&
         azureOpenAIApiDeploymentName) {
@@ -39,6 +39,6 @@ function getEndpoint(config) {
         }
         return `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}`;
     }
-    return
+    return baseURL;
 }
 exports.getEndpoint = getEndpoint;
package/dist/util/azure.d.ts
CHANGED
@@ -3,7 +3,7 @@ export interface OpenAIEndpointConfig {
     azureOpenAIApiInstanceName?: string;
     azureOpenAIApiKey?: string;
     azureOpenAIBasePath?: string;
-
+    baseURL?: string;
 }
 /**
  * This function generates an endpoint URL for (Azure) OpenAI
@@ -15,12 +15,12 @@ export interface OpenAIEndpointConfig {
  * @property {string} config.azureOpenAIApiInstanceName - The instance name of Azure OpenAI.
  * @property {string} config.azureOpenAIApiKey - The API Key for Azure OpenAI.
  * @property {string} config.azureOpenAIBasePath - The base path for Azure OpenAI.
- * @property {string} config.
+ * @property {string} config.baseURL - Some other custom base path URL.
  *
  * The function operates as follows:
  * - If both `azureOpenAIBasePath` and `azureOpenAIApiDeploymentName` (plus `azureOpenAIApiKey`) are provided, it returns an URL combining these two parameters (`${azureOpenAIBasePath}/${azureOpenAIApiDeploymentName}`).
  * - If `azureOpenAIApiKey` is provided, it checks for `azureOpenAIApiInstanceName` and `azureOpenAIApiDeploymentName` and throws an error if any of these is missing. If both are provided, it generates an URL incorporating these parameters.
- * - If none of the above conditions are met, return any custom `
+ * - If none of the above conditions are met, return any custom `baseURL`.
  * - The function returns the generated URL as a string, or undefined if no custom paths are specified.
  *
  * @throws Will throw an error if the necessary parameters for generating the URL are missing.