langchain 0.0.140 → 0.0.142

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (48)
  1. package/dist/chains/openai_moderation.cjs +5 -13
  2. package/dist/chains/openai_moderation.d.ts +5 -5
  3. package/dist/chains/openai_moderation.js +6 -11
  4. package/dist/chat_models/anthropic.d.ts +2 -2
  5. package/dist/chat_models/openai.cjs +99 -215
  6. package/dist/chat_models/openai.d.ts +20 -60
  7. package/dist/chat_models/openai.js +101 -214
  8. package/dist/document_loaders/web/github.cjs +4 -0
  9. package/dist/document_loaders/web/github.js +4 -0
  10. package/dist/embeddings/openai.cjs +32 -22
  11. package/dist/embeddings/openai.d.ts +3 -3
  12. package/dist/embeddings/openai.js +34 -21
  13. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  14. package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
  15. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  16. package/dist/llms/openai-chat.cjs +69 -187
  17. package/dist/llms/openai-chat.d.ts +19 -71
  18. package/dist/llms/openai-chat.js +71 -186
  19. package/dist/llms/openai.cjs +92 -166
  20. package/dist/llms/openai.d.ts +25 -71
  21. package/dist/llms/openai.js +94 -165
  22. package/dist/load/import_map.cjs +3 -2
  23. package/dist/load/import_map.d.ts +1 -0
  24. package/dist/load/import_map.js +1 -0
  25. package/dist/prompts/chat.cjs +21 -9
  26. package/dist/prompts/chat.d.ts +3 -3
  27. package/dist/prompts/chat.js +22 -10
  28. package/dist/schema/index.d.ts +2 -2
  29. package/dist/schema/runnable.cjs +3 -0
  30. package/dist/schema/runnable.d.ts +1 -0
  31. package/dist/schema/runnable.js +3 -0
  32. package/dist/tools/convert_to_openai.d.ts +2 -2
  33. package/dist/types/openai-types.d.ts +27 -4
  34. package/dist/util/async_caller.cjs +10 -7
  35. package/dist/util/async_caller.js +10 -7
  36. package/dist/util/azure.cjs +4 -4
  37. package/dist/util/azure.d.ts +3 -3
  38. package/dist/util/azure.js +4 -4
  39. package/dist/util/openai.cjs +21 -0
  40. package/dist/util/openai.d.ts +1 -0
  41. package/dist/util/openai.js +17 -0
  42. package/dist/util/prompt-layer.cjs +1 -2
  43. package/dist/util/prompt-layer.d.ts +2 -2
  44. package/dist/util/prompt-layer.js +1 -2
  45. package/package.json +10 -2
  46. package/schema/document.cjs +1 -0
  47. package/schema/document.d.ts +1 -0
  48. package/schema/document.js +1 -0
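At a glance, most of this diff migrates langchain's OpenAI integrations from the axios-based openai v3 SDK (`Configuration`/`OpenAIApi`) to the fetch-based v4 client (`OpenAI`). A minimal sketch of the v4 surface the updated code targets, assuming openai@4; the model name and prompt are illustrative:

```ts
import { OpenAI as OpenAIClient } from "openai";

const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });

// v3 returned an axios response, so callers unwrapped `res.data`;
// v4 resolves directly to the response body.
const completion = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Hello" }],
});
console.log(completion.choices[0].message.content);
```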
@@ -1,9 +1,8 @@
- import { ChatCompletionFunctions, ConfigurationParameters, CreateChatCompletionRequest, CreateChatCompletionRequestFunctionCall, CreateChatCompletionResponse } from "openai";
+ import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
  import { StructuredTool } from "../tools/base.js";
- import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput } from "../types/openai-types.js";
- import type { StreamingAxiosConfiguration } from "../util/axios-types.js";
+ import { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput, OpenAICoreRequestOptions, LegacyOpenAIInput } from "../types/openai-types.js";
  import { BaseChatModel, BaseChatModelParams } from "./base.js";
  export { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput };
  interface TokenUsage {
@@ -15,8 +14,8 @@ interface OpenAILLMOutput {
  tokenUsage: TokenUsage;
  }
  export interface ChatOpenAICallOptions extends OpenAICallOptions {
- function_call?: CreateChatCompletionRequestFunctionCall;
- functions?: ChatCompletionFunctions[];
+ function_call?: OpenAIClient.Chat.ChatCompletionCreateParams.FunctionCallOption;
+ functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];
  tools?: StructuredTool[];
  promptIndex?: number;
  }
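The call options now reuse the v4 SDK's own parameter types instead of the removed v3 exports. A hedged sketch of values matching the new types; the function name and schema are illustrative:

```ts
import { OpenAI as OpenAIClient } from "openai";

// Illustrative function definition typed against the v4 namespace.
const functions: OpenAIClient.Chat.ChatCompletionCreateParams.Function[] = [
  {
    name: "get_weather",
    description: "Look up the weather for a city",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
];

// Forces the model to call the function above.
const function_call: OpenAIClient.Chat.ChatCompletionCreateParams.FunctionCallOption =
  { name: "get_weather" };
```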
@@ -70,79 +69,40 @@ export declare class ChatOpenAI extends BaseChatModel<ChatOpenAICallOptions> imp
  private client;
  private clientConfig;
  constructor(fields?: Partial<OpenAIChatInput> & Partial<AzureOpenAIInput> & BaseChatModelParams & {
- configuration?: ConfigurationParameters;
+ configuration?: ClientOptions & LegacyOpenAIInput;
  },
  /** @deprecated */
- configuration?: ConfigurationParameters);
+ configuration?: ClientOptions & LegacyOpenAIInput);
  /**
  * Get the parameters used to invoke the model
  */
- invocationParams(options?: this["ParsedCallOptions"]): Omit<CreateChatCompletionRequest, "messages">;
+ invocationParams(options?: this["ParsedCallOptions"]): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages">;
  /** @ignore */
- _identifyingParams(): {
- apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
- organization?: string | undefined;
- username?: string | undefined;
- password?: string | undefined;
- accessToken?: string | Promise<string> | ((name?: string | undefined, scopes?: string[] | undefined) => string) | ((name?: string | undefined, scopes?: string[] | undefined) => Promise<string>) | undefined;
- basePath?: string | undefined;
- baseOptions?: any;
- formDataCtor?: (new () => any) | undefined;
- function_call?: CreateChatCompletionRequestFunctionCall | undefined;
- stop?: import("openai").CreateChatCompletionRequestStop | undefined;
- stream?: boolean | null | undefined;
- user?: string | undefined;
- functions?: ChatCompletionFunctions[] | undefined;
- model: string;
- temperature?: number | null | undefined;
- top_p?: number | null | undefined;
- n?: number | null | undefined;
- max_tokens?: number | undefined;
- presence_penalty?: number | null | undefined;
- frequency_penalty?: number | null | undefined;
- logit_bias?: object | null | undefined;
+ _identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> & {
  model_name: string;
- };
+ } & ClientOptions;
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
- startStream(request: CreateChatCompletionRequest, options?: StreamingAxiosConfiguration): {
- next(): Promise<any>;
- [Symbol.asyncIterator](): any;
- };
  /**
  * Get the identifying parameters for the model
  */
- identifyingParams(): {
- apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
- organization?: string | undefined;
- username?: string | undefined;
- password?: string | undefined;
- accessToken?: string | Promise<string> | ((name?: string | undefined, scopes?: string[] | undefined) => string) | ((name?: string | undefined, scopes?: string[] | undefined) => Promise<string>) | undefined;
- basePath?: string | undefined;
- baseOptions?: any;
- formDataCtor?: (new () => any) | undefined;
- function_call?: CreateChatCompletionRequestFunctionCall | undefined;
- stop?: import("openai").CreateChatCompletionRequestStop | undefined;
- stream?: boolean | null | undefined;
- user?: string | undefined;
- functions?: ChatCompletionFunctions[] | undefined;
- model: string;
- temperature?: number | null | undefined;
- top_p?: number | null | undefined;
- n?: number | null | undefined;
- max_tokens?: number | undefined;
- presence_penalty?: number | null | undefined;
- frequency_penalty?: number | null | undefined;
- logit_bias?: object | null | undefined;
+ identifyingParams(): Omit<OpenAIClient.Chat.Completions.ChatCompletionCreateParams, "messages"> & {
  model_name: string;
- };
+ } & ClientOptions;
  /** @ignore */
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
  getNumTokensFromMessages(messages: BaseMessage[]): Promise<{
  totalCount: number;
  countPerMessage: number[];
  }>;
- /** @ignore */
- completionWithRetry(request: CreateChatCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateChatCompletionResponse>;
+ /**
+ * Calls the OpenAI API with retry logic in case of failures.
+ * @param request The request to send to the OpenAI API.
+ * @param options Optional configuration for the API call.
+ * @returns The response from the OpenAI API.
+ */
+ completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
+ completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
+ private _getClientOptions;
  _llmType(): string;
  /** @ignore */
  _combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;
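The replacement `completionWithRetry` is declared as an overload pair, so the compiler infers the return type from the literal `stream` flag. A usage sketch, assuming `chat` is a constructed `ChatOpenAI` instance and the model/prompt are illustrative:

```ts
// stream: true selects the AsyncIterable<ChatCompletionChunk> overload.
const stream = await chat.completionWithRetry({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Hi" }],
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}

// stream: false selects the overload resolving to a full ChatCompletion.
const completion = await chat.completionWithRetry({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Hi" }],
  stream: false,
});
```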
@@ -1,13 +1,12 @@
- import { Configuration, OpenAIApi, } from "openai";
+ import { OpenAI as OpenAIClient } from "openai";
  import { getModelNameForTiktoken } from "../base_language/count_tokens.js";
  import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, } from "../schema/index.js";
  import { formatToOpenAIFunction } from "../tools/convert_to_openai.js";
- import fetchAdapter from "../util/axios-fetch-adapter.js";
  import { getEndpoint } from "../util/azure.js";
- import { getEnvironmentVariable, isNode } from "../util/env.js";
+ import { getEnvironmentVariable } from "../util/env.js";
  import { promptLayerTrackRequest } from "../util/prompt-layer.js";
- import { readableStreamToAsyncIterable } from "../util/stream.js";
  import { BaseChatModel } from "./base.js";
+ import { wrapOpenAIClientError } from "../util/openai.js";
  function extractGenericMessageCustomRole(message) {
  if (message.role !== "system" &&
  message.role !== "assistant" &&
@@ -327,6 +326,12 @@ export class ChatOpenAI extends BaseChatModel {
  this.clientConfig = {
  apiKey: this.openAIApiKey,
  organization: this.organization,
+ baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+ dangerouslyAllowBrowser: true,
+ defaultHeaders: configuration?.baseOptions?.headers ??
+ fields?.configuration?.baseOptions?.headers,
+ defaultQuery: configuration?.baseOptions?.params ??
+ fields?.configuration?.baseOptions?.params,
  ...configuration,
  ...fields?.configuration,
  };
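The new defaults translate the legacy v3-style `configuration` fields into their v4 equivalents before the user's own values are spread over them. A hedged sketch of a legacy config that keeps working; the URL, header, and query param are illustrative:

```ts
import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI({
  configuration: {
    basePath: "https://proxy.example.com/v1", // mapped to baseURL
    baseOptions: {
      headers: { "x-team": "ml" }, // mapped to defaultHeaders
      params: { tenant: "acme" },  // mapped to defaultQuery
    },
  },
});
```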
@@ -363,8 +368,6 @@ export class ChatOpenAI extends BaseChatModel {
  ...this.clientConfig,
  };
  }
- // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
- // when we integrate OpenAI's new SDK.
  async *_streamResponseChunks(messages, options, runManager) {
  const messagesMapped = messages.map((message) => ({
  role: messageToOpenAIRole(message),
@@ -378,79 +381,32 @@ export class ChatOpenAI extends BaseChatModel {
  messages: messagesMapped,
  stream: true,
  };
- let defaultRole = "assistant";
- const streamIterable = this.startStream(params, options);
- for await (const streamedResponse of streamIterable) {
- const data = JSON.parse(streamedResponse);
- const choice = data.choices?.[0];
+ let defaultRole;
+ const streamIterable = await this.completionWithRetry(params, options);
+ for await (const data of streamIterable) {
+ const choice = data.choices[0];
  if (!choice) {
  continue;
  }
  const { delta } = choice;
  const chunk = _convertDeltaToMessageChunk(delta, defaultRole);
- defaultRole = (delta.role ??
- defaultRole);
+ defaultRole = delta.role ?? defaultRole;
+ const newTokenIndices = {
+ prompt: options.promptIndex ?? 0,
+ completion: choice.index ?? 0,
+ };
  const generationChunk = new ChatGenerationChunk({
  message: chunk,
  text: chunk.content,
+ generationInfo: newTokenIndices,
  });
  yield generationChunk;
  // eslint-disable-next-line no-void
- void runManager?.handleLLMNewToken(generationChunk.text ?? "", {
- prompt: 0,
- completion: choice.index,
- }, undefined, undefined, undefined, { chunk: generationChunk });
+ void runManager?.handleLLMNewToken(generationChunk.text ?? "", newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
+ }
+ if (options.signal?.aborted) {
+ throw new Error("AbortError");
  }
- }
- startStream(request, options) {
- let done = false;
- const stream = new TransformStream();
- const writer = stream.writable.getWriter();
- const iterable = readableStreamToAsyncIterable(stream.readable);
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- let err;
- this.completionWithRetry(request, {
- ...options,
- adapter: fetchAdapter,
- responseType: "stream",
- onmessage: (event) => {
- if (done)
- return;
- if (event.data?.trim?.() === "[DONE]") {
- done = true;
- // eslint-disable-next-line no-void
- void writer.close();
- }
- else {
- const data = JSON.parse(event.data);
- if (data.error) {
- done = true;
- throw data.error;
- }
- // eslint-disable-next-line no-void
- void writer.write(event.data);
- }
- },
- }).catch((error) => {
- if (!done) {
- err = error;
- done = true;
- // eslint-disable-next-line no-void
- void writer.close();
- }
- });
- return {
- async next() {
- const chunk = await iterable.next();
- if (err) {
- throw err;
- }
- return chunk;
- },
- [Symbol.asyncIterator]() {
- return this;
- },
- };
  }
  /**
  * Get the identifying parameters for the model
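The rewritten generator leans on the v4 SDK's native streaming: with `stream: true`, `chat.completions.create` resolves to an async iterable of `ChatCompletionChunk` objects, so the hand-rolled SSE parsing that `startStream` did becomes unnecessary. A minimal sketch of that pattern, assuming openai@4; the prompt is illustrative:

```ts
import { OpenAI as OpenAIClient } from "openai";

const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });
const stream = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Tell me a joke" }],
  stream: true,
});
let role: string | undefined;
for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta;
  role = delta?.role ?? role; // the role arrives once, on the first chunk
  process.stdout.write(delta?.content ?? "");
}
```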
@@ -469,139 +425,60 @@ export class ChatOpenAI extends BaseChatModel {
  function_call: message.additional_kwargs
  .function_call,
  }));
- const data = params.stream
- ? await new Promise((resolve, reject) => {
- let response;
- let rejected = false;
- let resolved = false;
- this.completionWithRetry({
- ...params,
- messages: messagesMapped,
- }, {
- signal: options?.signal,
- ...options?.options,
- adapter: fetchAdapter,
- responseType: "stream",
- onmessage: (event) => {
- if (event.data?.trim?.() === "[DONE]") {
- if (resolved || rejected) {
- return;
- }
- resolved = true;
- resolve(response);
- }
- else {
- const data = JSON.parse(event.data);
- if (!data.id)
- return;
- if (data?.error) {
- if (rejected) {
- return;
- }
- rejected = true;
- reject(data.error);
- return;
- }
- const message = data;
- // on the first message set the response properties
- if (!response) {
- response = {
- id: message.id,
- object: message.object,
- created: message.created,
- model: message.model,
- choices: [],
- };
- }
- // on all messages, update choice
- for (const part of message.choices ?? []) {
- if (part != null) {
- let choice = response.choices.find((c) => c.index === part.index);
- if (!choice) {
- choice = {
- index: part.index,
- finish_reason: part.finish_reason ?? undefined,
- };
- response.choices[part.index] = choice;
- }
- if (!choice.message) {
- choice.message = {
- role: part.delta
- ?.role,
- content: "",
- };
- }
- if (part.delta.function_call &&
- !choice.message.function_call) {
- choice.message.function_call = {
- name: "",
- arguments: "",
- };
- }
- choice.message.content += part.delta?.content ?? "";
- if (choice.message.function_call) {
- choice.message.function_call.name +=
- part.delta?.function_call?.name ?? "";
- choice.message.function_call.arguments +=
- part.delta?.function_call?.arguments ?? "";
- }
- const chunk = _convertDeltaToMessageChunk(part.delta, "assistant");
- const generationChunk = new ChatGenerationChunk({
- message: chunk,
- text: chunk.content,
- });
- void runManager?.handleLLMNewToken(part.delta?.content ?? "", {
- prompt: options.promptIndex ?? 0,
- completion: part.index,
- }, undefined, undefined, undefined, { chunk: generationChunk });
- }
- }
- // when all messages are finished, resolve
- if (!resolved &&
- !rejected &&
- message.choices?.every((c) => c.finish_reason != null)) {
- resolved = true;
- resolve(response);
- }
- }
- },
- }).catch((error) => {
- if (!rejected) {
- rejected = true;
- reject(error);
- }
- });
- })
- : await this.completionWithRetry({
+ if (params.stream) {
+ const stream = await this._streamResponseChunks(messages, options, runManager);
+ const finalChunks = {};
+ for await (const chunk of stream) {
+ const index = chunk.generationInfo?.completion ?? 0;
+ if (finalChunks[index] === undefined) {
+ finalChunks[index] = chunk;
+ }
+ else {
+ finalChunks[index] = finalChunks[index].concat(chunk);
+ }
+ }
+ const generations = Object.entries(finalChunks)
+ .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
+ .map(([_, value]) => value);
+ return { generations };
+ }
+ else {
+ const data = await this.completionWithRetry({
  ...params,
+ stream: false,
  messages: messagesMapped,
  }, {
  signal: options?.signal,
  ...options?.options,
  });
- const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage ?? {};
- if (completionTokens) {
- tokenUsage.completionTokens =
- (tokenUsage.completionTokens ?? 0) + completionTokens;
- }
- if (promptTokens) {
- tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
- }
- if (totalTokens) {
- tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
- }
- const generations = [];
- for (const part of data.choices) {
- const text = part.message?.content ?? "";
- generations.push({
- text,
- message: openAIResponseToChatMessage(part.message ?? { role: "assistant" }),
- });
+ const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data?.usage ?? {};
+ if (completionTokens) {
+ tokenUsage.completionTokens =
+ (tokenUsage.completionTokens ?? 0) + completionTokens;
+ }
+ if (promptTokens) {
+ tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
+ }
+ if (totalTokens) {
+ tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
+ }
+ const generations = [];
+ for (const part of data?.choices ?? []) {
+ const text = part.message?.content ?? "";
+ const generation = {
+ text,
+ message: openAIResponseToChatMessage(part.message ?? { role: "assistant" }),
+ };
+ if (part.finish_reason) {
+ generation.generationInfo = { finish_reason: part.finish_reason };
+ }
+ generations.push(generation);
+ }
+ return {
+ generations,
+ llmOutput: { tokenUsage },
+ };
  }
- return {
- generations,
- llmOutput: { tokenUsage },
- };
  }
  async getNumTokensFromMessages(messages) {
  let totalCount = 0;
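With streaming enabled, `_generate` now drains `_streamResponseChunks` and folds the chunks together with `.concat()`, one accumulator per completion index, while tokens still reach callbacks as they arrive. A hedged usage sketch against this release's API; the prompt is illustrative:

```ts
import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage } from "langchain/schema";

const model = new ChatOpenAI({ streaming: true });
// Tokens stream into the handler; the awaited result is the concatenated message.
const response = await model.call([new HumanMessage("Hi there!")], undefined, [
  {
    handleLLMNewToken(token: string) {
      process.stdout.write(token);
    },
  },
]);
console.log(response.content);
```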
@@ -629,45 +506,55 @@ export class ChatOpenAI extends BaseChatModel {
  totalCount += 3; // every reply is primed with <|start|>assistant<|message|>
  return { totalCount, countPerMessage };
  }
- /** @ignore */
  async completionWithRetry(request, options) {
+ const requestOptions = this._getClientOptions(options);
+ return this.caller.call(async () => {
+ try {
+ const res = await this.client.chat.completions.create(request, requestOptions);
+ return res;
+ }
+ catch (e) {
+ const error = wrapOpenAIClientError(e);
+ throw error;
+ }
+ });
+ }
+ _getClientOptions(options) {
  if (!this.client) {
  const openAIEndpointConfig = {
  azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
  azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
  azureOpenAIApiKey: this.azureOpenAIApiKey,
  azureOpenAIBasePath: this.azureOpenAIBasePath,
- basePath: this.clientConfig.basePath,
+ baseURL: this.clientConfig.baseURL,
  };
  const endpoint = getEndpoint(openAIEndpointConfig);
- const clientConfig = new Configuration({
+ const params = {
  ...this.clientConfig,
- basePath: endpoint,
- baseOptions: {
- timeout: this.timeout,
- ...this.clientConfig.baseOptions,
- },
- });
- this.client = new OpenAIApi(clientConfig);
+ baseURL: endpoint,
+ timeout: this.timeout,
+ maxRetries: 0,
+ };
+ if (!params.baseURL) {
+ delete params.baseURL;
+ }
+ this.client = new OpenAIClient(params);
  }
- const axiosOptions = {
- adapter: isNode() ? undefined : fetchAdapter,
- ...this.clientConfig.baseOptions,
+ const requestOptions = {
+ ...this.clientConfig,
  ...options,
  };
  if (this.azureOpenAIApiKey) {
- axiosOptions.headers = {
+ requestOptions.headers = {
  "api-key": this.azureOpenAIApiKey,
- ...axiosOptions.headers,
+ ...requestOptions.headers,
  };
- axiosOptions.params = {
+ requestOptions.query = {
  "api-version": this.azureOpenAIApiVersion,
- ...axiosOptions.params,
+ ...requestOptions.query,
  };
  }
- return this.caller
- .call(this.client.createChatCompletion.bind(this.client), request, axiosOptions)
- .then((res) => res.data);
+ return requestOptions;
  }
  _llmType() {
  return "openai";
@@ -787,7 +674,7 @@ export class PromptLayerChatOpenAI extends ChatOpenAI {
  role: messageToOpenAIRole(generation.message),
  },
  ];
- const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerChatOpenAI", messageDicts, this._identifyingParams(), this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
+ const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerChatOpenAI", { ...this._identifyingParams(), messages: messageDicts, stream: false }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
  if (this.returnPromptLayerId === true) {
  if (promptLayerRespBody.success === true) {
  promptLayerRequestId = promptLayerRespBody.request_id;
@@ -112,6 +112,9 @@ class GithubRepoLoader extends base_js_1.BaseDocumentLoader {
  this.accessToken = accessToken;
  this.ignoreFiles = ignoreFiles;
  this.verbose = verbose;
+ this.headers = {
+ "User-Agent": "langchain",
+ };
  this.caller = new async_caller_js_1.AsyncCaller({
  maxConcurrency,
  maxRetries,
@@ -122,6 +125,7 @@ class GithubRepoLoader extends base_js_1.BaseDocumentLoader {
  }
  if (this.accessToken) {
  this.headers = {
+ ...this.headers,
  Authorization: `Bearer ${this.accessToken}`,
  };
  }
@@ -106,6 +106,9 @@ export class GithubRepoLoader extends BaseDocumentLoader {
  this.accessToken = accessToken;
  this.ignoreFiles = ignoreFiles;
  this.verbose = verbose;
+ this.headers = {
+ "User-Agent": "langchain",
+ };
  this.caller = new AsyncCaller({
  maxConcurrency,
  maxRetries,
@@ -116,6 +119,7 @@ export class GithubRepoLoader extends BaseDocumentLoader {
  }
  if (this.accessToken) {
  this.headers = {
+ ...this.headers,
  Authorization: `Bearer ${this.accessToken}`,
  };
  }
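Both the CJS and ESM builds of GithubRepoLoader now seed a default `User-Agent` and spread the existing headers before adding `Authorization`, so the default survives the merge. A quick illustration of the spread order:

```ts
// Spreading the existing headers first preserves the User-Agent default.
const headers: Record<string, string> = { "User-Agent": "langchain" };
const merged = { ...headers, Authorization: "Bearer <token>" };
// => { "User-Agent": "langchain", Authorization: "Bearer <token>" }
```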
@@ -1,15 +1,12 @@
  "use strict";
- var __importDefault = (this && this.__importDefault) || function (mod) {
- return (mod && mod.__esModule) ? mod : { "default": mod };
- };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAIEmbeddings = void 0;
  const openai_1 = require("openai");
  const env_js_1 = require("../util/env.cjs");
- const axios_fetch_adapter_js_1 = __importDefault(require("../util/axios-fetch-adapter.cjs"));
  const chunk_js_1 = require("../util/chunk.cjs");
  const base_js_1 = require("./base.cjs");
  const azure_js_1 = require("../util/azure.cjs");
+ const openai_js_1 = require("../util/openai.cjs");
  /**
  * Class for generating embeddings using the OpenAI API. Extends the
  * Embeddings class and implements OpenAIEmbeddingsParams and
@@ -126,6 +123,10 @@ class OpenAIEmbeddings extends base_js_1.Embeddings {
  }
  this.clientConfig = {
  apiKey,
+ baseURL: configuration?.basePath,
+ dangerouslyAllowBrowser: true,
+ defaultHeaders: configuration?.baseOptions?.headers,
+ defaultQuery: configuration?.baseOptions?.params,
  ...configuration,
  };
  }
@@ -148,7 +149,7 @@ class OpenAIEmbeddings extends base_js_1.Embeddings {
  const batch = batches[i];
  const { data: batchResponse } = batchResponses[i];
  for (let j = 0; j < batch.length; j += 1) {
- embeddings.push(batchResponse.data[j].embedding);
+ embeddings.push(batchResponse[j].embedding);
  }
  }
  return embeddings;
@@ -164,7 +165,7 @@ class OpenAIEmbeddings extends base_js_1.Embeddings {
  model: this.modelName,
  input: this.stripNewLines ? text.replace(/\n/g, " ") : text,
  });
- return data.data[0].embedding;
+ return data[0].embedding;
  }
  /**
  * Private method to make a request to the OpenAI API to generate
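Both response-handling fixes above follow from the v4 return shape: `embeddings.create` resolves to an object whose `data` field is the array of embeddings, so after destructuring `{ data: batchResponse }` the array is indexed directly and each entry carries `.embedding`. A minimal sketch, assuming openai@4; the input text is illustrative:

```ts
import { OpenAI as OpenAIClient } from "openai";

const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });
const res = await client.embeddings.create({
  model: "text-embedding-ada-002",
  input: "hello world",
});
// res.data is an array of { index, object, embedding } entries.
const { data } = res;
console.log(data[0].embedding.length); // 1536 for ada-002
```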
@@ -180,32 +181,41 @@ class OpenAIEmbeddings extends base_js_1.Embeddings {
  azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
  azureOpenAIApiKey: this.azureOpenAIApiKey,
  azureOpenAIBasePath: this.azureOpenAIBasePath,
- basePath: this.clientConfig.basePath,
+ baseURL: this.clientConfig.baseURL,
  };
  const endpoint = (0, azure_js_1.getEndpoint)(openAIEndpointConfig);
- const clientConfig = new openai_1.Configuration({
+ const params = {
  ...this.clientConfig,
- basePath: endpoint,
- baseOptions: {
- timeout: this.timeout,
- adapter: (0, env_js_1.isNode)() ? undefined : axios_fetch_adapter_js_1.default,
- ...this.clientConfig.baseOptions,
- },
- });
- this.client = new openai_1.OpenAIApi(clientConfig);
+ baseURL: endpoint,
+ timeout: this.timeout,
+ maxRetries: 0,
+ };
+ if (!params.baseURL) {
+ delete params.baseURL;
+ }
+ this.client = new openai_1.OpenAI(params);
  }
- const axiosOptions = {};
+ const requestOptions = {};
  if (this.azureOpenAIApiKey) {
- axiosOptions.headers = {
+ requestOptions.headers = {
  "api-key": this.azureOpenAIApiKey,
- ...axiosOptions.headers,
+ ...requestOptions.headers,
  };
- axiosOptions.params = {
+ requestOptions.query = {
  "api-version": this.azureOpenAIApiVersion,
- ...axiosOptions.params,
+ ...requestOptions.query,
  };
  }
- return this.caller.call(this.client.createEmbedding.bind(this.client), request, axiosOptions);
+ return this.caller.call(async () => {
+ try {
+ const res = await this.client.embeddings.create(request, requestOptions);
+ return res;
+ }
+ catch (e) {
+ const error = (0, openai_js_1.wrapOpenAIClientError)(e);
+ throw error;
+ }
+ });
  }
  }
  exports.OpenAIEmbeddings = OpenAIEmbeddings;