langchain 0.0.140 → 0.0.142

This diff shows the changes between publicly released versions of the package as they appear in the supported registries. It is provided for informational purposes only.
Files changed (48)
  1. package/dist/chains/openai_moderation.cjs +5 -13
  2. package/dist/chains/openai_moderation.d.ts +5 -5
  3. package/dist/chains/openai_moderation.js +6 -11
  4. package/dist/chat_models/anthropic.d.ts +2 -2
  5. package/dist/chat_models/openai.cjs +99 -215
  6. package/dist/chat_models/openai.d.ts +20 -60
  7. package/dist/chat_models/openai.js +101 -214
  8. package/dist/document_loaders/web/github.cjs +4 -0
  9. package/dist/document_loaders/web/github.js +4 -0
  10. package/dist/embeddings/openai.cjs +32 -22
  11. package/dist/embeddings/openai.d.ts +3 -3
  12. package/dist/embeddings/openai.js +34 -21
  13. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  14. package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
  15. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  16. package/dist/llms/openai-chat.cjs +69 -187
  17. package/dist/llms/openai-chat.d.ts +19 -71
  18. package/dist/llms/openai-chat.js +71 -186
  19. package/dist/llms/openai.cjs +92 -166
  20. package/dist/llms/openai.d.ts +25 -71
  21. package/dist/llms/openai.js +94 -165
  22. package/dist/load/import_map.cjs +3 -2
  23. package/dist/load/import_map.d.ts +1 -0
  24. package/dist/load/import_map.js +1 -0
  25. package/dist/prompts/chat.cjs +21 -9
  26. package/dist/prompts/chat.d.ts +3 -3
  27. package/dist/prompts/chat.js +22 -10
  28. package/dist/schema/index.d.ts +2 -2
  29. package/dist/schema/runnable.cjs +3 -0
  30. package/dist/schema/runnable.d.ts +1 -0
  31. package/dist/schema/runnable.js +3 -0
  32. package/dist/tools/convert_to_openai.d.ts +2 -2
  33. package/dist/types/openai-types.d.ts +27 -4
  34. package/dist/util/async_caller.cjs +10 -7
  35. package/dist/util/async_caller.js +10 -7
  36. package/dist/util/azure.cjs +4 -4
  37. package/dist/util/azure.d.ts +3 -3
  38. package/dist/util/azure.js +4 -4
  39. package/dist/util/openai.cjs +21 -0
  40. package/dist/util/openai.d.ts +1 -0
  41. package/dist/util/openai.js +17 -0
  42. package/dist/util/prompt-layer.cjs +1 -2
  43. package/dist/util/prompt-layer.d.ts +2 -2
  44. package/dist/util/prompt-layer.js +1 -2
  45. package/package.json +10 -2
  46. package/schema/document.cjs +1 -0
  47. package/schema/document.d.ts +1 -0
  48. package/schema/document.js +1 -0
package/dist/chains/openai_moderation.cjs
@@ -1,12 +1,8 @@
 "use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIModerationChain = void 0;
 const openai_1 = require("openai");
 const base_js_1 = require("./base.cjs");
-const axios_fetch_adapter_js_1 = __importDefault(require("../util/axios-fetch-adapter.cjs"));
 const async_caller_js_1 = require("../util/async_caller.cjs");
 const env_js_1 = require("../util/env.cjs");
 /**
@@ -80,16 +76,12 @@ class OpenAIModerationChain extends base_js_1.BaseChain {
             throw new Error("OpenAI API key not found");
         }
         this.openAIOrganization = fields?.openAIOrganization;
-        this.clientConfig = new openai_1.Configuration({
+        this.clientConfig = {
             ...fields?.configuration,
             apiKey: this.openAIApiKey,
             organization: this.openAIOrganization,
-            baseOptions: {
-                adapter: axios_fetch_adapter_js_1.default,
-                ...fields?.configuration?.baseOptions,
-            },
-        });
-        this.client = new openai_1.OpenAIApi(this.clientConfig);
+        };
+        this.client = new openai_1.OpenAI(this.clientConfig);
         this.caller = new async_caller_js_1.AsyncCaller(fields ?? {});
     }
     _moderate(text, results) {
@@ -111,7 +103,7 @@ class OpenAIModerationChain extends base_js_1.BaseChain {
         };
         let mod;
         try {
-            mod = await this.caller.call(() => this.client.createModeration(moderationRequest));
+            mod = await this.caller.call(() => this.client.moderations.create(moderationRequest));
        }
         catch (error) {
             // eslint-disable-next-line no-instanceof/no-instanceof
@@ -122,7 +114,7 @@ class OpenAIModerationChain extends base_js_1.BaseChain {
                 throw new Error(error);
             }
         }
-        const output = this._moderate(text, mod.data.results[0]);
+        const output = this._moderate(text, mod.results[0]);
         return {
             [this.outputKey]: output,
         };
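
The changes above are the openai v3-to-v4 SDK migration: the axios-based Configuration/OpenAIApi pair becomes a single OpenAI client, the fetch-adapter shim is dropped, and createModeration becomes moderations.create, whose promise resolves to the response body directly rather than an axios envelope (hence mod.results instead of mod.data.results). A minimal standalone sketch of the call shape on each side of the migration (not langchain code; the env variable is illustrative):

    import { OpenAI } from "openai";

    const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

    async function isFlagged(text: string): Promise<boolean> {
      // v3: const res = await client.createModeration({ input: text });
      //     const result = res.data.results[0]; // body inside axios envelope
      const res = await client.moderations.create({ input: text });
      const result = res.results[0]; // v4 returns the body directly
      return result.flagged;
    }
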
package/dist/chains/openai_moderation.d.ts
@@ -1,4 +1,4 @@
-import { Configuration, OpenAIApi, ConfigurationParameters, CreateModerationResponseResultsInner } from "openai";
+import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
 import { BaseChain, ChainInputs } from "./base.js";
 import { ChainValues } from "../schema/index.js";
 import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js";
@@ -9,7 +9,7 @@ export interface OpenAIModerationChainInput extends ChainInputs, AsyncCallerPara
     openAIApiKey?: string;
     openAIOrganization?: string;
     throwError?: boolean;
-    configuration?: ConfigurationParameters;
+    configuration?: ClientOptions;
 }
 /**
  * Class representing a chain for moderating text using the OpenAI
@@ -25,12 +25,12 @@ export declare class OpenAIModerationChain extends BaseChain implements OpenAIMo
     outputKey: string;
     openAIApiKey?: string;
     openAIOrganization?: string;
-    clientConfig: Configuration;
-    client: OpenAIApi;
+    clientConfig: ClientOptions;
+    client: OpenAIClient;
     throwError: boolean;
     caller: AsyncCaller;
     constructor(fields?: OpenAIModerationChainInput);
-    _moderate(text: string, results: CreateModerationResponseResultsInner): string;
+    _moderate(text: string, results: OpenAIClient.Moderation): string;
     _call(values: ChainValues): Promise<ChainValues>;
     _chainType(): string;
     get inputKeys(): string[];
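
The declaration file shifts to the v4 type surface: ClientOptions replaces ConfigurationParameters, and the per-result moderation type is the namespaced OpenAIClient.Moderation. A sketch of a configuration object against the new type (field names are the v4 ClientOptions ones; the header value is a made-up example):

    import { type ClientOptions } from "openai";

    // v4 flattens the old basePath/baseOptions.{headers,params} nesting.
    const configuration: ClientOptions = {
      apiKey: process.env.OPENAI_API_KEY,
      baseURL: "https://api.openai.com/v1",
      defaultHeaders: { "x-example": "demo" }, // hypothetical header
      timeout: 10_000,
    };
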
package/dist/chains/openai_moderation.js
@@ -1,6 +1,5 @@
-import { Configuration, OpenAIApi, } from "openai";
+import { OpenAI as OpenAIClient } from "openai";
 import { BaseChain } from "./base.js";
-import fetchAdapter from "../util/axios-fetch-adapter.js";
 import { AsyncCaller } from "../util/async_caller.js";
 import { getEnvironmentVariable } from "../util/env.js";
 /**
@@ -74,16 +73,12 @@ export class OpenAIModerationChain extends BaseChain {
             throw new Error("OpenAI API key not found");
         }
         this.openAIOrganization = fields?.openAIOrganization;
-        this.clientConfig = new Configuration({
+        this.clientConfig = {
             ...fields?.configuration,
             apiKey: this.openAIApiKey,
             organization: this.openAIOrganization,
-            baseOptions: {
-                adapter: fetchAdapter,
-                ...fields?.configuration?.baseOptions,
-            },
-        });
-        this.client = new OpenAIApi(this.clientConfig);
+        };
+        this.client = new OpenAIClient(this.clientConfig);
         this.caller = new AsyncCaller(fields ?? {});
     }
     _moderate(text, results) {
@@ -105,7 +100,7 @@ export class OpenAIModerationChain extends BaseChain {
         };
         let mod;
         try {
-            mod = await this.caller.call(() => this.client.createModeration(moderationRequest));
+            mod = await this.caller.call(() => this.client.moderations.create(moderationRequest));
         }
         catch (error) {
             // eslint-disable-next-line no-instanceof/no-instanceof
@@ -116,7 +111,7 @@ export class OpenAIModerationChain extends BaseChain {
                 throw new Error(error);
             }
         }
-        const output = this._moderate(text, mod.data.results[0]);
+        const output = this._moderate(text, mod.results[0]);
         return {
             [this.outputKey]: output,
         };
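
The ESM build mirrors the CJS changes one-for-one. From a consumer's point of view the chain's surface is unchanged apart from the type of the optional configuration field; a usage sketch under that assumption (the input text is illustrative):

    import { OpenAIModerationChain } from "langchain/chains";

    const moderation = new OpenAIModerationChain({
      openAIApiKey: process.env.OPENAI_API_KEY,
      throwError: false,
      // Now typed as the v4 ClientOptions instead of ConfigurationParameters.
      configuration: { timeout: 10_000 },
    });

    const { output } = await moderation.call({ input: "some user text" });
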
package/dist/chat_models/anthropic.d.ts
@@ -100,7 +100,7 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
     _identifyingParams(): {
         metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
         stream?: boolean | undefined;
-        model: "claude-2" | (string & {}) | "claude-instant-1";
+        model: (string & {}) | "claude-2" | "claude-instant-1";
         temperature?: number | undefined;
         top_p?: number | undefined;
         max_tokens_to_sample: number;
@@ -114,7 +114,7 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
     identifyingParams(): {
         metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
         stream?: boolean | undefined;
-        model: "claude-2" | (string & {}) | "claude-instant-1";
+        model: (string & {}) | "claude-2" | "claude-instant-1";
         temperature?: number | undefined;
         top_p?: number | undefined;
         max_tokens_to_sample: number;
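
The only change in anthropic.d.ts is the order of the union members, a cosmetic artifact of regenerated declarations. The (string & {}) member is the usual TypeScript trick for accepting any string while keeping the named literals in autocomplete:

    // Both named models stay suggestible, but arbitrary strings remain legal.
    type AnthropicModel = (string & {}) | "claude-2" | "claude-instant-1";

    const a: AnthropicModel = "claude-2";        // suggested literal
    const b: AnthropicModel = "my-custom-model"; // also type-checks
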
package/dist/chat_models/openai.cjs
@@ -1,19 +1,15 @@
 "use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PromptLayerChatOpenAI = exports.ChatOpenAI = void 0;
 const openai_1 = require("openai");
 const count_tokens_js_1 = require("../base_language/count_tokens.cjs");
 const index_js_1 = require("../schema/index.cjs");
 const convert_to_openai_js_1 = require("../tools/convert_to_openai.cjs");
-const axios_fetch_adapter_js_1 = __importDefault(require("../util/axios-fetch-adapter.cjs"));
 const azure_js_1 = require("../util/azure.cjs");
 const env_js_1 = require("../util/env.cjs");
 const prompt_layer_js_1 = require("../util/prompt-layer.cjs");
-const stream_js_1 = require("../util/stream.cjs");
 const base_js_1 = require("./base.cjs");
+const openai_js_1 = require("../util/openai.cjs");
 function extractGenericMessageCustomRole(message) {
     if (message.role !== "system" &&
         message.role !== "assistant" &&
@@ -333,6 +329,12 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
         this.clientConfig = {
             apiKey: this.openAIApiKey,
             organization: this.organization,
+            baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+            dangerouslyAllowBrowser: true,
+            defaultHeaders: configuration?.baseOptions?.headers ??
+                fields?.configuration?.baseOptions?.headers,
+            defaultQuery: configuration?.baseOptions?.params ??
+                fields?.configuration?.baseOptions?.params,
             ...configuration,
             ...fields?.configuration,
         };
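
The constructor now pre-translates legacy v3 configuration fields into their v4 equivalents (basePath to baseURL, baseOptions.headers to defaultHeaders, baseOptions.params to defaultQuery) before spreading the caller's own configuration over the top, so old-style config objects keep working. Roughly, as a sketch with a hypothetical legacy config:

    // A caller still passing v3-era fields...
    const legacy = {
      basePath: "https://example.openai.azure.com", // hypothetical endpoint
      baseOptions: { headers: { "x-demo": "1" }, params: { "api-version": "x" } },
    };

    // ...lands in the v4 ClientOptions slots the constructor fills:
    const translated = {
      baseURL: legacy.basePath,
      defaultHeaders: legacy.baseOptions?.headers,
      defaultQuery: legacy.baseOptions?.params,
    };
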
@@ -369,8 +371,6 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
             ...this.clientConfig,
         };
     }
-    // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
-    // when we integrate OpenAI's new SDK.
     async *_streamResponseChunks(messages, options, runManager) {
         const messagesMapped = messages.map((message) => ({
             role: messageToOpenAIRole(message),
@@ -384,79 +384,32 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
             messages: messagesMapped,
             stream: true,
         };
-        let defaultRole = "assistant";
-        const streamIterable = this.startStream(params, options);
-        for await (const streamedResponse of streamIterable) {
-            const data = JSON.parse(streamedResponse);
-            const choice = data.choices?.[0];
+        let defaultRole;
+        const streamIterable = await this.completionWithRetry(params, options);
+        for await (const data of streamIterable) {
+            const choice = data.choices[0];
             if (!choice) {
                 continue;
             }
             const { delta } = choice;
             const chunk = _convertDeltaToMessageChunk(delta, defaultRole);
-            defaultRole = (delta.role ??
-                defaultRole);
+            defaultRole = delta.role ?? defaultRole;
+            const newTokenIndices = {
+                prompt: options.promptIndex ?? 0,
+                completion: choice.index ?? 0,
+            };
             const generationChunk = new index_js_1.ChatGenerationChunk({
                 message: chunk,
                 text: chunk.content,
+                generationInfo: newTokenIndices,
             });
             yield generationChunk;
             // eslint-disable-next-line no-void
-            void runManager?.handleLLMNewToken(generationChunk.text ?? "", {
-                prompt: 0,
-                completion: choice.index,
-            }, undefined, undefined, undefined, { chunk: generationChunk });
+            void runManager?.handleLLMNewToken(generationChunk.text ?? "", newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
+        }
+        if (options.signal?.aborted) {
+            throw new Error("AbortError");
         }
-    }
-    startStream(request, options) {
-        let done = false;
-        const stream = new TransformStream();
-        const writer = stream.writable.getWriter();
-        const iterable = (0, stream_js_1.readableStreamToAsyncIterable)(stream.readable);
-        // eslint-disable-next-line @typescript-eslint/no-explicit-any
-        let err;
-        this.completionWithRetry(request, {
-            ...options,
-            adapter: axios_fetch_adapter_js_1.default,
-            responseType: "stream",
-            onmessage: (event) => {
-                if (done)
-                    return;
-                if (event.data?.trim?.() === "[DONE]") {
-                    done = true;
-                    // eslint-disable-next-line no-void
-                    void writer.close();
-                }
-                else {
-                    const data = JSON.parse(event.data);
-                    if (data.error) {
-                        done = true;
-                        throw data.error;
-                    }
-                    // eslint-disable-next-line no-void
-                    void writer.write(event.data);
-                }
-            },
-        }).catch((error) => {
-            if (!done) {
-                err = error;
-                done = true;
-                // eslint-disable-next-line no-void
-                void writer.close();
-            }
-        });
-        return {
-            async next() {
-                const chunk = await iterable.next();
-                if (err) {
-                    throw err;
-                }
-                return chunk;
-            },
-            [Symbol.asyncIterator]() {
-                return this;
-            },
-        };
     }
     /**
      * Get the identifying parameters for the model
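
With stream: true, the v4 SDK's create() resolves to an async iterable of already-parsed chunks, so the hand-rolled startStream plumbing (TransformStream, SSE onmessage parsing, the [DONE] sentinel) disappears and _streamResponseChunks reduces to a for await loop. The underlying SDK behavior, sketched standalone (model name illustrative):

    import { OpenAI } from "openai";

    const client = new OpenAI();

    // No manual SSE handling: each chunk arrives as a parsed object.
    const stream = await client.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Hello" }],
      stream: true,
    });

    for await (const chunk of stream) {
      process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
    }
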
@@ -475,139 +428,60 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
             function_call: message.additional_kwargs
                 .function_call,
         }));
-        const data = params.stream
-            ? await new Promise((resolve, reject) => {
-                let response;
-                let rejected = false;
-                let resolved = false;
-                this.completionWithRetry({
-                    ...params,
-                    messages: messagesMapped,
-                }, {
-                    signal: options?.signal,
-                    ...options?.options,
-                    adapter: axios_fetch_adapter_js_1.default,
-                    responseType: "stream",
-                    onmessage: (event) => {
-                        if (event.data?.trim?.() === "[DONE]") {
-                            if (resolved || rejected) {
-                                return;
-                            }
-                            resolved = true;
-                            resolve(response);
-                        }
-                        else {
-                            const data = JSON.parse(event.data);
-                            if (!data.id)
-                                return;
-                            if (data?.error) {
-                                if (rejected) {
-                                    return;
-                                }
-                                rejected = true;
-                                reject(data.error);
-                                return;
-                            }
-                            const message = data;
-                            // on the first message set the response properties
-                            if (!response) {
-                                response = {
-                                    id: message.id,
-                                    object: message.object,
-                                    created: message.created,
-                                    model: message.model,
-                                    choices: [],
-                                };
-                            }
-                            // on all messages, update choice
-                            for (const part of message.choices ?? []) {
-                                if (part != null) {
-                                    let choice = response.choices.find((c) => c.index === part.index);
-                                    if (!choice) {
-                                        choice = {
-                                            index: part.index,
-                                            finish_reason: part.finish_reason ?? undefined,
-                                        };
-                                        response.choices[part.index] = choice;
-                                    }
-                                    if (!choice.message) {
-                                        choice.message = {
-                                            role: part.delta
-                                                ?.role,
-                                            content: "",
-                                        };
-                                    }
-                                    if (part.delta.function_call &&
-                                        !choice.message.function_call) {
-                                        choice.message.function_call = {
-                                            name: "",
-                                            arguments: "",
-                                        };
-                                    }
-                                    choice.message.content += part.delta?.content ?? "";
-                                    if (choice.message.function_call) {
-                                        choice.message.function_call.name +=
-                                            part.delta?.function_call?.name ?? "";
-                                        choice.message.function_call.arguments +=
-                                            part.delta?.function_call?.arguments ?? "";
-                                    }
-                                    const chunk = _convertDeltaToMessageChunk(part.delta, "assistant");
-                                    const generationChunk = new index_js_1.ChatGenerationChunk({
-                                        message: chunk,
-                                        text: chunk.content,
-                                    });
-                                    void runManager?.handleLLMNewToken(part.delta?.content ?? "", {
-                                        prompt: options.promptIndex ?? 0,
-                                        completion: part.index,
-                                    }, undefined, undefined, undefined, { chunk: generationChunk });
-                                }
-                            }
-                            // when all messages are finished, resolve
-                            if (!resolved &&
-                                !rejected &&
-                                message.choices?.every((c) => c.finish_reason != null)) {
-                                resolved = true;
-                                resolve(response);
-                            }
-                        }
-                    },
-                }).catch((error) => {
-                    if (!rejected) {
-                        rejected = true;
-                        reject(error);
-                    }
-                });
-            })
-            : await this.completionWithRetry({
+        if (params.stream) {
+            const stream = await this._streamResponseChunks(messages, options, runManager);
+            const finalChunks = {};
+            for await (const chunk of stream) {
+                const index = chunk.generationInfo?.completion ?? 0;
+                if (finalChunks[index] === undefined) {
+                    finalChunks[index] = chunk;
+                }
+                else {
+                    finalChunks[index] = finalChunks[index].concat(chunk);
+                }
+            }
+            const generations = Object.entries(finalChunks)
+                .sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
+                .map(([_, value]) => value);
+            return { generations };
+        }
+        else {
+            const data = await this.completionWithRetry({
                 ...params,
+                stream: false,
                 messages: messagesMapped,
             }, {
                 signal: options?.signal,
                 ...options?.options,
             });
-        const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage ?? {};
-        if (completionTokens) {
-            tokenUsage.completionTokens =
-                (tokenUsage.completionTokens ?? 0) + completionTokens;
-        }
-        if (promptTokens) {
-            tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
-        }
-        if (totalTokens) {
-            tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
-        }
-        const generations = [];
-        for (const part of data.choices) {
-            const text = part.message?.content ?? "";
-            generations.push({
-                text,
-                message: openAIResponseToChatMessage(part.message ?? { role: "assistant" }),
-            });
+            const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data?.usage ?? {};
+            if (completionTokens) {
+                tokenUsage.completionTokens =
+                    (tokenUsage.completionTokens ?? 0) + completionTokens;
+            }
+            if (promptTokens) {
+                tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens;
+            }
+            if (totalTokens) {
+                tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens;
+            }
+            const generations = [];
+            for (const part of data?.choices ?? []) {
+                const text = part.message?.content ?? "";
+                const generation = {
+                    text,
+                    message: openAIResponseToChatMessage(part.message ?? { role: "assistant" }),
+                };
+                if (part.finish_reason) {
+                    generation.generationInfo = { finish_reason: part.finish_reason };
+                }
+                generations.push(generation);
+            }
+            return {
+                generations,
+                llmOutput: { tokenUsage },
+            };
         }
-        return {
-            generations,
-            llmOutput: { tokenUsage },
-        };
     }
     async getNumTokensFromMessages(messages) {
         let totalCount = 0;
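
_generate now reuses the streaming path when stream is set: chunks are bucketed by their completion index (carried in generationInfo) and folded together with ChatGenerationChunk.concat, then the buckets are sorted numerically so choice 10 does not land before choice 2. The same keyed-reduce pattern over plain strings, as a sketch:

    // Stand-ins for streamed chunks tagged with a choice index.
    const pieces = [
      { index: 0, text: "Hel" },
      { index: 0, text: "lo" },
      { index: 1, text: "Hi" },
    ];

    const byIndex: Record<number, string> = {};
    for (const { index, text } of pieces) {
      byIndex[index] = (byIndex[index] ?? "") + text;
    }

    const generations = Object.entries(byIndex)
      .sort(([a], [b]) => parseInt(a, 10) - parseInt(b, 10))
      .map(([, text]) => text); // ["Hello", "Hi"]
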
@@ -635,45 +509,55 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
         totalCount += 3; // every reply is primed with <|start|>assistant<|message|>
         return { totalCount, countPerMessage };
     }
-    /** @ignore */
     async completionWithRetry(request, options) {
+        const requestOptions = this._getClientOptions(options);
+        return this.caller.call(async () => {
+            try {
+                const res = await this.client.chat.completions.create(request, requestOptions);
+                return res;
+            }
+            catch (e) {
+                const error = (0, openai_js_1.wrapOpenAIClientError)(e);
+                throw error;
+            }
+        });
+    }
+    _getClientOptions(options) {
         if (!this.client) {
             const openAIEndpointConfig = {
                 azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
                 azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                 azureOpenAIApiKey: this.azureOpenAIApiKey,
                 azureOpenAIBasePath: this.azureOpenAIBasePath,
-                basePath: this.clientConfig.basePath,
+                baseURL: this.clientConfig.baseURL,
             };
             const endpoint = (0, azure_js_1.getEndpoint)(openAIEndpointConfig);
-            const clientConfig = new openai_1.Configuration({
+            const params = {
                 ...this.clientConfig,
-                basePath: endpoint,
-                baseOptions: {
-                    timeout: this.timeout,
-                    ...this.clientConfig.baseOptions,
-                },
-            });
-            this.client = new openai_1.OpenAIApi(clientConfig);
+                baseURL: endpoint,
+                timeout: this.timeout,
+                maxRetries: 0,
+            };
+            if (!params.baseURL) {
+                delete params.baseURL;
+            }
+            this.client = new openai_1.OpenAI(params);
         }
-        const axiosOptions = {
-            adapter: (0, env_js_1.isNode)() ? undefined : axios_fetch_adapter_js_1.default,
-            ...this.clientConfig.baseOptions,
+        const requestOptions = {
+            ...this.clientConfig,
             ...options,
         };
         if (this.azureOpenAIApiKey) {
-            axiosOptions.headers = {
+            requestOptions.headers = {
                 "api-key": this.azureOpenAIApiKey,
-                ...axiosOptions.headers,
+                ...requestOptions.headers,
             };
-            axiosOptions.params = {
+            requestOptions.query = {
                 "api-version": this.azureOpenAIApiVersion,
-                ...axiosOptions.params,
+                ...requestOptions.query,
             };
         }
-        return this.caller
-            .call(this.client.createChatCompletion.bind(this.client), request, axiosOptions)
-            .then((res) => res.data);
+        return requestOptions;
     }
     _llmType() {
         return "openai";
@@ -794,7 +678,7 @@ class PromptLayerChatOpenAI extends ChatOpenAI {
                 role: messageToOpenAIRole(generation.message),
             },
         ];
-        const promptLayerRespBody = await (0, prompt_layer_js_1.promptLayerTrackRequest)(this.caller, "langchain.PromptLayerChatOpenAI", messageDicts, this._identifyingParams(), this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
+        const promptLayerRespBody = await (0, prompt_layer_js_1.promptLayerTrackRequest)(this.caller, "langchain.PromptLayerChatOpenAI", { ...this._identifyingParams(), messages: messageDicts, stream: false }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
         if (this.returnPromptLayerId === true) {
             if (promptLayerRespBody.success === true) {
                 promptLayerRequestId = promptLayerRespBody.request_id;
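
The PromptLayer helper changes signature in step (see util/prompt-layer.* in the file list): the messages and model parameters are merged into a single request-body object instead of two positional arguments, so the tracked body matches what was actually sent to OpenAI. Approximately (a sketch; variable names taken from the surrounding hunk):

    // Before: promptLayerTrackRequest(caller, name, messageDicts, params, tags, ...)
    // After: one body object takes their place.
    const requestBody = {
      ...identifyingParams, // model, temperature, etc.
      messages: messageDicts,
      stream: false,
    };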