langchain 0.0.140 → 0.0.141

This diff compares the contents of two publicly available package versions as released to their public registry. It is provided for informational purposes only.
Files changed (42)
  1. package/dist/chains/openai_moderation.cjs +5 -13
  2. package/dist/chains/openai_moderation.d.ts +5 -5
  3. package/dist/chains/openai_moderation.js +6 -11
  4. package/dist/chat_models/anthropic.d.ts +2 -2
  5. package/dist/chat_models/openai.cjs +99 -215
  6. package/dist/chat_models/openai.d.ts +20 -60
  7. package/dist/chat_models/openai.js +101 -214
  8. package/dist/document_loaders/web/github.cjs +4 -0
  9. package/dist/document_loaders/web/github.js +4 -0
  10. package/dist/embeddings/openai.cjs +32 -22
  11. package/dist/embeddings/openai.d.ts +3 -3
  12. package/dist/embeddings/openai.js +34 -21
  13. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  14. package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
  15. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  16. package/dist/llms/openai-chat.cjs +69 -187
  17. package/dist/llms/openai-chat.d.ts +19 -71
  18. package/dist/llms/openai-chat.js +71 -186
  19. package/dist/llms/openai.cjs +92 -166
  20. package/dist/llms/openai.d.ts +25 -71
  21. package/dist/llms/openai.js +94 -165
  22. package/dist/load/import_map.cjs +3 -2
  23. package/dist/load/import_map.d.ts +1 -0
  24. package/dist/load/import_map.js +1 -0
  25. package/dist/schema/index.d.ts +2 -2
  26. package/dist/tools/convert_to_openai.d.ts +2 -2
  27. package/dist/types/openai-types.d.ts +27 -4
  28. package/dist/util/async_caller.cjs +10 -7
  29. package/dist/util/async_caller.js +10 -7
  30. package/dist/util/azure.cjs +4 -4
  31. package/dist/util/azure.d.ts +3 -3
  32. package/dist/util/azure.js +4 -4
  33. package/dist/util/openai.cjs +21 -0
  34. package/dist/util/openai.d.ts +1 -0
  35. package/dist/util/openai.js +17 -0
  36. package/dist/util/prompt-layer.cjs +1 -2
  37. package/dist/util/prompt-layer.d.ts +2 -2
  38. package/dist/util/prompt-layer.js +1 -2
  39. package/package.json +10 -2
  40. package/schema/document.cjs +1 -0
  41. package/schema/document.d.ts +1 -0
  42. package/schema/document.js +1 -0
package/dist/llms/openai.cjs
@@ -1,20 +1,16 @@
  "use strict";
- var __importDefault = (this && this.__importDefault) || function (mod) {
-     return (mod && mod.__esModule) ? mod : { "default": mod };
- };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.PromptLayerOpenAIChat = exports.OpenAIChat = exports.PromptLayerOpenAI = exports.OpenAI = void 0;
  const openai_1 = require("openai");
  const count_tokens_js_1 = require("../base_language/count_tokens.cjs");
  const index_js_1 = require("../schema/index.cjs");
- const axios_fetch_adapter_js_1 = __importDefault(require("../util/axios-fetch-adapter.cjs"));
  const azure_js_1 = require("../util/azure.cjs");
  const chunk_js_1 = require("../util/chunk.cjs");
  const env_js_1 = require("../util/env.cjs");
  const prompt_layer_js_1 = require("../util/prompt-layer.cjs");
- const stream_js_1 = require("../util/stream.cjs");
  const base_js_1 = require("./base.cjs");
  const openai_chat_js_1 = require("./openai-chat.cjs");
+ const openai_js_1 = require("../util/openai.cjs");
  /**
   * Wrapper around OpenAI large language models.
   *
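The import changes above are the heart of this release: the axios fetch adapter and the hand-rolled SSE stream helper are dropped in favor of the fetch-based `openai` v4 SDK, plus a small new `../util/openai.cjs` module (files 33–35 in the list) exporting `wrapOpenAIClientError`. As a rough sketch of what the SDK migration means at a call site (illustrative code, not from the package):

```ts
import OpenAI from "openai";

// v3 (0.0.140) built a client from a Configuration object and went through
// axios, so non-Node builds needed a fetch adapter:
//   const client = new OpenAIApi(new Configuration({ apiKey }));
//   const res = await client.createCompletion({ model, prompt });
//   const text = res.data.choices[0].text; // axios wraps the body in .data

// v4 (0.0.141) is fetch-based, so no adapter is needed:
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const res = await client.completions.create({
  model: "text-davinci-003",
  prompt: "Say hello",
});
console.log(res.choices[0].text); // no .data wrapper in v4
```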
@@ -274,6 +270,12 @@ class OpenAI extends base_js_1.BaseLLM {
          this.clientConfig = {
              apiKey: this.openAIApiKey,
              organization: this.organization,
+             baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+             dangerouslyAllowBrowser: true,
+             defaultHeaders: configuration?.baseOptions?.headers ??
+                 fields?.configuration?.baseOptions?.headers,
+             defaultQuery: configuration?.baseOptions?.params ??
+                 fields?.configuration?.baseOptions?.params,
              ...configuration,
              ...fields?.configuration,
          };
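The constructor now maps the legacy v3-style `configuration` fields onto their v4 `ClientOptions` equivalents (`basePath` → `baseURL`, `baseOptions.headers` → `defaultHeaders`, `baseOptions.params` → `defaultQuery`), so configurations written against 0.0.140 keep working. A minimal sketch of the two equivalent spellings (the proxy URL, header, and query values are illustrative):

```ts
import { OpenAI } from "langchain/llms/openai";

// Legacy (v3-style) configuration, still accepted via LegacyOpenAIInput:
const legacy = new OpenAI({
  configuration: {
    basePath: "https://my-proxy.example.com/v1",  // mapped to baseURL
    baseOptions: {
      headers: { "X-Team": "ml" },                // mapped to defaultHeaders
      params: { tenant: "acme" },                 // mapped to defaultQuery
    },
  },
});

// Equivalent v4-native ClientOptions:
const native = new OpenAI({
  configuration: {
    baseURL: "https://my-proxy.example.com/v1",
    defaultHeaders: { "X-Team": "ml" },
    defaultQuery: { tenant: "acme" },
  },
});
```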
@@ -298,6 +300,7 @@ class OpenAI extends base_js_1.BaseLLM {
              ...this.modelKwargs,
          };
      }
+     /** @ignore */
      _identifyingParams() {
          return {
              model_name: this.modelName,
@@ -344,94 +347,62 @@ class OpenAI extends base_js_1.BaseLLM {
          }
          for (let i = 0; i < subPrompts.length; i += 1) {
              const data = params.stream
-                 ? await new Promise((resolve, reject) => {
+                 ? await (async () => {
                      const choices = [];
                      let response;
-                     let rejected = false;
-                     let resolved = false;
-                     this.completionWithRetry({
+                     const stream = await this.completionWithRetry({
                          ...params,
+                         stream: true,
                          prompt: subPrompts[i],
-                     }, {
-                         signal: options.signal,
-                         ...options.options,
-                         adapter: axios_fetch_adapter_js_1.default,
-                         responseType: "stream",
-                         onmessage: (event) => {
-                             if (event.data?.trim?.() === "[DONE]") {
-                                 if (resolved || rejected) {
-                                     return;
-                                 }
-                                 resolved = true;
-                                 resolve({
-                                     ...response,
-                                     choices,
-                                 });
+                     }, options);
+                     for await (const message of stream) {
+                         // on the first message set the response properties
+                         if (!response) {
+                             response = {
+                                 id: message.id,
+                                 object: message.object,
+                                 created: message.created,
+                                 model: message.model,
+                             };
+                         }
+                         // on all messages, update choice
+                         for (const part of message.choices) {
+                             if (!choices[part.index]) {
+                                 choices[part.index] = part;
                              }
                              else {
-                                 const data = JSON.parse(event.data);
-                                 if (data?.error) {
-                                     if (rejected) {
-                                         return;
-                                     }
-                                     rejected = true;
-                                     reject(data.error);
-                                     return;
-                                 }
-                                 const message = data;
-                                 // on the first message set the response properties
-                                 if (!response) {
-                                     response = {
-                                         id: message.id,
-                                         object: message.object,
-                                         created: message.created,
-                                         model: message.model,
-                                     };
-                                 }
-                                 // on all messages, update choice
-                                 for (const part of message.choices) {
-                                     if (part != null && part.index != null) {
-                                         if (!choices[part.index])
-                                             choices[part.index] = {};
-                                         const choice = choices[part.index];
-                                         choice.text = (choice.text ?? "") + (part.text ?? "");
-                                         choice.finish_reason = part.finish_reason;
-                                         choice.logprobs = part.logprobs;
-                                         // eslint-disable-next-line no-void
-                                         void runManager?.handleLLMNewToken(part.text ?? "", {
-                                             prompt: Math.floor(part.index / this.n),
-                                             completion: part.index % this.n,
-                                         });
-                                     }
-                                 }
-                                 // when all messages are finished, resolve
-                                 if (!resolved &&
-                                     !rejected &&
-                                     choices.every((c) => c.finish_reason != null)) {
-                                     resolved = true;
-                                     resolve({
-                                         ...response,
-                                         choices,
-                                     });
-                                 }
+                                 const choice = choices[part.index];
+                                 choice.text += part.text;
+                                 choice.finish_reason = part.finish_reason;
+                                 choice.logprobs = part.logprobs;
                              }
-                         },
-                     }).catch((error) => {
-                         if (!rejected) {
-                             rejected = true;
-                             reject(error);
+                             void runManager?.handleLLMNewToken(part.text, {
+                                 prompt: Math.floor(part.index / this.n),
+                                 completion: part.index % this.n,
+                             });
                          }
-                     });
-                 })
+                     }
+                     if (options.signal?.aborted) {
+                         throw new Error("AbortError");
+                     }
+                     return { ...response, choices };
+                 })()
                  : await this.completionWithRetry({
                      ...params,
+                     stream: false,
                      prompt: subPrompts[i],
                  }, {
                      signal: options.signal,
                      ...options.options,
                  });
              choices.push(...data.choices);
-             const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage ?? {};
+             const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage
+                 ? data.usage
+                 : {
+                     completion_tokens: undefined,
+                     prompt_tokens: undefined,
+                     total_tokens: undefined,
+                 };
              if (completionTokens) {
                  tokenUsage.completionTokens =
                      (tokenUsage.completionTokens ?? 0) + completionTokens;
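The streaming branch of `_generate` loses the `new Promise` + `onmessage` SSE plumbing: in the v4 SDK, `completions.create({ stream: true, ... })` resolves to an async iterable, so partial choices can be accumulated with a plain `for await` loop. The same pattern against the SDK directly (a standalone sketch, model and prompt illustrative):

```ts
import OpenAI from "openai";

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const stream = await client.completions.create({
  model: "text-davinci-003",
  prompt: "Write a haiku about package diffs",
  stream: true,
});

// Accumulate partial choices by index, mirroring the loop in the diff above.
const choices: { text: string }[] = [];
for await (const chunk of stream) {
  for (const part of chunk.choices) {
    if (!choices[part.index]) {
      choices[part.index] = { text: part.text };
    } else {
      choices[part.index].text += part.text;
    }
  }
}
console.log(choices[0]?.text);
```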
@@ -455,18 +426,16 @@ class OpenAI extends base_js_1.BaseLLM {
              llmOutput: { tokenUsage },
          };
      }
-     // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
-     // when we integrate OpenAI's new SDK.
+     // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation?
      async *_streamResponseChunks(input, options, runManager) {
          const params = {
              ...this.invocationParams(options),
              prompt: input,
              stream: true,
          };
-         const streamIterable = this.startStream(params, options);
-         for await (const streamedResponse of streamIterable) {
-             const data = JSON.parse(streamedResponse);
-             const choice = data.choices?.[0];
+         const stream = await this.completionWithRetry(params, options);
+         for await (const data of stream) {
+             const choice = data.choices[0];
              if (!choice) {
                  continue;
              }
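`_streamResponseChunks` gets the same treatment: it iterates the SDK stream directly instead of routing SSE payloads through the removed `startStream` helper. From user code the streaming behavior is unchanged; for example, token-by-token output still works through the callback API of this release (a sketch, model name illustrative):

```ts
import { OpenAI } from "langchain/llms/openai";

const model = new OpenAI({
  modelName: "text-davinci-003",
  streaming: true,
  callbacks: [
    {
      // Invoked via runManager?.handleLLMNewToken in the diff above.
      handleLLMNewToken(token: string) {
        process.stdout.write(token);
      },
    },
  ],
});

await model.call("Tell me a joke.");
```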
@@ -474,103 +443,71 @@ class OpenAI extends base_js_1.BaseLLM {
                  text: choice.text,
                  generationInfo: {
                      finishReason: choice.finish_reason,
-                     logprobs: choice.logprobs,
                  },
              });
              yield chunk;
              // eslint-disable-next-line no-void
              void runManager?.handleLLMNewToken(chunk.text ?? "");
          }
+         if (options.signal?.aborted) {
+             throw new Error("AbortError");
+         }
      }
-     startStream(request, options) {
-         let done = false;
-         const stream = new TransformStream();
-         const writer = stream.writable.getWriter();
-         const iterable = (0, stream_js_1.readableStreamToAsyncIterable)(stream.readable);
-         // eslint-disable-next-line @typescript-eslint/no-explicit-any
-         let err;
-         this.completionWithRetry(request, {
-             ...options,
-             adapter: axios_fetch_adapter_js_1.default,
-             responseType: "stream",
-             onmessage: (event) => {
-                 if (done)
-                     return;
-                 if (event.data?.trim?.() === "[DONE]") {
-                     done = true;
-                     // eslint-disable-next-line no-void
-                     void writer.close();
-                 }
-                 else {
-                     const data = JSON.parse(event.data);
-                     if (data.error) {
-                         done = true;
-                         throw data.error;
-                     }
-                     // eslint-disable-next-line no-void
-                     void writer.write(event.data);
-                 }
-             },
-         }).catch((error) => {
-             if (!done) {
-                 err = error;
-                 done = true;
-                 // eslint-disable-next-line no-void
-                 void writer.close();
+     async completionWithRetry(request, options) {
+         const requestOptions = this._getClientOptions(options);
+         return this.caller.call(async () => {
+             try {
+                 const res = await this.client.completions.create(request, requestOptions);
+                 return res;
+             }
+             catch (e) {
+                 const error = (0, openai_js_1.wrapOpenAIClientError)(e);
+                 throw error;
              }
          });
-         return {
-             async next() {
-                 const chunk = await iterable.next();
-                 if (err) {
-                     throw err;
-                 }
-                 return chunk;
-             },
-             [Symbol.asyncIterator]() {
-                 return this;
-             },
-         };
      }
-     /** @ignore */
-     async completionWithRetry(request, options) {
+     /**
+      * Calls the OpenAI API with retry logic in case of failures.
+      * @param request The request to send to the OpenAI API.
+      * @param options Optional configuration for the API call.
+      * @returns The response from the OpenAI API.
+      */
+     _getClientOptions(options) {
          if (!this.client) {
              const openAIEndpointConfig = {
                  azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
                  azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
                  azureOpenAIApiKey: this.azureOpenAIApiKey,
                  azureOpenAIBasePath: this.azureOpenAIBasePath,
-                 basePath: this.clientConfig.basePath,
+                 baseURL: this.clientConfig.baseURL,
              };
              const endpoint = (0, azure_js_1.getEndpoint)(openAIEndpointConfig);
-             const clientConfig = new openai_1.Configuration({
+             const params = {
                  ...this.clientConfig,
-                 basePath: endpoint,
-                 baseOptions: {
-                     timeout: this.timeout,
-                     ...this.clientConfig.baseOptions,
-                 },
-             });
-             this.client = new openai_1.OpenAIApi(clientConfig);
+                 baseURL: endpoint,
+                 timeout: this.timeout,
+                 maxRetries: 0,
+             };
+             if (!params.baseURL) {
+                 delete params.baseURL;
+             }
+             this.client = new openai_1.OpenAI(params);
          }
-         const axiosOptions = {
-             adapter: (0, env_js_1.isNode)() ? undefined : axios_fetch_adapter_js_1.default,
-             ...this.clientConfig.baseOptions,
+         const requestOptions = {
+             ...this.clientConfig,
             ...options,
          };
          if (this.azureOpenAIApiKey) {
-             axiosOptions.headers = {
+             requestOptions.headers = {
                  "api-key": this.azureOpenAIApiKey,
-                 ...axiosOptions.headers,
+                 ...requestOptions.headers,
              };
-             axiosOptions.params = {
+             requestOptions.query = {
                  "api-version": this.azureOpenAIApiVersion,
-                 ...axiosOptions.params,
+                 ...requestOptions.query,
              };
          }
-         return this.caller
-             .call(this.client.createCompletion.bind(this.client), request, axiosOptions)
-             .then((res) => res.data);
+         return requestOptions;
      }
      _llmType() {
          return "openai";
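`completionWithRetry` is now a thin wrapper around `client.completions.create`, with client construction and per-request options factored into `_getClientOptions`. Two details worth noting: the v4 client is created with `maxRetries: 0` so retrying stays solely with LangChain's `AsyncCaller`, and Azure credentials move from axios `baseOptions`/`params` to the v4 per-request `headers`/`query` options. A sketch of that request shape against the SDK directly (the endpoint, deployment, and API version are illustrative):

```ts
import OpenAI from "openai";

const client = new OpenAI({
  apiKey: "placeholder",             // Azure auth is sent via the header below
  baseURL:
    "https://my-instance.openai.azure.com/openai/deployments/my-deployment",
  timeout: 60_000,
  maxRetries: 0, // retries are left to LangChain's AsyncCaller
});

const res = await client.completions.create(
  { model: "text-davinci-003", prompt: "ping" },
  {
    // v4 per-request options, mirroring _getClientOptions above:
    headers: { "api-key": process.env.AZURE_OPENAI_API_KEY ?? "" },
    query: { "api-version": "2023-05-15" },
  }
);
console.log(res.choices[0].text);
```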
@@ -622,19 +559,6 @@ class PromptLayerOpenAI extends OpenAI {
              throw new Error("Missing PromptLayer API key");
          }
      }
-     /**
-      * Calls the OpenAI API with retry logic in case of failures.
-      * @param request The request to send to the OpenAI API.
-      * @param options Optional configuration for the API call.
-      * @returns The response from the OpenAI API.
-      */
-     async completionWithRetry(request, options) {
-         if (request.stream) {
-             return super.completionWithRetry(request, options);
-         }
-         const response = await super.completionWithRetry(request);
-         return response;
-     }
      async _generate(prompts, options, runManager) {
          const requestStartTime = Date.now();
          const generations = await super._generate(prompts, options, runManager);
@@ -644,7 +568,9 @@ class PromptLayerOpenAI extends OpenAI {
                  text: generations.generations[i][0].text,
                  llm_output: generations.llmOutput,
              };
-             const promptLayerRespBody = await (0, prompt_layer_js_1.promptLayerTrackRequest)(this.caller, "langchain.PromptLayerOpenAI", [prompts[i]], this._identifyingParams(), this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
+             const promptLayerRespBody = await (0, prompt_layer_js_1.promptLayerTrackRequest)(this.caller, "langchain.PromptLayerOpenAI",
+             // eslint-disable-next-line @typescript-eslint/no-explicit-any
+             { ...this._identifyingParams(), prompt: prompts[i] }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
              let promptLayerRequestId;
              if (this.returnPromptLayerId === true) {
                  if (promptLayerRespBody && promptLayerRespBody.success === true) {
package/dist/llms/openai.d.ts
@@ -1,8 +1,7 @@
- import { ConfigurationParameters, CreateCompletionRequest, CreateCompletionResponse } from "openai";
+ import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
  import { GenerationChunk, LLMResult } from "../schema/index.js";
- import { AzureOpenAIInput, OpenAICallOptions, OpenAIInput } from "../types/openai-types.js";
- import type { StreamingAxiosConfiguration } from "../util/axios-types.js";
+ import { AzureOpenAIInput, OpenAICallOptions, OpenAICoreRequestOptions, OpenAIInput, LegacyOpenAIInput } from "../types/openai-types.js";
  import { BaseLLM, BaseLLMParams } from "./base.js";
  export { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
  /**
@@ -56,71 +55,24 @@ export declare class OpenAI extends BaseLLM<OpenAICallOptions> implements OpenAI
      private client;
      private clientConfig;
      constructor(fields?: Partial<OpenAIInput> & Partial<AzureOpenAIInput> & BaseLLMParams & {
-         configuration?: ConfigurationParameters;
+         configuration?: ClientOptions & LegacyOpenAIInput;
      },
      /** @deprecated */
-     configuration?: ConfigurationParameters);
+     configuration?: ClientOptions & LegacyOpenAIInput);
      /**
       * Get the parameters used to invoke the model
       */
-     invocationParams(options?: this["ParsedCallOptions"]): CreateCompletionRequest;
-     _identifyingParams(): {
-         apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
-         organization?: string | undefined;
-         username?: string | undefined;
-         password?: string | undefined;
-         accessToken?: string | Promise<string> | ((name?: string | undefined, scopes?: string[] | undefined) => string) | ((name?: string | undefined, scopes?: string[] | undefined) => Promise<string>) | undefined;
-         basePath?: string | undefined;
-         baseOptions?: any;
-         formDataCtor?: (new () => any) | undefined;
-         model: string;
-         prompt?: import("openai").CreateCompletionRequestPrompt | null | undefined;
-         suffix?: string | null | undefined;
-         max_tokens?: number | null | undefined;
-         temperature?: number | null | undefined;
-         top_p?: number | null | undefined;
-         n?: number | null | undefined;
-         stream?: boolean | null | undefined;
-         logprobs?: number | null | undefined;
-         echo?: boolean | null | undefined;
-         stop?: import("openai").CreateCompletionRequestStop | null | undefined;
-         presence_penalty?: number | null | undefined;
-         frequency_penalty?: number | null | undefined;
-         best_of?: number | null | undefined;
-         logit_bias?: object | null | undefined;
-         user?: string | undefined;
+     invocationParams(options?: this["ParsedCallOptions"]): Omit<OpenAIClient.CompletionCreateParams, "prompt">;
+     /** @ignore */
+     _identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, "prompt"> & {
          model_name: string;
-     };
+     } & ClientOptions;
      /**
       * Get the identifying parameters for the model
       */
-     identifyingParams(): {
-         apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
-         organization?: string | undefined;
-         username?: string | undefined;
-         password?: string | undefined;
-         accessToken?: string | Promise<string> | ((name?: string | undefined, scopes?: string[] | undefined) => string) | ((name?: string | undefined, scopes?: string[] | undefined) => Promise<string>) | undefined;
-         basePath?: string | undefined;
-         baseOptions?: any;
-         formDataCtor?: (new () => any) | undefined;
-         model: string;
-         prompt?: import("openai").CreateCompletionRequestPrompt | null | undefined;
-         suffix?: string | null | undefined;
-         max_tokens?: number | null | undefined;
-         temperature?: number | null | undefined;
-         top_p?: number | null | undefined;
-         n?: number | null | undefined;
-         stream?: boolean | null | undefined;
-         logprobs?: number | null | undefined;
-         echo?: boolean | null | undefined;
-         stop?: import("openai").CreateCompletionRequestStop | null | undefined;
-         presence_penalty?: number | null | undefined;
-         frequency_penalty?: number | null | undefined;
-         best_of?: number | null | undefined;
-         logit_bias?: object | null | undefined;
-         user?: string | undefined;
+     identifyingParams(): Omit<OpenAIClient.CompletionCreateParams, "prompt"> & {
          model_name: string;
-     };
+     } & ClientOptions;
      /**
       * Call out to OpenAI's endpoint with k unique prompts
       *
@@ -139,12 +91,21 @@ export declare class OpenAI extends BaseLLM<OpenAICallOptions> implements OpenAI
       */
      _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
      _streamResponseChunks(input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
-     startStream(request: CreateCompletionRequest, options?: StreamingAxiosConfiguration): {
-         next(): Promise<any>;
-         [Symbol.asyncIterator](): any;
-     };
-     /** @ignore */
-     completionWithRetry(request: CreateCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateCompletionResponse>;
+     /**
+      * Calls the OpenAI API with retry logic in case of failures.
+      * @param request The request to send to the OpenAI API.
+      * @param options Optional configuration for the API call.
+      * @returns The response from the OpenAI API.
+      */
+     completionWithRetry(request: OpenAIClient.CompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Completion>>;
+     completionWithRetry(request: OpenAIClient.CompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Completions.Completion>;
+     /**
+      * Calls the OpenAI API with retry logic in case of failures.
+      * @param request The request to send to the OpenAI API.
+      * @param options Optional configuration for the API call.
+      * @returns The response from the OpenAI API.
+      */
+     private _getClientOptions;
      _llmType(): string;
  }
  /**
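The new declarations overload `completionWithRetry` on the `stream` flag, so the compiler infers `AsyncIterable<Completion>` for streaming calls and a plain `Completion` otherwise. The pattern in isolation (a sketch, not the package source):

```ts
import { OpenAI as OpenAIClient } from "openai";

declare function completionWithRetry(
  request: OpenAIClient.CompletionCreateParamsStreaming
): Promise<AsyncIterable<OpenAIClient.Completion>>;
declare function completionWithRetry(
  request: OpenAIClient.CompletionCreateParamsNonStreaming
): Promise<OpenAIClient.Completion>;

async function demo() {
  // `stream: true` selects the streaming overload:
  const stream = await completionWithRetry({
    model: "text-davinci-003",
    prompt: "p",
    stream: true,
  });
  for await (const chunk of stream) {
    console.log(chunk.choices[0]?.text);
  }

  // Omitting `stream` selects the non-streaming overload:
  const res = await completionWithRetry({
    model: "text-davinci-003",
    prompt: "p",
  });
  console.log(res.choices[0].text);
}
```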
@@ -164,13 +125,6 @@ export declare class PromptLayerOpenAI extends OpenAI {
      plTags?: string[];
      returnPromptLayerId?: boolean;
  });
- /**
-  * Calls the OpenAI API with retry logic in case of failures.
-  * @param request The request to send to the OpenAI API.
-  * @param options Optional configuration for the API call.
-  * @returns The response from the OpenAI API.
-  */
- completionWithRetry(request: CreateCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateCompletionResponse>;
  _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
  }
  export { OpenAIChat, PromptLayerOpenAIChat } from "./openai-chat.js";