langchain 0.0.140 → 0.0.141

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/dist/chains/openai_moderation.cjs +5 -13
  2. package/dist/chains/openai_moderation.d.ts +5 -5
  3. package/dist/chains/openai_moderation.js +6 -11
  4. package/dist/chat_models/anthropic.d.ts +2 -2
  5. package/dist/chat_models/openai.cjs +99 -215
  6. package/dist/chat_models/openai.d.ts +20 -60
  7. package/dist/chat_models/openai.js +101 -214
  8. package/dist/document_loaders/web/github.cjs +4 -0
  9. package/dist/document_loaders/web/github.js +4 -0
  10. package/dist/embeddings/openai.cjs +32 -22
  11. package/dist/embeddings/openai.d.ts +3 -3
  12. package/dist/embeddings/openai.js +34 -21
  13. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  14. package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
  15. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  16. package/dist/llms/openai-chat.cjs +69 -187
  17. package/dist/llms/openai-chat.d.ts +19 -71
  18. package/dist/llms/openai-chat.js +71 -186
  19. package/dist/llms/openai.cjs +92 -166
  20. package/dist/llms/openai.d.ts +25 -71
  21. package/dist/llms/openai.js +94 -165
  22. package/dist/load/import_map.cjs +3 -2
  23. package/dist/load/import_map.d.ts +1 -0
  24. package/dist/load/import_map.js +1 -0
  25. package/dist/schema/index.d.ts +2 -2
  26. package/dist/tools/convert_to_openai.d.ts +2 -2
  27. package/dist/types/openai-types.d.ts +27 -4
  28. package/dist/util/async_caller.cjs +10 -7
  29. package/dist/util/async_caller.js +10 -7
  30. package/dist/util/azure.cjs +4 -4
  31. package/dist/util/azure.d.ts +3 -3
  32. package/dist/util/azure.js +4 -4
  33. package/dist/util/openai.cjs +21 -0
  34. package/dist/util/openai.d.ts +1 -0
  35. package/dist/util/openai.js +17 -0
  36. package/dist/util/prompt-layer.cjs +1 -2
  37. package/dist/util/prompt-layer.d.ts +2 -2
  38. package/dist/util/prompt-layer.js +1 -2
  39. package/package.json +10 -2
  40. package/schema/document.cjs +1 -0
  41. package/schema/document.d.ts +1 -0
  42. package/schema/document.js +1 -0
package/dist/llms/openai-chat.js
@@ -1,11 +1,10 @@
- import { Configuration, OpenAIApi, } from "openai";
+ import { OpenAI as OpenAIClient } from "openai";
  import { GenerationChunk } from "../schema/index.js";
- import fetchAdapter from "../util/axios-fetch-adapter.js";
  import { getEndpoint } from "../util/azure.js";
- import { getEnvironmentVariable, isNode } from "../util/env.js";
+ import { getEnvironmentVariable } from "../util/env.js";
  import { promptLayerTrackRequest } from "../util/prompt-layer.js";
- import { readableStreamToAsyncIterable } from "../util/stream.js";
  import { LLM } from "./base.js";
+ import { wrapOpenAIClientError } from "../util/openai.js";
  /**
  * Wrapper around OpenAI large language models that use the Chat endpoint.
  *
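Note: the import swap above reflects the migration from the axios-based openai v3 SDK (Configuration/OpenAIApi plus a fetch adapter) to the fetch-based openai v4 client. A minimal sketch of what client construction looks like on each side of the change (assuming the published openai v3 and v4 APIs; model and prompt values are illustrative, not code from this package):

    // Before (openai v3, axios-based, removed in this release):
    // import { Configuration, OpenAIApi } from "openai";
    // const client = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));
    // const res = await client.createChatCompletion({ model: "gpt-3.5-turbo", messages });

    // After (openai v4, fetch-based, no axios adapter needed):
    import { OpenAI as OpenAIClient } from "openai";

    const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });
    const res = await client.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Hello" }],
    });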
@@ -260,6 +259,12 @@ export class OpenAIChat extends LLM {
  this.clientConfig = {
  apiKey: this.openAIApiKey,
  organization: this.organization,
+ baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+ dangerouslyAllowBrowser: true,
+ defaultHeaders: configuration?.baseOptions?.headers ??
+ fields?.configuration?.baseOptions?.headers,
+ defaultQuery: configuration?.baseOptions?.params ??
+ fields?.configuration?.baseOptions?.params,
  ...configuration,
  ...fields?.configuration,
  };
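Note: the fields added above translate the old v3-style Configuration shape (basePath, baseOptions.headers, baseOptions.params) into the openai v4 client options (baseURL, defaultHeaders, defaultQuery), so callers passing the old shape keep working. A rough sketch of that mapping, with a hypothetical caller-supplied configuration (the proxy URL, header, and query values are illustrative assumptions):

    import { OpenAI as OpenAIClient } from "openai";

    // Hypothetical legacy configuration a caller might still pass in:
    const legacyConfiguration = {
      basePath: "https://oai-proxy.example.com/v1",
      baseOptions: {
        headers: { "X-Example-Auth": "Bearer <key>" },
        params: { "user-id": "123" },
      },
    };

    // Equivalent openai v4 client options (the shape this release builds internally):
    const client = new OpenAIClient({
      apiKey: process.env.OPENAI_API_KEY,
      baseURL: legacyConfiguration.basePath, // was `basePath`
      defaultHeaders: legacyConfiguration.baseOptions.headers, // was `baseOptions.headers`
      defaultQuery: legacyConfiguration.baseOptions.params, // was `baseOptions.params`
    });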
@@ -313,18 +318,15 @@ export class OpenAIChat extends LLM {
  };
  return this.prefixMessages ? [...this.prefixMessages, message] : [message];
  }
- // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
- // when we integrate OpenAI's new SDK.
  async *_streamResponseChunks(prompt, options, runManager) {
  const params = {
  ...this.invocationParams(options),
  messages: this.formatMessages(prompt),
  stream: true,
  };
- const streamIterable = this.startStream(params, options);
- for await (const streamedResponse of streamIterable) {
- const data = JSON.parse(streamedResponse);
- const choice = data.choices?.[0];
+ const stream = await this.completionWithRetry(params, options);
+ for await (const data of stream) {
+ const choice = data.choices[0];
  if (!choice) {
  continue;
  }
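Note: _streamResponseChunks no longer parses raw SSE strings itself; with stream: true, the v4 SDK's chat.completions.create resolves to an async-iterable stream of already-parsed chunks. A minimal standalone sketch of that consumption pattern (assuming the openai v4 API; the model name and prompt are illustrative):

    import { OpenAI as OpenAIClient } from "openai";

    const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY });

    // With stream: true the SDK returns an async iterable of parsed chunks,
    // so no manual JSON.parse or "[DONE]" sentinel handling is required.
    const stream = await client.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Tell me a joke" }],
      stream: true,
    });

    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta?.content ?? "";
      process.stdout.write(delta);
    }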
@@ -333,201 +335,95 @@ export class OpenAIChat extends LLM {
  text: delta.content ?? "",
  });
  yield generationChunk;
+ const newTokenIndices = {
+ prompt: options.promptIndex ?? 0,
+ completion: choice.index ?? 0,
+ };
  // eslint-disable-next-line no-void
- void runManager?.handleLLMNewToken(generationChunk.text ?? "");
+ void runManager?.handleLLMNewToken(generationChunk.text ?? "", newTokenIndices);
+ }
+ if (options.signal?.aborted) {
+ throw new Error("AbortError");
  }
  }
- /**
- * Starts a stream of responses from the OpenAI API.
- * @param request The request to be sent to the OpenAI API.
- * @param options Optional configuration for the Axios request.
- * @returns An iterable object that can be used to iterate over the response chunks.
- */
- startStream(request, options) {
- let done = false;
- const stream = new TransformStream();
- const writer = stream.writable.getWriter();
- const iterable = readableStreamToAsyncIterable(stream.readable);
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- let err;
- this.completionWithRetry(request, {
- ...options,
- adapter: fetchAdapter,
- responseType: "stream",
- onmessage: (event) => {
- if (done)
- return;
- if (event.data?.trim?.() === "[DONE]") {
- done = true;
- // eslint-disable-next-line no-void
- void writer.close();
+ /** @ignore */
+ async _call(prompt, options, runManager) {
+ const params = this.invocationParams(options);
+ if (params.stream) {
+ const stream = await this._streamResponseChunks(prompt, options, runManager);
+ let finalChunk;
+ for await (const chunk of stream) {
+ if (finalChunk === undefined) {
+ finalChunk = chunk;
  }
  else {
- const data = JSON.parse(event.data);
- if (data.error) {
- done = true;
- throw data.error;
- }
- // eslint-disable-next-line no-void
- void writer.write(event.data);
+ finalChunk = finalChunk.concat(chunk);
  }
- },
- }).catch((error) => {
- if (!done) {
- err = error;
- done = true;
- // eslint-disable-next-line no-void
- void writer.close();
  }
- });
- return {
- async next() {
- const chunk = await iterable.next();
- if (err) {
- throw err;
- }
- return chunk;
- },
- [Symbol.asyncIterator]() {
- return this;
- },
- };
- }
- /** @ignore */
- async _call(prompt, options, runManager) {
- const params = this.invocationParams(options);
- const data = params.stream
- ? await new Promise((resolve, reject) => {
- let response;
- let rejected = false;
- let resolved = false;
- this.completionWithRetry({
- ...params,
- messages: this.formatMessages(prompt),
- }, {
- signal: options.signal,
- ...options.options,
- adapter: fetchAdapter,
- responseType: "stream",
- onmessage: (event) => {
- if (event.data?.trim?.() === "[DONE]") {
- if (resolved || rejected) {
- return;
- }
- resolved = true;
- resolve(response);
- }
- else {
- const data = JSON.parse(event.data);
- if (data?.error) {
- if (rejected) {
- return;
- }
- rejected = true;
- reject(data.error);
- return;
- }
- const message = data;
- // on the first message set the response properties
- if (!response) {
- response = {
- id: message.id,
- object: message.object,
- created: message.created,
- model: message.model,
- choices: [],
- };
- }
- // on all messages, update choice
- for (const part of message.choices) {
- if (part != null) {
- let choice = response.choices.find((c) => c.index === part.index);
- if (!choice) {
- choice = {
- index: part.index,
- finish_reason: part.finish_reason ?? undefined,
- };
- response.choices.push(choice);
- }
- if (!choice.message) {
- choice.message = {
- role: part.delta
- ?.role,
- content: part.delta?.content ?? "",
- };
- }
- choice.message.content += part.delta?.content ?? "";
- // eslint-disable-next-line no-void
- void runManager?.handleLLMNewToken(part.delta?.content ?? "", {
- prompt: options.promptIndex ?? 0,
- completion: part.index,
- });
- }
- }
- // when all messages are finished, resolve
- if (!resolved &&
- !rejected &&
- message.choices.every((c) => c.finish_reason != null)) {
- resolved = true;
- resolve(response);
- }
- }
- },
- }).catch((error) => {
- if (!rejected) {
- rejected = true;
- reject(error);
- }
- });
- })
- : await this.completionWithRetry({
+ return finalChunk?.text ?? "";
+ }
+ else {
+ const response = await this.completionWithRetry({
  ...params,
+ stream: false,
  messages: this.formatMessages(prompt),
  }, {
  signal: options.signal,
  ...options.options,
  });
- return data.choices[0].message?.content ?? "";
+ return response?.choices[0]?.message?.content ?? "";
+ }
  }
- /** @ignore */
  async completionWithRetry(request, options) {
+ const requestOptions = this._getClientOptions(options);
+ return this.caller.call(async () => {
+ try {
+ const res = await this.client.chat.completions.create(request, requestOptions);
+ return res;
+ }
+ catch (e) {
+ const error = wrapOpenAIClientError(e);
+ throw error;
+ }
+ });
+ }
+ /** @ignore */
+ _getClientOptions(options) {
  if (!this.client) {
  const openAIEndpointConfig = {
  azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
  azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
  azureOpenAIApiKey: this.azureOpenAIApiKey,
  azureOpenAIBasePath: this.azureOpenAIBasePath,
- basePath: this.clientConfig.basePath,
+ baseURL: this.clientConfig.baseURL,
  };
  const endpoint = getEndpoint(openAIEndpointConfig);
- const clientConfig = new Configuration({
+ const params = {
  ...this.clientConfig,
- basePath: endpoint,
- baseOptions: {
- timeout: this.timeout,
- ...this.clientConfig.baseOptions,
- },
- });
- this.client = new OpenAIApi(clientConfig);
+ baseURL: endpoint,
+ timeout: this.timeout,
+ maxRetries: 0,
+ };
+ if (!params.baseURL) {
+ delete params.baseURL;
+ }
+ this.client = new OpenAIClient(params);
  }
- const axiosOptions = {
- adapter: isNode() ? undefined : fetchAdapter,
- ...this.clientConfig.baseOptions,
+ const requestOptions = {
+ ...this.clientConfig,
  ...options,
  };
  if (this.azureOpenAIApiKey) {
- axiosOptions.headers = {
+ requestOptions.headers = {
  "api-key": this.azureOpenAIApiKey,
- ...axiosOptions.headers,
+ ...requestOptions.headers,
  };
- axiosOptions.params = {
+ requestOptions.query = {
  "api-version": this.azureOpenAIApiVersion,
- ...axiosOptions.params,
+ ...requestOptions.query,
  };
  }
- return this.caller
- .call(this.client.createChatCompletion.bind(this.client), request, axiosOptions)
- .then((res) => res.data);
+ return requestOptions;
  }
  _llmType() {
  return "openai";
@@ -577,19 +473,6 @@ export class PromptLayerOpenAIChat extends OpenAIChat {
  throw new Error("Missing PromptLayer API key");
  }
  }
- /**
- * Makes a call to the OpenAI API with retry logic in case of failures.
- * @param request The request to be sent to the OpenAI API.
- * @param options Optional configuration for the Axios request.
- * @returns The response from the OpenAI API.
- */
- async completionWithRetry(request, options) {
- if (request.stream) {
- return super.completionWithRetry(request, options);
- }
- const response = await super.completionWithRetry(request);
- return response;
- }
  async _generate(prompts, options, runManager) {
  let choice;
  const generations = await Promise.all(prompts.map(async (prompt) => {
@@ -600,7 +483,9 @@ export class PromptLayerOpenAIChat extends OpenAIChat {
  const parsedResp = {
  text,
  };
- const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerOpenAIChat", [prompt], this._identifyingParams(), this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
+ const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerOpenAIChat",
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ { ...this._identifyingParams(), prompt }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
  if (this.returnPromptLayerId === true &&
  promptLayerRespBody.success === true) {
  choice[0].generationInfo = {