langchain 0.0.140 → 0.0.142

This diff shows the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
Files changed (48)
  1. package/dist/chains/openai_moderation.cjs +5 -13
  2. package/dist/chains/openai_moderation.d.ts +5 -5
  3. package/dist/chains/openai_moderation.js +6 -11
  4. package/dist/chat_models/anthropic.d.ts +2 -2
  5. package/dist/chat_models/openai.cjs +99 -215
  6. package/dist/chat_models/openai.d.ts +20 -60
  7. package/dist/chat_models/openai.js +101 -214
  8. package/dist/document_loaders/web/github.cjs +4 -0
  9. package/dist/document_loaders/web/github.js +4 -0
  10. package/dist/embeddings/openai.cjs +32 -22
  11. package/dist/embeddings/openai.d.ts +3 -3
  12. package/dist/embeddings/openai.js +34 -21
  13. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  14. package/dist/experimental/chat_models/anthropic_functions.d.ts +3 -3
  15. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  16. package/dist/llms/openai-chat.cjs +69 -187
  17. package/dist/llms/openai-chat.d.ts +19 -71
  18. package/dist/llms/openai-chat.js +71 -186
  19. package/dist/llms/openai.cjs +92 -166
  20. package/dist/llms/openai.d.ts +25 -71
  21. package/dist/llms/openai.js +94 -165
  22. package/dist/load/import_map.cjs +3 -2
  23. package/dist/load/import_map.d.ts +1 -0
  24. package/dist/load/import_map.js +1 -0
  25. package/dist/prompts/chat.cjs +21 -9
  26. package/dist/prompts/chat.d.ts +3 -3
  27. package/dist/prompts/chat.js +22 -10
  28. package/dist/schema/index.d.ts +2 -2
  29. package/dist/schema/runnable.cjs +3 -0
  30. package/dist/schema/runnable.d.ts +1 -0
  31. package/dist/schema/runnable.js +3 -0
  32. package/dist/tools/convert_to_openai.d.ts +2 -2
  33. package/dist/types/openai-types.d.ts +27 -4
  34. package/dist/util/async_caller.cjs +10 -7
  35. package/dist/util/async_caller.js +10 -7
  36. package/dist/util/azure.cjs +4 -4
  37. package/dist/util/azure.d.ts +3 -3
  38. package/dist/util/azure.js +4 -4
  39. package/dist/util/openai.cjs +21 -0
  40. package/dist/util/openai.d.ts +1 -0
  41. package/dist/util/openai.js +17 -0
  42. package/dist/util/prompt-layer.cjs +1 -2
  43. package/dist/util/prompt-layer.d.ts +2 -2
  44. package/dist/util/prompt-layer.js +1 -2
  45. package/package.json +10 -2
  46. package/schema/document.cjs +1 -0
  47. package/schema/document.d.ts +1 -0
  48. package/schema/document.js +1 -0
@@ -1,14 +1,13 @@
- import { Configuration, OpenAIApi, } from "openai";
+ import { OpenAI as OpenAIClient } from "openai";
  import { calculateMaxTokens } from "../base_language/count_tokens.js";
  import { GenerationChunk } from "../schema/index.js";
- import fetchAdapter from "../util/axios-fetch-adapter.js";
  import { getEndpoint } from "../util/azure.js";
  import { chunkArray } from "../util/chunk.js";
- import { getEnvironmentVariable, isNode } from "../util/env.js";
+ import { getEnvironmentVariable } from "../util/env.js";
  import { promptLayerTrackRequest } from "../util/prompt-layer.js";
- import { readableStreamToAsyncIterable } from "../util/stream.js";
  import { BaseLLM } from "./base.js";
  import { OpenAIChat } from "./openai-chat.js";
+ import { wrapOpenAIClientError } from "../util/openai.js";
  /**
   * Wrapper around OpenAI large language models.
   *
@@ -268,6 +267,12 @@ export class OpenAI extends BaseLLM {
  this.clientConfig = {
      apiKey: this.openAIApiKey,
      organization: this.organization,
+     baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
+     dangerouslyAllowBrowser: true,
+     defaultHeaders: configuration?.baseOptions?.headers ??
+         fields?.configuration?.baseOptions?.headers,
+     defaultQuery: configuration?.baseOptions?.params ??
+         fields?.configuration?.baseOptions?.params,
      ...configuration,
      ...fields?.configuration,
  };
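
The constructor now translates the old axios-style `Configuration` fields into `openai` v4 client options (`basePath` → `baseURL`, `baseOptions.headers` → `defaultHeaders`, `baseOptions.params` → `defaultQuery`). A minimal sketch of the same mapping, with placeholder values:

```ts
import { OpenAI as OpenAIClient } from "openai";

// v3-style configuration, as callers may still pass it (placeholder values):
const legacy = {
  basePath: "https://proxy.example.com/v1",
  baseOptions: {
    headers: { "X-Custom-Header": "demo" },
    params: { tenant: "demo" },
  },
};

// Equivalent v4 client options, mirroring the mapping in the hunk above:
const client = new OpenAIClient({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: legacy.basePath, // basePath -> baseURL
  defaultHeaders: legacy.baseOptions.headers, // baseOptions.headers -> defaultHeaders
  defaultQuery: legacy.baseOptions.params, // baseOptions.params -> defaultQuery
});
```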
@@ -292,6 +297,7 @@ export class OpenAI extends BaseLLM {
          ...this.modelKwargs,
      };
  }
+ /** @ignore */
  _identifyingParams() {
      return {
          model_name: this.modelName,
@@ -338,94 +344,62 @@ export class OpenAI extends BaseLLM {
      }
      for (let i = 0; i < subPrompts.length; i += 1) {
          const data = params.stream
-             ? await new Promise((resolve, reject) => {
+             ? await (async () => {
                  const choices = [];
                  let response;
-                 let rejected = false;
-                 let resolved = false;
-                 this.completionWithRetry({
+                 const stream = await this.completionWithRetry({
                      ...params,
+                     stream: true,
                      prompt: subPrompts[i],
-                 }, {
-                     signal: options.signal,
-                     ...options.options,
-                     adapter: fetchAdapter,
-                     responseType: "stream",
-                     onmessage: (event) => {
-                         if (event.data?.trim?.() === "[DONE]") {
-                             if (resolved || rejected) {
-                                 return;
-                             }
-                             resolved = true;
-                             resolve({
-                                 ...response,
-                                 choices,
-                             });
+                 }, options);
+                 for await (const message of stream) {
+                     // on the first message set the response properties
+                     if (!response) {
+                         response = {
+                             id: message.id,
+                             object: message.object,
+                             created: message.created,
+                             model: message.model,
+                         };
+                     }
+                     // on all messages, update choice
+                     for (const part of message.choices) {
+                         if (!choices[part.index]) {
+                             choices[part.index] = part;
                          }
                          else {
-                             const data = JSON.parse(event.data);
-                             if (data?.error) {
-                                 if (rejected) {
-                                     return;
-                                 }
-                                 rejected = true;
-                                 reject(data.error);
-                                 return;
-                             }
-                             const message = data;
-                             // on the first message set the response properties
-                             if (!response) {
-                                 response = {
-                                     id: message.id,
-                                     object: message.object,
-                                     created: message.created,
-                                     model: message.model,
-                                 };
-                             }
-                             // on all messages, update choice
-                             for (const part of message.choices) {
-                                 if (part != null && part.index != null) {
-                                     if (!choices[part.index])
-                                         choices[part.index] = {};
-                                     const choice = choices[part.index];
-                                     choice.text = (choice.text ?? "") + (part.text ?? "");
-                                     choice.finish_reason = part.finish_reason;
-                                     choice.logprobs = part.logprobs;
-                                     // eslint-disable-next-line no-void
-                                     void runManager?.handleLLMNewToken(part.text ?? "", {
-                                         prompt: Math.floor(part.index / this.n),
-                                         completion: part.index % this.n,
-                                     });
-                                 }
-                             }
-                             // when all messages are finished, resolve
-                             if (!resolved &&
-                                 !rejected &&
-                                 choices.every((c) => c.finish_reason != null)) {
-                                 resolved = true;
-                                 resolve({
-                                     ...response,
-                                     choices,
-                                 });
-                             }
+                             const choice = choices[part.index];
+                             choice.text += part.text;
+                             choice.finish_reason = part.finish_reason;
+                             choice.logprobs = part.logprobs;
                          }
-                     },
-                 }).catch((error) => {
-                     if (!rejected) {
-                         rejected = true;
-                         reject(error);
+                         void runManager?.handleLLMNewToken(part.text, {
+                             prompt: Math.floor(part.index / this.n),
+                             completion: part.index % this.n,
+                         });
                      }
-                 });
-             })
+                 }
+                 if (options.signal?.aborted) {
+                     throw new Error("AbortError");
+                 }
+                 return { ...response, choices };
+             })()
              : await this.completionWithRetry({
                  ...params,
+                 stream: false,
                  prompt: subPrompts[i],
              }, {
                  signal: options.signal,
                  ...options.options,
              });
          choices.push(...data.choices);
-         const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage ?? {};
+         const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens, } = data.usage
+             ? data.usage
+             : {
+                 completion_tokens: undefined,
+                 prompt_tokens: undefined,
+                 total_tokens: undefined,
+             };
          if (completionTokens) {
              tokenUsage.completionTokens =
                  (tokenUsage.completionTokens ?? 0) + completionTokens;
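
The promise/`onmessage` SSE plumbing is gone: the v4 SDK returns an async-iterable stream, which the hunk above folds into a single response. A standalone sketch of that pattern (model name and prompt are placeholders):

```ts
import { OpenAI as OpenAIClient } from "openai";

const client = new OpenAIClient();

// Fold a streamed completion into final text, as the new _generate branch does.
async function streamToText(prompt: string): Promise<string> {
  const stream = await client.completions.create({
    model: "text-davinci-003", // placeholder model name
    prompt,
    stream: true,
  });
  let text = "";
  for await (const part of stream) {
    // each chunk carries incremental text for one or more choices
    text += part.choices[0]?.text ?? "";
  }
  return text;
}
```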
@@ -449,18 +423,16 @@ export class OpenAI extends BaseLLM {
          llmOutput: { tokenUsage },
      };
  }
- // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation
- // when we integrate OpenAI's new SDK.
+ // TODO(jacoblee): Refactor with _generate(..., {stream: true}) implementation?
  async *_streamResponseChunks(input, options, runManager) {
      const params = {
          ...this.invocationParams(options),
          prompt: input,
          stream: true,
      };
-     const streamIterable = this.startStream(params, options);
-     for await (const streamedResponse of streamIterable) {
-         const data = JSON.parse(streamedResponse);
-         const choice = data.choices?.[0];
+     const stream = await this.completionWithRetry(params, options);
+     for await (const data of stream) {
+         const choice = data.choices[0];
          if (!choice) {
              continue;
          }
@@ -468,103 +440,71 @@ export class OpenAI extends BaseLLM {
              text: choice.text,
              generationInfo: {
                  finishReason: choice.finish_reason,
-                 logprobs: choice.logprobs,
              },
          });
          yield chunk;
          // eslint-disable-next-line no-void
          void runManager?.handleLLMNewToken(chunk.text ?? "");
      }
+     if (options.signal?.aborted) {
+         throw new Error("AbortError");
+     }
  }
- startStream(request, options) {
-     let done = false;
-     const stream = new TransformStream();
-     const writer = stream.writable.getWriter();
-     const iterable = readableStreamToAsyncIterable(stream.readable);
-     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     let err;
-     this.completionWithRetry(request, {
-         ...options,
-         adapter: fetchAdapter,
-         responseType: "stream",
-         onmessage: (event) => {
-             if (done)
-                 return;
-             if (event.data?.trim?.() === "[DONE]") {
-                 done = true;
-                 // eslint-disable-next-line no-void
-                 void writer.close();
-             }
-             else {
-                 const data = JSON.parse(event.data);
-                 if (data.error) {
-                     done = true;
-                     throw data.error;
-                 }
-                 // eslint-disable-next-line no-void
-                 void writer.write(event.data);
-             }
-         },
-     }).catch((error) => {
-         if (!done) {
-             err = error;
-             done = true;
-             // eslint-disable-next-line no-void
-             void writer.close();
+ async completionWithRetry(request, options) {
+     const requestOptions = this._getClientOptions(options);
+     return this.caller.call(async () => {
+         try {
+             const res = await this.client.completions.create(request, requestOptions);
+             return res;
+         }
+         catch (e) {
+             const error = wrapOpenAIClientError(e);
+             throw error;
          }
      });
-     return {
-         async next() {
-             const chunk = await iterable.next();
-             if (err) {
-                 throw err;
-             }
-             return chunk;
-         },
-         [Symbol.asyncIterator]() {
-             return this;
-         },
-     };
  }
- /** @ignore */
- async completionWithRetry(request, options) {
+ /**
+  * Calls the OpenAI API with retry logic in case of failures.
+  * @param request The request to send to the OpenAI API.
+  * @param options Optional configuration for the API call.
+  * @returns The response from the OpenAI API.
+  */
+ _getClientOptions(options) {
      if (!this.client) {
          const openAIEndpointConfig = {
              azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
              azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
              azureOpenAIApiKey: this.azureOpenAIApiKey,
              azureOpenAIBasePath: this.azureOpenAIBasePath,
-             basePath: this.clientConfig.basePath,
+             baseURL: this.clientConfig.baseURL,
          };
          const endpoint = getEndpoint(openAIEndpointConfig);
-         const clientConfig = new Configuration({
+         const params = {
              ...this.clientConfig,
-             basePath: endpoint,
-             baseOptions: {
-                 timeout: this.timeout,
-                 ...this.clientConfig.baseOptions,
-             },
-         });
-         this.client = new OpenAIApi(clientConfig);
+             baseURL: endpoint,
+             timeout: this.timeout,
+             maxRetries: 0,
+         };
+         if (!params.baseURL) {
+             delete params.baseURL;
+         }
+         this.client = new OpenAIClient(params);
      }
-     const axiosOptions = {
-         adapter: isNode() ? undefined : fetchAdapter,
-         ...this.clientConfig.baseOptions,
+     const requestOptions = {
+         ...this.clientConfig,
          ...options,
      };
      if (this.azureOpenAIApiKey) {
-         axiosOptions.headers = {
+         requestOptions.headers = {
              "api-key": this.azureOpenAIApiKey,
-             ...axiosOptions.headers,
+             ...requestOptions.headers,
          };
-         axiosOptions.params = {
+         requestOptions.query = {
              "api-version": this.azureOpenAIApiVersion,
-             ...axiosOptions.params,
+             ...requestOptions.query,
          };
      }
-     return this.caller
-         .call(this.client.createCompletion.bind(this.client), request, axiosOptions)
-         .then((res) => res.data);
+     return requestOptions;
  }
  _llmType() {
      return "openai";
@@ -615,19 +555,6 @@ export class PromptLayerOpenAI extends OpenAI {
          throw new Error("Missing PromptLayer API key");
      }
  }
- /**
-  * Calls the OpenAI API with retry logic in case of failures.
-  * @param request The request to send to the OpenAI API.
-  * @param options Optional configuration for the API call.
-  * @returns The response from the OpenAI API.
-  */
- async completionWithRetry(request, options) {
-     if (request.stream) {
-         return super.completionWithRetry(request, options);
-     }
-     const response = await super.completionWithRetry(request);
-     return response;
- }
  async _generate(prompts, options, runManager) {
      const requestStartTime = Date.now();
      const generations = await super._generate(prompts, options, runManager);
@@ -637,7 +564,9 @@ export class PromptLayerOpenAI extends OpenAI {
          text: generations.generations[i][0].text,
          llm_output: generations.llmOutput,
      };
-     const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerOpenAI", [prompts[i]], this._identifyingParams(), this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
+     const promptLayerRespBody = await promptLayerTrackRequest(this.caller, "langchain.PromptLayerOpenAI",
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     { ...this._identifyingParams(), prompt: prompts[i] }, this.plTags, parsedResp, requestStartTime, requestEndTime, this.promptLayerApiKey);
      let promptLayerRequestId;
      if (this.returnPromptLayerId === true) {
          if (promptLayerRespBody && promptLayerRespBody.success === true) {
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = void 0;
+ exports.retrievers__multi_vector = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+ exports.evaluation = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -63,6 +63,7 @@ exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
  exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
  exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));
  exports.schema = __importStar(require("../schema/index.cjs"));
+ exports.schema__document = __importStar(require("../schema/document.cjs"));
  exports.schema__output_parser = __importStar(require("../schema/output_parser.cjs"));
  exports.schema__query_constructor = __importStar(require("../schema/query_constructor.cjs"));
  exports.schema__retriever = __importStar(require("../schema/retriever.cjs"));
@@ -35,6 +35,7 @@ export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
  export * as chat_models__ollama from "../chat_models/ollama.js";
  export * as chat_models__minimax from "../chat_models/minimax.js";
  export * as schema from "../schema/index.js";
+ export * as schema__document from "../schema/document.js";
  export * as schema__output_parser from "../schema/output_parser.js";
  export * as schema__query_constructor from "../schema/query_constructor.js";
  export * as schema__retriever from "../schema/retriever.js";
@@ -36,6 +36,7 @@ export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
  export * as chat_models__ollama from "../chat_models/ollama.js";
  export * as chat_models__minimax from "../chat_models/minimax.js";
  export * as schema from "../schema/index.js";
+ export * as schema__document from "../schema/document.js";
  export * as schema__output_parser from "../schema/output_parser.js";
  export * as schema__query_constructor from "../schema/query_constructor.js";
  export * as schema__retriever from "../schema/retriever.js";
@@ -265,6 +265,9 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
      if (this.validateTemplate) {
          const inputVariablesMessages = new Set();
          for (const promptMessage of this.promptMessages) {
+             // eslint-disable-next-line no-instanceof/no-instanceof
+             if (promptMessage instanceof index_js_1.BaseMessage)
+                 continue;
              for (const inputVariable of promptMessage.inputVariables) {
                  inputVariablesMessages.add(inputVariable);
              }
@@ -294,15 +297,21 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
      const allValues = await this.mergePartialAndUserVariables(values);
      let resultMessages = [];
      for (const promptMessage of this.promptMessages) {
-         const inputValues = promptMessage.inputVariables.reduce((acc, inputVariable) => {
-             if (!(inputVariable in allValues)) {
-                 throw new Error(`Missing value for input variable \`${inputVariable.toString()}\``);
-             }
-             acc[inputVariable] = allValues[inputVariable];
-             return acc;
-         }, {});
-         const message = await promptMessage.formatMessages(inputValues);
-         resultMessages = resultMessages.concat(message);
+         // eslint-disable-next-line no-instanceof/no-instanceof
+         if (promptMessage instanceof index_js_1.BaseMessage) {
+             resultMessages.push(promptMessage);
+         }
+         else {
+             const inputValues = promptMessage.inputVariables.reduce((acc, inputVariable) => {
+                 if (!(inputVariable in allValues)) {
+                     throw new Error(`Missing value for input variable \`${inputVariable.toString()}\``);
+                 }
+                 acc[inputVariable] = allValues[inputVariable];
+                 return acc;
+             }, {});
+             const message = await promptMessage.formatMessages(inputValues);
+             resultMessages = resultMessages.concat(message);
+         }
      }
      return resultMessages;
  }
@@ -335,6 +344,9 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
          : acc, Object.create(null));
      const inputVariables = new Set();
      for (const promptMessage of flattenedMessages) {
+         // eslint-disable-next-line no-instanceof/no-instanceof
+         if (promptMessage instanceof index_js_1.BaseMessage)
+             continue;
          for (const inputVariable of promptMessage.inputVariables) {
              if (inputVariable in flattenedPartialVariables) {
                  continue;
@@ -144,7 +144,7 @@ export interface ChatPromptTemplateInput<RunInput extends InputValues = any, Par
  /**
   * The prompt messages
   */
- promptMessages: BaseMessagePromptTemplate[];
+ promptMessages: Array<BaseMessagePromptTemplate | BaseMessage>;
  /**
   * Whether to try validating the template on initialization
   *
@@ -162,11 +162,11 @@ export declare class ChatPromptTemplate<RunInput extends InputValues = any, Part
  get lc_aliases(): {
      promptMessages: string;
  };
- promptMessages: BaseMessagePromptTemplate[];
+ promptMessages: Array<BaseMessagePromptTemplate | BaseMessage>;
  validateTemplate: boolean;
  constructor(input: ChatPromptTemplateInput<RunInput, PartialVariableName>);
  _getPromptType(): "chat";
  formatMessages(values: TypedPromptInputValues<RunInput>): Promise<BaseMessage[]>;
  partial<NewPartialVariableName extends string>(values: PartialValues<NewPartialVariableName>): Promise<ChatPromptTemplate<InputValues<Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>>, any>>;
- static fromPromptMessages<RunInput extends InputValues = any>(promptMessages: (BaseMessagePromptTemplate<InputValues> | ChatPromptTemplate<InputValues, string>)[]): ChatPromptTemplate<RunInput>;
+ static fromPromptMessages<RunInput extends InputValues = any>(promptMessages: (BaseMessagePromptTemplate<InputValues> | ChatPromptTemplate<InputValues, string> | BaseMessage)[]): ChatPromptTemplate<RunInput>;
  }
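
With `promptMessages` widened to `Array<BaseMessagePromptTemplate | BaseMessage>`, fixed messages can now be mixed with templates; they bypass variable validation and are passed through `formatMessages` verbatim. A small usage sketch (import paths follow the package's public entrypoints):

```ts
import { ChatPromptTemplate, HumanMessagePromptTemplate } from "langchain/prompts";
import { SystemMessage } from "langchain/schema";

const prompt = ChatPromptTemplate.fromPromptMessages([
  // a fixed message: no input variables, pushed through as-is
  new SystemMessage("You are a helpful assistant."),
  // a template: still formatted from input values
  HumanMessagePromptTemplate.fromTemplate("{question}"),
]);

const messages = await prompt.formatMessages({ question: "What is LangChain?" });
```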
@@ -1,6 +1,6 @@
  // Default generic "any" values are for backwards compatibility.
  // Replace with "string" when we are comfortable with a breaking change.
- import { AIMessage, BasePromptValue, ChatMessage, HumanMessage, SystemMessage, } from "../schema/index.js";
+ import { AIMessage, BaseMessage, BasePromptValue, ChatMessage, HumanMessage, SystemMessage, } from "../schema/index.js";
  import { Runnable } from "../schema/runnable.js";
  import { BasePromptTemplate, } from "./base.js";
  import { PromptTemplate } from "./prompt.js";
@@ -253,6 +253,9 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
      if (this.validateTemplate) {
          const inputVariablesMessages = new Set();
          for (const promptMessage of this.promptMessages) {
+             // eslint-disable-next-line no-instanceof/no-instanceof
+             if (promptMessage instanceof BaseMessage)
+                 continue;
              for (const inputVariable of promptMessage.inputVariables) {
                  inputVariablesMessages.add(inputVariable);
              }
@@ -282,15 +285,21 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
      const allValues = await this.mergePartialAndUserVariables(values);
      let resultMessages = [];
      for (const promptMessage of this.promptMessages) {
-         const inputValues = promptMessage.inputVariables.reduce((acc, inputVariable) => {
-             if (!(inputVariable in allValues)) {
-                 throw new Error(`Missing value for input variable \`${inputVariable.toString()}\``);
-             }
-             acc[inputVariable] = allValues[inputVariable];
-             return acc;
-         }, {});
-         const message = await promptMessage.formatMessages(inputValues);
-         resultMessages = resultMessages.concat(message);
+         // eslint-disable-next-line no-instanceof/no-instanceof
+         if (promptMessage instanceof BaseMessage) {
+             resultMessages.push(promptMessage);
+         }
+         else {
+             const inputValues = promptMessage.inputVariables.reduce((acc, inputVariable) => {
+                 if (!(inputVariable in allValues)) {
+                     throw new Error(`Missing value for input variable \`${inputVariable.toString()}\``);
+                 }
+                 acc[inputVariable] = allValues[inputVariable];
+                 return acc;
+             }, {});
+             const message = await promptMessage.formatMessages(inputValues);
+             resultMessages = resultMessages.concat(message);
+         }
      }
      return resultMessages;
  }
@@ -323,6 +332,9 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
          : acc, Object.create(null));
      const inputVariables = new Set();
      for (const promptMessage of flattenedMessages) {
+         // eslint-disable-next-line no-instanceof/no-instanceof
+         if (promptMessage instanceof BaseMessage)
+             continue;
          for (const inputVariable of promptMessage.inputVariables) {
              if (inputVariable in flattenedPartialVariables) {
                  continue;
@@ -1,4 +1,4 @@
- import { ChatCompletionRequestMessageFunctionCall } from "openai";
+ import type { OpenAI as OpenAIClient } from "openai";
  import { Document } from "../document.js";
  import { Serializable } from "../load/serializable.js";
  export declare const RUN_KEY = "__run";
@@ -64,7 +64,7 @@ export interface BaseMessageFields {
      content: string;
      name?: string;
      additional_kwargs?: {
-         function_call?: ChatCompletionRequestMessageFunctionCall;
+         function_call?: OpenAIClient.Chat.ChatCompletionMessage.FunctionCall;
          [key: string]: unknown;
      };
  }
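
The `function_call` field now reuses the v4 SDK's namespaced type; the runtime shape (`name` plus JSON-encoded `arguments`) is unchanged. A brief sketch of constructing a message with it (function name and arguments are placeholders):

```ts
import type { OpenAI as OpenAIClient } from "openai";
import { AIMessage } from "langchain/schema";

const functionCall: OpenAIClient.Chat.ChatCompletionMessage.FunctionCall = {
  name: "get_weather", // placeholder function name
  arguments: JSON.stringify({ location: "Paris" }),
};

const message = new AIMessage({
  content: "",
  additional_kwargs: { function_call: functionCall },
});
```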
@@ -552,6 +552,9 @@ class RunnableBinding extends Runnable {
          : { ...options, ...this.kwargs };
      return this.bound.batch(inputs, mergedOptions, batchOptions);
  }
+ async *_streamIterator(input, options) {
+     yield* this.bound._streamIterator(input, { ...options, ...this.kwargs });
+ }
  async stream(input, options) {
      return this.bound.stream(input, { ...options, ...this.kwargs });
  }
@@ -166,6 +166,7 @@ export declare class RunnableBinding<RunInput, RunOutput, CallOptions extends Ba
  batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: {
      maxConcurrency?: number;
  }): Promise<RunOutput[]>;
+ _streamIterator(input: RunInput, options?: Partial<CallOptions> | undefined): AsyncGenerator<Awaited<RunOutput>, void, unknown>;
  stream(input: RunInput, options?: Partial<CallOptions> | undefined): Promise<IterableReadableStream<RunOutput>>;
  }
  export type RouterInput = {
@@ -544,6 +544,9 @@ export class RunnableBinding extends Runnable {
          : { ...options, ...this.kwargs };
      return this.bound.batch(inputs, mergedOptions, batchOptions);
  }
+ async *_streamIterator(input, options) {
+     yield* this.bound._streamIterator(input, { ...options, ...this.kwargs });
+ }
  async stream(input, options) {
      return this.bound.stream(input, { ...options, ...this.kwargs });
  }
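
The `_streamIterator` override closes a gap where kwargs bound via `.bind()` applied to `invoke`, `batch`, and `stream` but not to the internal stream iterator. A hedged usage sketch, assuming a chat model (any `Runnable` works the same way):

```ts
import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI({ temperature: 0 });
// Bind call options once; streaming now merges them in via _streamIterator.
const bound = model.bind({ stop: ["\n"] });

const stream = await bound.stream("List three colors:");
for await (const chunk of stream) {
  console.log(chunk);
}
```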
@@ -1,4 +1,4 @@
- import { ChatCompletionFunctions } from "openai";
+ import type { OpenAI as OpenAIClient } from "openai";
  import { StructuredTool } from "./base.js";
  /**
   * Formats a `StructuredTool` instance into a format that is compatible
@@ -6,4 +6,4 @@ import { StructuredTool } from "./base.js";
   * function to convert the schema of the `StructuredTool` into a JSON
   * schema, which is then used as the parameters for the OpenAI function.
   */
- export declare function formatToOpenAIFunction(tool: StructuredTool): ChatCompletionFunctions;
+ export declare function formatToOpenAIFunction(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionCreateParams.Function;
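
`formatToOpenAIFunction` keeps its runtime output (`name`, `description`, JSON-schema `parameters`); only the declared return type moves to the v4 namespace. A brief usage sketch, assuming the `Calculator` tool entrypoint:

```ts
import { Calculator } from "langchain/tools/calculator";
import { formatToOpenAIFunction } from "langchain/tools/convert_to_openai";

// Produces { name, description, parameters } typed as
// OpenAIClient.Chat.ChatCompletionCreateParams.Function.
const fn = formatToOpenAIFunction(new Calculator());
console.log(fn.name);
```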