langchain 0.3.29 → 0.3.31

Files changed (125)
  1. package/README.md +15 -12
  2. package/dist/agents/agent.cjs +1 -1
  3. package/dist/agents/agent.d.ts +1 -1
  4. package/dist/agents/agent.js +1 -1
  5. package/dist/agents/mrkl/index.cjs +1 -1
  6. package/dist/agents/mrkl/index.d.ts +1 -1
  7. package/dist/agents/mrkl/index.js +1 -1
  8. package/dist/agents/openai_functions/index.cjs +1 -0
  9. package/dist/agents/openai_functions/index.d.ts +1 -0
  10. package/dist/agents/openai_functions/index.js +1 -0
  11. package/dist/agents/openai_functions/output_parser.cjs +1 -1
  12. package/dist/agents/openai_functions/output_parser.d.ts +1 -1
  13. package/dist/agents/openai_functions/output_parser.js +1 -1
  14. package/dist/agents/openai_tools/index.cjs +1 -1
  15. package/dist/agents/openai_tools/index.d.ts +1 -1
  16. package/dist/agents/openai_tools/index.js +1 -1
  17. package/dist/agents/openai_tools/output_parser.cjs +19 -14
  18. package/dist/agents/openai_tools/output_parser.d.ts +1 -1
  19. package/dist/agents/openai_tools/output_parser.js +19 -14
  20. package/dist/agents/structured_chat/index.cjs +1 -1
  21. package/dist/agents/structured_chat/index.d.ts +1 -1
  22. package/dist/agents/structured_chat/index.js +1 -1
  23. package/dist/agents/structured_chat/outputParser.cjs +1 -1
  24. package/dist/agents/structured_chat/outputParser.d.ts +1 -1
  25. package/dist/agents/structured_chat/outputParser.js +1 -1
  26. package/dist/agents/tool_calling/output_parser.cjs +1 -1
  27. package/dist/agents/tool_calling/output_parser.js +1 -1
  28. package/dist/agents/toolkits/openapi/openapi.cjs +1 -1
  29. package/dist/agents/toolkits/openapi/openapi.d.ts +1 -1
  30. package/dist/agents/toolkits/openapi/openapi.js +1 -1
  31. package/dist/agents/toolkits/sql/sql.cjs +1 -1
  32. package/dist/agents/toolkits/sql/sql.d.ts +1 -1
  33. package/dist/agents/toolkits/sql/sql.js +1 -1
  34. package/dist/agents/toolkits/vectorstore/vectorstore.cjs +1 -1
  35. package/dist/agents/toolkits/vectorstore/vectorstore.d.ts +1 -1
  36. package/dist/agents/toolkits/vectorstore/vectorstore.js +1 -1
  37. package/dist/chains/analyze_documents_chain.cjs +1 -1
  38. package/dist/chains/analyze_documents_chain.d.ts +1 -1
  39. package/dist/chains/analyze_documents_chain.js +1 -1
  40. package/dist/chains/constitutional_ai/constitutional_principle.cjs +2 -2
  41. package/dist/chains/constitutional_ai/constitutional_principle.d.ts +2 -2
  42. package/dist/chains/constitutional_ai/constitutional_principle.js +2 -2
  43. package/dist/chains/conversation.cjs +1 -1
  44. package/dist/chains/conversation.d.ts +1 -1
  45. package/dist/chains/conversation.js +1 -1
  46. package/dist/chains/graph_qa/cypher.cjs +1 -1
  47. package/dist/chains/graph_qa/cypher.d.ts +1 -1
  48. package/dist/chains/graph_qa/cypher.js +1 -1
  49. package/dist/chains/history_aware_retriever.cjs +1 -1
  50. package/dist/chains/history_aware_retriever.d.ts +1 -1
  51. package/dist/chains/history_aware_retriever.js +1 -1
  52. package/dist/chains/llm_chain.cjs +1 -1
  53. package/dist/chains/llm_chain.d.ts +1 -1
  54. package/dist/chains/llm_chain.js +1 -1
  55. package/dist/chains/openai_functions/base.cjs +1 -1
  56. package/dist/chains/openai_functions/base.d.ts +1 -1
  57. package/dist/chains/openai_functions/base.js +1 -1
  58. package/dist/chains/openai_functions/structured_output.cjs +1 -1
  59. package/dist/chains/openai_functions/structured_output.js +1 -1
  60. package/dist/chains/retrieval.cjs +1 -1
  61. package/dist/chains/retrieval.d.ts +1 -1
  62. package/dist/chains/retrieval.js +1 -1
  63. package/dist/chains/router/multi_prompt.cjs +16 -13
  64. package/dist/chains/router/multi_prompt.d.ts +16 -13
  65. package/dist/chains/router/multi_prompt.js +16 -13
  66. package/dist/chains/router/multi_retrieval_qa.cjs +1 -1
  67. package/dist/chains/router/multi_retrieval_qa.d.ts +1 -1
  68. package/dist/chains/router/multi_retrieval_qa.js +1 -1
  69. package/dist/chains/sequential_chain.cjs +2 -2
  70. package/dist/chains/sequential_chain.d.ts +2 -2
  71. package/dist/chains/sequential_chain.js +2 -2
  72. package/dist/chains/sql_db/sql_db_chain.cjs +1 -1
  73. package/dist/chains/sql_db/sql_db_chain.d.ts +1 -1
  74. package/dist/chains/sql_db/sql_db_chain.js +1 -1
  75. package/dist/chat_models/universal.cjs +1 -1
  76. package/dist/chat_models/universal.js +1 -1
  77. package/dist/document_transformers/openai_functions.cjs +1 -1
  78. package/dist/document_transformers/openai_functions.js +1 -1
  79. package/dist/evaluation/loader.cjs +1 -1
  80. package/dist/evaluation/loader.js +1 -1
  81. package/dist/experimental/autogpt/agent.cjs +1 -1
  82. package/dist/experimental/autogpt/agent.d.ts +1 -1
  83. package/dist/experimental/autogpt/agent.js +1 -1
  84. package/dist/experimental/generative_agents/generative_agent.cjs +1 -1
  85. package/dist/experimental/generative_agents/generative_agent.d.ts +1 -1
  86. package/dist/experimental/generative_agents/generative_agent.js +1 -1
  87. package/dist/hub/base.cjs +19 -0
  88. package/dist/hub/base.d.ts +1 -0
  89. package/dist/hub/base.js +18 -0
  90. package/dist/hub/index.cjs +1 -1
  91. package/dist/hub/index.js +2 -2
  92. package/dist/hub/node.cjs +1 -1
  93. package/dist/hub/node.js +2 -2
  94. package/dist/memory/buffer_memory.cjs +1 -1
  95. package/dist/memory/buffer_memory.d.ts +1 -1
  96. package/dist/memory/buffer_memory.js +1 -1
  97. package/dist/memory/buffer_token_memory.cjs +1 -1
  98. package/dist/memory/buffer_token_memory.d.ts +1 -1
  99. package/dist/memory/buffer_token_memory.js +1 -1
  100. package/dist/memory/buffer_window_memory.cjs +1 -1
  101. package/dist/memory/buffer_window_memory.d.ts +1 -1
  102. package/dist/memory/buffer_window_memory.js +1 -1
  103. package/dist/memory/entity_memory.cjs +2 -2
  104. package/dist/memory/entity_memory.d.ts +2 -2
  105. package/dist/memory/entity_memory.js +2 -2
  106. package/dist/memory/summary.cjs +2 -2
  107. package/dist/memory/summary.d.ts +2 -2
  108. package/dist/memory/summary.js +2 -2
  109. package/dist/memory/summary_buffer.cjs +2 -2
  110. package/dist/memory/summary_buffer.d.ts +2 -2
  111. package/dist/memory/summary_buffer.js +2 -2
  112. package/dist/retrievers/hyde.cjs +1 -1
  113. package/dist/retrievers/hyde.d.ts +1 -1
  114. package/dist/retrievers/hyde.js +1 -1
  115. package/dist/retrievers/self_query/index.cjs +1 -1
  116. package/dist/retrievers/self_query/index.d.ts +1 -1
  117. package/dist/retrievers/self_query/index.js +1 -1
  118. package/dist/smith/runner_utils.cjs +2 -1
  119. package/dist/smith/runner_utils.js +2 -1
  120. package/dist/tools/webbrowser.cjs +1 -1
  121. package/dist/tools/webbrowser.d.ts +1 -1
  122. package/dist/tools/webbrowser.js +1 -1
  123. package/dist/util/sql_utils.cjs +1 -0
  124. package/dist/util/sql_utils.js +1 -0
  125. package/package.json +4 -4
@@ -31,7 +31,7 @@ export interface FromLLMInput {
  * @example
  * ```typescript
  * const chain = new GraphCypherQAChain({
- * llm: new ChatOpenAI({ temperature: 0 }),
+ * llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * graph: new Neo4jGraph(),
  * });
  * const res = await chain.invoke("Who played in Pulp Fiction?");
@@ -13,7 +13,7 @@ export const INTERMEDIATE_STEPS_KEY = "intermediateSteps";
  * @example
  * ```typescript
  * const chain = new GraphCypherQAChain({
- * llm: new ChatOpenAI({ temperature: 0 }),
+ * llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * graph: new Neo4jGraph(),
  * });
  * const res = await chain.invoke("Who played in Pulp Fiction?");
@@ -21,7 +21,7 @@ const output_parsers_1 = require("@langchain/core/output_parsers");
  * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
  *
  * const rephrasePrompt = await pull("langchain-ai/chat-langchain-rephrase");
- * const llm = new ChatOpenAI({});
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const retriever = ...
  * const chain = await createHistoryAwareRetriever({
  * llm,
@@ -38,7 +38,7 @@ export type CreateHistoryAwareRetrieverParams = {
  * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
  *
  * const rephrasePrompt = await pull("langchain-ai/chat-langchain-rephrase");
- * const llm = new ChatOpenAI({});
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const retriever = ...
  * const chain = await createHistoryAwareRetriever({
  * llm,
@@ -18,7 +18,7 @@ import { StringOutputParser } from "@langchain/core/output_parsers";
  * import { createHistoryAwareRetriever } from "langchain/chains/history_aware_retriever";
  *
  * const rephrasePrompt = await pull("langchain-ai/chat-langchain-rephrase");
- * const llm = new ChatOpenAI({});
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const retriever = ...
  * const chain = await createHistoryAwareRetriever({
  * llm,
@@ -40,7 +40,7 @@ function _getLanguageModel(llmLike) {
  * import { ChatOpenAI } from "@langchain/openai";
  *
  * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
- * const llm = new ChatOpenAI();
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const chain = prompt.pipe(llm);
  *
  * const response = await chain.invoke({ adjective: "funny" });
@@ -40,7 +40,7 @@ export interface LLMChainInput<T extends string | object = string, Model extends
  * import { ChatOpenAI } from "@langchain/openai";
  *
  * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
- * const llm = new ChatOpenAI();
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const chain = prompt.pipe(llm);
  *
  * const response = await chain.invoke({ adjective: "funny" });
@@ -37,7 +37,7 @@ function _getLanguageModel(llmLike) {
  * import { ChatOpenAI } from "@langchain/openai";
  *
  * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
- * const llm = new ChatOpenAI();
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const chain = prompt.pipe(llm);
  *
  * const response = await chain.invoke({ adjective: "funny" });
@@ -98,7 +98,7 @@ function createOpenAIFnRunnable(config) {
  * required: ["name", "age"],
  * };
  *
- * const model = new ChatOpenAI();
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const prompt = ChatPromptTemplate.fromMessages([
  * ["human", "Human description: {description}"],
  * ]);
@@ -126,7 +126,7 @@ export type CreateStructuredOutputRunnableConfig<RunInput extends Record<string,
  * required: ["name", "age"],
  * };
  *
- * const model = new ChatOpenAI();
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const prompt = ChatPromptTemplate.fromMessages([
  * ["human", "Human description: {description}"],
  * ]);
@@ -94,7 +94,7 @@ export function createOpenAIFnRunnable(config) {
  * required: ["name", "age"],
  * };
  *
- * const model = new ChatOpenAI();
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const prompt = ChatPromptTemplate.fromMessages([
  * ["human", "Human description: {description}"],
  * ]);
@@ -111,7 +111,7 @@ exports.FunctionCallStructuredOutputParser = FunctionCallStructuredOutputParser;
  * @returns OpenAPIChain
  */
 function createStructuredOutputChain(input) {
-    const { outputSchema, llm = new openai_1.ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }), outputKey = "output", llmKwargs = {}, zodSchema, ...rest } = input;
+    const { outputSchema, llm = new openai_1.ChatOpenAI({ model: "gpt-3.5-turbo-0613", temperature: 0 }), outputKey = "output", llmKwargs = {}, zodSchema, ...rest } = input;
     if (outputSchema === undefined && zodSchema === undefined) {
         throw new Error(`Must provide one of "outputSchema" or "zodSchema".`);
     }
@@ -105,7 +105,7 @@ export class FunctionCallStructuredOutputParser extends BaseLLMOutputParser {
  * @returns OpenAPIChain
  */
 export function createStructuredOutputChain(input) {
-    const { outputSchema, llm = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }), outputKey = "output", llmKwargs = {}, zodSchema, ...rest } = input;
+    const { outputSchema, llm = new ChatOpenAI({ model: "gpt-3.5-turbo-0613", temperature: 0 }), outputKey = "output", llmKwargs = {}, zodSchema, ...rest } = input;
     if (outputSchema === undefined && zodSchema === undefined) {
         throw new Error(`Must provide one of "outputSchema" or "zodSchema".`);
     }
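
The pattern above repeats across the release: defaults and JSDoc examples move from the deprecated `modelName` constructor key to `model`, and examples that previously relied on an implicit default now name a model explicitly. A minimal sketch of the two spellings, assuming a current `@langchain/openai` release where `modelName` is still accepted as a deprecated alias of `model`:

```typescript
import { ChatOpenAI } from "@langchain/openai";

// Old spelling, as the previous defaults used it (deprecated alias):
const legacy = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 });

// New spelling, as this diff rewrites every default and example:
const current = new ChatOpenAI({ model: "gpt-3.5-turbo-0613", temperature: 0 });
```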
@@ -22,7 +22,7 @@ function isBaseRetriever(x) {
  * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
  *
  * const retrievalQAChatPrompt = await pull("langchain-ai/retrieval-qa-chat");
- * const llm = new ChatOpenAI({});
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const retriever = ...
  * const combineDocsChain = await createStuffDocumentsChain(...);
  * const retrievalChain = await createRetrievalChain({
@@ -42,7 +42,7 @@ export type CreateRetrievalChainParams<RunOutput> = {
  * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
  *
  * const retrievalQAChatPrompt = await pull("langchain-ai/retrieval-qa-chat");
- * const llm = new ChatOpenAI({});
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const retriever = ...
  * const combineDocsChain = await createStuffDocumentsChain(...);
  * const retrievalChain = await createRetrievalChain({
@@ -19,7 +19,7 @@ function isBaseRetriever(x) {
  * import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
  *
  * const retrievalQAChatPrompt = await pull("langchain-ai/retrieval-qa-chat");
- * const llm = new ChatOpenAI({});
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini" });
  * const retriever = ...
  * const combineDocsChain = await createStuffDocumentsChain(...);
  * const retrievalChain = await createRetrievalChain({
@@ -16,19 +16,22 @@ const router_js_1 = require("../../output_parsers/router.cjs");
  * functionality specific to multi-prompt chains.
  * @example
  * ```typescript
- * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(new ChatOpenAI(), {
- * promptNames: ["physics", "math", "history"],
- * promptDescriptions: [
- * "Good for answering questions about physics",
- * "Good for answering math questions",
- * "Good for answering questions about history",
- * ],
- * promptTemplates: [
- * `You are a very smart physics professor. Here is a question:\n{input}\n`,
- * `You are a very good mathematician. Here is a question:\n{input}\n`,
- * `You are a very smart history professor. Here is a question:\n{input}\n`,
- * ],
- * });
+ * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
+ * {
+ * promptNames: ["physics", "math", "history"],
+ * promptDescriptions: [
+ * "Good for answering questions about physics",
+ * "Good for answering math questions",
+ * "Good for answering questions about history",
+ * ],
+ * promptTemplates: [
+ * `You are a very smart physics professor. Here is a question:\n{input}\n`,
+ * `You are a very good mathematician. Here is a question:\n{input}\n`,
+ * `You are a very smart history professor. Here is a question:\n{input}\n`,
+ * ],
+ * }
+ * );
  * const result = await multiPromptChain.call({
  * input: "What is the speed of light?",
  * });
@@ -9,19 +9,22 @@ import { LLMChainInput } from "../../chains/llm_chain.js";
  * functionality specific to multi-prompt chains.
  * @example
  * ```typescript
- * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(new ChatOpenAI(), {
- * promptNames: ["physics", "math", "history"],
- * promptDescriptions: [
- * "Good for answering questions about physics",
- * "Good for answering math questions",
- * "Good for answering questions about history",
- * ],
- * promptTemplates: [
- * `You are a very smart physics professor. Here is a question:\n{input}\n`,
- * `You are a very good mathematician. Here is a question:\n{input}\n`,
- * `You are a very smart history professor. Here is a question:\n{input}\n`,
- * ],
- * });
+ * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
+ * {
+ * promptNames: ["physics", "math", "history"],
+ * promptDescriptions: [
+ * "Good for answering questions about physics",
+ * "Good for answering math questions",
+ * "Good for answering questions about history",
+ * ],
+ * promptTemplates: [
+ * `You are a very smart physics professor. Here is a question:\n{input}\n`,
+ * `You are a very good mathematician. Here is a question:\n{input}\n`,
+ * `You are a very smart history professor. Here is a question:\n{input}\n`,
+ * ],
+ * }
+ * );
  * const result = await multiPromptChain.call({
  * input: "What is the speed of light?",
  * });
@@ -13,19 +13,22 @@ import { RouterOutputParser } from "../../output_parsers/router.js";
  * functionality specific to multi-prompt chains.
  * @example
  * ```typescript
- * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(new ChatOpenAI(), {
- * promptNames: ["physics", "math", "history"],
- * promptDescriptions: [
- * "Good for answering questions about physics",
- * "Good for answering math questions",
- * "Good for answering questions about history",
- * ],
- * promptTemplates: [
- * `You are a very smart physics professor. Here is a question:\n{input}\n`,
- * `You are a very good mathematician. Here is a question:\n{input}\n`,
- * `You are a very smart history professor. Here is a question:\n{input}\n`,
- * ],
- * });
+ * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
+ * {
+ * promptNames: ["physics", "math", "history"],
+ * promptDescriptions: [
+ * "Good for answering questions about physics",
+ * "Good for answering math questions",
+ * "Good for answering questions about history",
+ * ],
+ * promptTemplates: [
+ * `You are a very smart physics professor. Here is a question:\n{input}\n`,
+ * `You are a very good mathematician. Here is a question:\n{input}\n`,
+ * `You are a very smart history professor. Here is a question:\n{input}\n`,
+ * ],
+ * }
+ * );
  * const result = await multiPromptChain.call({
  * input: "What is the speed of light?",
  * });
@@ -18,7 +18,7 @@ const router_js_1 = require("../../output_parsers/router.cjs");
  * @example
  * ```typescript
  * const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers(
- * new ChatOpenAI(),
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
  * {
  * retrieverNames: ["aqua teen", "mst3k", "animaniacs"],
  * retrieverDescriptions: [
@@ -22,7 +22,7 @@ export type MultiRetrievalDefaults = {
  * @example
  * ```typescript
  * const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers(
- * new ChatOpenAI(),
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
  * {
  * retrieverNames: ["aqua teen", "mst3k", "animaniacs"],
  * retrieverDescriptions: [
@@ -15,7 +15,7 @@ import { RouterOutputParser } from "../../output_parsers/router.js";
  * @example
  * ```typescript
  * const multiRetrievalQAChain = MultiRetrievalQAChain.fromLLMAndRetrievers(
- * new ChatOpenAI(),
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
  * {
  * retrieverNames: ["aqua teen", "mst3k", "animaniacs"],
  * retrieverDescriptions: [
@@ -32,12 +32,12 @@ function formatSet(input) {
  * const overallChain = new SequentialChain({
  * chains: [
  * new LLMChain({
- * llm: new ChatOpenAI({ temperature: 0 }),
+ * llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * prompt: promptTemplate,
  * outputKey: "synopsis",
  * }),
  * new LLMChain({
- * llm: new OpenAI({ temperature: 0 }),
+ * llm: new OpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * prompt: reviewPromptTemplate,
  * outputKey: "review",
  * }),
@@ -43,12 +43,12 @@ export interface SequentialChainInput extends ChainInputs {
  * const overallChain = new SequentialChain({
  * chains: [
  * new LLMChain({
- * llm: new ChatOpenAI({ temperature: 0 }),
+ * llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * prompt: promptTemplate,
  * outputKey: "synopsis",
  * }),
  * new LLMChain({
- * llm: new OpenAI({ temperature: 0 }),
+ * llm: new OpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * prompt: reviewPromptTemplate,
  * outputKey: "review",
  * }),
@@ -29,12 +29,12 @@ function formatSet(input) {
  * const overallChain = new SequentialChain({
  * chains: [
  * new LLMChain({
- * llm: new ChatOpenAI({ temperature: 0 }),
+ * llm: new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * prompt: promptTemplate,
  * outputKey: "synopsis",
  * }),
  * new LLMChain({
- * llm: new OpenAI({ temperature: 0 }),
+ * llm: new OpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * prompt: reviewPromptTemplate,
  * outputKey: "review",
  * }),
@@ -218,7 +218,7 @@ const difference = (setA, setB) => new Set([...setA].filter((x) => !setB.has(x))
  * const db = await SqlDatabase.fromDataSourceParams({
  * appDataSource: datasource,
  * });
- * const llm = new ChatOpenAI({ temperature: 0 });
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
  * const chain = await createSqlQueryChain({
  * llm,
  * db,
@@ -104,7 +104,7 @@ export interface CreateSqlQueryChainFields {
  * const db = await SqlDatabase.fromDataSourceParams({
  * appDataSource: datasource,
  * });
- * const llm = new ChatOpenAI({ temperature: 0 });
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
  * const chain = await createSqlQueryChain({
  * llm,
  * db,
@@ -213,7 +213,7 @@ const difference = (setA, setB) => new Set([...setA].filter((x) => !setB.has(x))
  * const db = await SqlDatabase.fromDataSourceParams({
  * appDataSource: datasource,
  * });
- * const llm = new ChatOpenAI({ temperature: 0 });
+ * const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
  * const chain = await createSqlQueryChain({
  * llm,
  * db,
@@ -616,7 +616,7 @@ exports.ConfigurableModel = ConfigurableModel;
  * This function initializes a ChatModel based on the provided model name and provider.
  * It supports various model providers and allows for runtime configuration of model parameters.
  *
- * Security Note: Setting `configurableFields` to "any" means fields like api_key, base_url, etc.
+ * Security Note: Setting `configurableFields` to "any" means fields like apiKey, baseUrl, etc.
  * can be altered at runtime, potentially redirecting model requests to a different service/user.
  * Make sure that if you're accepting untrusted configurations, you enumerate the
  * `configurableFields` explicitly.
@@ -577,7 +577,7 @@ export class ConfigurableModel extends BaseChatModel {
  * This function initializes a ChatModel based on the provided model name and provider.
  * It supports various model providers and allows for runtime configuration of model parameters.
  *
- * Security Note: Setting `configurableFields` to "any" means fields like api_key, base_url, etc.
+ * Security Note: Setting `configurableFields` to "any" means fields like apiKey, baseUrl, etc.
  * can be altered at runtime, potentially redirecting model requests to a different service/user.
  * Make sure that if you're accepting untrusted configurations, you enumerate the
  * `configurableFields` explicitly.
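
The hunk above only renames the doc comment's fields to their JS spellings; the security guidance itself is unchanged. A minimal sketch of following it with `initChatModel` (the function this file implements), enumerating `configurableFields` rather than passing `"any"` so credentials and endpoints cannot be swapped at runtime; the field list and runtime override are illustrative:

```typescript
import { initChatModel } from "langchain/chat_models/universal";

// Only the enumerated fields may be overridden per call;
// apiKey and baseUrl stay fixed at their configured values.
const model = await initChatModel("gpt-4o-mini", {
  modelProvider: "openai",
  temperature: 0,
  configurableFields: ["model", "temperature"],
});

// A caller can tweak the enumerated fields only:
const res = await model.invoke("Hello", {
  configurable: { temperature: 0.7 },
});
```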
@@ -43,7 +43,7 @@ class MetadataTagger extends documents_1.MappingDocumentTransformer {
 }
 exports.MetadataTagger = MetadataTagger;
 function createMetadataTagger(schema, options) {
-    const { llm = new openai_1.ChatOpenAI({ modelName: "gpt-3.5-turbo-0613" }), ...rest } = options;
+    const { llm = new openai_1.ChatOpenAI({ model: "gpt-3.5-turbo-0613" }), ...rest } = options;
     const taggingChain = (0, index_js_1.createTaggingChain)(schema, llm, rest);
     return new MetadataTagger({ taggingChain });
 }
@@ -37,7 +37,7 @@ export class MetadataTagger extends MappingDocumentTransformer {
     }
 }
 export function createMetadataTagger(schema, options) {
-    const { llm = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613" }), ...rest } = options;
+    const { llm = new ChatOpenAI({ model: "gpt-3.5-turbo-0613" }), ...rest } = options;
     const taggingChain = createTaggingChain(schema, llm, rest);
     return new MetadataTagger({ taggingChain });
 }
@@ -19,7 +19,7 @@ async function loadEvaluator(type, options) {
     const { llm, chainOptions, criteria, agentTools } = options || {};
     const llm_ = llm ??
         new openai_1.ChatOpenAI({
-            modelName: "gpt-4",
+            model: "gpt-4",
             temperature: 0.0,
         });
     let evaluator;
@@ -16,7 +16,7 @@ export async function loadEvaluator(type, options) {
     const { llm, chainOptions, criteria, agentTools } = options || {};
     const llm_ = llm ??
         new ChatOpenAI({
-            modelName: "gpt-4",
+            model: "gpt-4",
             temperature: 0.0,
         });
     let evaluator;
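
As these hunks show, `loadEvaluator` falls back to a `gpt-4` judge when no `llm` is supplied; only the constructor key changes here. A usage sketch under that default (the criterion and strings are illustrative):

```typescript
import { loadEvaluator } from "langchain/evaluation";

// No `llm` option, so the gpt-4 default shown in the diff is used.
const evaluator = await loadEvaluator("criteria", {
  criteria: "conciseness",
});

const result = await evaluator.evaluateStrings({
  input: "What is 2 + 2?",
  prediction: "Four, because two plus two equals four.",
});
// `result` holds the judge's verdict, e.g. reasoning, value, and score.
```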
@@ -16,7 +16,7 @@ const text_splitter_js_1 = require("../../text_splitter.cjs");
  * @example
  * ```typescript
  * const autogpt = AutoGPT.fromLLMAndTools(
- * new ChatOpenAI({ temperature: 0 }),
+ * new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * [
  * new ReadFileTool({ store: new InMemoryFileStore() }),
  * new WriteFileTool({ store: new InMemoryFileStore() }),
@@ -24,7 +24,7 @@ export interface AutoGPTInput {
  * @example
  * ```typescript
  * const autogpt = AutoGPT.fromLLMAndTools(
- * new ChatOpenAI({ temperature: 0 }),
+ * new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * [
  * new ReadFileTool({ store: new InMemoryFileStore() }),
  * new WriteFileTool({ store: new InMemoryFileStore() }),
@@ -13,7 +13,7 @@ import { TokenTextSplitter } from "../../text_splitter.js";
  * @example
  * ```typescript
  * const autogpt = AutoGPT.fromLLMAndTools(
- * new ChatOpenAI({ temperature: 0 }),
+ * new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 }),
  * [
  * new ReadFileTool({ store: new InMemoryFileStore() }),
  * new WriteFileTool({ store: new InMemoryFileStore() }),
@@ -13,7 +13,7 @@ const base_js_1 = require("../../chains/base.cjs");
  * const tommie: GenerativeAgent = new GenerativeAgent(
  * new OpenAI({ temperature: 0.9, maxTokens: 1500 }),
  * new GenerativeAgentMemory(
- * new ChatOpenAI(),
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
  * new TimeWeightedVectorStoreRetriever({
  * vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
  * otherScoreKeys: ["importance"],
@@ -27,7 +27,7 @@ export type GenerativeAgentConfig = {
  * const tommie: GenerativeAgent = new GenerativeAgent(
  * new OpenAI({ temperature: 0.9, maxTokens: 1500 }),
  * new GenerativeAgentMemory(
- * new ChatOpenAI(),
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
  * new TimeWeightedVectorStoreRetriever({
  * vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
  * otherScoreKeys: ["importance"],
@@ -10,7 +10,7 @@ import { BaseChain } from "../../chains/base.js";
  * const tommie: GenerativeAgent = new GenerativeAgent(
  * new OpenAI({ temperature: 0.9, maxTokens: 1500 }),
  * new GenerativeAgentMemory(
- * new ChatOpenAI(),
+ * new ChatOpenAI({ model: "gpt-4o-mini" }),
  * new TimeWeightedVectorStoreRetriever({
  * vectorStore: new MemoryVectorStore(new OpenAIEmbeddings()),
  * otherScoreKeys: ["importance"],
package/dist/hub/base.cjs CHANGED
@@ -4,6 +4,7 @@ exports.basePush = basePush;
 exports.basePull = basePull;
 exports.generateModelImportMap = generateModelImportMap;
 exports.generateOptionalImportMap = generateOptionalImportMap;
+exports.bindOutputSchema = bindOutputSchema;
 const langsmith_1 = require("langsmith");
 /**
  * Push a prompt to the hub.
@@ -138,3 +139,21 @@ modelClass) {
     }
     return optionalImportMap;
 }
+function bindOutputSchema(loadedSequence) {
+    if ("first" in loadedSequence &&
+        loadedSequence.first !== null &&
+        typeof loadedSequence.first === "object" &&
+        "schema" in loadedSequence.first &&
+        "last" in loadedSequence &&
+        loadedSequence.last !== null &&
+        typeof loadedSequence.last === "object" &&
+        "bound" in loadedSequence.last &&
+        loadedSequence.last.bound !== null &&
+        typeof loadedSequence.last.bound === "object" &&
+        "withStructuredOutput" in loadedSequence.last.bound &&
+        typeof loadedSequence.last.bound.withStructuredOutput === "function") {
+        // eslint-disable-next-line no-param-reassign
+        loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(loadedSequence.first.schema);
+    }
+    return loadedSequence;
+}
package/dist/hub/base.d.ts CHANGED
@@ -28,3 +28,4 @@ export declare function basePull(ownerRepoCommit: string, options?: {
 }): Promise<import("langsmith/schemas").PromptCommit>;
 export declare function generateModelImportMap(modelClass?: new (...args: any[]) => BaseLanguageModel): Record<string, any>;
 export declare function generateOptionalImportMap(modelClass?: new (...args: any[]) => BaseLanguageModel): Record<string, any>;
+export declare function bindOutputSchema<T extends Runnable>(loadedSequence: T): T;
package/dist/hub/base.js CHANGED
@@ -132,3 +132,21 @@ modelClass) {
     }
     return optionalImportMap;
 }
+export function bindOutputSchema(loadedSequence) {
+    if ("first" in loadedSequence &&
+        loadedSequence.first !== null &&
+        typeof loadedSequence.first === "object" &&
+        "schema" in loadedSequence.first &&
+        "last" in loadedSequence &&
+        loadedSequence.last !== null &&
+        typeof loadedSequence.last === "object" &&
+        "bound" in loadedSequence.last &&
+        loadedSequence.last.bound !== null &&
+        typeof loadedSequence.last.bound === "object" &&
+        "withStructuredOutput" in loadedSequence.last.bound &&
+        typeof loadedSequence.last.bound.withStructuredOutput === "function") {
+        // eslint-disable-next-line no-param-reassign
+        loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(loadedSequence.first.schema);
+    }
+    return loadedSequence;
+}
package/dist/hub/index.cjs CHANGED
@@ -25,7 +25,7 @@ async function pull(ownerRepoCommit, options) {
     const promptObject = await (0, base_js_1.basePull)(ownerRepoCommit, options);
     try {
         const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, (0, base_js_1.generateOptionalImportMap)(options?.modelClass), (0, base_js_1.generateModelImportMap)(options?.modelClass));
-        return loadedPrompt;
+        return (0, base_js_1.bindOutputSchema)(loadedPrompt);
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
     }
     catch (e) {
package/dist/hub/index.js CHANGED
@@ -1,5 +1,5 @@
 import { load } from "../load/index.js";
-import { basePush, basePull, generateModelImportMap, generateOptionalImportMap, } from "./base.js";
+import { basePush, basePull, generateModelImportMap, generateOptionalImportMap, bindOutputSchema, } from "./base.js";
 export { basePush as push };
 /**
  * Pull a prompt from the hub.
@@ -21,7 +21,7 @@ export async function pull(ownerRepoCommit, options) {
     const promptObject = await basePull(ownerRepoCommit, options);
     try {
         const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, generateOptionalImportMap(options?.modelClass), generateModelImportMap(options?.modelClass));
-        return loadedPrompt;
+        return bindOutputSchema(loadedPrompt);
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
     }
     catch (e) {
package/dist/hub/node.cjs CHANGED
@@ -85,5 +85,5 @@ async function pull(ownerRepoCommit, options) {
         }
     }
     const loadedPrompt = await (0, index_js_1.load)(JSON.stringify(promptObject.manifest), undefined, (0, base_js_1.generateOptionalImportMap)(modelClass), (0, base_js_1.generateModelImportMap)(modelClass));
-    return loadedPrompt;
+    return (0, base_js_1.bindOutputSchema)(loadedPrompt);
 }
package/dist/hub/node.js CHANGED
@@ -1,4 +1,4 @@
-import { basePush, basePull, generateModelImportMap, generateOptionalImportMap, } from "./base.js";
+import { basePush, basePull, generateModelImportMap, generateOptionalImportMap, bindOutputSchema, } from "./base.js";
 import { load } from "../load/index.js";
 // TODO: Make this the default, add web entrypoint in next breaking release
 export { basePush as push };
@@ -49,5 +49,5 @@ export async function pull(ownerRepoCommit, options) {
         }
     }
     const loadedPrompt = await load(JSON.stringify(promptObject.manifest), undefined, generateOptionalImportMap(modelClass), generateModelImportMap(modelClass));
-    return loadedPrompt;
+    return bindOutputSchema(loadedPrompt);
 }
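
Taken together, the hub changes route every `pull` result through the new `bindOutputSchema` helper: when the loaded manifest is a sequence whose first step carries a `schema` and whose last step wraps a model exposing `withStructuredOutput`, the schema is bound automatically. A sketch of the resulting behavior; the prompt name and input variable are hypothetical:

```typescript
import { pull } from "langchain/hub/node";

// A hypothetical hub prompt saved with an attached model and a
// structured output schema.
const chain = await pull("my-org/extraction-prompt");

// Because pull() now applies bindOutputSchema, the model step is already
// bound with withStructuredOutput(schema), so invoke() resolves to the
// parsed object rather than a raw AIMessage.
const person = await chain.invoke({ text: "John is 30 years old." });
console.log(person);
```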