langchain 1.0.0-alpha.1 → 1.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dist/agents/ReactAgent.cjs +1 -1
  2. package/dist/agents/ReactAgent.cjs.map +1 -1
  3. package/dist/agents/ReactAgent.d.cts.map +1 -1
  4. package/dist/agents/ReactAgent.d.ts.map +1 -1
  5. package/dist/agents/ReactAgent.js +2 -2
  6. package/dist/agents/ReactAgent.js.map +1 -1
  7. package/dist/agents/annotation.cjs +120 -0
  8. package/dist/agents/annotation.cjs.map +1 -1
  9. package/dist/agents/annotation.d.cts +15 -11
  10. package/dist/agents/annotation.d.cts.map +1 -1
  11. package/dist/agents/annotation.d.ts +15 -11
  12. package/dist/agents/annotation.d.ts.map +1 -1
  13. package/dist/agents/annotation.js +120 -1
  14. package/dist/agents/annotation.js.map +1 -1
  15. package/dist/agents/index.d.cts +1 -1
  16. package/dist/agents/index.d.ts +1 -1
  17. package/dist/agents/nodes/AgentNode.cjs +8 -3
  18. package/dist/agents/nodes/AgentNode.cjs.map +1 -1
  19. package/dist/agents/nodes/AgentNode.js +8 -3
  20. package/dist/agents/nodes/AgentNode.js.map +1 -1
  21. package/dist/agents/nodes/ToolNode.cjs +1 -1
  22. package/dist/agents/nodes/ToolNode.cjs.map +1 -1
  23. package/dist/agents/nodes/ToolNode.js +1 -1
  24. package/dist/agents/nodes/ToolNode.js.map +1 -1
  25. package/dist/agents/responses.cjs +1 -2
  26. package/dist/agents/responses.cjs.map +1 -1
  27. package/dist/agents/responses.d.cts +5 -0
  28. package/dist/agents/responses.d.cts.map +1 -1
  29. package/dist/agents/responses.d.ts +5 -0
  30. package/dist/agents/responses.d.ts.map +1 -1
  31. package/dist/agents/responses.js +1 -2
  32. package/dist/agents/responses.js.map +1 -1
  33. package/dist/agents/types.d.cts +21 -3
  34. package/dist/agents/types.d.cts.map +1 -1
  35. package/dist/agents/types.d.ts +21 -3
  36. package/dist/agents/types.d.ts.map +1 -1
  37. package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
  38. package/dist/evaluation/criteria/criteria.d.ts.map +1 -1
  39. package/dist/hub/base.cjs.map +1 -1
  40. package/dist/hub/base.js.map +1 -1
  41. package/dist/index.cjs +50 -1
  42. package/dist/index.cjs.map +1 -1
  43. package/dist/index.d.cts +3 -3
  44. package/dist/index.d.ts +3 -3
  45. package/dist/index.js +10 -3
  46. package/dist/index.js.map +1 -1
  47. package/package.json +26 -25
package/dist/agents/types.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"types.d.ts","names":["InteropZodObject","InteropZodType","LangGraphRunnableConfig","START","Runtime","StateGraph","LanguageModelLike","BaseChatModel","SystemMessage","BaseMessageLike","BaseMessage","All","BaseCheckpointSaver","BaseStore","DynamicTool","StructuredToolInterface","Runnable","RunnableLike","RunnableToolLike","ToolNode","PreHookAnnotation","AnyAnnotationRoot","ToAnnotationRoot","ResponseFormat","ToolStrategy","TypedToolStrategy","ProviderStrategy","ResponseFormatUndefined","JsonSchemaFormat","META_EXTRAS_DESCRIPTION_PREFIX","N","ExecutedToolCall","Record","LLMCall","ExtractZodType","T","U","ExtractZodArrayTypes","Rest","A","InferResponseFormatType","ReducedZodChannel","TReducerSchema","ServerTool","ClientTool","Prompt","StateSchema","ContextSchema","Promise","AgentState","AnnotationRoot","AgentRuntime","CreateReactAgentParams","StructuredResponseType","DynamicLLMFunction","ResponseFormatType","AbortSignal","ConfigurableModelInterface","InternalAgentState","WithStateGraphNodes","Graph","SD","S","K","I","O","C"],"sources":["../../src/agents/types.d.ts"],"sourcesContent":["import type { InteropZodObject, InteropZodType } from \"@langchain/core/utils/types\";\nimport type { LangGraphRunnableConfig, START, Runtime, StateGraph } from \"@langchain/langgraph\";\nimport type { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { SystemMessage, BaseMessageLike, BaseMessage } from \"@langchain/core/messages\";\nimport type { All, BaseCheckpointSaver, BaseStore } from \"@langchain/langgraph-checkpoint\";\nimport type { DynamicTool, StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { Runnable, RunnableLike, RunnableToolLike } from \"@langchain/core/runnables\";\nimport type { ToolNode } from \"./nodes/ToolNode.js\";\nimport type { PreHookAnnotation, AnyAnnotationRoot, ToAnnotationRoot } from \"./annotation.js\";\nimport type { ResponseFormat, ToolStrategy, TypedToolStrategy, ProviderStrategy, ResponseFormatUndefined, JsonSchemaFormat } from \"./responses.js\";\nexport declare const META_EXTRAS_DESCRIPTION_PREFIX = \"lg:\";\nexport type N = typeof START | \"agent\" | \"tools\";\n/**\n * Information about a tool call that has been executed.\n */\nexport interface ExecutedToolCall {\n /**\n * The name of the tool that was called.\n */\n name: string;\n /**\n * The arguments that were passed to the tool.\n */\n args: Record<string, unknown>;\n /**\n * The ID of the tool call.\n */\n tool_id: string;\n /**\n * The result of the tool call (if available).\n */\n result?: unknown;\n}\n/**\n * Information about an LLM invocation.\n */\nexport interface LLMCall {\n /**\n * The messages that were sent to the LLM.\n */\n messages: BaseMessage[];\n /**\n * The response from the LLM.\n */\n response?: BaseMessage;\n}\n/**\n * Type helper to extract the inferred type from a single Zod schema or array of schemas\n */\nexport type ExtractZodType<T> = T extends InteropZodType<infer U> ? U : T extends readonly InteropZodType<any>[] ? ExtractZodArrayTypes<T> : never;\n/**\n * Type helper to extract union type from an array of Zod schemas\n */\nexport type ExtractZodArrayTypes<T extends readonly InteropZodType<any>[]> = T extends readonly [InteropZodType<infer A>, ...infer Rest] ? Rest extends readonly InteropZodType<any>[] ? 
A | ExtractZodArrayTypes<Rest> : A : never;\n/**\n * Type helper to extract the structured response type from responseFormat\n */\nexport type InferResponseFormatType<T> = T extends InteropZodType<infer U> ? U extends Record<string, any> ? U : Record<string, any> : T extends readonly InteropZodType<any>[] ? ExtractZodArrayTypes<T> : T extends ToolStrategy[] ? Record<string, any> // ToolStrategy arrays will be handled at runtime\n : T extends ResponseFormat ? Record<string, any> // Single ResponseFormat will be handled at runtime\n : Record<string, any>;\n/** @internal */\nexport type ReducedZodChannel<T extends InteropZodType, TReducerSchema extends InteropZodType> = T & {\n lg_reducer_schema: TReducerSchema;\n};\nexport type ServerTool = Record<string, unknown>;\nexport type ClientTool = StructuredToolInterface | DynamicTool | RunnableToolLike;\nexport type Prompt<StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = SystemMessage | string | ((state: ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], config: LangGraphRunnableConfig<ToAnnotationRoot<ContextSchema>[\"State\"]>) => BaseMessageLike[] | Promise<BaseMessageLike[]>) | Runnable;\nexport type AgentState<AnnotationRoot extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = ToAnnotationRoot<AnnotationRoot>[\"State\"] & PreHookAnnotation[\"State\"];\nexport type AgentRuntime<AnnotationRoot extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = Runtime<ToAnnotationRoot<AnnotationRoot>[\"State\"]>;\nexport type CreateReactAgentParams<StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseType extends Record<string, any> = Record<string, any>, ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, ResponseFormatType = InteropZodType<StructuredResponseType> | InteropZodType<unknown>[] | JsonSchemaFormat | JsonSchemaFormat[] | ResponseFormat | TypedToolStrategy<StructuredResponseType> | ToolStrategy<StructuredResponseType> | ProviderStrategy<StructuredResponseType> | ResponseFormatUndefined> = {\n /** The chat model that can utilize OpenAI-style tool calling. */\n llm?: LanguageModelLike | DynamicLLMFunction<StateSchema, ContextSchema>;\n /**\n * Initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * @uses {@link initChatModel}\n * @example\n * ```ts\n * const agent = createReactAgent({\n * model: \"anthropic:claude-3-7-sonnet-latest\",\n * // ...\n * });\n * ```\n */\n model?: string;\n /** A list of tools or a ToolNode. */\n tools: ToolNode | (ServerTool | ClientTool)[];\n /**\n * An optional prompt for the LLM. 
This takes full graph state BEFORE the LLM is called and prepares the input to LLM.\n *\n * Can take a few different forms:\n *\n * - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state[\"messages\"].\n * - SystemMessage: this is added to the beginning of the list of messages in state[\"messages\"].\n * - Function: This function should take in full graph state and the output is then passed to the language model.\n * - Runnable: This runnable should take in full graph state and the output is then passed to the language model.\n *\n * Note:\n * Prior to `v0.2.46`, the prompt was set using `stateModifier` / `messagesModifier` parameters.\n * This is now deprecated and will be removed in a future release.\n *\n * Cannot be used together with `prepareCall`.\n */\n prompt?: Prompt<StateSchema, ContextSchema>;\n /**\n * Additional state schema for the agent. It allows to define additional state keys that will be\n * persisted between agent invocations.\n *\n * @example\n * ```ts\n * // State schema defines data that persists across agent invocations\n * const stateSchema = z.object({\n * userPreferences: z.object({\n * theme: z.enum([\"light\", \"dark\"]),\n * language: z.string(),\n * }),\n * taskHistory: z.array(z.string()),\n * currentWorkflow: z.string().optional(),\n * });\n *\n * // Context schema defines runtime parameters passed per invocation\n * const contextSchema = z.object({ ... });\n *\n * const agent = createReactAgent({\n * llm: model,\n * tools: [updatePreferences, addTask],\n * stateSchema, // Persisted: preferences, e.g. task history, workflow state\n * contextSchema, // Per-invocation: user ID, session, API keys, etc.\n * prompt: (state, config) => {\n * // ...\n * },\n * });\n *\n * // First invocation - state starts empty, context provided\n * await agent.invoke({\n * messages: [new HumanMessage(\"Set my theme to dark\")],\n * }, {\n * context: { userId: \"user123\", sessionId: \"sess456\", apiKeys: {...} }\n * });\n *\n * // Second invocation - state persists, new context\n * await agent.invoke({\n * messages: [new HumanMessage(\"Add a task to review code\")],\n * }, {\n * context: { userId: \"user123\", sessionId: \"sess789\", apiKeys: {...} }\n * });\n * // State now contains: userPreferences.theme=\"dark\", taskHistory=[\"review code\"]\n * ```\n */\n stateSchema?: StateSchema;\n /**\n * An optional schema for the context. It allows to pass in a typed context object into the agent\n * invocation and allows to access it in hooks such as `prompt`, `preModelHook`, `postModelHook`, etc.\n * As opposed to the agent state, defined in `stateSchema`, the context is not persisted between\n * agent invocations.\n *\n * @example\n * ```ts\n * const agent = createReactAgent({\n * llm: model,\n * tools: [getWeather],\n * contextSchema: z.object({\n * capital: z.string(),\n * }),\n * prompt: (state, config) => {\n * return [\n * new SystemMessage(`You are a helpful assistant. The capital of France is ${config.context.capital}.`),\n * ];\n * },\n * });\n *\n * const result = await agent.invoke({\n * messages: [\n * new SystemMessage(\"You are a helpful assistant.\"),\n * new HumanMessage(\"What is the capital of France?\"),\n * ],\n * }, {\n * context: {\n * capital: \"Paris\",\n * },\n * });\n * ```\n */\n contextSchema?: ContextSchema;\n /** An optional checkpoint saver to persist the agent's state. */\n checkpointSaver?: BaseCheckpointSaver | boolean;\n /** An optional checkpoint saver to persist the agent's state. 
Alias of \"checkpointSaver\". */\n checkpointer?: BaseCheckpointSaver | boolean;\n /** An optional list of node names to interrupt before running. */\n interruptBefore?: N[] | All;\n /** An optional list of node names to interrupt after running. */\n interruptAfter?: N[] | All;\n store?: BaseStore;\n /**\n * An optional schema for the final agent output.\n *\n * If provided, output will be formatted to match the given schema and returned in the 'structuredResponse' state key.\n * If not provided, `structuredResponse` will not be present in the output state.\n *\n * Can be passed in as:\n * - Zod schema\n * ```ts\n * const agent = createReactAgent({\n * responseFormat: z.object({\n * capital: z.string(),\n * }),\n * // ...\n * });\n * ```\n * - JSON schema\n * ```ts\n * const agent = createReactAgent({\n * responseFormat: {\n * type: \"json_schema\",\n * schema: {\n * type: \"object\",\n * properties: {\n * capital: { type: \"string\" },\n * },\n * required: [\"capital\"],\n * },\n * },\n * // ...\n * });\n * ```\n * - Create React Agent ResponseFormat\n * ```ts\n * import { providerStrategy, toolStrategy } from \"langchain\";\n * const agent = createReactAgent({\n * responseFormat: providerStrategy(\n * z.object({\n * capital: z.string(),\n * })\n * ),\n * // or\n * responseFormat: [\n * toolStrategy({ ... }),\n * toolStrategy({ ... }),\n * ]\n * // ...\n * });\n * ```\n *\n * **Note**: The graph will make a separate call to the LLM to generate the structured response after the agent loop is finished.\n * This is not the only strategy to get structured responses, see more options in [this guide](https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output/).\n */\n responseFormat?: ResponseFormatType;\n /**\n * An optional name for the agent.\n */\n name?: string;\n /**\n * An optional description for the agent.\n * This can be used to describe the agent to the underlying supervisor LLM.\n */\n description?: string;\n /**\n * Use to specify how to expose the agent name to the underlying supervisor LLM.\n * - `undefined`: Relies on the LLM provider {@link AIMessage#name}. 
Currently, only OpenAI supports this.\n * - `\"inline\"`: Add the agent name directly into the content field of the {@link AIMessage} using XML-style tags.\n * Example: `\"How can I help you\"` -> `\"<name>agent_name</name><content>How can I help you?</content>\"`\n */\n includeAgentName?: \"inline\" | undefined;\n /**\n * An optional node to add before the `agent` node (i.e., the node that calls the LLM).\n * Useful for managing long message histories (e.g., message trimming, summarization, etc.).\n */\n preModelHook?: RunnableLike<ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], ToAnnotationRoot<StateSchema>[\"Update\"] & PreHookAnnotation[\"Update\"], LangGraphRunnableConfig<ToAnnotationRoot<ContextSchema>[\"State\"]>>;\n /**\n * An optional node to add after the `agent` node (i.e., the node that calls the LLM).\n * Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.\n */\n postModelHook?: RunnableLike<ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], ToAnnotationRoot<StateSchema>[\"Update\"] & PreHookAnnotation[\"Update\"], LangGraphRunnableConfig<ToAnnotationRoot<ContextSchema>[\"State\"]>>;\n /**\n * An optional abort signal that indicates that the overall operation should be aborted.\n */\n signal?: AbortSignal;\n /**\n * Determines the version of the graph to create.\n *\n * Can be one of\n * - `\"v1\"`: The tool node processes a single message. All tool calls in the message are\n * executed in parallel within the tool node.\n * - `\"v2\"`: The tool node processes a single tool call. Tool calls are distributed across\n * multiple instances of the tool node using the Send API.\n *\n * @default `\"v2\"`\n */\n version?: \"v1\" | \"v2\";\n};\nexport interface ConfigurableModelInterface {\n _queuedMethodOperations: Record<string, unknown>;\n _model: () => Promise<BaseChatModel>;\n}\nexport type InternalAgentState<StructuredResponseType extends Record<string, unknown> | undefined = Record<string, unknown>> = {\n messages: BaseMessage[];\n} & (StructuredResponseType extends ResponseFormatUndefined ? Record<string, never> : {\n structuredResponse: StructuredResponseType;\n});\nexport type WithStateGraphNodes<K extends string, Graph> = Graph extends StateGraph<infer SD, infer S, infer U, infer N, infer I, infer O, infer C> ? 
StateGraph<SD, S, U, N | K, I, O, C> : never;\n/**\n * @deprecated likely to be removed in the next version of the agent\n */\ntype DynamicLLMFunction<StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = (state: ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], runtime: Runtime<ToAnnotationRoot<ContextSchema>[\"State\"]>) => Promise<LanguageModelLike> | LanguageModelLike;\nexport {};\n"],"mappings":";;;;;;;;;;;;;KAYY8B,CAAAA,UAAW3B;AAAvB;AA0CA;;;;AAaka;AACla;AAAsB,KAdVkC,oBAcU,CAAA,UAAA,SAd8BpC,cAc9B,CAAA,GAAA,CAAA,EAAA,CAAA,GAduDkC,CAcvD,SAAA,SAAA,CAd2ElC,cAc3E,CAAA,KAAA,EAAA,CAAA,EAAA,GAAA,KAAA,KAAA,CAAA,GAdqHqC,IAcrH,SAAA,SAd2IrC,cAc3I,CAAA,GAAA,CAAA,EAAA,GAdmKsC,CAcnK,GAduKF,oBAcvK,CAd4LC,IAc5L,CAAA,GAdoMC,CAcpM,GAAA,KAAA;;;;;AAEqDvC,KAL/D2C,UAAAA,GAAaX,MAKkDhC,CAAAA,MAAAA,EAAAA,OAAAA,CAAAA;AAAmBqB,KAJlFuB,UAAAA,GAAa7B,uBAIqEM,GAJ3CP,WAI2CO,GAJ7BH,gBAI6BG;AAE/DW,KALnBa,MAKmBb,CAAAA,oBALQX,iBAKRW,GAL4BhC,gBAK5BgC,GAL+CX,iBAK/CW,EAAAA,sBALwFX,iBAKxFW,GAL4GhC,gBAK5GgC,GAL+HX,iBAK/HW,CAAAA,GALoJxB,aAKpJwB,GAAAA,MAAAA,GAAAA,CAAAA,CAAAA,KAAAA,EALsLV,gBAKtLU,CALuMc,WAKvMd,CAAAA,CAAAA,OAAAA,CAAAA,GAL+NZ,iBAK/NY,CAAAA,OAAAA,CAAAA,EAAAA,MAAAA,EALmQ9B,uBAKnQ8B,CAL2RV,gBAK3RU,CAL4Se,aAK5Sf,CAAAA,CAAAA,OAAAA,CAAAA,CAAAA,EAAAA,GALyUvB,eAKzUuB,EAAAA,GAL6VgB,OAK7VhB,CALqWvB,eAKrWuB,EAAAA,CAAAA,CAAAA,GAL2XhB,QAK3XgB;AAAsBA,KAJzCiB,UAIyCjB,CAAAA,uBAJPX,iBAIOW,GAJahC,gBAIbgC,GAJgCX,iBAIhCW,CAAAA,GAJqDV,gBAIrDU,CAJsEkB,cAItElB,CAAAA,CAAAA,OAAAA,CAAAA,GAJiGZ,iBAIjGY,CAAAA,OAAAA,CAAAA;AAA+DhC,KAFxGoD,sBAEwGpD,CAAAA,oBAF7DqB,iBAE6DrB,GAFzCA,gBAEyCA,GAFtBqB,iBAEsBrB;;+BAArFgC,MAA+JqB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,GAAzIrB,MAAyIqB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,EAAAA,sBAA9FhC,iBAA8FgC,GAA1ErD,gBAA0EqD,GAAvDhC,iBAAuDgC,EAAAA,qBAAfpD,cAAeoD,CAAAA,sBAAAA,CAAAA,GAA0BpD,cAA1BoD,CAAAA,OAAAA,CAAAA,EAAAA,GAAsDzB,gBAAtDyB,GAAyEzB,gBAAzEyB,EAAAA,GAA8F9B,cAA9F8B,GAA+G5B,iBAA/G4B,CAAiIA,sBAAjIA,CAAAA,GAA2J7B,YAA3J6B,CAAwKA,sBAAxKA,CAAAA,GAAkM3B,gBAAlM2B,CAAmNA,sBAAnNA,CAAAA,GAA6O1B,uBAA7O0B,CAAAA,GAAAA;EAAsB;EAAvB,GAA2BpD,CAAAA,EAE9MK,iBAF8ML,GAE1LqD,kBAF0LrD,CAEvK6C,WAFuK7C,EAE1J8C,aAF0J9C,CAAAA;EAAc;;;;;;;;;;;;;EAE3J,KAA7CqD,CAAAA,EAAAA,MAAAA;EAAkB;EAgB7B,KAAIX,EAAZxB,QAAYwB,GAAAA,CAAAA,UAAAA,GAAaC,UAAbD,CAAAA,EAAAA;EAAU;;;;;;;;;;;;;;;;EAqLe,MAAyBvB,CAAAA,EApK5DyB,MAoK4DzB,CApKrD0B,WAoKqD1B,EApKxC2B,aAoKwC3B,CAAAA;EAAiB;;;;;;;;;;;;;;;;;;AASlE;AAuB2K;;;;;;;;;;;;;;;;;AAI+K;;;;;;;;;gBA1JhW0B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;kBAkCEC;;oBAEEnC;;iBAEHA;;oBAEGkB,MAAMnB;;mBAEPmB,MAAMnB;UACfE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;mBAsDS0C;;;;;;;;;;;;;;;;;;;;;iBAqBFtC,aAAaK,iBAAiBwB,wBAAwB1B,4BAA4BE,iBAAiBwB,yBAAyB1B,6BAA6BlB,wBAAwBoB,iBAAiByB;;;;;kBAKjM9B,aAAaK,iBAAiBwB,wBAAwB1B,4BAA4BE,iBAAiBwB,yBAAyB1B,6BAA6BlB,wBAAwBoB,iBAAiByB;;;;WAIzMS;;;;;;;;;;;;;;;;;KA2BRF,uCAAuCjC,oBAAoBrB,mBAAmBqB,yCAAyCA,oBAAoBrB,mBAAmBqB,6BAA6BC,iBAAiBwB,wBAAwB1B,qCAAqChB,QAAQkB,iBAAiByB,6BAA6BC,QAAQ1C,qBAAqBA"}
+ {"version":3,"file":"types.d.ts","names":["InteropZodObject","InteropZodType","LangGraphRunnableConfig","START","StateGraph","LanguageModelLike","BaseChatModel","SystemMessage","BaseMessageLike","BaseMessage","All","BaseCheckpointSaver","BaseStore","DynamicTool","StructuredToolInterface","Runnable","RunnableLike","RunnableToolLike","ToolNode","PreHookAnnotation","AnyAnnotationRoot","ToAnnotationRoot","ResponseFormat","ToolStrategy","TypedToolStrategy","ProviderStrategy","ResponseFormatUndefined","JsonSchemaFormat","META_EXTRAS_DESCRIPTION_PREFIX","N","ExecutedToolCall","Record","LLMCall","ExtractZodType","T","U","ExtractZodArrayTypes","Rest","A","InferResponseFormatType","ReducedZodChannel","TReducerSchema","ServerTool","ClientTool","Prompt","StateSchema","ContextSchema","Promise","AgentState","AnnotationRoot","AgentRuntime","ContextType","AbortSignal","CreateReactAgentParams","StructuredResponseType","DynamicLLMFunction","ResponseFormatType","ConfigurableModelInterface","InternalAgentState","WithStateGraphNodes","Graph","SD","S","K","I","O","C"],"sources":["../../src/agents/types.d.ts"],"sourcesContent":["import type { InteropZodObject, InteropZodType } from \"@langchain/core/utils/types\";\nimport type { LangGraphRunnableConfig, START, StateGraph } from \"@langchain/langgraph\";\nimport type { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { SystemMessage, BaseMessageLike, BaseMessage } from \"@langchain/core/messages\";\nimport type { All, BaseCheckpointSaver, BaseStore } from \"@langchain/langgraph-checkpoint\";\nimport type { DynamicTool, StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { Runnable, RunnableLike, RunnableToolLike } from \"@langchain/core/runnables\";\nimport type { ToolNode } from \"./nodes/ToolNode.js\";\nimport type { PreHookAnnotation, AnyAnnotationRoot, ToAnnotationRoot } from \"./annotation.js\";\nimport type { ResponseFormat, ToolStrategy, TypedToolStrategy, ProviderStrategy, ResponseFormatUndefined, JsonSchemaFormat } from \"./responses.js\";\nexport declare const META_EXTRAS_DESCRIPTION_PREFIX = \"lg:\";\nexport type N = typeof START | \"agent\" | \"tools\";\n/**\n * Information about a tool call that has been executed.\n */\nexport interface ExecutedToolCall {\n /**\n * The name of the tool that was called.\n */\n name: string;\n /**\n * The arguments that were passed to the tool.\n */\n args: Record<string, unknown>;\n /**\n * The ID of the tool call.\n */\n tool_id: string;\n /**\n * The result of the tool call (if available).\n */\n result?: unknown;\n}\n/**\n * Information about an LLM invocation.\n */\nexport interface LLMCall {\n /**\n * The messages that were sent to the LLM.\n */\n messages: BaseMessage[];\n /**\n * The response from the LLM.\n */\n response?: BaseMessage;\n}\n/**\n * Type helper to extract the inferred type from a single Zod schema or array of schemas\n */\nexport type ExtractZodType<T> = T extends InteropZodType<infer U> ? U : T extends readonly InteropZodType<any>[] ? ExtractZodArrayTypes<T> : never;\n/**\n * Type helper to extract union type from an array of Zod schemas\n */\nexport type ExtractZodArrayTypes<T extends readonly InteropZodType<any>[]> = T extends readonly [InteropZodType<infer A>, ...infer Rest] ? Rest extends readonly InteropZodType<any>[] ? 
A | ExtractZodArrayTypes<Rest> : A : never;\n/**\n * Type helper to extract the structured response type from responseFormat\n */\nexport type InferResponseFormatType<T> = T extends InteropZodType<infer U> ? U extends Record<string, any> ? U : Record<string, any> : T extends readonly InteropZodType<any>[] ? ExtractZodArrayTypes<T> : T extends ToolStrategy[] ? Record<string, any> // ToolStrategy arrays will be handled at runtime\n : T extends ResponseFormat ? Record<string, any> // Single ResponseFormat will be handled at runtime\n : Record<string, any>;\n/** @internal */\nexport type ReducedZodChannel<T extends InteropZodType, TReducerSchema extends InteropZodType> = T & {\n lg_reducer_schema: TReducerSchema;\n};\nexport type ServerTool = Record<string, unknown>;\nexport type ClientTool = StructuredToolInterface | DynamicTool | RunnableToolLike;\nexport type Prompt<StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = SystemMessage | string | ((state: ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], config: LangGraphRunnableConfig<ToAnnotationRoot<ContextSchema>[\"State\"]>) => BaseMessageLike[] | Promise<BaseMessageLike[]>) | Runnable;\nexport type AgentState<AnnotationRoot extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = ToAnnotationRoot<AnnotationRoot>[\"State\"] & PreHookAnnotation[\"State\"];\nexport interface AgentRuntime<ContextType = Record<string, unknown>> {\n /**\n * The context of the agent.\n */\n context?: ContextType;\n /**\n * The store passed to the agent.\n */\n store?: BaseStore;\n /**\n * The writer of the agent to write to the output stream.\n */\n writer?: (chunk: unknown) => void;\n /**\n * An optional abort signal that indicates that the overall operation should be aborted.\n */\n signal?: AbortSignal;\n}\nexport type CreateReactAgentParams<StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, \n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nStructuredResponseType extends Record<string, any> = Record<string, any>, ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, ResponseFormatType = InteropZodType<StructuredResponseType> | InteropZodType<unknown>[] | JsonSchemaFormat | JsonSchemaFormat[] | ResponseFormat | TypedToolStrategy<StructuredResponseType> | ToolStrategy<StructuredResponseType> | ProviderStrategy<StructuredResponseType> | ResponseFormatUndefined> = {\n /** The chat model that can utilize OpenAI-style tool calling. */\n llm?: LanguageModelLike | DynamicLLMFunction<StateSchema, ContextSchema>;\n /**\n * Initializes a ChatModel based on the provided model name and provider.\n * It supports various model providers and allows for runtime configuration of model parameters.\n *\n * @uses {@link initChatModel}\n * @example\n * ```ts\n * const agent = createReactAgent({\n * model: \"anthropic:claude-3-7-sonnet-latest\",\n * // ...\n * });\n * ```\n */\n model?: string;\n /** A list of tools or a ToolNode. */\n tools: ToolNode | (ServerTool | ClientTool)[];\n /**\n * An optional prompt for the LLM. 
This takes full graph state BEFORE the LLM is called and prepares the input to LLM.\n *\n * Can take a few different forms:\n *\n * - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state[\"messages\"].\n * - SystemMessage: this is added to the beginning of the list of messages in state[\"messages\"].\n * - Function: This function should take in full graph state and the output is then passed to the language model.\n * - Runnable: This runnable should take in full graph state and the output is then passed to the language model.\n *\n * Note:\n * Prior to `v0.2.46`, the prompt was set using `stateModifier` / `messagesModifier` parameters.\n * This is now deprecated and will be removed in a future release.\n *\n * Cannot be used together with `prepareCall`.\n */\n prompt?: Prompt<StateSchema, ContextSchema>;\n /**\n * Additional state schema for the agent. It allows to define additional state keys that will be\n * persisted between agent invocations.\n *\n * @example\n * ```ts\n * // State schema defines data that persists across agent invocations\n * const stateSchema = z.object({\n * userPreferences: z.object({\n * theme: z.enum([\"light\", \"dark\"]),\n * language: z.string(),\n * }),\n * taskHistory: z.array(z.string()),\n * currentWorkflow: z.string().optional(),\n * });\n *\n * // Context schema defines runtime parameters passed per invocation\n * const contextSchema = z.object({ ... });\n *\n * const agent = createReactAgent({\n * llm: model,\n * tools: [updatePreferences, addTask],\n * stateSchema, // Persisted: preferences, e.g. task history, workflow state\n * contextSchema, // Per-invocation: user ID, session, API keys, etc.\n * prompt: (state, config) => {\n * // ...\n * },\n * });\n *\n * // First invocation - state starts empty, context provided\n * await agent.invoke({\n * messages: [new HumanMessage(\"Set my theme to dark\")],\n * }, {\n * context: { userId: \"user123\", sessionId: \"sess456\", apiKeys: {...} }\n * });\n *\n * // Second invocation - state persists, new context\n * await agent.invoke({\n * messages: [new HumanMessage(\"Add a task to review code\")],\n * }, {\n * context: { userId: \"user123\", sessionId: \"sess789\", apiKeys: {...} }\n * });\n * // State now contains: userPreferences.theme=\"dark\", taskHistory=[\"review code\"]\n * ```\n */\n stateSchema?: StateSchema;\n /**\n * An optional schema for the context. It allows to pass in a typed context object into the agent\n * invocation and allows to access it in hooks such as `prompt`, `preModelHook`, `postModelHook`, etc.\n * As opposed to the agent state, defined in `stateSchema`, the context is not persisted between\n * agent invocations.\n *\n * @example\n * ```ts\n * const agent = createReactAgent({\n * llm: model,\n * tools: [getWeather],\n * contextSchema: z.object({\n * capital: z.string(),\n * }),\n * prompt: (state, config) => {\n * return [\n * new SystemMessage(`You are a helpful assistant. The capital of France is ${config.context.capital}.`),\n * ];\n * },\n * });\n *\n * const result = await agent.invoke({\n * messages: [\n * new SystemMessage(\"You are a helpful assistant.\"),\n * new HumanMessage(\"What is the capital of France?\"),\n * ],\n * }, {\n * context: {\n * capital: \"Paris\",\n * },\n * });\n * ```\n */\n contextSchema?: ContextSchema;\n /** An optional checkpoint saver to persist the agent's state. */\n checkpointSaver?: BaseCheckpointSaver | boolean;\n /** An optional checkpoint saver to persist the agent's state. 
Alias of \"checkpointSaver\". */\n checkpointer?: BaseCheckpointSaver | boolean;\n /** An optional list of node names to interrupt before running. */\n interruptBefore?: N[] | All;\n /** An optional list of node names to interrupt after running. */\n interruptAfter?: N[] | All;\n store?: BaseStore;\n /**\n * An optional schema for the final agent output.\n *\n * If provided, output will be formatted to match the given schema and returned in the 'structuredResponse' state key.\n * If not provided, `structuredResponse` will not be present in the output state.\n *\n * Can be passed in as:\n * - Zod schema\n * ```ts\n * const agent = createReactAgent({\n * responseFormat: z.object({\n * capital: z.string(),\n * }),\n * // ...\n * });\n * ```\n * - JSON schema\n * ```ts\n * const agent = createReactAgent({\n * responseFormat: {\n * type: \"json_schema\",\n * schema: {\n * type: \"object\",\n * properties: {\n * capital: { type: \"string\" },\n * },\n * required: [\"capital\"],\n * },\n * },\n * // ...\n * });\n * ```\n * - Create React Agent ResponseFormat\n * ```ts\n * import { providerStrategy, toolStrategy } from \"langchain\";\n * const agent = createReactAgent({\n * responseFormat: providerStrategy(\n * z.object({\n * capital: z.string(),\n * })\n * ),\n * // or\n * responseFormat: [\n * toolStrategy({ ... }),\n * toolStrategy({ ... }),\n * ]\n * // ...\n * });\n * ```\n *\n * **Note**: The graph will make a separate call to the LLM to generate the structured response after the agent loop is finished.\n * This is not the only strategy to get structured responses, see more options in [this guide](https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output/).\n */\n responseFormat?: ResponseFormatType;\n /**\n * An optional name for the agent.\n */\n name?: string;\n /**\n * An optional description for the agent.\n * This can be used to describe the agent to the underlying supervisor LLM.\n */\n description?: string;\n /**\n * Use to specify how to expose the agent name to the underlying supervisor LLM.\n * - `undefined`: Relies on the LLM provider {@link AIMessage#name}. 
Currently, only OpenAI supports this.\n * - `\"inline\"`: Add the agent name directly into the content field of the {@link AIMessage} using XML-style tags.\n * Example: `\"How can I help you\"` -> `\"<name>agent_name</name><content>How can I help you?</content>\"`\n */\n includeAgentName?: \"inline\" | undefined;\n /**\n * An optional node to add before the `agent` node (i.e., the node that calls the LLM).\n * Useful for managing long message histories (e.g., message trimming, summarization, etc.).\n */\n preModelHook?: RunnableLike<ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], ToAnnotationRoot<StateSchema>[\"Update\"] & PreHookAnnotation[\"Update\"], LangGraphRunnableConfig<ToAnnotationRoot<ContextSchema>[\"State\"]>>;\n /**\n * An optional node to add after the `agent` node (i.e., the node that calls the LLM).\n * Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.\n */\n postModelHook?: RunnableLike<ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], ToAnnotationRoot<StateSchema>[\"Update\"] & PreHookAnnotation[\"Update\"], LangGraphRunnableConfig<ToAnnotationRoot<ContextSchema>[\"State\"]>>;\n /**\n * An optional abort signal that indicates that the overall operation should be aborted.\n */\n signal?: AbortSignal;\n /**\n * Determines the version of the graph to create.\n *\n * Can be one of\n * - `\"v1\"`: The tool node processes a single message. All tool calls in the message are\n * executed in parallel within the tool node.\n * - `\"v2\"`: The tool node processes a single tool call. Tool calls are distributed across\n * multiple instances of the tool node using the Send API.\n *\n * @default `\"v2\"`\n */\n version?: \"v1\" | \"v2\";\n};\nexport interface ConfigurableModelInterface {\n _queuedMethodOperations: Record<string, unknown>;\n _model: () => Promise<BaseChatModel>;\n}\nexport type InternalAgentState<StructuredResponseType extends Record<string, unknown> | undefined = Record<string, unknown>> = {\n messages: BaseMessage[];\n} & (StructuredResponseType extends ResponseFormatUndefined ? Record<string, never> : {\n structuredResponse: StructuredResponseType;\n});\nexport type WithStateGraphNodes<K extends string, Graph> = Graph extends StateGraph<infer SD, infer S, infer U, infer N, infer I, infer O, infer C> ? 
StateGraph<SD, S, U, N | K, I, O, C> : never;\n/**\n * @deprecated likely to be removed in the next version of the agent\n */\ntype DynamicLLMFunction<StateSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot, ContextSchema extends AnyAnnotationRoot | InteropZodObject = AnyAnnotationRoot> = (state: ToAnnotationRoot<StateSchema>[\"State\"] & PreHookAnnotation[\"State\"], runtime: AgentRuntime<ToAnnotationRoot<ContextSchema>[\"State\"]>) => Promise<LanguageModelLike> | LanguageModelLike;\nexport {};\n"],"mappings":";;;;;;;;;;;;;KAYY6B,CAAAA,UAAW1B;AAAvB;AA0CA;;;;AAaka;AACla;AAAsB,KAdViC,oBAcU,CAAA,UAAA,SAd8BnC,cAc9B,CAAA,GAAA,CAAA,EAAA,CAAA,GAduDiC,CAcvD,SAAA,SAAA,CAd2EjC,cAc3E,CAAA,KAAA,EAAA,CAAA,EAAA,GAAA,KAAA,KAAA,CAAA,GAdqHoC,IAcrH,SAAA,SAd2IpC,cAc3I,CAAA,GAAA,CAAA,EAAA,GAdmKqC,CAcnK,GAduKF,oBAcvK,CAd4LC,IAc5L,CAAA,GAdoMC,CAcpM,GAAA,KAAA;;;;;AAKRa,KARFT,UAAAA,GAAaX,MAQXoB,CAAAA,MAAAA,EAAAA,OAAAA,CAAAA;AAIFvC,KAXA+B,UAAAA,GAAa7B,uBAWbF,GAXuCC,WAWvCD,GAXqDK,gBAWrDL;AAQCwC,KAlBDR,MAkBCQ,CAAAA,oBAlB0BhC,iBAkB1BgC,GAlB8CpD,gBAkB9CoD,GAlBiEhC,iBAkBjEgC,EAAAA,sBAlB0GhC,iBAkB1GgC,GAlB8HpD,gBAkB9HoD,GAlBiJhC,iBAkBjJgC,CAAAA,GAlBsK7C,aAkBtK6C,GAAAA,MAAAA,GAAAA,CAAAA,CAAAA,KAAAA,EAlBwM/B,gBAkBxM+B,CAlByNP,WAkBzNO,CAAAA,CAAAA,OAAAA,CAAAA,GAlBiPjC,iBAkBjPiC,CAAAA,OAAAA,CAAAA,EAAAA,MAAAA,EAlBqRlD,uBAkBrRkD,CAlB6S/B,gBAkB7S+B,CAlB8TN,aAkB9TM,CAAAA,CAAAA,OAAAA,CAAAA,CAAAA,EAAAA,GAlB2V5C,eAkB3V4C,EAAAA,GAlB+WL,OAkB/WK,CAlBuX5C,eAkBvX4C,EAAAA,CAAAA,CAAAA,GAlB6YrC,QAkB7YqC;AAAW,KAjBZJ,UAiBY,CAAA,uBAjBsB5B,iBAiBtB,GAjB0CpB,gBAiB1C,GAjB6DoB,iBAiB7D,CAAA,GAjBkFC,gBAiBlF,CAjBmG4B,cAiBnG,CAAA,CAAA,OAAA,CAAA,GAjB8H9B,iBAiB9H,CAAA,OAAA,CAAA;AAEZkC,UAlBKH,YAkBiB,CAAA,cAlBUnB,MAkBV,CAAA,MAAA,EAAA,OAAA,CAAA,CAAA,CAAA;EAAA;;;EAAyD,OAAGX,CAAAA,EAdhF+B,WAcgF/B;EAAiB;;;EAEE,KAAGpB,CAAAA,EAZxGY,SAYwGZ;EAAgB;;;EAAyD,MAA2BC,CAAAA,EAAAA,CAAAA,KAAAA,EAAAA,OAAAA,EAAAA,GAAAA,IAAAA;EAAc;;;EAAoE,MAAqBqD,CAAAA,EAJlTF,WAIkTE;;AAAuCA,KAF1VD,sBAE0VC,CAAAA,oBAF/SlC,iBAE+SkC,GAF3RtD,gBAE2RsD,GAFxQlC,iBAEwQkC;;+BAAvUvB,MAAkXuB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,GAA5VvB,MAA4VuB,CAAAA,MAAAA,EAAAA,GAAAA,CAAAA,EAAAA,sBAAjTlC,iBAAiTkC,GAA7RtD,gBAA6RsD,GAA1QlC,iBAA0QkC,EAAAA,qBAAlOrD,cAAkOqD,CAAnNA,sBAAmNA,CAAAA,GAAzLrD,cAAyLqD,CAAAA,OAAAA,CAAAA,EAAAA,GAA7J3B,gBAA6J2B,GAA1I3B,gBAA0I2B,EAAAA,GAArHhC,cAAqHgC,GAApG9B,iBAAoG8B,CAAlFA,sBAAkFA,CAAAA,GAAxD/B,YAAwD+B,CAA3CA,sBAA2CA,CAAAA,GAAjB7B,gBAAiB6B,CAAAA,sBAAAA,CAAAA,GAA0B5B,uBAA1B4B,CAAAA,GAAAA;EAAsB;EAAvB,GAA2B5B,CAAAA,EAEjarB,iBAFiaqB,GAE7Y6B,kBAF6Y7B,CAE1XmB,WAF0XnB,EAE7WoB,aAF6WpB,CAAAA;EAAuB;;;;;;;;;;;;;EAqHzZ,KAEtBf,CAAAA,EAAAA,MAAAA;EAAmB;EAEf,KAAKD,EAvGjBQ,QAuGiBR,GAAAA,CAvGLgC,UAuGKhC,GAvGQiC,UAuGRjC,CAAAA,EAAAA;EAAG;;;;;;;;;;;;;;;;EAmFkB,MAAyBS,CAAAA,EAzK7DyB,MAyK6DzB,CAzKtD0B,WAyKsD1B,EAzKzC2B,aAyKyC3B,CAAAA;EAAiB;;;;;;;;AAInE;AAuB2K;;;;;;;;;;;;;;;;;AAIoL;;;;;;;;;;;;;;;;;;;gBA1JrW0B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;kBAkCEC;;oBAEEnC;;iBAEHA;;oBAEGkB,MAAMnB;;mBAEPmB,MAAMnB;UACfE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;mBAsDS4C;;;;;;;;;;;;;;;;;;;;;iBAqBFxC,aAAaK,iBAAiBwB,wBAAwB1B,4BAA4BE,iBAAiBwB,yBAAyB1B,6BAA6BjB,wBAAwBmB,iBAAiByB;;;;;kBAKjM9B,aAAaK,iBAAiBwB,wBAAwB1B,4BAA4BE,iBAAiBwB,yBAAyB1B,6BAA6BjB,wBAAwBmB,iBAAiByB;;;;WAIzMM;;;;;;;;;;;;;;;;;KA2BRG,uCAAuCnC,oBAAoBpB,mBAAmBoB,yCAAyCA,oBAAoBpB,mBAAmBoB,6BAA6BC,iBAAiBwB,wBAAwB1B,qCAAqC+B,aAAa7B,iBAAiByB,6BAA6BC,QAAQ1C,qBAAqBA"}
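The substantive declaration change readable from the embedded sourcesContent in this hunk is that `AgentRuntime` is no longer a type alias over LangGraph's `Runtime` (whose import is dropped from the new names array) but a standalone interface generic over the context type, and the deprecated `DynamicLLMFunction` now receives that interface as its second argument. The TypeScript sketch below transcribes that shape for readability; the `BaseStore` stub and the `pickModel` callback are illustrative assumptions, not part of the package.

```ts
// Minimal sketch of the AgentRuntime shape on the "+" side of the hunk above,
// transcribed from its embedded sourcesContent. BaseStore is stubbed here only so
// the snippet type-checks standalone; in the package it comes from
// "@langchain/langgraph-checkpoint".
type BaseStore = Record<string, unknown>;

// alpha.1: `type AgentRuntime<...> = Runtime<ToAnnotationRoot<...>["State"]>`
// alpha.3: a standalone interface parameterized by the context type.
interface AgentRuntime<ContextType = Record<string, unknown>> {
  /** The context of the agent. */
  context?: ContextType;
  /** The store passed to the agent. */
  store?: BaseStore;
  /** The writer of the agent to write to the output stream. */
  writer?: (chunk: unknown) => void;
  /** An optional abort signal indicating that the overall operation should be aborted. */
  signal?: AbortSignal;
}

// The deprecated DynamicLLMFunction type now receives this runtime (rather than
// LangGraph's Runtime) as its second argument. `pickModel` is a hypothetical
// callback shown only to illustrate the new parameter shape.
const pickModel = (
  _state: { messages: unknown[] },
  runtime: AgentRuntime<{ userId?: string }>
): unknown => {
  runtime.writer?.(`selecting a model for ${runtime.context?.userId ?? "anonymous"}`);
  return undefined; // a real implementation would return a LanguageModelLike
};
```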
package/dist/evaluation/comparison/pairwise.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"pairwise.d.ts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMPairwiseStringEvaluator","LLMPairwiseStringEvaluatorArgs","ExtractLLMCallOptions","CriteriaLike","PairwiseStringResultOutputParser","Promise","PairwiseStringEvalChain","Record","____________langchain_core_dist_prompt_values_js0","BasePromptValueInterface","Omit","Partial","LabeledPairwiseStringEvalChain"],"sources":["../../../src/evaluation/comparison/pairwise.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMPairwiseStringEvaluator, LLMPairwiseStringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { CriteriaLike } from \"../criteria/criteria.js\";\n/**\n * A parser for the output of the PairwiseStringEvalChain.\n */\nexport declare class PairwiseStringResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs.\n */\nexport declare class PairwiseStringEvalChain extends LLMPairwiseStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n outputParser: PairwiseStringResultOutputParser;\n static resolvePairwiseCriteria(criteria?: CriteriaLike): Record<string, string>;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the PairwiseStringEvalChain.\n * @param llm\n * @param criteria The criteria to use for evaluation.\n * @param chainOptions Options to pass to the chain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<PairwiseStringEvalChain>;\n _prepareOutput(result: ChainValues): any;\n _evaluateStringPairs(args: LLMPairwiseStringEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs,\n * with labeled preferences.\n */\nexport declare class LabeledPairwiseStringEvalChain extends PairwiseStringEvalChain {\n static lc_name(): string;\n requiresReference: boolean;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, 
any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWqBc,cAAAA,gCAAAA,SAAyCb,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGnEJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EI,cAA7EJ,CAAAA;;;;;;AAHoD,cAS5DY,uBAAAA,SAAgCN,0BAAAA,CAT4B;EAS5DM,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,CAAA,EAAA,MAAA;EAAA,cAO1BF,CAAAA,EAAAA,MAAAA;EAAgC,aACJD,EAAAA,OAAAA;EAAY,iBAAGI,EAAAA,OAAAA;EAAM,oBACzBZ,EAAAA,MAAAA;EAAkB,YAAA,EAF1CS,gCAE0C;EAA8G,OAA3GT,uBAAAA,CAAAA,QAAAA,CAAAA,EADjBQ,YACiBR,CAAAA,EADFY,MACEZ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAkB,OAOzDL,qBAAAA,CAAAA,MAAAA,CAAAA,EAPkBK,kBAOlBL,CAAAA,EAPuCK,kBAOvCL,CAAAA,GAAAA,EAPoC,wBAAA,EAOpCA,GAAAA,CAAAA;EAA0B;;;;;;EAA2F,OAClHE,OAAAA,CAAAA,GAAAA,EADHF,0BACGE,EAAAA,QAAAA,CAAAA,EADoCW,YACpCX,EAAAA,YAAAA,CAAAA,EADiEmB,OACjEnB,CADyEkB,IACzElB,CAD8EO,iBAC9EP,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAD2Ga,OAC3Gb,CADmHc,uBACnHd,CAAAA;EAAW,cACPS,CAAAA,MAAAA,EADJT,WACIS,CAAAA,EAAAA,GAAAA;EAA8B,oBAAeC,CAAAA,IAAAA,EAA7CD,8BAA6CC,EAAAA,WAAAA,EAAAA,qBAAAA,CAAAA,IAAAA,CAAAA,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAA6CN,SAA7CM,GAAyDL,kBAAzDK,CAAAA,EAA8EG,OAA9EH,CAAsFV,WAAtFU,CAAAA;;;;;;AAlBG;AAyB1DU,cAAAA,8BAAAA,SAAuCN,uBAAAA,CAAT;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,iBAGTX,EAAAA,OAAAA;EAAkB,OAAA,qBAAA,CAAA,MAAA,CAAA,EAAlBA,kBAAkB,CAAA,EAAGA,kBAAH,CAAA,GAAA,EAAA,wBAAA,EAAA,GAAA,CAAA"}
+ {"version":3,"file":"pairwise.d.ts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMPairwiseStringEvaluator","LLMPairwiseStringEvaluatorArgs","ExtractLLMCallOptions","CriteriaLike","PairwiseStringResultOutputParser","Promise","PairwiseStringEvalChain","Record","____________langchain_core_dist_prompt_values_js2","BasePromptValueInterface","Omit","Partial","LabeledPairwiseStringEvalChain"],"sources":["../../../src/evaluation/comparison/pairwise.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMPairwiseStringEvaluator, LLMPairwiseStringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { CriteriaLike } from \"../criteria/criteria.js\";\n/**\n * A parser for the output of the PairwiseStringEvalChain.\n */\nexport declare class PairwiseStringResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs.\n */\nexport declare class PairwiseStringEvalChain extends LLMPairwiseStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n outputParser: PairwiseStringResultOutputParser;\n static resolvePairwiseCriteria(criteria?: CriteriaLike): Record<string, string>;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the PairwiseStringEvalChain.\n * @param llm\n * @param criteria The criteria to use for evaluation.\n * @param chainOptions Options to pass to the chain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<PairwiseStringEvalChain>;\n _prepareOutput(result: ChainValues): any;\n _evaluateStringPairs(args: LLMPairwiseStringEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs,\n * with labeled preferences.\n */\nexport declare class LabeledPairwiseStringEvalChain extends PairwiseStringEvalChain {\n static lc_name(): string;\n requiresReference: boolean;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, 
any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWqBc,cAAAA,gCAAAA,SAAyCb,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGnEJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EI,cAA7EJ,CAAAA;;;;;;AAHoD,cAS5DY,uBAAAA,SAAgCN,0BAAAA,CAT4B;EAS5DM,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,CAAA,EAAA,MAAA;EAAA,cAO1BF,CAAAA,EAAAA,MAAAA;EAAgC,aACJD,EAAAA,OAAAA;EAAY,iBAAGI,EAAAA,OAAAA;EAAM,oBACzBZ,EAAAA,MAAAA;EAAkB,YAAA,EAF1CS,gCAE0C;EAA8G,OAA3GT,uBAAAA,CAAAA,QAAAA,CAAAA,EADjBQ,YACiBR,CAAAA,EADFY,MACEZ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAkB,OAOzDL,qBAAAA,CAAAA,MAAAA,CAAAA,EAPkBK,kBAOlBL,CAAAA,EAPuCK,kBAOvCL,CAAAA,GAAAA,EAPoC,wBAAA,EAOpCA,GAAAA,CAAAA;EAA0B;;;;;;EAA2F,OAClHE,OAAAA,CAAAA,GAAAA,EADHF,0BACGE,EAAAA,QAAAA,CAAAA,EADoCW,YACpCX,EAAAA,YAAAA,CAAAA,EADiEmB,OACjEnB,CADyEkB,IACzElB,CAD8EO,iBAC9EP,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAD2Ga,OAC3Gb,CADmHc,uBACnHd,CAAAA;EAAW,cACPS,CAAAA,MAAAA,EADJT,WACIS,CAAAA,EAAAA,GAAAA;EAA8B,oBAAeC,CAAAA,IAAAA,EAA7CD,8BAA6CC,EAAAA,WAAAA,EAAAA,qBAAAA,CAAAA,IAAAA,CAAAA,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAA6CN,SAA7CM,GAAyDL,kBAAzDK,CAAAA,EAA8EG,OAA9EH,CAAsFV,WAAtFU,CAAAA;;;;;;AAlBG;AAyB1DU,cAAAA,8BAAAA,SAAuCN,uBAAAA,CAAT;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,iBAGTX,EAAAA,OAAAA;EAAkB,OAAA,qBAAA,CAAA,MAAA,CAAA,EAAlBA,kBAAkB,CAAA,EAAGA,kBAAH,CAAA,GAAA,EAAA,wBAAA,EAAA,GAAA,CAAA"}
package/dist/evaluation/criteria/criteria.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"criteria.d.ts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMStringEvaluator","StringEvaluatorArgs","ExtractLLMCallOptions","ConstitutionalPrinciple","Criteria","CriteriaLike","CriteriaResultOutputParser","Promise","CriteriaEvalInput","CriteriaEvalChain","Record","____________langchain_core_dist_prompt_values_js2","BasePromptValueInterface","Omit","Partial","input","prediction","reference","LabeledCriteriaEvalChain"],"sources":["../../../src/evaluation/criteria/criteria.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMStringEvaluator, StringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { ConstitutionalPrinciple } from \"../../chains/constitutional_ai/constitutional_principle.js\";\n/**\n * A Criteria to evaluate.\n */\nexport type Criteria = \"conciseness\" | \"relevance\" | \"correctness\" | \"coherence\" | \"harmfulness\" | \"maliciousness\" | \"helpfulness\" | \"controversiality\" | \"misogyny\" | \"criminality\" | \"insensitivity\" | \"depth\" | \"creativity\" | \"detail\";\nexport type CriteriaLike = {\n [key: string]: string;\n} | Criteria | ConstitutionalPrinciple;\n/**\n * A parser for the output of the CriteriaEvalChain.\n */\nexport declare class CriteriaResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\nexport interface CriteriaEvalInput {\n input?: string;\n output: string;\n reference?: string;\n}\nexport declare class CriteriaEvalChain extends LLMStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n // The output parser to use for the evaluation chain.\n outputParser: BaseLLMOutputParser<EvalOutputType>;\n /**\n * Resolve the criteria to evaluate.\n * @param criteria The criteria to evaluate the runs against. 
It can be:\n * - a mapping of a criterion name to its description\n * - a single criterion name present in one of the default criteria\n * - a single `ConstitutionalPrinciple` instance\n *\n * @return A dictionary mapping criterion names to descriptions.\n */\n static resolveCriteria(criteria?: CriteriaLike): Record<string, string>;\n /**\n * Resolve the prompt to use for the evaluation.\n * @param prompt\n */\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the CriteriaEvalChain.\n * @param llm\n * @param criteria\n * @param chainOptions Options to pass to the constructor of the LLMChain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<CriteriaEvalChain>;\n getEvalInput({ input, prediction, reference }: StringEvaluatorArgs): CriteriaEvalInput;\n /**\n * Prepare the output of the evaluation.\n * @param result\n */\n _prepareOutput(result: ChainValues): any;\n _evaluateStrings(args: StringEvaluatorArgs & ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * Criteria evaluation chain that requires references.\n */\nexport declare class LabeledCriteriaEvalChain extends CriteriaEvalChain {\n static lc_name(): string;\n // Whether the evaluation requires a reference text.\n requiresReference: boolean;\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWYc,KAAAA,QAAAA,GAAQ,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,eAAA,GAAA,aAAA,GAAA,kBAAA,GAAA,UAAA,GAAA,aAAA,GAAA,eAAA,GAAA,OAAA,GAAA,YAAA,GAAA,QAAA;AACRC,KAAAA,YAAAA,GAAY;EAAA,CAAA,GAAA,EAAA,MAAA,CAAA,EAAA,MAAA;CAAA,GAEpBD,QAAAA,GAAWD,uBAAXC;;AAAkC;AAItC;AAA+C,cAA1BE,0BAAAA,SAAmCf,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAE7DJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEa,OAArEb,CAA6EI,cAA7EJ,CAAAA;;AAA6CE,UAEzDY,iBAAAA,CAFyDZ;EAAS,KAAuBE,CAAAA,EAAAA,MAAAA;EAAc,MAAtBS,EAAAA,MAAAA;EAAO,SAFjDhB,CAAAA,EAAAA,MAAAA;AAAmB;AAI1DiB,cAKIC,iBAAAA,SAA0BT,kBAAAA,CALb;EAKbS,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAiB;EAAA,aAAA,CAAA,EAAA,MAAA;EAAA,cAQAX,CAAAA,EAAAA,MAAAA;EAAc,aAAlCP,EAAAA,OAAAA;EAAmB,iBAUCc,EAAAA,OAAAA;EAAY,oBAAGK,EAAAA,MAAAA;EAAM;EAKP,YAAA,EAflCnB,mBAekC,CAfdO,cAec,CAAA;EAA8G;;;;;;;;;EAQ1I,OAAEkB,eAAAA,CAAAA,QAAAA,CAAAA,EAbYX,YAaZW,CAAAA,EAb2BN,MAa3BM,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAU;;;;EAKE,OACXf,aAAAA,CAAAA,MAAAA,CAAAA,EAdON,kBAcPM,CAAAA,EAd4BN,kBAc5BM,CAAAA,GAAAA,EAdyB,wBAAA,EAczBA,GAAAA,CAAAA;EAAmB;;;;;;EArCmB,OAAA,OAAA,CAAA,GAAA,EA8BzCX,0BA9ByC,EAAA,QAAA,CAAA,EA8BFe,YA9BE,EAAA,YAAA,CAAA,EA8B2BS,OA9B3B,CA8BmCD,IA9BnC,CA8BwCd,iBA9BxC,EAAA,KAAA,CAAA,CAAA,CAAA,EA8BqEQ,OA9BrE,CA8B6EE,iBA9B7E,CAAA;EA0C5CS,YAAAA,CAAAA;IAAAA,KAAAA;IAAAA,UAAwB;IAAA;EAAA,CAAA,EAXMjB,mBAWN,CAAA,EAX4BO,iBAW5B;EAAA;;;;EAI4B,cAJnBC,CAAAA,MAAAA,EAN3BjB,WAM2BiB,CAAAA,EAAAA,GAAAA;EAAiB,gBAAA,CAAA,IAAA,EAL5CR,mBAK4C,GALtBC,qBAKsB,CAAA,IAAA,CAAA,KAAA,CAAA,CAAA,EAAA,MAAA,CAAA,EALuBN,SAKvB,GALmCC,kBAKnC,CAAA,EALwDU,OAKxD,CALgEf,WAKhE,CAAA;;;;;cAAlD0B,wBAAAA,SAAiCT,iBAAAA;;;;gCAIpBd,qBAAqBA,wBAAH,wBAAA"}
+ {"version":3,"file":"criteria.d.ts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMStringEvaluator","StringEvaluatorArgs","ExtractLLMCallOptions","ConstitutionalPrinciple","Criteria","CriteriaLike","CriteriaResultOutputParser","Promise","CriteriaEvalInput","CriteriaEvalChain","Record","____________langchain_core_dist_prompt_values_js0","BasePromptValueInterface","Omit","Partial","input","prediction","reference","LabeledCriteriaEvalChain"],"sources":["../../../src/evaluation/criteria/criteria.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMStringEvaluator, StringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { ConstitutionalPrinciple } from \"../../chains/constitutional_ai/constitutional_principle.js\";\n/**\n * A Criteria to evaluate.\n */\nexport type Criteria = \"conciseness\" | \"relevance\" | \"correctness\" | \"coherence\" | \"harmfulness\" | \"maliciousness\" | \"helpfulness\" | \"controversiality\" | \"misogyny\" | \"criminality\" | \"insensitivity\" | \"depth\" | \"creativity\" | \"detail\";\nexport type CriteriaLike = {\n [key: string]: string;\n} | Criteria | ConstitutionalPrinciple;\n/**\n * A parser for the output of the CriteriaEvalChain.\n */\nexport declare class CriteriaResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\nexport interface CriteriaEvalInput {\n input?: string;\n output: string;\n reference?: string;\n}\nexport declare class CriteriaEvalChain extends LLMStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n // The output parser to use for the evaluation chain.\n outputParser: BaseLLMOutputParser<EvalOutputType>;\n /**\n * Resolve the criteria to evaluate.\n * @param criteria The criteria to evaluate the runs against. 
It can be:\n * - a mapping of a criterion name to its description\n * - a single criterion name present in one of the default criteria\n * - a single `ConstitutionalPrinciple` instance\n *\n * @return A dictionary mapping criterion names to descriptions.\n */\n static resolveCriteria(criteria?: CriteriaLike): Record<string, string>;\n /**\n * Resolve the prompt to use for the evaluation.\n * @param prompt\n */\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the CriteriaEvalChain.\n * @param llm\n * @param criteria\n * @param chainOptions Options to pass to the constructor of the LLMChain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<CriteriaEvalChain>;\n getEvalInput({ input, prediction, reference }: StringEvaluatorArgs): CriteriaEvalInput;\n /**\n * Prepare the output of the evaluation.\n * @param result\n */\n _prepareOutput(result: ChainValues): any;\n _evaluateStrings(args: StringEvaluatorArgs & ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * Criteria evaluation chain that requires references.\n */\nexport declare class LabeledCriteriaEvalChain extends CriteriaEvalChain {\n static lc_name(): string;\n // Whether the evaluation requires a reference text.\n requiresReference: boolean;\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWYc,KAAAA,QAAAA,GAAQ,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,eAAA,GAAA,aAAA,GAAA,kBAAA,GAAA,UAAA,GAAA,aAAA,GAAA,eAAA,GAAA,OAAA,GAAA,YAAA,GAAA,QAAA;AACRC,KAAAA,YAAAA,GAAY;EAAA,CAAA,GAAA,EAAA,MAAA,CAAA,EAAA,MAAA;CAAA,GAEpBD,QAAAA,GAAWD,uBAAXC;;AAAkC;AAItC;AAA+C,cAA1BE,0BAAAA,SAAmCf,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAE7DJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEa,OAArEb,CAA6EI,cAA7EJ,CAAAA;;AAA6CE,UAEzDY,iBAAAA,CAFyDZ;EAAS,KAAuBE,CAAAA,EAAAA,MAAAA;EAAc,MAAtBS,EAAAA,MAAAA;EAAO,SAFjDhB,CAAAA,EAAAA,MAAAA;AAAmB;AAI1DiB,cAKIC,iBAAAA,SAA0BT,kBAAAA,CALb;EAKbS,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAiB;EAAA,aAAA,CAAA,EAAA,MAAA;EAAA,cAQAX,CAAAA,EAAAA,MAAAA;EAAc,aAAlCP,EAAAA,OAAAA;EAAmB,iBAUCc,EAAAA,OAAAA;EAAY,oBAAGK,EAAAA,MAAAA;EAAM;EAKP,YAAA,EAflCnB,mBAekC,CAfdO,cAec,CAAA;EAA8G;;;;;;;;;EAQ1I,OAAEkB,eAAAA,CAAAA,QAAAA,CAAAA,EAbYX,YAaZW,CAAAA,EAb2BN,MAa3BM,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAU;;;;EAKE,OACXf,aAAAA,CAAAA,MAAAA,CAAAA,EAdON,kBAcPM,CAAAA,EAd4BN,kBAc5BM,CAAAA,GAAAA,EAdyB,wBAAA,EAczBA,GAAAA,CAAAA;EAAmB;;;;;;EArCmB,OAAA,OAAA,CAAA,GAAA,EA8BzCX,0BA9ByC,EAAA,QAAA,CAAA,EA8BFe,YA9BE,EAAA,YAAA,CAAA,EA8B2BS,OA9B3B,CA8BmCD,IA9BnC,CA8BwCd,iBA9BxC,EAAA,KAAA,CAAA,CAAA,CAAA,EA8BqEQ,OA9BrE,CA8B6EE,iBA9B7E,CAAA;EA0C5CS,YAAAA,CAAAA;IAAAA,KAAAA;IAAAA,UAAwB;IAAA;EAAA,CAAA,EAXMjB,mBAWN,CAAA,EAX4BO,iBAW5B;EAAA;;;;EAI4B,cAJnBC,CAAAA,MAAAA,EAN3BjB,WAM2BiB,CAAAA,EAAAA,GAAAA;EAAiB,gBAAA,CAAA,IAAA,EAL5CR,mBAK4C,GALtBC,qBAKsB,CAAA,IAAA,CAAA,KAAA,CAAA,CAAA,EAAA,MAAA,CAAA,EALuBN,SAKvB,GALmCC,kBAKnC,CAAA,EALwDU,OAKxD,CALgEf,WAKhE,CAAA;;;;;cAAlD0B,wBAAAA,SAAiCT,iBAAAA;;;;gCAIpBd,qBAAqBA,wBAAH,wBAAA"}
package/dist/hub/base.cjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"base.cjs","names":["repoFullName: string","runnable: Runnable","options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }","ownerRepoCommit: string","options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }","varName: string","message: any","modelClass?: new (...args: any[]) => BaseLanguageModel","modelImportMap: Record<string, any>","optionalImportMap: Record<string, any>","loadedSequence: T"],"sources":["../../src/hub/base.ts"],"sourcesContent":["import type { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport type { Runnable } from \"@langchain/core/runnables\";\nimport type { Client, ClientConfig } from \"langsmith\";\nimport type { PromptCommit } from \"langsmith/schemas\";\n\n/**\n * Push a prompt to the hub.\n * If the specified repo doesn't already exist, it will be created.\n * @param repoFullName The full name of the repo.\n * @param runnable The prompt to push.\n * @param options\n * @returns The URL of the newly pushed prompt in the hub.\n */\nexport async function basePush(\n repoFullName: string,\n runnable: Runnable,\n options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }\n): Promise<string> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n const payloadOptions = {\n object: runnable,\n parentCommitHash: options?.parentCommitHash,\n isPublic: options?.isPublic ?? options?.newRepoIsPublic,\n description: options?.description ?? 
options?.newRepoDescription,\n readme: options?.readme,\n tags: options?.tags,\n };\n return client.pushPrompt(repoFullName, payloadOptions);\n}\n\nexport async function basePull(\n ownerRepoCommit: string,\n options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }\n): Promise<PromptCommit> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n\n const promptObject = await client.pullPromptCommit(ownerRepoCommit, {\n includeModel: options?.includeModel,\n });\n\n if (promptObject.manifest.kwargs?.metadata === undefined) {\n promptObject.manifest.kwargs = {\n ...promptObject.manifest.kwargs,\n metadata: {},\n };\n }\n\n promptObject.manifest.kwargs.metadata = {\n ...promptObject.manifest.kwargs.metadata,\n lc_hub_owner: promptObject.owner,\n lc_hub_repo: promptObject.repo,\n lc_hub_commit_hash: promptObject.commit_hash,\n };\n\n // Some nested mustache prompts have improperly parsed variables that include a dot.\n if (promptObject.manifest.kwargs.template_format === \"mustache\") {\n const stripDotNotation = (varName: string) => varName.split(\".\")[0];\n\n const { input_variables } = promptObject.manifest.kwargs;\n if (Array.isArray(input_variables)) {\n promptObject.manifest.kwargs.input_variables =\n input_variables.map(stripDotNotation);\n }\n\n const { messages } = promptObject.manifest.kwargs;\n if (Array.isArray(messages)) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n promptObject.manifest.kwargs.messages = messages.map((message: any) => {\n const nestedVars = message?.kwargs?.prompt?.kwargs?.input_variables;\n if (Array.isArray(nestedVars)) {\n // eslint-disable-next-line no-param-reassign\n message.kwargs.prompt.kwargs.input_variables =\n nestedVars.map(stripDotNotation);\n }\n return message;\n });\n }\n }\n return promptObject;\n}\n\nexport function generateModelImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let importMapKey;\n if (modelLcName === \"ChatOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatAnthropic\") {\n importMapKey = \"chat_models__anthropic\";\n } else if (modelLcName === \"ChatAzureOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatVertexAI\") {\n importMapKey = \"chat_models__vertexai\";\n } else if (modelLcName === \"ChatGoogleGenerativeAI\") {\n importMapKey = \"chat_models__google_genai\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n importMapKey = \"chat_models__chat_bedrock_converse\";\n } else if (modelLcName === \"ChatMistral\") {\n importMapKey = \"chat_models__mistralai\";\n } else if (modelLcName === \"ChatFireworks\") {\n importMapKey = \"chat_models__fireworks\";\n } else if (modelLcName === \"ChatGroq\") {\n importMapKey = \"chat_models__groq\";\n } else {\n throw new Error(\"Received unsupported model class when pulling prompt.\");\n }\n modelImportMap[importMapKey] = {\n ...modelImportMap[importMapKey],\n [modelLcName]: modelClass,\n };\n }\n return modelImportMap;\n}\n\nexport function generateOptionalImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n 
// eslint-disable-next-line @typescript-eslint/no-explicit-any\n const optionalImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let optionalImportMapKey;\n if (modelLcName === \"ChatGoogleGenerativeAI\") {\n optionalImportMapKey = \"langchain_google_genai/chat_models\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n optionalImportMapKey = \"langchain_aws/chat_models\";\n } else if (modelLcName === \"ChatGroq\") {\n optionalImportMapKey = \"langchain_groq/chat_models\";\n }\n if (optionalImportMapKey !== undefined) {\n optionalImportMap[optionalImportMapKey] = {\n [modelLcName]: modelClass,\n };\n }\n }\n return optionalImportMap;\n}\n\nexport function bindOutputSchema<T extends Runnable>(loadedSequence: T) {\n if (\n \"first\" in loadedSequence &&\n loadedSequence.first !== null &&\n typeof loadedSequence.first === \"object\" &&\n \"schema\" in loadedSequence.first &&\n \"last\" in loadedSequence &&\n loadedSequence.last !== null &&\n typeof loadedSequence.last === \"object\" &&\n \"bound\" in loadedSequence.last &&\n loadedSequence.last.bound !== null &&\n typeof loadedSequence.last.bound === \"object\" &&\n \"withStructuredOutput\" in loadedSequence.last.bound &&\n typeof loadedSequence.last.bound.withStructuredOutput === \"function\"\n ) {\n // eslint-disable-next-line no-param-reassign\n loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(\n loadedSequence.first.schema\n );\n }\n return loadedSequence;\n}\n\n/**\n * Dynamically load the LangSmith client.\n * @returns The LangSmith client.\n */\nasync function loadLangSmith(): Promise<new (config?: ClientConfig) => Client> {\n try {\n const { Client } = await import(\"langsmith\");\n return Client;\n } catch (error) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n const errorMessage = error instanceof Error ? 
error.message : String(error);\n throw new Error(\n `Error loading \"langsmith\" package, install it via \\`npm install langsmith\\` before you use this function.\\nError: ${errorMessage}`\n );\n }\n}\n"],"mappings":";;;;;;;;;;AAaA,eAAsB,SACpBA,cACAC,UACAC,SAaiB;CACjB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAC1B,MAAM,iBAAiB;EACrB,QAAQ;EACR,kBAAkB,SAAS;EAC3B,UAAU,SAAS,YAAY,SAAS;EACxC,aAAa,SAAS,eAAe,SAAS;EAC9C,QAAQ,SAAS;EACjB,MAAM,SAAS;CAChB;AACD,QAAO,OAAO,WAAW,cAAc,eAAe;AACvD;AAED,eAAsB,SACpBC,iBACAC,SACuB;CACvB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAE1B,MAAM,eAAe,MAAM,OAAO,iBAAiB,iBAAiB,EAClE,cAAc,SAAS,aACxB,EAAC;AAEF,KAAI,aAAa,SAAS,QAAQ,aAAa,QAC7C,aAAa,SAAS,SAAS;EAC7B,GAAG,aAAa,SAAS;EACzB,UAAU,CAAE;CACb;CAGH,aAAa,SAAS,OAAO,WAAW;EACtC,GAAG,aAAa,SAAS,OAAO;EAChC,cAAc,aAAa;EAC3B,aAAa,aAAa;EAC1B,oBAAoB,aAAa;CAClC;AAGD,KAAI,aAAa,SAAS,OAAO,oBAAoB,YAAY;EAC/D,MAAM,mBAAmB,CAACC,YAAoB,QAAQ,MAAM,IAAI,CAAC;EAEjE,MAAM,EAAE,iBAAiB,GAAG,aAAa,SAAS;AAClD,MAAI,MAAM,QAAQ,gBAAgB,EAChC,aAAa,SAAS,OAAO,kBAC3B,gBAAgB,IAAI,iBAAiB;EAGzC,MAAM,EAAE,UAAU,GAAG,aAAa,SAAS;AAC3C,MAAI,MAAM,QAAQ,SAAS,EAEzB,aAAa,SAAS,OAAO,WAAW,SAAS,IAAI,CAACC,YAAiB;GACrE,MAAM,aAAa,SAAS,QAAQ,QAAQ,QAAQ;AACpD,OAAI,MAAM,QAAQ,WAAW,EAE3B,QAAQ,OAAO,OAAO,OAAO,kBAC3B,WAAW,IAAI,iBAAiB;AAEpC,UAAO;EACR,EAAC;CAEL;AACD,QAAO;AACR;AAED,SAAgB,uBAEdC,YACA;CAEA,MAAMC,iBAAsC,CAAE;AAC9C,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,cAClB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,mBACzB,eAAe;WACN,gBAAgB,gBACzB,eAAe;WACN,gBAAgB,0BACzB,eAAe;WACN,gBAAgB,uBACzB,eAAe;WACN,gBAAgB,eACzB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,YACzB,eAAe;MAEf,OAAM,IAAI,MAAM;EAElB,eAAe,gBAAgB;GAC7B,GAAG,eAAe;IACjB,cAAc;EAChB;CACF;AACD,QAAO;AACR;AAED,SAAgB,0BAEdD,YACA;CAEA,MAAME,oBAAyC,CAAE;AACjD,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,0BAClB,uBAAuB;WACd,gBAAgB,uBACzB,uBAAuB;WACd,gBAAgB,YACzB,uBAAuB;AAEzB,MAAI,yBAAyB,QAC3B,kBAAkB,wBAAwB,GACvC,cAAc,WAChB;CAEJ;AACD,QAAO;AACR;AAED,SAAgB,iBAAqCC,gBAAmB;AACtE,KACE,WAAW,kBACX,eAAe,UAAU,QACzB,OAAO,eAAe,UAAU,YAChC,YAAY,eAAe,SAC3B,UAAU,kBACV,eAAe,SAAS,QACxB,OAAO,eAAe,SAAS,YAC/B,WAAW,eAAe,QAC1B,eAAe,KAAK,UAAU,QAC9B,OAAO,eAAe,KAAK,UAAU,YACrC,0BAA0B,eAAe,KAAK,SAC9C,OAAO,eAAe,KAAK,MAAM,yBAAyB,YAG1D,eAAe,KAAK,QAAQ,eAAe,KAAK,MAAM,qBACpD,eAAe,MAAM,OACtB;AAEH,QAAO;AACR;;;;;AAMD,eAAe,gBAAgE;AAC7E,KAAI;EACF,MAAM,EAAE,QAAQ,GAAG,2CAAM;AACzB,SAAO;CACR,SAAQ,OAAO;EAEd,MAAM,eAAe,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;AAC3E,QAAM,IAAI,MACR,CAAC,kHAAkH,EAAE,cAAc;CAEtI;AACF"}
+ {"version":3,"file":"base.cjs","names":["repoFullName: string","runnable: Runnable","options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }","ownerRepoCommit: string","options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }","varName: string","message: any","modelClass?: new (...args: any[]) => BaseLanguageModel","modelImportMap: Record<string, any>","optionalImportMap: Record<string, any>","loadedSequence: T"],"sources":["../../src/hub/base.ts"],"sourcesContent":["import type { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport type { Runnable } from \"@langchain/core/runnables\";\n\nimport type { Client, ClientConfig } from \"langsmith\";\nimport type { PromptCommit } from \"langsmith/schemas\";\n\n/**\n * Push a prompt to the hub.\n * If the specified repo doesn't already exist, it will be created.\n * @param repoFullName The full name of the repo.\n * @param runnable The prompt to push.\n * @param options\n * @returns The URL of the newly pushed prompt in the hub.\n */\nexport async function basePush(\n repoFullName: string,\n runnable: Runnable,\n options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }\n): Promise<string> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n const payloadOptions = {\n object: runnable,\n parentCommitHash: options?.parentCommitHash,\n isPublic: options?.isPublic ?? options?.newRepoIsPublic,\n description: options?.description ?? 
options?.newRepoDescription,\n readme: options?.readme,\n tags: options?.tags,\n };\n return client.pushPrompt(repoFullName, payloadOptions);\n}\n\nexport async function basePull(\n ownerRepoCommit: string,\n options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }\n): Promise<PromptCommit> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n\n const promptObject = await client.pullPromptCommit(ownerRepoCommit, {\n includeModel: options?.includeModel,\n });\n\n if (promptObject.manifest.kwargs?.metadata === undefined) {\n promptObject.manifest.kwargs = {\n ...promptObject.manifest.kwargs,\n metadata: {},\n };\n }\n\n promptObject.manifest.kwargs.metadata = {\n ...promptObject.manifest.kwargs.metadata,\n lc_hub_owner: promptObject.owner,\n lc_hub_repo: promptObject.repo,\n lc_hub_commit_hash: promptObject.commit_hash,\n };\n\n // Some nested mustache prompts have improperly parsed variables that include a dot.\n if (promptObject.manifest.kwargs.template_format === \"mustache\") {\n const stripDotNotation = (varName: string) => varName.split(\".\")[0];\n\n const { input_variables } = promptObject.manifest.kwargs;\n if (Array.isArray(input_variables)) {\n promptObject.manifest.kwargs.input_variables =\n input_variables.map(stripDotNotation);\n }\n\n const { messages } = promptObject.manifest.kwargs;\n if (Array.isArray(messages)) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n promptObject.manifest.kwargs.messages = messages.map((message: any) => {\n const nestedVars = message?.kwargs?.prompt?.kwargs?.input_variables;\n if (Array.isArray(nestedVars)) {\n // eslint-disable-next-line no-param-reassign\n message.kwargs.prompt.kwargs.input_variables =\n nestedVars.map(stripDotNotation);\n }\n return message;\n });\n }\n }\n return promptObject;\n}\n\nexport function generateModelImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let importMapKey;\n if (modelLcName === \"ChatOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatAnthropic\") {\n importMapKey = \"chat_models__anthropic\";\n } else if (modelLcName === \"ChatAzureOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatVertexAI\") {\n importMapKey = \"chat_models__vertexai\";\n } else if (modelLcName === \"ChatGoogleGenerativeAI\") {\n importMapKey = \"chat_models__google_genai\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n importMapKey = \"chat_models__chat_bedrock_converse\";\n } else if (modelLcName === \"ChatMistral\") {\n importMapKey = \"chat_models__mistralai\";\n } else if (modelLcName === \"ChatFireworks\") {\n importMapKey = \"chat_models__fireworks\";\n } else if (modelLcName === \"ChatGroq\") {\n importMapKey = \"chat_models__groq\";\n } else {\n throw new Error(\"Received unsupported model class when pulling prompt.\");\n }\n modelImportMap[importMapKey] = {\n ...modelImportMap[importMapKey],\n [modelLcName]: modelClass,\n };\n }\n return modelImportMap;\n}\n\nexport function generateOptionalImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n 
// eslint-disable-next-line @typescript-eslint/no-explicit-any\n const optionalImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let optionalImportMapKey;\n if (modelLcName === \"ChatGoogleGenerativeAI\") {\n optionalImportMapKey = \"langchain_google_genai/chat_models\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n optionalImportMapKey = \"langchain_aws/chat_models\";\n } else if (modelLcName === \"ChatGroq\") {\n optionalImportMapKey = \"langchain_groq/chat_models\";\n }\n if (optionalImportMapKey !== undefined) {\n optionalImportMap[optionalImportMapKey] = {\n [modelLcName]: modelClass,\n };\n }\n }\n return optionalImportMap;\n}\n\nexport function bindOutputSchema<T extends Runnable>(loadedSequence: T) {\n if (\n \"first\" in loadedSequence &&\n loadedSequence.first !== null &&\n typeof loadedSequence.first === \"object\" &&\n \"schema\" in loadedSequence.first &&\n \"last\" in loadedSequence &&\n loadedSequence.last !== null &&\n typeof loadedSequence.last === \"object\" &&\n \"bound\" in loadedSequence.last &&\n loadedSequence.last.bound !== null &&\n typeof loadedSequence.last.bound === \"object\" &&\n \"withStructuredOutput\" in loadedSequence.last.bound &&\n typeof loadedSequence.last.bound.withStructuredOutput === \"function\"\n ) {\n // eslint-disable-next-line no-param-reassign\n loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(\n loadedSequence.first.schema\n );\n }\n return loadedSequence;\n}\n\n/**\n * Dynamically load the LangSmith client.\n * @returns The LangSmith client.\n */\nasync function loadLangSmith(): Promise<new (config?: ClientConfig) => Client> {\n try {\n const { Client } = await import(\"langsmith\");\n return Client;\n } catch (error) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n const errorMessage = error instanceof Error ? 
error.message : String(error);\n throw new Error(\n `Error loading \"langsmith\" package, install it via \\`npm install langsmith\\` before you use this function.\\nError: ${errorMessage}`\n );\n }\n}\n"],"mappings":";;;;;;;;;;AAcA,eAAsB,SACpBA,cACAC,UACAC,SAaiB;CACjB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAC1B,MAAM,iBAAiB;EACrB,QAAQ;EACR,kBAAkB,SAAS;EAC3B,UAAU,SAAS,YAAY,SAAS;EACxC,aAAa,SAAS,eAAe,SAAS;EAC9C,QAAQ,SAAS;EACjB,MAAM,SAAS;CAChB;AACD,QAAO,OAAO,WAAW,cAAc,eAAe;AACvD;AAED,eAAsB,SACpBC,iBACAC,SACuB;CACvB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAE1B,MAAM,eAAe,MAAM,OAAO,iBAAiB,iBAAiB,EAClE,cAAc,SAAS,aACxB,EAAC;AAEF,KAAI,aAAa,SAAS,QAAQ,aAAa,QAC7C,aAAa,SAAS,SAAS;EAC7B,GAAG,aAAa,SAAS;EACzB,UAAU,CAAE;CACb;CAGH,aAAa,SAAS,OAAO,WAAW;EACtC,GAAG,aAAa,SAAS,OAAO;EAChC,cAAc,aAAa;EAC3B,aAAa,aAAa;EAC1B,oBAAoB,aAAa;CAClC;AAGD,KAAI,aAAa,SAAS,OAAO,oBAAoB,YAAY;EAC/D,MAAM,mBAAmB,CAACC,YAAoB,QAAQ,MAAM,IAAI,CAAC;EAEjE,MAAM,EAAE,iBAAiB,GAAG,aAAa,SAAS;AAClD,MAAI,MAAM,QAAQ,gBAAgB,EAChC,aAAa,SAAS,OAAO,kBAC3B,gBAAgB,IAAI,iBAAiB;EAGzC,MAAM,EAAE,UAAU,GAAG,aAAa,SAAS;AAC3C,MAAI,MAAM,QAAQ,SAAS,EAEzB,aAAa,SAAS,OAAO,WAAW,SAAS,IAAI,CAACC,YAAiB;GACrE,MAAM,aAAa,SAAS,QAAQ,QAAQ,QAAQ;AACpD,OAAI,MAAM,QAAQ,WAAW,EAE3B,QAAQ,OAAO,OAAO,OAAO,kBAC3B,WAAW,IAAI,iBAAiB;AAEpC,UAAO;EACR,EAAC;CAEL;AACD,QAAO;AACR;AAED,SAAgB,uBAEdC,YACA;CAEA,MAAMC,iBAAsC,CAAE;AAC9C,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,cAClB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,mBACzB,eAAe;WACN,gBAAgB,gBACzB,eAAe;WACN,gBAAgB,0BACzB,eAAe;WACN,gBAAgB,uBACzB,eAAe;WACN,gBAAgB,eACzB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,YACzB,eAAe;MAEf,OAAM,IAAI,MAAM;EAElB,eAAe,gBAAgB;GAC7B,GAAG,eAAe;IACjB,cAAc;EAChB;CACF;AACD,QAAO;AACR;AAED,SAAgB,0BAEdD,YACA;CAEA,MAAME,oBAAyC,CAAE;AACjD,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,0BAClB,uBAAuB;WACd,gBAAgB,uBACzB,uBAAuB;WACd,gBAAgB,YACzB,uBAAuB;AAEzB,MAAI,yBAAyB,QAC3B,kBAAkB,wBAAwB,GACvC,cAAc,WAChB;CAEJ;AACD,QAAO;AACR;AAED,SAAgB,iBAAqCC,gBAAmB;AACtE,KACE,WAAW,kBACX,eAAe,UAAU,QACzB,OAAO,eAAe,UAAU,YAChC,YAAY,eAAe,SAC3B,UAAU,kBACV,eAAe,SAAS,QACxB,OAAO,eAAe,SAAS,YAC/B,WAAW,eAAe,QAC1B,eAAe,KAAK,UAAU,QAC9B,OAAO,eAAe,KAAK,UAAU,YACrC,0BAA0B,eAAe,KAAK,SAC9C,OAAO,eAAe,KAAK,MAAM,yBAAyB,YAG1D,eAAe,KAAK,QAAQ,eAAe,KAAK,MAAM,qBACpD,eAAe,MAAM,OACtB;AAEH,QAAO;AACR;;;;;AAMD,eAAe,gBAAgE;AAC7E,KAAI;EACF,MAAM,EAAE,QAAQ,GAAG,2CAAM;AACzB,SAAO;CACR,SAAQ,OAAO;EAEd,MAAM,eAAe,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;AAC3E,QAAM,IAAI,MACR,CAAC,kHAAkH,EAAE,cAAc;CAEtI;AACF"}
package/dist/hub/base.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"base.js","names":["repoFullName: string","runnable: Runnable","options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }","ownerRepoCommit: string","options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }","varName: string","message: any","modelClass?: new (...args: any[]) => BaseLanguageModel","modelImportMap: Record<string, any>","optionalImportMap: Record<string, any>","loadedSequence: T"],"sources":["../../src/hub/base.ts"],"sourcesContent":["import type { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport type { Runnable } from \"@langchain/core/runnables\";\nimport type { Client, ClientConfig } from \"langsmith\";\nimport type { PromptCommit } from \"langsmith/schemas\";\n\n/**\n * Push a prompt to the hub.\n * If the specified repo doesn't already exist, it will be created.\n * @param repoFullName The full name of the repo.\n * @param runnable The prompt to push.\n * @param options\n * @returns The URL of the newly pushed prompt in the hub.\n */\nexport async function basePush(\n repoFullName: string,\n runnable: Runnable,\n options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }\n): Promise<string> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n const payloadOptions = {\n object: runnable,\n parentCommitHash: options?.parentCommitHash,\n isPublic: options?.isPublic ?? options?.newRepoIsPublic,\n description: options?.description ?? 
options?.newRepoDescription,\n readme: options?.readme,\n tags: options?.tags,\n };\n return client.pushPrompt(repoFullName, payloadOptions);\n}\n\nexport async function basePull(\n ownerRepoCommit: string,\n options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }\n): Promise<PromptCommit> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n\n const promptObject = await client.pullPromptCommit(ownerRepoCommit, {\n includeModel: options?.includeModel,\n });\n\n if (promptObject.manifest.kwargs?.metadata === undefined) {\n promptObject.manifest.kwargs = {\n ...promptObject.manifest.kwargs,\n metadata: {},\n };\n }\n\n promptObject.manifest.kwargs.metadata = {\n ...promptObject.manifest.kwargs.metadata,\n lc_hub_owner: promptObject.owner,\n lc_hub_repo: promptObject.repo,\n lc_hub_commit_hash: promptObject.commit_hash,\n };\n\n // Some nested mustache prompts have improperly parsed variables that include a dot.\n if (promptObject.manifest.kwargs.template_format === \"mustache\") {\n const stripDotNotation = (varName: string) => varName.split(\".\")[0];\n\n const { input_variables } = promptObject.manifest.kwargs;\n if (Array.isArray(input_variables)) {\n promptObject.manifest.kwargs.input_variables =\n input_variables.map(stripDotNotation);\n }\n\n const { messages } = promptObject.manifest.kwargs;\n if (Array.isArray(messages)) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n promptObject.manifest.kwargs.messages = messages.map((message: any) => {\n const nestedVars = message?.kwargs?.prompt?.kwargs?.input_variables;\n if (Array.isArray(nestedVars)) {\n // eslint-disable-next-line no-param-reassign\n message.kwargs.prompt.kwargs.input_variables =\n nestedVars.map(stripDotNotation);\n }\n return message;\n });\n }\n }\n return promptObject;\n}\n\nexport function generateModelImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let importMapKey;\n if (modelLcName === \"ChatOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatAnthropic\") {\n importMapKey = \"chat_models__anthropic\";\n } else if (modelLcName === \"ChatAzureOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatVertexAI\") {\n importMapKey = \"chat_models__vertexai\";\n } else if (modelLcName === \"ChatGoogleGenerativeAI\") {\n importMapKey = \"chat_models__google_genai\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n importMapKey = \"chat_models__chat_bedrock_converse\";\n } else if (modelLcName === \"ChatMistral\") {\n importMapKey = \"chat_models__mistralai\";\n } else if (modelLcName === \"ChatFireworks\") {\n importMapKey = \"chat_models__fireworks\";\n } else if (modelLcName === \"ChatGroq\") {\n importMapKey = \"chat_models__groq\";\n } else {\n throw new Error(\"Received unsupported model class when pulling prompt.\");\n }\n modelImportMap[importMapKey] = {\n ...modelImportMap[importMapKey],\n [modelLcName]: modelClass,\n };\n }\n return modelImportMap;\n}\n\nexport function generateOptionalImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n 
// eslint-disable-next-line @typescript-eslint/no-explicit-any\n const optionalImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let optionalImportMapKey;\n if (modelLcName === \"ChatGoogleGenerativeAI\") {\n optionalImportMapKey = \"langchain_google_genai/chat_models\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n optionalImportMapKey = \"langchain_aws/chat_models\";\n } else if (modelLcName === \"ChatGroq\") {\n optionalImportMapKey = \"langchain_groq/chat_models\";\n }\n if (optionalImportMapKey !== undefined) {\n optionalImportMap[optionalImportMapKey] = {\n [modelLcName]: modelClass,\n };\n }\n }\n return optionalImportMap;\n}\n\nexport function bindOutputSchema<T extends Runnable>(loadedSequence: T) {\n if (\n \"first\" in loadedSequence &&\n loadedSequence.first !== null &&\n typeof loadedSequence.first === \"object\" &&\n \"schema\" in loadedSequence.first &&\n \"last\" in loadedSequence &&\n loadedSequence.last !== null &&\n typeof loadedSequence.last === \"object\" &&\n \"bound\" in loadedSequence.last &&\n loadedSequence.last.bound !== null &&\n typeof loadedSequence.last.bound === \"object\" &&\n \"withStructuredOutput\" in loadedSequence.last.bound &&\n typeof loadedSequence.last.bound.withStructuredOutput === \"function\"\n ) {\n // eslint-disable-next-line no-param-reassign\n loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(\n loadedSequence.first.schema\n );\n }\n return loadedSequence;\n}\n\n/**\n * Dynamically load the LangSmith client.\n * @returns The LangSmith client.\n */\nasync function loadLangSmith(): Promise<new (config?: ClientConfig) => Client> {\n try {\n const { Client } = await import(\"langsmith\");\n return Client;\n } catch (error) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n const errorMessage = error instanceof Error ? 
error.message : String(error);\n throw new Error(\n `Error loading \"langsmith\" package, install it via \\`npm install langsmith\\` before you use this function.\\nError: ${errorMessage}`\n );\n }\n}\n"],"mappings":";;;;;;;;;AAaA,eAAsB,SACpBA,cACAC,UACAC,SAaiB;CACjB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAC1B,MAAM,iBAAiB;EACrB,QAAQ;EACR,kBAAkB,SAAS;EAC3B,UAAU,SAAS,YAAY,SAAS;EACxC,aAAa,SAAS,eAAe,SAAS;EAC9C,QAAQ,SAAS;EACjB,MAAM,SAAS;CAChB;AACD,QAAO,OAAO,WAAW,cAAc,eAAe;AACvD;AAED,eAAsB,SACpBC,iBACAC,SACuB;CACvB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAE1B,MAAM,eAAe,MAAM,OAAO,iBAAiB,iBAAiB,EAClE,cAAc,SAAS,aACxB,EAAC;AAEF,KAAI,aAAa,SAAS,QAAQ,aAAa,QAC7C,aAAa,SAAS,SAAS;EAC7B,GAAG,aAAa,SAAS;EACzB,UAAU,CAAE;CACb;CAGH,aAAa,SAAS,OAAO,WAAW;EACtC,GAAG,aAAa,SAAS,OAAO;EAChC,cAAc,aAAa;EAC3B,aAAa,aAAa;EAC1B,oBAAoB,aAAa;CAClC;AAGD,KAAI,aAAa,SAAS,OAAO,oBAAoB,YAAY;EAC/D,MAAM,mBAAmB,CAACC,YAAoB,QAAQ,MAAM,IAAI,CAAC;EAEjE,MAAM,EAAE,iBAAiB,GAAG,aAAa,SAAS;AAClD,MAAI,MAAM,QAAQ,gBAAgB,EAChC,aAAa,SAAS,OAAO,kBAC3B,gBAAgB,IAAI,iBAAiB;EAGzC,MAAM,EAAE,UAAU,GAAG,aAAa,SAAS;AAC3C,MAAI,MAAM,QAAQ,SAAS,EAEzB,aAAa,SAAS,OAAO,WAAW,SAAS,IAAI,CAACC,YAAiB;GACrE,MAAM,aAAa,SAAS,QAAQ,QAAQ,QAAQ;AACpD,OAAI,MAAM,QAAQ,WAAW,EAE3B,QAAQ,OAAO,OAAO,OAAO,kBAC3B,WAAW,IAAI,iBAAiB;AAEpC,UAAO;EACR,EAAC;CAEL;AACD,QAAO;AACR;AAED,SAAgB,uBAEdC,YACA;CAEA,MAAMC,iBAAsC,CAAE;AAC9C,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,cAClB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,mBACzB,eAAe;WACN,gBAAgB,gBACzB,eAAe;WACN,gBAAgB,0BACzB,eAAe;WACN,gBAAgB,uBACzB,eAAe;WACN,gBAAgB,eACzB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,YACzB,eAAe;MAEf,OAAM,IAAI,MAAM;EAElB,eAAe,gBAAgB;GAC7B,GAAG,eAAe;IACjB,cAAc;EAChB;CACF;AACD,QAAO;AACR;AAED,SAAgB,0BAEdD,YACA;CAEA,MAAME,oBAAyC,CAAE;AACjD,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,0BAClB,uBAAuB;WACd,gBAAgB,uBACzB,uBAAuB;WACd,gBAAgB,YACzB,uBAAuB;AAEzB,MAAI,yBAAyB,QAC3B,kBAAkB,wBAAwB,GACvC,cAAc,WAChB;CAEJ;AACD,QAAO;AACR;AAED,SAAgB,iBAAqCC,gBAAmB;AACtE,KACE,WAAW,kBACX,eAAe,UAAU,QACzB,OAAO,eAAe,UAAU,YAChC,YAAY,eAAe,SAC3B,UAAU,kBACV,eAAe,SAAS,QACxB,OAAO,eAAe,SAAS,YAC/B,WAAW,eAAe,QAC1B,eAAe,KAAK,UAAU,QAC9B,OAAO,eAAe,KAAK,UAAU,YACrC,0BAA0B,eAAe,KAAK,SAC9C,OAAO,eAAe,KAAK,MAAM,yBAAyB,YAG1D,eAAe,KAAK,QAAQ,eAAe,KAAK,MAAM,qBACpD,eAAe,MAAM,OACtB;AAEH,QAAO;AACR;;;;;AAMD,eAAe,gBAAgE;AAC7E,KAAI;EACF,MAAM,EAAE,QAAQ,GAAG,MAAM,OAAO;AAChC,SAAO;CACR,SAAQ,OAAO;EAEd,MAAM,eAAe,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;AAC3E,QAAM,IAAI,MACR,CAAC,kHAAkH,EAAE,cAAc;CAEtI;AACF"}
+ {"version":3,"file":"base.js","names":["repoFullName: string","runnable: Runnable","options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }","ownerRepoCommit: string","options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }","varName: string","message: any","modelClass?: new (...args: any[]) => BaseLanguageModel","modelImportMap: Record<string, any>","optionalImportMap: Record<string, any>","loadedSequence: T"],"sources":["../../src/hub/base.ts"],"sourcesContent":["import type { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport type { Runnable } from \"@langchain/core/runnables\";\n\nimport type { Client, ClientConfig } from \"langsmith\";\nimport type { PromptCommit } from \"langsmith/schemas\";\n\n/**\n * Push a prompt to the hub.\n * If the specified repo doesn't already exist, it will be created.\n * @param repoFullName The full name of the repo.\n * @param runnable The prompt to push.\n * @param options\n * @returns The URL of the newly pushed prompt in the hub.\n */\nexport async function basePush(\n repoFullName: string,\n runnable: Runnable,\n options?: {\n apiKey?: string;\n apiUrl?: string;\n parentCommitHash?: string;\n /** @deprecated Use isPublic instead. */\n newRepoIsPublic?: boolean;\n isPublic?: boolean;\n /** @deprecated Use description instead. */\n newRepoDescription?: string;\n description?: string;\n readme?: string;\n tags?: string[];\n }\n): Promise<string> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n const payloadOptions = {\n object: runnable,\n parentCommitHash: options?.parentCommitHash,\n isPublic: options?.isPublic ?? options?.newRepoIsPublic,\n description: options?.description ?? 
options?.newRepoDescription,\n readme: options?.readme,\n tags: options?.tags,\n };\n return client.pushPrompt(repoFullName, payloadOptions);\n}\n\nexport async function basePull(\n ownerRepoCommit: string,\n options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }\n): Promise<PromptCommit> {\n const Client = await loadLangSmith();\n const client = new Client(options);\n\n const promptObject = await client.pullPromptCommit(ownerRepoCommit, {\n includeModel: options?.includeModel,\n });\n\n if (promptObject.manifest.kwargs?.metadata === undefined) {\n promptObject.manifest.kwargs = {\n ...promptObject.manifest.kwargs,\n metadata: {},\n };\n }\n\n promptObject.manifest.kwargs.metadata = {\n ...promptObject.manifest.kwargs.metadata,\n lc_hub_owner: promptObject.owner,\n lc_hub_repo: promptObject.repo,\n lc_hub_commit_hash: promptObject.commit_hash,\n };\n\n // Some nested mustache prompts have improperly parsed variables that include a dot.\n if (promptObject.manifest.kwargs.template_format === \"mustache\") {\n const stripDotNotation = (varName: string) => varName.split(\".\")[0];\n\n const { input_variables } = promptObject.manifest.kwargs;\n if (Array.isArray(input_variables)) {\n promptObject.manifest.kwargs.input_variables =\n input_variables.map(stripDotNotation);\n }\n\n const { messages } = promptObject.manifest.kwargs;\n if (Array.isArray(messages)) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n promptObject.manifest.kwargs.messages = messages.map((message: any) => {\n const nestedVars = message?.kwargs?.prompt?.kwargs?.input_variables;\n if (Array.isArray(nestedVars)) {\n // eslint-disable-next-line no-param-reassign\n message.kwargs.prompt.kwargs.input_variables =\n nestedVars.map(stripDotNotation);\n }\n return message;\n });\n }\n }\n return promptObject;\n}\n\nexport function generateModelImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let importMapKey;\n if (modelLcName === \"ChatOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatAnthropic\") {\n importMapKey = \"chat_models__anthropic\";\n } else if (modelLcName === \"ChatAzureOpenAI\") {\n importMapKey = \"chat_models__openai\";\n } else if (modelLcName === \"ChatVertexAI\") {\n importMapKey = \"chat_models__vertexai\";\n } else if (modelLcName === \"ChatGoogleGenerativeAI\") {\n importMapKey = \"chat_models__google_genai\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n importMapKey = \"chat_models__chat_bedrock_converse\";\n } else if (modelLcName === \"ChatMistral\") {\n importMapKey = \"chat_models__mistralai\";\n } else if (modelLcName === \"ChatFireworks\") {\n importMapKey = \"chat_models__fireworks\";\n } else if (modelLcName === \"ChatGroq\") {\n importMapKey = \"chat_models__groq\";\n } else {\n throw new Error(\"Received unsupported model class when pulling prompt.\");\n }\n modelImportMap[importMapKey] = {\n ...modelImportMap[importMapKey],\n [modelLcName]: modelClass,\n };\n }\n return modelImportMap;\n}\n\nexport function generateOptionalImportMap(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n modelClass?: new (...args: any[]) => BaseLanguageModel\n) {\n 
// eslint-disable-next-line @typescript-eslint/no-explicit-any\n const optionalImportMap: Record<string, any> = {};\n if (modelClass !== undefined) {\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const modelLcName = (modelClass as any)?.lc_name();\n let optionalImportMapKey;\n if (modelLcName === \"ChatGoogleGenerativeAI\") {\n optionalImportMapKey = \"langchain_google_genai/chat_models\";\n } else if (modelLcName === \"ChatBedrockConverse\") {\n optionalImportMapKey = \"langchain_aws/chat_models\";\n } else if (modelLcName === \"ChatGroq\") {\n optionalImportMapKey = \"langchain_groq/chat_models\";\n }\n if (optionalImportMapKey !== undefined) {\n optionalImportMap[optionalImportMapKey] = {\n [modelLcName]: modelClass,\n };\n }\n }\n return optionalImportMap;\n}\n\nexport function bindOutputSchema<T extends Runnable>(loadedSequence: T) {\n if (\n \"first\" in loadedSequence &&\n loadedSequence.first !== null &&\n typeof loadedSequence.first === \"object\" &&\n \"schema\" in loadedSequence.first &&\n \"last\" in loadedSequence &&\n loadedSequence.last !== null &&\n typeof loadedSequence.last === \"object\" &&\n \"bound\" in loadedSequence.last &&\n loadedSequence.last.bound !== null &&\n typeof loadedSequence.last.bound === \"object\" &&\n \"withStructuredOutput\" in loadedSequence.last.bound &&\n typeof loadedSequence.last.bound.withStructuredOutput === \"function\"\n ) {\n // eslint-disable-next-line no-param-reassign\n loadedSequence.last.bound = loadedSequence.last.bound.withStructuredOutput(\n loadedSequence.first.schema\n );\n }\n return loadedSequence;\n}\n\n/**\n * Dynamically load the LangSmith client.\n * @returns The LangSmith client.\n */\nasync function loadLangSmith(): Promise<new (config?: ClientConfig) => Client> {\n try {\n const { Client } = await import(\"langsmith\");\n return Client;\n } catch (error) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n const errorMessage = error instanceof Error ? 
error.message : String(error);\n throw new Error(\n `Error loading \"langsmith\" package, install it via \\`npm install langsmith\\` before you use this function.\\nError: ${errorMessage}`\n );\n }\n}\n"],"mappings":";;;;;;;;;AAcA,eAAsB,SACpBA,cACAC,UACAC,SAaiB;CACjB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAC1B,MAAM,iBAAiB;EACrB,QAAQ;EACR,kBAAkB,SAAS;EAC3B,UAAU,SAAS,YAAY,SAAS;EACxC,aAAa,SAAS,eAAe,SAAS;EAC9C,QAAQ,SAAS;EACjB,MAAM,SAAS;CAChB;AACD,QAAO,OAAO,WAAW,cAAc,eAAe;AACvD;AAED,eAAsB,SACpBC,iBACAC,SACuB;CACvB,MAAM,SAAS,MAAM,eAAe;CACpC,MAAM,SAAS,IAAI,OAAO;CAE1B,MAAM,eAAe,MAAM,OAAO,iBAAiB,iBAAiB,EAClE,cAAc,SAAS,aACxB,EAAC;AAEF,KAAI,aAAa,SAAS,QAAQ,aAAa,QAC7C,aAAa,SAAS,SAAS;EAC7B,GAAG,aAAa,SAAS;EACzB,UAAU,CAAE;CACb;CAGH,aAAa,SAAS,OAAO,WAAW;EACtC,GAAG,aAAa,SAAS,OAAO;EAChC,cAAc,aAAa;EAC3B,aAAa,aAAa;EAC1B,oBAAoB,aAAa;CAClC;AAGD,KAAI,aAAa,SAAS,OAAO,oBAAoB,YAAY;EAC/D,MAAM,mBAAmB,CAACC,YAAoB,QAAQ,MAAM,IAAI,CAAC;EAEjE,MAAM,EAAE,iBAAiB,GAAG,aAAa,SAAS;AAClD,MAAI,MAAM,QAAQ,gBAAgB,EAChC,aAAa,SAAS,OAAO,kBAC3B,gBAAgB,IAAI,iBAAiB;EAGzC,MAAM,EAAE,UAAU,GAAG,aAAa,SAAS;AAC3C,MAAI,MAAM,QAAQ,SAAS,EAEzB,aAAa,SAAS,OAAO,WAAW,SAAS,IAAI,CAACC,YAAiB;GACrE,MAAM,aAAa,SAAS,QAAQ,QAAQ,QAAQ;AACpD,OAAI,MAAM,QAAQ,WAAW,EAE3B,QAAQ,OAAO,OAAO,OAAO,kBAC3B,WAAW,IAAI,iBAAiB;AAEpC,UAAO;EACR,EAAC;CAEL;AACD,QAAO;AACR;AAED,SAAgB,uBAEdC,YACA;CAEA,MAAMC,iBAAsC,CAAE;AAC9C,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,cAClB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,mBACzB,eAAe;WACN,gBAAgB,gBACzB,eAAe;WACN,gBAAgB,0BACzB,eAAe;WACN,gBAAgB,uBACzB,eAAe;WACN,gBAAgB,eACzB,eAAe;WACN,gBAAgB,iBACzB,eAAe;WACN,gBAAgB,YACzB,eAAe;MAEf,OAAM,IAAI,MAAM;EAElB,eAAe,gBAAgB;GAC7B,GAAG,eAAe;IACjB,cAAc;EAChB;CACF;AACD,QAAO;AACR;AAED,SAAgB,0BAEdD,YACA;CAEA,MAAME,oBAAyC,CAAE;AACjD,KAAI,eAAe,QAAW;EAE5B,MAAM,cAAe,YAAoB,SAAS;EAClD,IAAI;AACJ,MAAI,gBAAgB,0BAClB,uBAAuB;WACd,gBAAgB,uBACzB,uBAAuB;WACd,gBAAgB,YACzB,uBAAuB;AAEzB,MAAI,yBAAyB,QAC3B,kBAAkB,wBAAwB,GACvC,cAAc,WAChB;CAEJ;AACD,QAAO;AACR;AAED,SAAgB,iBAAqCC,gBAAmB;AACtE,KACE,WAAW,kBACX,eAAe,UAAU,QACzB,OAAO,eAAe,UAAU,YAChC,YAAY,eAAe,SAC3B,UAAU,kBACV,eAAe,SAAS,QACxB,OAAO,eAAe,SAAS,YAC/B,WAAW,eAAe,QAC1B,eAAe,KAAK,UAAU,QAC9B,OAAO,eAAe,KAAK,UAAU,YACrC,0BAA0B,eAAe,KAAK,SAC9C,OAAO,eAAe,KAAK,MAAM,yBAAyB,YAG1D,eAAe,KAAK,QAAQ,eAAe,KAAK,MAAM,qBACpD,eAAe,MAAM,OACtB;AAEH,QAAO;AACR;;;;;AAMD,eAAe,gBAAgE;AAC7E,KAAI;EACF,MAAM,EAAE,QAAQ,GAAG,MAAM,OAAO;AAChC,SAAO;CACR,SAAQ,OAAO;EAEd,MAAM,eAAe,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;AAC3E,QAAM,IAAI,MACR,CAAC,kHAAkH,EAAE,cAAc;CAEtI;AACF"}
package/dist/index.cjs CHANGED
@@ -13,22 +13,29 @@ const __langchain_core_context = require_rolldown_runtime.__toESM(require("@lang
  var src_exports = {};
  require_rolldown_runtime.__export(src_exports, {
  AIMessage: () => __langchain_core_messages.AIMessage,
+ AIMessageChunk: () => __langchain_core_messages.AIMessageChunk,
  BaseMessage: () => __langchain_core_messages.BaseMessage,
+ BaseMessageChunk: () => __langchain_core_messages.BaseMessageChunk,
  Document: () => __langchain_core_documents.Document,
  DynamicStructuredTool: () => __langchain_core_tools.DynamicStructuredTool,
  HumanMessage: () => __langchain_core_messages.HumanMessage,
+ HumanMessageChunk: () => __langchain_core_messages.HumanMessageChunk,
  InMemoryStore: () => __langchain_langgraph.InMemoryStore,
  MemorySaver: () => __langchain_langgraph.MemorySaver,
  SystemMessage: () => __langchain_core_messages.SystemMessage,
+ SystemMessageChunk: () => __langchain_core_messages.SystemMessageChunk,
  ToolMessage: () => __langchain_core_messages.ToolMessage,
+ ToolMessageChunk: () => __langchain_core_messages.ToolMessageChunk,
  ToolNode: () => require_ToolNode.ToolNode,
  createReactAgent: () => require_index.createReactAgent,
+ filterMessages: () => __langchain_core_messages.filterMessages,
  getContextVariable: () => __langchain_core_context.getContextVariable,
  initChatModel: () => require_chat_models_universal.initChatModel,
  providerStrategy: () => require_responses.providerStrategy,
  setContextVariable: () => __langchain_core_context.setContextVariable,
  tool: () => __langchain_core_tools.tool,
- toolStrategy: () => require_responses.toolStrategy
+ toolStrategy: () => require_responses.toolStrategy,
+ trimMessages: () => __langchain_core_messages.trimMessages
  });
 
  //#endregion
@@ -38,12 +45,24 @@ Object.defineProperty(exports, 'AIMessage', {
  return __langchain_core_messages.AIMessage;
  }
  });
+ Object.defineProperty(exports, 'AIMessageChunk', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.AIMessageChunk;
+ }
+ });
  Object.defineProperty(exports, 'BaseMessage', {
  enumerable: true,
  get: function () {
  return __langchain_core_messages.BaseMessage;
  }
  });
+ Object.defineProperty(exports, 'BaseMessageChunk', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.BaseMessageChunk;
+ }
+ });
  Object.defineProperty(exports, 'Document', {
  enumerable: true,
  get: function () {
@@ -62,6 +81,12 @@ Object.defineProperty(exports, 'HumanMessage', {
  return __langchain_core_messages.HumanMessage;
  }
  });
+ Object.defineProperty(exports, 'HumanMessageChunk', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.HumanMessageChunk;
+ }
+ });
  Object.defineProperty(exports, 'InMemoryStore', {
  enumerable: true,
  get: function () {
@@ -80,14 +105,32 @@ Object.defineProperty(exports, 'SystemMessage', {
  return __langchain_core_messages.SystemMessage;
  }
  });
+ Object.defineProperty(exports, 'SystemMessageChunk', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.SystemMessageChunk;
+ }
+ });
  Object.defineProperty(exports, 'ToolMessage', {
  enumerable: true,
  get: function () {
  return __langchain_core_messages.ToolMessage;
  }
  });
+ Object.defineProperty(exports, 'ToolMessageChunk', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.ToolMessageChunk;
+ }
+ });
  exports.ToolNode = require_ToolNode.ToolNode;
  exports.createReactAgent = require_index.createReactAgent;
+ Object.defineProperty(exports, 'filterMessages', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.filterMessages;
+ }
+ });
  Object.defineProperty(exports, 'getContextVariable', {
  enumerable: true,
  get: function () {
@@ -115,4 +158,10 @@ Object.defineProperty(exports, 'tool', {
  }
  });
  exports.toolStrategy = require_responses.toolStrategy;
+ Object.defineProperty(exports, 'trimMessages', {
+ enumerable: true,
+ get: function () {
+ return __langchain_core_messages.trimMessages;
+ }
+ });
  //# sourceMappingURL=index.cjs.map
package/dist/index.cjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.cjs","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n HumanMessage,\n ToolMessage,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport { tool, DynamicStructuredTool } from \"@langchain/core/tools\";\n\n/**\n * LangChain Agents\n */\nexport {\n createReactAgent,\n toolStrategy,\n providerStrategy,\n ToolNode,\n type AgentState,\n type HumanInterrupt,\n type HumanInterruptConfig,\n type ActionRequest,\n type HumanResponse,\n} from \"./agents/index.js\";\n\n/**\n * LangChain Memory\n * Check in what we want to export here\n */\nexport { MemorySaver, InMemoryStore } from \"@langchain/langgraph\";\n\n/**\n * LangChain Context\n */\nexport {\n setContextVariable,\n getContextVariable,\n} from \"@langchain/core/context\";\n\n/**\n * LangChain Documents\n */\nexport { Document } from \"@langchain/core/documents\";\n"],"mappings":""}
+ {"version":3,"file":"index.cjs","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n BaseMessageChunk,\n AIMessage,\n AIMessageChunk,\n SystemMessage,\n SystemMessageChunk,\n HumanMessage,\n HumanMessageChunk,\n ToolMessage,\n ToolMessageChunk,\n type ContentBlock,\n filterMessages,\n trimMessages,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport { tool, DynamicStructuredTool } from \"@langchain/core/tools\";\n\n/**\n * LangChain Agents\n */\nexport {\n createReactAgent,\n toolStrategy,\n providerStrategy,\n ToolNode,\n type AgentState,\n type AgentRuntime,\n type HumanInterrupt,\n type HumanInterruptConfig,\n type ActionRequest,\n type HumanResponse,\n} from \"./agents/index.js\";\n\n/**\n * LangChain Memory\n * Check in what we want to export here\n */\nexport { MemorySaver, InMemoryStore } from \"@langchain/langgraph\";\n\n/**\n * LangChain Context\n */\nexport {\n setContextVariable,\n getContextVariable,\n} from \"@langchain/core/context\";\n\n/**\n * LangChain Documents\n */\nexport { Document } from \"@langchain/core/documents\";\n"],"mappings":""}
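For reference, the index.cjs changes above wire up lazy `Object.defineProperty` getters for the message-chunk classes plus `filterMessages` and `trimMessages`, so they behave like ordinary named exports of the top-level package. A minimal sketch of consuming one of them, assuming `langchain@1.0.0-alpha.3` is installed and that the re-export mirrors the `@langchain/core/messages` helper of the same name:

```ts
import { AIMessage, HumanMessage, SystemMessage, filterMessages } from "langchain";

const history = [
  new SystemMessage("You are a terse assistant."),
  new HumanMessage("What changed in alpha.3?"),
  new AIMessage("Mostly new re-exports."),
];

// Keep only the system and human turns; filterMessages returns a new
// array and leaves `history` untouched.
const humanTurns = filterMessages(history, {
  includeTypes: ["system", "human"],
});
```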
package/dist/index.d.cts CHANGED
@@ -1,12 +1,12 @@
  import { initChatModel } from "./chat_models/universal.cjs";
  import { providerStrategy, toolStrategy } from "./agents/responses.cjs";
  import { ToolNode } from "./agents/nodes/ToolNode.cjs";
- import { AgentState } from "./agents/types.cjs";
+ import { AgentRuntime, AgentState } from "./agents/types.cjs";
  import { ActionRequest, HumanInterrupt, HumanInterruptConfig, HumanResponse } from "./agents/interrupt.cjs";
  import { createReactAgent } from "./agents/index.cjs";
  import { Document } from "@langchain/core/documents";
- import { AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
+ import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ContentBlock, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, filterMessages, trimMessages } from "@langchain/core/messages";
  import { DynamicStructuredTool, tool } from "@langchain/core/tools";
  import { InMemoryStore, MemorySaver } from "@langchain/langgraph";
  import { getContextVariable, setContextVariable } from "@langchain/core/context";
- export { AIMessage, type ActionRequest, type AgentState, BaseMessage, Document, DynamicStructuredTool, type HumanInterrupt, type HumanInterruptConfig, HumanMessage, type HumanResponse, InMemoryStore, MemorySaver, SystemMessage, ToolMessage, ToolNode, createReactAgent, getContextVariable, initChatModel, providerStrategy, setContextVariable, tool, toolStrategy };
+ export { AIMessage, AIMessageChunk, type ActionRequest, type AgentRuntime, type AgentState, BaseMessage, BaseMessageChunk, type ContentBlock, Document, DynamicStructuredTool, type HumanInterrupt, type HumanInterruptConfig, HumanMessage, HumanMessageChunk, type HumanResponse, InMemoryStore, MemorySaver, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, ToolNode, createReactAgent, filterMessages, getContextVariable, initChatModel, providerStrategy, setContextVariable, tool, toolStrategy, trimMessages };
package/dist/index.d.ts CHANGED
@@ -1,12 +1,12 @@
  import { initChatModel } from "./chat_models/universal.js";
  import { providerStrategy, toolStrategy } from "./agents/responses.js";
  import { ToolNode } from "./agents/nodes/ToolNode.js";
- import { AgentState } from "./agents/types.js";
+ import { AgentRuntime, AgentState } from "./agents/types.js";
  import { ActionRequest, HumanInterrupt, HumanInterruptConfig, HumanResponse } from "./agents/interrupt.js";
  import { createReactAgent } from "./agents/index.js";
  import { Document } from "@langchain/core/documents";
- import { AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
+ import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ContentBlock, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, filterMessages, trimMessages } from "@langchain/core/messages";
  import { DynamicStructuredTool, tool } from "@langchain/core/tools";
  import { InMemoryStore, MemorySaver } from "@langchain/langgraph";
  import { getContextVariable, setContextVariable } from "@langchain/core/context";
- export { AIMessage, type ActionRequest, type AgentState, BaseMessage, Document, DynamicStructuredTool, type HumanInterrupt, type HumanInterruptConfig, HumanMessage, type HumanResponse, InMemoryStore, MemorySaver, SystemMessage, ToolMessage, ToolNode, createReactAgent, getContextVariable, initChatModel, providerStrategy, setContextVariable, tool, toolStrategy };
+ export { AIMessage, AIMessageChunk, type ActionRequest, type AgentRuntime, type AgentState, BaseMessage, BaseMessageChunk, type ContentBlock, Document, DynamicStructuredTool, type HumanInterrupt, type HumanInterruptConfig, HumanMessage, HumanMessageChunk, type HumanResponse, InMemoryStore, MemorySaver, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, ToolNode, createReactAgent, filterMessages, getContextVariable, initChatModel, providerStrategy, setContextVariable, tool, toolStrategy, trimMessages };
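The declaration files above now surface the chunk classes (and the `AgentRuntime` and `ContentBlock` types) from the package root. A small sketch of the usual accumulation pattern for streamed deltas; the merging logic is an assumption about typical usage, not something the diff itself prescribes:

```ts
import { AIMessageChunk } from "langchain";

// Merge partial AI-message chunks into one chunk; `concat` combines the
// text content (and tool-call deltas) of consecutive chunks.
function mergeChunks(chunks: AIMessageChunk[]): AIMessageChunk | undefined {
  return chunks.reduce<AIMessageChunk | undefined>(
    (acc, chunk) => (acc ? acc.concat(chunk) : chunk),
    undefined
  );
}

const merged = mergeChunks([
  new AIMessageChunk({ content: "Hel" }),
  new AIMessageChunk({ content: "lo" }),
]);
// merged?.content === "Hello"
```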
package/dist/index.js CHANGED
@@ -4,7 +4,7 @@ import { providerStrategy, toolStrategy } from "./agents/responses.js";
  import { ToolNode } from "./agents/nodes/ToolNode.js";
  import { createReactAgent } from "./agents/index.js";
  import { Document } from "@langchain/core/documents";
- import { AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
+ import { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, filterMessages, trimMessages } from "@langchain/core/messages";
  import { DynamicStructuredTool, tool } from "@langchain/core/tools";
  import { InMemoryStore, MemorySaver } from "@langchain/langgraph";
  import { getContextVariable, setContextVariable } from "@langchain/core/context";
@@ -13,24 +13,31 @@ import { getContextVariable, setContextVariable } from "@langchain/core/context"
  var src_exports = {};
  __export(src_exports, {
  AIMessage: () => AIMessage,
+ AIMessageChunk: () => AIMessageChunk,
  BaseMessage: () => BaseMessage,
+ BaseMessageChunk: () => BaseMessageChunk,
  Document: () => Document,
  DynamicStructuredTool: () => DynamicStructuredTool,
  HumanMessage: () => HumanMessage,
+ HumanMessageChunk: () => HumanMessageChunk,
  InMemoryStore: () => InMemoryStore,
  MemorySaver: () => MemorySaver,
  SystemMessage: () => SystemMessage,
+ SystemMessageChunk: () => SystemMessageChunk,
  ToolMessage: () => ToolMessage,
+ ToolMessageChunk: () => ToolMessageChunk,
  ToolNode: () => ToolNode,
  createReactAgent: () => createReactAgent,
+ filterMessages: () => filterMessages,
  getContextVariable: () => getContextVariable,
  initChatModel: () => initChatModel,
  providerStrategy: () => providerStrategy,
  setContextVariable: () => setContextVariable,
  tool: () => tool,
- toolStrategy: () => toolStrategy
+ toolStrategy: () => toolStrategy,
+ trimMessages: () => trimMessages
  });
 
  //#endregion
- export { AIMessage, BaseMessage, Document, DynamicStructuredTool, HumanMessage, InMemoryStore, MemorySaver, SystemMessage, ToolMessage, ToolNode, createReactAgent, getContextVariable, initChatModel, providerStrategy, setContextVariable, src_exports, tool, toolStrategy };
+ export { AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, Document, DynamicStructuredTool, HumanMessage, HumanMessageChunk, InMemoryStore, MemorySaver, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, ToolNode, createReactAgent, filterMessages, getContextVariable, initChatModel, providerStrategy, setContextVariable, src_exports, tool, toolStrategy, trimMessages };
  //# sourceMappingURL=index.js.map
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.js","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n HumanMessage,\n ToolMessage,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport { tool, DynamicStructuredTool } from \"@langchain/core/tools\";\n\n/**\n * LangChain Agents\n */\nexport {\n createReactAgent,\n toolStrategy,\n providerStrategy,\n ToolNode,\n type AgentState,\n type HumanInterrupt,\n type HumanInterruptConfig,\n type ActionRequest,\n type HumanResponse,\n} from \"./agents/index.js\";\n\n/**\n * LangChain Memory\n * Check in what we want to export here\n */\nexport { MemorySaver, InMemoryStore } from \"@langchain/langgraph\";\n\n/**\n * LangChain Context\n */\nexport {\n setContextVariable,\n getContextVariable,\n} from \"@langchain/core/context\";\n\n/**\n * LangChain Documents\n */\nexport { Document } from \"@langchain/core/documents\";\n"],"mappings":""}
+ {"version":3,"file":"index.js","names":[],"sources":["../src/index.ts"],"sourcesContent":["/**\n * LangChain Messages\n */\nexport {\n BaseMessage,\n BaseMessageChunk,\n AIMessage,\n AIMessageChunk,\n SystemMessage,\n SystemMessageChunk,\n HumanMessage,\n HumanMessageChunk,\n ToolMessage,\n ToolMessageChunk,\n type ContentBlock,\n filterMessages,\n trimMessages,\n} from \"@langchain/core/messages\";\n\n/**\n * Universal Chat Model\n */\nexport { initChatModel } from \"./chat_models/universal.js\";\n\n/**\n * LangChain Tools\n */\nexport { tool, DynamicStructuredTool } from \"@langchain/core/tools\";\n\n/**\n * LangChain Agents\n */\nexport {\n createReactAgent,\n toolStrategy,\n providerStrategy,\n ToolNode,\n type AgentState,\n type AgentRuntime,\n type HumanInterrupt,\n type HumanInterruptConfig,\n type ActionRequest,\n type HumanResponse,\n} from \"./agents/index.js\";\n\n/**\n * LangChain Memory\n * Check in what we want to export here\n */\nexport { MemorySaver, InMemoryStore } from \"@langchain/langgraph\";\n\n/**\n * LangChain Context\n */\nexport {\n setContextVariable,\n getContextVariable,\n} from \"@langchain/core/context\";\n\n/**\n * LangChain Documents\n */\nexport { Document } from \"@langchain/core/documents\";\n"],"mappings":""}
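With `trimMessages` re-exported from the ESM entrypoint as well, chat-history windowing can be done straight from the top-level package. A hedged sketch, assuming the helper keeps the `@langchain/core/messages` signature (a message list plus `maxTokens`/`tokenCounter` options); the one-token-per-message counter is only for illustration:

```ts
import { AIMessage, HumanMessage, SystemMessage, trimMessages } from "langchain";

async function lastTurnsWithSystem() {
  const history = [
    new SystemMessage("You are a terse assistant."),
    new HumanMessage("Hi!"),
    new AIMessage("Hello!"),
    new HumanMessage("Summarize our conversation so far."),
  ];

  // Keep the system message plus the most recent turns, counting each
  // message as a single "token".
  return trimMessages(history, {
    maxTokens: 3,
    strategy: "last",
    includeSystem: true,
    tokenCounter: (msgs) => msgs.length,
  });
}
```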
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langchain",
- "version": "1.0.0-alpha.1",
+ "version": "1.0.0-alpha.3",
  "description": "Typescript bindings for langchain",
  "author": "LangChain",
  "license": "MIT",
@@ -16,25 +16,7 @@
  "url": "git@github.com:langchain-ai/langchainjs.git"
  },
  "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/langchain/",
- "scripts": {
- "build": "pnpm --filter @langchain/build compile langchain",
- "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
- "lint:dpdm": "dpdm --skip-dynamic-imports circular --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
- "lint": "pnpm lint:eslint && pnpm lint:dpdm",
- "lint:fix": "pnpm lint:eslint --fix && pnpm lint:dpdm",
- "precommit": "lint-staged",
- "clean": "rm -rf .turbo dist/",
- "release": "release-it --only-version --config .release-it.json",
- "test": "vitest run",
- "test:watch": "vitest --watch",
- "test:integration": "vitest --mode int",
- "format": "prettier --config .prettierrc --write \"src\"",
- "format:check": "prettier --config .prettierrc --check \"src\""
- },
  "devDependencies": {
- "@langchain/anthropic": "workspace:*",
- "@langchain/cohere": "workspace:*",
- "@langchain/core": "workspace:*",
  "@tsconfig/recommended": "^1.0.2",
  "@types/js-yaml": "^4",
  "@types/jsdom": "^21.1.1",
@@ -57,12 +39,16 @@
  "peggy": "^3.0.2",
  "prettier": "^2.8.3",
  "reflect-metadata": "^0.2.2",
- "release-it": "^18.1.2",
+ "release-it": "^19.0.4",
+ "release-it-pnpm": "^4.6.6",
  "rimraf": "^5.0.1",
  "rollup": "^3.19.1",
  "typeorm": "^0.3.20",
  "typescript": "~5.8.3",
- "vitest": "^3.2.4"
+ "vitest": "^3.2.4",
+ "@langchain/anthropic": "1.0.0-alpha.1",
+ "@langchain/core": "1.0.0-alpha.2",
+ "@langchain/cohere": "0.3.4"
  },
  "peerDependencies": {
  "@langchain/core": "^1.0.0-alpha.1 <2.0.0",
@@ -82,8 +68,6 @@
  }
  },
  "dependencies": {
- "@langchain/openai": "workspace:*",
- "@langchain/textsplitters": "workspace:*",
  "@langchain/langgraph": "^1.0.0-alpha",
  "@langchain/langgraph-checkpoint": "^0.1.1",
  "js-yaml": "^4.1.0",
@@ -92,7 +76,9 @@
  "p-retry": "4",
  "uuid": "^10.0.0",
  "yaml": "^2.2.1",
- "zod": "^3.25.32"
+ "zod": "^3.25.32",
+ "@langchain/openai": "1.0.0-alpha.1",
+ "@langchain/textsplitters": "1.0.0-alpha.1"
  },
  "optionalDependencies": {
  "langsmith": "^0.3.64"
@@ -833,5 +819,20 @@
  "input": "./src/evaluation/index.ts"
  },
  "./package.json": "./package.json"
+ },
+ "scripts": {
+ "build": "pnpm --filter @langchain/build compile langchain",
+ "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/",
+ "lint:dpdm": "dpdm --skip-dynamic-imports circular --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
+ "lint": "pnpm lint:eslint && pnpm lint:dpdm",
+ "lint:fix": "pnpm lint:eslint --fix && pnpm lint:dpdm",
+ "precommit": "lint-staged",
+ "clean": "rm -rf .turbo dist/",
+ "release": "release-it --only-version --config .release-it.json",
+ "test": "vitest run",
+ "test:watch": "vitest --watch",
+ "test:integration": "vitest --mode int",
+ "format": "prettier --config .prettierrc --write \"src\"",
+ "format:check": "prettier --config .prettierrc --check \"src\""
  }
- }
+ }