langchain-core 1.0.0a2__py3-none-any.whl → 1.0.0a3__py3-none-any.whl

This diff compares the contents of the two publicly released package versions as they appear in their respective public registries.

This version of langchain-core has been flagged as potentially problematic.

Files changed (130)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +27 -5
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +99 -27
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +1 -1
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +16 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +3 -1
  37. langchain_core/messages/block_translators/bedrock.py +3 -1
  38. langchain_core/messages/block_translators/bedrock_converse.py +3 -1
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/ollama.py +3 -1
  43. langchain_core/messages/block_translators/openai.py +50 -20
  44. langchain_core/messages/content.py +23 -13
  45. langchain_core/messages/human.py +2 -13
  46. langchain_core/messages/system.py +2 -6
  47. langchain_core/messages/tool.py +34 -14
  48. langchain_core/messages/utils.py +186 -73
  49. langchain_core/output_parsers/base.py +5 -2
  50. langchain_core/output_parsers/json.py +4 -4
  51. langchain_core/output_parsers/list.py +7 -22
  52. langchain_core/output_parsers/openai_functions.py +3 -0
  53. langchain_core/output_parsers/openai_tools.py +6 -1
  54. langchain_core/output_parsers/pydantic.py +4 -0
  55. langchain_core/output_parsers/string.py +5 -1
  56. langchain_core/output_parsers/xml.py +19 -19
  57. langchain_core/outputs/chat_generation.py +18 -7
  58. langchain_core/outputs/generation.py +14 -3
  59. langchain_core/outputs/llm_result.py +8 -1
  60. langchain_core/prompt_values.py +10 -4
  61. langchain_core/prompts/base.py +6 -11
  62. langchain_core/prompts/chat.py +88 -60
  63. langchain_core/prompts/dict.py +16 -8
  64. langchain_core/prompts/few_shot.py +9 -11
  65. langchain_core/prompts/few_shot_with_templates.py +5 -1
  66. langchain_core/prompts/image.py +12 -5
  67. langchain_core/prompts/loading.py +2 -2
  68. langchain_core/prompts/message.py +5 -6
  69. langchain_core/prompts/pipeline.py +13 -8
  70. langchain_core/prompts/prompt.py +22 -8
  71. langchain_core/prompts/string.py +18 -10
  72. langchain_core/prompts/structured.py +7 -2
  73. langchain_core/rate_limiters.py +2 -2
  74. langchain_core/retrievers.py +7 -6
  75. langchain_core/runnables/base.py +387 -246
  76. langchain_core/runnables/branch.py +11 -28
  77. langchain_core/runnables/config.py +20 -17
  78. langchain_core/runnables/configurable.py +34 -19
  79. langchain_core/runnables/fallbacks.py +20 -13
  80. langchain_core/runnables/graph.py +48 -38
  81. langchain_core/runnables/graph_ascii.py +40 -17
  82. langchain_core/runnables/graph_mermaid.py +54 -25
  83. langchain_core/runnables/graph_png.py +27 -31
  84. langchain_core/runnables/history.py +55 -58
  85. langchain_core/runnables/passthrough.py +44 -21
  86. langchain_core/runnables/retry.py +44 -23
  87. langchain_core/runnables/router.py +9 -8
  88. langchain_core/runnables/schema.py +9 -0
  89. langchain_core/runnables/utils.py +53 -90
  90. langchain_core/stores.py +19 -31
  91. langchain_core/sys_info.py +9 -8
  92. langchain_core/tools/base.py +36 -27
  93. langchain_core/tools/convert.py +25 -14
  94. langchain_core/tools/simple.py +36 -8
  95. langchain_core/tools/structured.py +25 -12
  96. langchain_core/tracers/base.py +2 -2
  97. langchain_core/tracers/context.py +5 -1
  98. langchain_core/tracers/core.py +110 -46
  99. langchain_core/tracers/evaluation.py +22 -26
  100. langchain_core/tracers/event_stream.py +97 -42
  101. langchain_core/tracers/langchain.py +12 -3
  102. langchain_core/tracers/langchain_v1.py +10 -2
  103. langchain_core/tracers/log_stream.py +56 -17
  104. langchain_core/tracers/root_listeners.py +4 -20
  105. langchain_core/tracers/run_collector.py +6 -16
  106. langchain_core/tracers/schemas.py +5 -1
  107. langchain_core/utils/aiter.py +14 -6
  108. langchain_core/utils/env.py +3 -0
  109. langchain_core/utils/function_calling.py +46 -20
  110. langchain_core/utils/interactive_env.py +6 -2
  111. langchain_core/utils/iter.py +12 -5
  112. langchain_core/utils/json.py +12 -3
  113. langchain_core/utils/json_schema.py +156 -40
  114. langchain_core/utils/loading.py +5 -1
  115. langchain_core/utils/mustache.py +25 -16
  116. langchain_core/utils/pydantic.py +38 -9
  117. langchain_core/utils/utils.py +25 -9
  118. langchain_core/vectorstores/base.py +7 -20
  119. langchain_core/vectorstores/in_memory.py +20 -14
  120. langchain_core/vectorstores/utils.py +18 -12
  121. langchain_core/version.py +1 -1
  122. langchain_core-1.0.0a3.dist-info/METADATA +77 -0
  123. langchain_core-1.0.0a3.dist-info/RECORD +181 -0
  124. langchain_core/beta/__init__.py +0 -1
  125. langchain_core/beta/runnables/__init__.py +0 -1
  126. langchain_core/beta/runnables/context.py +0 -448
  127. langchain_core-1.0.0a2.dist-info/METADATA +0 -106
  128. langchain_core-1.0.0a2.dist-info/RECORD +0 -184
  129. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a3.dist-info}/WHEEL +0 -0
  130. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a3.dist-info}/entry_points.txt +0 -0
@@ -92,12 +92,13 @@ def tool(
             positional argument.
         description: Optional description for the tool.
             Precedence for the tool description value is as follows:
-            - `description` argument
-                (used even if docstring and/or `args_schema` are provided)
-            - tool function docstring
-                (used even if `args_schema` is provided)
-            - `args_schema` description
-                (used only if `description` / docstring are not provided)
+
+            - ``description`` argument
+                (used even if docstring and/or ``args_schema`` are provided)
+            - tool function docstring
+                (used even if ``args_schema`` is provided)
+            - ``args_schema`` description
+                (used only if `description` / docstring are not provided)
         *args: Extra positional arguments. Must be empty.
         return_direct: Whether to return directly from the tool rather
             than continuing the agent loop. Defaults to False.
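For illustration, a minimal sketch of the precedence documented above (tool names are hypothetical; only the public `langchain_core.tools.tool` decorator is assumed):

    from langchain_core.tools import tool

    @tool(description="Look up current weather for a city.")
    def get_weather(city: str) -> str:
        """This docstring is superseded by the explicit description argument."""
        return f"Sunny in {city}"

    @tool
    def get_time(city: str) -> str:
        """Return the current local time for a city."""  # becomes the description
        return "12:00"

    print(get_weather.description)  # "Look up current weather for a city."
    print(get_time.description)     # "Return the current local time for a city."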
@@ -119,6 +120,17 @@ def tool(
             whether to raise ValueError on invalid Google Style docstrings.
             Defaults to True.

+    Raises:
+        ValueError: If too many positional arguments are provided.
+        ValueError: If a runnable is provided without a string name.
+        ValueError: If the first argument is not a string or callable with
+            a ``__name__`` attribute.
+        ValueError: If the function does not have a docstring and description
+            is not provided and ``infer_schema`` is False.
+        ValueError: If ``parse_docstring`` is True and the function has an invalid
+            Google-style docstring and ``error_on_invalid_docstring`` is True.
+        ValueError: If a Runnable is provided that does not have an object schema.
+
     Returns:
         The tool.

@@ -134,11 +146,13 @@ def tool(
                 # Searches the API for the query.
                 return

+
             @tool("search", return_direct=True)
             def search_api(query: str) -> str:
                 # Searches the API for the query.
                 return

+
             @tool(response_format="content_and_artifact")
             def search_api(query: str) -> tuple[str, dict]:
                 return "partial json of results", {"full": "object of results"}
@@ -171,18 +185,15 @@ def tool(
                     "bar": {
                         "title": "Bar",
                         "description": "The bar.",
-                        "type": "string"
+                        "type": "string",
                     },
                     "baz": {
                         "title": "Baz",
                         "description": "The baz.",
-                        "type": "integer"
-                    }
+                        "type": "integer",
+                    },
                 },
-                "required": [
-                    "bar",
-                    "baz"
-                ]
+                "required": ["bar", "baz"],
             }

         Note that parsing by default will raise ``ValueError`` if the docstring
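The schema shown above comes from parsing a Google-style docstring. A hedged sketch of how such a schema is produced, assuming `parse_docstring=True` behaves as documented in this hunk:

    from langchain_core.tools import tool

    @tool(parse_docstring=True)
    def foo(bar: str, baz: int) -> str:
        """The foo.

        Args:
            bar: The bar.
            baz: The baz.
        """
        return bar

    # The per-argument descriptions parsed from the docstring end up in the
    # generated schema, as in the JSON shown above.
    print(foo.args)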
@@ -304,7 +315,7 @@ def tool(

     if runnable is not None:
         # tool is used as a function
-        # tool_from_runnable = tool("name", runnable)
+        # for instance tool_from_runnable = tool("name", runnable)
         if not name_or_callable:
             msg = "Runnable without name for tool constructor"
             raise ValueError(msg)
@@ -64,11 +64,7 @@ class Tool(BaseTool):
         The input arguments for the tool.
         """
         if self.args_schema is not None:
-            if isinstance(self.args_schema, dict):
-                json_schema = self.args_schema
-            else:
-                json_schema = self.args_schema.model_json_schema()
-            return json_schema["properties"]
+            return super().args
         # For backwards compatibility, if the function signature is ambiguous,
         # assume it takes a single string input.
         return {"tool_input": {"type": "string"}}
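This hunk leaves the legacy single-string fallback intact. A small sketch of that behavior, assuming `Tool.from_function` from `langchain_core.tools`:

    from langchain_core.tools import Tool

    def search(query: str) -> str:
        return f"results for {query}"

    search_tool = Tool.from_function(
        func=search,
        name="search",
        description="Search for a query.",
    )

    # With no args_schema attached, the fallback from the hunk above applies:
    print(search_tool.args)  # expected: {'tool_input': {'type': 'string'}}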
@@ -76,7 +72,19 @@ class Tool(BaseTool):
     def _to_args_and_kwargs(
         self, tool_input: Union[str, dict], tool_call_id: Optional[str]
     ) -> tuple[tuple, dict]:
-        """Convert tool input to pydantic model."""
+        """Convert tool input to pydantic model.
+
+        Args:
+            tool_input: The input to the tool.
+            tool_call_id: The ID of the tool call.
+
+        Raises:
+            ToolException: If the tool input is invalid.
+
+        Returns:
+            the pydantic model args and kwargs.
+
+        """
         args, kwargs = super()._to_args_and_kwargs(tool_input, tool_call_id)
         # For backwards compatibility. The tool must be run with a single input
         all_args = list(args) + list(kwargs.values())
@@ -96,7 +104,17 @@ class Tool(BaseTool):
         run_manager: Optional[CallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool."""
+        """Use the tool.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.func:
             if run_manager and signature(self.func).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -113,7 +131,17 @@ class Tool(BaseTool):
         run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool asynchronously."""
+        """Use the tool asynchronously.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.coroutine:
             if run_manager and signature(self.coroutine).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -67,16 +67,6 @@ class StructuredTool(BaseTool):

     # --- Tool ---

-    @property
-    def args(self) -> dict:
-        """The tool's input arguments."""
-        if isinstance(self.args_schema, dict):
-            json_schema = self.args_schema
-        else:
-            input_schema = self.get_input_schema()
-            json_schema = input_schema.model_json_schema()
-        return json_schema["properties"]
-
     def _run(
         self,
         *args: Any,
@@ -84,7 +74,17 @@ class StructuredTool(BaseTool):
         run_manager: Optional[CallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool."""
+        """Use the tool.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.func:
             if run_manager and signature(self.func).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -101,7 +101,17 @@ class StructuredTool(BaseTool):
         run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool asynchronously."""
+        """Use the tool asynchronously.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.coroutine:
             if run_manager and signature(self.coroutine).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -164,6 +174,9 @@ class StructuredTool(BaseTool):

         Raises:
             ValueError: If the function is not provided.
+            ValueError: If the function does not have a docstring and description
+                is not provided.
+            TypeError: If the ``args_schema`` is not a ``BaseModel`` or dict.

         Examples:

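A short usage sketch for `StructuredTool.from_function`, consistent with the Raises entries added above (the function names here are hypothetical):

    from langchain_core.tools import StructuredTool

    def multiply(a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b

    calculator = StructuredTool.from_function(func=multiply)
    print(calculator.invoke({"a": 6, "b": 7}))  # 42

    def no_doc(a: int) -> int:
        return a

    # Per the Raises section, omitting both a docstring and a description
    # should raise ValueError:
    # StructuredTool.from_function(func=no_doc)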
@@ -520,11 +520,11 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
         return retrieval_run

     def __deepcopy__(self, memo: dict) -> BaseTracer:
-        """Deepcopy the tracer."""
+        """Return self."""
         return self

     def __copy__(self) -> BaseTracer:
-        """Copy the tracer."""
+        """Return self."""
         return self


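The reworded docstrings make the behavior explicit: copying a tracer returns the same instance. A quick check, assuming `ConsoleCallbackHandler`, a `BaseTracer` subclass from `langchain_core.tracers.stdout`:

    import copy

    from langchain_core.tracers.stdout import ConsoleCallbackHandler

    tracer = ConsoleCallbackHandler()
    # __copy__ and __deepcopy__ both return self, so "copies" share identity:
    assert copy.copy(tracer) is tracer
    assert copy.deepcopy(tracer) is tracer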
@@ -43,7 +43,11 @@ run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVa
 def tracing_enabled(
     session_name: str = "default",  # noqa: ARG001
 ) -> Generator[TracerSessionV1, None, None]:
-    """Throw an error because this has been replaced by tracing_v2_enabled."""
+    """Throw an error because this has been replaced by ``tracing_v2_enabled``.
+
+    Raises:
+        RuntimeError: Always, because this function is deprecated.
+    """
     msg = (
         "tracing_enabled is no longer supported. Please use tracing_enabled_v2 instead."
     )
@@ -3,7 +3,6 @@
 from __future__ import annotations

 import logging
-import sys
 import traceback
 from abc import ABC, abstractmethod
 from datetime import datetime, timezone
@@ -71,7 +70,7 @@ class _TracerCore(ABC):
                 for streaming events.
                 - 'original+chat' is a format that is the same as 'original'
                   except it does NOT raise an attribute error on_chat_model_start
-            kwargs: Additional keyword arguments that will be passed to
+            **kwargs: Additional keyword arguments that will be passed to
                 the superclass.
         """
         super().__init__(**kwargs)
@@ -82,7 +81,7 @@ class _TracerCore(ABC):
     """Map of run ID to (trace_id, dotted_order). Cleared when tracer GCed."""

     @abstractmethod
-    def _persist_run(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:
+    def _persist_run(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:
         """Persist a run."""

     @staticmethod
@@ -98,17 +97,12 @@ class _TracerCore(ABC):
         """Get the stacktrace of the parent error."""
         msg = repr(error)
         try:
-            if sys.version_info < (3, 10):
-                tb = traceback.format_exception(
-                    error.__class__, error, error.__traceback__
-                )
-            else:
-                tb = traceback.format_exception(error)
+            tb = traceback.format_exception(error)
             return (msg + "\n\n".join(tb)).strip()
         except:  # noqa: E722
             return msg

-    def _start_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # type: ignore[return]
+    def _start_trace(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # type: ignore[return]
         current_dotted_order = run.start_time.strftime("%Y%m%dT%H%M%S%fZ") + str(run.id)
         if run.parent_run_id:
             if parent := self.order_map.get(run.parent_run_id):
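The removed branch was only needed before Python 3.10; since 3.10, `traceback.format_exception` accepts the exception instance alone. A standalone sketch of the simplified path:

    import traceback

    try:
        1 / 0
    except ZeroDivisionError as error:
        # Python 3.10+ signature: pass just the exception instance.
        tb = traceback.format_exception(error)
        print((repr(error) + "\n\n".join(tb)).strip())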
@@ -531,27 +525,43 @@ class _TracerCore(ABC):
         return retrieval_run

     def __deepcopy__(self, memo: dict) -> _TracerCore:
-        """Deepcopy the tracer."""
+        """Return self deepcopied."""
         return self

     def __copy__(self) -> _TracerCore:
-        """Copy the tracer."""
+        """Return self copied."""
         return self

-    def _end_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """End a trace for a run."""
+    def _end_trace(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """End a trace for a run.
+
+        Args:
+            run: The run.
+        """
         return None

-    def _on_run_create(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process a run upon creation."""
+    def _on_run_create(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process a run upon creation.
+
+        Args:
+            run: The created run.
+        """
         return None

-    def _on_run_update(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process a run upon update."""
+    def _on_run_update(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process a run upon update.
+
+        Args:
+            run: The updated run.
+        """
         return None

-    def _on_llm_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the LLM Run upon start."""
+    def _on_llm_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the LLM Run upon start.
+
+        Args:
+            run: The LLM run.
+        """
         return None

     def _on_llm_new_token(
@@ -559,54 +569,108 @@
         run: Run,  # noqa: ARG002
         token: str,  # noqa: ARG002
         chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]],  # noqa: ARG002
-    ) -> Union[None, Coroutine[Any, Any, None]]:
-        """Process new LLM token."""
+    ) -> Union[Coroutine[Any, Any, None], None]:
+        """Process new LLM token.
+
+        Args:
+            run: The LLM run.
+            token: The new token.
+            chunk: Optional chunk.
+        """
         return None

-    def _on_llm_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the LLM Run."""
+    def _on_llm_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the LLM Run.
+
+        Args:
+            run: The LLM run.
+        """
         return None

-    def _on_llm_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the LLM Run upon error."""
+    def _on_llm_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the LLM Run upon error.
+
+        Args:
+            run: The LLM run.
+        """
         return None

-    def _on_chain_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Chain Run upon start."""
+    def _on_chain_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Chain Run upon start.
+
+        Args:
+            run: The chain run.
+        """
         return None

-    def _on_chain_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Chain Run."""
+    def _on_chain_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Chain Run.
+
+        Args:
+            run: The chain run.
+        """
         return None

-    def _on_chain_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Chain Run upon error."""
+    def _on_chain_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Chain Run upon error.
+
+        Args:
+            run: The chain run.
+        """
         return None

-    def _on_tool_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Tool Run upon start."""
+    def _on_tool_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Tool Run upon start.
+
+        Args:
+            run: The tool run.
+        """
         return None

-    def _on_tool_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Tool Run."""
+    def _on_tool_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Tool Run.
+
+        Args:
+            run: The tool run.
+        """
         return None

-    def _on_tool_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Tool Run upon error."""
+    def _on_tool_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Tool Run upon error.
+
+        Args:
+            run: The tool run.
+        """
         return None

-    def _on_chat_model_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Chat Model Run upon start."""
+    def _on_chat_model_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Chat Model Run upon start.
+
+        Args:
+            run: The chat model run.
+        """
         return None

-    def _on_retriever_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Retriever Run upon start."""
+    def _on_retriever_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Retriever Run upon start.
+
+        Args:
+            run: The retriever run.
+        """
         return None

-    def _on_retriever_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Retriever Run."""
+    def _on_retriever_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Retriever Run.
+
+        Args:
+            run: The retriever run.
+        """
         return None

-    def _on_retriever_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:  # noqa: ARG002
-        """Process the Retriever Run upon error."""
+    def _on_retriever_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:  # noqa: ARG002
+        """Process the Retriever Run upon error.
+
+        Args:
+            run: The retriever run.
+        """
         return None
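These `_on_*` hooks are the extension surface for custom tracers. A hedged sketch of a subclass overriding a few of them (the class name is hypothetical; `BaseTracer` and `Run` are the real classes from `langchain_core.tracers`):

    from langchain_core.tracers.base import BaseTracer
    from langchain_core.tracers.schemas import Run

    class PrintingTracer(BaseTracer):
        """Hypothetical tracer that prints run lifecycle events."""

        def _persist_run(self, run: Run) -> None:
            # Required override: _persist_run is abstract on the tracer core.
            print(f"persisted run {run.id} ({run.run_type})")

        def _on_llm_end(self, run: Run) -> None:
            print(f"LLM run {run.id} finished")

        def _on_tool_error(self, run: Run) -> None:
            print(f"tool run {run.id} failed: {run.error}")

Such a tracer can then be attached to a runnable invocation via `config={"callbacks": [PrintingTracer()]}`.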
@@ -38,24 +38,27 @@ class EvaluatorCallbackHandler(BaseTracer):
     """Tracer that runs a run evaluator whenever a run is persisted.

     Attributes:
-        example_id : Union[UUID, None]
-            The example ID associated with the runs.
         client : Client
             The LangSmith client instance used for evaluating the runs.
-        evaluators : Sequence[RunEvaluator]
-            The sequence of run evaluators to be executed.
-        executor : ThreadPoolExecutor
-            The thread pool executor used for running the evaluators.
-        futures : set[Future]
-            The set of futures representing the running evaluators.
-        skip_unfinished : bool
-            Whether to skip runs that are not finished or raised
-            an error.
-        project_name : Optional[str]
-            The LangSmith project name to be organize eval chain runs under.
     """

     name: str = "evaluator_callback_handler"
+    example_id: Optional[UUID] = None
+    """The example ID associated with the runs."""
+    client: langsmith.Client
+    """The LangSmith client instance used for evaluating the runs."""
+    evaluators: Sequence[langsmith.RunEvaluator] = ()
+    """The sequence of run evaluators to be executed."""
+    executor: Optional[ThreadPoolExecutor] = None
+    """The thread pool executor used for running the evaluators."""
+    futures: weakref.WeakSet[Future] = weakref.WeakSet()
+    """The set of futures representing the running evaluators."""
+    skip_unfinished: bool = True
+    """Whether to skip runs that are not finished or raised an error."""
+    project_name: Optional[str] = None
+    """The LangSmith project name to be organize eval chain runs under."""
+    logged_eval_results: dict[tuple[str, str], list[EvaluationResult]]
+    lock: threading.Lock

     def __init__(
         self,
@@ -91,7 +94,7 @@ class EvaluatorCallbackHandler(BaseTracer):
         self.client = client or langchain_tracer.get_client()
         self.evaluators = evaluators
         if max_concurrency is None:
-            self.executor: Optional[ThreadPoolExecutor] = _get_executor()
+            self.executor = _get_executor()
         elif max_concurrency > 0:
             self.executor = ThreadPoolExecutor(max_workers=max_concurrency)
             weakref.finalize(
@@ -100,10 +103,10 @@ class EvaluatorCallbackHandler(BaseTracer):
             )
         else:
             self.executor = None
-        self.futures: weakref.WeakSet[Future] = weakref.WeakSet()
+        self.futures = weakref.WeakSet()
         self.skip_unfinished = skip_unfinished
         self.project_name = project_name
-        self.logged_eval_results: dict[tuple[str, str], list[EvaluationResult]] = {}
+        self.logged_eval_results = {}
         self.lock = threading.Lock()
         _TRACERS.add(self)

@@ -111,12 +114,8 @@ class EvaluatorCallbackHandler(BaseTracer):
         """Evaluate the run in the project.

         Args:
-        ----------
-        run : Run
-            The run to be evaluated.
-        evaluator : RunEvaluator
-            The evaluator to use for evaluating the run.
-
+            run: The run to be evaluated.
+            evaluator: The evaluator to use for evaluating the run.
         """
         try:
             if self.project_name is None:
@@ -202,10 +201,7 @@ class EvaluatorCallbackHandler(BaseTracer):
         """Run the evaluator on the run.

         Args:
-        ----------
-        run : Run
-            The run to be evaluated.
-
+            run: The run to be evaluated.
         """
         if self.skip_unfinished and not run.outputs:
             logger.debug("Skipping unfinished run %s", run.id)