langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (172)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +45 -70
  4. langchain_core/_api/deprecation.py +80 -80
  5. langchain_core/_api/path.py +22 -8
  6. langchain_core/_import_utils.py +10 -4
  7. langchain_core/agents.py +25 -21
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +341 -348
  11. langchain_core/callbacks/file.py +55 -44
  12. langchain_core/callbacks/manager.py +546 -683
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +35 -36
  15. langchain_core/callbacks/usage.py +65 -70
  16. langchain_core/chat_history.py +48 -55
  17. langchain_core/document_loaders/base.py +46 -21
  18. langchain_core/document_loaders/langsmith.py +39 -36
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +96 -74
  21. langchain_core/documents/compressor.py +12 -9
  22. langchain_core/documents/transformers.py +29 -28
  23. langchain_core/embeddings/fake.py +56 -57
  24. langchain_core/env.py +2 -3
  25. langchain_core/example_selectors/base.py +12 -0
  26. langchain_core/example_selectors/length_based.py +1 -1
  27. langchain_core/example_selectors/semantic_similarity.py +21 -25
  28. langchain_core/exceptions.py +15 -9
  29. langchain_core/globals.py +4 -163
  30. langchain_core/indexing/api.py +132 -125
  31. langchain_core/indexing/base.py +64 -67
  32. langchain_core/indexing/in_memory.py +26 -6
  33. langchain_core/language_models/__init__.py +15 -27
  34. langchain_core/language_models/_utils.py +267 -117
  35. langchain_core/language_models/base.py +92 -177
  36. langchain_core/language_models/chat_models.py +547 -407
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +72 -118
  39. langchain_core/language_models/llms.py +168 -242
  40. langchain_core/load/dump.py +8 -11
  41. langchain_core/load/load.py +32 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +50 -56
  44. langchain_core/messages/__init__.py +36 -51
  45. langchain_core/messages/ai.py +377 -150
  46. langchain_core/messages/base.py +239 -47
  47. langchain_core/messages/block_translators/__init__.py +111 -0
  48. langchain_core/messages/block_translators/anthropic.py +470 -0
  49. langchain_core/messages/block_translators/bedrock.py +94 -0
  50. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  51. langchain_core/messages/block_translators/google_genai.py +530 -0
  52. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  53. langchain_core/messages/block_translators/groq.py +143 -0
  54. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  55. langchain_core/messages/block_translators/openai.py +1010 -0
  56. langchain_core/messages/chat.py +2 -3
  57. langchain_core/messages/content.py +1423 -0
  58. langchain_core/messages/function.py +7 -7
  59. langchain_core/messages/human.py +44 -38
  60. langchain_core/messages/modifier.py +3 -2
  61. langchain_core/messages/system.py +40 -27
  62. langchain_core/messages/tool.py +160 -58
  63. langchain_core/messages/utils.py +527 -638
  64. langchain_core/output_parsers/__init__.py +1 -14
  65. langchain_core/output_parsers/base.py +68 -104
  66. langchain_core/output_parsers/json.py +13 -17
  67. langchain_core/output_parsers/list.py +11 -33
  68. langchain_core/output_parsers/openai_functions.py +56 -74
  69. langchain_core/output_parsers/openai_tools.py +68 -109
  70. langchain_core/output_parsers/pydantic.py +15 -13
  71. langchain_core/output_parsers/string.py +6 -2
  72. langchain_core/output_parsers/transform.py +17 -60
  73. langchain_core/output_parsers/xml.py +34 -44
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +26 -11
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +17 -6
  78. langchain_core/outputs/llm_result.py +15 -8
  79. langchain_core/prompt_values.py +29 -123
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +48 -63
  82. langchain_core/prompts/chat.py +259 -288
  83. langchain_core/prompts/dict.py +19 -11
  84. langchain_core/prompts/few_shot.py +84 -90
  85. langchain_core/prompts/few_shot_with_templates.py +14 -12
  86. langchain_core/prompts/image.py +19 -14
  87. langchain_core/prompts/loading.py +6 -8
  88. langchain_core/prompts/message.py +7 -8
  89. langchain_core/prompts/prompt.py +42 -43
  90. langchain_core/prompts/string.py +37 -16
  91. langchain_core/prompts/structured.py +43 -46
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +52 -192
  94. langchain_core/runnables/base.py +1727 -1683
  95. langchain_core/runnables/branch.py +52 -73
  96. langchain_core/runnables/config.py +89 -103
  97. langchain_core/runnables/configurable.py +128 -130
  98. langchain_core/runnables/fallbacks.py +93 -82
  99. langchain_core/runnables/graph.py +127 -127
  100. langchain_core/runnables/graph_ascii.py +63 -41
  101. langchain_core/runnables/graph_mermaid.py +87 -70
  102. langchain_core/runnables/graph_png.py +31 -36
  103. langchain_core/runnables/history.py +145 -161
  104. langchain_core/runnables/passthrough.py +141 -144
  105. langchain_core/runnables/retry.py +84 -68
  106. langchain_core/runnables/router.py +33 -37
  107. langchain_core/runnables/schema.py +79 -72
  108. langchain_core/runnables/utils.py +95 -139
  109. langchain_core/stores.py +85 -131
  110. langchain_core/structured_query.py +11 -15
  111. langchain_core/sys_info.py +31 -32
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +221 -247
  114. langchain_core/tools/convert.py +144 -161
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -19
  117. langchain_core/tools/simple.py +52 -29
  118. langchain_core/tools/structured.py +56 -60
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/_streaming.py +6 -7
  121. langchain_core/tracers/base.py +103 -112
  122. langchain_core/tracers/context.py +29 -48
  123. langchain_core/tracers/core.py +142 -105
  124. langchain_core/tracers/evaluation.py +30 -34
  125. langchain_core/tracers/event_stream.py +162 -117
  126. langchain_core/tracers/langchain.py +34 -36
  127. langchain_core/tracers/log_stream.py +87 -49
  128. langchain_core/tracers/memory_stream.py +3 -3
  129. langchain_core/tracers/root_listeners.py +18 -34
  130. langchain_core/tracers/run_collector.py +8 -20
  131. langchain_core/tracers/schemas.py +0 -125
  132. langchain_core/tracers/stdout.py +3 -3
  133. langchain_core/utils/__init__.py +1 -4
  134. langchain_core/utils/_merge.py +47 -9
  135. langchain_core/utils/aiter.py +70 -66
  136. langchain_core/utils/env.py +12 -9
  137. langchain_core/utils/function_calling.py +139 -206
  138. langchain_core/utils/html.py +7 -8
  139. langchain_core/utils/input.py +6 -6
  140. langchain_core/utils/interactive_env.py +6 -2
  141. langchain_core/utils/iter.py +48 -45
  142. langchain_core/utils/json.py +14 -4
  143. langchain_core/utils/json_schema.py +159 -43
  144. langchain_core/utils/mustache.py +32 -25
  145. langchain_core/utils/pydantic.py +67 -40
  146. langchain_core/utils/strings.py +5 -5
  147. langchain_core/utils/usage.py +1 -1
  148. langchain_core/utils/utils.py +104 -62
  149. langchain_core/vectorstores/base.py +131 -179
  150. langchain_core/vectorstores/in_memory.py +113 -182
  151. langchain_core/vectorstores/utils.py +23 -17
  152. langchain_core/version.py +1 -1
  153. langchain_core-1.0.0.dist-info/METADATA +68 -0
  154. langchain_core-1.0.0.dist-info/RECORD +172 -0
  155. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  156. langchain_core/beta/__init__.py +0 -1
  157. langchain_core/beta/runnables/__init__.py +0 -1
  158. langchain_core/beta/runnables/context.py +0 -448
  159. langchain_core/memory.py +0 -116
  160. langchain_core/messages/content_blocks.py +0 -1435
  161. langchain_core/prompts/pipeline.py +0 -133
  162. langchain_core/pydantic_v1/__init__.py +0 -30
  163. langchain_core/pydantic_v1/dataclasses.py +0 -23
  164. langchain_core/pydantic_v1/main.py +0 -23
  165. langchain_core/tracers/langchain_v1.py +0 -23
  166. langchain_core/utils/loading.py +0 -31
  167. langchain_core/v1/__init__.py +0 -1
  168. langchain_core/v1/chat_models.py +0 -1047
  169. langchain_core/v1/messages.py +0 -755
  170. langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
  171. langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
  172. langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
@@ -3,7 +3,6 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import logging
6
- import sys
7
6
  import traceback
8
7
  from abc import ABC, abstractmethod
9
8
  from datetime import datetime, timezone
@@ -11,14 +10,11 @@ from typing import (
11
10
  TYPE_CHECKING,
12
11
  Any,
13
12
  Literal,
14
- Optional,
15
- Union,
16
13
  cast,
17
14
  )
18
15
 
19
16
  from langchain_core.exceptions import TracerException
20
17
  from langchain_core.load import dumpd
21
- from langchain_core.messages.utils import convert_from_v1_message
22
18
  from langchain_core.outputs import (
23
19
  ChatGeneration,
24
20
  ChatGenerationChunk,
@@ -26,12 +22,6 @@ from langchain_core.outputs import (
26
22
  LLMResult,
27
23
  )
28
24
  from langchain_core.tracers.schemas import Run
29
- from langchain_core.v1.messages import (
30
- AIMessage,
31
- AIMessageChunk,
32
- MessageV1,
33
- MessageV1Types,
34
- )
35
25
 
36
26
  if TYPE_CHECKING:
37
27
  from collections.abc import Coroutine, Sequence
@@ -78,7 +68,7 @@ class _TracerCore(ABC):
78
68
  for streaming events.
79
69
  - 'original+chat' is a format that is the same as 'original'
80
70
  except it does NOT raise an attribute error on_chat_model_start
81
- kwargs: Additional keyword arguments that will be passed to
71
+ **kwargs: Additional keyword arguments that will be passed to
82
72
  the superclass.
83
73
  """
84
74
  super().__init__(**kwargs)
@@ -89,7 +79,7 @@ class _TracerCore(ABC):
89
79
  """Map of run ID to (trace_id, dotted_order). Cleared when tracer GCed."""
90
80
 
91
81
  @abstractmethod
92
- def _persist_run(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:
82
+ def _persist_run(self, run: Run) -> Coroutine[Any, Any, None] | None:
93
83
  """Persist a run."""
94
84
 
95
85
  @staticmethod
@@ -105,17 +95,12 @@ class _TracerCore(ABC):
105
95
  """Get the stacktrace of the parent error."""
106
96
  msg = repr(error)
107
97
  try:
108
- if sys.version_info < (3, 10):
109
- tb = traceback.format_exception(
110
- error.__class__, error, error.__traceback__
111
- )
112
- else:
113
- tb = traceback.format_exception(error)
98
+ tb = traceback.format_exception(error)
114
99
  return (msg + "\n\n".join(tb)).strip()
115
100
  except: # noqa: E722
116
101
  return msg
117
102
 
118
- def _start_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # type: ignore[return]
103
+ def _start_trace(self, run: Run) -> Coroutine[Any, Any, None] | None: # type: ignore[return]
119
104
  current_dotted_order = run.start_time.strftime("%Y%m%dT%H%M%S%fZ") + str(run.id)
120
105
  if run.parent_run_id:
121
106
  if parent := self.order_map.get(run.parent_run_id):
@@ -139,9 +124,7 @@ class _TracerCore(ABC):
139
124
  self.order_map[run.id] = (run.trace_id, run.dotted_order)
140
125
  self.run_map[str(run.id)] = run
141
126
 
142
- def _get_run(
143
- self, run_id: UUID, run_type: Union[str, set[str], None] = None
144
- ) -> Run:
127
+ def _get_run(self, run_id: UUID, run_type: str | set[str] | None = None) -> Run:
145
128
  try:
146
129
  run = self.run_map[str(run_id)]
147
130
  except KeyError as exc:
@@ -149,7 +132,7 @@ class _TracerCore(ABC):
149
132
  raise TracerException(msg) from exc
150
133
 
151
134
  if isinstance(run_type, str):
152
- run_types: Union[set[str], None] = {run_type}
135
+ run_types: set[str] | None = {run_type}
153
136
  else:
154
137
  run_types = run_type
155
138
  if run_types is not None and run.run_type not in run_types:
@@ -163,12 +146,12 @@ class _TracerCore(ABC):
163
146
  def _create_chat_model_run(
164
147
  self,
165
148
  serialized: dict[str, Any],
166
- messages: Union[list[list[BaseMessage]], list[MessageV1]],
149
+ messages: list[list[BaseMessage]],
167
150
  run_id: UUID,
168
- tags: Optional[list[str]] = None,
169
- parent_run_id: Optional[UUID] = None,
170
- metadata: Optional[dict[str, Any]] = None,
171
- name: Optional[str] = None,
151
+ tags: list[str] | None = None,
152
+ parent_run_id: UUID | None = None,
153
+ metadata: dict[str, Any] | None = None,
154
+ name: str | None = None,
172
155
  **kwargs: Any,
173
156
  ) -> Run:
174
157
  """Create a chat model run."""
@@ -188,12 +171,6 @@ class _TracerCore(ABC):
188
171
  start_time = datetime.now(timezone.utc)
189
172
  if metadata:
190
173
  kwargs.update({"metadata": metadata})
191
- if isinstance(messages[0], MessageV1Types):
192
- # Convert from v1 messages to BaseMessage
193
- messages = [
194
- [convert_from_v1_message(msg) for msg in messages] # type: ignore[arg-type]
195
- ]
196
- messages = cast("list[list[BaseMessage]]", messages)
197
174
  return Run(
198
175
  id=run_id,
199
176
  parent_run_id=parent_run_id,
@@ -215,10 +192,10 @@ class _TracerCore(ABC):
215
192
  serialized: dict[str, Any],
216
193
  prompts: list[str],
217
194
  run_id: UUID,
218
- tags: Optional[list[str]] = None,
219
- parent_run_id: Optional[UUID] = None,
220
- metadata: Optional[dict[str, Any]] = None,
221
- name: Optional[str] = None,
195
+ tags: list[str] | None = None,
196
+ parent_run_id: UUID | None = None,
197
+ metadata: dict[str, Any] | None = None,
198
+ name: str | None = None,
222
199
  **kwargs: Any,
223
200
  ) -> Run:
224
201
  """Create a llm run."""
@@ -243,10 +220,8 @@ class _TracerCore(ABC):
243
220
  self,
244
221
  token: str,
245
222
  run_id: UUID,
246
- chunk: Optional[
247
- Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
248
- ] = None,
249
- parent_run_id: Optional[UUID] = None, # noqa: ARG002
223
+ chunk: GenerationChunk | ChatGenerationChunk | None = None,
224
+ parent_run_id: UUID | None = None, # noqa: ARG002
250
225
  ) -> Run:
251
226
  """Append token event to LLM run and return the run."""
252
227
  llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
@@ -291,15 +266,7 @@ class _TracerCore(ABC):
291
266
  )
292
267
  return llm_run
293
268
 
294
- def _complete_llm_run(
295
- self, response: Union[LLMResult, AIMessage], run_id: UUID
296
- ) -> Run:
297
- if isinstance(response, AIMessage):
298
- response = LLMResult(
299
- generations=[
300
- [ChatGeneration(message=convert_from_v1_message(response))]
301
- ]
302
- )
269
+ def _complete_llm_run(self, response: LLMResult, run_id: UUID) -> Run:
303
270
  llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
304
271
  if getattr(llm_run, "outputs", None) is None:
305
272
  llm_run.outputs = {}
@@ -320,7 +287,7 @@ class _TracerCore(ABC):
320
287
  return llm_run
321
288
 
322
289
  def _errored_llm_run(
323
- self, error: BaseException, run_id: UUID, response: Optional[LLMResult] = None
290
+ self, error: BaseException, run_id: UUID, response: LLMResult | None = None
324
291
  ) -> Run:
325
292
  llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
326
293
  llm_run.error = self._get_stacktrace(error)
@@ -348,11 +315,11 @@ class _TracerCore(ABC):
348
315
  serialized: dict[str, Any],
349
316
  inputs: dict[str, Any],
350
317
  run_id: UUID,
351
- tags: Optional[list[str]] = None,
352
- parent_run_id: Optional[UUID] = None,
353
- metadata: Optional[dict[str, Any]] = None,
354
- run_type: Optional[str] = None,
355
- name: Optional[str] = None,
318
+ tags: list[str] | None = None,
319
+ parent_run_id: UUID | None = None,
320
+ metadata: dict[str, Any] | None = None,
321
+ run_type: str | None = None,
322
+ name: str | None = None,
356
323
  **kwargs: Any,
357
324
  ) -> Run:
358
325
  """Create a chain Run."""
@@ -399,7 +366,7 @@ class _TracerCore(ABC):
399
366
  self,
400
367
  outputs: dict[str, Any],
401
368
  run_id: UUID,
402
- inputs: Optional[dict[str, Any]] = None,
369
+ inputs: dict[str, Any] | None = None,
403
370
  ) -> Run:
404
371
  """Update a chain run with outputs and end time."""
405
372
  chain_run = self._get_run(run_id)
@@ -418,7 +385,7 @@ class _TracerCore(ABC):
418
385
  def _errored_chain_run(
419
386
  self,
420
387
  error: BaseException,
421
- inputs: Optional[dict[str, Any]],
388
+ inputs: dict[str, Any] | None,
422
389
  run_id: UUID,
423
390
  ) -> Run:
424
391
  chain_run = self._get_run(run_id)
@@ -434,11 +401,11 @@ class _TracerCore(ABC):
434
401
  serialized: dict[str, Any],
435
402
  input_str: str,
436
403
  run_id: UUID,
437
- tags: Optional[list[str]] = None,
438
- parent_run_id: Optional[UUID] = None,
439
- metadata: Optional[dict[str, Any]] = None,
440
- name: Optional[str] = None,
441
- inputs: Optional[dict[str, Any]] = None,
404
+ tags: list[str] | None = None,
405
+ parent_run_id: UUID | None = None,
406
+ metadata: dict[str, Any] | None = None,
407
+ name: str | None = None,
408
+ inputs: dict[str, Any] | None = None,
442
409
  **kwargs: Any,
443
410
  ) -> Run:
444
411
  """Create a tool run."""
@@ -501,10 +468,10 @@ class _TracerCore(ABC):
501
468
  serialized: dict[str, Any],
502
469
  query: str,
503
470
  run_id: UUID,
504
- parent_run_id: Optional[UUID] = None,
505
- tags: Optional[list[str]] = None,
506
- metadata: Optional[dict[str, Any]] = None,
507
- name: Optional[str] = None,
471
+ parent_run_id: UUID | None = None,
472
+ tags: list[str] | None = None,
473
+ metadata: dict[str, Any] | None = None,
474
+ name: str | None = None,
508
475
  **kwargs: Any,
509
476
  ) -> Run:
510
477
  """Create a retrieval run."""
@@ -554,82 +521,152 @@ class _TracerCore(ABC):
554
521
  return retrieval_run
555
522
 
556
523
  def __deepcopy__(self, memo: dict) -> _TracerCore:
557
- """Deepcopy the tracer."""
524
+ """Return self deepcopied."""
558
525
  return self
559
526
 
560
527
  def __copy__(self) -> _TracerCore:
561
- """Copy the tracer."""
528
+ """Return self copied."""
562
529
  return self
563
530
 
564
- def _end_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
565
- """End a trace for a run."""
531
+ def _end_trace(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
532
+ """End a trace for a run.
533
+
534
+ Args:
535
+ run: The run.
536
+ """
566
537
  return None
567
538
 
568
- def _on_run_create(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
569
- """Process a run upon creation."""
539
+ def _on_run_create(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
540
+ """Process a run upon creation.
541
+
542
+ Args:
543
+ run: The created run.
544
+ """
570
545
  return None
571
546
 
572
- def _on_run_update(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
573
- """Process a run upon update."""
547
+ def _on_run_update(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
548
+ """Process a run upon update.
549
+
550
+ Args:
551
+ run: The updated run.
552
+ """
574
553
  return None
575
554
 
576
- def _on_llm_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
577
- """Process the LLM Run upon start."""
555
+ def _on_llm_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
556
+ """Process the LLM Run upon start.
557
+
558
+ Args:
559
+ run: The LLM run.
560
+ """
578
561
  return None
579
562
 
580
563
  def _on_llm_new_token(
581
564
  self,
582
565
  run: Run, # noqa: ARG002
583
566
  token: str, # noqa: ARG002
584
- chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]], # noqa: ARG002
585
- ) -> Union[None, Coroutine[Any, Any, None]]:
586
- """Process new LLM token."""
567
+ chunk: GenerationChunk | ChatGenerationChunk | None, # noqa: ARG002
568
+ ) -> Coroutine[Any, Any, None] | None:
569
+ """Process new LLM token.
570
+
571
+ Args:
572
+ run: The LLM run.
573
+ token: The new token.
574
+ chunk: Optional chunk.
575
+ """
587
576
  return None
588
577
 
589
- def _on_llm_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
590
- """Process the LLM Run."""
578
+ def _on_llm_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
579
+ """Process the LLM Run.
580
+
581
+ Args:
582
+ run: The LLM run.
583
+ """
591
584
  return None
592
585
 
593
- def _on_llm_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
594
- """Process the LLM Run upon error."""
586
+ def _on_llm_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
587
+ """Process the LLM Run upon error.
588
+
589
+ Args:
590
+ run: The LLM run.
591
+ """
595
592
  return None
596
593
 
597
- def _on_chain_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
598
- """Process the Chain Run upon start."""
594
+ def _on_chain_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
595
+ """Process the Chain Run upon start.
596
+
597
+ Args:
598
+ run: The chain run.
599
+ """
599
600
  return None
600
601
 
601
- def _on_chain_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
602
- """Process the Chain Run."""
602
+ def _on_chain_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
603
+ """Process the Chain Run.
604
+
605
+ Args:
606
+ run: The chain run.
607
+ """
603
608
  return None
604
609
 
605
- def _on_chain_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
606
- """Process the Chain Run upon error."""
610
+ def _on_chain_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
611
+ """Process the Chain Run upon error.
612
+
613
+ Args:
614
+ run: The chain run.
615
+ """
607
616
  return None
608
617
 
609
- def _on_tool_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
610
- """Process the Tool Run upon start."""
618
+ def _on_tool_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
619
+ """Process the Tool Run upon start.
620
+
621
+ Args:
622
+ run: The tool run.
623
+ """
611
624
  return None
612
625
 
613
- def _on_tool_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
614
- """Process the Tool Run."""
626
+ def _on_tool_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
627
+ """Process the Tool Run.
628
+
629
+ Args:
630
+ run: The tool run.
631
+ """
615
632
  return None
616
633
 
617
- def _on_tool_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
618
- """Process the Tool Run upon error."""
634
+ def _on_tool_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
635
+ """Process the Tool Run upon error.
636
+
637
+ Args:
638
+ run: The tool run.
639
+ """
619
640
  return None
620
641
 
621
- def _on_chat_model_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
622
- """Process the Chat Model Run upon start."""
642
+ def _on_chat_model_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
643
+ """Process the Chat Model Run upon start.
644
+
645
+ Args:
646
+ run: The chat model run.
647
+ """
623
648
  return None
624
649
 
625
- def _on_retriever_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
626
- """Process the Retriever Run upon start."""
650
+ def _on_retriever_start(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
651
+ """Process the Retriever Run upon start.
652
+
653
+ Args:
654
+ run: The retriever run.
655
+ """
627
656
  return None
628
657
 
629
- def _on_retriever_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
630
- """Process the Retriever Run."""
658
+ def _on_retriever_end(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
659
+ """Process the Retriever Run.
660
+
661
+ Args:
662
+ run: The retriever run.
663
+ """
631
664
  return None
632
665
 
633
- def _on_retriever_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
634
- """Process the Retriever Run upon error."""
666
+ def _on_retriever_error(self, run: Run) -> Coroutine[Any, Any, None] | None: # noqa: ARG002
667
+ """Process the Retriever Run upon error.
668
+
669
+ Args:
670
+ run: The retriever run.
671
+ """
635
672
  return None
@@ -6,7 +6,7 @@ import logging
6
6
  import threading
7
7
  import weakref
8
8
  from concurrent.futures import Future, ThreadPoolExecutor, wait
9
- from typing import TYPE_CHECKING, Any, Optional, Union, cast
9
+ from typing import TYPE_CHECKING, Any, cast
10
10
  from uuid import UUID
11
11
 
12
12
  import langsmith
@@ -38,33 +38,36 @@ class EvaluatorCallbackHandler(BaseTracer):
38
38
  """Tracer that runs a run evaluator whenever a run is persisted.
39
39
 
40
40
  Attributes:
41
- example_id : Union[UUID, None]
42
- The example ID associated with the runs.
43
41
  client : Client
44
42
  The LangSmith client instance used for evaluating the runs.
45
- evaluators : Sequence[RunEvaluator]
46
- The sequence of run evaluators to be executed.
47
- executor : ThreadPoolExecutor
48
- The thread pool executor used for running the evaluators.
49
- futures : set[Future]
50
- The set of futures representing the running evaluators.
51
- skip_unfinished : bool
52
- Whether to skip runs that are not finished or raised
53
- an error.
54
- project_name : Optional[str]
55
- The LangSmith project name to be organize eval chain runs under.
56
43
  """
57
44
 
58
45
  name: str = "evaluator_callback_handler"
46
+ example_id: UUID | None = None
47
+ """The example ID associated with the runs."""
48
+ client: langsmith.Client
49
+ """The LangSmith client instance used for evaluating the runs."""
50
+ evaluators: Sequence[langsmith.RunEvaluator] = ()
51
+ """The sequence of run evaluators to be executed."""
52
+ executor: ThreadPoolExecutor | None = None
53
+ """The thread pool executor used for running the evaluators."""
54
+ futures: weakref.WeakSet[Future] = weakref.WeakSet()
55
+ """The set of futures representing the running evaluators."""
56
+ skip_unfinished: bool = True
57
+ """Whether to skip runs that are not finished or raised an error."""
58
+ project_name: str | None = None
59
+ """The LangSmith project name to be organize eval chain runs under."""
60
+ logged_eval_results: dict[tuple[str, str], list[EvaluationResult]]
61
+ lock: threading.Lock
59
62
 
60
63
  def __init__(
61
64
  self,
62
65
  evaluators: Sequence[langsmith.RunEvaluator],
63
- client: Optional[langsmith.Client] = None,
64
- example_id: Optional[Union[UUID, str]] = None,
66
+ client: langsmith.Client | None = None,
67
+ example_id: UUID | str | None = None,
65
68
  skip_unfinished: bool = True, # noqa: FBT001,FBT002
66
- project_name: Optional[str] = "evaluators",
67
- max_concurrency: Optional[int] = None,
69
+ project_name: str | None = "evaluators",
70
+ max_concurrency: int | None = None,
68
71
  **kwargs: Any,
69
72
  ) -> None:
70
73
  """Create an EvaluatorCallbackHandler.
@@ -91,7 +94,7 @@ class EvaluatorCallbackHandler(BaseTracer):
91
94
  self.client = client or langchain_tracer.get_client()
92
95
  self.evaluators = evaluators
93
96
  if max_concurrency is None:
94
- self.executor: Optional[ThreadPoolExecutor] = _get_executor()
97
+ self.executor = _get_executor()
95
98
  elif max_concurrency > 0:
96
99
  self.executor = ThreadPoolExecutor(max_workers=max_concurrency)
97
100
  weakref.finalize(
@@ -100,10 +103,10 @@ class EvaluatorCallbackHandler(BaseTracer):
100
103
  )
101
104
  else:
102
105
  self.executor = None
103
- self.futures: weakref.WeakSet[Future] = weakref.WeakSet()
106
+ self.futures = weakref.WeakSet()
104
107
  self.skip_unfinished = skip_unfinished
105
108
  self.project_name = project_name
106
- self.logged_eval_results: dict[tuple[str, str], list[EvaluationResult]] = {}
109
+ self.logged_eval_results = {}
107
110
  self.lock = threading.Lock()
108
111
  _TRACERS.add(self)
109
112
 
@@ -111,12 +114,8 @@ class EvaluatorCallbackHandler(BaseTracer):
111
114
  """Evaluate the run in the project.
112
115
 
113
116
  Args:
114
- ----------
115
- run : Run
116
- The run to be evaluated.
117
- evaluator : RunEvaluator
118
- The evaluator to use for evaluating the run.
119
-
117
+ run: The run to be evaluated.
118
+ evaluator: The evaluator to use for evaluating the run.
120
119
  """
121
120
  try:
122
121
  if self.project_name is None:
@@ -157,7 +156,7 @@ class EvaluatorCallbackHandler(BaseTracer):
157
156
 
158
157
  def _select_eval_results(
159
158
  self,
160
- results: Union[EvaluationResult, EvaluationResults],
159
+ results: EvaluationResult | EvaluationResults,
161
160
  ) -> list[EvaluationResult]:
162
161
  if isinstance(results, EvaluationResult):
163
162
  results_ = [results]
@@ -173,9 +172,9 @@ class EvaluatorCallbackHandler(BaseTracer):
173
172
 
174
173
  def _log_evaluation_feedback(
175
174
  self,
176
- evaluator_response: Union[EvaluationResult, EvaluationResults],
175
+ evaluator_response: EvaluationResult | EvaluationResults,
177
176
  run: Run,
178
- source_run_id: Optional[UUID] = None,
177
+ source_run_id: UUID | None = None,
179
178
  ) -> list[EvaluationResult]:
180
179
  results = self._select_eval_results(evaluator_response)
181
180
  for res in results:
@@ -202,10 +201,7 @@ class EvaluatorCallbackHandler(BaseTracer):
202
201
  """Run the evaluator on the run.
203
202
 
204
203
  Args:
205
- ----------
206
- run : Run
207
- The run to be evaluated.
208
-
204
+ run: The run to be evaluated.
209
205
  """
210
206
  if self.skip_unfinished and not run.outputs:
211
207
  logger.debug("Skipping unfinished run %s", run.id)