langchain-core 0.3.69__py3-none-any.whl → 0.3.71__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

@@ -27,8 +27,8 @@ def import_attr(
27
27
  else:
28
28
  try:
29
29
  module = import_module(f".{module_name}", package=package)
30
- except ModuleNotFoundError:
31
- msg = f"module '{package!r}.{module_name!r}' not found"
30
+ except ModuleNotFoundError as err:
31
+ msg = f"module '{package!r}.{module_name!r}' not found ({err})"
32
32
  raise ImportError(msg) from None
33
33
  result = getattr(module, attr_name)
34
34
  return result
@@ -111,8 +111,9 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
111
111
  def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
112
112
  """Format messages for tracing in on_chat_model_start.
113
113
 
114
- For backward compatibility, we update image content blocks to OpenAI Chat
115
- Completions format.
114
+ - Update image content blocks to OpenAI Chat Completions format (backward
115
+ compatibility).
116
+ - Add "type" key to content blocks that have a single key.
116
117
 
117
118
  Args:
118
119
  messages: List of messages to format.
@@ -125,20 +126,36 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
125
126
  message_to_trace = message
126
127
  if isinstance(message.content, list):
127
128
  for idx, block in enumerate(message.content):
128
- if (
129
- isinstance(block, dict)
130
- and block.get("type") == "image"
131
- and is_data_content_block(block)
132
- and block.get("source_type") != "id"
133
- ):
134
- if message_to_trace is message:
135
- message_to_trace = message.model_copy()
136
- # Also shallow-copy content
137
- message_to_trace.content = list(message_to_trace.content)
138
-
139
- message_to_trace.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
140
- convert_to_openai_image_block(block)
141
- )
129
+ if isinstance(block, dict):
130
+ # Update image content blocks to OpenAI Chat Completions format.
131
+ if (
132
+ block.get("type") == "image"
133
+ and is_data_content_block(block)
134
+ and block.get("source_type") != "id"
135
+ ):
136
+ if message_to_trace is message:
137
+ # Shallow copy
138
+ message_to_trace = message.model_copy()
139
+ message_to_trace.content = list(message_to_trace.content)
140
+
141
+ message_to_trace.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
142
+ convert_to_openai_image_block(block)
143
+ )
144
+ elif len(block) == 1 and "type" not in block:
145
+ # Tracing assumes all content blocks have a "type" key. Here
146
+ # we add this key if it is missing, and there's an obvious
147
+ # choice for the type (e.g., a single key in the block).
148
+ if message_to_trace is message:
149
+ # Shallow copy
150
+ message_to_trace = message.model_copy()
151
+ message_to_trace.content = list(message_to_trace.content)
152
+ key = next(iter(block))
153
+ message_to_trace.content[idx] = { # type: ignore[index]
154
+ "type": key,
155
+ key: block[key],
156
+ }
157
+ else:
158
+ pass
142
159
  messages_to_trace.append(message_to_trace)
143
160
 
144
161
  return messages_to_trace
@@ -36,6 +36,8 @@ class FakeMessagesListChatModel(BaseChatModel):
36
36
  run_manager: Optional[CallbackManagerForLLMRun] = None,
37
37
  **kwargs: Any,
38
38
  ) -> ChatResult:
39
+ if self.sleep is not None:
40
+ time.sleep(self.sleep)
39
41
  response = self.responses[self.i]
40
42
  if self.i < len(self.responses) - 1:
41
43
  self.i += 1
@@ -61,9 +63,9 @@ class FakeListChatModel(SimpleChatModel):
61
63
  """List of responses to **cycle** through in order."""
62
64
  sleep: Optional[float] = None
63
65
  i: int = 0
64
- """List of responses to **cycle** through in order."""
65
- error_on_chunk_number: Optional[int] = None
66
66
  """Internally incremented after every model invocation."""
67
+ error_on_chunk_number: Optional[int] = None
68
+ """If set, raise an error on the specified chunk number during streaming."""
67
69
 
68
70
  @property
69
71
  @override
@@ -79,6 +81,8 @@ class FakeListChatModel(SimpleChatModel):
79
81
  **kwargs: Any,
80
82
  ) -> str:
81
83
  """First try to lookup in queries, else return 'foo' or 'bar'."""
84
+ if self.sleep is not None:
85
+ time.sleep(self.sleep)
82
86
  response = self.responses[self.i]
83
87
  if self.i < len(self.responses) - 1:
84
88
  self.i += 1
@@ -234,12 +234,39 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
234
234
  Returns:
235
235
  The parsed tool calls.
236
236
  """
237
- parsed_result = super().parse_result(result, partial=partial)
238
-
237
+ generation = result[0]
238
+ if not isinstance(generation, ChatGeneration):
239
+ msg = "This output parser can only be used with a chat generation."
240
+ raise OutputParserException(msg)
241
+ message = generation.message
242
+ if isinstance(message, AIMessage) and message.tool_calls:
243
+ parsed_tool_calls = [dict(tc) for tc in message.tool_calls]
244
+ for tool_call in parsed_tool_calls:
245
+ if not self.return_id:
246
+ _ = tool_call.pop("id")
247
+ else:
248
+ try:
249
+ raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
250
+ except KeyError:
251
+ if self.first_tool_only:
252
+ return None
253
+ return []
254
+ parsed_tool_calls = parse_tool_calls(
255
+ raw_tool_calls,
256
+ partial=partial,
257
+ strict=self.strict,
258
+ return_id=self.return_id,
259
+ )
260
+ # For backwards compatibility
261
+ for tc in parsed_tool_calls:
262
+ tc["type"] = tc.pop("name")
239
263
  if self.first_tool_only:
264
+ parsed_result = list(
265
+ filter(lambda x: x["type"] == self.key_name, parsed_tool_calls)
266
+ )
240
267
  single_result = (
241
- parsed_result
242
- if parsed_result and parsed_result["type"] == self.key_name
268
+ parsed_result[0]
269
+ if parsed_result and parsed_result[0]["type"] == self.key_name
243
270
  else None
244
271
  )
245
272
  if self.return_id:
@@ -247,10 +274,13 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
247
274
  if single_result:
248
275
  return single_result["args"]
249
276
  return None
250
- parsed_result = [res for res in parsed_result if res["type"] == self.key_name]
251
- if not self.return_id:
252
- parsed_result = [res["args"] for res in parsed_result]
253
- return parsed_result
277
+ return (
278
+ [res for res in parsed_tool_calls if res["type"] == self.key_name]
279
+ if self.return_id
280
+ else [
281
+ res["args"] for res in parsed_tool_calls if res["type"] == self.key_name
282
+ ]
283
+ )
254
284
 
255
285
 
256
286
  # Common cause of ValidationError is truncated output due to max_tokens.
@@ -1,24 +1,23 @@
1
1
  """Output classes.
2
2
 
3
- **Output** classes are used to represent the output of a language model call
4
- and the output of a chat.
3
+ Used to represent the output of a language model call and the output of a chat.
5
4
 
6
- The top container for information is the `LLMResult` object. `LLMResult` is used by
7
- both chat models and LLMs. This object contains the output of the language
8
- model and any additional information that the model provider wants to return.
5
+ The top container for information is the `LLMResult` object. `LLMResult` is used by both
6
+ chat models and LLMs. This object contains the output of the language model and any
7
+ additional information that the model provider wants to return.
9
8
 
10
9
  When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
10
+
11
11
  - Chat models will return `AIMessage` objects.
12
12
  - LLMs will return regular text strings.
13
13
 
14
14
  In addition, users can access the raw output of either LLMs or chat models via
15
- callbacks. The on_chat_model_end and on_llm_end callbacks will return an
15
+ callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
16
16
  LLMResult object containing the generated outputs and any additional information
17
17
  returned by the model provider.
18
18
 
19
- In general, if information is already available
20
- in the AIMessage object, it is recommended to access it from there rather than
21
- from the `LLMResult` object.
19
+ In general, if information is already available in the AIMessage object, it is
20
+ recommended to access it from there rather than from the `LLMResult` object.
22
21
  """
23
22
 
24
23
  from typing import TYPE_CHECKING
@@ -27,7 +27,11 @@ class ChatGeneration(Generation):
27
27
  """
28
28
 
29
29
  text: str = ""
30
- """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
30
+ """The text contents of the output message.
31
+
32
+ .. warning::
33
+ SHOULD NOT BE SET DIRECTLY!
34
+ """
31
35
  message: BaseMessage
32
36
  """The message output by the chat model."""
33
37
  # Override type to be ChatGeneration, ignore mypy error as this is intentional
@@ -11,7 +11,8 @@ from langchain_core.utils._merge import merge_dicts
11
11
  class Generation(Serializable):
12
12
  """A single text generation output.
13
13
 
14
- Generation represents the response from an "old-fashioned" LLM that
14
+ Generation represents the response from an
15
 `"old-fashioned" LLM <https://python.langchain.com/docs/concepts/text_llms/>`__ that
15
16
  generates regular text (not chat messages).
16
17
 
17
18
  This model is used internally by chat model and will eventually
@@ -15,9 +15,9 @@ from langchain_core.outputs.run_info import RunInfo
15
15
  class LLMResult(BaseModel):
16
16
  """A container for results of an LLM call.
17
17
 
18
- Both chat models and LLMs generate an LLMResult object. This object contains
19
- the generated outputs and any additional information that the model provider
20
- wants to return.
18
+ Both chat models and LLMs generate an LLMResult object. This object contains the
19
+ generated outputs and any additional information that the model provider wants to
20
+ return.
21
21
  """
22
22
 
23
23
  generations: list[
@@ -25,17 +25,16 @@ class LLMResult(BaseModel):
25
25
  ]
26
26
  """Generated outputs.
27
27
 
28
- The first dimension of the list represents completions for different input
29
- prompts.
28
+ The first dimension of the list represents completions for different input prompts.
30
29
 
31
- The second dimension of the list represents different candidate generations
32
- for a given prompt.
30
+ The second dimension of the list represents different candidate generations for a
31
+ given prompt.
33
32
 
34
- When returned from an LLM the type is list[list[Generation]].
35
- When returned from a chat model the type is list[list[ChatGeneration]].
33
+ - When returned from **an LLM**, the type is ``list[list[Generation]]``.
34
+ - When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
36
35
 
37
- ChatGeneration is a subclass of Generation that has a field for a structured
38
- chat message.
36
+ ChatGeneration is a subclass of Generation that has a field for a structured chat
37
+ message.
39
38
  """
40
39
  llm_output: Optional[dict] = None
41
40
  """For arbitrary LLM provider specific output.
@@ -43,9 +42,8 @@ class LLMResult(BaseModel):
43
42
  This dictionary is a free-form dictionary that can contain any information that the
44
43
  provider wants to return. It is not standardized and is provider-specific.
45
44
 
46
- Users should generally avoid relying on this field and instead rely on
47
- accessing relevant information from standardized fields present in
48
- AIMessage.
45
+ Users should generally avoid relying on this field and instead rely on accessing
46
+ relevant information from standardized fields present in AIMessage.
49
47
  """
50
48
  run: Optional[list[RunInfo]] = None
51
49
  """List of metadata info for model call for each input."""
@@ -146,13 +146,12 @@ class InMemoryRateLimiter(BaseRateLimiter):
146
146
 
147
147
  Args:
148
148
  requests_per_second: The number of tokens to add per second to the bucket.
149
- Must be at least 1. The tokens represent "credit" that can be used
150
- to make requests.
149
+ The tokens represent "credit" that can be used to make requests.
151
150
  check_every_n_seconds: check whether the tokens are available
152
151
  every this many seconds. Can be a float to represent
153
152
  fractions of a second.
154
153
  max_bucket_size: The maximum number of tokens that can be in the bucket.
155
- This is used to prevent bursts of requests.
154
+ Must be at least 1. Used to prevent bursts of requests.
156
155
  """
157
156
  # Number of requests that we can make per second.
158
157
  self.requests_per_second = requests_per_second
@@ -230,6 +230,7 @@ def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:
230
230
  not key.startswith("__")
231
231
  and isinstance(value, (str, int, float, bool))
232
232
  and key not in empty["metadata"]
233
+ and key != "api_key"
233
234
  ):
234
235
  empty["metadata"][key] = value
235
236
  return empty
@@ -197,7 +197,14 @@ class StructuredTool(BaseTool):
197
197
  description_ = source_function.__doc__ or None
198
198
  if description_ is None and args_schema:
199
199
  if isinstance(args_schema, type) and is_basemodel_subclass(args_schema):
200
- description_ = args_schema.__doc__ or None
200
+ description_ = args_schema.__doc__
201
+ if (
202
+ description_
203
+ and "A base class for creating Pydantic models" in description_
204
+ ):
205
+ description_ = ""
206
+ elif not description_:
207
+ description_ = None
201
208
  elif isinstance(args_schema, dict):
202
209
  description_ = args_schema.get("description")
203
210
  else:
@@ -23,7 +23,12 @@ if TYPE_CHECKING:
23
23
  from langchain_core.utils.iter import batch_iterate
24
24
  from langchain_core.utils.loading import try_load_from_hub
25
25
  from langchain_core.utils.pydantic import pre_init
26
- from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
26
+ from langchain_core.utils.strings import (
27
+ comma_list,
28
+ sanitize_for_postgres,
29
+ stringify_dict,
30
+ stringify_value,
31
+ )
27
32
  from langchain_core.utils.utils import (
28
33
  build_extra_kwargs,
29
34
  check_package_version,
@@ -59,6 +64,7 @@ __all__ = (
59
64
  "pre_init",
60
65
  "print_text",
61
66
  "raise_for_status_with_text",
67
+ "sanitize_for_postgres",
62
68
  "secret_from_env",
63
69
  "stringify_dict",
64
70
  "stringify_value",
@@ -81,6 +87,7 @@ _dynamic_imports = {
81
87
  "try_load_from_hub": "loading",
82
88
  "pre_init": "pydantic",
83
89
  "comma_list": "strings",
90
+ "sanitize_for_postgres": "strings",
84
91
  "stringify_dict": "strings",
85
92
  "stringify_value": "strings",
86
93
  "build_extra_kwargs": "utils",
@@ -21,8 +21,15 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
21
21
  for component in components[1:]:
22
22
  if component in out:
23
23
  out = out[component]
24
- elif component.isdigit() and int(component) in out:
25
- out = out[int(component)]
24
+ elif component.isdigit():
25
+ index = int(component)
26
+ if (isinstance(out, list) and 0 <= index < len(out)) or (
27
+ isinstance(out, dict) and index in out
28
+ ):
29
+ out = out[index]
30
+ else:
31
+ msg = f"Reference '{path}' not found."
32
+ raise KeyError(msg)
26
33
  else:
27
34
  msg = f"Reference '{path}' not found."
28
35
  raise KeyError(msg)
@@ -32,64 +39,64 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
32
39
  def _dereference_refs_helper(
33
40
  obj: Any,
34
41
  full_schema: dict[str, Any],
42
+ processed_refs: Optional[set[str]],
35
43
  skip_keys: Sequence[str],
36
- processed_refs: Optional[set[str]] = None,
44
+ shallow_refs: bool, # noqa: FBT001
37
45
  ) -> Any:
46
+ """Inline every pure {'$ref':...}.
47
+
48
+ But:
49
+ - if shallow_refs=True: only break cycles, do not inline nested refs
50
+ - if shallow_refs=False: deep-inline all nested refs
51
+
52
+ Also skip recursion under any key in skip_keys.
53
+ """
38
54
  if processed_refs is None:
39
55
  processed_refs = set()
40
56
 
57
+ # 1) Pure $ref node?
58
+ if isinstance(obj, dict) and set(obj.keys()) == {"$ref"}:
59
+ ref_path = obj["$ref"]
60
+ # cycle?
61
+ if ref_path in processed_refs:
62
+ return {}
63
+ processed_refs.add(ref_path)
64
+
65
+ # grab + copy the target
66
+ target = deepcopy(_retrieve_ref(ref_path, full_schema))
67
+
68
+ # deep inlining: recurse into everything
69
+ result = _dereference_refs_helper(
70
+ target, full_schema, processed_refs, skip_keys, shallow_refs
71
+ )
72
+
73
+ processed_refs.remove(ref_path)
74
+ return result
75
+
76
+ # 2) Not a pure-$ref: recurse, skipping any keys in skip_keys
41
77
  if isinstance(obj, dict):
42
- obj_out = {}
78
+ out: dict[str, Any] = {}
43
79
  for k, v in obj.items():
44
80
  if k in skip_keys:
45
- obj_out[k] = v
46
- elif k == "$ref":
47
- if v in processed_refs:
48
- continue
49
- processed_refs.add(v)
50
- ref = _retrieve_ref(v, full_schema)
51
- full_ref = _dereference_refs_helper(
52
- ref, full_schema, skip_keys, processed_refs
53
- )
54
- processed_refs.remove(v)
55
- return full_ref
56
- elif isinstance(v, (list, dict)):
57
- obj_out[k] = _dereference_refs_helper(
58
- v, full_schema, skip_keys, processed_refs
81
+ # do not recurse under this key
82
+ out[k] = deepcopy(v)
83
+ elif isinstance(v, (dict, list)):
84
+ out[k] = _dereference_refs_helper(
85
+ v, full_schema, processed_refs, skip_keys, shallow_refs
59
86
  )
60
87
  else:
61
- obj_out[k] = v
62
- return obj_out
88
+ out[k] = v
89
+ return out
90
+
63
91
  if isinstance(obj, list):
64
92
  return [
65
- _dereference_refs_helper(el, full_schema, skip_keys, processed_refs)
66
- for el in obj
93
+ _dereference_refs_helper(
94
+ item, full_schema, processed_refs, skip_keys, shallow_refs
95
+ )
96
+ for item in obj
67
97
  ]
68
- return obj
69
-
70
-
71
- def _infer_skip_keys(
72
- obj: Any, full_schema: dict, processed_refs: Optional[set[str]] = None
73
- ) -> list[str]:
74
- if processed_refs is None:
75
- processed_refs = set()
76
98
 
77
- keys = []
78
- if isinstance(obj, dict):
79
- for k, v in obj.items():
80
- if k == "$ref":
81
- if v in processed_refs:
82
- continue
83
- processed_refs.add(v)
84
- ref = _retrieve_ref(v, full_schema)
85
- keys.append(v.split("/")[1])
86
- keys += _infer_skip_keys(ref, full_schema, processed_refs)
87
- elif isinstance(v, (list, dict)):
88
- keys += _infer_skip_keys(v, full_schema, processed_refs)
89
- elif isinstance(obj, list):
90
- for el in obj:
91
- keys += _infer_skip_keys(el, full_schema, processed_refs)
92
- return keys
99
+ return obj
93
100
 
94
101
 
95
102
  def dereference_refs(
@@ -101,17 +108,15 @@ def dereference_refs(
101
108
  """Try to substitute $refs in JSON Schema.
102
109
 
103
110
  Args:
104
- schema_obj: The schema object to dereference.
105
- full_schema: The full schema object. Defaults to None.
106
- skip_keys: The keys to skip. Defaults to None.
107
-
108
- Returns:
109
- The dereferenced schema object.
111
+ schema_obj: The fragment to dereference.
112
+ full_schema: The complete schema (defaults to schema_obj).
113
+ skip_keys:
114
+ - If None (the default), we skip recursion under '$defs' *and* only
115
+ shallow-inline refs.
116
+ - If provided (even as an empty list), we will recurse under every key and
117
+ deep-inline all refs.
110
118
  """
111
- full_schema = full_schema or schema_obj
112
- skip_keys = (
113
- skip_keys
114
- if skip_keys is not None
115
- else _infer_skip_keys(schema_obj, full_schema)
116
- )
117
- return _dereference_refs_helper(schema_obj, full_schema, skip_keys)
119
+ full = full_schema or schema_obj
120
+ keys_to_skip = list(skip_keys) if skip_keys is not None else ["$defs"]
121
+ shallow = skip_keys is None
122
+ return _dereference_refs_helper(schema_obj, full, None, keys_to_skip, shallow)
@@ -150,6 +150,11 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
150
150
  msg = f"unclosed tag at line {_CURRENT_LINE}"
151
151
  raise ChevronError(msg) from e
152
152
 
153
+ # Check for empty tags
154
+ if not tag.strip():
155
+ msg = f"empty tag at line {_CURRENT_LINE}"
156
+ raise ChevronError(msg)
157
+
153
158
  # Find the type meaning of the first character
154
159
  tag_type = tag_types.get(tag[0], "variable")
155
160
 
@@ -46,3 +46,26 @@ def comma_list(items: list[Any]) -> str:
46
46
  str: The comma-separated string.
47
47
  """
48
48
  return ", ".join(str(item) for item in items)
49
+
50
+
51
+ def sanitize_for_postgres(text: str, replacement: str = "") -> str:
52
+ r"""Sanitize text by removing NUL bytes that are incompatible with PostgreSQL.
53
+
54
+ PostgreSQL text fields cannot contain NUL (0x00) bytes, which can cause
55
+ psycopg.DataError when inserting documents. This function removes or replaces
56
+ such characters to ensure compatibility.
57
+
58
+ Args:
59
+ text: The text to sanitize.
60
+ replacement: String to replace NUL bytes with. Defaults to empty string.
61
+
62
+ Returns:
63
+ str: The sanitized text with NUL bytes removed or replaced.
64
+
65
+ Example:
66
+ >>> sanitize_for_postgres("Hello\\x00world")
67
+ 'Helloworld'
68
+ >>> sanitize_for_postgres("Hello\\x00world", " ")
69
+ 'Hello world'
70
+ """
71
+ return text.replace("\x00", replacement)
@@ -7,6 +7,7 @@ as they can change without notice.
7
7
  from __future__ import annotations
8
8
 
9
9
  import logging
10
+ import warnings
10
11
  from typing import TYPE_CHECKING, Union
11
12
 
12
13
  if TYPE_CHECKING:
@@ -46,6 +47,23 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
46
47
 
47
48
  x = np.array(x)
48
49
  y = np.array(y)
50
+
51
+ # Check for NaN
52
+ if np.any(np.isnan(x)) or np.any(np.isnan(y)):
53
+ warnings.warn(
54
+ "NaN found in input arrays, unexpected return might follow",
55
+ category=RuntimeWarning,
56
+ stacklevel=2,
57
+ )
58
+
59
+ # Check for Inf
60
+ if np.any(np.isinf(x)) or np.any(np.isinf(y)):
61
+ warnings.warn(
62
+ "Inf found in input arrays, unexpected return might follow",
63
+ category=RuntimeWarning,
64
+ stacklevel=2,
65
+ )
66
+
49
67
  if x.shape[1] != y.shape[1]:
50
68
  msg = (
51
69
  f"Number of columns in X and Y must be the same. X has shape {x.shape} "
langchain_core/version.py CHANGED
@@ -1,3 +1,3 @@
1
1
  """langchain-core version information and utilities."""
2
2
 
3
- VERSION = "0.3.69"
3
+ VERSION = "0.3.71"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: langchain-core
3
- Version: 0.3.69
3
+ Version: 0.3.71
4
4
  Summary: Building applications with LLMs through composability
5
5
  License: MIT
6
6
  Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/core
@@ -1,13 +1,13 @@
1
- langchain_core-0.3.69.dist-info/METADATA,sha256=dmwRVtm0qaGW76-QTlL_hE_MT_qo_PGWsTcf2zG7e7U,5767
2
- langchain_core-0.3.69.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
3
- langchain_core-0.3.69.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
1
+ langchain_core-0.3.71.dist-info/METADATA,sha256=2IN2ux1bgW9OCsht-69QXdQsj95meY2WY1vmLgW19xY,5767
2
+ langchain_core-0.3.71.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
3
+ langchain_core-0.3.71.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
4
4
  langchain_core/__init__.py,sha256=AN-KPu2IuqeQGc-m9dcDfoTIvBno5-ZdUNEVwIIoZM0,709
5
5
  langchain_core/_api/__init__.py,sha256=WDOMw4faVuscjDCL5ttnRQNienJP_M9vGMmJUXS6L5w,1976
6
6
  langchain_core/_api/beta_decorator.py,sha256=osyHHMFFC4jT59CSlauU8HnVxReBfEaA-USTkvh7yAI,9942
7
7
  langchain_core/_api/deprecation.py,sha256=nZtRLOlU_9fpvpOKO4SLTXpDm73Ik28EEPEBmIdaJVs,20500
8
8
  langchain_core/_api/internal.py,sha256=aOZkYANu747LyWzyAk-0KE4RjdTYj18Wtlh7F9_qyPM,683
9
9
  langchain_core/_api/path.py,sha256=M93Jo_1CUpShRyqB6m___Qjczm1RU1D7yb4LSGaiysk,984
10
- langchain_core/_import_utils.py,sha256=hzGmPpoLFeDGg6o96J39RPtMl_I6GUxW-_2JxGTJcIk,1250
10
+ langchain_core/_import_utils.py,sha256=AXmqapJmqEIYMY7qeA9SF8NmOkWse1ZYfTrljRxnPPo,1265
11
11
  langchain_core/agents.py,sha256=r2GDNZeHrGR83URVMBn_-q18enwg1o-1aZlTlke3ep0,8466
12
12
  langchain_core/beta/__init__.py,sha256=8phOlCdTByvzqN1DR4CU_rvaO4SDRebKATmFKj0B5Nw,68
13
13
  langchain_core/beta/runnables/__init__.py,sha256=KPVZTs2phF46kEB7mn0M75UeSw8nylbTZ4HYpLT0ywE,17
@@ -48,9 +48,9 @@ langchain_core/indexing/in_memory.py,sha256=-qyKjAWJFWxtH_MbUu3JJct0x3R_pbHyHuxA
48
48
  langchain_core/language_models/__init__.py,sha256=j6OXr7CriShFr7BYfCWZ2kOTEZpzvlE7dNDTab75prg,3778
49
49
  langchain_core/language_models/_utils.py,sha256=uy-rdJB51K0O4txjxYe-tLGG8ZAwe3yezIiKvuDXDUU,4785
50
50
  langchain_core/language_models/base.py,sha256=hURYXnzIRP_Ib7vL5hPlWyTPbSEhwWIRGoxp7VQPSHQ,14448
51
- langchain_core/language_models/chat_models.py,sha256=EVD9F0EZ5xK7vLJ9HpqD0JBZ0GdRlPjYRbz2NmopsdA,67895
51
+ langchain_core/language_models/chat_models.py,sha256=ztNksJff6KIH8aC-lFy93tFf2mug4DarBQUU0oZv7xs,68936
52
52
  langchain_core/language_models/fake.py,sha256=h9LhVTkmYLXkJ1_VvsKhqYVpkQsM7eAr9geXF_IVkPs,3772
53
- langchain_core/language_models/fake_chat_models.py,sha256=vt0N35tlETJrStWcr2cZrknjDUMKzZjikb7Ftndzgik,12832
53
+ langchain_core/language_models/fake_chat_models.py,sha256=QLz4VXMdIn6U5sBdZn_Lzfe1-rbebhNemQVGHnB3aBM,12994
54
54
  langchain_core/language_models/llms.py,sha256=87JTPgaRlMFhWR6sAc0N0aBMJxzV2sO3DtQz7dO0cWI,56802
55
55
  langchain_core/load/__init__.py,sha256=m3_6Fk2gpYZO0xqyTnZzdQigvsYHjMariLq_L2KwJFk,1150
56
56
  langchain_core/load/dump.py,sha256=xQMuWsbCpgt8ce_muZuHUOOY9Ju-_voQyHc_fkv18mo,2667
@@ -75,16 +75,16 @@ langchain_core/output_parsers/format_instructions.py,sha256=8oUbeysnVGvXWyNd5gqX
75
75
  langchain_core/output_parsers/json.py,sha256=1KVQSshLOiE4xtoOrwSuVu6tlTEm-LX1hNa9Jt7pRb8,4650
76
76
  langchain_core/output_parsers/list.py,sha256=7op38L-z4s8ElB_7Uo2vr6gJNsdRn3T07r780GubgfI,7677
77
77
  langchain_core/output_parsers/openai_functions.py,sha256=34h2yySGubhDcWogPOMeCxSRrPJB3E0unxUBi6dOf4w,10714
78
- langchain_core/output_parsers/openai_tools.py,sha256=GLSQMJ4TD05TZOtLVnhwI9ZfMVNmRm3FNE3QCWDioOM,11059
78
+ langchain_core/output_parsers/openai_tools.py,sha256=hlwu7RWHTvC1wsBVMh3dFoX7mPpdT0tTlNUrhCiyza8,12252
79
79
  langchain_core/output_parsers/pydantic.py,sha256=NTwYFM2xnTEcxT8xYWsi3ViIJ7UJzZJlh67sA_b7VXw,4347
80
80
  langchain_core/output_parsers/string.py,sha256=F82gzziR6Ovea8kfkZD0gIgYBb3g7DWxuE_V523J3X8,898
81
81
  langchain_core/output_parsers/transform.py,sha256=QYLL5zAfXWQTtPGPZwzdge0RRM9K7Rx2ldKrUfoQiu0,5951
82
82
  langchain_core/output_parsers/xml.py,sha256=vU6z6iQc5BTovH6CT5YMPN85fiM86Dqt-7EY_6ffGBw,11047
83
- langchain_core/outputs/__init__.py,sha256=AtGW1qQJOX3B-n8S8BlZdCDHUyAyTYK0dfs9ywcLrEo,2133
84
- langchain_core/outputs/chat_generation.py,sha256=BO3PomRJxyRdt0d6K_FBkBDRpo28JLXcT_ZxpSyepI4,4319
83
+ langchain_core/outputs/__init__.py,sha256=uy2aeRTvvIfyWeLtPs0KaCw0VpG6QTkC0esmj268BIM,2119
84
+ langchain_core/outputs/chat_generation.py,sha256=HAvbQGRzRXopvyVNwQHcTGC-zm7itFbOPtcXPhb4gXY,4349
85
85
  langchain_core/outputs/chat_result.py,sha256=us15wVh00AYkIVNmf0VETEI9aoEQy-cT-SIXMX-98Zc,1356
86
- langchain_core/outputs/generation.py,sha256=hYl5K90Eul8ldn6UEFwt1fqnMHRG5tI96SY74vm_O50,2312
87
- langchain_core/outputs/llm_result.py,sha256=-IbRnKD1ZPvfi7_Yt-x3GpwL9BvHMVgTiz4G_YKKiiE,3647
86
+ langchain_core/outputs/generation.py,sha256=gZRSOwdA8A4T-isxt80LasjnCKfqGbOB7zLKrpPUmkw,2376
87
+ langchain_core/outputs/llm_result.py,sha256=2-9Sz59tm03rLXCMj8kG5FLpz9Gm8gSKEXWlhKrmQFc,3661
88
88
  langchain_core/outputs/run_info.py,sha256=xCMWdsHfgnnodaf4OCMvZaWUfS836X7mV15JPkqvZjo,594
89
89
  langchain_core/prompt_values.py,sha256=HuG3X7gIYRXfFwpdOYnwksJM-OmcdAFchjoln1nXSg0,4002
90
90
  langchain_core/prompts/__init__.py,sha256=sp3NU858CEf4YUuDYiY_-iF1x1Gb5msSyoyrk2FUI94,4123
@@ -104,12 +104,12 @@ langchain_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
104
104
  langchain_core/pydantic_v1/__init__.py,sha256=hqAsQjsfqLduCo5E0oAAAt21Nkls0S6bCQ4tD2moFfU,1080
105
105
  langchain_core/pydantic_v1/dataclasses.py,sha256=q4Qst8I0g7odncWZ3-MvW-Xadfu6DQYxCo-DFZgwLPE,889
106
106
  langchain_core/pydantic_v1/main.py,sha256=uTB_757DTfo-mFKJUn_a4qS_GxmSxlqYmL2WOCJLdS0,882
107
- langchain_core/rate_limiters.py,sha256=pUoyVUDGhSclWOWESrk-_upEKqp61EmyIz-SDfF3UHo,9588
107
+ langchain_core/rate_limiters.py,sha256=u05JRHY0WSdGZiqPUs9spO9bjQRNz3XBu-hMlXI818s,9564
108
108
  langchain_core/retrievers.py,sha256=jkNUYO-_19hjKVBUYHD9pwQVjukYEE21fbHf-vtIdng,16735
109
109
  langchain_core/runnables/__init__.py,sha256=efTnFjwN_QSAv5ThLmKuWeu8P1BLARH-cWKZBuimfDM,3858
110
110
  langchain_core/runnables/base.py,sha256=cT1eB-s0waT4q4JFculG4AdmqYetBKB2e6DIrwCB8Nk,221543
111
111
  langchain_core/runnables/branch.py,sha256=Z0wESU2RmTFjMWam7d5CbijJ9p6ar7EJSQPh7HUHF0Q,16557
112
- langchain_core/runnables/config.py,sha256=b86vkkiJoYj-qanPRW-vXweEvAzaJKz6iLWNhyizHuk,20423
112
+ langchain_core/runnables/config.py,sha256=MSbZg8d4aGyEqNOfsJHkgN7RvgAN1fc-wgFZcd8LO8w,20456
113
113
  langchain_core/runnables/configurable.py,sha256=ReD0jHC8LYeD0Awv-s5x9in1xk8hCATYUeDCcEs0Ttk,24366
114
114
  langchain_core/runnables/fallbacks.py,sha256=nc_dq-UlmIX7LRLv8EOWPW5XX6o1ndfwG19q3SP-VGQ,24334
115
115
  langchain_core/runnables/graph.py,sha256=BzUDXoczHC21kFyD0-Gp2kndDVQbP0j1Bx-fAYTjAY0,23386
@@ -131,7 +131,7 @@ langchain_core/tools/convert.py,sha256=8hu33vhu7ozP868uQCwzGfOyL5CPs60pN9l4M6PAa
131
131
  langchain_core/tools/render.py,sha256=BosvIWrSvOJgRg_gaSDBS58j99gwQHsLhprOXeJP53I,1842
132
132
  langchain_core/tools/retriever.py,sha256=zlSV3HnWhhmtZtkNGbNQW9wxv8GptJKmDhzqZj8e36o,3873
133
133
  langchain_core/tools/simple.py,sha256=GwawH2sfn05W18g8H4NKOza-X5Rrw-pdPwUmVBitO3Y,6048
134
- langchain_core/tools/structured.py,sha256=z1h9Pqb-inl5uvMykLmQbeqPZ6xBxxiyuh9P7gxBYDM,8723
134
+ langchain_core/tools/structured.py,sha256=_Iqw6xjmrqnOid4IESHAbPRD1tppq7BJHJpubKldhLc,8989
135
135
  langchain_core/tracers/__init__.py,sha256=ixZmLjtoMEPqYEFUtAxleiDDRNIaHrS01VRDo9mCPk8,1611
136
136
  langchain_core/tracers/_streaming.py,sha256=TT2N_dzOQIqEM9dH7v3d_-eZKEfkcQxMJqItsMofMpY,960
137
137
  langchain_core/tracers/base.py,sha256=6TWPk6fL4Ep4ywh3q-aGzy-PdiaH6hDZhLs5Z4bL45Q,26025
@@ -147,7 +147,7 @@ langchain_core/tracers/root_listeners.py,sha256=VRr3jnSSLYsIqYEmw9OjbjGgj4897c4f
147
147
  langchain_core/tracers/run_collector.py,sha256=Tnnz5sfKkUI6Rapj8mGjScYGkyEKRyicWOhvEXHV3qE,1622
148
148
  langchain_core/tracers/schemas.py,sha256=2gDs-9zloHTjIrMfuWsr9w9cRdZ6ZMMD_h5hCRH6xHw,3768
149
149
  langchain_core/tracers/stdout.py,sha256=aZN-yz545zj34kYfrEmYzWeSz83pbqN8wNqi-ZvS1Iw,6732
150
- langchain_core/utils/__init__.py,sha256=SXdUKDhlsZB5cusipvcPOVJU5UzccL_Zi_7TIwuD_SA,3036
150
+ langchain_core/utils/__init__.py,sha256=N0ZeV09FHvZIVITLJlqGibb0JNtmmLvvoareFtG0DuI,3169
151
151
  langchain_core/utils/_merge.py,sha256=sCYw0irypropb5Y6ZpIGxZhAmaKpsb7519Hc3pXLGWM,5763
152
152
  langchain_core/utils/aiter.py,sha256=Uz2EB-v7TAK6HVapkEgaKUmzxb8p2Az1cCUtEAa-bTM,10710
153
153
  langchain_core/utils/env.py,sha256=swKMUVFS-Jr_9KK2ToWam6qd9lt73Pz4RtRqwcaiFQw,2464
@@ -159,16 +159,16 @@ langchain_core/utils/input.py,sha256=z3tubdUtsoHqfTyiBGfELLr1xemSe-pGvhfAeGE6O2g
159
159
  langchain_core/utils/interactive_env.py,sha256=Apx6gRncLvidU75maFoI-Gfx-FhDqO2vyiZnR32QAaE,200
160
160
  langchain_core/utils/iter.py,sha256=oqhDIXkuTdsrMj4JZUhNwGmdQ32DPIpGgXfPARdEtmc,7409
161
161
  langchain_core/utils/json.py,sha256=7K3dV2aOfT-1cLl5ZQrfmw9sVnLrn7batTsByzjlPdg,6197
162
- langchain_core/utils/json_schema.py,sha256=qHkMkEwytAKuBF8bVFaLNILagoSBGZVBeDyfgFHXTkg,3534
162
+ langchain_core/utils/json_schema.py,sha256=RuJUipbkwljzdjFZ4E6blJuHJObO9k2pXcxvJX5uzW8,3706
163
163
  langchain_core/utils/loading.py,sha256=7B9nuzOutgknzj5-8W6eorC9EUsNuO-1w4jh-aVf8ms,931
164
- langchain_core/utils/mustache.py,sha256=WNMBl0xC1BJO_LiyIm1Z1HRtxS4X59RTFBnTh21y4oc,21118
164
+ langchain_core/utils/mustache.py,sha256=K_EnRcbYQMjQ-95-fP5G9rB2rCbpgcr1yn5QF6-Tr70,21253
165
165
  langchain_core/utils/pydantic.py,sha256=UFuDwQpGMZ95YFfb2coPMXva48sWn-ytQQhnqdy1ExM,17987
166
- langchain_core/utils/strings.py,sha256=LIh8uZcGlEKI_SnbOA_PsZxcU6QI5GQKTj0hxOraIv0,1016
166
+ langchain_core/utils/strings.py,sha256=0LaQiqpshHwMrWBGvNfFPc-AxihLGMM9vsQcSx3uAkI,1804
167
167
  langchain_core/utils/usage.py,sha256=EYv0poDqA7VejEsPyoA19lEt9M4L24Tppf4OPtOjGwI,1202
168
168
  langchain_core/utils/utils.py,sha256=RK9JRNsdb4mXu1XYuJFuvDqyglSpnr6ak0vb0ELc7Eo,15043
169
169
  langchain_core/vectorstores/__init__.py,sha256=5P0eoeoH5LHab64JjmEeWa6SxX4eMy-etAP1MEHsETY,804
170
170
  langchain_core/vectorstores/base.py,sha256=tClkcmbKtYw5CkwF1AEOPa304rHkYqDJ0jRlXXPPo8c,42025
171
171
  langchain_core/vectorstores/in_memory.py,sha256=lxe2bR-wFtvNN2Ii7EGOh3ON3MwqNRP996eUEek55fA,18076
172
- langchain_core/vectorstores/utils.py,sha256=UoPD1txVxGuFW0jhbo75l58cLHPdDJ03OPbZRj6kODU,4435
173
- langchain_core/version.py,sha256=mOstXyCeIyrvf2rlEWwNlU2wUB1bIz-FlQUJap_AZNI,76
174
- langchain_core-0.3.69.dist-info/RECORD,,
172
+ langchain_core/vectorstores/utils.py,sha256=DZUUR1xDybHDhmZJsd1V2OEPsYiFVc2nhtD4w8hw9ns,4934
173
+ langchain_core/version.py,sha256=CIjYNEnE7dA85UdGJA6hnOak7gPlUoYQi1qU56mcLkQ,76
174
+ langchain_core-0.3.71.dist-info/RECORD,,