langchain-core 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (60)
  1. langchain_core/agents.py +2 -4
  2. langchain_core/caches.py +13 -6
  3. langchain_core/chat_history.py +5 -5
  4. langchain_core/document_loaders/base.py +6 -4
  5. langchain_core/document_loaders/blob_loaders.py +1 -1
  6. langchain_core/document_loaders/langsmith.py +9 -10
  7. langchain_core/documents/__init__.py +24 -3
  8. langchain_core/documents/base.py +72 -59
  9. langchain_core/documents/compressor.py +6 -6
  10. langchain_core/documents/transformers.py +2 -2
  11. langchain_core/embeddings/fake.py +2 -2
  12. langchain_core/example_selectors/semantic_similarity.py +7 -7
  13. langchain_core/exceptions.py +2 -2
  14. langchain_core/indexing/__init__.py +1 -1
  15. langchain_core/indexing/api.py +62 -62
  16. langchain_core/indexing/base.py +16 -16
  17. langchain_core/indexing/in_memory.py +2 -2
  18. langchain_core/language_models/__init__.py +6 -5
  19. langchain_core/language_models/base.py +2 -2
  20. langchain_core/language_models/fake_chat_models.py +1 -1
  21. langchain_core/language_models/llms.py +4 -6
  22. langchain_core/load/dump.py +1 -1
  23. langchain_core/load/serializable.py +4 -1
  24. langchain_core/messages/__init__.py +9 -0
  25. langchain_core/messages/ai.py +11 -7
  26. langchain_core/messages/base.py +4 -0
  27. langchain_core/messages/block_translators/google_genai.py +4 -2
  28. langchain_core/messages/content.py +4 -4
  29. langchain_core/messages/utils.py +13 -13
  30. langchain_core/output_parsers/__init__.py +17 -1
  31. langchain_core/output_parsers/base.py +3 -0
  32. langchain_core/output_parsers/format_instructions.py +9 -4
  33. langchain_core/output_parsers/json.py +5 -2
  34. langchain_core/output_parsers/list.py +16 -16
  35. langchain_core/output_parsers/openai_tools.py +2 -2
  36. langchain_core/output_parsers/pydantic.py +1 -1
  37. langchain_core/output_parsers/string.py +3 -3
  38. langchain_core/output_parsers/xml.py +28 -25
  39. langchain_core/outputs/generation.py +2 -3
  40. langchain_core/prompt_values.py +0 -6
  41. langchain_core/prompts/base.py +5 -3
  42. langchain_core/prompts/chat.py +60 -52
  43. langchain_core/prompts/structured.py +12 -8
  44. langchain_core/retrievers.py +41 -37
  45. langchain_core/runnables/base.py +14 -14
  46. langchain_core/runnables/configurable.py +3 -3
  47. langchain_core/runnables/graph.py +7 -3
  48. langchain_core/tools/base.py +66 -12
  49. langchain_core/tools/convert.py +8 -5
  50. langchain_core/tools/retriever.py +6 -5
  51. langchain_core/tools/structured.py +7 -5
  52. langchain_core/tracers/log_stream.py +2 -2
  53. langchain_core/utils/strings.py +1 -4
  54. langchain_core/utils/utils.py +12 -5
  55. langchain_core/vectorstores/base.py +73 -69
  56. langchain_core/vectorstores/in_memory.py +2 -2
  57. langchain_core/version.py +1 -1
  58. {langchain_core-1.0.1.dist-info → langchain_core-1.0.2.dist-info}/METADATA +1 -1
  59. {langchain_core-1.0.1.dist-info → langchain_core-1.0.2.dist-info}/RECORD +60 -60
  60. {langchain_core-1.0.1.dist-info → langchain_core-1.0.2.dist-info}/WHEEL +0 -0
@@ -104,19 +104,23 @@ class StructuredPrompt(ChatPromptTemplate):
  )
  ```
  Args:
- messages: sequence of message representations.
+ messages: Sequence of message representations.
+
  A message can be represented using the following formats:
- (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
- (message type, template); e.g., ("human", "{user_input}"),
- (4) 2-tuple of (message class, template), (5) a string which is
- shorthand for ("human", template); e.g., "{user_input}"
- schema: a dictionary representation of function call, or a Pydantic model.
+
+ 1. `BaseMessagePromptTemplate`
+ 2. `BaseMessage`
+ 3. 2-tuple of `(message type, template)`; e.g.,
+ `("human", "{user_input}")`
+ 4. 2-tuple of `(message class, template)`
+ 5. A string which is shorthand for `("human", template)`; e.g.,
+ `"{user_input}"`
+ schema: A dictionary representation of function call, or a Pydantic model.
  **kwargs: Any additional kwargs to pass through to
  `ChatModel.with_structured_output(schema, **kwargs)`.

  Returns:
- a structured prompt template
-
+ A structured prompt template
  """
  return cls(messages, schema, **kwargs)

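To make the documented argument formats concrete, here is a minimal sketch. The `Joke` model and message templates are illustrative, and it assumes the `StructuredPrompt` constructor accepts the same positional `(messages, schema)` pair the classmethod forwards:

```python
from pydantic import BaseModel

from langchain_core.prompts.structured import StructuredPrompt


class Joke(BaseModel):  # Pydantic model used as the schema
    setup: str
    punchline: str


prompt = StructuredPrompt(
    [
        ("system", "You are a comedian."),  # format 3: (message type, template) 2-tuple
        ("human", "{user_input}"),          # templates filled in at invoke time
    ],
    Joke,
)
# Piped into a chat model that supports with_structured_output, invoking the
# resulting chain would return Joke instances rather than raw messages.
```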
@@ -50,65 +50,65 @@ class LangSmithRetrieverParams(TypedDict, total=False):


  class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
- """Abstract base class for a Document retrieval system.
+ """Abstract base class for a document retrieval system.

  A retrieval system is defined as something that can take string queries and return
- the most 'relevant' Documents from some source.
+ the most 'relevant' documents from some source.

  Usage:

- A retriever follows the standard Runnable interface, and should be used
- via the standard Runnable methods of `invoke`, `ainvoke`, `batch`, `abatch`.
+ A retriever follows the standard `Runnable` interface, and should be used via the
+ standard `Runnable` methods of `invoke`, `ainvoke`, `batch`, `abatch`.

  Implementation:

- When implementing a custom retriever, the class should implement
- the `_get_relevant_documents` method to define the logic for retrieving documents.
+ When implementing a custom retriever, the class should implement the
+ `_get_relevant_documents` method to define the logic for retrieving documents.

  Optionally, an async native implementations can be provided by overriding the
  `_aget_relevant_documents` method.

- Example: A retriever that returns the first 5 documents from a list of documents
+ !!! example "Retriever that returns the first 5 documents from a list of documents"

- ```python
- from langchain_core.documents import Document
- from langchain_core.retrievers import BaseRetriever
+ ```python
+ from langchain_core.documents import Document
+ from langchain_core.retrievers import BaseRetriever

- class SimpleRetriever(BaseRetriever):
- docs: list[Document]
- k: int = 5
+ class SimpleRetriever(BaseRetriever):
+ docs: list[Document]
+ k: int = 5

- def _get_relevant_documents(self, query: str) -> list[Document]:
- \"\"\"Return the first k documents from the list of documents\"\"\"
- return self.docs[:self.k]
+ def _get_relevant_documents(self, query: str) -> list[Document]:
+ \"\"\"Return the first k documents from the list of documents\"\"\"
+ return self.docs[:self.k]

- async def _aget_relevant_documents(self, query: str) -> list[Document]:
- \"\"\"(Optional) async native implementation.\"\"\"
- return self.docs[:self.k]
- ```
+ async def _aget_relevant_documents(self, query: str) -> list[Document]:
+ \"\"\"(Optional) async native implementation.\"\"\"
+ return self.docs[:self.k]
+ ```

- Example: A simple retriever based on a scikit-learn vectorizer
+ !!! example "Simple retriever based on a scikit-learn vectorizer"

- ```python
- from sklearn.metrics.pairwise import cosine_similarity
+ ```python
+ from sklearn.metrics.pairwise import cosine_similarity


- class TFIDFRetriever(BaseRetriever, BaseModel):
- vectorizer: Any
- docs: list[Document]
- tfidf_array: Any
- k: int = 4
+ class TFIDFRetriever(BaseRetriever, BaseModel):
+ vectorizer: Any
+ docs: list[Document]
+ tfidf_array: Any
+ k: int = 4

- class Config:
- arbitrary_types_allowed = True
+ class Config:
+ arbitrary_types_allowed = True

- def _get_relevant_documents(self, query: str) -> list[Document]:
- # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
- query_vec = self.vectorizer.transform([query])
- # Op -- (n_docs,1) -- Cosine Sim with each doc
- results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
- return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
- ```
+ def _get_relevant_documents(self, query: str) -> list[Document]:
+ # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
+ query_vec = self.vectorizer.transform([query])
+ # Op -- (n_docs,1) -- Cosine Sim with each doc
+ results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
+ return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
+ ```

  """

  model_config = ConfigDict(
@@ -119,15 +119,19 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
  _expects_other_args: bool = False
  tags: list[str] | None = None
  """Optional list of tags associated with the retriever.
+
  These tags will be associated with each call to this retriever,
  and passed as arguments to the handlers defined in `callbacks`.
+
  You can use these to eg identify a specific instance of a retriever with its
  use case.
  """
  metadata: dict[str, Any] | None = None
  """Optional metadata associated with the retriever.
+
  This metadata will be associated with each call to this retriever,
  and passed as arguments to the handlers defined in `callbacks`.
+
  You can use these to eg identify a specific instance of a retriever with its
  use case.
  """
@@ -147,11 +147,11 @@ class Runnable(ABC, Generic[Input, Output]):
  the `input_schema` property, the `output_schema` property and `config_schema`
  method.

- LCEL and Composition
- ====================
+ Composition
+ ===========
+
+ Runnable objects can be composed together to create chains in a declarative way.

- The LangChain Expression Language (LCEL) is a declarative way to compose
- `Runnable` objectsinto chains.
  Any chain constructed this way will automatically have sync, async, batch, and
  streaming support.

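For reference, a minimal sketch of the declarative composition the reworded docstring describes (the lambdas are illustrative):

```python
from langchain_core.runnables import RunnableLambda

# Two Runnables composed with `|` form a RunnableSequence; the chain then
# exposes invoke/ainvoke/batch/stream without any extra work.
chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

chain.invoke(1)         # 4
chain.batch([1, 2, 3])  # [4, 6, 8]
```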
@@ -235,21 +235,21 @@ class Runnable(ABC, Generic[Input, Output]):

  You can set the global debug flag to True to enable debug output for all chains:

- ```python
- from langchain_core.globals import set_debug
+ ```python
+ from langchain_core.globals import set_debug

- set_debug(True)
- ```
+ set_debug(True)
+ ```

  Alternatively, you can pass existing or custom callbacks to any given chain:

- ```python
- from langchain_core.tracers import ConsoleCallbackHandler
+ ```python
+ from langchain_core.tracers import ConsoleCallbackHandler

- chain.invoke(..., config={"callbacks": [ConsoleCallbackHandler()]})
- ```
+ chain.invoke(..., config={"callbacks": [ConsoleCallbackHandler()]})
+ ```

- For a UI (and much more) checkout [LangSmith](https://docs.smith.langchain.com/).
+ For a UI (and much more) checkout [LangSmith](https://docs.langchain.com/langsmith/home).

  """

@@ -3500,7 +3500,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):

  Returns a mapping of their outputs.

- `RunnableParallel` is one of the two main composition primitives for the LCEL,
+ `RunnableParallel` is one of the two main composition primitives,
  alongside `RunnableSequence`. It invokes `Runnable`s concurrently, providing the
  same input to each.

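A minimal sketch of the composition primitive described in this hunk (illustrative branches):

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

# RunnableParallel fans the same input out to each named branch concurrently
# and returns a dict of their outputs, keyed by branch name.
mapper = RunnableParallel(
    doubled=RunnableLambda(lambda x: x * 2),
    squared=RunnableLambda(lambda x: x**2),
)

mapper.invoke(3)  # {'doubled': 6, 'squared': 9}
```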
@@ -475,11 +475,11 @@ _enums_for_spec_lock = threading.Lock()
  class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
  """Runnable that can be dynamically configured.

- A RunnableConfigurableAlternatives should be initiated using the
+ A `RunnableConfigurableAlternatives` should be initiated using the
  `configurable_alternatives` method of a Runnable or can be
  initiated directly as well.

- Here is an example of using a RunnableConfigurableAlternatives that uses
+ Here is an example of using a `RunnableConfigurableAlternatives` that uses
  alternative prompts to illustrate its functionality:

  ```python
@@ -506,7 +506,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
  chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
  ```

- Equivalently, you can initialize RunnableConfigurableAlternatives directly
+ Equivalently, you can initialize `RunnableConfigurableAlternatives` directly
  and use in LCEL in the same way:

  ```python
@@ -132,7 +132,7 @@ class Branch(NamedTuple):
  condition: Callable[..., str]
  """A callable that returns a string representation of the condition."""
  ends: dict[str, str] | None
- """Optional dictionary of end node ids for the branches. """
+ """Optional dictionary of end node IDs for the branches. """


  class CurveStyle(Enum):
@@ -706,8 +706,10 @@ class Graph:
  def _first_node(graph: Graph, exclude: Sequence[str] = ()) -> Node | None:
  """Find the single node that is not a target of any edge.

- Exclude nodes/sources with ids in the exclude list.
+ Exclude nodes/sources with IDs in the exclude list.
+
  If there is no such node, or there are multiple, return `None`.
+
  When drawing the graph, this node would be the origin.
  """
  targets = {edge.target for edge in graph.edges if edge.source not in exclude}
@@ -722,8 +724,10 @@ def _first_node(graph: Graph, exclude: Sequence[str] = ()) -> Node | None:
  def _last_node(graph: Graph, exclude: Sequence[str] = ()) -> Node | None:
  """Find the single node that is not a source of any edge.

- Exclude nodes/targets with ids in the exclude list.
+ Exclude nodes/targets with IDs in the exclude list.
+
  If there is no such node, or there are multiple, return `None`.
+
  When drawing the graph, this node would be the destination.
  """
  sources = {edge.source for edge in graph.edges if edge.target not in exclude}
@@ -707,6 +707,35 @@ class ChildTool(BaseTool):
  kwargs["run_manager"] = kwargs["run_manager"].get_sync()
  return await run_in_executor(None, self._run, *args, **kwargs)

+ def _filter_injected_args(self, tool_input: dict) -> dict:
+ """Filter out injected tool arguments from the input dictionary.
+
+ Injected arguments are those annotated with InjectedToolArg or its
+ subclasses, or arguments in FILTERED_ARGS like run_manager and callbacks.
+
+ Args:
+ tool_input: The tool input dictionary to filter.
+
+ Returns:
+ A filtered dictionary with injected arguments removed.
+ """
+ # Start with filtered args from the constant
+ filtered_keys = set[str](FILTERED_ARGS)
+
+ # If we have an args_schema, use it to identify injected args
+ if self.args_schema is not None:
+ try:
+ annotations = get_all_basemodel_annotations(self.args_schema)
+ for field_name, field_type in annotations.items():
+ if _is_injected_arg_type(field_type):
+ filtered_keys.add(field_name)
+ except Exception: # noqa: S110
+ # If we can't get annotations, just use FILTERED_ARGS
+ pass
+
+ # Filter out the injected keys from tool_input
+ return {k: v for k, v in tool_input.items() if k not in filtered_keys}
+
  def _to_args_and_kwargs(
  self, tool_input: str | dict, tool_call_id: str | None
  ) -> tuple[tuple, dict]:
@@ -794,17 +823,29 @@ class ChildTool(BaseTool):
  self.metadata,
  )

+ # Filter out injected arguments from callback inputs
+ filtered_tool_input = (
+ self._filter_injected_args(tool_input)
+ if isinstance(tool_input, dict)
+ else None
+ )
+
+ # Use filtered inputs for the input_str parameter as well
+ tool_input_str = (
+ tool_input
+ if isinstance(tool_input, str)
+ else str(
+ filtered_tool_input if filtered_tool_input is not None else tool_input
+ )
+ )
+
  run_manager = callback_manager.on_tool_start(
  {"name": self.name, "description": self.description},
- tool_input if isinstance(tool_input, str) else str(tool_input),
+ tool_input_str,
  color=start_color,
  name=run_name,
  run_id=run_id,
- # Inputs by definition should always be dicts.
- # For now, it's unclear whether this assumption is ever violated,
- # but if it is we will send a `None` value to the callback instead
- # TODO: will need to address issue via a patch.
- inputs=tool_input if isinstance(tool_input, dict) else None,
+ inputs=filtered_tool_input,
  **kwargs,
  )

@@ -905,17 +946,30 @@ class ChildTool(BaseTool):
  metadata,
  self.metadata,
  )
+
+ # Filter out injected arguments from callback inputs
+ filtered_tool_input = (
+ self._filter_injected_args(tool_input)
+ if isinstance(tool_input, dict)
+ else None
+ )
+
+ # Use filtered inputs for the input_str parameter as well
+ tool_input_str = (
+ tool_input
+ if isinstance(tool_input, str)
+ else str(
+ filtered_tool_input if filtered_tool_input is not None else tool_input
+ )
+ )
+
  run_manager = await callback_manager.on_tool_start(
  {"name": self.name, "description": self.description},
- tool_input if isinstance(tool_input, str) else str(tool_input),
+ tool_input_str,
  color=start_color,
  name=run_name,
  run_id=run_id,
- # Inputs by definition should always be dicts.
- # For now, it's unclear whether this assumption is ever violated,
- # but if it is we will send a `None` value to the callback instead
- # TODO: will need to address issue via a patch.
- inputs=tool_input if isinstance(tool_input, dict) else None,
+ inputs=filtered_tool_input,
  **kwargs,
  )
  content = None
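For context on what the new `_filter_injected_args` helper removes from callback payloads, a minimal sketch using the public `InjectedToolArg` annotation (the `lookup_order` tool and its values are hypothetical):

```python
from typing import Annotated

from langchain_core.tools import InjectedToolArg, tool


@tool
def lookup_order(order_id: str, user_id: Annotated[str, InjectedToolArg]) -> str:
    """Look up an order on behalf of a user."""
    return f"order {order_id} for user {user_id}"


# `user_id` must still be supplied when the tool is executed, but because it
# is an injected argument, this change strips it from the inputs reported to
# callback handlers via on_tool_start (as with FILTERED_ARGS such as
# run_manager and callbacks).
lookup_order.invoke({"order_id": "A-123", "user_id": "u-42"})
```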
@@ -89,6 +89,7 @@ def tool(
  runnable: Optional runnable to convert to a tool. Must be provided as a
  positional argument.
  description: Optional description for the tool.
+
  Precedence for the tool description value is as follows:

  - `description` argument
@@ -105,11 +106,13 @@ def tool(
  infer_schema: Whether to infer the schema of the arguments from
  the function's signature. This also makes the resultant tool
  accept a dictionary input to its `run()` function.
- response_format: The tool response format. If `"content"` then the output of
- the tool is interpreted as the contents of a `ToolMessage`. If
- `"content_and_artifact"` then the output is expected to be a two-tuple
- corresponding to the `(content, artifact)` of a `ToolMessage`.
- parse_docstring: if `infer_schema` and `parse_docstring`, will attempt to
+ response_format: The tool response format.
+
+ If `"content"` then the output of the tool is interpreted as the contents of
+ a `ToolMessage`. If `"content_and_artifact"` then the output is expected to
+ be a two-tuple corresponding to the `(content, artifact)` of a
+ `ToolMessage`.
+ parse_docstring: If `infer_schema` and `parse_docstring`, will attempt to
  parse parameter descriptions from Google Style function docstrings.
  error_on_invalid_docstring: if `parse_docstring` is provided, configure
  whether to raise `ValueError` on invalid Google Style docstrings.
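A minimal sketch of the `"content_and_artifact"` behavior the reworded docstring describes (the `search` tool and its return values are illustrative):

```python
from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def search(query: str) -> tuple[str, list[dict]]:
    """Search a store and return a summary plus the raw hits."""
    hits = [{"id": 1, "snippet": f"result for {query}"}]
    return f"Found {len(hits)} hit(s) for {query!r}", hits


# Invoked with a tool call, the tool produces a ToolMessage whose `content`
# is the summary string and whose `artifact` carries the raw hits.
msg = search.invoke(
    {"name": "search", "args": {"query": "langchain"}, "id": "call_1", "type": "tool_call"}
)
```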
@@ -83,11 +83,12 @@ def create_retriever_tool(
  model, so should be descriptive.
  document_prompt: The prompt to use for the document.
  document_separator: The separator to use between documents.
- response_format: The tool response format. If `"content"` then the output of
- the tool is interpreted as the contents of a `ToolMessage`. If
- `"content_and_artifact"` then the output is expected to be a two-tuple
- corresponding to the `(content, artifact)` of a `ToolMessage` (artifact
- being a list of documents in this case).
+ response_format: The tool response format.
+
+ If `"content"` then the output of the tool is interpreted as the contents of
+ a `ToolMessage`. If `"content_and_artifact"` then the output is expected to
+ be a two-tuple corresponding to the `(content, artifact)` of a `ToolMessage`
+ (artifact being a list of documents in this case).

  Returns:
  Tool class to pass to an agent.
@@ -151,11 +151,13 @@ class StructuredTool(BaseTool):
  return_direct: Whether to return the result directly or as a callback.
  args_schema: The schema of the tool's input arguments.
  infer_schema: Whether to infer the schema from the function's signature.
- response_format: The tool response format. If `"content"` then the output of
- the tool is interpreted as the contents of a `ToolMessage`. If
- `"content_and_artifact"` then the output is expected to be a two-tuple
- corresponding to the `(content, artifact)` of a `ToolMessage`.
- parse_docstring: if `infer_schema` and `parse_docstring`, will attempt
+ response_format: The tool response format.
+
+ If `"content"` then the output of the tool is interpreted as the
+ contents of a `ToolMessage`. If `"content_and_artifact"` then the output
+ is expected to be a two-tuple corresponding to the `(content, artifact)`
+ of a `ToolMessage`.
+ parse_docstring: If `infer_schema` and `parse_docstring`, will attempt
  to parse parameter descriptions from Google Style function docstrings.
  error_on_invalid_docstring: if `parse_docstring` is provided, configure
  whether to raise `ValueError` on invalid Google Style docstrings.
@@ -96,10 +96,10 @@ class RunLogPatch:
  """Patch to the run log."""

  ops: list[dict[str, Any]]
- """List of jsonpatch operations, which describe how to create the run state
+ """List of JSONPatch operations, which describe how to create the run state
  from an empty dict. This is the minimal representation of the log, designed to
  be serialized as JSON and sent over the wire to reconstruct the log on the other
- side. Reconstruction of the state can be done with any jsonpatch-compliant library,
+ side. Reconstruction of the state can be done with any JSONPatch-compliant library,
  see https://jsonpatch.com for more information."""

  def __init__(self, *ops: dict[str, Any]) -> None:
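As a reconstruction sketch for the JSONPatch ops described here, using the third-party `jsonpatch` package (the ops shown are illustrative; any JSONPatch-compliant library works):

```python
import jsonpatch  # third-party package; not a langchain-core dependency

# Illustrative ops as they might arrive from a stream of RunLogPatch objects.
ops = [
    {"op": "replace", "path": "", "value": {}},
    {"op": "add", "path": "/final_output", "value": {"answer": "42"}},
]

# Rebuild the run state from an empty dict by applying the ops in order.
state = jsonpatch.apply_patch({}, ops)
# state == {'final_output': {'answer': '42'}}
```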
@@ -30,10 +30,7 @@ def stringify_dict(data: dict) -> str:
  Returns:
  The stringified dictionary.
  """
- text = ""
- for key, value in data.items():
- text += key + ": " + stringify_value(value) + "\n"
- return text
+ return "".join(f"{key}: {stringify_value(value)}\n" for key, value in data.items())


  def comma_list(items: list[Any]) -> str:
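A quick check of the behavior preserved by this refactor (illustrative input):

```python
from langchain_core.utils.strings import stringify_dict

# Both the old loop and the new join produce one "key: value" line per entry,
# each terminated with a newline.
stringify_dict({"title": "Report", "pages": 12})
# -> 'title: Report\npages: 12\n'
```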
@@ -218,7 +218,7 @@ def _build_model_kwargs(
  values: dict[str, Any],
  all_required_field_names: set[str],
  ) -> dict[str, Any]:
- """Build "model_kwargs" param from Pydantic constructor values.
+ """Build `model_kwargs` param from Pydantic constructor values.

  Args:
  values: All init args passed in by user.
@@ -228,8 +228,8 @@ def _build_model_kwargs(
  Extra kwargs.

  Raises:
- ValueError: If a field is specified in both values and extra_kwargs.
- ValueError: If a field is specified in model_kwargs.
+ ValueError: If a field is specified in both `values` and `extra_kwargs`.
+ ValueError: If a field is specified in `model_kwargs`.
  """
  extra_kwargs = values.get("model_kwargs", {})
  for field_name in list(values):
@@ -267,6 +267,10 @@ def build_extra_kwargs(
  ) -> dict[str, Any]:
  """Build extra kwargs from values and extra_kwargs.

+ !!! danger "DON'T USE"
+ Kept for backwards-compatibility but should never have been public. Use the
+ internal `_build_model_kwargs` function instead.
+
  Args:
  extra_kwargs: Extra kwargs passed in by user.
  values: Values passed in by user.
@@ -276,9 +280,10 @@ def build_extra_kwargs(
  Extra kwargs.

  Raises:
- ValueError: If a field is specified in both values and extra_kwargs.
- ValueError: If a field is specified in model_kwargs.
+ ValueError: If a field is specified in both `values` and `extra_kwargs`.
+ ValueError: If a field is specified in `model_kwargs`.
  """
+ # DON'T USE! Kept for backwards-compatibility but should never have been public.
  for field_name in list(values):
  if field_name in extra_kwargs:
  msg = f"Found {field_name} supplied twice."
@@ -292,6 +297,7 @@ def build_extra_kwargs(
  )
  extra_kwargs[field_name] = values.pop(field_name)

+ # DON'T USE! Kept for backwards-compatibility but should never have been public.
  invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
  if invalid_model_kwargs:
  msg = (
@@ -300,6 +306,7 @@ def build_extra_kwargs(
  )
  raise ValueError(msg)

+ # DON'T USE! Kept for backwards-compatibility but should never have been public.
  return extra_kwargs
