langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of langchain-core has been flagged as potentially problematic.

Files changed (172)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +45 -70
  4. langchain_core/_api/deprecation.py +80 -80
  5. langchain_core/_api/path.py +22 -8
  6. langchain_core/_import_utils.py +10 -4
  7. langchain_core/agents.py +25 -21
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +341 -348
  11. langchain_core/callbacks/file.py +55 -44
  12. langchain_core/callbacks/manager.py +546 -683
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +35 -36
  15. langchain_core/callbacks/usage.py +65 -70
  16. langchain_core/chat_history.py +48 -55
  17. langchain_core/document_loaders/base.py +46 -21
  18. langchain_core/document_loaders/langsmith.py +39 -36
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +96 -74
  21. langchain_core/documents/compressor.py +12 -9
  22. langchain_core/documents/transformers.py +29 -28
  23. langchain_core/embeddings/fake.py +56 -57
  24. langchain_core/env.py +2 -3
  25. langchain_core/example_selectors/base.py +12 -0
  26. langchain_core/example_selectors/length_based.py +1 -1
  27. langchain_core/example_selectors/semantic_similarity.py +21 -25
  28. langchain_core/exceptions.py +15 -9
  29. langchain_core/globals.py +4 -163
  30. langchain_core/indexing/api.py +132 -125
  31. langchain_core/indexing/base.py +64 -67
  32. langchain_core/indexing/in_memory.py +26 -6
  33. langchain_core/language_models/__init__.py +15 -27
  34. langchain_core/language_models/_utils.py +267 -117
  35. langchain_core/language_models/base.py +92 -177
  36. langchain_core/language_models/chat_models.py +547 -407
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +72 -118
  39. langchain_core/language_models/llms.py +168 -242
  40. langchain_core/load/dump.py +8 -11
  41. langchain_core/load/load.py +32 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +50 -56
  44. langchain_core/messages/__init__.py +36 -51
  45. langchain_core/messages/ai.py +377 -150
  46. langchain_core/messages/base.py +239 -47
  47. langchain_core/messages/block_translators/__init__.py +111 -0
  48. langchain_core/messages/block_translators/anthropic.py +470 -0
  49. langchain_core/messages/block_translators/bedrock.py +94 -0
  50. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  51. langchain_core/messages/block_translators/google_genai.py +530 -0
  52. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  53. langchain_core/messages/block_translators/groq.py +143 -0
  54. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  55. langchain_core/messages/block_translators/openai.py +1010 -0
  56. langchain_core/messages/chat.py +2 -3
  57. langchain_core/messages/content.py +1423 -0
  58. langchain_core/messages/function.py +7 -7
  59. langchain_core/messages/human.py +44 -38
  60. langchain_core/messages/modifier.py +3 -2
  61. langchain_core/messages/system.py +40 -27
  62. langchain_core/messages/tool.py +160 -58
  63. langchain_core/messages/utils.py +527 -638
  64. langchain_core/output_parsers/__init__.py +1 -14
  65. langchain_core/output_parsers/base.py +68 -104
  66. langchain_core/output_parsers/json.py +13 -17
  67. langchain_core/output_parsers/list.py +11 -33
  68. langchain_core/output_parsers/openai_functions.py +56 -74
  69. langchain_core/output_parsers/openai_tools.py +68 -109
  70. langchain_core/output_parsers/pydantic.py +15 -13
  71. langchain_core/output_parsers/string.py +6 -2
  72. langchain_core/output_parsers/transform.py +17 -60
  73. langchain_core/output_parsers/xml.py +34 -44
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +26 -11
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +17 -6
  78. langchain_core/outputs/llm_result.py +15 -8
  79. langchain_core/prompt_values.py +29 -123
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +48 -63
  82. langchain_core/prompts/chat.py +259 -288
  83. langchain_core/prompts/dict.py +19 -11
  84. langchain_core/prompts/few_shot.py +84 -90
  85. langchain_core/prompts/few_shot_with_templates.py +14 -12
  86. langchain_core/prompts/image.py +19 -14
  87. langchain_core/prompts/loading.py +6 -8
  88. langchain_core/prompts/message.py +7 -8
  89. langchain_core/prompts/prompt.py +42 -43
  90. langchain_core/prompts/string.py +37 -16
  91. langchain_core/prompts/structured.py +43 -46
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +52 -192
  94. langchain_core/runnables/base.py +1727 -1683
  95. langchain_core/runnables/branch.py +52 -73
  96. langchain_core/runnables/config.py +89 -103
  97. langchain_core/runnables/configurable.py +128 -130
  98. langchain_core/runnables/fallbacks.py +93 -82
  99. langchain_core/runnables/graph.py +127 -127
  100. langchain_core/runnables/graph_ascii.py +63 -41
  101. langchain_core/runnables/graph_mermaid.py +87 -70
  102. langchain_core/runnables/graph_png.py +31 -36
  103. langchain_core/runnables/history.py +145 -161
  104. langchain_core/runnables/passthrough.py +141 -144
  105. langchain_core/runnables/retry.py +84 -68
  106. langchain_core/runnables/router.py +33 -37
  107. langchain_core/runnables/schema.py +79 -72
  108. langchain_core/runnables/utils.py +95 -139
  109. langchain_core/stores.py +85 -131
  110. langchain_core/structured_query.py +11 -15
  111. langchain_core/sys_info.py +31 -32
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +221 -247
  114. langchain_core/tools/convert.py +144 -161
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -19
  117. langchain_core/tools/simple.py +52 -29
  118. langchain_core/tools/structured.py +56 -60
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/_streaming.py +6 -7
  121. langchain_core/tracers/base.py +103 -112
  122. langchain_core/tracers/context.py +29 -48
  123. langchain_core/tracers/core.py +142 -105
  124. langchain_core/tracers/evaluation.py +30 -34
  125. langchain_core/tracers/event_stream.py +162 -117
  126. langchain_core/tracers/langchain.py +34 -36
  127. langchain_core/tracers/log_stream.py +87 -49
  128. langchain_core/tracers/memory_stream.py +3 -3
  129. langchain_core/tracers/root_listeners.py +18 -34
  130. langchain_core/tracers/run_collector.py +8 -20
  131. langchain_core/tracers/schemas.py +0 -125
  132. langchain_core/tracers/stdout.py +3 -3
  133. langchain_core/utils/__init__.py +1 -4
  134. langchain_core/utils/_merge.py +47 -9
  135. langchain_core/utils/aiter.py +70 -66
  136. langchain_core/utils/env.py +12 -9
  137. langchain_core/utils/function_calling.py +139 -206
  138. langchain_core/utils/html.py +7 -8
  139. langchain_core/utils/input.py +6 -6
  140. langchain_core/utils/interactive_env.py +6 -2
  141. langchain_core/utils/iter.py +48 -45
  142. langchain_core/utils/json.py +14 -4
  143. langchain_core/utils/json_schema.py +159 -43
  144. langchain_core/utils/mustache.py +32 -25
  145. langchain_core/utils/pydantic.py +67 -40
  146. langchain_core/utils/strings.py +5 -5
  147. langchain_core/utils/usage.py +1 -1
  148. langchain_core/utils/utils.py +104 -62
  149. langchain_core/vectorstores/base.py +131 -179
  150. langchain_core/vectorstores/in_memory.py +113 -182
  151. langchain_core/vectorstores/utils.py +23 -17
  152. langchain_core/version.py +1 -1
  153. langchain_core-1.0.0.dist-info/METADATA +68 -0
  154. langchain_core-1.0.0.dist-info/RECORD +172 -0
  155. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  156. langchain_core/beta/__init__.py +0 -1
  157. langchain_core/beta/runnables/__init__.py +0 -1
  158. langchain_core/beta/runnables/context.py +0 -448
  159. langchain_core/memory.py +0 -116
  160. langchain_core/messages/content_blocks.py +0 -1435
  161. langchain_core/prompts/pipeline.py +0 -133
  162. langchain_core/pydantic_v1/__init__.py +0 -30
  163. langchain_core/pydantic_v1/dataclasses.py +0 -23
  164. langchain_core/pydantic_v1/main.py +0 -23
  165. langchain_core/tracers/langchain_v1.py +0 -23
  166. langchain_core/utils/loading.py +0 -31
  167. langchain_core/v1/__init__.py +0 -1
  168. langchain_core/v1/chat_models.py +0 -1047
  169. langchain_core/v1/messages.py +0 -755
  170. langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
  171. langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
  172. langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4

langchain_core/prompts/prompt.py

@@ -2,9 +2,8 @@

  from __future__ import annotations

- import warnings
  from pathlib import Path
- from typing import TYPE_CHECKING, Any, Optional, Union
+ from typing import TYPE_CHECKING, Any

  from pydantic import BaseModel, model_validator
  from typing_extensions import override
@@ -45,18 +44,16 @@ class PromptTemplate(StringPromptTemplate):
  from untrusted sources.

  Example:
+ ```python
+ from langchain_core.prompts import PromptTemplate

- .. code-block:: python
-
- from langchain_core.prompts import PromptTemplate
-
- # Instantiation using from_template (recommended)
- prompt = PromptTemplate.from_template("Say {foo}")
- prompt.format(foo="bar")
-
- # Instantiation using initializer
- prompt = PromptTemplate(template="Say {foo}")
+ # Instantiation using from_template (recommended)
+ prompt = PromptTemplate.from_template("Say {foo}")
+ prompt.format(foo="bar")

+ # Instantiation using initializer
+ prompt = PromptTemplate(template="Say {foo}")
+ ```
  """

  @property
@@ -69,6 +66,11 @@ class PromptTemplate(StringPromptTemplate):
  @classmethod
  @override
  def get_lc_namespace(cls) -> list[str]:
+ """Get the namespace of the LangChain object.
+
+ Returns:
+ `["langchain", "prompts", "prompt"]`
+ """
  return ["langchain", "prompts", "prompt"]

  template: str
@@ -135,14 +137,20 @@ class PromptTemplate(StringPromptTemplate):
  return mustache_schema(self.template)

  def __add__(self, other: Any) -> PromptTemplate:
- """Override the + operator to allow for combining prompt templates."""
+ """Override the + operator to allow for combining prompt templates.
+
+ Raises:
+ ValueError: If the template formats are not f-string or if there are
+ conflicting partial variables.
+ NotImplementedError: If the other object is not a `PromptTemplate` or str.
+
+ Returns:
+ A new `PromptTemplate` that is the combination of the two.
+ """
  # Allow for easy combining
  if isinstance(other, PromptTemplate):
- if self.template_format != "f-string":
- msg = "Adding prompt templates only supported for f-strings."
- raise ValueError(msg)
- if other.template_format != "f-string":
- msg = "Adding prompt templates only supported for f-strings."
+ if self.template_format != other.template_format:
+ msg = "Cannot add templates of different formats"
  raise ValueError(msg)
  input_variables = list(
  set(self.input_variables) | set(other.input_variables)
@@ -160,11 +168,14 @@ class PromptTemplate(StringPromptTemplate):
  template=template,
  input_variables=input_variables,
  partial_variables=partial_variables,
- template_format="f-string",
+ template_format=self.template_format,
  validate_template=validate_template,
  )
  if isinstance(other, str):
- prompt = PromptTemplate.from_template(other)
+ prompt = PromptTemplate.from_template(
+ other,
+ template_format=self.template_format,
+ )
  return self + prompt
  msg = f"Unsupported operand type for +: {type(other)}"
  raise NotImplementedError(msg)
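
With the relaxed `__add__` above, two `PromptTemplate`s can be combined as long as their `template_format` matches, and a plain string on the right-hand side is parsed with the left template's format. A minimal sketch of the new behavior; the expected output shown in comments is inferred from the hunk above, not quoted from the package docs:

```python
from langchain_core.prompts import PromptTemplate

# Both sides use the "mustache" format, so addition should now succeed;
# previously only f-string templates could be combined.
greeting = PromptTemplate.from_template("Hello {{name}}. ", template_format="mustache")
combined = greeting + "Tell me about {{topic}}."  # the str is parsed as mustache too

print(combined.format(name="Ada", topic="prompts"))
# Expected: "Hello Ada. Tell me about prompts."

# Mixing formats is expected to raise
# ValueError("Cannot add templates of different formats").
```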
@@ -178,7 +189,7 @@ class PromptTemplate(StringPromptTemplate):
  """Format the prompt with the inputs.

  Args:
- kwargs: Any arguments to be passed to the prompt template.
+ **kwargs: Any arguments to be passed to the prompt template.

  Returns:
  A formatted string.
@@ -209,7 +220,7 @@ class PromptTemplate(StringPromptTemplate):
  example_separator: The separator to use in between examples. Defaults
  to two new line characters.
  prefix: String that should go before any examples. Generally includes
- examples. Default to an empty string.
+ examples.

  Returns:
  The final prompt generated.
@@ -220,32 +231,21 @@ class PromptTemplate(StringPromptTemplate):
  @classmethod
  def from_file(
  cls,
- template_file: Union[str, Path],
- input_variables: Optional[list[str]] = None,
- encoding: Optional[str] = None,
+ template_file: str | Path,
+ encoding: str | None = None,
  **kwargs: Any,
  ) -> PromptTemplate:
  """Load a prompt from a file.

  Args:
  template_file: The path to the file containing the prompt template.
- input_variables: [DEPRECATED] A list of variable names the final prompt
- template will expect. Defaults to None.
  encoding: The encoding system for opening the template file.
  If not provided, will use the OS default.

- input_variables is ignored as from_file now delegates to from_template().
-
  Returns:
  The prompt loaded from the file.
  """
  template = Path(template_file).read_text(encoding=encoding)
- if input_variables:
- warnings.warn(
- "`input_variables' is deprecated and ignored.",
- DeprecationWarning,
- stacklevel=2,
- )
  return cls.from_template(template=template, **kwargs)

  @classmethod
@@ -254,7 +254,7 @@ class PromptTemplate(StringPromptTemplate):
  template: str,
  *,
  template_format: PromptTemplateFormat = "f-string",
- partial_variables: Optional[dict[str, Any]] = None,
+ partial_variables: dict[str, Any] | None = None,
  **kwargs: Any,
  ) -> PromptTemplate:
  """Load a prompt template from a template.
@@ -275,14 +275,13 @@ class PromptTemplate(StringPromptTemplate):
  Args:
  template: The template to load.
  template_format: The format of the template. Use `jinja2` for jinja2,
- `mustache` for mustache, and `f-string` for f-strings.
- Defaults to `f-string`.
+ `mustache` for mustache, and `f-string` for f-strings.
  partial_variables: A dictionary of variables that can be used to partially
- fill in the template. For example, if the template is
- `"{variable1} {variable2}"`, and `partial_variables` is
- `{"variable1": "foo"}`, then the final prompt will be
- `"foo {variable2}"`. Defaults to None.
- kwargs: Any other arguments to pass to the prompt template.
+ fill in the template. For example, if the template is
+ `"{variable1} {variable2}"`, and `partial_variables` is
+ `{"variable1": "foo"}`, then the final prompt will be
+ `"foo {variable2}"`.
+ **kwargs: Any other arguments to pass to the prompt template.

  Returns:
  The prompt template loaded from the template.
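
The `from_file` and `from_template` changes above drop the deprecated `input_variables` argument and the legacy `Optional`/`Union` hints; `partial_variables` keeps working exactly as the docstring describes. A small sketch of that documented behavior (the file path in the commented line is a placeholder):

```python
from langchain_core.prompts import PromptTemplate

# partial_variables pre-fills part of the template, so only {variable2}
# has to be supplied at format time (mirrors the docstring's example).
prompt = PromptTemplate.from_template(
    "{variable1} {variable2}",
    partial_variables={"variable1": "foo"},
)
print(prompt.format(variable2="bar"))  # -> "foo bar"

# from_file now just reads the file and delegates to from_template;
# the old input_variables argument is gone (it was already ignored).
# prompt = PromptTemplate.from_file("my_prompt.txt", encoding="utf-8")
```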
langchain_core/prompts/string.py

@@ -4,8 +4,9 @@ from __future__ import annotations

  import warnings
  from abc import ABC
+ from collections.abc import Callable, Sequence
  from string import Formatter
- from typing import Any, Callable, Literal
+ from typing import Any, Literal

  from pydantic import BaseModel, create_model

@@ -15,6 +16,14 @@ from langchain_core.utils import get_colored_text, mustache
  from langchain_core.utils.formatting import formatter
  from langchain_core.utils.interactive_env import is_interactive_env

+ try:
+ from jinja2 import Environment, meta
+ from jinja2.sandbox import SandboxedEnvironment
+
+ _HAS_JINJA2 = True
+ except ImportError:
+ _HAS_JINJA2 = False
+
  PromptTemplateFormat = Literal["f-string", "mustache", "jinja2"]


@@ -40,9 +49,7 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
  Raises:
  ImportError: If jinja2 is not installed.
  """
- try:
- from jinja2.sandbox import SandboxedEnvironment
- except ImportError as e:
+ if not _HAS_JINJA2:
  msg = (
  "jinja2 not installed, which is needed to use the jinja2_formatter. "
  "Please install it with `pip install jinja2`."
@@ -50,7 +57,7 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
  "Do not expand jinja2 templates using unverified or user-controlled "
  "inputs as that can result in arbitrary Python code execution."
  )
- raise ImportError(msg) from e
+ raise ImportError(msg)

  # This uses a sandboxed environment to prevent arbitrary code execution.
  # Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
@@ -88,14 +95,12 @@ def validate_jinja2(template: str, input_variables: list[str]) -> None:


  def _get_jinja2_variables_from_template(template: str) -> set[str]:
- try:
- from jinja2 import Environment, meta
- except ImportError as e:
+ if not _HAS_JINJA2:
  msg = (
  "jinja2 not installed, which is needed to use the jinja2_formatter. "
  "Please install it with `pip install jinja2`."
  )
- raise ImportError(msg) from e
+ raise ImportError(msg)
  env = Environment() # noqa: S701
  ast = env.parse(template)
  return meta.find_undeclared_variables(ast)
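
With the module-level `try`/`except ImportError` above, jinja2 is now imported once and gated by the `_HAS_JINJA2` flag instead of being imported inside each function. Usage of the formatter itself is unchanged; a minimal sketch, assuming the optional `jinja2` dependency is installed:

```python
from langchain_core.prompts.string import jinja2_formatter

# Renders the template in a SandboxedEnvironment; still only pass trusted
# templates, since sandboxing is a mitigation rather than a guarantee.
text = jinja2_formatter("Hello {{ name }}!", name="World")
print(text)  # Hello World!

# Without jinja2 installed, the call raises ImportError with install
# instructions (now driven by the _HAS_JINJA2 flag, not a nested import).
```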
@@ -144,9 +149,7 @@ def mustache_template_vars(
  Defs = dict[str, "Defs"]


- def mustache_schema(
- template: str,
- ) -> type[BaseModel]:
+ def mustache_schema(template: str) -> type[BaseModel]:
  """Get the variables from a mustache template.

  Args:
@@ -166,10 +169,15 @@ def mustache_schema(
  prefix = section_stack.pop()
  elif type_ in {"section", "inverted section"}:
  section_stack.append(prefix)
- prefix = prefix + tuple(key.split("."))
+ prefix += tuple(key.split("."))
  fields[prefix] = False
  elif type_ in {"variable", "no escape"}:
  fields[prefix + tuple(key.split("."))] = True
+
+ for fkey, fval in fields.items():
+ fields[fkey] = fval and not any(
+ is_subsequence(fkey, k) for k in fields if k != fkey
+ )
  defs: Defs = {} # None means leaf node
  while fields:
  field, is_leaf = fields.popitem()
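
`mustache_schema` still returns a Pydantic model describing the template's variables; the new post-processing loop demotes a variable to a nested section when another variable path extends it. A small usage sketch (the exact shape of the generated model is inferred from the code above, not quoted from the package docs):

```python
from langchain_core.prompts.string import mustache_schema

# Dotted paths such as {{user.name}} are collected as tuples and folded
# into nested models; top-level {{greeting}} stays a plain string field.
Schema = mustache_schema("{{greeting}} {{user.name}}, age {{user.age}}")
print(Schema.model_json_schema())
```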
@@ -268,14 +276,18 @@ class StringPromptTemplate(BasePromptTemplate, ABC):

  @classmethod
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object."""
+ """Get the namespace of the LangChain object.
+
+ Returns:
+ `["langchain", "prompts", "base"]`
+ """
  return ["langchain", "prompts", "base"]

  def format_prompt(self, **kwargs: Any) -> PromptValue:
  """Format the prompt with the inputs.

  Args:
- kwargs: Any arguments to be passed to the prompt template.
+ **kwargs: Any arguments to be passed to the prompt template.

  Returns:
  A formatted string.
@@ -286,7 +298,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
  """Async format the prompt with the inputs.

  Args:
- kwargs: Any arguments to be passed to the prompt template.
+ **kwargs: Any arguments to be passed to the prompt template.

  Returns:
  A formatted string.
@@ -318,3 +330,12 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
  def pretty_print(self) -> None:
  """Print a pretty representation of the prompt."""
  print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
+
+
+ def is_subsequence(child: Sequence, parent: Sequence) -> bool:
+ """Return True if child is subsequence of parent."""
+ if len(child) == 0 or len(parent) == 0:
+ return False
+ if len(parent) < len(child):
+ return False
+ return all(child[i] == parent[i] for i in range(len(child)))
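
Note that `is_subsequence` as added above compares `child[i] == parent[i]` over the indices of `child`, so in practice it is a leading-prefix check on the variable-path tuples that `mustache_schema` builds. A behavior sketch based on that code:

```python
from langchain_core.prompts.string import is_subsequence

# ("user",) matches the start of ("user", "name") element by element.
print(is_subsequence(("user",), ("user", "name")))   # True
# A parent shorter than the child, or empty inputs, return False.
print(is_subsequence(("user", "name"), ("user",)))   # False
# A match that does not start at index 0 also returns False.
print(is_subsequence(("name",), ("user", "name")))   # False
```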
langchain_core/prompts/structured.py

@@ -1,11 +1,8 @@
  """Structured prompt template for a language model."""

- from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
+ from collections.abc import AsyncIterator, Callable, Iterator, Mapping, Sequence
  from typing import (
  Any,
- Callable,
- Optional,
- Union,
  )

  from pydantic import BaseModel, Field
@@ -31,16 +28,16 @@ from langchain_core.utils import get_pydantic_field_names
  class StructuredPrompt(ChatPromptTemplate):
  """Structured prompt template for a language model."""

- schema_: Union[dict, type]
+ schema_: dict | type
  """Schema for the structured prompt."""
  structured_output_kwargs: dict[str, Any] = Field(default_factory=dict)

  def __init__(
  self,
  messages: Sequence[MessageLikeRepresentation],
- schema_: Optional[Union[dict, type[BaseModel]]] = None,
+ schema_: dict | type[BaseModel] | None = None,
  *,
- structured_output_kwargs: Optional[dict[str, Any]] = None,
+ structured_output_kwargs: dict[str, Any] | None = None,
  template_format: PromptTemplateFormat = "f-string",
  **kwargs: Any,
  ) -> None:
@@ -66,10 +63,13 @@ class StructuredPrompt(ChatPromptTemplate):

  @classmethod
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object.
+ """Get the namespace of the LangChain object.

  For example, if the class is `langchain.llms.openai.OpenAI`, then the
- namespace is ["langchain", "llms", "openai"]
+ namespace is `["langchain", "llms", "openai"]`
+
+ Returns:
+ The namespace of the LangChain object.
  """
  return cls.__module__.split(".")

@@ -77,7 +77,7 @@ class StructuredPrompt(ChatPromptTemplate):
  def from_messages_and_schema(
  cls,
  messages: Sequence[MessageLikeRepresentation],
- schema: Union[dict, type],
+ schema: dict | type,
  **kwargs: Any,
  ) -> ChatPromptTemplate:
  """Create a chat prompt template from a variety of message formats.
@@ -85,33 +85,34 @@ class StructuredPrompt(ChatPromptTemplate):
  Examples:
  Instantiation from a list of message templates:

- .. code-block:: python
+ ```python
+ from langchain_core.prompts import StructuredPrompt

- from langchain_core.prompts import StructuredPrompt

- class OutputSchema(BaseModel):
- name: str
- value: int
+ class OutputSchema(BaseModel):
+ name: str
+ value: int

- template = StructuredPrompt(
- [
- ("human", "Hello, how are you?"),
- ("ai", "I'm doing well, thanks!"),
- ("human", "That's good to hear."),
- ],
- OutputSchema,
- )

+ template = StructuredPrompt(
+ [
+ ("human", "Hello, how are you?"),
+ ("ai", "I'm doing well, thanks!"),
+ ("human", "That's good to hear."),
+ ],
+ OutputSchema,
+ )
+ ```
  Args:
  messages: sequence of message representations.
- A message can be represented using the following formats:
- (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
- (message type, template); e.g., ("human", "{user_input}"),
- (4) 2-tuple of (message class, template), (5) a string which is
- shorthand for ("human", template); e.g., "{user_input}"
+ A message can be represented using the following formats:
+ (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
+ (message type, template); e.g., ("human", "{user_input}"),
+ (4) 2-tuple of (message class, template), (5) a string which is
+ shorthand for ("human", template); e.g., "{user_input}"
  schema: a dictionary representation of function call, or a Pydantic model.
- kwargs: Any additional kwargs to pass through to
- ``ChatModel.with_structured_output(schema, **kwargs)``.
+ **kwargs: Any additional kwargs to pass through to
+ `ChatModel.with_structured_output(schema, **kwargs)`.

  Returns:
  a structured prompt template
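
Beyond the docstring example above, the `from_messages_and_schema` classmethod builds the same kind of prompt; extra kwargs are stored and later forwarded to `with_structured_output` when the prompt is piped into a chat model. A short sketch; the call simply formats messages, so `invoke` returns a prompt value rather than model output:

```python
from pydantic import BaseModel

from langchain_core.prompts import StructuredPrompt


class Joke(BaseModel):
    setup: str
    punchline: str


# Equivalent to StructuredPrompt([...], Joke); any extra kwargs are later
# passed through to model.with_structured_output(Joke, **kwargs).
prompt = StructuredPrompt.from_messages_and_schema(
    [("human", "Tell me a joke about {topic}")],
    Joke,
)
print(prompt.invoke({"topic": "cats"}))
```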
@@ -122,32 +123,28 @@ class StructuredPrompt(ChatPromptTemplate):
  @override
  def __or__(
  self,
- other: Union[
- Runnable[Any, Other],
- Callable[[Iterator[Any]], Iterator[Other]],
- Callable[[AsyncIterator[Any]], AsyncIterator[Other]],
- Callable[[Any], Other],
- Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
- ],
+ other: Runnable[Any, Other]
+ | Callable[[Iterator[Any]], Iterator[Other]]
+ | Callable[[AsyncIterator[Any]], AsyncIterator[Other]]
+ | Callable[[Any], Other]
+ | Mapping[str, Runnable[Any, Other] | Callable[[Any], Other] | Any],
  ) -> RunnableSerializable[dict, Other]:
  return self.pipe(other)

  def pipe(
  self,
- *others: Union[
- Runnable[Any, Other],
- Callable[[Iterator[Any]], Iterator[Other]],
- Callable[[AsyncIterator[Any]], AsyncIterator[Other]],
- Callable[[Any], Other],
- Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
- ],
- name: Optional[str] = None,
+ *others: Runnable[Any, Other]
+ | Callable[[Iterator[Any]], Iterator[Other]]
+ | Callable[[AsyncIterator[Any]], AsyncIterator[Other]]
+ | Callable[[Any], Other]
+ | Mapping[str, Runnable[Any, Other] | Callable[[Any], Other] | Any],
+ name: str | None = None,
  ) -> RunnableSerializable[dict, Other]:
  """Pipe the structured prompt to a language model.

  Args:
  others: The language model to pipe the structured prompt to.
- name: The name of the pipeline. Defaults to None.
+ name: The name of the pipeline.

  Returns:
  A RunnableSequence object.
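
The `__or__` and `pipe` changes above only modernize the type hints to PEP 604 unions; the behavior is unchanged, and piping a `StructuredPrompt` into a chat model still wires the schema through `with_structured_output`. A minimal sketch in which `model` is a placeholder for any provider chat model that supports structured output:

```python
from pydantic import BaseModel

from langchain_core.prompts import StructuredPrompt


class Answer(BaseModel):
    value: int


prompt = StructuredPrompt([("human", "What is {a} + {b}?")], Answer)

# `model` is assumed to be a chat model instance exposing
# with_structured_output(); piping binds the schema, so the chain is
# expected to return an Answer instance rather than a raw message.
# chain = prompt | model
# result = chain.invoke({"a": 2, "b": 3})
```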