langchain-core 1.0.0rc1__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (92) hide show
  1. langchain_core/agents.py +3 -3
  2. langchain_core/caches.py +44 -48
  3. langchain_core/callbacks/base.py +5 -5
  4. langchain_core/callbacks/file.py +2 -2
  5. langchain_core/callbacks/stdout.py +1 -1
  6. langchain_core/chat_history.py +1 -1
  7. langchain_core/document_loaders/base.py +21 -21
  8. langchain_core/document_loaders/langsmith.py +2 -2
  9. langchain_core/documents/base.py +39 -39
  10. langchain_core/embeddings/fake.py +4 -2
  11. langchain_core/example_selectors/semantic_similarity.py +4 -6
  12. langchain_core/exceptions.py +3 -4
  13. langchain_core/indexing/api.py +8 -14
  14. langchain_core/language_models/__init__.py +11 -25
  15. langchain_core/language_models/_utils.py +2 -1
  16. langchain_core/language_models/base.py +7 -0
  17. langchain_core/language_models/chat_models.py +14 -16
  18. langchain_core/language_models/fake_chat_models.py +3 -3
  19. langchain_core/language_models/llms.py +4 -4
  20. langchain_core/load/dump.py +3 -4
  21. langchain_core/load/load.py +0 -9
  22. langchain_core/load/serializable.py +3 -3
  23. langchain_core/messages/ai.py +20 -22
  24. langchain_core/messages/base.py +8 -8
  25. langchain_core/messages/block_translators/__init__.py +1 -1
  26. langchain_core/messages/block_translators/anthropic.py +1 -1
  27. langchain_core/messages/block_translators/bedrock_converse.py +1 -1
  28. langchain_core/messages/block_translators/google_genai.py +3 -2
  29. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  30. langchain_core/messages/block_translators/langchain_v0.py +1 -1
  31. langchain_core/messages/block_translators/openai.py +1 -1
  32. langchain_core/messages/chat.py +2 -6
  33. langchain_core/messages/content.py +34 -17
  34. langchain_core/messages/function.py +3 -7
  35. langchain_core/messages/human.py +4 -9
  36. langchain_core/messages/modifier.py +1 -1
  37. langchain_core/messages/system.py +2 -10
  38. langchain_core/messages/tool.py +30 -42
  39. langchain_core/messages/utils.py +24 -30
  40. langchain_core/output_parsers/base.py +24 -24
  41. langchain_core/output_parsers/json.py +0 -1
  42. langchain_core/output_parsers/list.py +1 -1
  43. langchain_core/output_parsers/openai_functions.py +2 -2
  44. langchain_core/output_parsers/openai_tools.py +4 -9
  45. langchain_core/output_parsers/string.py +1 -1
  46. langchain_core/outputs/generation.py +1 -1
  47. langchain_core/prompt_values.py +7 -7
  48. langchain_core/prompts/base.py +1 -1
  49. langchain_core/prompts/chat.py +12 -13
  50. langchain_core/prompts/dict.py +2 -2
  51. langchain_core/prompts/few_shot_with_templates.py +1 -1
  52. langchain_core/prompts/image.py +1 -1
  53. langchain_core/prompts/message.py +2 -2
  54. langchain_core/prompts/prompt.py +7 -8
  55. langchain_core/prompts/string.py +1 -1
  56. langchain_core/prompts/structured.py +2 -2
  57. langchain_core/rate_limiters.py +23 -29
  58. langchain_core/retrievers.py +29 -29
  59. langchain_core/runnables/base.py +9 -16
  60. langchain_core/runnables/branch.py +1 -1
  61. langchain_core/runnables/config.py +1 -1
  62. langchain_core/runnables/configurable.py +2 -2
  63. langchain_core/runnables/fallbacks.py +1 -1
  64. langchain_core/runnables/graph.py +23 -28
  65. langchain_core/runnables/graph_mermaid.py +9 -9
  66. langchain_core/runnables/graph_png.py +1 -1
  67. langchain_core/runnables/history.py +2 -2
  68. langchain_core/runnables/passthrough.py +3 -3
  69. langchain_core/runnables/router.py +1 -1
  70. langchain_core/runnables/utils.py +5 -5
  71. langchain_core/tools/base.py +9 -10
  72. langchain_core/tools/convert.py +13 -17
  73. langchain_core/tools/retriever.py +6 -6
  74. langchain_core/tools/simple.py +1 -1
  75. langchain_core/tools/structured.py +5 -10
  76. langchain_core/tracers/memory_stream.py +1 -1
  77. langchain_core/tracers/root_listeners.py +2 -2
  78. langchain_core/tracers/stdout.py +1 -2
  79. langchain_core/utils/__init__.py +1 -1
  80. langchain_core/utils/aiter.py +1 -1
  81. langchain_core/utils/function_calling.py +15 -38
  82. langchain_core/utils/input.py +1 -1
  83. langchain_core/utils/iter.py +1 -1
  84. langchain_core/utils/json.py +1 -1
  85. langchain_core/utils/strings.py +1 -1
  86. langchain_core/vectorstores/base.py +14 -25
  87. langchain_core/vectorstores/utils.py +2 -2
  88. langchain_core/version.py +1 -1
  89. {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +1 -1
  90. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  91. langchain_core-1.0.0rc1.dist-info/RECORD +0 -172
  92. {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
@@ -86,6 +86,7 @@ AnyMessage = Annotated[
86
86
  | Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")],
87
87
  Field(discriminator=Discriminator(_get_type)),
88
88
  ]
89
+ """"A type representing any defined `Message` or `MessageChunk` type."""
89
90
 
90
91
 
91
92
  def get_buffer_string(
@@ -96,9 +97,7 @@ def get_buffer_string(
96
97
  Args:
97
98
  messages: Messages to be converted to strings.
98
99
  human_prefix: The prefix to prepend to contents of `HumanMessage`s.
99
- Default is `'Human'`.
100
- ai_prefix: The prefix to prepend to contents of `AIMessage`. Default is
101
- `'AI'`.
100
+ ai_prefix: The prefix to prepend to contents of `AIMessage`.
102
101
 
103
102
  Returns:
104
103
  A single string concatenation of all input messages.
@@ -211,6 +210,7 @@ def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
211
210
  MessageLikeRepresentation = (
212
211
  BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]
213
212
  )
213
+ """A type representing the various ways a message can be represented."""
214
214
 
215
215
 
216
216
  def _create_message_from_message_type(
@@ -227,10 +227,10 @@ def _create_message_from_message_type(
227
227
  Args:
228
228
  message_type: (str) the type of the message (e.g., `'human'`, `'ai'`, etc.).
229
229
  content: (str) the content string.
230
- name: (str) the name of the message. Default is None.
231
- tool_call_id: (str) the tool call id. Default is None.
232
- tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
233
- id: (str) the id of the message. Default is None.
230
+ name: (str) the name of the message.
231
+ tool_call_id: (str) the tool call id.
232
+ tool_calls: (list[dict[str, Any]]) the tool calls.
233
+ id: (str) the id of the message.
234
234
  additional_kwargs: (dict[str, Any]) additional keyword arguments.
235
235
 
236
236
  Returns:
@@ -319,7 +319,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
319
319
  message: a representation of a message in one of the supported formats.
320
320
 
321
321
  Returns:
322
- an instance of a message or a message template.
322
+ An instance of a message or a message template.
323
323
 
324
324
  Raises:
325
325
  NotImplementedError: if the message type is not supported.
@@ -425,19 +425,19 @@ def filter_messages(
425
425
 
426
426
  Args:
427
427
  messages: Sequence Message-like objects to filter.
428
- include_names: Message names to include. Default is None.
429
- exclude_names: Messages names to exclude. Default is None.
428
+ include_names: Message names to include.
429
+ exclude_names: Message names to exclude.
430
430
  include_types: Message types to include. Can be specified as string names
431
431
  (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
432
432
  classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
433
- Default is None.
433
+
434
434
  exclude_types: Message types to exclude. Can be specified as string names
435
435
  (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage`
436
436
  classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...).
437
- Default is None.
438
- include_ids: Message IDs to include. Default is None.
439
- exclude_ids: Message IDs to exclude. Default is None.
440
- exclude_tool_calls: Tool call IDs to exclude. Default is None.
437
+
438
+ include_ids: Message IDs to include.
439
+ exclude_ids: Message IDs to exclude.
440
+ exclude_tool_calls: Tool call IDs to exclude.
441
441
  Can be one of the following:
442
442
  - `True`: all `AIMessage`s with tool calls and all
443
443
  `ToolMessage` objects will be excluded.
@@ -568,7 +568,6 @@ def merge_message_runs(
568
568
  Args:
569
569
  messages: Sequence Message-like objects to merge.
570
570
  chunk_separator: Specify the string to be inserted between message chunks.
571
- Defaults to `'\n'`.
572
571
 
573
572
  Returns:
574
573
  list of BaseMessages with consecutive runs of message types merged into single
@@ -703,7 +702,7 @@ def trim_messages(
703
702
  r"""Trim messages to be below a token count.
704
703
 
705
704
  `trim_messages` can be used to reduce the size of a chat history to a specified
706
- token count or specified message count.
705
+ token or message count.
707
706
 
708
707
  In either case, if passing the trimmed chat history back into a chat model
709
708
  directly, the resulting chat history should usually satisfy the following
@@ -714,8 +713,6 @@ def trim_messages(
714
713
  followed by a `HumanMessage`. To achieve this, set `start_on='human'`.
715
714
  In addition, generally a `ToolMessage` can only appear after an `AIMessage`
716
715
  that involved a tool call.
717
- Please see the following link for more information about messages:
718
- https://python.langchain.com/docs/concepts/#messages
719
716
  2. It includes recent messages and drops old messages in the chat history.
720
717
  To achieve this set the `strategy='last'`.
721
718
  3. Usually, the new chat history should include the `SystemMessage` if it
@@ -745,12 +742,10 @@ def trim_messages(
745
742
  strategy: Strategy for trimming.
746
743
  - `'first'`: Keep the first `<= n_count` tokens of the messages.
747
744
  - `'last'`: Keep the last `<= n_count` tokens of the messages.
748
- Default is `'last'`.
749
745
  allow_partial: Whether to split a message if only part of the message can be
750
746
  included. If `strategy='last'` then the last partial contents of a message
751
747
  are included. If `strategy='first'` then the first partial contents of a
752
748
  message are included.
753
- Default is False.
754
749
  end_on: The message type to end on. If specified then every message after the
755
750
  last occurrence of this type is ignored. If `strategy='last'` then this
756
751
  is done before we attempt to get the last `max_tokens`. If
@@ -759,7 +754,7 @@ def trim_messages(
759
754
  `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g.
760
755
  `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single
761
756
  type or a list of types.
762
- Default is None.
757
+
763
758
  start_on: The message type to start on. Should only be specified if
764
759
  `strategy='last'`. If specified then every message before
765
760
  the first occurrence of this type is ignored. This is done after we trim
@@ -768,10 +763,9 @@ def trim_messages(
768
763
  specified as string names (e.g. `'system'`, `'human'`, `'ai'`, ...) or
769
764
  as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`,
770
765
  `AIMessage`, ...). Can be a single type or a list of types.
771
- Default is None.
772
- include_system: Whether to keep the SystemMessage if there is one at index 0.
773
- Should only be specified if `strategy="last"`.
774
- Default is False.
766
+
767
+ include_system: Whether to keep the `SystemMessage` if there is one at index
768
+ `0`. Should only be specified if `strategy="last"`.
775
769
  text_splitter: Function or `langchain_text_splitters.TextSplitter` for
776
770
  splitting the string contents of a message. Only used if
777
771
  `allow_partial=True`. If `strategy='last'` then the last split tokens
@@ -782,7 +776,7 @@ def trim_messages(
782
776
  newlines.
783
777
 
784
778
  Returns:
785
- list of trimmed `BaseMessage`.
779
+ List of trimmed `BaseMessage`.
786
780
 
787
781
  Raises:
788
782
  ValueError: if two incompatible arguments are specified or an unrecognized
@@ -1683,11 +1677,11 @@ def count_tokens_approximately(
1683
1677
  Args:
1684
1678
  messages: List of messages to count tokens for.
1685
1679
  chars_per_token: Number of characters per token to use for the approximation.
1686
- Default is 4 (one token corresponds to ~4 chars for common English text).
1680
+ One token corresponds to ~4 chars for common English text.
1687
1681
  You can also specify float values for more fine-grained control.
1688
1682
  [See more here](https://platform.openai.com/tokenizer).
1689
- extra_tokens_per_message: Number of extra tokens to add per message.
1690
- Default is 3 (special tokens, including beginning/end of message).
1683
+ extra_tokens_per_message: Number of extra tokens to add per message, e.g.
1684
+ special tokens, including beginning/end of message.
1691
1685
  You can also specify float values for more fine-grained control.
1692
1686
  [See more here](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb).
1693
1687
  count_name: Whether to include message names in the count.
@@ -31,13 +31,13 @@ class BaseLLMOutputParser(ABC, Generic[T]):
31
31
 
32
32
  @abstractmethod
33
33
  def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
34
- """Parse a list of candidate model Generations into a specific format.
34
+ """Parse a list of candidate model `Generation` objects into a specific format.
35
35
 
36
36
  Args:
37
- result: A list of Generations to be parsed. The Generations are assumed
38
- to be different candidate outputs for a single model input.
37
+ result: A list of `Generation` to be parsed. The `Generation` objects are
38
+ assumed to be different candidate outputs for a single model input.
39
39
  partial: Whether to parse the output as a partial result. This is useful
40
- for parsers that can parse partial results. Default is False.
40
+ for parsers that can parse partial results.
41
41
 
42
42
  Returns:
43
43
  Structured output.
@@ -46,17 +46,17 @@ class BaseLLMOutputParser(ABC, Generic[T]):
46
46
  async def aparse_result(
47
47
  self, result: list[Generation], *, partial: bool = False
48
48
  ) -> T:
49
- """Async parse a list of candidate model Generations into a specific format.
49
+ """Async parse a list of candidate model `Generation` objects into a specific format.
50
50
 
51
51
  Args:
52
- result: A list of Generations to be parsed. The Generations are assumed
52
+ result: A list of `Generation` to be parsed. The `Generation` objects are assumed
53
53
  to be different candidate outputs for a single model input.
54
54
  partial: Whether to parse the output as a partial result. This is useful
55
- for parsers that can parse partial results. Default is False.
55
+ for parsers that can parse partial results.
56
56
 
57
57
  Returns:
58
58
  Structured output.
59
- """
59
+ """ # noqa: E501
60
60
  return await run_in_executor(None, self.parse_result, result, partial=partial)
61
61
 
62
62
 
@@ -172,7 +172,7 @@ class BaseOutputParser(
172
172
  This property is inferred from the first type argument of the class.
173
173
 
174
174
  Raises:
175
- TypeError: If the class doesn't have an inferable OutputType.
175
+ TypeError: If the class doesn't have an inferable `OutputType`.
176
176
  """
177
177
  for base in self.__class__.mro():
178
178
  if hasattr(base, "__pydantic_generic_metadata__"):
@@ -234,16 +234,16 @@ class BaseOutputParser(
234
234
 
235
235
  @override
236
236
  def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
237
- """Parse a list of candidate model Generations into a specific format.
237
+ """Parse a list of candidate model `Generation` objects into a specific format.
238
238
 
239
- The return value is parsed from only the first Generation in the result, which
240
- is assumed to be the highest-likelihood Generation.
239
+ The return value is parsed from only the first `Generation` in the result, which
240
+ is assumed to be the highest-likelihood `Generation`.
241
241
 
242
242
  Args:
243
- result: A list of Generations to be parsed. The Generations are assumed
244
- to be different candidate outputs for a single model input.
243
+ result: A list of `Generation` to be parsed. The `Generation` objects are
244
+ assumed to be different candidate outputs for a single model input.
245
245
  partial: Whether to parse the output as a partial result. This is useful
246
- for parsers that can parse partial results. Default is False.
246
+ for parsers that can parse partial results.
247
247
 
248
248
  Returns:
249
249
  Structured output.
@@ -264,20 +264,20 @@ class BaseOutputParser(
264
264
  async def aparse_result(
265
265
  self, result: list[Generation], *, partial: bool = False
266
266
  ) -> T:
267
- """Async parse a list of candidate model Generations into a specific format.
267
+ """Async parse a list of candidate model `Generation` objects into a specific format.
268
268
 
269
- The return value is parsed from only the first Generation in the result, which
270
- is assumed to be the highest-likelihood Generation.
269
+ The return value is parsed from only the first `Generation` in the result, which
270
+ is assumed to be the highest-likelihood `Generation`.
271
271
 
272
272
  Args:
273
- result: A list of Generations to be parsed. The Generations are assumed
274
- to be different candidate outputs for a single model input.
273
+ result: A list of `Generation` to be parsed. The `Generation` objects are
274
+ assumed to be different candidate outputs for a single model input.
275
275
  partial: Whether to parse the output as a partial result. This is useful
276
- for parsers that can parse partial results. Default is False.
276
+ for parsers that can parse partial results.
277
277
 
278
278
  Returns:
279
279
  Structured output.
280
- """
280
+ """ # noqa: E501
281
281
  return await run_in_executor(None, self.parse_result, result, partial=partial)
282
282
 
283
283
  async def aparse(self, text: str) -> T:
@@ -299,13 +299,13 @@ class BaseOutputParser(
299
299
  ) -> Any:
300
300
  """Parse the output of an LLM call with the input prompt for context.
301
301
 
302
- The prompt is largely provided in the event the OutputParser wants
302
+ The prompt is largely provided in the event the `OutputParser` wants
303
303
  to retry or fix the output in some way, and needs information from
304
304
  the prompt to do so.
305
305
 
306
306
  Args:
307
307
  completion: String output of a language model.
308
- prompt: Input PromptValue.
308
+ prompt: Input `PromptValue`.
309
309
 
310
310
  Returns:
311
311
  Structured output.
@@ -62,7 +62,6 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
62
62
  If `True`, the output will be a JSON object containing
63
63
  all the keys that have been returned so far.
64
64
  If `False`, the output will be the full JSON object.
65
- Default is False.
66
65
 
67
66
  Returns:
68
67
  The parsed JSON object.
@@ -146,7 +146,7 @@ class CommaSeparatedListOutputParser(ListOutputParser):
146
146
 
147
147
  @classmethod
148
148
  def get_lc_namespace(cls) -> list[str]:
149
- """Get the namespace of the langchain object.
149
+ """Get the namespace of the LangChain object.
150
150
 
151
151
  Returns:
152
152
  `["langchain", "output_parsers", "list"]`
@@ -238,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
238
238
  The validated values.
239
239
 
240
240
  Raises:
241
- `ValueError`: If the schema is not a Pydantic schema.
241
+ ValueError: If the schema is not a Pydantic schema.
242
242
  """
243
243
  schema = values["pydantic_schema"]
244
244
  if "args_only" not in values:
@@ -264,7 +264,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
264
264
  partial: Whether to parse partial JSON objects.
265
265
 
266
266
  Raises:
267
- `ValueError`: If the Pydantic schema is not valid.
267
+ ValueError: If the Pydantic schema is not valid.
268
268
 
269
269
  Returns:
270
270
  The parsed JSON object.
@@ -31,10 +31,9 @@ def parse_tool_call(
31
31
 
32
32
  Args:
33
33
  raw_tool_call: The raw tool call to parse.
34
- partial: Whether to parse partial JSON. Default is False.
34
+ partial: Whether to parse partial JSON.
35
35
  strict: Whether to allow non-JSON-compliant strings.
36
- Default is False.
37
- return_id: Whether to return the tool call id. Default is True.
36
+ return_id: Whether to return the tool call id.
38
37
 
39
38
  Returns:
40
39
  The parsed tool call.
@@ -105,10 +104,9 @@ def parse_tool_calls(
105
104
 
106
105
  Args:
107
106
  raw_tool_calls: The raw tool calls to parse.
108
- partial: Whether to parse partial JSON. Default is False.
107
+ partial: Whether to parse partial JSON.
109
108
  strict: Whether to allow non-JSON-compliant strings.
110
- Default is False.
111
- return_id: Whether to return the tool call id. Default is True.
109
+ return_id: Whether to return the tool call id.
112
110
 
113
111
  Returns:
114
112
  The parsed tool calls.
@@ -165,7 +163,6 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
165
163
  If `True`, the output will be a JSON object containing
166
164
  all the keys that have been returned so far.
167
165
  If `False`, the output will be the full JSON object.
168
- Default is False.
169
166
 
170
167
  Returns:
171
168
  The parsed tool calls.
@@ -229,7 +226,6 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
229
226
  If `True`, the output will be a JSON object containing
230
227
  all the keys that have been returned so far.
231
228
  If `False`, the output will be the full JSON object.
232
- Default is False.
233
229
 
234
230
  Raises:
235
231
  OutputParserException: If the generation is not a chat generation.
@@ -313,7 +309,6 @@ class PydanticToolsParser(JsonOutputToolsParser):
313
309
  If `True`, the output will be a JSON object containing
314
310
  all the keys that have been returned so far.
315
311
  If `False`, the output will be the full JSON object.
316
- Default is False.
317
312
 
318
313
  Returns:
319
314
  The parsed Pydantic objects.
@@ -19,7 +19,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):
19
19
 
20
20
  @classmethod
21
21
  def get_lc_namespace(cls) -> list[str]:
22
- """Get the namespace of the langchain object.
22
+ """Get the namespace of the LangChain object.
23
23
 
24
24
  Returns:
25
25
  `["langchain", "schema", "output_parser"]`
@@ -44,7 +44,7 @@ class Generation(Serializable):
44
44
 
45
45
  @classmethod
46
46
  def get_lc_namespace(cls) -> list[str]:
47
- """Get the namespace of the langchain object.
47
+ """Get the namespace of the LangChain object.
48
48
 
49
49
  Returns:
50
50
  `["langchain", "schema", "output"]`
@@ -24,8 +24,8 @@ from langchain_core.messages import (
24
24
  class PromptValue(Serializable, ABC):
25
25
  """Base abstract class for inputs to any language model.
26
26
 
27
- PromptValues can be converted to both LLM (pure text-generation) inputs and
28
- ChatModel inputs.
27
+ `PromptValues` can be converted to both LLM (pure text-generation) inputs and
28
+ chat model inputs.
29
29
  """
30
30
 
31
31
  @classmethod
@@ -35,7 +35,7 @@ class PromptValue(Serializable, ABC):
35
35
 
36
36
  @classmethod
37
37
  def get_lc_namespace(cls) -> list[str]:
38
- """Get the namespace of the langchain object.
38
+ """Get the namespace of the LangChain object.
39
39
 
40
40
  This is used to determine the namespace of the object when serializing.
41
41
 
@@ -62,7 +62,7 @@ class StringPromptValue(PromptValue):
62
62
 
63
63
  @classmethod
64
64
  def get_lc_namespace(cls) -> list[str]:
65
- """Get the namespace of the langchain object.
65
+ """Get the namespace of the LangChain object.
66
66
 
67
67
  This is used to determine the namespace of the object when serializing.
68
68
 
@@ -99,7 +99,7 @@ class ChatPromptValue(PromptValue):
99
99
 
100
100
  @classmethod
101
101
  def get_lc_namespace(cls) -> list[str]:
102
- """Get the namespace of the langchain object.
102
+ """Get the namespace of the LangChain object.
103
103
 
104
104
  This is used to determine the namespace of the object when serializing.
105
105
 
@@ -113,11 +113,11 @@ class ImageURL(TypedDict, total=False):
113
113
  """Image URL."""
114
114
 
115
115
  detail: Literal["auto", "low", "high"]
116
- """Specifies the detail level of the image. Defaults to `'auto'`.
116
+ """Specifies the detail level of the image.
117
+
117
118
  Can be `'auto'`, `'low'`, or `'high'`.
118
119
 
119
120
  This follows OpenAI's Chat Completion API's image URL format.
120
-
121
121
  """
122
122
 
123
123
  url: str
@@ -96,7 +96,7 @@ class BasePromptTemplate(
96
96
 
97
97
  @classmethod
98
98
  def get_lc_namespace(cls) -> list[str]:
99
- """Get the namespace of the langchain object.
99
+ """Get the namespace of the LangChain object.
100
100
 
101
101
  Returns:
102
102
  `["langchain", "schema", "prompt_template"]`
@@ -147,7 +147,6 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
147
147
  optional: If `True` format_messages can be called with no arguments and will
148
148
  return an empty list. If `False` then a named argument with name
149
149
  `variable_name` must be passed in, even if the value is an empty list.
150
- Defaults to `False`.]
151
150
  """
152
151
  # mypy can't detect the init which is defined in the parent class
153
152
  # b/c these are BaseModel classes.
@@ -195,7 +194,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
195
194
  """Human-readable representation.
196
195
 
197
196
  Args:
198
- html: Whether to format as HTML. Defaults to `False`.
197
+ html: Whether to format as HTML.
199
198
 
200
199
  Returns:
201
200
  Human-readable representation.
@@ -235,7 +234,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
235
234
 
236
235
  Args:
237
236
  template: a template.
238
- template_format: format of the template. Defaults to "f-string".
237
+ template_format: format of the template.
239
238
  partial_variables: A dictionary of variables that can be used to partially
240
239
  fill in the template. For example, if the template is
241
240
  `"{variable1} {variable2}"`, and `partial_variables` is
@@ -330,7 +329,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
330
329
  """Human-readable representation.
331
330
 
332
331
  Args:
333
- html: Whether to format as HTML. Defaults to `False`.
332
+ html: Whether to format as HTML.
334
333
 
335
334
  Returns:
336
335
  Human-readable representation.
@@ -412,7 +411,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
412
411
  Args:
413
412
  template: a template.
414
413
  template_format: format of the template.
415
- Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
414
+ Options are: 'f-string', 'mustache', 'jinja2'.
416
415
  partial_variables: A dictionary of variables that can be used to partially fill in the template.
417
416
 
418
417
  **kwargs: keyword arguments to pass to the constructor.
@@ -637,7 +636,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
637
636
  """Human-readable representation.
638
637
 
639
638
  Args:
640
- html: Whether to format as HTML. Defaults to `False`.
639
+ html: Whether to format as HTML.
641
640
 
642
641
  Returns:
643
642
  Human-readable representation.
@@ -750,7 +749,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
750
749
  """Human-readable representation.
751
750
 
752
751
  Args:
753
- html: Whether to format as HTML. Defaults to `False`.
752
+ html: Whether to format as HTML.
754
753
 
755
754
  Returns:
756
755
  Human-readable representation.
@@ -905,7 +904,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
905
904
  (message type, template); e.g., ("human", "{user_input}"),
906
905
  (4) 2-tuple of (message class, template), (5) a string which is
907
906
  shorthand for ("human", template); e.g., "{user_input}".
908
- template_format: format of the template. Defaults to "f-string".
907
+ template_format: format of the template.
909
908
  input_variables: A list of the names of the variables whose values are
910
909
  required as inputs to the prompt.
911
910
  optional_variables: A list of the names of the variables for placeholder
@@ -971,7 +970,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
971
970
 
972
971
  @classmethod
973
972
  def get_lc_namespace(cls) -> list[str]:
974
- """Get the namespace of the langchain object.
973
+ """Get the namespace of the LangChain object.
975
974
 
976
975
  Returns:
977
976
  `["langchain", "prompts", "chat"]`
@@ -1128,7 +1127,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1128
1127
  (message type, template); e.g., ("human", "{user_input}"),
1129
1128
  (4) 2-tuple of (message class, template), (5) a string which is
1130
1129
  shorthand for ("human", template); e.g., "{user_input}".
1131
- template_format: format of the template. Defaults to "f-string".
1130
+ template_format: format of the template.
1132
1131
 
1133
1132
  Returns:
1134
1133
  a chat prompt template.
@@ -1287,7 +1286,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1287
1286
  """Human-readable representation.
1288
1287
 
1289
1288
  Args:
1290
- html: Whether to format as HTML. Defaults to `False`.
1289
+ html: Whether to format as HTML.
1291
1290
 
1292
1291
  Returns:
1293
1292
  Human-readable representation.
@@ -1306,7 +1305,7 @@ def _create_template_from_message_type(
1306
1305
  Args:
1307
1306
  message_type: str the type of the message template (e.g., "human", "ai", etc.)
1308
1307
  template: str the template string.
1309
- template_format: format of the template. Defaults to "f-string".
1308
+ template_format: format of the template.
1310
1309
 
1311
1310
  Returns:
1312
1311
  a message prompt template of the appropriate type.
@@ -1383,7 +1382,7 @@ def _convert_to_message_template(
1383
1382
 
1384
1383
  Args:
1385
1384
  message: a representation of a message in one of the supported formats.
1386
- template_format: format of the template. Defaults to "f-string".
1385
+ template_format: format of the template.
1387
1386
 
1388
1387
  Returns:
1389
1388
  an instance of a message or a message template.
@@ -74,7 +74,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
74
74
 
75
75
  @classmethod
76
76
  def get_lc_namespace(cls) -> list[str]:
77
- """Get the namespace of the langchain object.
77
+ """Get the namespace of the LangChain object.
78
78
 
79
79
  Returns:
80
80
  `["langchain_core", "prompts", "dict"]`
@@ -85,7 +85,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
85
85
  """Human-readable representation.
86
86
 
87
87
  Args:
88
- html: Whether to format as HTML. Defaults to `False`.
88
+ html: Whether to format as HTML.
89
89
 
90
90
  Returns:
91
91
  Human-readable representation.
@@ -46,7 +46,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
46
46
 
47
47
  @classmethod
48
48
  def get_lc_namespace(cls) -> list[str]:
49
- """Get the namespace of the langchain object.
49
+ """Get the namespace of the LangChain object.
50
50
 
51
51
  Returns:
52
52
  `["langchain", "prompts", "few_shot_with_templates"]`
@@ -49,7 +49,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
49
49
 
50
50
  @classmethod
51
51
  def get_lc_namespace(cls) -> list[str]:
52
- """Get the namespace of the langchain object.
52
+ """Get the namespace of the LangChain object.
53
53
 
54
54
  Returns:
55
55
  `["langchain", "prompts", "image"]`
@@ -23,7 +23,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):
23
23
 
24
24
  @classmethod
25
25
  def get_lc_namespace(cls) -> list[str]:
26
- """Get the namespace of the langchain object.
26
+ """Get the namespace of the LangChain object.
27
27
 
28
28
  Returns:
29
29
  `["langchain", "prompts", "chat"]`
@@ -68,7 +68,7 @@ class BaseMessagePromptTemplate(Serializable, ABC):
68
68
  """Human-readable representation.
69
69
 
70
70
  Args:
71
- html: Whether to format as HTML. Defaults to `False`.
71
+ html: Whether to format as HTML.
72
72
 
73
73
  Returns:
74
74
  Human-readable representation.
@@ -66,7 +66,7 @@ class PromptTemplate(StringPromptTemplate):
66
66
  @classmethod
67
67
  @override
68
68
  def get_lc_namespace(cls) -> list[str]:
69
- """Get the namespace of the langchain object.
69
+ """Get the namespace of the LangChain object.
70
70
 
71
71
  Returns:
72
72
  `["langchain", "prompts", "prompt"]`
@@ -220,7 +220,7 @@ class PromptTemplate(StringPromptTemplate):
220
220
  example_separator: The separator to use in between examples. Defaults
221
221
  to two new line characters.
222
222
  prefix: String that should go before any examples. Generally includes
223
- examples. Default to an empty string.
223
+ examples.
224
224
 
225
225
  Returns:
226
226
  The final prompt generated.
@@ -275,13 +275,12 @@ class PromptTemplate(StringPromptTemplate):
275
275
  Args:
276
276
  template: The template to load.
277
277
  template_format: The format of the template. Use `jinja2` for jinja2,
278
- `mustache` for mustache, and `f-string` for f-strings.
279
- Defaults to `f-string`.
278
+ `mustache` for mustache, and `f-string` for f-strings.
280
279
  partial_variables: A dictionary of variables that can be used to partially
281
- fill in the template. For example, if the template is
282
- `"{variable1} {variable2}"`, and `partial_variables` is
283
- `{"variable1": "foo"}`, then the final prompt will be
284
- `"foo {variable2}"`.
280
+ fill in the template. For example, if the template is
281
+ `"{variable1} {variable2}"`, and `partial_variables` is
282
+ `{"variable1": "foo"}`, then the final prompt will be
283
+ `"foo {variable2}"`.
285
284
  **kwargs: Any other arguments to pass to the prompt template.
286
285
 
287
286
  Returns: