langchain 1.0.0a12__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. langchain/__init__.py +1 -1
  2. langchain/agents/__init__.py +7 -1
  3. langchain/agents/factory.py +722 -226
  4. langchain/agents/middleware/__init__.py +36 -9
  5. langchain/agents/middleware/_execution.py +388 -0
  6. langchain/agents/middleware/_redaction.py +350 -0
  7. langchain/agents/middleware/context_editing.py +46 -17
  8. langchain/agents/middleware/file_search.py +382 -0
  9. langchain/agents/middleware/human_in_the_loop.py +220 -173
  10. langchain/agents/middleware/model_call_limit.py +43 -10
  11. langchain/agents/middleware/model_fallback.py +79 -36
  12. langchain/agents/middleware/pii.py +68 -504
  13. langchain/agents/middleware/shell_tool.py +718 -0
  14. langchain/agents/middleware/summarization.py +2 -2
  15. langchain/agents/middleware/{planning.py → todo.py} +35 -16
  16. langchain/agents/middleware/tool_call_limit.py +308 -114
  17. langchain/agents/middleware/tool_emulator.py +200 -0
  18. langchain/agents/middleware/tool_retry.py +384 -0
  19. langchain/agents/middleware/tool_selection.py +25 -21
  20. langchain/agents/middleware/types.py +714 -257
  21. langchain/agents/structured_output.py +37 -27
  22. langchain/chat_models/__init__.py +7 -1
  23. langchain/chat_models/base.py +192 -190
  24. langchain/embeddings/__init__.py +13 -3
  25. langchain/embeddings/base.py +49 -29
  26. langchain/messages/__init__.py +50 -1
  27. langchain/tools/__init__.py +9 -7
  28. langchain/tools/tool_node.py +16 -1174
  29. langchain-1.0.4.dist-info/METADATA +92 -0
  30. langchain-1.0.4.dist-info/RECORD +34 -0
  31. langchain/_internal/__init__.py +0 -0
  32. langchain/_internal/_documents.py +0 -35
  33. langchain/_internal/_lazy_import.py +0 -35
  34. langchain/_internal/_prompts.py +0 -158
  35. langchain/_internal/_typing.py +0 -70
  36. langchain/_internal/_utils.py +0 -7
  37. langchain/agents/_internal/__init__.py +0 -1
  38. langchain/agents/_internal/_typing.py +0 -13
  39. langchain/agents/middleware/prompt_caching.py +0 -86
  40. langchain/documents/__init__.py +0 -7
  41. langchain/embeddings/cache.py +0 -361
  42. langchain/storage/__init__.py +0 -22
  43. langchain/storage/encoder_backed.py +0 -123
  44. langchain/storage/exceptions.py +0 -5
  45. langchain/storage/in_memory.py +0 -13
  46. langchain-1.0.0a12.dist-info/METADATA +0 -122
  47. langchain-1.0.0a12.dist-info/RECORD +0 -43
  48. {langchain-1.0.0a12.dist-info → langchain-1.0.4.dist-info}/WHEEL +0 -0
  49. {langchain-1.0.0a12.dist-info → langchain-1.0.4.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,92 @@
1
+ Metadata-Version: 2.4
2
+ Name: langchain
3
+ Version: 1.0.4
4
+ Summary: Building applications with LLMs through composability
5
+ Project-URL: Homepage, https://docs.langchain.com/
6
+ Project-URL: Documentation, https://reference.langchain.com/python/langchain/langchain/
7
+ Project-URL: Source, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
8
+ Project-URL: Changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
9
+ Project-URL: Twitter, https://x.com/LangChainAI
10
+ Project-URL: Slack, https://www.langchain.com/join-community
11
+ Project-URL: Reddit, https://www.reddit.com/r/LangChain/
12
+ License: MIT
13
+ License-File: LICENSE
14
+ Requires-Python: <4.0.0,>=3.10.0
15
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0
16
+ Requires-Dist: langgraph<1.1.0,>=1.0.2
17
+ Requires-Dist: pydantic<3.0.0,>=2.7.4
18
+ Provides-Extra: anthropic
19
+ Requires-Dist: langchain-anthropic; extra == 'anthropic'
20
+ Provides-Extra: aws
21
+ Requires-Dist: langchain-aws; extra == 'aws'
22
+ Provides-Extra: azure-ai
23
+ Requires-Dist: langchain-azure-ai; extra == 'azure-ai'
24
+ Provides-Extra: community
25
+ Requires-Dist: langchain-community; extra == 'community'
26
+ Provides-Extra: deepseek
27
+ Requires-Dist: langchain-deepseek; extra == 'deepseek'
28
+ Provides-Extra: fireworks
29
+ Requires-Dist: langchain-fireworks; extra == 'fireworks'
30
+ Provides-Extra: google-genai
31
+ Requires-Dist: langchain-google-genai; extra == 'google-genai'
32
+ Provides-Extra: google-vertexai
33
+ Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
34
+ Provides-Extra: groq
35
+ Requires-Dist: langchain-groq; extra == 'groq'
36
+ Provides-Extra: huggingface
37
+ Requires-Dist: langchain-huggingface; extra == 'huggingface'
38
+ Provides-Extra: mistralai
39
+ Requires-Dist: langchain-mistralai; extra == 'mistralai'
40
+ Provides-Extra: model-profiles
41
+ Requires-Dist: langchain-model-profiles; extra == 'model-profiles'
42
+ Provides-Extra: ollama
43
+ Requires-Dist: langchain-ollama; extra == 'ollama'
44
+ Provides-Extra: openai
45
+ Requires-Dist: langchain-openai; extra == 'openai'
46
+ Provides-Extra: perplexity
47
+ Requires-Dist: langchain-perplexity; extra == 'perplexity'
48
+ Provides-Extra: together
49
+ Requires-Dist: langchain-together; extra == 'together'
50
+ Provides-Extra: xai
51
+ Requires-Dist: langchain-xai; extra == 'xai'
52
+ Description-Content-Type: text/markdown
53
+
54
+ # 🦜️🔗 LangChain
55
+
56
+ [![PyPI - Version](https://img.shields.io/pypi/v/langchain?label=%20)](https://pypi.org/project/langchain/#history)
57
+ [![PyPI - License](https://img.shields.io/pypi/l/langchain)](https://opensource.org/licenses/MIT)
58
+ [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
59
+ [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
60
+
61
+ Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
62
+
63
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
64
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
65
+
66
+ ## Quick Install
67
+
68
+ ```bash
69
+ pip install langchain
70
+ ```
71
+
72
+ ## 🤔 What is this?
73
+
74
+ LangChain is the easiest way to start building agents and applications powered by LLMs. With under 10 lines of code, you can connect to OpenAI, Anthropic, Google, and [more](https://docs.langchain.com/oss/python/integrations/providers/overview). LangChain provides a pre-built agent architecture and model integrations to help you get started quickly and seamlessly incorporate LLMs into your agents and applications.
75
+
76
+ We recommend you use LangChain if you want to quickly build agents and autonomous applications. Use [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our low-level agent orchestration framework and runtime, when you have more advanced needs that require a combination of deterministic and agentic workflows, heavy customization, and carefully controlled latency.
77
+
78
+ LangChain [agents](https://docs.langchain.com/oss/python/langchain/agents) are built on top of LangGraph in order to provide durable execution, streaming, human-in-the-loop, persistence, and more. (You do not need to know LangGraph for basic LangChain agent usage.)
79
+
80
+ ## 📖 Documentation
81
+
82
+ For full documentation, see the [API reference](https://reference.langchain.com/python/langchain/langchain/).
83
+
84
+ ## 📕 Releases & Versioning
85
+
86
+ See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
87
+
88
+ ## 💁 Contributing
89
+
90
+ As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
91
+
92
+ For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
@@ -0,0 +1,34 @@
1
+ langchain/__init__.py,sha256=6ZpcmOCcqhKHHCh2pnjoWxfV5iHK24TAYhVsLU3QJZs,61
2
+ langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
+ langchain/agents/__init__.py,sha256=tDjbhFSC6XHQUZ_XvjHwmbICFfjxmE9xKFMbUVSIwfs,522
4
+ langchain/agents/factory.py,sha256=IZXTYMUCsqglVD9082kplSuhxpgYKoNlvjzTtTr8_aE,63985
5
+ langchain/agents/structured_output.py,sha256=nTVyron52uqRIaj2rVY5pJWGd-0mubdA15VR2htx6Mo,14083
6
+ langchain/agents/middleware/__init__.py,sha256=Vm-Ajh4YoaahAa9b_XEAuiyoupKNIwZVzX-8JN1xKkA,2251
7
+ langchain/agents/middleware/_execution.py,sha256=Xyjh3HxTHbgA-C9FFE4WXUOqKUW8mdOB455XRlA_BOU,14251
8
+ langchain/agents/middleware/_redaction.py,sha256=LJeNOvdZ0gd4273Lqgpbxh7EiuuZ6q5LlqeHK4eyin4,11210
9
+ langchain/agents/middleware/context_editing.py,sha256=suWxzSoBTgDse3n4myHooNJ8db7FJRzxZw_vLtoD_dw,8733
10
+ langchain/agents/middleware/file_search.py,sha256=RiBNJRfy8R5E8TvjQRVgXf1O0UDtXqEarirFPnihbtI,12757
11
+ langchain/agents/middleware/human_in_the_loop.py,sha256=N7Vt31rlHS7J-cA0EBDS2mlQW-SMvvxyAwjBnAY9vZU,12650
12
+ langchain/agents/middleware/model_call_limit.py,sha256=2tMKzXeRU2zH_Iu4flyk47Ycg4Al503tdGAltkCH_NE,7719
13
+ langchain/agents/middleware/model_fallback.py,sha256=5DhMhF-mpWlGyZ0FWTXjdtIw6VTpFG-OEkv_cVTtj80,4106
14
+ langchain/agents/middleware/pii.py,sha256=I3nTAnfvrHqre9SoRJvlw0koT8-x3gGZdSvb0uKH5xg,10978
15
+ langchain/agents/middleware/shell_tool.py,sha256=zKy8eTLhYoW5ogLpVBMQ1wK8lrfUCElCdG7gxvlKIeY,26635
16
+ langchain/agents/middleware/summarization.py,sha256=RgsRVuNEdf_2d-YM6P9txyi5eJW_xLteqMw1YmEDSXg,10313
17
+ langchain/agents/middleware/todo.py,sha256=ZH129wzq7nTWPg2A2SqEbGqZeaTa8w7DIbJluZ2irX0,9853
18
+ langchain/agents/middleware/tool_call_limit.py,sha256=N17JNRI4MC36TDFXv1cYQbWZOF8FtYToAjGSJZLjXWc,17682
19
+ langchain/agents/middleware/tool_emulator.py,sha256=jCgojSbb5EYPk6BmOzN0tAkEeRpzvDcAVky7duQkQG0,7209
20
+ langchain/agents/middleware/tool_retry.py,sha256=xeR_i48DnHRZGKLQuRs6gry9Kx0q_x2S0hcnnftPnBs,13795
21
+ langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
22
+ langchain/agents/middleware/types.py,sha256=6kPHwSwUU47QsW_O_X5Y9XP56_qJmXG9CfjoXtciX94,55873
23
+ langchain/chat_models/__init__.py,sha256=lQwcJkHtGjrclCL7sBFocQuzRdRgZRPzIIPnGhEJmVQ,533
24
+ langchain/chat_models/base.py,sha256=qkLfLXx1Cc836quJk05CvjFIa8OaEYeDg53qUShS6jw,36527
25
+ langchain/embeddings/__init__.py,sha256=6gZ5HnxoKdlYNG84vf6kuJi6vA59Saa3WZ2vMn0LYxY,850
26
+ langchain/embeddings/base.py,sha256=LTaC-CTPnluyo6wBDygQUwZSpa9ms3A1fFMabT__U2w,8921
27
+ langchain/messages/__init__.py,sha256=IL1zvUHXvJ__3N80hhXhPj10KY0Drtq9K0CkK4uvVxU,1868
28
+ langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
29
+ langchain/tools/__init__.py,sha256=hMzbaGcfHhNYfJx20uV57uMd9a-gNLbmopG4gDReeEc,628
30
+ langchain/tools/tool_node.py,sha256=1DRMsm5tc31T76rtqtqJkGINw7ny1zqVCF-ViGUymFs,477
31
+ langchain-1.0.4.dist-info/METADATA,sha256=e60jqwezLwnstgJQTEjryaVeQYmcXiP2kUgDRrBRmlU,4890
32
+ langchain-1.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
33
+ langchain-1.0.4.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
34
+ langchain-1.0.4.dist-info/RECORD,,
File without changes
@@ -1,35 +0,0 @@
1
- """Internal document utilities."""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import TYPE_CHECKING
6
-
7
- if TYPE_CHECKING:
8
- from langchain_core.documents import Document
9
-
10
-
11
- def format_document_xml(doc: Document) -> str:
12
- """Format a document as XML-like structure for LLM consumption.
13
-
14
- Args:
15
- doc: Document to format
16
-
17
- Returns:
18
- Document wrapped in XML tags:
19
- <document>
20
- <id>...</id>
21
- <content>...</content>
22
- <metadata>...</metadata>
23
- </document>
24
-
25
- !!! note
26
- Does not generate valid XML or escape special characters. Intended for
27
- semi-structured LLM input only.
28
-
29
- """
30
- id_str = f"<id>{doc.id}</id>" if doc.id is not None else "<id></id>"
31
- metadata_str = ""
32
- if doc.metadata:
33
- metadata_items = [f"{k}: {v!s}" for k, v in doc.metadata.items()]
34
- metadata_str = f"<metadata>{', '.join(metadata_items)}</metadata>"
35
- return f"<document>{id_str}<content>{doc.page_content}</content>{metadata_str}</document>"
@@ -1,35 +0,0 @@
1
- """Lazy import utilities."""
2
-
3
- from importlib import import_module
4
-
5
-
6
- def import_attr(
7
- attr_name: str,
8
- module_name: str | None,
9
- package: str | None,
10
- ) -> object:
11
- """Import an attribute from a module located in a package.
12
-
13
- This utility function is used in custom __getattr__ methods within __init__.py
14
- files to dynamically import attributes.
15
-
16
- Args:
17
- attr_name: The name of the attribute to import.
18
- module_name: The name of the module to import from. If None, the attribute
19
- is imported from the package itself.
20
- package: The name of the package where the module is located.
21
- """
22
- if module_name == "__module__" or module_name is None:
23
- try:
24
- result = import_module(f".{attr_name}", package=package)
25
- except ModuleNotFoundError:
26
- msg = f"module '{package!r}' has no attribute {attr_name!r}"
27
- raise AttributeError(msg) from None
28
- else:
29
- try:
30
- module = import_module(f".{module_name}", package=package)
31
- except ModuleNotFoundError as err:
32
- msg = f"module '{package!r}.{module_name!r}' not found ({err})"
33
- raise ImportError(msg) from None
34
- result = getattr(module, attr_name)
35
- return result
@@ -1,158 +0,0 @@
1
- """Internal prompt resolution utilities.
2
-
3
- This module provides utilities for resolving different types of prompt specifications
4
- into standardized message formats for language models. It supports both synchronous
5
- and asynchronous prompt resolution with automatic detection of callable types.
6
-
7
- The module is designed to handle common prompt patterns across LangChain components,
8
- particularly for summarization chains and other document processing workflows.
9
-
10
- """
11
-
12
- from __future__ import annotations
13
-
14
- import inspect
15
- from typing import TYPE_CHECKING
16
-
17
- if TYPE_CHECKING:
18
- from collections.abc import Awaitable, Callable
19
-
20
- from langchain_core.messages import MessageLikeRepresentation
21
- from langgraph.runtime import Runtime
22
-
23
- from langchain._internal._typing import ContextT, StateT
24
-
25
-
26
- def resolve_prompt(
27
- prompt: str | None | Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
28
- state: StateT,
29
- runtime: Runtime[ContextT],
30
- default_user_content: str,
31
- default_system_content: str,
32
- ) -> list[MessageLikeRepresentation]:
33
- """Resolve a prompt specification into a list of messages.
34
-
35
- Handles prompt resolution across different strategies. Supports callable functions,
36
- string system messages, and None for default behavior.
37
-
38
- Args:
39
- prompt: The prompt specification to resolve. Can be:
40
- - Callable: Function taking (state, runtime) returning message list.
41
- - str: A system message string.
42
- - None: Use the provided default system message.
43
- state: Current state, passed to callable prompts.
44
- runtime: LangGraph runtime instance, passed to callable prompts.
45
- default_user_content: User content to include (e.g., document text).
46
- default_system_content: Default system message when prompt is None.
47
-
48
- Returns:
49
- List of message dictionaries for language models, typically containing
50
- a system message and user message with content.
51
-
52
- Raises:
53
- TypeError: If prompt type is not str, None, or callable.
54
-
55
- Example:
56
- ```python
57
- def custom_prompt(state, runtime):
58
- return [{"role": "system", "content": "Custom"}]
59
-
60
-
61
- messages = resolve_prompt(custom_prompt, state, runtime, "content", "default")
62
- messages = resolve_prompt("Custom system", state, runtime, "content", "default")
63
- messages = resolve_prompt(None, state, runtime, "content", "Default")
64
- ```
65
-
66
- !!! note
67
- Callable prompts have full control over message structure and content parameter
68
- is ignored. String/None prompts create standard system + user structure.
69
-
70
- """
71
- if callable(prompt):
72
- return prompt(state, runtime)
73
- if isinstance(prompt, str):
74
- system_msg = prompt
75
- elif prompt is None:
76
- system_msg = default_system_content
77
- else:
78
- msg = f"Invalid prompt type: {type(prompt)}. Expected str, None, or callable."
79
- raise TypeError(msg)
80
-
81
- return [
82
- {"role": "system", "content": system_msg},
83
- {"role": "user", "content": default_user_content},
84
- ]
85
-
86
-
87
- async def aresolve_prompt(
88
- prompt: str
89
- | None
90
- | Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]]
91
- | Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]],
92
- state: StateT,
93
- runtime: Runtime[ContextT],
94
- default_user_content: str,
95
- default_system_content: str,
96
- ) -> list[MessageLikeRepresentation]:
97
- """Async version of resolve_prompt supporting both sync and async callables.
98
-
99
- Handles prompt resolution across different strategies. Supports sync/async callable
100
- functions, string system messages, and None for default behavior.
101
-
102
- Args:
103
- prompt: The prompt specification to resolve. Can be:
104
- - Callable (sync): Function taking (state, runtime) returning message list.
105
- - Callable (async): Async function taking (state, runtime) returning
106
- awaitable message list.
107
- - str: A system message string.
108
- - None: Use the provided default system message.
109
- state: Current state, passed to callable prompts.
110
- runtime: LangGraph runtime instance, passed to callable prompts.
111
- default_user_content: User content to include (e.g., document text).
112
- default_system_content: Default system message when prompt is None.
113
-
114
- Returns:
115
- List of message dictionaries for language models, typically containing
116
- a system message and user message with content.
117
-
118
- Raises:
119
- TypeError: If prompt type is not str, None, or callable.
120
-
121
- Example:
122
- ```python
123
- async def async_prompt(state, runtime):
124
- return [{"role": "system", "content": "Async"}]
125
-
126
-
127
- def sync_prompt(state, runtime):
128
- return [{"role": "system", "content": "Sync"}]
129
-
130
-
131
- messages = await aresolve_prompt(async_prompt, state, runtime, "content", "default")
132
- messages = await aresolve_prompt(sync_prompt, state, runtime, "content", "default")
133
- messages = await aresolve_prompt("Custom", state, runtime, "content", "default")
134
- ```
135
-
136
- !!! note
137
- Callable prompts have full control over message structure and content parameter
138
- is ignored. Automatically detects and handles async callables.
139
-
140
- """
141
- if callable(prompt):
142
- result = prompt(state, runtime)
143
- # Check if the result is awaitable (async function)
144
- if inspect.isawaitable(result):
145
- return await result
146
- return result
147
- if isinstance(prompt, str):
148
- system_msg = prompt
149
- elif prompt is None:
150
- system_msg = default_system_content
151
- else:
152
- msg = f"Invalid prompt type: {type(prompt)}. Expected str, None, or callable."
153
- raise TypeError(msg)
154
-
155
- return [
156
- {"role": "system", "content": system_msg},
157
- {"role": "user", "content": default_user_content},
158
- ]
@@ -1,70 +0,0 @@
1
- """Private typing utilities for langchain."""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import TYPE_CHECKING, Any, ClassVar, Protocol, TypeAlias, TypeVar
6
-
7
- from langgraph.graph._node import StateNode
8
- from pydantic import BaseModel
9
-
10
- if TYPE_CHECKING:
11
- from dataclasses import Field
12
-
13
-
14
- class TypedDictLikeV1(Protocol):
15
- """Protocol to represent types that behave like ``TypedDict``s.
16
-
17
- Version 1: using ``ClassVar`` for keys.
18
-
19
- """
20
-
21
- __required_keys__: ClassVar[frozenset[str]]
22
- __optional_keys__: ClassVar[frozenset[str]]
23
-
24
-
25
- class TypedDictLikeV2(Protocol):
26
- """Protocol to represent types that behave like ``TypedDict``s.
27
-
28
- Version 2: not using ``ClassVar`` for keys.
29
-
30
- """
31
-
32
- __required_keys__: frozenset[str]
33
- __optional_keys__: frozenset[str]
34
-
35
-
36
- class DataclassLike(Protocol):
37
- """Protocol to represent types that behave like dataclasses.
38
-
39
- Inspired by the private ``_DataclassT`` from dataclasses that uses a similar
40
- protocol as a bound.
41
-
42
- """
43
-
44
- __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
45
-
46
-
47
- StateLike: TypeAlias = TypedDictLikeV1 | TypedDictLikeV2 | DataclassLike | BaseModel
48
- """Type alias for state-like types.
49
-
50
- It can either be a ``TypedDict``, ``dataclass``, or Pydantic ``BaseModel``.
51
-
52
- !!! note
53
- We cannot use either ``TypedDict`` or ``dataclass`` directly due to limitations in
54
- type checking.
55
-
56
- """
57
-
58
- StateT = TypeVar("StateT", bound=StateLike)
59
- """Type variable used to represent the state in a graph."""
60
-
61
- ContextT = TypeVar("ContextT", bound=StateLike | None)
62
- """Type variable for context types."""
63
-
64
-
65
- __all__ = [
66
- "ContextT",
67
- "StateLike",
68
- "StateNode",
69
- "StateT",
70
- ]
@@ -1,7 +0,0 @@
1
- # Re-exporting internal utilities from LangGraph for internal use in LangChain.
2
- # A different wrapper needs to be created for this purpose in LangChain.
3
- from langgraph._internal._runnable import RunnableCallable
4
-
5
- __all__ = [
6
- "RunnableCallable",
7
- ]
@@ -1 +0,0 @@
1
- """Internal utilities for agents."""
@@ -1,13 +0,0 @@
1
- """Typing utilities for agents."""
2
-
3
- from __future__ import annotations
4
-
5
- from collections.abc import Awaitable, Callable
6
- from typing import TypeVar
7
-
8
- from typing_extensions import ParamSpec
9
-
10
- P = ParamSpec("P")
11
- R = TypeVar("R")
12
-
13
- SyncOrAsync = Callable[P, R | Awaitable[R]]
@@ -1,86 +0,0 @@
1
- """Anthropic prompt caching middleware."""
2
-
3
- from typing import Literal
4
- from warnings import warn
5
-
6
- from langgraph.runtime import Runtime
7
-
8
- from langchain.agents.middleware.types import AgentMiddleware, AgentState, ModelRequest
9
-
10
-
11
- class AnthropicPromptCachingMiddleware(AgentMiddleware):
12
- """Prompt Caching Middleware.
13
-
14
- Optimizes API usage by caching conversation prefixes for Anthropic models.
15
-
16
- Learn more about Anthropic prompt caching
17
- `here <https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching>`__.
18
- """
19
-
20
- def __init__(
21
- self,
22
- type: Literal["ephemeral"] = "ephemeral",
23
- ttl: Literal["5m", "1h"] = "5m",
24
- min_messages_to_cache: int = 0,
25
- unsupported_model_behavior: Literal["ignore", "warn", "raise"] = "warn",
26
- ) -> None:
27
- """Initialize the middleware with cache control settings.
28
-
29
- Args:
30
- type: The type of cache to use, only "ephemeral" is supported.
31
- ttl: The time to live for the cache, only "5m" and "1h" are supported.
32
- min_messages_to_cache: The minimum number of messages until the cache is used,
33
- default is 0.
34
- unsupported_model_behavior: The behavior to take when an unsupported model is used.
35
- "ignore" will ignore the unsupported model and continue without caching.
36
- "warn" will warn the user and continue without caching.
37
- "raise" will raise an error and stop the agent.
38
- """
39
- self.type = type
40
- self.ttl = ttl
41
- self.min_messages_to_cache = min_messages_to_cache
42
- self.unsupported_model_behavior = unsupported_model_behavior
43
-
44
- def modify_model_request(
45
- self,
46
- request: ModelRequest,
47
- state: AgentState, # noqa: ARG002
48
- runtime: Runtime, # noqa: ARG002
49
- ) -> ModelRequest:
50
- """Modify the model request to add cache control blocks."""
51
- try:
52
- from langchain_anthropic import ChatAnthropic
53
- except ImportError:
54
- ChatAnthropic = None # noqa: N806
55
-
56
- msg: str | None = None
57
-
58
- if ChatAnthropic is None:
59
- msg = (
60
- "AnthropicPromptCachingMiddleware caching middleware only supports "
61
- "Anthropic models. "
62
- "Please install langchain-anthropic."
63
- )
64
- elif not isinstance(request.model, ChatAnthropic):
65
- msg = (
66
- "AnthropicPromptCachingMiddleware caching middleware only supports "
67
- f"Anthropic models, not instances of {type(request.model)}"
68
- )
69
-
70
- if msg is not None:
71
- if self.unsupported_model_behavior == "raise":
72
- raise ValueError(msg)
73
- if self.unsupported_model_behavior == "warn":
74
- warn(msg, stacklevel=3)
75
- else:
76
- return request
77
-
78
- messages_count = (
79
- len(request.messages) + 1 if request.system_prompt else len(request.messages)
80
- )
81
- if messages_count < self.min_messages_to_cache:
82
- return request
83
-
84
- request.model_settings["cache_control"] = {"type": self.type, "ttl": self.ttl}
85
-
86
- return request
@@ -1,7 +0,0 @@
1
- """Document."""
2
-
3
- from langchain_core.documents import Document
4
-
5
- __all__ = [
6
- "Document",
7
- ]