langchain-core 1.0.7__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/callbacks/manager.py +14 -14
- langchain_core/callbacks/usage.py +1 -1
- langchain_core/indexing/api.py +2 -0
- langchain_core/language_models/__init__.py +15 -5
- langchain_core/language_models/_utils.py +1 -0
- langchain_core/language_models/chat_models.py +74 -94
- langchain_core/language_models/llms.py +5 -3
- langchain_core/language_models/model_profile.py +84 -0
- langchain_core/load/load.py +14 -1
- langchain_core/messages/ai.py +12 -4
- langchain_core/messages/base.py +6 -6
- langchain_core/messages/block_translators/anthropic.py +27 -8
- langchain_core/messages/block_translators/bedrock_converse.py +18 -8
- langchain_core/messages/block_translators/google_genai.py +25 -10
- langchain_core/messages/content.py +1 -1
- langchain_core/messages/tool.py +28 -27
- langchain_core/messages/utils.py +45 -18
- langchain_core/output_parsers/openai_tools.py +9 -7
- langchain_core/output_parsers/pydantic.py +1 -1
- langchain_core/output_parsers/string.py +27 -1
- langchain_core/prompts/chat.py +22 -17
- langchain_core/prompts/string.py +2 -59
- langchain_core/prompts/structured.py +7 -1
- langchain_core/runnables/base.py +174 -160
- langchain_core/runnables/branch.py +1 -1
- langchain_core/runnables/config.py +25 -20
- langchain_core/runnables/fallbacks.py +1 -2
- langchain_core/runnables/passthrough.py +2 -2
- langchain_core/tools/base.py +23 -4
- langchain_core/tools/convert.py +16 -0
- langchain_core/tools/retriever.py +29 -58
- langchain_core/tracers/event_stream.py +9 -4
- langchain_core/utils/aiter.py +3 -1
- langchain_core/utils/function_calling.py +7 -2
- langchain_core/utils/json_schema.py +29 -21
- langchain_core/utils/pydantic.py +7 -7
- langchain_core/utils/uuid.py +54 -0
- langchain_core/vectorstores/base.py +26 -18
- langchain_core/version.py +1 -1
- {langchain_core-1.0.7.dist-info → langchain_core-1.2.1.dist-info}/METADATA +2 -1
- {langchain_core-1.0.7.dist-info → langchain_core-1.2.1.dist-info}/RECORD +42 -40
- {langchain_core-1.0.7.dist-info → langchain_core-1.2.1.dist-info}/WHEEL +1 -1
langchain_core/callbacks/manager.py
CHANGED

@@ -6,7 +6,6 @@ import asyncio
 import atexit
 import functools
 import logging
-import uuid
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from concurrent.futures import ThreadPoolExecutor
@@ -41,6 +40,7 @@ from langchain_core.tracers.context import (
 from langchain_core.tracers.langchain import LangChainTracer
 from langchain_core.tracers.stdout import ConsoleCallbackHandler
 from langchain_core.utils.env import env_var_is_set
+from langchain_core.utils.uuid import uuid7

 if TYPE_CHECKING:
     from collections.abc import AsyncGenerator, Coroutine, Generator, Sequence
@@ -504,7 +504,7 @@ class BaseRunManager(RunManagerMixin):

         """
         return cls(
-            run_id=uuid.uuid4(),
+            run_id=uuid7(),
             handlers=[],
             inheritable_handlers=[],
             tags=[],
@@ -1330,7 +1330,7 @@ class CallbackManager(BaseCallbackManager):
         managers = []
         for i, prompt in enumerate(prompts):
             # Can't have duplicate runs with the same run ID (if provided)
-            run_id_ = run_id if i == 0 and run_id is not None else uuid.uuid4()
+            run_id_ = run_id if i == 0 and run_id is not None else uuid7()
             handle_event(
                 self.handlers,
                 "on_llm_start",
@@ -1384,7 +1384,7 @@ class CallbackManager(BaseCallbackManager):
             run_id_ = run_id
             run_id = None
         else:
-            run_id_ = uuid.uuid4()
+            run_id_ = uuid7()
         handle_event(
             self.handlers,
             "on_chat_model_start",
@@ -1433,7 +1433,7 @@ class CallbackManager(BaseCallbackManager):

         """
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()
         handle_event(
             self.handlers,
             "on_chain_start",
@@ -1488,7 +1488,7 @@ class CallbackManager(BaseCallbackManager):

         """
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         handle_event(
             self.handlers,
@@ -1537,7 +1537,7 @@ class CallbackManager(BaseCallbackManager):
             The callback manager for the retriever run.
         """
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         handle_event(
             self.handlers,
@@ -1594,7 +1594,7 @@ class CallbackManager(BaseCallbackManager):
             )
             raise ValueError(msg)
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         handle_event(
             self.handlers,
@@ -1816,7 +1816,7 @@ class AsyncCallbackManager(BaseCallbackManager):
             run_id_ = run_id
             run_id = None
         else:
-            run_id_ = uuid.uuid4()
+            run_id_ = uuid7()

         if inline_handlers:
             inline_tasks.append(
@@ -1900,7 +1900,7 @@ class AsyncCallbackManager(BaseCallbackManager):
             run_id_ = run_id
             run_id = None
         else:
-            run_id_ = uuid.uuid4()
+            run_id_ = uuid7()

         for handler in self.handlers:
             task = ahandle_event(
@@ -1962,7 +1962,7 @@ class AsyncCallbackManager(BaseCallbackManager):
             The async callback manager for the chain run.
         """
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         await ahandle_event(
             self.handlers,
@@ -2010,7 +2010,7 @@ class AsyncCallbackManager(BaseCallbackManager):
             The async callback manager for the tool run.
         """
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         await ahandle_event(
             self.handlers,
@@ -2060,7 +2060,7 @@ class AsyncCallbackManager(BaseCallbackManager):
         if not self.handlers:
             return
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         if kwargs:
             msg = (
@@ -2102,7 +2102,7 @@ class AsyncCallbackManager(BaseCallbackManager):
             The async callback manager for the retriever run.
         """
         if run_id is None:
-            run_id = uuid.uuid4()
+            run_id = uuid7()

         await ahandle_event(
             self.handlers,
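All of the replacements above swap `uuid.uuid4()` for the new `uuid7()` helper (added in `langchain_core/utils/uuid.py`, per the file list). A minimal sketch of the practical difference, assuming `uuid7()` takes no arguments (as the hunks show) and returns a standard `uuid.UUID`:

```python
import time

from langchain_core.utils.uuid import uuid7

# UUIDv7 embeds a Unix-millisecond timestamp in its high bits, so IDs
# minted in different milliseconds compare in creation order.
first = uuid7()
time.sleep(0.002)  # ensure a later timestamp bucket
second = uuid7()

assert first.version == 7
assert first < second  # time-ordered, unlike random uuid4() run IDs
```

Time-ordered run IDs keep runs roughly sortable by creation time, which helps tracing backends; ordering within a single millisecond depends on the implementation.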
langchain_core/callbacks/usage.py
CHANGED

@@ -95,7 +95,7 @@ def get_usage_metadata_callback(
     """Get usage metadata callback.

     Get context manager for tracking usage metadata across chat model calls using
-    `AIMessage.usage_metadata`.
+    [`AIMessage.usage_metadata`][langchain.messages.AIMessage.usage_metadata].

     Args:
         name: The name of the context variable.
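For reference, a short usage sketch of the context manager this docstring documents; `model` is a placeholder for any chat model instance:

```python
from langchain_core.callbacks import get_usage_metadata_callback

# `model` is a placeholder chat model; real usage_metadata is populated
# by provider integrations on each AIMessage.
with get_usage_metadata_callback() as cb:
    model.invoke("Hello")
    model.invoke("Goodbye")

# Aggregated token counts across both calls, keyed by model name.
print(cb.usage_metadata)
```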
langchain_core/indexing/api.py
CHANGED

@@ -302,6 +302,7 @@ def index(
             are not able to specify the uid of the document.

     !!! warning "Behavior changed in `langchain-core` 0.3.25"
+
         Added `scoped_full` cleanup mode.

     !!! warning
@@ -640,6 +641,7 @@ async def aindex(
             are not able to specify the uid of the document.

     !!! warning "Behavior changed in `langchain-core` 0.3.25"
+
         Added `scoped_full` cleanup mode.

     !!! warning
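Both hunks only reflow the changelog note for the `scoped_full` cleanup mode. As a reminder of what that mode does, a hedged call sketch: the keyword arguments follow the `index()` API, while `docs`, `record_manager`, and `vector_store` are placeholders.

```python
from langchain_core.indexing import index

# `record_manager` remembers what earlier runs wrote; `vector_store` is
# any VectorStore. Both are placeholders here.
result = index(
    docs,
    record_manager,
    vector_store,
    cleanup="scoped_full",   # delete stale docs, but only for sources in this batch
    source_id_key="source",  # metadata key identifying each document's source
)
print(result)  # counts of documents added / updated / skipped / deleted
```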
langchain_core/language_models/__init__.py
CHANGED

@@ -1,7 +1,7 @@
-"""
+"""Core language model abstractions.

 LangChain has two main classes to work with language models: chat models and
-"old-fashioned" LLMs.
+"old-fashioned" LLMs (string-in, string-out).

 **Chat models**

@@ -11,14 +11,16 @@ as outputs (as opposed to using plain text).
 Chat models support the assignment of distinct roles to conversation messages, helping
 to distinguish messages from the AI, users, and instructions such as system messages.

-The key abstraction for chat models is
-
+The key abstraction for chat models is
+[`BaseChatModel`][langchain_core.language_models.BaseChatModel]. Implementations should
+inherit from this class.

 See existing [chat model integrations](https://docs.langchain.com/oss/python/integrations/chat).

-**LLMs**
+**LLMs (legacy)**

 Language models that takes a string as input and returns a string.
+
 These are traditionally older models (newer models generally are chat models).

 Although the underlying models are string in, string out, the LangChain wrappers also
@@ -53,6 +55,10 @@ if TYPE_CHECKING:
         ParrotFakeChatModel,
     )
     from langchain_core.language_models.llms import LLM, BaseLLM
+    from langchain_core.language_models.model_profile import (
+        ModelProfile,
+        ModelProfileRegistry,
+    )

 __all__ = (
     "LLM",
@@ -68,6 +74,8 @@ __all__ = (
     "LanguageModelInput",
     "LanguageModelLike",
     "LanguageModelOutput",
+    "ModelProfile",
+    "ModelProfileRegistry",
     "ParrotFakeChatModel",
     "SimpleChatModel",
     "get_tokenizer",
@@ -90,6 +98,8 @@ _dynamic_imports = {
     "GenericFakeChatModel": "fake_chat_models",
     "ParrotFakeChatModel": "fake_chat_models",
     "LLM": "llms",
+    "ModelProfile": "model_profile",
+    "ModelProfileRegistry": "model_profile",
     "BaseLLM": "llms",
     "is_openai_data_block": "_utils",
 }
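The `_dynamic_imports` table extended here drives the module's lazy imports. A minimal sketch of the PEP 562 pattern as it would sit in a package `__init__.py` (the `__getattr__` below illustrates the general technique and is not quoted from the package):

```python
import importlib
from typing import Any

_dynamic_imports = {
    "ModelProfile": "model_profile",  # exported name -> owning submodule
    "ModelProfileRegistry": "model_profile",
}


def __getattr__(name: str) -> Any:
    # PEP 562: invoked only when normal lookup fails, so each submodule
    # is imported on first attribute access rather than at package import.
    module_name = _dynamic_imports.get(name)
    if module_name is None:
        msg = f"module {__name__!r} has no attribute {name!r}"
        raise AttributeError(msg)
    module = importlib.import_module(f".{module_name}", __package__)
    return getattr(module, name)
```

This keeps `import langchain_core.language_models` cheap even as the number of exported symbols grows.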
langchain_core/language_models/_utils.py
CHANGED

@@ -140,6 +140,7 @@ def _normalize_messages(
     - LangChain v0 standard content blocks for backward compatibility

     !!! warning "Behavior changed in `langchain-core` 1.0.0"
+
         In previous versions, this function returned messages in LangChain v0 format.
         Now, it returns messages in LangChain v1 format, which upgraded chat models now
         expect to receive when passing back in message history. For backward
langchain_core/language_models/chat_models.py
CHANGED

@@ -15,7 +15,6 @@ from typing import TYPE_CHECKING, Any, Literal, cast
 from pydantic import BaseModel, ConfigDict, Field
 from typing_extensions import override

-from langchain_core._api.beta_decorator import beta
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import (
     AsyncCallbackManager,
@@ -34,6 +33,7 @@ from langchain_core.language_models.base import (
     LangSmithParams,
     LanguageModelInput,
 )
+from langchain_core.language_models.model_profile import ModelProfile
 from langchain_core.load import dumpd, dumps
 from langchain_core.messages import (
     AIMessage,
@@ -76,8 +76,6 @@ from langchain_core.utils.utils import LC_ID_PREFIX, from_env
 if TYPE_CHECKING:
     import uuid

-    from langchain_model_profiles import ModelProfile  # type: ignore[import-untyped]
-
     from langchain_core.output_parsers.base import OutputParserLike
     from langchain_core.runnables import Runnable, RunnableConfig
     from langchain_core.tools import BaseTool
@@ -335,8 +333,23 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     [`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
     field to roll out new content formats in a backward-compatible way.

-    !!! version-added "Added in `langchain-core` 1.0"
+    !!! version-added "Added in `langchain-core` 1.0.0"
+
+    """
+
+    profile: ModelProfile | None = Field(default=None, exclude=True)
+    """Profile detailing model capabilities.
+
+    !!! warning "Beta feature"
+        This is a beta feature. The format of model profiles is subject to change.
+
+    If not specified, automatically loaded from the provider package on initialization
+    if data is available.
+
+    Example profile data includes context window sizes, supported modalities, or support
+    for tool calling, structured output, and other features.

+    !!! version-added "Added in `langchain-core` 1.1.0"
     """

     model_config = ConfigDict(
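Together with the removal of the old `profile` property further down, these hunks turn `profile` into a plain field that provider packages populate at initialization. A hedged reading sketch, reusing the `ChatModel` placeholder from the docstring examples in this file:

```python
# `ChatModel` stands in for a concrete integration class, as in the
# docstring examples in this file.
model = ChatModel(model="model-name")

# `profile` may be None when the provider ships no profile data.
if model.profile is not None:
    # ModelProfile is a total=False TypedDict: a missing key means
    # "unknown", not "unsupported", so read with .get().
    if not model.profile.get("tool_calling"):
        print("profile does not advertise tool calling")
    context_window = model.profile.get("max_input_tokens")
```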
@@ -1565,88 +1578,89 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 depends on the `schema` as described above.
             - `'parsing_error'`: `BaseException | None`

-
+        ???+ example "Pydantic schema (`include_raw=False`)"

-
-
+            ```python
+            from pydantic import BaseModel


-
-
+            class AnswerWithJustification(BaseModel):
+                '''An answer to the user question along with justification for the answer.'''

-
-
+                answer: str
+                justification: str


-
-
+            model = ChatModel(model="model-name", temperature=0)
+            structured_model = model.with_structured_output(AnswerWithJustification)

-
-
-
+            structured_model.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )

-
-
-
-
-
+            # -> AnswerWithJustification(
+            #     answer='They weigh the same',
+            #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
+            # )
+            ```

-
+        ??? example "Pydantic schema (`include_raw=True`)"

-
-
+            ```python
+            from pydantic import BaseModel


-
-
+            class AnswerWithJustification(BaseModel):
+                '''An answer to the user question along with justification for the answer.'''

-
-
+                answer: str
+                justification: str


-
-
-
-
+            model = ChatModel(model="model-name", temperature=0)
+            structured_model = model.with_structured_output(
+                AnswerWithJustification, include_raw=True
+            )

-
-
-
-
-
-
-
-
-
+            structured_model.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
+            # -> {
+            #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
+            #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
+            #     'parsing_error': None
+            # }
+            ```

-
+        ??? example "Dictionary schema (`include_raw=False`)"

-
-
+            ```python
+            from pydantic import BaseModel
+            from langchain_core.utils.function_calling import convert_to_openai_tool


-
-
+            class AnswerWithJustification(BaseModel):
+                '''An answer to the user question along with justification for the answer.'''

-
-
+                answer: str
+                justification: str


-
-
-
+            dict_schema = convert_to_openai_tool(AnswerWithJustification)
+            model = ChatModel(model="model-name", temperature=0)
+            structured_model = model.with_structured_output(dict_schema)

-
-
-
-
-
-
-
-
+            structured_model.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
+            # -> {
+            #     'answer': 'They weigh the same',
+            #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
+            # }
+            ```

         !!! warning "Behavior changed in `langchain-core` 0.2.26"
+
             Added support for `TypedDict` class.

         """  # noqa: E501
@@ -1688,40 +1702,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             return RunnableMap(raw=llm) | parser_with_fallback
         return llm | output_parser

-    @property
-    @beta()
-    def profile(self) -> ModelProfile:
-        """Return profiling information for the model.
-
-        This property relies on the `langchain-model-profiles` package to retrieve chat
-        model capabilities, such as context window sizes and supported features.
-
-        Raises:
-            ImportError: If `langchain-model-profiles` is not installed.
-
-        Returns:
-            A `ModelProfile` object containing profiling information for the model.
-        """
-        try:
-            from langchain_model_profiles import get_model_profile  # noqa: PLC0415
-        except ImportError as err:
-            informative_error_message = (
-                "To access model profiling information, please install the "
-                "`langchain-model-profiles` package: "
-                "`pip install langchain-model-profiles`."
-            )
-            raise ImportError(informative_error_message) from err
-
-        provider_id = self._llm_type
-        model_name = (
-            # Model name is not standardized across integrations. New integrations
-            # should prefer `model`.
-            getattr(self, "model", None)
-            or getattr(self, "model_name", None)
-            or getattr(self, "model_id", "")
-        )
-        return get_model_profile(provider_id, model_name) or {}
-

 class SimpleChatModel(BaseChatModel):
     """Simplified implementation for a chat model to inherit from.
langchain_core/language_models/llms.py
CHANGED

@@ -61,6 +61,8 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+_background_tasks: set[asyncio.Task] = set()
+

 @functools.lru_cache
 def _log_error_once(msg: str) -> None:
@@ -100,9 +102,9 @@ def create_base_retry_decorator(
                 asyncio.run(coro)
             else:
                 if loop.is_running():
-
-
-
+                    task = loop.create_task(coro)
+                    _background_tasks.add(task)
+                    task.add_done_callback(_background_tasks.discard)
                 else:
                     asyncio.run(coro)
         except Exception as e:
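The three added lines apply the standard asyncio fix for fire-and-forget tasks: the event loop holds only weak references to tasks, so a task with no other referent can be garbage-collected before it runs. A self-contained sketch of the same idiom:

```python
import asyncio

_background_tasks: set[asyncio.Task] = set()


async def log_event(msg: str) -> None:
    await asyncio.sleep(0)  # stand-in for real async work
    print(msg)


def fire_and_forget(coro) -> None:
    task = asyncio.get_running_loop().create_task(coro)
    # Keep a strong reference so the pending task cannot be collected,
    # then drop it automatically once the task completes.
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)


async def main() -> None:
    fire_and_forget(log_event("callback fired"))
    await asyncio.sleep(0.1)  # give the background task time to finish


asyncio.run(main())
```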
langchain_core/language_models/model_profile.py
ADDED

@@ -0,0 +1,84 @@
+"""Model profile types and utilities."""
+
+from typing_extensions import TypedDict
+
+
+class ModelProfile(TypedDict, total=False):
+    """Model profile.
+
+    !!! warning "Beta feature"
+        This is a beta feature. The format of model profiles is subject to change.
+
+    Provides information about chat model capabilities, such as context window sizes
+    and supported features.
+    """
+
+    # --- Input constraints ---
+
+    max_input_tokens: int
+    """Maximum context window (tokens)"""
+
+    image_inputs: bool
+    """Whether image inputs are supported."""
+    # TODO: add more detail about formats?
+
+    image_url_inputs: bool
+    """Whether [image URL inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+
+    pdf_inputs: bool
+    """Whether [PDF inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+    # TODO: add more detail about formats? e.g. bytes or base64
+
+    audio_inputs: bool
+    """Whether [audio inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+    # TODO: add more detail about formats? e.g. bytes or base64
+
+    video_inputs: bool
+    """Whether [video inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+    # TODO: add more detail about formats? e.g. bytes or base64
+
+    image_tool_message: bool
+    """Whether images can be included in tool messages."""
+
+    pdf_tool_message: bool
+    """Whether PDFs can be included in tool messages."""
+
+    # --- Output constraints ---
+
+    max_output_tokens: int
+    """Maximum output tokens"""
+
+    reasoning_output: bool
+    """Whether the model supports [reasoning / chain-of-thought](https://docs.langchain.com/oss/python/langchain/models#reasoning)"""
+
+    image_outputs: bool
+    """Whether [image outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+
+    audio_outputs: bool
+    """Whether [audio outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+
+    video_outputs: bool
+    """Whether [video outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
+    are supported."""
+
+    # --- Tool calling ---
+    tool_calling: bool
+    """Whether the model supports [tool calling](https://docs.langchain.com/oss/python/langchain/models#tool-calling)"""
+
+    tool_choice: bool
+    """Whether the model supports [tool choice](https://docs.langchain.com/oss/python/langchain/models#forcing-tool-calls)"""
+
+    # --- Structured output ---
+    structured_output: bool
+    """Whether the model supports a native [structured output](https://docs.langchain.com/oss/python/langchain/models#structured-outputs)
+    feature"""
+
+
+ModelProfileRegistry = dict[str, ModelProfile]
+"""Registry mapping model identifiers or names to their ModelProfile."""
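Since every `ModelProfile` key is optional (`total=False`), absent keys mean "unknown" rather than "unsupported". A small sketch of building a registry by hand; the model name and values are illustrative, as real profiles ship with provider packages:

```python
from langchain_core.language_models import ModelProfile, ModelProfileRegistry

# Illustrative values only; provider packages supply real profile data.
profiles: ModelProfileRegistry = {
    "example-model": ModelProfile(
        max_input_tokens=200_000,
        tool_calling=True,
        structured_output=True,
    ),
}

profile = profiles["example-model"]
# Missing keys read as None with .get(), i.e. capability unknown.
supports_images = profile.get("image_inputs")
```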
langchain_core/load/load.py
CHANGED

@@ -1,4 +1,9 @@
-"""Load LangChain objects from JSON strings or objects."""
+"""Load LangChain objects from JSON strings or objects.
+
+!!! warning
+    `load` and `loads` are vulnerable to remote code execution. Never use with untrusted
+    input.
+"""

 import importlib
 import json
@@ -193,6 +198,10 @@ def loads(
 ) -> Any:
     """Revive a LangChain class from a JSON string.

+    !!! warning
+        This function is vulnerable to remote code execution. Never use with untrusted
+        input.
+
     Equivalent to `load(json.loads(text))`.

     Args:
@@ -236,6 +245,10 @@ def load(
 ) -> Any:
     """Revive a LangChain class from a JSON object.

+    !!! warning
+        This function is vulnerable to remote code execution. Never use with untrusted
+        input.
+
     Use this if you already have a parsed JSON object,
     eg. from `json.load` or `orjson.loads`.
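Given the warning these hunks add in three places, the safe pattern is to revive only payloads your own process serialized. A minimal round-trip sketch with the module's `dumpd`/`load` pair; the prompt class is just a convenient serializable object:

```python
from langchain_core.load import dumpd, load
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([("human", "Tell me about {topic}")])

# Serialize to a JSON-compatible dict...
serialized = dumpd(prompt)

# ...and revive it. This is safe only because we produced `serialized`
# ourselves; load()/loads() can import and invoke arbitrary code paths
# when fed untrusted input.
revived = load(serialized)
print(revived.format_messages(topic="UUIDs"))
```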