langchain-core 1.0.0a6__py3-none-any.whl → 1.0.0a8__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of langchain-core might be problematic.
- langchain_core/_api/__init__.py +3 -3
- langchain_core/_api/beta_decorator.py +6 -6
- langchain_core/_api/deprecation.py +21 -29
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +2 -3
- langchain_core/agents.py +10 -11
- langchain_core/caches.py +7 -7
- langchain_core/callbacks/base.py +91 -91
- langchain_core/callbacks/file.py +11 -11
- langchain_core/callbacks/manager.py +86 -89
- langchain_core/callbacks/stdout.py +8 -8
- langchain_core/callbacks/usage.py +4 -4
- langchain_core/chat_history.py +5 -5
- langchain_core/document_loaders/base.py +2 -2
- langchain_core/document_loaders/langsmith.py +15 -15
- langchain_core/documents/base.py +16 -16
- langchain_core/documents/compressor.py +4 -4
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +17 -19
- langchain_core/exceptions.py +3 -3
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +44 -43
- langchain_core/indexing/base.py +30 -30
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/_utils.py +5 -7
- langchain_core/language_models/base.py +18 -132
- langchain_core/language_models/chat_models.py +118 -227
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +35 -29
- langchain_core/language_models/llms.py +91 -201
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +11 -12
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +2 -4
- langchain_core/messages/ai.py +17 -20
- langchain_core/messages/base.py +23 -25
- langchain_core/messages/block_translators/__init__.py +2 -5
- langchain_core/messages/block_translators/anthropic.py +3 -3
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +6 -6
- langchain_core/messages/content.py +120 -124
- langchain_core/messages/human.py +7 -7
- langchain_core/messages/system.py +7 -7
- langchain_core/messages/tool.py +24 -24
- langchain_core/messages/utils.py +67 -79
- langchain_core/output_parsers/base.py +12 -14
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +3 -5
- langchain_core/output_parsers/openai_functions.py +3 -3
- langchain_core/output_parsers/openai_tools.py +3 -3
- langchain_core/output_parsers/pydantic.py +2 -2
- langchain_core/output_parsers/transform.py +13 -15
- langchain_core/output_parsers/xml.py +7 -9
- langchain_core/outputs/chat_generation.py +4 -4
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +2 -2
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompts/__init__.py +1 -5
- langchain_core/prompts/base.py +10 -15
- langchain_core/prompts/chat.py +31 -82
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +5 -5
- langchain_core/prompts/few_shot_with_templates.py +4 -4
- langchain_core/prompts/loading.py +3 -5
- langchain_core/prompts/prompt.py +4 -16
- langchain_core/prompts/string.py +2 -1
- langchain_core/prompts/structured.py +16 -23
- langchain_core/rate_limiters.py +3 -4
- langchain_core/retrievers.py +14 -14
- langchain_core/runnables/base.py +928 -1042
- langchain_core/runnables/branch.py +36 -40
- langchain_core/runnables/config.py +27 -35
- langchain_core/runnables/configurable.py +108 -124
- langchain_core/runnables/fallbacks.py +76 -72
- langchain_core/runnables/graph.py +39 -45
- langchain_core/runnables/graph_ascii.py +9 -11
- langchain_core/runnables/graph_mermaid.py +18 -19
- langchain_core/runnables/graph_png.py +8 -9
- langchain_core/runnables/history.py +114 -127
- langchain_core/runnables/passthrough.py +113 -139
- langchain_core/runnables/retry.py +43 -48
- langchain_core/runnables/router.py +23 -28
- langchain_core/runnables/schema.py +42 -44
- langchain_core/runnables/utils.py +28 -31
- langchain_core/stores.py +9 -13
- langchain_core/structured_query.py +8 -8
- langchain_core/tools/base.py +62 -115
- langchain_core/tools/convert.py +31 -35
- langchain_core/tools/render.py +1 -1
- langchain_core/tools/retriever.py +4 -4
- langchain_core/tools/simple.py +13 -17
- langchain_core/tools/structured.py +12 -15
- langchain_core/tracers/base.py +62 -64
- langchain_core/tracers/context.py +17 -35
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +58 -60
- langchain_core/tracers/langchain.py +13 -13
- langchain_core/tracers/log_stream.py +22 -24
- langchain_core/tracers/root_listeners.py +14 -14
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +8 -8
- langchain_core/tracers/stdout.py +2 -1
- langchain_core/utils/__init__.py +0 -3
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +24 -28
- langchain_core/utils/env.py +4 -4
- langchain_core/utils/function_calling.py +31 -41
- langchain_core/utils/html.py +3 -4
- langchain_core/utils/input.py +3 -3
- langchain_core/utils/iter.py +15 -19
- langchain_core/utils/json.py +3 -2
- langchain_core/utils/json_schema.py +6 -6
- langchain_core/utils/mustache.py +3 -5
- langchain_core/utils/pydantic.py +16 -18
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +29 -29
- langchain_core/vectorstores/base.py +18 -21
- langchain_core/vectorstores/in_memory.py +14 -87
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/METADATA +10 -21
- langchain_core-1.0.0a8.dist-info/RECORD +176 -0
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/WHEEL +1 -1
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a6.dist-info/RECORD +0 -181
- langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
langchain_core/runnables/configurable.py

````diff
@@ -7,6 +7,7 @@ import threading
 from abc import abstractmethod
 from collections.abc import (
     AsyncIterator,
+    Callable,
     Iterator,
     Sequence,
 )
@@ -14,9 +15,6 @@ from functools import wraps
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
-    Optional,
-    Union,
     cast,
 )
 from weakref import WeakValueDictionary
````
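These two import hunks set the theme for the rest of the file: `Callable` now comes from `collections.abc` (its preferred home since Python 3.9), and `Optional`/`Union` are dropped in favor of PEP 604 `X | Y` syntax. A minimal before/after sketch of the pattern — `fetch` is a hypothetical function for illustration, not langchain-core API:

```python
from collections.abc import Callable
from typing import Any

# 1.0.0a6 style (needs Optional/Union from typing):
#   def fetch(loader: Callable[[], Any], config: Optional[dict] = None) -> Union[str, Exception]: ...

# 1.0.0a8 style (PEP 604, Python 3.10+; no Optional/Union imports needed):
def fetch(loader: Callable[[], Any], config: dict | None = None) -> str | Exception:
    try:
        return str(loader())
    except Exception as exc:  # mirrors the return_exceptions pattern in batch()
        return exc
```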
````diff
@@ -58,7 +56,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     default: RunnableSerializable[Input, Output]
     """The default Runnable to use."""
 
-    config: Optional[RunnableConfig] = None
+    config: RunnableConfig | None = None
     """The configuration to use."""
 
     model_config = ConfigDict(
@@ -92,28 +90,26 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         return self.default.OutputType
 
     @override
-    def get_input_schema(
-        self, config: Optional[RunnableConfig] = None
-    ) -> type[BaseModel]:
+    def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
         runnable, config = self.prepare(config)
         return runnable.get_input_schema(config)
 
     @override
     def get_output_schema(
-        self, config: Optional[RunnableConfig] = None
+        self, config: RunnableConfig | None = None
     ) -> type[BaseModel]:
         runnable, config = self.prepare(config)
         return runnable.get_output_schema(config)
 
     @override
-    def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
+    def get_graph(self, config: RunnableConfig | None = None) -> Graph:
         runnable, config = self.prepare(config)
         return runnable.get_graph(config)
 
     @override
     def with_config(
         self,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         # Sadly Unpack is not well supported by mypy so this will have to be untyped
         **kwargs: Any,
     ) -> Runnable[Input, Output]:
@@ -122,7 +118,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         )
 
     def prepare(
-        self, config: Optional[RunnableConfig] = None
+        self, config: RunnableConfig | None = None
     ) -> tuple[Runnable[Input, Output], RunnableConfig]:
         """Prepare the Runnable for invocation.
 
@@ -140,19 +136,19 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
 
     @abstractmethod
     def _prepare(
-        self, config: Optional[RunnableConfig] = None
+        self, config: RunnableConfig | None = None
     ) -> tuple[Runnable[Input, Output], RunnableConfig]: ...
 
     @override
     def invoke(
-        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
+        self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
     ) -> Output:
         runnable, config = self.prepare(config)
         return runnable.invoke(input, config, **kwargs)
 
     @override
     async def ainvoke(
-        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
+        self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
     ) -> Output:
         runnable, config = self.prepare(config)
         return await runnable.ainvoke(input, config, **kwargs)
@@ -161,10 +157,10 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     def batch(
         self,
         inputs: list[Input],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
-        **kwargs: Optional[Any],
+        **kwargs: Any | None,
     ) -> list[Output]:
         configs = get_config_list(config, len(inputs))
         prepared = [self.prepare(c) for c in configs]
@@ -183,7 +179,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         def invoke(
             prepared: tuple[Runnable[Input, Output], RunnableConfig],
             input_: Input,
-        ) -> Union[Output, Exception]:
+        ) -> Output | Exception:
             bound, config = prepared
             if return_exceptions:
                 try:
@@ -204,10 +200,10 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     async def abatch(
         self,
         inputs: list[Input],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
-        **kwargs: Optional[Any],
+        **kwargs: Any | None,
     ) -> list[Output]:
         configs = get_config_list(config, len(inputs))
         prepared = [self.prepare(c) for c in configs]
@@ -226,7 +222,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
         async def ainvoke(
             prepared: tuple[Runnable[Input, Output], RunnableConfig],
             input_: Input,
-        ) -> Union[Output, Exception]:
+        ) -> Output | Exception:
             bound, config = prepared
             if return_exceptions:
                 try:
@@ -243,8 +239,8 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     def stream(
         self,
         input: Input,
-        config: Optional[RunnableConfig] = None,
-        **kwargs: Optional[Any],
+        config: RunnableConfig | None = None,
+        **kwargs: Any | None,
     ) -> Iterator[Output]:
         runnable, config = self.prepare(config)
         return runnable.stream(input, config, **kwargs)
@@ -253,8 +249,8 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     async def astream(
         self,
         input: Input,
-        config: Optional[RunnableConfig] = None,
-        **kwargs: Optional[Any],
+        config: RunnableConfig | None = None,
+        **kwargs: Any | None,
     ) -> AsyncIterator[Output]:
         runnable, config = self.prepare(config)
         async for chunk in runnable.astream(input, config, **kwargs):
@@ -264,8 +260,8 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     def transform(
         self,
         input: Iterator[Input],
-        config: Optional[RunnableConfig] = None,
-        **kwargs: Optional[Any],
+        config: RunnableConfig | None = None,
+        **kwargs: Any | None,
     ) -> Iterator[Output]:
         runnable, config = self.prepare(config)
         return runnable.transform(input, config, **kwargs)
@@ -274,8 +270,8 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
     async def atransform(
         self,
         input: AsyncIterator[Input],
-        config: Optional[RunnableConfig] = None,
-        **kwargs: Optional[Any],
+        config: RunnableConfig | None = None,
+        **kwargs: Any | None,
     ) -> AsyncIterator[Output]:
         runnable, config = self.prepare(config)
         async for chunk in runnable.atransform(input, config, **kwargs):
````
The next hunk rewrites the `RunnableConfigurableFields` docstring examples from reStructuredText `.. code-block::` directives to fenced Markdown blocks; the deleted side below is reconstructed from the surviving fragments and the parallel new content:

````diff
@@ -328,58 +324,56 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
 
     Here is an example of using a RunnableConfigurableFields with LLMs:
 
-    .. code-block:: python
-
-        from langchain_core.prompts import PromptTemplate
-        from langchain_core.runnables import ConfigurableField
-        from langchain_openai import ChatOpenAI
+    ```python
+    from langchain_core.prompts import PromptTemplate
+    from langchain_core.runnables import ConfigurableField
+    from langchain_openai import ChatOpenAI
 
-        model = ChatOpenAI(temperature=0).configurable_fields(
-            temperature=ConfigurableField(
-                id="temperature",
-                name="LLM Temperature",
-                description="The temperature of the LLM",
-            )
+    model = ChatOpenAI(temperature=0).configurable_fields(
+        temperature=ConfigurableField(
+            id="temperature",
+            name="LLM Temperature",
+            description="The temperature of the LLM",
+        )
-        # This creates a RunnableConfigurableFields for a chat model.
-
-        # When invoking the created RunnableSequence, you can pass in the
-        # value for your ConfigurableField's id which in this case
-        # will be change in temperature
+    )
+    # This creates a RunnableConfigurableFields for a chat model.
 
-        prompt = PromptTemplate.from_template("Pick a random number above {x}")
-        chain = prompt | model
+    # When invoking the created RunnableSequence, you can pass in the
+    # value for your ConfigurableField's id which in this case
+    # will be change in temperature
 
-        chain.invoke({"x": 0})
-        chain.invoke({"x": 0}, config={"configurable": {"temperature": 0.9}})
+    prompt = PromptTemplate.from_template("Pick a random number above {x}")
+    chain = prompt | model
 
+    chain.invoke({"x": 0})
+    chain.invoke({"x": 0}, config={"configurable": {"temperature": 0.9}})
+    ```
 
     Here is an example of using a RunnableConfigurableFields with HubRunnables:
 
-    .. code-block:: python
-
-        from langchain_core.prompts import PromptTemplate
-        from langchain_core.runnables import ConfigurableField
-        from langchain_openai import ChatOpenAI
-        from langchain.runnables.hub import HubRunnable
-
-        prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
-            owner_repo_commit=ConfigurableField(
-                id="hub_commit",
-                name="Hub Commit",
-                description="The Hub commit to pull from",
-            )
+    ```python
+    from langchain_core.prompts import PromptTemplate
+    from langchain_core.runnables import ConfigurableField
+    from langchain_openai import ChatOpenAI
+    from langchain.runnables.hub import HubRunnable
+
+    prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
+        owner_repo_commit=ConfigurableField(
+            id="hub_commit",
+            name="Hub Commit",
+            description="The Hub commit to pull from",
         )
+    )
 
-        prompt.invoke({"question": "foo", "context": "bar"})
+    prompt.invoke({"question": "foo", "context": "bar"})
 
-        # Invoking prompt with `with_config` method
-
-        prompt.invoke(
-            {"question": "foo", "context": "bar"},
-            config={"configurable": {"hub_commit": "rlm/rag-prompt-llama"}},
-        )
+    # Invoking prompt with `with_config` method
 
+    prompt.invoke(
+        {"question": "foo", "context": "bar"},
+        config={"configurable": {"hub_commit": "rlm/rag-prompt-llama"}},
+    )
+    ```
     """
 
     fields: dict[str, AnyConfigurableField]
````
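The same override can also be bound ahead of time with `with_config` instead of being passed on every `invoke` call. A hedged sketch reusing the `model` from the docstring above (assumes `langchain_openai` is installed and configured):

```python
# Bind the configurable field once; every subsequent call uses temperature 0.9.
hot_model = model.with_config(configurable={"temperature": 0.9})
hot_model.invoke("Pick a random number above 0")
```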
````diff
@@ -425,7 +419,7 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
         return self.default.configurable_fields(**{**self.fields, **kwargs})
 
     def _prepare(
-        self, config: Optional[RunnableConfig] = None
+        self, config: RunnableConfig | None = None
     ) -> tuple[Runnable[Input, Output], RunnableConfig]:
         config = ensure_config(config)
         specs_by_id = {spec.id: (key, spec) for key, spec in self.fields.items()}
@@ -472,9 +466,7 @@ class StrEnum(str, enum.Enum):
 
 
 _enums_for_spec: WeakValueDictionary[
-    Union[
-        ConfigurableFieldSingleOption, ConfigurableFieldMultiOption, ConfigurableField
-    ],
+    ConfigurableFieldSingleOption | ConfigurableFieldMultiOption | ConfigurableField,
     type[StrEnum],
 ] = WeakValueDictionary()
````
The `RunnableConfigurableAlternatives` docstring gets the same RST-to-Markdown treatment; again the deleted side is reconstructed from the surviving fragments and the parallel new content:

````diff
@@ -491,60 +483,52 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
     Here is an example of using a RunnableConfigurableAlternatives that uses
     alternative prompts to illustrate its functionality:
 
-    .. code-block:: python
-
-        from langchain_core.runnables import ConfigurableField
-        from langchain_openai import ChatOpenAI
-
-        # This creates a RunnableConfigurableAlternatives for Prompt Runnable
-        # with two alternatives.
-        prompt = PromptTemplate.from_template(
-            "Tell me a joke about {topic}"
-        ).configurable_alternatives(
-            ConfigurableField(id="prompt"),
-            default_key="joke",
-            poem=PromptTemplate.from_template("Write a short poem about {topic}"),
-        )
+    ```python
+    from langchain_core.runnables import ConfigurableField
+    from langchain_openai import ChatOpenAI
+
+    # This creates a RunnableConfigurableAlternatives for Prompt Runnable
+    # with two alternatives.
+    prompt = PromptTemplate.from_template(
+        "Tell me a joke about {topic}"
+    ).configurable_alternatives(
+        ConfigurableField(id="prompt"),
+        default_key="joke",
+        poem=PromptTemplate.from_template("Write a short poem about {topic}"),
+    )
 
-        # When invoking the created RunnableSequence, you can pass in the
-        # value for your ConfigurableField's id which in this case will either be
-        # `joke` or `poem`.
-        chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
-
-        # The `with_config` method brings in the desired Prompt Runnable in your
-        # Runnable Sequence.
-        chain.with_config(configurable={"prompt": "poem"}).invoke(
-            {"topic": "bears"}
-        )
+    # When invoking the created RunnableSequence, you can pass in the
+    # value for your ConfigurableField's id which in this case will either be
+    # `joke` or `poem`.
+    chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
 
+    # The `with_config` method brings in the desired Prompt Runnable in your
+    # Runnable Sequence.
+    chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
+    ```
 
     Equivalently, you can initialize RunnableConfigurableAlternatives directly
     and use in LCEL in the same way:
 
-    .. code-block:: python
-
-        from langchain_core.runnables import ConfigurableField
-        from langchain_core.runnables.configurable import (
-            RunnableConfigurableAlternatives,
-        )
-        from langchain_openai import ChatOpenAI
-
-        prompt = RunnableConfigurableAlternatives(
-            which=ConfigurableField(id="prompt"),
-            default=PromptTemplate.from_template("Tell me a joke about {topic}"),
-            default_key="joke",
-            prefix_keys=False,
-            alternatives={
-                "poem": PromptTemplate.from_template(
-                    "Write a short poem about {topic}"
-                )
-            },
-        )
-        chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
-        chain.with_config(configurable={"prompt": "poem"}).invoke(
-            {"topic": "bears"}
-        )
-
+    ```python
+    from langchain_core.runnables import ConfigurableField
+    from langchain_core.runnables.configurable import (
+        RunnableConfigurableAlternatives,
+    )
+    from langchain_openai import ChatOpenAI
+
+    prompt = RunnableConfigurableAlternatives(
+        which=ConfigurableField(id="prompt"),
+        default=PromptTemplate.from_template("Tell me a joke about {topic}"),
+        default_key="joke",
+        prefix_keys=False,
+        alternatives={
+            "poem": PromptTemplate.from_template("Write a short poem about {topic}")
+        },
+    )
+    chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+    chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
+    ```
     """
 
     which: ConfigurableField
````
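For completeness, omitting the config selects the `default_key` alternative. A small sketch reusing `chain` from the docstring above:

```python
# No config: the "joke" default alternative is used.
chain.invoke({"topic": "bears"})

# Explicit config: the "poem" alternative is swapped in.
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
```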
````diff
@@ -552,7 +536,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
 
     alternatives: dict[
         str,
-        Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]],
+        Runnable[Input, Output] | Callable[[], Runnable[Input, Output]],
     ]
     """The alternatives to choose from."""
 
@@ -626,7 +610,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
     )
 
     def _prepare(
-        self, config: Optional[RunnableConfig] = None
+        self, config: RunnableConfig | None = None
     ) -> tuple[Runnable[Input, Output], RunnableConfig]:
         config = ensure_config(config)
         which = config.get("configurable", {}).get(self.which.id, self.default_key)
@@ -689,8 +673,8 @@ def prefix_config_spec(
 
 
 def make_options_spec(
-    spec: Union[ConfigurableFieldSingleOption, ConfigurableFieldMultiOption],
-    description: Optional[str],
+    spec: ConfigurableFieldSingleOption | ConfigurableFieldMultiOption,
+    description: str | None,
 ) -> ConfigurableFieldSpec:
     """Make options spec.
````
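`make_options_spec` keeps its behavior; only the annotations change to PEP 604 form. A hedged usage sketch — the `ConfigurableFieldSingleOption` constructor fields (`id`, `options`, `default`) are assumed from `langchain_core.runnables.utils` and are not shown in this diff:

```python
from langchain_core.runnables.configurable import make_options_spec
from langchain_core.runnables.utils import ConfigurableFieldSingleOption

# Build a ConfigurableFieldSpec for a single-choice field; description=None
# defers to whatever default description the helper applies.
spec = make_options_spec(
    ConfigurableFieldSingleOption(
        id="prompt",
        options={"joke": "joke", "poem": "poem"},
        default="joke",
    ),
    description=None,
)
```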