langchain 1.0.0a3__py3-none-any.whl → 1.0.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,79 @@
1
+ """Types for middleware and agents."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import TYPE_CHECKING, Annotated, Any, Generic, Literal, cast
7
+
8
+ # needed as top level import for pydantic schema generation on AgentState
9
+ from langchain_core.messages import AnyMessage # noqa: TC002
10
+ from langgraph.channels.ephemeral_value import EphemeralValue
11
+ from langgraph.graph.message import Messages, add_messages
12
+ from typing_extensions import NotRequired, Required, TypedDict, TypeVar
13
+
14
+ if TYPE_CHECKING:
15
+ from langchain_core.language_models.chat_models import BaseChatModel
16
+ from langchain_core.tools import BaseTool
17
+
18
+ from langchain.agents.structured_output import ResponseFormat
19
+
20
JumpTo = Literal["tools", "model", "__end__"]
"""Destination to jump to when a middleware node returns."""

# Generic type of the structured response produced when a response_format is configured.
ResponseT = TypeVar("ResponseT")
24
+
25
+
26
@dataclass
class ModelRequest:
    """Model request information for the agent.

    Snapshot of everything used for the next model invocation; middleware can
    rewrite any field via ``AgentMiddleware.modify_model_request`` before the
    model is actually called.
    """

    model: BaseChatModel
    system_prompt: str | None  # sent as a SystemMessage ahead of `messages` when set
    messages: list[AnyMessage]  # excluding system prompt
    tool_choice: Any | None
    tools: list[BaseTool]
    response_format: ResponseFormat | None
    # Extra kwargs bound onto the model (e.g. temperature); defaults to empty.
    model_settings: dict[str, Any] = field(default_factory=dict)
37
+
38
+
39
class AgentState(TypedDict, Generic[ResponseT]):
    """State schema for the agent."""

    # Conversation history; `add_messages` merges updates instead of replacing.
    messages: Required[Annotated[list[AnyMessage], add_messages]]
    # Pending model request (ephemeral per langgraph's EphemeralValue channel).
    model_request: NotRequired[Annotated[ModelRequest | None, EphemeralValue]]
    # Routing hint consumed by the graph's conditional edges; also ephemeral.
    jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue]]
    # Structured output parsed from the model, when a response_format is configured.
    response: NotRequired[ResponseT]
46
+
47
+
48
class PublicAgentState(TypedDict, Generic[ResponseT]):
    """Input / output schema for the agent.

    User-facing subset of ``AgentState``; internal channels such as
    ``model_request`` and ``jump_to`` are deliberately excluded.
    """

    messages: Required[Messages]
    response: NotRequired[ResponseT]
53
+
54
+
55
# State type bound for middleware: any schema extending the base AgentState.
StateT = TypeVar("StateT", bound=AgentState)
56
+
57
+
58
class AgentMiddleware(Generic[StateT]):
    """Base middleware class for an agent.

    Subclass this and implement any of the defined methods to customize agent behavior
    between steps in the main agent loop.
    """

    # Override with an extended TypedDict to carry middleware-specific state fields.
    state_schema: type[StateT] = cast("type[StateT]", AgentState)
    """The schema for state passed to the middleware nodes."""

    # NOTE(review): bare annotation, no default — consumers are expected to read it
    # via getattr(middleware, "tools", []); subclasses set it only when needed.
    tools: list[BaseTool]
    """Additional tools registered by the middleware."""

    def before_model(self, state: StateT) -> dict[str, Any] | None:
        """Logic to run before the model is called.

        Returns an optional partial state update (``None`` means no update).
        """

    def modify_model_request(self, request: ModelRequest, state: StateT) -> ModelRequest:  # noqa: ARG002
        """Logic to modify request kwargs before the model is called.

        Default implementation returns the request unchanged.
        """
        return request

    def after_model(self, state: StateT) -> dict[str, Any] | None:
        """Logic to run after the model is called.

        Returns an optional partial state update (``None`` means no update).
        """
@@ -0,0 +1,557 @@
1
+ """Middleware agent implementation."""
2
+
3
+ import itertools
4
+ from collections.abc import Callable, Sequence
5
+ from typing import Any
6
+
7
+ from langchain_core.language_models.chat_models import BaseChatModel
8
+ from langchain_core.messages import AIMessage, SystemMessage, ToolMessage
9
+ from langchain_core.runnables import Runnable
10
+ from langchain_core.tools import BaseTool
11
+ from langgraph.constants import END, START
12
+ from langgraph.graph.state import StateGraph
13
+ from langgraph.typing import ContextT
14
+ from typing_extensions import TypedDict, TypeVar
15
+
16
+ from langchain.agents.middleware.types import (
17
+ AgentMiddleware,
18
+ AgentState,
19
+ JumpTo,
20
+ ModelRequest,
21
+ PublicAgentState,
22
+ )
23
+
24
+ # Import structured output classes from the old implementation
25
+ from langchain.agents.structured_output import (
26
+ MultipleStructuredOutputsError,
27
+ OutputToolBinding,
28
+ ProviderStrategy,
29
+ ProviderStrategyBinding,
30
+ ResponseFormat,
31
+ StructuredOutputValidationError,
32
+ ToolStrategy,
33
+ )
34
+ from langchain.agents.tool_node import ToolNode
35
+ from langchain.chat_models import init_chat_model
36
+
37
# Feedback sent back to the model (as ToolMessage content) when structured output parsing fails.
STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
38
+
39
+
40
+ def _merge_state_schemas(schemas: list[type]) -> type:
41
+ """Merge multiple TypedDict schemas into a single schema with all fields."""
42
+ if not schemas:
43
+ return AgentState
44
+
45
+ all_annotations = {}
46
+
47
+ for schema in schemas:
48
+ all_annotations.update(schema.__annotations__)
49
+
50
+ return TypedDict("MergedState", all_annotations) # type: ignore[operator]
51
+
52
+
53
+ def _filter_state_for_schema(state: dict[str, Any], schema: type) -> dict[str, Any]:
54
+ """Filter state to only include fields defined in the given schema."""
55
+ if not hasattr(schema, "__annotations__"):
56
+ return state
57
+
58
+ schema_fields = set(schema.__annotations__.keys())
59
+ return {k: v for k, v in state.items() if k in schema_fields}
60
+
61
+
62
+ def _supports_native_structured_output(model: str | BaseChatModel) -> bool:
63
+ """Check if a model supports native structured output."""
64
+ model_name: str | None = None
65
+ if isinstance(model, str):
66
+ model_name = model
67
+ elif isinstance(model, BaseChatModel):
68
+ model_name = getattr(model, "model_name", None)
69
+
70
+ return (
71
+ "grok" in model_name.lower()
72
+ or any(part in model_name for part in ["gpt-5", "gpt-4.1", "gpt-oss", "o3-pro", "o3-mini"])
73
+ if model_name
74
+ else False
75
+ )
76
+
77
+
78
def _handle_structured_output_error(
    exception: Exception,
    response_format: ResponseFormat,
) -> tuple[bool, str]:
    """Decide how to react to a structured-output failure.

    Returns ``(should_retry, retry_tool_message)``; a non-ToolStrategy format
    never retries.
    """
    if not isinstance(response_format, ToolStrategy):
        return False, ""

    handler = response_format.handle_errors

    # Literal booleans: opt fully in or out of retrying with the default message.
    if handler is True:
        return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
    if handler is False:
        return False, ""

    # A string is used verbatim as the retry message.
    if isinstance(handler, str):
        return True, handler

    # A single exception class: retry only when the error is an instance of it.
    if isinstance(handler, type) and issubclass(handler, Exception):
        if isinstance(exception, handler):
            return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
        return False, ""

    # A tuple of exception classes: retry when any of them matches.
    if isinstance(handler, tuple):
        matched = any(isinstance(exception, exc_type) for exc_type in handler)
        if matched:
            return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
        return False, ""

    # A callable produces the retry message itself.
    if callable(handler):
        # type narrowing not working appropriately w/ callable check, can fix later
        return True, handler(exception)  # type: ignore[return-value,call-arg]

    return False, ""
106
+
107
+
108
# Generic type of the structured response (mirrors ResponseT in middleware.types).
ResponseT = TypeVar("ResponseT")
109
+
110
+
111
def create_agent(  # noqa: PLR0915
    *,
    model: str | BaseChatModel,
    tools: Sequence[BaseTool | Callable | dict[str, Any]] | ToolNode | None = None,
    system_prompt: str | None = None,
    middleware: Sequence[AgentMiddleware] = (),
    response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
    context_schema: type[ContextT] | None = None,
) -> StateGraph[
    AgentState[ResponseT], ContextT, PublicAgentState[ResponseT], PublicAgentState[ResponseT]
]:
    """Create a middleware agent graph.

    Args:
        model: Chat model instance, or an identifier string resolved via
            ``init_chat_model``.
        tools: Tools available to the agent: a sequence of tools / callables /
            builtin provider tool dicts, a prebuilt ``ToolNode``, or ``None``.
        system_prompt: Optional system prompt prepended to the message list.
        middleware: Middleware instances hooked into the agent loop (at most one
            instance per middleware class).
        response_format: Structured-output strategy, or a plain schema which is
            wrapped in ``ProviderStrategy`` / ``ToolStrategy`` automatically.
        context_schema: Optional schema for the graph's runtime context.

    Returns:
        An uncompiled ``StateGraph`` wiring model, tools, and middleware nodes.
    """
    # init chat model
    if isinstance(model, str):
        model = init_chat_model(model)

    # Handle tools being None or empty
    if tools is None:
        tools = []

    # Setup structured output
    structured_output_tools: dict[str, OutputToolBinding] = {}
    native_output_binding: ProviderStrategyBinding | None = None

    if response_format is not None:
        if not isinstance(response_format, (ToolStrategy, ProviderStrategy)):
            # Auto-detect strategy based on model capabilities
            if _supports_native_structured_output(model):
                response_format = ProviderStrategy(schema=response_format)
            else:
                response_format = ToolStrategy(schema=response_format)

        if isinstance(response_format, ToolStrategy):
            # Setup tools strategy for structured output
            for response_schema in response_format.schema_specs:
                structured_tool_info = OutputToolBinding.from_schema_spec(response_schema)
                structured_output_tools[structured_tool_info.tool.name] = structured_tool_info
        elif isinstance(response_format, ProviderStrategy):
            # Setup native strategy
            native_output_binding = ProviderStrategyBinding.from_schema_spec(
                response_format.schema_spec
            )
    middleware_tools = [t for m in middleware for t in getattr(m, "tools", [])]

    # Setup tools
    tool_node: ToolNode | None = None
    if isinstance(tools, list):
        # Extract builtin provider tools (dict format)
        builtin_tools = [t for t in tools if isinstance(t, dict)]
        regular_tools = [t for t in tools if not isinstance(t, dict)]

        # Add structured output tools to regular tools
        structured_tools = [info.tool for info in structured_output_tools.values()]
        all_tools = middleware_tools + regular_tools + structured_tools

        # Only create ToolNode if we have tools
        tool_node = ToolNode(tools=all_tools) if all_tools else None
        default_tools = regular_tools + builtin_tools + structured_tools + middleware_tools
    elif isinstance(tools, ToolNode):
        # tools is ToolNode or None
        tool_node = tools
        if tool_node:
            default_tools = list(tool_node.tools_by_name.values()) + middleware_tools
            # Update tool node to know about tools provided by middleware
            all_tools = list(tool_node.tools_by_name.values()) + middleware_tools
            tool_node = ToolNode(all_tools)
            # Add structured output tools
            for info in structured_output_tools.values():
                default_tools.append(info.tool)
    else:
        # Fix: register the structured-output *tools* themselves, not their
        # OutputToolBinding wrappers, so default_tools is uniformly BaseTool
        # (matching every other branch above).
        default_tools = [
            info.tool for info in structured_output_tools.values()
        ] + middleware_tools

    # validate middleware
    assert len({m.__class__.__name__ for m in middleware}) == len(middleware), (  # noqa: S101
        "Please remove duplicate middleware instances."
    )
    middleware_w_before = [
        m for m in middleware if m.__class__.before_model is not AgentMiddleware.before_model
    ]
    middleware_w_modify_model_request = [
        m
        for m in middleware
        if m.__class__.modify_model_request is not AgentMiddleware.modify_model_request
    ]
    middleware_w_after = [
        m for m in middleware if m.__class__.after_model is not AgentMiddleware.after_model
    ]

    # Collect all middleware state schemas and create merged schema
    merged_state_schema: type[AgentState] = _merge_state_schemas(
        [m.state_schema for m in middleware]
    )

    # create graph, add nodes
    graph = StateGraph(
        merged_state_schema,
        input_schema=PublicAgentState,
        output_schema=PublicAgentState,
        context_schema=context_schema,
    )

    def _handle_model_output(state: dict[str, Any], output: AIMessage) -> dict[str, Any]:
        """Handle model output including structured responses."""
        # Handle structured output with native strategy
        if isinstance(response_format, ProviderStrategy):
            if not output.tool_calls and native_output_binding:
                structured_response = native_output_binding.parse(output)
                return {"messages": [output], "response": structured_response}
            # Clear any stale structured response from a previous turn.
            if state.get("response") is not None:
                return {"messages": [output], "response": None}
            return {"messages": [output]}

        # Handle structured output with tools strategy
        if (
            isinstance(response_format, ToolStrategy)
            and isinstance(output, AIMessage)
            and output.tool_calls
        ):
            structured_tool_calls = [
                tc for tc in output.tool_calls if tc["name"] in structured_output_tools
            ]

            if structured_tool_calls:
                exception: Exception | None = None
                if len(structured_tool_calls) > 1:
                    # Handle multiple structured outputs error
                    tool_names = [tc["name"] for tc in structured_tool_calls]
                    exception = MultipleStructuredOutputsError(tool_names)
                    should_retry, error_message = _handle_structured_output_error(
                        exception, response_format
                    )
                    if not should_retry:
                        raise exception

                    # Add error messages and retry
                    tool_messages = [
                        ToolMessage(
                            content=error_message,
                            tool_call_id=tc["id"],
                            name=tc["name"],
                        )
                        for tc in structured_tool_calls
                    ]
                    return {"messages": [output, *tool_messages]}

                # Handle single structured output
                tool_call = structured_tool_calls[0]
                try:
                    structured_tool_binding = structured_output_tools[tool_call["name"]]
                    structured_response = structured_tool_binding.parse(tool_call["args"])

                    tool_message_content = (
                        response_format.tool_message_content
                        if response_format.tool_message_content
                        else f"Returning structured response: {structured_response}"
                    )

                    return {
                        "messages": [
                            output,
                            ToolMessage(
                                content=tool_message_content,
                                tool_call_id=tool_call["id"],
                                name=tool_call["name"],
                            ),
                        ],
                        "response": structured_response,
                    }
                except Exception as exc:  # noqa: BLE001
                    exception = StructuredOutputValidationError(tool_call["name"], exc)
                    should_retry, error_message = _handle_structured_output_error(
                        exception, response_format
                    )
                    if not should_retry:
                        raise exception

                    return {
                        "messages": [
                            output,
                            ToolMessage(
                                content=error_message,
                                tool_call_id=tool_call["id"],
                                name=tool_call["name"],
                            ),
                        ],
                    }

        # Standard response handling
        if state.get("response") is not None:
            return {"messages": [output], "response": None}
        return {"messages": [output]}

    def _get_bound_model(request: ModelRequest) -> Runnable:
        """Get the model with appropriate tool bindings."""
        if isinstance(response_format, ProviderStrategy):
            # Use native structured output
            kwargs = response_format.to_model_kwargs()
            return request.model.bind_tools(
                request.tools, strict=True, **kwargs, **request.model_settings
            )
        if isinstance(response_format, ToolStrategy):
            # Force a tool call so the structured-output tool can be selected.
            tool_choice = "any" if structured_output_tools else request.tool_choice
            return request.model.bind_tools(
                request.tools, tool_choice=tool_choice, **request.model_settings
            )
        # Standard model binding
        if request.tools:
            return request.model.bind_tools(
                request.tools, tool_choice=request.tool_choice, **request.model_settings
            )
        return request.model.bind(**request.model_settings)

    def model_request(state: dict[str, Any]) -> dict[str, Any]:
        """Sync model request handler with sequential middleware processing."""
        request = ModelRequest(
            model=model,
            tools=default_tools,
            system_prompt=system_prompt,
            response_format=response_format,
            messages=state["messages"],
            tool_choice=None,
        )

        # Apply modify_model_request middleware in sequence
        for m in middleware_w_modify_model_request:
            # Filter state to only include fields defined in this middleware's schema
            filtered_state = _filter_state_for_schema(state, m.state_schema)
            request = m.modify_model_request(request, filtered_state)

        # Get the final model and messages
        model_ = _get_bound_model(request)
        messages = request.messages
        if request.system_prompt:
            messages = [SystemMessage(request.system_prompt), *messages]

        output = model_.invoke(messages)
        return _handle_model_output(state, output)

    async def amodel_request(state: dict[str, Any]) -> dict[str, Any]:
        """Async model request handler with sequential middleware processing."""
        # Start with the base model request
        request = ModelRequest(
            model=model,
            tools=default_tools,
            system_prompt=system_prompt,
            response_format=response_format,
            messages=state["messages"],
            tool_choice=None,
        )

        # Apply modify_model_request middleware in sequence
        for m in middleware_w_modify_model_request:
            # Filter state to only include fields defined in this middleware's schema
            filtered_state = _filter_state_for_schema(state, m.state_schema)
            request = m.modify_model_request(request, filtered_state)

        # Get the final model and messages
        model_ = _get_bound_model(request)
        messages = request.messages
        if request.system_prompt:
            messages = [SystemMessage(request.system_prompt), *messages]

        output = await model_.ainvoke(messages)
        return _handle_model_output(state, output)

    # Use sync or async based on model capabilities
    from langgraph._internal._runnable import RunnableCallable

    graph.add_node("model_request", RunnableCallable(model_request, amodel_request))

    # Only add tools node if we have tools
    if tool_node is not None:
        graph.add_node("tools", tool_node)

    # Add middleware nodes (only for hooks actually overridden by the subclass)
    for m in middleware:
        if m.__class__.before_model is not AgentMiddleware.before_model:
            graph.add_node(
                f"{m.__class__.__name__}.before_model",
                m.before_model,
                input_schema=m.state_schema,
            )

        if m.__class__.after_model is not AgentMiddleware.after_model:
            graph.add_node(
                f"{m.__class__.__name__}.after_model",
                m.after_model,
                input_schema=m.state_schema,
            )

    # add start edge
    first_node = (
        f"{middleware_w_before[0].__class__.__name__}.before_model"
        if middleware_w_before
        else "model_request"
    )
    last_node = (
        f"{middleware_w_after[0].__class__.__name__}.after_model"
        if middleware_w_after
        else "model_request"
    )
    graph.add_edge(START, first_node)

    # add conditional edges only if tools exist
    if tool_node is not None:
        graph.add_conditional_edges(
            "tools",
            _make_tools_to_model_edge(tool_node, first_node),
            [first_node, END],
        )
        graph.add_conditional_edges(
            last_node,
            _make_model_to_tools_edge(first_node, structured_output_tools),
            [first_node, "tools", END],
        )
    elif last_node == "model_request":
        # If no tools, just go to END from model
        graph.add_edge(last_node, END)
    else:
        # If after_model, then need to check for jump_to
        _add_middleware_edge(
            graph,
            f"{middleware_w_after[0].__class__.__name__}.after_model",
            END,
            first_node,
            tools_available=tool_node is not None,
        )

    # Add middleware edges (same as before)
    if middleware_w_before:
        for m1, m2 in itertools.pairwise(middleware_w_before):
            _add_middleware_edge(
                graph,
                f"{m1.__class__.__name__}.before_model",
                f"{m2.__class__.__name__}.before_model",
                first_node,
                tools_available=tool_node is not None,
            )
        # Go directly to model_request after the last before_model
        _add_middleware_edge(
            graph,
            f"{middleware_w_before[-1].__class__.__name__}.before_model",
            "model_request",
            first_node,
            tools_available=tool_node is not None,
        )

    if middleware_w_after:
        graph.add_edge("model_request", f"{middleware_w_after[-1].__class__.__name__}.after_model")
        # after_model middleware runs in reverse registration order.
        for idx in range(len(middleware_w_after) - 1, 0, -1):
            m1 = middleware_w_after[idx]
            m2 = middleware_w_after[idx - 1]
            _add_middleware_edge(
                graph,
                f"{m1.__class__.__name__}.after_model",
                f"{m2.__class__.__name__}.after_model",
                first_node,
                tools_available=tool_node is not None,
            )

    return graph
474
+
475
+
476
+ def _resolve_jump(jump_to: JumpTo | None, first_node: str) -> str | None:
477
+ if jump_to == "model":
478
+ return first_node
479
+ if jump_to:
480
+ return jump_to
481
+ return None
482
+
483
+
484
def _make_model_to_tools_edge(
    first_node: str, structured_output_tools: dict[str, OutputToolBinding]
) -> Callable[[AgentState], str | None]:
    """Build the router used after the model (or last after_model) node."""

    def model_to_tools(state: AgentState) -> str | None:
        # An explicit jump requested by middleware takes priority.
        jump_to = state.get("jump_to")
        if jump_to:
            return _resolve_jump(jump_to, first_node)

        last_message = state["messages"][-1]

        # A ToolMessage emitted by a structured-output tool ends the run.
        if isinstance(last_message, ToolMessage) and last_message.name in structured_output_tools:
            return END

        # Route to the tools node only when at least one call targets a regular tool.
        if isinstance(last_message, AIMessage) and last_message.tool_calls:
            has_regular_call = any(
                tc["name"] not in structured_output_tools for tc in last_message.tool_calls
            )
            if has_regular_call:
                return "tools"

        return END

    return model_to_tools
509
+
510
+
511
def _make_tools_to_model_edge(
    tool_node: ToolNode, next_node: str
) -> Callable[[AgentState], str | None]:
    """Build the router used after the tools node."""

    def tools_to_model(state: AgentState) -> str | None:
        ai_messages = [m for m in state["messages"] if isinstance(m, AIMessage)]
        last_ai = ai_messages[-1]

        # Only consider calls the tool node actually knows about.
        known_calls = [c for c in last_ai.tool_calls if c["name"] in tool_node.tools_by_name]
        if all(tool_node.tools_by_name[c["name"]].return_direct for c in known_calls):
            # Every executed tool wants its result returned directly: stop here.
            return END

        return next_node

    return tools_to_model
526
+
527
+
528
def _add_middleware_edge(
    graph: StateGraph[AgentState, ContextT, PublicAgentState, PublicAgentState],
    name: str,
    default_destination: str,
    model_destination: str,
    tools_available: bool,  # noqa: FBT001
) -> None:
    """Add a conditional edge to the graph for a middleware node.

    The edge honors the ``jump_to`` state field set by the middleware: ``"model"``
    resolves to ``model_destination``, other values are used verbatim, and when no
    jump is requested control flows to ``default_destination``.

    Args:
        graph: The graph to add the edge to.
        name: The name of the middleware node.
        default_destination: The default destination for the edge.
        model_destination: The destination for the edge to the model.
        tools_available: Whether tools are available for the edge to potentially route to.
    """

    def jump_edge(state: AgentState) -> str:
        # Fall back to the default destination when no jump was requested.
        return _resolve_jump(state.get("jump_to"), model_destination) or default_destination

    # Enumerate every node the conditional edge may legally route to.
    destinations = [default_destination]
    if default_destination != END:
        destinations.append(END)
    if tools_available:
        destinations.append("tools")
    if name != model_destination:
        destinations.append(model_destination)

    graph.add_conditional_edges(name, jump_edge, destinations)