langchain-dev-utils 1.1.11__py3-none-any.whl → 1.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_dev_utils/__init__.py +1 -1
- langchain_dev_utils/agents/middleware/plan.py +119 -205
- langchain_dev_utils/agents/wrap.py +1 -1
- langchain_dev_utils/chat_models/adapters/openai_compatible.py +6 -3
- langchain_dev_utils/chat_models/base.py +49 -6
- langchain_dev_utils/chat_models/types.py +1 -0
- langchain_dev_utils/embeddings/base.py +52 -8
- {langchain_dev_utils-1.1.11.dist-info → langchain_dev_utils-1.1.13.dist-info}/METADATA +3 -3
- {langchain_dev_utils-1.1.11.dist-info → langchain_dev_utils-1.1.13.dist-info}/RECORD +11 -11
- {langchain_dev_utils-1.1.11.dist-info → langchain_dev_utils-1.1.13.dist-info}/WHEEL +0 -0
- {langchain_dev_utils-1.1.11.dist-info → langchain_dev_utils-1.1.13.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.1.11"
+__version__ = "1.1.13"
@@ -13,138 +13,82 @@ from langchain_core.messages import ToolMessage
 from langgraph.types import Command
 from typing_extensions import TypedDict
 
-_DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION = """
-##
-### Plan Completion
-**Important Note**: When completing a plan, call the `finish_sub_plan()` function to update the plan status. **Do not use this tool**.
-## When to Use
-**Appropriate Scenarios**:
-- When starting a new complex work session.
-- When needing to reorganize the plan structure.
-- When the current plan needs revision based on new information.
-**Scenarios to Avoid**:
-- Simple tasks (<3 steps).
-- When only needing to mark a plan as complete.
-- Purely informational queries or conversation.
-- Trivial tasks that can be completed directly.
-## Best Practices
-1. When creating a plan, the first plan is automatically set to `in_progress`.
-2. Ensure sub-plans within the plan are specific and actionable.
-3. Break down complex sub-plans into smaller steps.
-4. Always keep at least one plan in the `in_progress` status unless all plans are completed.
-5. When modifying a plan, only provide the sub-plans that need to be executed next.
-6. Use clear, descriptive names for sub-plans.
-## Internal Processing Logic
-The tool automatically converts the input strings into a structured format:
-- Input: `["Sub-plan 1", "Sub-plan 2", "Sub-plan 3"]`
-- Internal Representation:
-```json
-[
-  {"content": "Sub-plan 1", "status": "in_progress"},
-  {"content": "Sub-plan 2", "status": "pending"},
-  {"content": "Sub-plan 3", "status": "pending"}
-]
-```
-Please remember:
-- For simple plans, execute them directly; there is no need to call this tool.
-- Use the `finish_sub_plan()` function to mark sub-plans as completed.
-- When modifying a plan, only provide the sub-plans that need to be executed next.
+_DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION = """Use this tool to create and manage a structured task list for complex or multi-step work. It helps you stay organized, track progress, and demonstrate to the user that you’re handling tasks systematically.
+
+## When to Use This Tool
+Use this tool in the following scenarios:
+
+1. **Complex multi-step tasks** — when a task requires three or more distinct steps or actions.
+2. **Non-trivial and complex tasks** — tasks that require careful planning or involve multiple operations.
+3. **User explicitly requests a to-do list** — when the user directly asks you to use the to-do list feature.
+4. **User provides multiple tasks** — when the user supplies a list of items to be done (e.g., numbered or comma-separated).
+5. **The plan needs adjustment based on current execution** — when ongoing progress indicates the plan should be revised.
+
+## How to Use This Tool
+1. **When starting a task** — before actually beginning work, invoke this tool with a task list (a list of strings). The first task will automatically be set to `in_progress`, and all others to `pending`.
+2. **When updating the task list** — for example, after completing some tasks, if you find certain tasks are no longer needed, remove them; if new necessary tasks emerge, add them. However, **do not modify** tasks already marked as completed. In such cases, simply call this tool again with the updated task list.
+
+## When NOT to Use This Tool
+Avoid using this tool in the following situations:
+1. The task is a **single, straightforward action**.
+2. The task is **too trivial**, and tracking it provides no benefit.
+3. The task can be completed in **fewer than three simple steps**.
+4. The current task list has been fully completed — in this case, use `finish_sub_plan()` to finalize.
+
+## How It Works
+- **Input**: A parameter named `plan` containing a list of strings representing the tasks (e.g., `["Task 1", "Task 2", "Task 3"]`).
+- **Automatic status assignment**:
+  → First task: `in_progress`
+  → Remaining tasks: `pending`
+- When updating the plan, provide only the **next set of tasks to execute**. For example, if the next phase requires `["Task 4", "Task 5"]`, call this tool with `plan=["Task 4", "Task 5"]`.
+
+## Task States
+- `pending`: Ready to start, awaiting execution
+- `in_progress`: Currently being worked on
+- `done`: Completed
+
+## Best Practices
+- Break large tasks into clear, actionable steps.
+- Use specific and descriptive task names.
+- Update the plan immediately if priorities shift or blockers arise.
+- Never leave the plan empty — as long as unfinished tasks remain, at least one must be marked `in_progress`.
+- Do not batch completions — mark each task as done immediately after finishing it.
+- Remove irrelevant tasks entirely instead of leaving them in `pending` state.
+
+**Remember**: If a task is simple, just do it. This tool is meant to provide structure — not overhead.
 """
 
-_DEFAULT_FINISH_SUB_PLAN_TOOL_DESCRIPTION = """
-This tool is used to mark the completion status of a sub-plan in an existing plan.
-
-Functionality:
-- Marks the sub-plan with status 'in_progress' as 'done'
-- Sets the first sub-plan with status 'pending' to 'in_progress' (if one exists)
+_DEFAULT_FINISH_SUB_PLAN_TOOL_DESCRIPTION = """This tool is used to mark the currently in-progress task in an existing task list as completed.
 
-##
+## Functionality
+- Marks the current task with status `in_progress` as `done`, and automatically sets the next task (previously `pending`) to `in_progress`.
 
-Use only when the current
+## When to Use
+Use only when you have confirmed that the current task is truly finished.
 
-- First `pending` → `in_progress` (if any)
-## Usage Example
-Current plan status:
+## Example
+Before calling:
 ```json
 [
-  {"content": "
-  {"content": "
-  {"content": "
+  {"content": "Task 1", "status": "done"},
+  {"content": "Task 2", "status": "in_progress"},
+  {"content": "Task 3", "status": "pending"}
 ]
 ```
 
-After calling finish_sub_plan()
+After calling `finish_sub_plan()`:
 ```json
 [
-  {"content": "
-  {"content": "
-  {"content": "
+  {"content": "Task 1", "status": "done"},
+  {"content": "Task 2", "status": "done"},
+  {"content": "Task 3", "status": "in_progress"}
 ]
 ```
 
-- Ensure the
-- No parameters
+**Note**:
+- This tool is **only** for marking completion — do **not** use it to create or modify plans (use `write_plan` instead).
+- Ensure the task is genuinely complete before invoking this function.
+- No parameters are required — status updates are handled automatically.
 """
 
 _DEFAULT_READ_PLAN_TOOL_DESCRIPTION = """
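The two tool descriptions above pin down the bookkeeping contract: `write_plan` turns a list of strings into status-tagged entries (first `in_progress`, the rest `pending`), and `finish_sub_plan` marks the current entry `done` and promotes the next pending one. A minimal sketch of that contract, written independently of the middleware's actual implementation (the helper names below are illustrative, not part of the package):

```python
# Illustrative sketch of the documented status transitions; not the
# package's internal implementation.
from typing import Literal, TypedDict


class SubPlan(TypedDict):
    content: str
    status: Literal["pending", "in_progress", "done"]


def write_plan(plan: list[str]) -> list[SubPlan]:
    """First task becomes in_progress, all remaining tasks become pending."""
    return [
        {"content": task, "status": "in_progress" if i == 0 else "pending"}
        for i, task in enumerate(plan)
    ]


def finish_sub_plan(plan: list[SubPlan]) -> list[SubPlan]:
    """Mark the in_progress task as done and promote the first pending task."""
    for task in plan:
        if task["status"] == "in_progress":
            task["status"] = "done"
            break
    for task in plan:
        if task["status"] == "pending":
            task["status"] = "in_progress"
            break
    return plan


tasks = write_plan(["Task 1", "Task 2", "Task 3"])
# [{"content": "Task 1", "status": "in_progress"}, then "pending", "pending"]
tasks = finish_sub_plan(tasks)
# Task 1 -> done, Task 2 -> in_progress, Task 3 stays pending
```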
@@ -298,67 +242,27 @@ def create_read_plan_tool(
     return read_plan
 
 
+_PLAN_SYSTEM_PROMPT_NOT_READ_PLAN = """You can manage task plans using two simple tools:
+
+## write_plan
+- Use it to break complex tasks (3+ steps) into a clear, actionable list. Only include next steps to execute — the first becomes `"in_progress"`, the rest `"pending"`. Don’t use it for simple tasks (<3 steps).
+
+## finish_sub_plan
+- Call it **only when the current task is 100% done**. It automatically marks it `"done"` and promotes the next `"pending"` task to `"in_progress"`. No parameters needed. Never use it mid-task or if anything’s incomplete.
+Keep plans lean, update immediately, and never batch completions.
 """
-
-You can manage task plans using
-
-##
-  - First plan → `"in_progress"`
-  - All subsequent plans → `"pending"`
-- **Plan Replacement Rule**: Provide **only the new plans that should be executed next**. Do not include completed, obsolete, or irrelevant plans.
-- **Plan Quality Requirements**:
-  - Plans must be specific, actionable, and verifiable
-  - Break work into logical phases (chronological or dependency-based)
-  - Define clear milestones and deliverable standards
-  - Avoid vague, ambiguous, or non-executable descriptions
-- **Do NOT Use When**:
-  - The task is simple (<3 steps)
-  - The request is conversational, informational, or a one-off query
-  - You only need to mark a plan as complete (use `finish_sub_plan` instead)
-
-## 2. finish_sub_plan: Mark Current Plan as Complete
-- **Primary Purpose**: Confirm the current `"in_progress"` plan is fully done, mark it as `"done"`, and automatically promote the first `"pending"` plan to `"in_progress"`.
-- **Call Only If ALL Conditions Are Met**:
-  - The sub-plan has been **fully executed**
-  - All specified requirements have been satisfied
-  - There are no unresolved errors, omissions, or blockers
-  - The output meets quality standards and has been verified
-- **Automatic Behavior**:
-  - No parameters needed—status transitions are handled internally
-  - If no `"pending"` plans remain, the plan ends naturally
-- **Never Call If**:
-  - The plan is partially complete
-  - Known issues or defects remain
-  - Execution was blocked due to missing resources or dependencies
-  - The result fails to meet expected quality
-
-{read_plan_system_prompt}
-
-## Plan Status Rules (Only These Three Are Valid)
-- **`"pending"`**: Plan not yet started
-- **`"in_progress"`**: Currently being executed (exactly one allowed at any time)
-- **`"done"`**: Fully completed and verified
-> ⚠️ No other status values (e.g., "completed", "failed", "blocked") are permitted.
-
-## General Usage Principles
-1. **Execute simple plans directly**: If a request can be fulfilled in 1–2 steps, do not create a plan—just complete it.
-2. **Decompose thoughtfully**: Break complex work into clear, independent, trackable sub-plans.
-3. **Manage status rigorously**:
-  - Always maintain exactly one `"in_progress"` plan while work is ongoing
-  - Call `finish_sub_plan` immediately after plan completion—never delay
-4. **Plan modification = full replacement**: Never edit individual plans. To adjust the plan, use `write_plan` with a new list of remaining plans.
-5. **Respect user intent**: If the user explicitly asks for a plan—even for a simpler task—honor the request and create one.
+
+_PLAN_SYSTEM_PROMPT = """You can manage task plans using three simple tools:
+
+## write_plan
+- Use it to break complex tasks (3+ steps) into a clear, actionable list. Only include next steps to execute — the first becomes `"in_progress"`, the rest `"pending"`. Don’t use it for simple tasks (<3 steps).
+
+## finish_sub_plan
+- Call it **only when the current task is 100% done**. It automatically marks it `"done"` and promotes the next `"pending"` task to `"in_progress"`. No parameters needed. Never use it mid-task or if anything’s incomplete.
+
+## read_plan
+- Retrieve the full current plan list with statuses, especially when you forget which sub-plan you're supposed to execute next.
+- No parameters required—returns a current plan list with statuses.
 """
 
 
@@ -374,9 +278,15 @@ class PlanMiddleware(AgentMiddleware):
 
     Args:
        system_prompt: Custom system prompt to guide the agent on using the plan tool.
-            If not provided, uses the default `
+            If not provided, uses the default `_PLAN_SYSTEM_PROMPT` or `_PLAN_SYSTEM_PROMPT_NOT_READ_PLAN` based on the `use_read_plan_tool` parameter.
+        write_plan_tool_description: Description of the `write_plan` tool.
+            If not provided, uses the default `_DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION`.
+        finish_sub_plan_tool_description: Description of the `finish_sub_plan` tool.
+            If not provided, uses the default `_DEFAULT_FINISH_SUB_PLAN_TOOL_DESCRIPTION`.
+        read_plan_tool_description: Description of the `read_plan` tool.
+            If not provided, uses the default `_DEFAULT_READ_PLAN_TOOL_DESCRIPTION`.
+        use_read_plan_tool: Whether to use the `read_plan` tool.
+            If not provided, uses the default `True`.
     Example:
        ```python
        from langchain_dev_utils.agents.middleware.plan import PlanMiddleware
@@ -397,41 +307,45 @@ class PlanMiddleware(AgentMiddleware):
         self,
         *,
         system_prompt: Optional[str] = None,
+        write_plan_tool_description: Optional[str] = None,
+        finish_sub_plan_tool_description: Optional[str] = None,
+        read_plan_tool_description: Optional[str] = None,
+        use_read_plan_tool: bool = True,
+        message_key: Optional[str] = None,
     ) -> None:
         super().__init__()
 
-        has_finish_sub_plan = any(
-            tool_obj.name == "finish_sub_plan" for tool_obj in tools
+        write_plan_tool_description = (
+            write_plan_tool_description or _DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION
+        )
+        finish_sub_plan_tool_description = (
+            finish_sub_plan_tool_description
+            or _DEFAULT_FINISH_SUB_PLAN_TOOL_DESCRIPTION
+        )
+        read_plan_tool_description = (
+            read_plan_tool_description or _DEFAULT_READ_PLAN_TOOL_DESCRIPTION
         )
 
-        )
+        tools = [
+            create_write_plan_tool(
+                description=write_plan_tool_description, message_key=message_key
+            ),
+            create_finish_sub_plan_tool(
+                description=finish_sub_plan_tool_description, message_key=message_key
+            ),
+        ]
 
+        if use_read_plan_tool:
+            tools.append(create_read_plan_tool(description=read_plan_tool_description))
 
         if system_prompt is None:
-            system_prompt = _PLAN_MIDDLEWARE_SYSTEM_PROMPT.format(
-                num=num, read_plan_system_prompt=read_plan_system
-            )
+            if use_read_plan_tool:
+                system_prompt = _PLAN_SYSTEM_PROMPT
+            else:
+                system_prompt = _PLAN_SYSTEM_PROMPT_NOT_READ_PLAN
 
         self.system_prompt = system_prompt
+        self.tools = tools
 
     def wrap_model_call(
         self,
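The constructor now builds the `write_plan`/`finish_sub_plan` tools itself (plus `read_plan` when enabled) and picks the matching system prompt. A hedged construction example using only the parameters visible in this diff; every omitted argument falls back to its default:

```python
# Parameters taken from the __init__ signature above; values are placeholders.
from langchain_dev_utils.agents.middleware.plan import PlanMiddleware

# All tool descriptions fall back to the _DEFAULT_*_TOOL_DESCRIPTION constants.
plan_middleware = PlanMiddleware(
    use_read_plan_tool=False,  # skip read_plan; selects _PLAN_SYSTEM_PROMPT_NOT_READ_PLAN
)

# Or override a single description and keep read_plan enabled (default True):
custom_middleware = PlanMiddleware(
    write_plan_tool_description="Create or replace the task list for this session.",
)
```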
@@ -46,7 +46,7 @@ _DictOrPydantic = Union[dict, _BM]
 
 
 class _ModelProviderConfigType(BaseModel):
-    supported_tool_choice: ToolChoiceType = Field(
+    supported_tool_choice: ToolChoiceType = Field(default_factory=list)
     keep_reasoning_content: bool = Field(default=False)
     support_json_mode: bool = Field(default=False)
 
@@ -126,12 +126,15 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         stop: list[str] | None = None,
         **kwargs: Any,
     ) -> dict:
+        payload = {**self._default_params, **kwargs}
+
+        if self._use_responses_api(payload):
+            return super()._get_request_payload(input_, stop=stop, **kwargs)
+
         messages = self._convert_input(input_).to_messages()
         if stop is not None:
             kwargs["stop"] = stop
 
-        payload = {**self._default_params, **kwargs}
-
         payload_messages = []
 
         for m in messages:
@@ -1,10 +1,11 @@
-import os
 from typing import Any, NotRequired, Optional, TypedDict, cast
 
 from langchain.chat_models.base import _SUPPORTED_PROVIDERS, _init_chat_model_helper
 from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.utils import from_env
 
 from .types import ChatModelType, ToolChoiceType
+from pydantic import BaseModel
 
 _MODEL_PROVIDERS_DICT = {}
 
@@ -22,6 +23,34 @@ class ChatModelProvider(TypedDict):
     provider_config: NotRequired[ProviderConfig]
 
 
+def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
+    """
+    Return 'base_url' if the model has a field named or aliased as 'base_url',
+    else return 'api_base' if it has a field named or aliased as 'api_base',
+    else return None.
+    The return value is always either 'base_url', 'api_base', or None.
+    """
+    model_fields = model_cls.model_fields
+
+    # try model_fields first
+    if "base_url" in model_fields:
+        return "base_url"
+
+    if "api_base" in model_fields:
+        return "api_base"
+
+    # then try aliases
+    for field_info in model_fields.values():
+        if field_info.alias == "base_url":
+            return "base_url"
+
+    for field_info in model_fields.values():
+        if field_info.alias == "api_base":
+            return "api_base"
+
+    return None
+
+
 def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
     """Parse model string and provider.
 
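`_get_base_url_field_name` decides which constructor keyword a registered base URL should be passed under by inspecting the pydantic model's declared fields and aliases. A toy demonstration of that lookup (the model classes below are made up for illustration; only the helper itself comes from this diff):

```python
# Toy models for illustration only; they are not part of langchain-dev-utils.
from pydantic import BaseModel, Field

from langchain_dev_utils.chat_models.base import _get_base_url_field_name


class UsesBaseUrl(BaseModel):
    base_url: str | None = None


class UsesApiBaseAlias(BaseModel):
    openai_api_base: str | None = Field(default=None, alias="api_base")


class NoUrlField(BaseModel):
    temperature: float = 0.7


print(_get_base_url_field_name(UsesBaseUrl))       # "base_url"
print(_get_base_url_field_name(UsesApiBaseAlias))  # "api_base"
print(_get_base_url_field_name(NoUrlField))        # None
```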
@@ -71,6 +100,11 @@ def _load_chat_model_helper(
             "provider_config"
         ):
             kwargs.update({"provider_config": provider_config})
+
+        if base_url := _MODEL_PROVIDERS_DICT[model_provider].get("base_url"):
+            url_key = _get_base_url_field_name(chat_model)
+            if url_key:
+                kwargs.update({url_key: base_url})
         return chat_model(model=model, **kwargs)
 
     return _init_chat_model_helper(model, model_provider=model_provider, **kwargs)
@@ -91,7 +125,7 @@ def register_model_provider(
     Args:
         provider_name: Name of the provider to register
         chat_model: Either a BaseChatModel class or a string identifier for a supported provider
-        base_url:
+        base_url: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
         provider_config: The configuration of the model provider (Optional parameter;effective only when `chat_model` is a string and is "openai-compatible".)
            It can be configured to configure some related parameters of the provider, such as whether to support json_mode structured output mode, the list of supported tool_choice
     Raises:
@@ -113,6 +147,7 @@ def register_model_provider(
        >>> model = load_chat_model(model="vllm:qwen3-4b")
        >>> model.invoke("Hello")
     """
+    base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(chat_model, str):
        try:
            from .adapters.openai_compatible import _create_openai_compatible_model
@@ -120,8 +155,6 @@ def register_model_provider(
            raise ImportError(
                "Please install langchain_dev_utils[standard],when chat_model is a 'openai-compatible'"
            )
-
-        base_url = base_url or os.getenv(f"{provider_name.upper()}_API_BASE")
        if base_url is None:
            raise ValueError(
                f"base_url must be provided or set {provider_name.upper()}_API_BASE environment variable when chat_model is a string"
@@ -140,11 +173,17 @@ def register_model_provider(
                provider_name: {
                    "chat_model": chat_model,
                    "provider_config": provider_config,
+                    "base_url": base_url,
                }
            }
        )
    else:
-
+        if base_url is not None:
+            _MODEL_PROVIDERS_DICT.update(
+                {provider_name: {"chat_model": chat_model, "base_url": base_url}}
+            )
+        else:
+            _MODEL_PROVIDERS_DICT.update({provider_name: {"chat_model": chat_model}})
 
 
 def batch_register_model_provider(
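Taken together, these changes mean a provider registered with a model class (not only the "openai-compatible" string) can carry a `base_url`, either passed explicitly or read from the `{PROVIDER_NAME}_API_BASE` environment variable, and `_load_chat_model_helper` injects it under whichever keyword the class accepts. A hedged usage sketch; `ChatOpenAI` and all names and URLs below are illustrative assumptions, not taken from the diff:

```python
# Hypothetical example of the class-plus-base_url registration path.
import os

from langchain_openai import ChatOpenAI
from langchain_dev_utils.chat_models.base import (
    load_chat_model,
    register_model_provider,
)

# Explicit base_url: injected into ChatOpenAI's base_url field at load time.
register_model_provider(
    "localopenai", ChatOpenAI, base_url="http://localhost:8000/v1"
)

# Equivalent via environment variable, checked as {PROVIDER_NAME.upper()}_API_BASE:
# os.environ["LOCALOPENAI_API_BASE"] = "http://localhost:8000/v1"
# register_model_provider("localopenai", ChatOpenAI)

model = load_chat_model("localopenai:gpt-4o-mini")
```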
@@ -159,7 +198,7 @@ def batch_register_model_provider(
        providers: List of ChatModelProvider dictionaries, each containing:
            - provider_name: Name of the provider to register
            - chat_model: Either a BaseChatModel class or a string identifier for a supported provider
-            - base_url:
+            - base_url: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
            - provider_config: The configuration of the model provider(Optional parameter; effective only when `chat_model` is a string and is "openai-compatible".)
              It can be configured to configure some related parameters of the provider, such as whether to support json_mode structured output mode, the list of supported tool_choice
 
@@ -235,6 +274,10 @@ def load_chat_model(
        ... )
        >>> model.invoke("Hello, how are you?")
     """
+    if "provider_config" in kwargs:
+        raise ValueError(
+            "provider_config is not a valid parameter in load_chat_model ,you can only set it when register model provider"
+        )
     return _load_chat_model_helper(
         cast(str, model),
         model_provider=model_provider,
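`load_chat_model` now fails fast if `provider_config` is passed at load time; the configuration has to be supplied when the provider is registered. A hedged sketch of the intended split (the `support_json_mode` key mirrors the config fields shown earlier in this diff; the provider, model name, and URL are placeholders):

```python
# Illustrative only: provider_config is accepted at registration time,
# and rejected by load_chat_model as of 1.1.13.
from langchain_dev_utils.chat_models.base import (
    load_chat_model,
    register_model_provider,
)

register_model_provider(
    "vllm",
    "openai-compatible",
    base_url="http://localhost:8000/v1",
    provider_config={"support_json_mode": True},  # configure the provider here
)

model = load_chat_model("vllm:qwen3-4b")

# Passing provider_config here now raises ValueError:
# load_chat_model("vllm:qwen3-4b", provider_config={"support_json_mode": True})
```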
@@ -1,7 +1,8 @@
-import os
 from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
 
 from langchain.embeddings.base import Embeddings, _SUPPORTED_PROVIDERS, init_embeddings
+from langchain_core.utils import from_env, secret_from_env
+from pydantic import BaseModel
 
 _EMBEDDINGS_PROVIDERS_DICT = {}
 
@@ -14,6 +15,34 @@ class EmbeddingProvider(TypedDict):
     base_url: NotRequired[str]
 
 
+def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
+    """
+    Return 'base_url' if the model has a field named or aliased as 'base_url',
+    else return 'api_base' if it has a field named or aliased as 'api_base',
+    else return None.
+    The return value is always either 'base_url', 'api_base', or None.
+    """
+    model_fields = model_cls.model_fields
+
+    # try model_fields first
+    if "base_url" in model_fields:
+        return "base_url"
+
+    if "api_base" in model_fields:
+        return "api_base"
+
+    # then try aliases
+    for field_info in model_fields.values():
+        if field_info.alias == "base_url":
+            return "base_url"
+
+    for field_info in model_fields.values():
+        if field_info.alias == "api_base":
+            return "api_base"
+
+    return None
+
+
 def _parse_model_string(model_name: str) -> tuple[str, str]:
     """Parse model string into provider and model name.
 
@@ -56,7 +85,7 @@ def register_embeddings_provider(
     Args:
        provider_name: Name of the provider to register
        embeddings_model: Either an Embeddings class or a string identifier for a supported provider
-        base_url:
+        base_url: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
 
     Raises:
        ValueError: If base_url is not provided when embeddings_model is a string
@@ -77,8 +106,9 @@ def register_embeddings_provider(
        >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
        >>> embeddings.embed_query("hello world")
     """
+
+    base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(embeddings_model, str):
-        base_url = base_url or os.getenv(f"{provider_name.upper()}_API_BASE")
        if base_url is None:
            raise ValueError(
                f"base_url must be provided or set {provider_name.upper()}_API_BASE environment variable when embeddings_model is a string"
@@ -98,9 +128,19 @@ def register_embeddings_provider(
            }
        )
    else:
-
+        if base_url is not None:
+            _EMBEDDINGS_PROVIDERS_DICT.update(
+                {
+                    provider_name: {
+                        "embeddings_model": embeddings_model,
+                        "base_url": base_url,
+                    }
+                }
+            )
+        else:
+            _EMBEDDINGS_PROVIDERS_DICT.update(
+                {provider_name: {"embeddings_model": embeddings_model}}
+            )
 
 
 def batch_register_embeddings_provider(
@@ -115,7 +155,7 @@ def batch_register_embeddings_provider(
        providers: List of EmbeddingProvider dictionaries, each containing:
            - provider_name: str - Provider name
            - embeddings_model: Union[Type[Embeddings], str] - Model class or provider string
-            - base_url:
+            - base_url: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
 
     Raises:
        ValueError: If any of the providers are invalid
@@ -186,7 +226,7 @@ def load_embeddings(
        embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
        if isinstance(embeddings, str):
            if not (api_key := kwargs.get("api_key")):
-                api_key =
+                api_key = secret_from_env(f"{provider.upper()}_API_KEY", default=None)()
            if not api_key:
                raise ValueError(
                    f"API key for {provider} not found. Please set it in the environment."
@@ -203,4 +243,8 @@
                **kwargs,
            )
        else:
+            if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
+                url_key = _get_base_url_field_name(embeddings)
+                if url_key is not None:
+                    kwargs.update({url_key: base_url})
            return embeddings(model=model, **kwargs)
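The embeddings registry gets the same treatment: a registered provider can carry a `base_url` (explicit or from the `{PROVIDER_NAME}_API_BASE` environment variable), and `load_embeddings` injects it into whichever of `base_url`/`api_base` the Embeddings class exposes. A hedged sketch; `OpenAIEmbeddings` and the names used here are illustrative assumptions:

```python
# Hypothetical example mirroring the chat-model case: an Embeddings class
# registered with a base_url that load_embeddings injects at construction.
from langchain_openai import OpenAIEmbeddings
from langchain_dev_utils.embeddings.base import (
    load_embeddings,
    register_embeddings_provider,
)

register_embeddings_provider(
    "localembed", OpenAIEmbeddings, base_url="http://localhost:8000/v1"
)
# Or set LOCALEMBED_API_BASE instead of passing base_url explicitly.

embeddings = load_embeddings("localembed:qwen3-embedding-4b")
vector = embeddings.embed_query("hello world")
```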
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.1.11
+Version: 1.1.13
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -61,7 +61,7 @@ Mainly consists of the following two functions:
 
 - `provider_name`: Model provider name, used as an identifier for subsequent model loading
 - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
-- `base_url`: API address of the model provider (optional, valid when `chat_model` is a string and is "openai-compatible")
+- `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
 - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
 
 `load_chat_model` parameter description:
@@ -101,7 +101,7 @@ Mainly consists of the following two functions:
 
 - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
 - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
-- `base_url`: API address of the model provider (optional, valid when `embeddings_model` is a string and is "openai-compatible")
+- `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
 
 `load_embeddings` parameter description:
 
@@ -1,24 +1,24 @@
-langchain_dev_utils/__init__.py,sha256=
+langchain_dev_utils/__init__.py,sha256=fHD3CDKZLpB_vekISdHB54mQUzaieloAPZEREmkbiRQ,23
 langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_dev_utils/agents/__init__.py,sha256=e17SMQdJIQngbUCr2N1tY-yw0tD3tEnH7PSvyDmVPeQ,127
 langchain_dev_utils/agents/factory.py,sha256=pQeqz_ZlU43Os5gKlRu5-iCLTslPWEqWJzuGxpKhcRo,3904
 langchain_dev_utils/agents/file_system.py,sha256=S6RUEmQI2eerW0gBQp0IP0X5ak5FwvqgIGRiycr2iyw,8468
 langchain_dev_utils/agents/plan.py,sha256=ydJuJLlNydheQvLPl2uCc3TBVv42YxGzPhKgtldIdIk,6497
-langchain_dev_utils/agents/wrap.py,sha256=
+langchain_dev_utils/agents/wrap.py,sha256=4BWksU9DRz8c3ZHQiUi4GHwGhNysDLNs8pmLWV7BeAI,5165
 langchain_dev_utils/agents/middleware/__init__.py,sha256=cjrb8Rue5uukl9pKPF7CjSrHtcYsUBj3Mdvv2szlp7E,679
 langchain_dev_utils/agents/middleware/model_fallback.py,sha256=cvTj_sOw3r4B4ErMAVdsrniMImWnUpLMECmQErxdsUU,1688
 langchain_dev_utils/agents/middleware/model_router.py,sha256=YkaPpYmIZaGj--YlUjm7dVcNzRt3Au317eor4SDYsQs,8799
-langchain_dev_utils/agents/middleware/plan.py,sha256=
+langchain_dev_utils/agents/middleware/plan.py,sha256=pVABuihOo-TGuPwJA_AdpBa6eodbdZalXozl_YcMsHc,15198
 langchain_dev_utils/agents/middleware/summarization.py,sha256=Ws-_cxSQQfa5rn5Spq1gSLpgIleUCno3QmWRvN4-u9E,2213
 langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=u9rV24yUB-dyc1uUfUe74B1wOGVI3TZRwxkE1bvGm18,2025
 langchain_dev_utils/agents/middleware/tool_selection.py,sha256=ZqdyK4Yhp2u3GM6B_D6U7Srca9vy1o7s6N_LrV24-dQ,3107
 langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
-langchain_dev_utils/chat_models/base.py,sha256=
-langchain_dev_utils/chat_models/types.py,sha256=
+langchain_dev_utils/chat_models/base.py,sha256=BagUNjqWwTZ2vJ-uHPQ0vyC6nYXOdFJidV_73jlPFG8,11232
+langchain_dev_utils/chat_models/types.py,sha256=oPXFsfho9amnwek5v3ey8LcnsfKVzecWSJcKVBG4ETc,261
 langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=
+langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=6ZTRCFqgW8fk8nbZs0OarmuHP5M6wr-0mbFogZuLTWY,18409
 langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
-langchain_dev_utils/embeddings/base.py,sha256=
+langchain_dev_utils/embeddings/base.py,sha256=OFXgaLO6DsadSITUmtrDvJg_-042lrrDwY5vnS9_do8,9574
 langchain_dev_utils/message_convert/__init__.py,sha256=xwjaQ1oJoc80xy70oQI4uW3gAmgV5JymJd5hgnA6s3g,458
 langchain_dev_utils/message_convert/content.py,sha256=ApmQ7fUUBO3Ihjm2hYSWd4GrU_CvrjbWla-MA7DAFRc,7758
 langchain_dev_utils/message_convert/format.py,sha256=fh4GyyuZBTMrHeCEwdu9fOh5n8tdli1vDF44jK1i-tI,2373
@@ -29,7 +29,7 @@ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV
 langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
 langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=nbaON9806pv5tpMRQUA_Ch3HJA5HBFgzZR7kQRf6PiY,9819
 langchain_dev_utils/tool_calling/utils.py,sha256=3cNv_Zx32KxdsGn8IkxjWUzxYEEwVJeJgTZTbfSg0pA,2751
-langchain_dev_utils-1.1.
-langchain_dev_utils-1.1.
-langchain_dev_utils-1.1.
-langchain_dev_utils-1.1.
+langchain_dev_utils-1.1.13.dist-info/METADATA,sha256=m205M6P2wNSDHHHNCVuSE-1SBMD1oyiMHRYYwHbJyEA,16264
+langchain_dev_utils-1.1.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langchain_dev_utils-1.1.13.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+langchain_dev_utils-1.1.13.dist-info/RECORD,,
{langchain_dev_utils-1.1.11.dist-info → langchain_dev_utils-1.1.13.dist-info}/WHEEL
RENAMED
File without changes
{langchain_dev_utils-1.1.11.dist-info → langchain_dev_utils-1.1.13.dist-info}/licenses/LICENSE
RENAMED
File without changes