lm-deluge 0.0.67__py3-none-any.whl → 0.0.88__py3-none-any.whl
This diff shows the content of publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of lm-deluge has been flagged as possibly problematic.
- lm_deluge/__init__.py +25 -2
- lm_deluge/api_requests/anthropic.py +92 -17
- lm_deluge/api_requests/base.py +47 -11
- lm_deluge/api_requests/bedrock.py +7 -4
- lm_deluge/api_requests/chat_reasoning.py +4 -0
- lm_deluge/api_requests/gemini.py +138 -18
- lm_deluge/api_requests/openai.py +114 -21
- lm_deluge/client.py +282 -49
- lm_deluge/config.py +15 -3
- lm_deluge/mock_openai.py +643 -0
- lm_deluge/models/__init__.py +12 -1
- lm_deluge/models/anthropic.py +17 -2
- lm_deluge/models/arcee.py +16 -0
- lm_deluge/models/deepseek.py +36 -4
- lm_deluge/models/google.py +29 -0
- lm_deluge/models/grok.py +24 -0
- lm_deluge/models/kimi.py +36 -0
- lm_deluge/models/minimax.py +10 -0
- lm_deluge/models/openai.py +100 -0
- lm_deluge/models/openrouter.py +86 -8
- lm_deluge/models/together.py +11 -0
- lm_deluge/models/zai.py +1 -0
- lm_deluge/pipelines/gepa/__init__.py +95 -0
- lm_deluge/pipelines/gepa/core.py +354 -0
- lm_deluge/pipelines/gepa/docs/samples.py +696 -0
- lm_deluge/pipelines/gepa/examples/01_synthetic_keywords.py +140 -0
- lm_deluge/pipelines/gepa/examples/02_gsm8k_math.py +261 -0
- lm_deluge/pipelines/gepa/examples/03_hotpotqa_multihop.py +300 -0
- lm_deluge/pipelines/gepa/examples/04_batch_classification.py +271 -0
- lm_deluge/pipelines/gepa/examples/simple_qa.py +129 -0
- lm_deluge/pipelines/gepa/optimizer.py +435 -0
- lm_deluge/pipelines/gepa/proposer.py +235 -0
- lm_deluge/pipelines/gepa/util.py +165 -0
- lm_deluge/{llm_tools → pipelines}/score.py +2 -2
- lm_deluge/{llm_tools → pipelines}/translate.py +5 -3
- lm_deluge/prompt.py +224 -40
- lm_deluge/request_context.py +7 -2
- lm_deluge/tool/__init__.py +1118 -0
- lm_deluge/tool/builtin/anthropic/__init__.py +300 -0
- lm_deluge/tool/builtin/gemini.py +59 -0
- lm_deluge/tool/builtin/openai.py +74 -0
- lm_deluge/tool/cua/__init__.py +173 -0
- lm_deluge/tool/cua/actions.py +148 -0
- lm_deluge/tool/cua/base.py +27 -0
- lm_deluge/tool/cua/batch.py +215 -0
- lm_deluge/tool/cua/converters.py +466 -0
- lm_deluge/tool/cua/kernel.py +702 -0
- lm_deluge/tool/cua/trycua.py +989 -0
- lm_deluge/tool/prefab/__init__.py +45 -0
- lm_deluge/tool/prefab/batch_tool.py +156 -0
- lm_deluge/tool/prefab/docs.py +1119 -0
- lm_deluge/tool/prefab/email.py +294 -0
- lm_deluge/tool/prefab/filesystem.py +1711 -0
- lm_deluge/tool/prefab/full_text_search/__init__.py +285 -0
- lm_deluge/tool/prefab/full_text_search/tantivy_index.py +396 -0
- lm_deluge/tool/prefab/memory.py +458 -0
- lm_deluge/tool/prefab/otc/__init__.py +165 -0
- lm_deluge/tool/prefab/otc/executor.py +281 -0
- lm_deluge/tool/prefab/otc/parse.py +188 -0
- lm_deluge/tool/prefab/random.py +212 -0
- lm_deluge/tool/prefab/rlm/__init__.py +296 -0
- lm_deluge/tool/prefab/rlm/executor.py +349 -0
- lm_deluge/tool/prefab/rlm/parse.py +144 -0
- lm_deluge/tool/prefab/sandbox.py +1621 -0
- lm_deluge/tool/prefab/sheets.py +385 -0
- lm_deluge/tool/prefab/subagents.py +233 -0
- lm_deluge/tool/prefab/todos.py +342 -0
- lm_deluge/tool/prefab/tool_search.py +169 -0
- lm_deluge/tool/prefab/web_search.py +199 -0
- lm_deluge/tracker.py +16 -13
- lm_deluge/util/schema.py +412 -0
- lm_deluge/warnings.py +8 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/METADATA +22 -9
- lm_deluge-0.0.88.dist-info/RECORD +117 -0
- lm_deluge/built_in_tools/anthropic/__init__.py +0 -128
- lm_deluge/built_in_tools/openai.py +0 -28
- lm_deluge/presets/cerebras.py +0 -17
- lm_deluge/presets/meta.py +0 -13
- lm_deluge/tool.py +0 -849
- lm_deluge-0.0.67.dist-info/RECORD +0 -72
- lm_deluge/{llm_tools → pipelines}/__init__.py +1 -1
- /lm_deluge/{llm_tools → pipelines}/classify.py +0 -0
- /lm_deluge/{llm_tools → pipelines}/extract.py +0 -0
- /lm_deluge/{llm_tools → pipelines}/locate.py +0 -0
- /lm_deluge/{llm_tools → pipelines}/ocr.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/anthropic/bash.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/anthropic/computer_use.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/anthropic/editor.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/base.py +0 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/top_level.txt +0 -0
lm_deluge/tool/cua/actions.py
@@ -0,0 +1,148 @@
+from typing import Any, List, Literal, TypedDict, Union
+
+Coord = tuple[int, int]
+
+
+class CUActionBase(TypedDict):
+    kind: str | Any  # discriminator
+
+
+class Click(CUActionBase):
+    kind: Literal["click"]
+    x: int | None  # if missing, current cursor position
+    y: int | None
+    button: Literal["left", "right", "middle", "back", "forward"]
+
+
+class DoubleClick(CUActionBase):
+    kind: Literal["double_click"]
+    x: int | None  # if missing, current cursor position
+    y: int | None
+
+
+class Move(CUActionBase):
+    kind: Literal["move"]
+    x: int
+    y: int
+
+
+class Drag(CUActionBase):
+    kind: Literal["drag"]
+    start_x: int | None  # if missing, current cursor position
+    start_y: int | None  # if missing, current cursor position
+    path: List[Coord]  # path to drag after mousedown
+
+
+class Scroll(CUActionBase):
+    kind: Literal["scroll"]
+    x: int | None  # if not provided, current cursor position
+    y: int | None  # if not provided, current cursor position
+    dx: int  # scroll_x in OpenAI
+    dy: int  # scroll_y in OpenAI
+
+
+class Keypress(CUActionBase):
+    kind: Literal["keypress"]
+    keys: List[str]
+
+
+class Type(CUActionBase):
+    kind: Literal["type"]
+    text: str
+
+
+class Wait(CUActionBase):
+    kind: Literal["wait"]
+    ms: int
+
+
+class Screenshot(CUActionBase):
+    kind: Literal["screenshot"]
+
+
+class MouseDown(CUActionBase):
+    kind: Literal["mouse_down"]
+    button: Literal["left", "right", "middle", "back", "forward"]
+
+
+class MouseUp(CUActionBase):
+    kind: Literal["mouse_up"]
+    button: Literal["left", "right", "middle", "back", "forward"]
+
+
+class CursorPos(CUActionBase):
+    kind: Literal["cursor_position"]
+
+
+class HoldKey(CUActionBase):
+    kind: Literal["hold_key"]
+    key: str
+    ms: int  # duration
+
+
+class TripleClick(CUActionBase):
+    kind: Literal["triple_click"]
+    x: int | None  # if missing, current cursor position
+    y: int | None
+
+
+# ── Browser‑level actions ────────────────────────────────────────────
+class Navigate(CUActionBase):
+    kind: Literal["navigate"]
+    url: str
+
+
+class GoBack(CUActionBase):
+    kind: Literal["go_back"]
+
+
+class GoForward(CUActionBase):
+    kind: Literal["go_forward"]
+
+
+class Search(CUActionBase):
+    kind: Literal["search"]
+    query: str
+
+
+# ── Bash / Editor (provider‑independent) ────────────────────────────
+class Bash(CUActionBase):
+    kind: Literal["bash"]
+    command: str | None
+    restart: bool | None
+
+
+class Edit(CUActionBase):
+    kind: Literal["edit"]
+    command: Literal["view", "create", "str_replace", "insert", "undo_edit"]
+    path: str
+    # optional, keep names identical to Anthropic spec
+    file_text: str | None
+    view_range: List[int] | None
+    old_str: str | None
+    new_str: str | None
+    insert_line: int | None
+
+
+CUAction = Union[
+    Click,
+    DoubleClick,
+    TripleClick,
+    MouseDown,
+    MouseUp,
+    Drag,
+    Move,
+    Scroll,
+    Keypress,
+    Type,
+    HoldKey,
+    Wait,
+    Screenshot,
+    CursorPos,
+    Navigate,
+    GoBack,
+    GoForward,
+    Search,
+    Bash,
+    Edit,
+]
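For orientation, here is a minimal usage sketch (not part of the diff) of the CUAction TypedDicts above. It assumes the module is importable as lm_deluge.tool.cua.actions, matching the file listing; the describe helper is hypothetical.

    from lm_deluge.tool.cua.actions import Click, CUAction, Keypress

    def describe(action: CUAction) -> str:
        # TypedDicts are ordinary dicts at runtime, so dispatch on the "kind" discriminator.
        if action["kind"] == "click":
            return f"{action['button']} click at ({action['x']}, {action['y']})"
        if action["kind"] == "keypress":
            return "press " + "+".join(action["keys"])
        return action["kind"]

    click: Click = {"kind": "click", "x": 100, "y": 200, "button": "left"}
    combo: Keypress = {"kind": "keypress", "keys": ["ctrl", "l"]}
    print(describe(click))  # left click at (100, 200)
    print(describe(combo))  # press ctrl+l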
lm_deluge/tool/cua/base.py
@@ -0,0 +1,27 @@
+import abc
+from typing import TypedDict
+
+from .actions import CUAction
+
+
+class Screenshot(TypedDict):
+    media_type: str
+    content: bytes
+
+
+class CUActionResult(TypedDict):
+    screenshot: Screenshot | None
+    data: dict  # for structured metadata
+
+
+class ComputerExecutor(abc.ABC):
+    """
+    A computer executor is any class that can take an action (from actions.py)
+    and "execute" it. This allows us to plug any API provider (OpenAI, Anthropic)
+    into any computer-use backend (BrowserBase, Kernel, Modal sandbox) by:
+    - Mapping each provider's tools to some (sub)set of CUActions
+    - Defining how to run each CUAction on that backend
+    """
+
+    def execute(self, action: CUAction) -> CUActionResult:
+        raise NotImplementedError("Subclasses must implement execute method")
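To make the ComputerExecutor contract concrete, a hypothetical subclass might look like the sketch below. NoOpExecutor is illustrative only and not part of the package; the import paths mirror the file listing above.

    from lm_deluge.tool.cua.actions import CUAction
    from lm_deluge.tool.cua.base import ComputerExecutor, CUActionResult

    class NoOpExecutor(ComputerExecutor):
        # Illustrative backend that records actions instead of driving a real machine.
        def execute(self, action: CUAction) -> CUActionResult:
            return {"screenshot": None, "data": {"echoed": dict(action)}}

    result = NoOpExecutor().execute({"kind": "wait", "ms": 500})
    print(result["data"])  # {'echoed': {'kind': 'wait', 'ms': 500}}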
lm_deluge/tool/cua/batch.py
@@ -0,0 +1,215 @@
+"""
+Batch tool for computer use actions.
+
+Allows Claude to submit multiple computer actions in a single tool call,
+executing them sequentially and returning only one screenshot at the end.
+This dramatically reduces roundtrips for common action sequences like:
+- Ctrl+L → type URL → Return → wait → screenshot
+"""
+
+from __future__ import annotations
+
+import base64
+from typing import Any
+
+from .. import Tool
+from .converters import anthropic_tool_call_to_action
+
+
+# Define the action schema matching Anthropic's computer tool
+ACTION_SCHEMA = {
+    "type": "object",
+    "properties": {
+        "action": {
+            "type": "string",
+            "enum": [
+                "screenshot",
+                "key",
+                "type",
+                "mouse_move",
+                "left_click",
+                "left_click_drag",
+                "right_click",
+                "middle_click",
+                "double_click",
+                "triple_click",
+                "scroll",
+                "wait",
+                "cursor_position",
+            ],
+            "description": "The action to perform",
+        },
+        "text": {
+            "type": "string",
+            "description": "For 'key' action: key combo like 'Return', 'ctrl+l'. For 'type' action: text to type.",
+        },
+        "coordinate": {
+            "type": "array",
+            "items": {"type": "integer"},
+            "minItems": 2,
+            "maxItems": 2,
+            "description": "For click/move actions: [x, y] coordinates",
+        },
+        "scroll_direction": {
+            "type": "string",
+            "enum": ["up", "down", "left", "right"],
+            "description": "For scroll action: direction to scroll",
+        },
+        "scroll_amount": {
+            "type": "integer",
+            "description": "For scroll action: number of scroll clicks",
+        },
+        "duration": {
+            "type": "number",
+            "description": "For wait action: seconds to wait",
+        },
+    },
+    "required": ["action"],
+}
+
+
+def create_computer_batch_tool(
+    executor,  # AsyncKernelExecutor or similar
+    *,
+    tool_name: str = "computer_batch",
+    include_final_screenshot: bool = True,
+) -> Tool:
+    """
+    Create a batch tool for computer use actions.
+
+    This tool allows Claude to submit multiple actions in one call:
+    - Actions execute sequentially
+    - Only one screenshot is returned at the end (if requested)
+    - Dramatically reduces API roundtrips
+
+    Args:
+        executor: The computer executor (e.g., AsyncKernelExecutor)
+        tool_name: Name for the batch tool
+        include_final_screenshot: Whether to always include a screenshot at the end
+
+    Returns:
+        A Tool that can be passed to the LLM
+
+    Example:
+        executor = AsyncKernelExecutor(session_id)
+        batch_tool = create_computer_batch_tool(executor)
+
+        # Claude can now call:
+        # computer_batch(actions=[
+        #     {"action": "key", "text": "ctrl+l"},
+        #     {"action": "type", "text": "https://example.com"},
+        #     {"action": "key", "text": "Return"},
+        #     {"action": "wait", "duration": 2},
+        #     {"action": "screenshot"}
+        # ])
+    """
+
+    async def run_batch(actions: list[dict[str, Any]]) -> str | list:
+        """Execute a batch of computer actions and return results."""
+        from ...image import Image
+        from ...prompt import Text
+
+        results = []
+        final_screenshot = None
+
+        for i, action_args in enumerate(actions):
+            action_name = action_args.get("action", "unknown")
+
+            try:
+                # Convert Anthropic format to CUAction
+                cu_action = anthropic_tool_call_to_action(action_args)
+
+                # Execute the action
+                result = await executor.execute(cu_action)
+
+                # Track if this was a screenshot
+                if result.get("screenshot"):
+                    final_screenshot = result["screenshot"]
+                    results.append(
+                        {
+                            "action": action_name,
+                            "status": "ok",
+                            "has_screenshot": True,
+                        }
+                    )
+                else:
+                    results.append(
+                        {
+                            "action": action_name,
+                            "status": "ok",
+                        }
+                    )
+
+            except Exception as e:
+                results.append(
+                    {
+                        "action": action_name,
+                        "status": "error",
+                        "error": str(e),
+                    }
+                )
+                # Stop on error
+                break
+
+        # If we should include a final screenshot and don't have one yet, take one
+        if include_final_screenshot and final_screenshot is None:
+            try:
+                from .actions import Screenshot
+
+                result = await executor.execute(Screenshot(kind="screenshot"))
+                if result.get("screenshot"):
+                    final_screenshot = result["screenshot"]
+            except Exception:
+                pass
+
+        # Build the response
+        summary = f"Executed {len(results)} actions. "
+        errors = [r for r in results if r.get("status") == "error"]
+        if errors:
+            summary += f"{len(errors)} failed: {errors[0].get('error', 'unknown')}"
+        else:
+            summary += "All succeeded."
+
+        if final_screenshot:
+            # Return Text + Image (proper ToolResultPart types)
+            screenshot_bytes = final_screenshot["content"]
+            b64 = base64.b64encode(screenshot_bytes).decode()
+            img = Image(data=f"data:image/png;base64,{b64}")
+            return [Text(summary), img]
+        else:
+            # Just return text summary
+            return summary
+
+    description = """Execute multiple computer actions in a single call.
+This is much faster than calling actions one at a time.
+Actions run sequentially. A screenshot is taken at the end.
+
+Common patterns:
+- Navigate to URL: [{"action":"key","text":"ctrl+l"}, {"action":"type","text":"https://..."}, {"action":"key","text":"Return"}, {"action":"wait","duration":2}]
+- Click and type: [{"action":"left_click","coordinate":[x,y]}, {"action":"type","text":"..."}]
+- Scroll and screenshot: [{"action":"scroll","coordinate":[x,y],"scroll_direction":"down","scroll_amount":3}]
+
+Available actions:
+- screenshot: Capture the screen
+- key: Press key combo (text="Return", "ctrl+l", "ctrl+a", etc.)
+- type: Type text (text="hello world")
+- left_click, right_click, middle_click, double_click, triple_click: Click at coordinate=[x,y]
+- mouse_move: Move cursor to coordinate=[x,y]
+- scroll: Scroll at coordinate=[x,y] with scroll_direction and scroll_amount
+- wait: Pause for duration seconds
+"""
+
+    return Tool(
+        name=tool_name,
+        description=description,
+        parameters={
+            "actions": {
+                "type": "array",
+                "description": "List of actions to execute in order",
+                "items": ACTION_SCHEMA,
+                "minItems": 1,
+            }
+        },
+        required=["actions"],
+        run=run_batch,
+    )
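As a rough sketch of how the batch tool could be exercised end to end (not part of the diff): FakeExecutor below is a stand-in for AsyncKernelExecutor, and the call assumes the returned Tool keeps the run coroutine it was constructed with accessible as an attribute.

    import asyncio

    from lm_deluge.tool.cua.batch import create_computer_batch_tool

    class FakeExecutor:
        # Stand-in for AsyncKernelExecutor: pretends every action succeeds.
        async def execute(self, action):
            if action["kind"] == "screenshot":
                return {"screenshot": {"media_type": "image/png", "content": b"..."}, "data": {}}
            return {"screenshot": None, "data": {}}

    async def main():
        tool = create_computer_batch_tool(FakeExecutor(), include_final_screenshot=False)
        # Assumes Tool exposes the `run` callable passed to its constructor.
        summary = await tool.run(
            actions=[
                {"action": "key", "text": "ctrl+l"},
                {"action": "type", "text": "https://example.com"},
                {"action": "key", "text": "Return"},
            ]
        )
        print(summary)  # e.g. "Executed 3 actions. All succeeded."

    asyncio.run(main())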