@orchagent/cli 0.3.85 → 0.3.87
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/agent-keys.js +21 -7
- package/dist/commands/agents.js +60 -5
- package/dist/commands/config.js +4 -0
- package/dist/commands/delete.js +3 -9
- package/dist/commands/dev.js +226 -0
- package/dist/commands/diff.js +418 -0
- package/dist/commands/estimate.js +105 -0
- package/dist/commands/fork.js +11 -1
- package/dist/commands/health.js +226 -0
- package/dist/commands/index.js +8 -0
- package/dist/commands/info.js +75 -0
- package/dist/commands/init.js +729 -38
- package/dist/commands/publish.js +244 -22
- package/dist/commands/run.js +275 -29
- package/dist/commands/schedule.js +25 -8
- package/dist/commands/skill.js +3 -3
- package/dist/commands/test.js +68 -1
- package/dist/lib/api.js +29 -4
- package/dist/lib/batch-publish.js +223 -0
- package/dist/lib/dev-server.js +425 -0
- package/dist/lib/doctor/checks/environment.js +1 -1
- package/dist/lib/key-store.js +121 -0
- package/dist/lib/spinner.js +50 -0
- package/dist/lib/test-mock-runner.js +334 -0
- package/dist/lib/update-notifier.js +1 -1
- package/package.json +1 -1
- package/src/resources/__pycache__/agent_runner.cpython-311.pyc +0 -0
- package/src/resources/__pycache__/agent_runner.cpython-312.pyc +0 -0
- package/src/resources/__pycache__/test_agent_runner_mocks.cpython-311-pytest-9.0.2.pyc +0 -0
- package/src/resources/__pycache__/test_agent_runner_mocks.cpython-312-pytest-8.4.2.pyc +0 -0
- package/src/resources/agent_runner.py +29 -2
- package/src/resources/test_agent_runner_mocks.py +290 -0
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tests for mock tool support in agent_runner.py.
|
|
3
|
+
|
|
4
|
+
IDEA-002: Validates that dispatch_tool correctly returns mock responses
|
|
5
|
+
for custom tools when mock_tools map is provided, and that built-in
|
|
6
|
+
tools are never mocked.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import os
|
|
11
|
+
import sys
|
|
12
|
+
import tempfile
|
|
13
|
+
|
|
14
|
+
# Add the resources directory to path so we can import agent_runner
|
|
15
|
+
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
16
|
+
|
|
17
|
+
# We need to test dispatch_tool in isolation, so import it directly
|
|
18
|
+
from agent_runner import dispatch_tool, execute_bash
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def test_mock_tool_returns_dict_response():
    """A dict mock response comes back serialized as a JSON string."""
    mocks = {"scan_secrets": {"findings": [{"type": "hardcoded_key"}]}}
    tools = [{"name": "scan_secrets", "command": "echo should_not_run"}]

    output, submitted = dispatch_tool("scan_secrets", {"path": "/code"}, tools, mocks)

    assert submitted is False
    assert json.loads(output) == {"findings": [{"type": "hardcoded_key"}]}
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def test_mock_tool_returns_string_response():
    """A string mock response is handed back verbatim, not JSON-encoded."""
    mocks = {"scan_secrets": "raw string response"}
    tools = [{"name": "scan_secrets", "command": "echo should_not_run"}]

    output, submitted = dispatch_tool("scan_secrets", {}, tools, mocks)

    assert submitted is False
    assert output == "raw string response"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def test_mock_tool_returns_list_response():
    """A list mock response is serialized to a JSON array string."""
    mocks = {"scan_deps": [{"name": "lodash", "severity": "high"}]}
    tools = [{"name": "scan_deps", "command": "echo should_not_run"}]

    output, submitted = dispatch_tool("scan_deps", {"path": "."}, tools, mocks)

    assert submitted is False
    decoded = json.loads(output)
    assert len(decoded) == 1
    assert decoded[0]["name"] == "lodash"
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def test_mock_tool_returns_null_response():
    """A None mock response is serialized to JSON null."""
    mocks = {"scan_secrets": None}
    tools = [{"name": "scan_secrets", "command": "echo should_not_run"}]

    output, submitted = dispatch_tool("scan_secrets", {}, tools, mocks)

    assert submitted is False
    assert json.loads(output) is None
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def test_unmocked_custom_tool_executes_normally():
    """A custom tool absent from the mock map still runs its real command."""
    mocks = {"scan_secrets": {"findings": []}}
    tools = [
        {"name": "scan_secrets", "command": "echo should_not_run"},
        {"name": "real_tool", "command": "echo real_output"},
    ]

    output, submitted = dispatch_tool("real_tool", {}, tools, mocks)

    assert submitted is False
    assert "real_output" in output
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def test_builtin_bash_not_mocked():
    """The built-in bash tool always runs for real, even when listed in mocks."""
    mocks = {"bash": {"should": "not be returned"}}

    output, submitted = dispatch_tool("bash", {"command": "echo hello_from_bash"}, [], mocks)

    assert submitted is False
    assert "hello_from_bash" in output
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def test_builtin_read_file_not_mocked():
    """The built-in read_file tool ignores mock entries and reads from disk."""
    mocks = {"read_file": "should not be returned"}

    # Write a real file for the tool to read back.
    handle, temp_path = tempfile.mkstemp(suffix=".txt")
    try:
        with os.fdopen(handle, "w") as fh:
            fh.write("real file content")

        output, submitted = dispatch_tool("read_file", {"path": temp_path}, [], mocks)
        assert submitted is False
        assert "real file content" in output
    finally:
        os.unlink(temp_path)
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def test_builtin_write_file_not_mocked():
    """The built-in write_file tool ignores mock entries and writes to disk."""
    mocks = {"write_file": "should not be returned"}

    # Reserve a temp path for the tool to write into.
    handle, temp_path = tempfile.mkstemp(suffix=".txt")
    os.close(handle)

    try:
        output, submitted = dispatch_tool(
            "write_file",
            {"path": temp_path, "content": "test content"},
            [],
            mocks,
        )
        assert submitted is False
        assert "Successfully wrote" in output
        with open(temp_path) as fh:
            assert fh.read() == "test content"
    finally:
        os.unlink(temp_path)
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def test_builtin_list_files_not_mocked():
    """The built-in list_files tool ignores mock entries."""
    mocks = {"list_files": "should not be returned"}

    output, submitted = dispatch_tool("list_files", {"path": "."}, [], mocks)

    assert submitted is False
    assert output != "should not be returned"
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def test_submit_result_not_mocked():
    """The submit_result tool ignores mock entries and signals submission."""
    mocks = {"submit_result": "should not be returned"}

    output, submitted = dispatch_tool(
        "submit_result", {"result": "final answer"}, [], mocks
    )

    assert submitted is True
    assert json.loads(output) == {"result": "final answer"}
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def test_no_mock_tools_dispatches_normally():
    """With mock_tools=None, custom tools run their real commands."""
    tools = [{"name": "my_tool", "command": "echo normal_execution"}]

    output, submitted = dispatch_tool("my_tool", {}, tools, None)

    assert submitted is False
    assert "normal_execution" in output
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def test_empty_mock_tools_dispatches_normally():
    """With an empty mock map, custom tools run their real commands."""
    tools = [{"name": "my_tool", "command": "echo normal_execution"}]

    output, submitted = dispatch_tool("my_tool", {}, tools, {})

    assert submitted is False
    assert "normal_execution" in output
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def test_unknown_tool_returns_error():
    """A name matching no built-in, custom, or mocked tool yields an error string."""
    mocks = {"other_tool": {"data": "mock"}}
    tools = [{"name": "other_tool", "command": "echo test"}]

    output, submitted = dispatch_tool("nonexistent_tool", {}, tools, mocks)

    assert submitted is False
    assert "[ERROR]" in output
    assert "nonexistent_tool" in output
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def test_mock_takes_priority_over_real_command():
    """A mock entry shadows the real command of the same custom tool."""
    mocks = {"scan_tool": {"mocked": True}}
    tools = [{"name": "scan_tool", "command": "echo REAL_COMMAND_EXECUTED"}]

    output, submitted = dispatch_tool("scan_tool", {"input": "test"}, tools, mocks)

    assert submitted is False
    assert json.loads(output) == {"mocked": True}
    assert "REAL_COMMAND_EXECUTED" not in output
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
def test_mock_with_complex_nested_response():
    """Deeply nested mock structures survive the JSON round-trip intact."""
    nested = {
        "summary": "Code review complete",
        "findings": [
            {
                "type": "security",
                "severity": "critical",
                "details": {
                    "file": "app.py",
                    "line": 42,
                    "tags": ["injection", "user-input"],
                },
            }
        ],
        "metadata": {"tool_version": "1.0", "scan_time_ms": 150},
    }
    tools = [{"name": "analyze", "command": "echo noop"}]

    output, submitted = dispatch_tool("analyze", {}, tools, {"analyze": nested})

    assert submitted is False
    decoded = json.loads(output)
    assert decoded["findings"][0]["details"]["tags"] == ["injection", "user-input"]
    assert decoded["metadata"]["scan_time_ms"] == 150
266
|
+
|
|
267
|
+
|
|
268
|
+
if __name__ == "__main__":
    # Minimal runner: execute every module-level test_ function in name order.
    passed = 0
    failed = 0
    errors = []

    # Snapshot globals() once so the loop variables added below don't matter.
    for name, func in sorted(globals().items()):
        if not (name.startswith("test_") and callable(func)):
            continue
        try:
            func()
        except Exception as e:
            failed += 1
            errors.append((name, e))
            print(f" FAIL: {name} — {e}")
        else:
            passed += 1
            print(f" PASS: {name}")

    print(f"\n{passed} passed, {failed} failed")
    if errors:
        for name, err in errors:
            print(f" {name}: {err}")
        sys.exit(1)
    sys.exit(0)
|