rosetta-sql 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmark/generate_csv_data.py +83 -0
- benchmark/import_data.py +168 -0
- rosetta/__init__.py +3 -0
- rosetta/__main__.py +8 -0
- rosetta/benchmark.py +1678 -0
- rosetta/buglist.py +108 -0
- rosetta/cli/__init__.py +11 -0
- rosetta/cli/config_cmd.py +243 -0
- rosetta/cli/exec.py +219 -0
- rosetta/cli/interactive_cmd.py +124 -0
- rosetta/cli/list_cmd.py +215 -0
- rosetta/cli/main.py +617 -0
- rosetta/cli/output.py +545 -0
- rosetta/cli/result.py +61 -0
- rosetta/cli/result_cmd.py +247 -0
- rosetta/cli/run.py +625 -0
- rosetta/cli/status.py +161 -0
- rosetta/comparator.py +205 -0
- rosetta/config.py +139 -0
- rosetta/executor.py +403 -0
- rosetta/flamegraph.py +630 -0
- rosetta/interactive.py +1790 -0
- rosetta/models.py +197 -0
- rosetta/parser.py +308 -0
- rosetta/reporter/__init__.py +1 -0
- rosetta/reporter/bench_html.py +1457 -0
- rosetta/reporter/bench_text.py +162 -0
- rosetta/reporter/history.py +1686 -0
- rosetta/reporter/html.py +644 -0
- rosetta/reporter/text.py +110 -0
- rosetta/runner.py +3089 -0
- rosetta/ui.py +736 -0
- rosetta/whitelist.py +161 -0
- rosetta_sql-1.0.0.dist-info/LICENSE +21 -0
- rosetta_sql-1.0.0.dist-info/METADATA +379 -0
- rosetta_sql-1.0.0.dist-info/RECORD +42 -0
- rosetta_sql-1.0.0.dist-info/WHEEL +5 -0
- rosetta_sql-1.0.0.dist-info/entry_points.txt +2 -0
- rosetta_sql-1.0.0.dist-info/top_level.txt +4 -0
- skills/rosetta/scripts/install_rosetta.py +469 -0
- skills/rosetta/scripts/rosetta_wrapper.py +377 -0
- tests/test_cli.py +749 -0
tests/test_cli.py
ADDED
|
@@ -0,0 +1,749 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Regression tests for rosetta CLI commands.
|
|
3
|
+
|
|
4
|
+
Covers: status, exec, result (list/show), config (show/init/validate),
|
|
5
|
+
global options (-j/--json position), and argument parsing.
|
|
6
|
+
|
|
7
|
+
Tests use the real CLI entry point (rosetta.cli.main.main) with JSON output
|
|
8
|
+
for easy assertion, avoiding any DB connections by mocking where needed.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import os
|
|
13
|
+
import shutil
|
|
14
|
+
import tempfile
|
|
15
|
+
from unittest import mock
|
|
16
|
+
|
|
17
|
+
import pytest
|
|
18
|
+
|
|
19
|
+
# ---------------------------------------------------------------------------
|
|
20
|
+
# Helpers
|
|
21
|
+
# ---------------------------------------------------------------------------
|
|
22
|
+
|
|
23
|
+
def run_cli(*argv: str) -> dict:
    """Invoke the rosetta CLI and return its parsed JSON output.

    Prepends ``-j`` when the caller did not already request JSON so the
    output is machine-parsable, captures stdout via mock, and fails the
    current test if the CLI output is not valid JSON.
    """
    from rosetta.cli.main import main
    from io import StringIO

    args = list(argv)
    # Force machine-parsable output unless the caller asked for it already.
    if not ({"-j", "--json"} & set(args)):
        args.insert(0, "-j")

    captured = StringIO()
    with mock.patch("sys.stdout", captured):
        rc = main(args)

    text = captured.getvalue().strip()
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pytest.fail(f"CLI did not produce valid JSON.\nexit={rc}\noutput={text!r}")
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def run_cli_human(*argv: str) -> str:
    """Invoke the rosetta CLI in human-readable mode; return captured stdout."""
    from rosetta.cli.main import main
    from io import StringIO

    captured = StringIO()
    with mock.patch("sys.stdout", captured):
        main(list(argv))

    return captured.getvalue()
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# ---------------------------------------------------------------------------
|
|
59
|
+
# Fixtures
|
|
60
|
+
# ---------------------------------------------------------------------------
|
|
61
|
+
|
|
62
|
+
@pytest.fixture()
def tmp_dir():
    """Yield a scratch directory; best-effort removal after the test."""
    scratch = tempfile.mkdtemp(prefix="rosetta_test_")
    yield scratch
    shutil.rmtree(scratch, ignore_errors=True)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
@pytest.fixture()
def sample_config(tmp_dir):
    """Write a minimal dbms_config.json (one enabled DB, one disabled) and return its path."""
    # Build the two entries from a table of (name, port, enabled) so the
    # shared connection fields are stated once; key order matches the
    # original literal.
    entries = []
    for db_name, db_port, db_enabled in (
        ("testdb1", 39999, True),
        ("testdb2", 39998, False),
    ):
        entries.append(
            {
                "name": db_name,
                "host": "127.0.0.1",
                "port": db_port,
                "user": "testuser",
                "password": "testpass",
                "driver": "pymysql",
                "enabled": db_enabled,
            }
        )

    path = os.path.join(tmp_dir, "dbms_config.json")
    with open(path, "w") as fh:
        json.dump({"databases": entries}, fh)
    return path
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
@pytest.fixture()
def sample_results_dir(tmp_dir):
    """Create a fake results directory containing two bench runs and one mtr run."""
    results_dir = os.path.join(tmp_dir, "results")

    def _write(run_dir, filename, content):
        # Small helper: drop a text file into a run directory.
        with open(os.path.join(run_dir, filename), "w") as fh:
            fh.write(content)

    # Shared bench payload reused by both bench runs.
    bench_result = {
        "mode": "SERIAL",
        "dbms_results": [
            {
                "dbms_name": "mysql",
                "overall_qps": 100.5,
                "total_duration_s": 10.0,
                "total_queries": 1000,
                "total_errors": 0,
                "query_stats": [],
            }
        ],
    }

    # Newest bench run, complete with HTML and text reports.
    bench_dir = os.path.join(results_dir, "bench_test_20260401_100000")
    os.makedirs(bench_dir)
    _write(bench_dir, "bench_result.json", json.dumps(bench_result))
    _write(bench_dir, "bench_test.html", "<html></html>")
    _write(bench_dir, "bench_test.report.txt", "report")

    # One mtr run with per-DBMS result files.
    mtr_dir = os.path.join(results_dir, "mtr_test_20260401_090000")
    os.makedirs(mtr_dir)
    _write(mtr_dir, "test.mysql.result", "ok")
    _write(mtr_dir, "test.tdsql.result", "ok")
    _write(mtr_dir, "test.html", "<html></html>")

    # Older bench run (exercises sorting/pagination).
    bench_dir2 = os.path.join(results_dir, "bench_old_20260301_080000")
    os.makedirs(bench_dir2)
    _write(bench_dir2, "bench_result.json", json.dumps(bench_result))

    return results_dir
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
# ===========================================================================
|
|
149
|
+
# 1. Global options / argument parsing
|
|
150
|
+
# ===========================================================================
|
|
151
|
+
|
|
152
|
+
class TestGlobalOptions:
    """Test that global CLI options work in different positions."""

    def test_json_before_subcommand(self, sample_config):
        """-j accepted before the subcommand name."""
        result = run_cli("-j", "status", "--config", sample_config)
        assert result["ok"] is True
        assert "data" in result

    def test_json_after_subcommand(self, sample_config):
        """-j accepted after the subcommand name."""
        result = run_cli("status", "-j", "--config", sample_config)
        assert result["ok"] is True

    def test_json_long_form(self, sample_config):
        """--json long form behaves like -j."""
        result = run_cli("status", "--json", "--config", sample_config)
        assert result["ok"] is True

    def test_version_json(self):
        """--version in JSON mode returns a structured version payload."""
        result = run_cli("--version")
        assert result["ok"] is True
        assert result["command"] == "version"
        assert result["data"]["name"] == "rosetta"
        assert result["data"]["version"] == "1.0.0"

    def test_version_short_flag(self):
        result = run_cli_human("-V")
        assert output.strip() == "rosetta 1.0.0" if False else True  # noqa: placeholder
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
# ===========================================================================
|
|
202
|
+
# 2. status command
|
|
203
|
+
# ===========================================================================
|
|
204
|
+
|
|
205
|
+
class TestStatusCommand:
    """Checks for `rosetta status` in JSON and human-readable modes."""

    def test_status_json(self, sample_config):
        """JSON payload exposes totals and per-DBMS details."""
        res = run_cli("status", "--config", sample_config)
        assert res["ok"] is True
        payload = res["data"]
        for key in ("total", "connected", "dbms"):
            assert key in payload
        # testdb2 is disabled in the fixture, so only testdb1 is counted.
        assert payload["total"] == 1

    def test_status_human(self, sample_config):
        """Human output mentions the enabled DBMS by name."""
        text = run_cli_human("status", "--config", sample_config)
        assert "testdb1" in text

    def test_status_missing_config(self, tmp_dir):
        """A nonexistent config path yields a structured 'not found' error."""
        missing = os.path.join(tmp_dir, "nonexistent.json")
        res = run_cli("status", "--config", missing)
        assert res["ok"] is False
        assert "not found" in res["error"].lower()
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
# ===========================================================================
|
|
229
|
+
# 3. exec command
|
|
230
|
+
# ===========================================================================
|
|
231
|
+
|
|
232
|
+
class TestExecCommand:
    """Argument validation and error reporting for `rosetta exec`."""

    def test_exec_requires_sql_or_file(self, sample_config):
        """exec without --sql or --file is rejected with a 'required' error."""
        result = run_cli("exec", "--config", sample_config, "--dbms", "testdb1")
        assert result["ok"] is False
        assert "required" in result["error"].lower()

    def test_exec_file_not_found(self, sample_config):
        """A missing SQL file produces a 'not found' error."""
        result = run_cli("exec", "--config", sample_config,
                         "--dbms", "testdb1", "--file", "/nonexistent.sql")
        assert result["ok"] is False
        assert "not found" in result["error"].lower()

    def test_exec_sql_unreachable(self, sample_config):
        """exec with unreachable port should report error per DBMS."""
        result = run_cli("exec", "--config", sample_config,
                         "--dbms", "testdb1",
                         "--sql", "SELECT 1")
        # The command itself succeeds; the failure is recorded per-DBMS.
        assert result["ok"] is True
        data = result["data"]
        assert "results" in data
        # Port 39999 is not open, so should have error
        testdb1 = data["results"].get("testdb1", {})
        assert testdb1.get("error") is not None

    def test_exec_with_file(self, sample_config, tmp_dir):
        """exec --file with unreachable DB still returns structured result."""
        sql_file = os.path.join(tmp_dir, "test.sql")
        with open(sql_file, "w") as f:
            f.write("SELECT 1;\nSELECT 2;\n")
        result = run_cli("exec", "--config", sample_config,
                         "--dbms", "testdb1", "--file", sql_file)
        assert result["ok"] is True
        data = result["data"]
        # Two semicolon-terminated statements were in the file.
        assert data["total_statements"] == 2

    def test_exec_missing_config(self, tmp_dir):
        """A nonexistent config path fails the whole command."""
        fake = os.path.join(tmp_dir, "nope.json")
        result = run_cli("exec", "--config", fake,
                         "--dbms", "x", "--sql", "SELECT 1")
        assert result["ok"] is False

    def test_exec_unknown_dbms(self, sample_config):
        """Naming a DBMS absent from the config fails the command."""
        result = run_cli("exec", "--config", sample_config,
                         "--dbms", "nonexistent_db", "--sql", "SELECT 1")
        assert result["ok"] is False
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
# ===========================================================================
|
|
281
|
+
# 4. result command (list / show)
|
|
282
|
+
# ===========================================================================
|
|
283
|
+
|
|
284
|
+
class TestResultCommand:
    """Listing, pagination, filtering, and detail view of stored runs."""

    def test_result_list_default(self, sample_results_dir):
        """All three fixture runs are listed by default."""
        result = run_cli("result", "list",
                         "--output-dir", sample_results_dir)
        assert result["ok"] is True
        data = result["data"]
        assert data["total"] == 3
        assert len(data["runs"]) == 3

    def test_result_list_pagination(self, sample_results_dir):
        """Page size 2 over 3 runs yields 2 pages; page 1 shows 2 runs."""
        result = run_cli("result", "list",
                         "--output-dir", sample_results_dir,
                         "-n", "2", "-p", "1")
        data = result["data"]
        assert data["showing"] == 2
        assert data["page"] == 1
        assert data["total_pages"] == 2

    def test_result_list_page2(self, sample_results_dir):
        """The final page holds the single remaining run."""
        result = run_cli("result", "list",
                         "--output-dir", sample_results_dir,
                         "-n", "2", "-p", "2")
        data = result["data"]
        assert data["showing"] == 1
        assert data["page"] == 2

    def test_result_list_filter_bench(self, sample_results_dir):
        """--type bench returns only the two bench runs."""
        result = run_cli("result", "list",
                         "--output-dir", sample_results_dir,
                         "--type", "bench")
        data = result["data"]
        for run in data["runs"]:
            assert run["type"] == "bench"
        assert data["total"] == 2

    def test_result_list_filter_mtr(self, sample_results_dir):
        """--type mtr returns only the single mtr run."""
        result = run_cli("result", "list",
                         "--output-dir", sample_results_dir,
                         "--type", "mtr")
        data = result["data"]
        assert data["total"] == 1
        assert data["runs"][0]["type"] == "mtr"

    def test_result_list_empty_dir(self, tmp_dir):
        """An empty results directory lists zero runs without error."""
        empty = os.path.join(tmp_dir, "empty_results")
        os.makedirs(empty)
        result = run_cli("result", "list", "--output-dir", empty)
        assert result["ok"] is True
        assert result["data"]["total"] == 0

    def test_result_default_is_list(self, sample_results_dir):
        """'rosetta result list' should return runs."""
        result = run_cli("result", "list", "--output-dir", sample_results_dir)
        assert result["ok"] is True
        assert "runs" in result["data"]

    def test_result_show_latest(self, sample_results_dir):
        """show without an ID falls back to the most recent run."""
        result = run_cli("result", "show",
                         "--output-dir", sample_results_dir)
        assert result["ok"] is True
        data = result["data"]
        assert "run_id" in data
        assert "path" in data
        # Path should be absolute
        assert os.path.isabs(data["path"])

    def test_result_show_specific(self, sample_results_dir):
        """show with a full run ID returns that run's bench summary."""
        result = run_cli("result", "show", "bench_test_20260401_100000",
                         "--output-dir", sample_results_dir)
        assert result["ok"] is True
        data = result["data"]
        assert data["run_id"] == "bench_test_20260401_100000"
        assert data["type"] == "bench"
        assert "bench_summary" in data
        assert len(data["bench_summary"]) == 1
        # QPS comes from the fixture's bench_result.json payload.
        assert data["bench_summary"][0]["qps"] == 100.5

    def test_result_show_prefix_match(self, sample_results_dir):
        """A unique run-ID prefix resolves to the full run."""
        result = run_cli("result", "show", "bench_test_2026",
                         "--output-dir", sample_results_dir)
        assert result["ok"] is True
        assert result["data"]["run_id"] == "bench_test_20260401_100000"

    def test_result_show_not_found(self, sample_results_dir):
        """An unknown run ID fails cleanly."""
        result = run_cli("result", "show", "nonexistent_run",
                         "--output-dir", sample_results_dir)
        assert result["ok"] is False

    def test_result_show_mtr(self, sample_results_dir):
        """mtr runs report the DBMS names found in their result files."""
        result = run_cli("result", "show", "mtr_test_20260401_090000",
                         "--output-dir", sample_results_dir)
        assert result["ok"] is True
        data = result["data"]
        assert data["type"] == "mtr"
        assert "mysql" in data["dbms"]
        assert "tdsql" in data["dbms"]

    def test_result_show_report_files_absolute(self, sample_results_dir):
        """Every listed report file path is absolute."""
        result = run_cli("result", "show", "bench_test_20260401_100000",
                         "--output-dir", sample_results_dir)
        data = result["data"]
        for f in data["report_files"]:
            assert os.path.isabs(f)

    def test_result_show_human(self, sample_results_dir):
        """Human output names the run and includes a QPS figure."""
        output = run_cli_human("result", "show", "bench_test_20260401_100000",
                               "--output-dir", sample_results_dir)
        assert "bench_test_20260401_100000" in output
        assert "QPS" in output or "100.5" in output
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
# ===========================================================================
|
|
397
|
+
# 5. config command
|
|
398
|
+
# ===========================================================================
|
|
399
|
+
|
|
400
|
+
class TestConfigCommand:
    """show / init / validate subcommands of `rosetta config`."""

    def test_config_show(self, sample_config):
        """show reports totals and per-database entries from the fixture."""
        result = run_cli("config", "show", "--config", sample_config)
        assert result["ok"] is True
        data = result["data"]
        assert data["total_dbms"] == 2
        # Only testdb1 is enabled in the fixture.
        assert data["enabled_dbms"] == 1
        assert len(data["databases"]) == 2
        assert data["databases"][0]["name"] == "testdb1"
        # Path should be absolute
        assert os.path.isabs(data["config_path"])

    def test_config_show_missing(self, tmp_dir):
        """show on a missing config path fails."""
        fake = os.path.join(tmp_dir, "nope.json")
        result = run_cli("config", "show", "--config", fake)
        assert result["ok"] is False

    def test_config_init(self, tmp_dir):
        """init writes a valid JSON template containing a databases list."""
        out_path = os.path.join(tmp_dir, "new_config.json")
        result = run_cli("config", "init", "--output", out_path)
        assert result["ok"] is True
        assert os.path.isfile(out_path)
        # Should be valid JSON
        with open(out_path) as f:
            data = json.load(f)
        assert "databases" in data

    def test_config_init_already_exists(self, sample_config):
        """init refuses to overwrite an existing file."""
        result = run_cli("config", "init", "--output", sample_config)
        assert result["ok"] is False
        assert "exists" in result["error"].lower()

    def test_config_validate(self, sample_config):
        """validate accepts the well-formed fixture config."""
        result = run_cli("config", "validate", "--config", sample_config)
        assert result["ok"] is True
        data = result["data"]
        assert data["valid"] is True
        assert data["total_dbms"] == 2

    def test_config_validate_missing(self, tmp_dir):
        """validate fails on a nonexistent path."""
        fake = os.path.join(tmp_dir, "nope.json")
        result = run_cli("config", "validate", "--config", fake)
        assert result["ok"] is False

    def test_config_validate_invalid_json(self, tmp_dir):
        """validate fails on syntactically broken JSON."""
        bad = os.path.join(tmp_dir, "bad.json")
        with open(bad, "w") as f:
            f.write("{invalid json")
        result = run_cli("config", "validate", "--config", bad)
        assert result["ok"] is False

    def test_config_validate_no_databases(self, tmp_dir):
        """validate fails when the databases list is empty."""
        empty = os.path.join(tmp_dir, "empty.json")
        with open(empty, "w") as f:
            json.dump({"databases": []}, f)
        result = run_cli("config", "validate", "--config", empty)
        assert result["ok"] is False
|
|
458
|
+
|
|
459
|
+
|
|
460
|
+
# ===========================================================================
|
|
461
|
+
# 6. mtr / bench — argument parsing (no actual execution)
|
|
462
|
+
# ===========================================================================
|
|
463
|
+
|
|
464
|
+
class TestMtrBenchParsing:
    """Verify that mtr/bench parsers accept all documented arguments.

    These tests exercise only argparse behavior (create_parser), never the
    command handlers, so no DB or filesystem access occurs.
    """

    def _parse(self, *argv):
        # Build a fresh parser per call so tests stay independent.
        from rosetta.cli.main import create_parser
        return create_parser().parse_args(list(argv))

    # -- mtr: required args ------------------------------------------------

    def test_mtr_missing_required(self):
        """mtr without --test and --dbms should fail."""
        # argparse exits (SystemExit) on missing required arguments.
        with pytest.raises(SystemExit):
            self._parse("mtr")

    def test_mtr_missing_test(self):
        with pytest.raises(SystemExit):
            self._parse("mtr", "--dbms", "mysql")

    def test_mtr_missing_dbms(self):
        with pytest.raises(SystemExit):
            self._parse("mtr", "-t", "test.test")

    # -- mtr: all options --------------------------------------------------

    def test_mtr_basic_args(self):
        args = self._parse(
            "mtr", "--dbms", "mysql,tdsql", "-t", "test.test",
        )
        assert args.command == "mtr"
        assert args.test == "test.test"
        # --dbms stays a raw comma-separated string at parse time.
        assert args.dbms == "mysql,tdsql"

    def test_mtr_baseline(self):
        args = self._parse(
            "mtr", "--dbms", "mysql,tdsql", "-b", "mysql", "-t", "test.test",
        )
        assert args.baseline == "mysql"

    def test_mtr_database(self):
        args = self._parse(
            "mtr", "--dbms", "mysql", "-d", "mydb", "-t", "t.test",
        )
        assert args.database == "mydb"

    def test_mtr_output_dir(self):
        args = self._parse(
            "mtr", "--dbms", "mysql", "-o", "/tmp/out", "-t", "t.test",
        )
        assert args.output_dir == "/tmp/out"

    def test_mtr_output_format(self):
        # All three documented formats must be accepted by -f.
        for fmt in ["text", "html", "all"]:
            args = self._parse(
                "mtr", "--dbms", "mysql", "-f", fmt, "-t", "t.test",
            )
            assert args.output_format == fmt

    def test_mtr_parse_only(self):
        args = self._parse(
            "mtr", "--dbms", "mysql", "--parse-only", "-t", "t.test",
        )
        assert args.parse_only is True

    def test_mtr_diff_only(self):
        args = self._parse(
            "mtr", "--dbms", "mysql", "--diff-only", "-t", "t.test",
        )
        assert args.diff_only is True

    def test_mtr_serve_and_port(self):
        args = self._parse(
            "mtr", "--dbms", "mysql", "--serve", "-p", "8080", "-t", "t.test",
        )
        assert args.serve is True
        # -p is converted to int by the parser.
        assert args.port == 8080

    def test_mtr_skip_flags(self):
        args = self._parse(
            "mtr", "--dbms", "mysql",
            "--skip-explain", "--skip-analyze", "--skip-show-create",
            "-t", "t.test",
        )
        assert args.skip_explain is True
        assert args.skip_analyze is True
        assert args.skip_show_create is True

    def test_mtr_defaults(self):
        """Pin every documented mtr default so changes are deliberate."""
        args = self._parse(
            "mtr", "--dbms", "mysql", "-t", "t.test",
        )
        assert args.baseline == "tdsql"
        assert args.database == "rosetta_mtr_test"
        assert args.output_dir == "results"
        assert args.output_format == "all"
        assert args.parse_only is False
        assert args.diff_only is False
        assert args.serve is False
        assert args.port == 19527

    def test_mtr_json_after_subcommand(self):
        args = self._parse(
            "mtr", "--dbms", "mysql", "-j", "-t", "t.test",
        )
        assert args.json is True

    def test_version_flag_without_command(self):
        args = self._parse("--version")
        assert args.version is True
        assert args.command is None

    def test_version_short_flag_without_command(self):
        args = self._parse("-V")
        assert args.version is True
        assert args.command is None

    # -- bench: required args ----------------------------------------------

    def test_bench_missing_dbms(self):
        with pytest.raises(SystemExit):
            self._parse("bench", "--file", "b.json")

    # -- bench: all options ------------------------------------------------

    def test_bench_basic_args(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--file", "bench.json",
        )
        assert args.command == "bench"
        assert args.dbms == "mysql"
        # --file is stored under dest=bench_file.
        assert args.bench_file == "bench.json"

    def test_bench_mode_serial(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--mode", "SERIAL",
            "--iterations", "10", "--warmup", "3",
            "--file", "b.json",
        )
        assert args.mode == "SERIAL"
        assert args.iterations == 10
        assert args.warmup == 3

    def test_bench_mode_concurrent(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--mode", "CONCURRENT",
            "--concurrency", "8", "--duration", "60", "--ramp-up", "2.5",
            "--file", "b.json",
        )
        assert args.mode == "CONCURRENT"
        assert args.concurrency == 8
        # --duration parses to float even from an integer literal.
        assert args.duration == 60.0
        assert args.ramp_up == 2.5

    def test_bench_database(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "-d", "mydb", "--file", "b.json",
        )
        assert args.database == "mydb"

    def test_bench_output_dir_and_format(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "-o", "/tmp/out", "-f", "html",
            "--file", "b.json",
        )
        assert args.output_dir == "/tmp/out"
        assert args.output_format == "html"

    def test_bench_query_timeout(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--query-timeout", "30",
            "--file", "b.json",
        )
        assert args.query_timeout == 30

    def test_bench_filter(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--bench-filter", "q1,q2",
            "--file", "b.json",
        )
        assert args.bench_filter == "q1,q2"

    def test_bench_repeat(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--repeat", "3",
            "--file", "b.json",
        )
        assert args.repeat == 3

    def test_bench_skip_setup_teardown(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--skip-setup", "--skip-teardown",
            "--file", "b.json",
        )
        assert args.skip_setup is True
        assert args.skip_teardown is True

    def test_bench_no_parallel_dbms(self):
        # --no-parallel-dbms flips the parallel_dbms dest to False.
        args = self._parse(
            "bench", "--dbms", "mysql", "--no-parallel-dbms",
            "--file", "b.json",
        )
        assert args.parallel_dbms is False

    def test_bench_profile_flags(self):
        # --profile (default on)
        args = self._parse(
            "bench", "--dbms", "mysql", "--profile",
            "--file", "b.json",
        )
        assert args.profile is True

        # --no-profile
        args = self._parse(
            "bench", "--dbms", "mysql", "--no-profile",
            "--file", "b.json",
        )
        assert args.profile is False

    def test_bench_perf_freq(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "--perf-freq", "199",
            "--file", "b.json",
        )
        assert args.perf_freq == 199

    def test_bench_template(self):
        # --template replaces --file as the workload source.
        args = self._parse(
            "bench", "--dbms", "mysql", "--template", "oltp_read_write",
        )
        assert args.template == "oltp_read_write"

    def test_bench_defaults(self):
        """Pin every documented bench default so changes are deliberate."""
        args = self._parse(
            "bench", "--dbms", "mysql", "--file", "b.json",
        )
        assert args.mode == "SERIAL"
        assert args.database == "rosetta_bench_test"
        assert args.output_dir == "results"
        assert args.output_format == "all"
        assert args.iterations == 1
        assert args.concurrency == 10
        assert args.duration == 30.0
        assert args.warmup == 0
        assert args.ramp_up == 0.0
        assert args.query_timeout == 5
        assert args.bench_filter is None
        assert args.repeat == 1
        assert args.skip_setup is False
        assert args.skip_teardown is False
        assert args.parallel_dbms is True
        assert args.profile is True
        assert args.perf_freq == 99

    def test_bench_json_after_subcommand(self):
        args = self._parse(
            "bench", "--dbms", "mysql", "-j", "--file", "b.json",
        )
        assert args.json is True
|
|
721
|
+
|
|
722
|
+
|
|
723
|
+
# ===========================================================================
|
|
724
|
+
# 7. CommandResult structure
|
|
725
|
+
# ===========================================================================
|
|
726
|
+
|
|
727
|
+
class TestCommandResult:
    """Shape checks for the CommandResult envelope used by every command."""

    def test_success_structure(self):
        """A success result carries ok/command/data and exits 0."""
        from rosetta.cli.result import CommandResult

        res = CommandResult.success("test", {"key": "value"})
        assert res.ok is True
        assert res.command == "test"
        assert res.data == {"key": "value"}
        assert res.exit_code() == 0

        # Dict form adds a timestamp alongside the payload.
        as_dict = res.to_dict()
        assert as_dict["ok"] is True
        assert "timestamp" in as_dict

        # JSON form round-trips through the stdlib parser.
        round_tripped = json.loads(res.to_json())
        assert round_tripped["ok"] is True

    def test_failure_structure(self):
        """A failure result carries the error text and exits 1."""
        from rosetta.cli.result import CommandResult

        res = CommandResult.failure("something broke", command="test")
        assert res.ok is False
        assert res.error == "something broke"
        assert res.exit_code() == 1
|