experimaestro 2.0.0b4__py3-none-any.whl → 2.0.0b17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of experimaestro might be problematic. Click here for more details.
- experimaestro/__init__.py +12 -5
- experimaestro/cli/__init__.py +393 -134
- experimaestro/cli/filter.py +48 -23
- experimaestro/cli/jobs.py +253 -71
- experimaestro/cli/refactor.py +1 -2
- experimaestro/commandline.py +7 -4
- experimaestro/connectors/__init__.py +9 -1
- experimaestro/connectors/local.py +43 -3
- experimaestro/core/arguments.py +18 -18
- experimaestro/core/identifier.py +11 -11
- experimaestro/core/objects/config.py +96 -39
- experimaestro/core/objects/config_walk.py +3 -3
- experimaestro/core/{subparameters.py → partial.py} +16 -16
- experimaestro/core/partial_lock.py +394 -0
- experimaestro/core/types.py +12 -15
- experimaestro/dynamic.py +290 -0
- experimaestro/experiments/__init__.py +6 -2
- experimaestro/experiments/cli.py +223 -52
- experimaestro/experiments/configuration.py +24 -0
- experimaestro/generators.py +5 -5
- experimaestro/ipc.py +118 -1
- experimaestro/launcherfinder/__init__.py +2 -2
- experimaestro/launcherfinder/registry.py +6 -7
- experimaestro/launcherfinder/specs.py +2 -9
- experimaestro/launchers/slurm/__init__.py +2 -2
- experimaestro/launchers/slurm/base.py +62 -0
- experimaestro/locking.py +957 -1
- experimaestro/notifications.py +89 -201
- experimaestro/progress.py +63 -366
- experimaestro/rpyc.py +0 -2
- experimaestro/run.py +29 -2
- experimaestro/scheduler/__init__.py +8 -1
- experimaestro/scheduler/base.py +650 -53
- experimaestro/scheduler/dependencies.py +20 -16
- experimaestro/scheduler/experiment.py +764 -169
- experimaestro/scheduler/interfaces.py +338 -96
- experimaestro/scheduler/jobs.py +58 -20
- experimaestro/scheduler/remote/__init__.py +31 -0
- experimaestro/scheduler/remote/adaptive_sync.py +265 -0
- experimaestro/scheduler/remote/client.py +928 -0
- experimaestro/scheduler/remote/protocol.py +282 -0
- experimaestro/scheduler/remote/server.py +447 -0
- experimaestro/scheduler/remote/sync.py +144 -0
- experimaestro/scheduler/services.py +186 -35
- experimaestro/scheduler/state_provider.py +811 -2157
- experimaestro/scheduler/state_status.py +1247 -0
- experimaestro/scheduler/transient.py +31 -0
- experimaestro/scheduler/workspace.py +1 -1
- experimaestro/scheduler/workspace_state_provider.py +1273 -0
- experimaestro/scriptbuilder.py +4 -4
- experimaestro/settings.py +36 -0
- experimaestro/tests/conftest.py +33 -5
- experimaestro/tests/connectors/bin/executable.py +1 -1
- experimaestro/tests/fixtures/pre_experiment/experiment_check_env.py +16 -0
- experimaestro/tests/fixtures/pre_experiment/experiment_check_mock.py +14 -0
- experimaestro/tests/fixtures/pre_experiment/experiment_simple.py +12 -0
- experimaestro/tests/fixtures/pre_experiment/pre_setup_env.py +5 -0
- experimaestro/tests/fixtures/pre_experiment/pre_setup_error.py +3 -0
- experimaestro/tests/fixtures/pre_experiment/pre_setup_mock.py +8 -0
- experimaestro/tests/launchers/bin/test.py +1 -0
- experimaestro/tests/launchers/test_slurm.py +9 -9
- experimaestro/tests/partial_reschedule.py +46 -0
- experimaestro/tests/restart.py +3 -3
- experimaestro/tests/restart_main.py +1 -0
- experimaestro/tests/scripts/notifyandwait.py +1 -0
- experimaestro/tests/task_partial.py +38 -0
- experimaestro/tests/task_tokens.py +2 -2
- experimaestro/tests/tasks/test_dynamic.py +6 -6
- experimaestro/tests/test_dependencies.py +3 -3
- experimaestro/tests/test_deprecated.py +15 -15
- experimaestro/tests/test_dynamic_locking.py +317 -0
- experimaestro/tests/test_environment.py +24 -14
- experimaestro/tests/test_experiment.py +171 -36
- experimaestro/tests/test_identifier.py +25 -25
- experimaestro/tests/test_identifier_stability.py +3 -5
- experimaestro/tests/test_multitoken.py +2 -4
- experimaestro/tests/{test_subparameters.py → test_partial.py} +25 -25
- experimaestro/tests/test_partial_paths.py +81 -138
- experimaestro/tests/test_pre_experiment.py +219 -0
- experimaestro/tests/test_progress.py +2 -8
- experimaestro/tests/test_remote_state.py +1132 -0
- experimaestro/tests/test_stray_jobs.py +261 -0
- experimaestro/tests/test_tasks.py +1 -2
- experimaestro/tests/test_token_locking.py +52 -67
- experimaestro/tests/test_tokens.py +5 -6
- experimaestro/tests/test_transient.py +225 -0
- experimaestro/tests/test_workspace_state_provider.py +768 -0
- experimaestro/tests/token_reschedule.py +1 -3
- experimaestro/tests/utils.py +2 -7
- experimaestro/tokens.py +227 -372
- experimaestro/tools/diff.py +1 -0
- experimaestro/tools/documentation.py +4 -5
- experimaestro/tools/jobs.py +1 -2
- experimaestro/tui/app.py +459 -1895
- experimaestro/tui/app.tcss +162 -0
- experimaestro/tui/dialogs.py +172 -0
- experimaestro/tui/log_viewer.py +253 -3
- experimaestro/tui/messages.py +137 -0
- experimaestro/tui/utils.py +54 -0
- experimaestro/tui/widgets/__init__.py +23 -0
- experimaestro/tui/widgets/experiments.py +468 -0
- experimaestro/tui/widgets/global_services.py +238 -0
- experimaestro/tui/widgets/jobs.py +972 -0
- experimaestro/tui/widgets/log.py +156 -0
- experimaestro/tui/widgets/orphans.py +363 -0
- experimaestro/tui/widgets/runs.py +185 -0
- experimaestro/tui/widgets/services.py +314 -0
- experimaestro/tui/widgets/stray_jobs.py +528 -0
- experimaestro/utils/__init__.py +1 -1
- experimaestro/utils/environment.py +105 -22
- experimaestro/utils/fswatcher.py +124 -0
- experimaestro/utils/jobs.py +1 -2
- experimaestro/utils/jupyter.py +1 -2
- experimaestro/utils/logging.py +72 -0
- experimaestro/version.py +2 -2
- experimaestro/webui/__init__.py +9 -0
- experimaestro/webui/app.py +117 -0
- experimaestro/{server → webui}/data/index.css +66 -11
- experimaestro/webui/data/index.css.map +1 -0
- experimaestro/{server → webui}/data/index.js +82763 -87217
- experimaestro/webui/data/index.js.map +1 -0
- experimaestro/webui/routes/__init__.py +5 -0
- experimaestro/webui/routes/auth.py +53 -0
- experimaestro/webui/routes/proxy.py +117 -0
- experimaestro/webui/server.py +200 -0
- experimaestro/webui/state_bridge.py +152 -0
- experimaestro/webui/websocket.py +413 -0
- {experimaestro-2.0.0b4.dist-info → experimaestro-2.0.0b17.dist-info}/METADATA +8 -9
- experimaestro-2.0.0b17.dist-info/RECORD +219 -0
- experimaestro/cli/progress.py +0 -269
- experimaestro/scheduler/state.py +0 -75
- experimaestro/scheduler/state_db.py +0 -388
- experimaestro/scheduler/state_sync.py +0 -834
- experimaestro/server/__init__.py +0 -467
- experimaestro/server/data/index.css.map +0 -1
- experimaestro/server/data/index.js.map +0 -1
- experimaestro/tests/test_cli_jobs.py +0 -615
- experimaestro/tests/test_file_progress.py +0 -425
- experimaestro/tests/test_file_progress_integration.py +0 -477
- experimaestro/tests/test_state_db.py +0 -434
- experimaestro-2.0.0b4.dist-info/RECORD +0 -181
- /experimaestro/{server → webui}/data/1815e00441357e01619e.ttf +0 -0
- /experimaestro/{server → webui}/data/2463b90d9a316e4e5294.woff2 +0 -0
- /experimaestro/{server → webui}/data/2582b0e4bcf85eceead0.ttf +0 -0
- /experimaestro/{server → webui}/data/89999bdf5d835c012025.woff2 +0 -0
- /experimaestro/{server → webui}/data/914997e1bdfc990d0897.ttf +0 -0
- /experimaestro/{server → webui}/data/c210719e60948b211a12.woff2 +0 -0
- /experimaestro/{server → webui}/data/favicon.ico +0 -0
- /experimaestro/{server → webui}/data/index.html +0 -0
- /experimaestro/{server → webui}/data/login.html +0 -0
- /experimaestro/{server → webui}/data/manifest.json +0 -0
- {experimaestro-2.0.0b4.dist-info → experimaestro-2.0.0b17.dist-info}/WHEEL +0 -0
- {experimaestro-2.0.0b4.dist-info → experimaestro-2.0.0b17.dist-info}/entry_points.txt +0 -0
- {experimaestro-2.0.0b4.dist-info → experimaestro-2.0.0b17.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
"""Tests for the dynamic locking infrastructure.
|
|
2
|
+
|
|
3
|
+
Tests for DynamicLockFile, JobDependencyLock, and related classes.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import pytest
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from experimaestro.locking import (
|
|
11
|
+
DynamicLockFile,
|
|
12
|
+
JobDependencyLock,
|
|
13
|
+
LockError,
|
|
14
|
+
)
|
|
15
|
+
from experimaestro.tokens import TokenLockFile
|
|
16
|
+
from experimaestro.core.partial_lock import PartialLockFile
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# --- Test implementations ---
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class MockLockFile(DynamicLockFile):
    """Minimal DynamicLockFile used by the tests.

    Round-trips a single integer ``value`` through the lock file's
    ``information`` payload.
    """

    # Integer payload persisted in the lock file's "information" field.
    value: int

    def from_information(self, info) -> None:
        """Restore ``value`` from a deserialized information payload.

        ``None`` means "no payload" and falls back to 0; anything that
        is not a dict is rejected so malformed lock files fail loudly.
        """
        if info is None:
            self.value = 0
            return
        if not isinstance(info, dict):
            raise ValueError(f"Invalid information format: {info}")
        self.value = info.get("value", 0)

    def to_information(self) -> dict:
        """Serialize the payload written into the lock file."""
        return {"value": self.value}
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class MockJobLock(JobDependencyLock):
    """Mock implementation of JobDependencyLock for testing.

    Records whether :meth:`acquire` and :meth:`release` were called so
    tests can assert on the lock lifecycle.
    """

    def __init__(self, lock_file_path: Path | None = None):
        # Path of the backing lock file; None means there is no file,
        # in which case verification/release are no-ops in the base class.
        self.lock_file_path = lock_file_path
        self.acquired = False  # set True by acquire()
        self.released = False  # set True by release()

    def acquire(self) -> None:
        """Acquire the lock after verifying the lock file still exists.

        Raises:
            LockError: propagated from verify_lock_file() when the
                backing lock file is missing.
        """
        self.verify_lock_file()
        self.acquired = True

    def release(self) -> None:
        """Record the release, then delegate to the base class.

        The base-class release is expected to delete the lock file
        (exercised by the tests below).
        """
        self.released = True
        super().release()
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# --- DynamicLockFile tests ---
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class TestDynamicLockFile:
    """Behavioral tests for DynamicLockFile persistence and deletion."""

    def test_create_writes_json_file(self, tmp_path: Path):
        """create() should write a JSON file with correct structure."""
        path = tmp_path / "test.lock"
        uri = "/path/to/job"

        created = MockLockFile.create(path, uri, information={"value": 42})

        assert path.is_file()
        on_disk = json.loads(path.read_text())
        assert on_disk["job_uri"] == uri
        assert on_disk["information"] == {"value": 42}
        assert created.job_uri == uri
        assert created.value == 42

    def test_create_with_none_information(self, tmp_path: Path):
        """create() with None information should use defaults."""
        path = tmp_path / "test.lock"

        created = MockLockFile.create(path, "/path/to/job", information=None)

        assert created.value == 0
        assert json.loads(path.read_text())["information"] == {"value": 0}

    def test_load_reads_json_file(self, tmp_path: Path):
        """Loading should read JSON file correctly."""
        path = tmp_path / "test.lock"
        path.write_text(
            json.dumps({"job_uri": "/some/job", "information": {"value": 123}})
        )

        loaded = MockLockFile(path)

        assert loaded.job_uri == "/some/job"
        assert loaded.value == 123

    def test_load_missing_file(self, tmp_path: Path):
        """Loading missing file should set defaults."""
        loaded = MockLockFile(tmp_path / "nonexistent.lock")

        assert loaded.job_uri is None
        # `value` is deliberately left unset when there is no file to read.

    def test_load_invalid_information_raises(self, tmp_path: Path):
        """Loading with invalid information format should raise."""
        path = tmp_path / "test.lock"
        # "information" must be a dict; a bare string is malformed.
        path.write_text(
            json.dumps({"job_uri": "/some/job", "information": "invalid"})
        )

        with pytest.raises(ValueError, match="Invalid information format"):
            MockLockFile(path)

    def test_delete_removes_file(self, tmp_path: Path):
        """delete() should remove the lock file."""
        path = tmp_path / "test.lock"
        created = MockLockFile.create(path, "/job", information={"value": 1})

        assert path.is_file()
        created.delete()
        assert not path.is_file()

    def test_delete_missing_file_noop(self, tmp_path: Path):
        """delete() on missing file should not raise."""
        MockLockFile(tmp_path / "nonexistent.lock").delete()
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
# --- JobDependencyLock tests ---
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
class TestJobDependencyLock:
    """Tests for JobDependencyLock verification and lifecycle."""

    def test_verify_lock_file_passes_when_exists(self, tmp_path: Path):
        """verify_lock_file() should pass when file exists."""
        path = tmp_path / "test.lock"
        path.write_text("{}")

        MockJobLock(lock_file_path=path).verify_lock_file()  # must not raise

    def test_verify_lock_file_raises_when_missing(self, tmp_path: Path):
        """verify_lock_file() should raise LockError when file is missing."""
        lock = MockJobLock(lock_file_path=tmp_path / "nonexistent.lock")

        with pytest.raises(LockError, match="Lock file missing"):
            lock.verify_lock_file()

    def test_verify_lock_file_noop_when_path_none(self):
        """verify_lock_file() should be no-op when lock_file_path is None."""
        MockJobLock(lock_file_path=None).verify_lock_file()  # must not raise

    def test_release_deletes_lock_file(self, tmp_path: Path):
        """release() should delete the lock file."""
        path = tmp_path / "test.lock"
        path.write_text("{}")

        MockJobLock(lock_file_path=path).release()

        assert not path.is_file()

    def test_release_noop_when_path_none(self):
        """release() should not raise when lock_file_path is None."""
        MockJobLock(lock_file_path=None).release()  # must not raise

    def test_context_manager_acquire_release(self, tmp_path: Path):
        """Context manager should acquire on enter and release on exit."""
        path = tmp_path / "test.lock"
        path.write_text("{}")
        lock = MockJobLock(lock_file_path=path)

        with lock:
            assert lock.acquired
            assert path.is_file()

        # Exiting the context releases the lock and deletes its file.
        assert lock.released
        assert not path.is_file()

    def test_acquire_fails_if_lock_file_missing(self, tmp_path: Path):
        """acquire() should fail if lock file verification fails."""
        lock = MockJobLock(lock_file_path=tmp_path / "nonexistent.lock")

        with pytest.raises(LockError):
            lock.acquire()
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
# --- TokenLockFile tests ---
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
class TestTokenLockFile:
    """Tests for TokenLockFile serialization, including the legacy format."""

    def test_create_with_count(self, tmp_path: Path):
        """create() should store count in information."""
        path = tmp_path / "test.token"
        uri = "/path/to/job"

        created = TokenLockFile.create(path, uri, information={"count": 5})

        assert created.count == 5
        assert json.loads(path.read_text())["information"]["count"] == 5

    def test_load_json_format(self, tmp_path: Path):
        """Loading should read JSON format correctly."""
        path = tmp_path / "test.token"
        path.write_text(
            json.dumps({"job_uri": "/some/job", "information": {"count": 10}})
        )

        loaded = TokenLockFile(path)

        assert loaded.job_uri == "/some/job"
        assert loaded.count == 10

    def test_load_old_line_format(self, tmp_path: Path):
        """Loading should read old line-based format for backward compatibility."""
        path = tmp_path / "test.token"
        # Legacy layout: first line is the count, second line the job URI.
        path.write_text("7\n/old/job/path")

        loaded = TokenLockFile(path)

        assert loaded.job_uri == "/old/job/path"
        assert loaded.count == 7

    def test_to_information(self, tmp_path: Path):
        """to_information() should return count dict."""
        created = TokenLockFile.create(
            tmp_path / "test.token", "/job", information={"count": 3}
        )

        assert created.to_information() == {"count": 3}

    def test_from_information_none(self, tmp_path: Path):
        """from_information(None) should set count to 0."""
        created = TokenLockFile.create(
            tmp_path / "test.token", "/job", information=None
        )

        assert created.count == 0
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
# --- PartialLockFile tests ---
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
class TestPartialLockFile:
    """Tests for PartialLockFile serialization."""

    def test_create_with_partial_name(self, tmp_path: Path):
        """create() should store partial_name in information."""
        path = tmp_path / "holder.json"
        uri = "/path/to/job"

        created = PartialLockFile.create(
            path, uri, information={"partial_name": "checkpoints"}
        )

        assert created.partial_name == "checkpoints"
        on_disk = json.loads(path.read_text())
        assert on_disk["information"]["partial_name"] == "checkpoints"

    def test_load_json_format(self, tmp_path: Path):
        """Loading should read JSON format correctly."""
        path = tmp_path / "holder.json"
        path.write_text(
            json.dumps(
                {"job_uri": "/some/job", "information": {"partial_name": "outputs"}}
            )
        )

        loaded = PartialLockFile(path)

        assert loaded.job_uri == "/some/job"
        assert loaded.partial_name == "outputs"

    def test_to_information(self, tmp_path: Path):
        """to_information() should return partial_name dict."""
        created = PartialLockFile.create(
            tmp_path / "holder.json", "/job", information={"partial_name": "data"}
        )

        assert created.to_information() == {"partial_name": "data"}

    def test_from_information_none(self, tmp_path: Path):
        """from_information(None) should set partial_name to empty string."""
        created = PartialLockFile.create(
            tmp_path / "holder.json", "/job", information=None
        )

        assert created.partial_name == ""

    def test_from_information_invalid_raises(self, tmp_path: Path):
        """from_information with invalid format should raise."""
        path = tmp_path / "holder.json"
        path.write_text(json.dumps({"job_uri": "/job", "information": "invalid"}))

        with pytest.raises(ValueError, match="Invalid information format"):
            PartialLockFile(path)
|
|
@@ -7,8 +7,9 @@ from experimaestro.utils.git import get_git_info
|
|
|
7
7
|
from experimaestro.utils.environment import (
|
|
8
8
|
get_environment_info,
|
|
9
9
|
get_editable_packages_git_info,
|
|
10
|
-
|
|
10
|
+
get_current_environment,
|
|
11
11
|
load_environment_info,
|
|
12
|
+
ExperimentEnvironment,
|
|
12
13
|
)
|
|
13
14
|
|
|
14
15
|
|
|
@@ -40,8 +41,8 @@ class TestGetGitInfo:
|
|
|
40
41
|
assert len(git_info["commit"]) == 40
|
|
41
42
|
assert all(c in "0123456789abcdef" for c in git_info["commit"])
|
|
42
43
|
|
|
43
|
-
# Short commit should be 7 characters
|
|
44
|
-
assert len(git_info["commit_short"])
|
|
44
|
+
# Short commit should be 7-12 characters (git uses more if needed for uniqueness)
|
|
45
|
+
assert 7 <= len(git_info["commit_short"]) <= 12
|
|
45
46
|
|
|
46
47
|
def test_returns_none_for_non_git_dir(self, tmp_path):
|
|
47
48
|
"""Test that get_git_info returns None for non-git directories"""
|
|
@@ -131,22 +132,24 @@ class TestGetEditablePackagesGitInfo:
|
|
|
131
132
|
|
|
132
133
|
|
|
133
134
|
class TestSaveAndLoadEnvironmentInfo:
|
|
134
|
-
"""Tests for
|
|
135
|
+
"""Tests for get_current_environment and load_environment_info functions"""
|
|
135
136
|
|
|
136
137
|
def test_save_creates_file(self, tmp_path):
|
|
137
|
-
"""Test that
|
|
138
|
+
"""Test that get_current_environment + save creates a JSON file"""
|
|
138
139
|
path = tmp_path / "environment.json"
|
|
139
140
|
|
|
140
|
-
|
|
141
|
+
env = get_current_environment()
|
|
142
|
+
env.save(path)
|
|
141
143
|
|
|
142
144
|
assert path.exists()
|
|
143
|
-
assert isinstance(
|
|
145
|
+
assert isinstance(env, ExperimentEnvironment)
|
|
144
146
|
|
|
145
147
|
def test_save_writes_valid_json(self, tmp_path):
|
|
146
148
|
"""Test that saved file contains valid JSON"""
|
|
147
149
|
path = tmp_path / "environment.json"
|
|
148
150
|
|
|
149
|
-
|
|
151
|
+
env = get_current_environment()
|
|
152
|
+
env.save(path)
|
|
150
153
|
|
|
151
154
|
content = json.loads(path.read_text())
|
|
152
155
|
assert "python_version" in content
|
|
@@ -157,10 +160,13 @@ class TestSaveAndLoadEnvironmentInfo:
|
|
|
157
160
|
"""Test that load_environment_info reads back saved data"""
|
|
158
161
|
path = tmp_path / "environment.json"
|
|
159
162
|
|
|
160
|
-
saved =
|
|
163
|
+
saved = get_current_environment()
|
|
164
|
+
saved.save(path)
|
|
161
165
|
loaded = load_environment_info(path)
|
|
162
166
|
|
|
163
|
-
assert loaded == saved
|
|
167
|
+
assert loaded.python_version == saved.python_version
|
|
168
|
+
assert loaded.packages == saved.packages
|
|
169
|
+
assert loaded.editable_packages == saved.editable_packages
|
|
164
170
|
|
|
165
171
|
def test_load_returns_none_for_missing_file(self, tmp_path):
|
|
166
172
|
"""Test that load returns None for non-existent file"""
|
|
@@ -170,14 +176,18 @@ class TestSaveAndLoadEnvironmentInfo:
|
|
|
170
176
|
|
|
171
177
|
assert result is None
|
|
172
178
|
|
|
173
|
-
def
|
|
174
|
-
"""Test that load returns
|
|
179
|
+
def test_load_returns_empty_for_invalid_json(self, tmp_path):
|
|
180
|
+
"""Test that load returns empty ExperimentEnvironment for invalid JSON"""
|
|
175
181
|
path = tmp_path / "invalid.json"
|
|
176
182
|
path.write_text("not valid json{")
|
|
177
183
|
|
|
178
184
|
result = load_environment_info(path)
|
|
179
185
|
|
|
180
|
-
|
|
186
|
+
# Returns empty ExperimentEnvironment (graceful degradation)
|
|
187
|
+
assert result is not None
|
|
188
|
+
assert result.python_version is None
|
|
189
|
+
assert result.packages == {}
|
|
190
|
+
assert result.editable_packages == {}
|
|
181
191
|
|
|
182
192
|
|
|
183
193
|
class TestExperimentEnvironmentSaving:
|
|
@@ -188,7 +198,7 @@ class TestExperimentEnvironmentSaving:
|
|
|
188
198
|
from experimaestro import experiment
|
|
189
199
|
|
|
190
200
|
# Just enter the experiment context, no need to run any tasks
|
|
191
|
-
with experiment(xpmdirectory, "test-env-save"
|
|
201
|
+
with experiment(xpmdirectory, "test-env-save") as xp:
|
|
192
202
|
pass # environment.json should be saved on __enter__
|
|
193
203
|
|
|
194
204
|
env_path = xp.workdir / "environment.json"
|
|
@@ -1,5 +1,12 @@
|
|
|
1
|
-
|
|
1
|
+
import json
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from experimaestro import Task, Param
|
|
2
7
|
from experimaestro.tests.utils import TemporaryDirectory, TemporaryExperiment
|
|
8
|
+
from experimaestro.scheduler.experiment import cleanup_experiment_history
|
|
9
|
+
from experimaestro.settings import HistorySettings
|
|
3
10
|
|
|
4
11
|
|
|
5
12
|
class TaskA(Task):
|
|
@@ -15,41 +22,6 @@ class TaskB(Task):
|
|
|
15
22
|
pass
|
|
16
23
|
|
|
17
24
|
|
|
18
|
-
# xp = get_experiment(id="my-xp-1")
|
|
19
|
-
|
|
20
|
-
# # Returns a list of tasks which were submitted and successful
|
|
21
|
-
# tasks = xp.get_tasks(myxps.evaluation.Evaluation, status=Job.DONE)
|
|
22
|
-
|
|
23
|
-
# for task in tasks:
|
|
24
|
-
# # Look at the tags
|
|
25
|
-
# print(task.tags)
|
|
26
|
-
|
|
27
|
-
# # Get some information
|
|
28
|
-
# print("Task ran in {task.workdir}")
|
|
29
|
-
|
|
30
|
-
# # Look at the parent jobs
|
|
31
|
-
# print(task.depends_on)
|
|
32
|
-
|
|
33
|
-
# # Look at the dependant
|
|
34
|
-
# print(task.dependents)
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
def test_experiment_history():
|
|
38
|
-
"""Test retrieving experiment history"""
|
|
39
|
-
with TemporaryDirectory() as workdir:
|
|
40
|
-
with TemporaryExperiment("experiment", workdir=workdir):
|
|
41
|
-
task_a = TaskA.C().submit()
|
|
42
|
-
TaskB.C(task_a=task_a, x=tag(1)).submit()
|
|
43
|
-
|
|
44
|
-
# Look at the experiment
|
|
45
|
-
xp = get_experiment("experiment", workdir=workdir)
|
|
46
|
-
|
|
47
|
-
(task_a_info,) = xp.get_jobs(TaskA)
|
|
48
|
-
(task_b_info,) = xp.get_jobs(TaskB)
|
|
49
|
-
assert task_b_info.tags == {"x": 1}
|
|
50
|
-
assert task_b_info.depends_on == [task_a_info]
|
|
51
|
-
|
|
52
|
-
|
|
53
25
|
class FlagHandler:
|
|
54
26
|
def __init__(self):
|
|
55
27
|
self.flag = False
|
|
@@ -71,3 +43,166 @@ def test_experiment_events():
|
|
|
71
43
|
task_a.on_completed(flag.set)
|
|
72
44
|
|
|
73
45
|
assert flag.is_set()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# === Tests for cleanup_experiment_history ===
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _create_run_dir(experiment_base: Path, run_id: str, status: str) -> Path:
|
|
52
|
+
"""Helper to create a fake run directory with a given status."""
|
|
53
|
+
run_dir = experiment_base / run_id
|
|
54
|
+
run_dir.mkdir(parents=True, exist_ok=True)
|
|
55
|
+
env_data = {"run": {"status": status}}
|
|
56
|
+
(run_dir / "environment.json").write_text(json.dumps(env_data))
|
|
57
|
+
return run_dir
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# fmt: off
# NOTE: cleanup removes runs while count > max, so max_done=N keeps the N
# most recent completed runs (and likewise max_failed=N keeps N failed runs).
CLEANUP_TEST_CASES = [
    # Each case: runs as (run_id, status, should_remain) tuples, then
    # max_done, max_failed, current_run_id, current_status.
    pytest.param(
        [
            ("20260101_000000", "completed", False),
            ("20260101_010000", "completed", False),
            ("20260101_020000", "completed", True),   # 3rd newest -> kept
            ("20260101_030000", "completed", True),   # 2nd newest -> kept
            ("20260101_040000", "completed", True),   # newest -> kept
        ],
        3, 1, None, None,
        id="oldest_deleted_first__max_done_3_keeps_3",
    ),
    pytest.param(
        [
            ("20260101_000000", "completed", False),  # oldest completed -> removed
            ("20260101_010000", "completed", True),   # 2nd newest completed -> kept
            ("20260101_020000", "failed", False),     # failed followed by a success -> removed
            ("20260101_030000", "completed", True),   # newest completed -> kept
        ],
        2, 1, None, None,
        id="max_done_2_keeps_2_and_failed_1",
    ),
    pytest.param(
        [
            ("20260101_000000", "completed", False),
            ("20260101_010000", "completed", False),
            ("20260101_020000", "completed", True),   # 2nd newest -> kept
            ("20260101_030000", "completed", True),   # newest -> kept
        ],
        2, 1, None, None,
        id="max_done_2_keeps_2",
    ),
    pytest.param(
        [
            ("20260101_000000", "completed", True),
            ("20260101_010000", "completed", True),
            ("20260101_020000", "completed", True),
            ("20260101_030000", "completed", True),
        ],
        5, 1, None, None,
        id="max_done_5_keeps_all_4",
    ),
    pytest.param(
        [
            ("20260101_000000", "failed", False),     # oldest failed -> removed
            ("20260101_010000", "completed", True),   # only completed -> kept
            ("20260101_020000", "failed", True),      # 2nd newest failed -> kept
            ("20260101_030000", "failed", True),      # newest failed -> kept
        ],
        5, 2, None, None,
        id="max_failed_2_keeps_2",
    ),
    pytest.param(
        [
            ("20260101_000000", "completed", True),   # current run is never removed
            ("20260101_010000", "completed", True),   # 2 non-current <= max_done=2 -> kept
            ("20260101_020000", "completed", True),   # newest -> kept
        ],
        2, 1, "20260101_000000", None,
        id="excludes_current_run__all_kept",
    ),
    pytest.param(
        [
            ("20260101_000000", "failed", False),     # current success clears failed runs
            ("20260101_010000", "completed", True),
            ("20260101_020000", "failed", False),     # current success clears failed runs
            ("20260101_030000", "completed", True),
        ],
        5, 5, "current_run", "completed",
        id="success_removes_all_failed",
    ),
    pytest.param(
        [
            ("20260101_010000", "completed", False),  # oldest completed -> removed
            ("20260101_020000", "failed", False),     # precedes newest success -> removed
            ("20260101_030000", "completed", False),  # oldest completed -> removed
            ("20260101_040000", "failed", False),     # precedes newest success -> removed
            ("20260101_050000", "completed", True),   # 2nd newest completed -> kept
            ("20260101_060000", "failed", False),     # precedes newest success -> removed
            ("20260101_070000", "completed", True),   # newest completed -> kept
        ],
        2, 1, None, None,
        id="mixed_runs__max_done_2_max_failed_1",
    ),
    pytest.param(
        [
            ("20260101_120000", "completed", False),
            ("20260101_120000.1", "completed", False),
            ("20260101_120000.2", "completed", True),  # 2nd newest -> kept
            ("20260101_130000", "completed", True),    # newest -> kept
        ],
        2, 1, None, None,
        id="handles_modifiers_in_order",
    ),
]
# fmt: on
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
@pytest.mark.parametrize(
    "runs,max_done,max_failed,current_run_id,current_status",
    CLEANUP_TEST_CASES,
)
def test_cleanup_experiment_history(
    runs: list[tuple[str, str, bool]],
    max_done: int,
    max_failed: int,
    current_run_id: str | None,
    current_status: str | None,
):
    """Test cleanup_experiment_history with various configurations.

    Args:
        runs: List of (run_id, status, should_remain) tuples
        max_done: HistorySettings.max_done (removes while count > max_done,
            i.e. the max_done most recent completed runs are kept)
        max_failed: HistorySettings.max_failed (removes while count > max_failed)
        current_run_id: Run to exclude from cleanup
        current_status: If "completed", removes ALL past failed runs
    """
    with TemporaryDirectory() as workdir:
        experiment_base = workdir / "experiments" / "test-exp"
        experiment_base.mkdir(parents=True)

        # Create all run directories with their advertised statuses
        for run_id, status, _ in runs:
            _create_run_dir(experiment_base, run_id, status)

        # Run cleanup
        history = HistorySettings(max_done=max_done, max_failed=max_failed)
        removed = cleanup_experiment_history(
            experiment_base,
            current_run_id=current_run_id,
            current_status=current_status,
            history=history,
        )

        # Verify: exactly the runs flagged should_remain survive cleanup
        remaining = {d.name for d in experiment_base.iterdir()}
        expected_remaining = {run_id for run_id, _, keep in runs if keep}
        expected_removed = {run_id for run_id, _, keep in runs if not keep}

        assert remaining == expected_remaining, (
            f"Remaining mismatch: got {remaining}, expected {expected_remaining}"
        )
        assert len(removed) == len(expected_removed), (
            f"Removed count: got {len(removed)}, expected {len(expected_removed)}"
        )
|