verily-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
verily-0.1.0/.gitignore ADDED
@@ -0,0 +1,197 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # macOS files
+ .DS_Store
verily-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 wowthx
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
verily-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,17 @@
+ Metadata-Version: 2.4
+ Name: verily
+ Version: 0.1.0
+ Summary: Simple stability testing test for stochastic systems
+ License-File: LICENSE
+ Requires-Python: >=3.11
+ Requires-Dist: aiostream>=0.6.4
+ Requires-Dist: fsspec>=2025.5.1
+ Requires-Dist: numpy>=2.3.1
+ Requires-Dist: pydantic>=2.11.7
+ Requires-Dist: rich>=14.0.0
+ Requires-Dist: scipy>=1.16.0
+ Requires-Dist: typer>=0.16.0
+ Description-Content-Type: text/markdown
+
+ # Verily
+ Simple stability testing test for stochastic systems
verily-0.1.0/README.md ADDED
@@ -0,0 +1,2 @@
+ # Verily
+ Simple stability testing test for stochastic systems
verily-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,69 @@
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "verily"
+ version = "0.1.0"
+ description = "Simple stability testing test for stochastic systems"
+ readme = "README.md"
+ requires-python = ">=3.11"
+ dependencies = [
+     "aiostream>=0.6.4",
+     "fsspec>=2025.5.1",
+     "numpy>=2.3.1",
+     "pydantic>=2.11.7",
+     "rich>=14.0.0",
+     "scipy>=1.16.0",
+     "typer>=0.16.0",
+ ]
+
+ [project.scripts]
+ zen = "verily.main:app"
+
+ [dependency-groups]
+ dev = [
+     "build>=1.2.2.post1",
+     "pytest>=8.4.0",
+     "pytest-asyncio>=1.0.0",
+     "ruff>=0.11.13",
+ ]
+
+ [tool.poe.tasks]
+ lint = "ruff check ."
+ format = "ruff format ."
+ test = "uv run pytest"
+ build = "uv run pyproject-build"
+
+ [tool.pytest.ini_options]
+ pythonpath = ["src"]
+ asyncio_mode = "auto"
+
+ [tool.ruff.lint]
+ extend-select = [
+     "F",   # Pyflakes rules
+     "W",   # PyCodeStyle warnings
+     "E",   # PyCodeStyle errors
+     "I",   # Sort imports properly
+     "UP",  # Warn if certain things can be changed due to newer Python versions
+     "C4",  # Catch incorrect use of comprehensions, dict, list, etc.
+     "FA",  # Enforce from __future__ import annotations
+     "ISC", # Good use of string concatenation
+     "ICN", # Use common import conventions
+     "RET", # Good return practices
+     "SIM", # Common simplification rules
+     "TID", # Some good import practices
+     "TC",  # Enforce importing certain types in a TYPE_CHECKING block
+     "PTH", # Use pathlib instead of os.path
+     "TD",  # Be diligent with TODO comments
+     "NPY", # Some numpy-specific things
+ ]
+
+ [tool.ruff.lint.flake8-tidy-imports.banned-api."pytest.mark.asyncio"]
+ msg = "The @pytest.mark.asyncio decorator is not needed as `asyncio_mode` is set to `auto` for the project."
+
+ [tool.hatch.build.targets.sdist]
+ only-include = ["src/verily"]
+
+ [tool.hatch.build.targets.wheel]
+ only-include = ["src/verily"]
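The `asyncio_mode = "auto"` setting above is what makes the `banned-api` rule for `pytest.mark.asyncio` sensible: in auto mode, pytest-asyncio collects and runs `async def` tests without any decorator, so the decorator is pure noise. A minimal sketch of what a test looks like under this configuration (the test itself is hypothetical, not from the package):

```python
# Hypothetical test file. With asyncio_mode = "auto" (set in pyproject.toml
# above), pytest-asyncio runs this async test directly; decorating it with
# @pytest.mark.asyncio would be flagged by the Ruff banned-api rule.
import asyncio


async def test_sleep_returns_none():
    assert await asyncio.sleep(0) is None
```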
verily-0.1.0/src/verily/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .models import Benchmark, BenchmarkCase, RunConfigOverride
+
+ __all__ = ["Benchmark", "BenchmarkCase", "RunConfigOverride"]
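These three names are the package's entire public surface. The loader code later in this diff constructs `BenchmarkCase(inputs=..., expectation=...)`, so a rough, hypothetical construction might look like the following (the field values are made up, and `Benchmark`'s own fields are not shown anywhere in this diff):

```python
from verily import BenchmarkCase

# inputs/expectation are the only BenchmarkCase fields visible in this diff
# (see the dummy case built in load_results below); these values are
# illustrative only.
case = BenchmarkCase(inputs=[1, 2, 3], expectation=6)
```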
@@ -0,0 +1,112 @@
+ from rich.spinner import Spinner
+ from rich.table import Table
+ from rich.text import Text
+
+ from .models import BenchmarkRunResult
+
+
+ def generate_benchmark_table(
+     results: list[BenchmarkRunResult], confidence_level: float
+ ) -> Table:
+     table = Table(show_header=True, header_style="cyan", expand=True)
+     table.add_column("Benchmark", style="dim", width=30)
+     table.add_column(f"Mean @ {confidence_level:.0%} CI", justify="right")
+     table.add_column("Std Dev", justify="right")
+     table.add_column("Stability", justify="right")
+     table.add_column("Runs", justify="right")
+     table.add_column("Avg Dur~ (s)", justify="right")
+
+     if not results:
+         table.add_row("Waiting for benchmarks to start...", "", "", "", "", "")
+         return table
+
+     for result in results:
+         benchmark_name = result.benchmark.name
+         all_runs = [run for case in result.case_results for run in case.runs]
+
+         pending_runs = [r for r in all_runs if r.result.type == "pending"]
+         successful_runs = [r for r in all_runs if r.result.type == "success"]
+         failed_runs = [r for r in all_runs if r.result.type == "failure"]
+
+         latest_stats = result.stats
+
+         all_cases_done = all(cr.done for cr in result.case_results)
+
+         def with_spinner(text: str | None) -> str | Spinner | Text:
+             if pending_runs and not all_cases_done:
+                 if text:
+                     return Spinner("dots", text=Text(text, style="white"), style="cyan")
+                 return Spinner("dots", style="cyan")
+             return text or "[yellow]N/A[/yellow]"
+
+         mean_display_text = None
+         if latest_stats:
+             mean_val_text = f"{latest_stats.mean * 100:.2f}%"
+             if latest_stats.precision:
+                 margin_of_error = (
+                     latest_stats.precision.high - latest_stats.precision.low
+                 ) / 2
+                 margin_of_error_text = f"±{margin_of_error * 100:.2f}%"
+                 mean_display_text = f"{mean_val_text} ({margin_of_error_text})"
+             else:
+                 mean_display_text = mean_val_text
+
+         std_dev_text = f"{latest_stats.std_dev:.3f}" if latest_stats else None
+         stability_text = f"{latest_stats.stability * 100:.2f}%" if latest_stats else None
+
+         mean_score_display = with_spinner(mean_display_text)
+         std_dev_display = with_spinner(std_dev_text)
+         stability_display = with_spinner(stability_text)
+
+         total_runs_count = len(all_runs)
+         runs_display_text = f"{total_runs_count}"
+         if failed_runs:
+             runs_display_text += f" [red]({len(failed_runs)} failed)[/red]"
+
+         avg_run_duration_display: str | Spinner | Text
+         avg_runtime_text = None
+         completed_runs = successful_runs + failed_runs
+         if completed_runs:
+             total_runtime = sum(r.result.runtime for r in completed_runs)
+             avg_runtime = total_runtime / len(completed_runs)
+             avg_runtime_text = f"{avg_runtime:.2f}"
+
+         avg_run_duration_display = with_spinner(avg_runtime_text)
+
+         table.add_row(
+             benchmark_name,
+             mean_score_display,
+             std_dev_display,
+             stability_display,
+             runs_display_text,
+             avg_run_duration_display,
+         )
+
+     return table
+
+
+ def generate_failures_table(
+     results: list[BenchmarkRunResult]
+ ) -> Table:
+     table = Table(show_header=True, header_style="bold red")
+     table.title = "Failures"
+     table.add_column("Benchmark", style="dim", width=30)
+     table.add_column("Case", justify="right")
+     table.add_column("Error")
+     table.add_column("Stdout")
+     table.add_column("Stderr")
+
+     for result in results:
+         for case_result in result.case_results:
+             for run in case_result.runs:
+                 if run.result.type == "failure":
+                     failure = run.result
+                     table.add_row(
+                         result.benchmark.name,
+                         str(case_result.case.inputs),
+                         failure.error_message,
+                         failure.stdout.decode("utf-8", errors="ignore"),
+                         failure.stderr.decode("utf-8", errors="ignore"),
+                     )
+
+     return table
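Because the empty-input branch returns a placeholder row, the table layout can be previewed without any real results. A small sketch, assuming this module is importable as `verily.display` (the file name is not shown in this diff):

```python
from rich.console import Console

from verily.display import generate_benchmark_table  # module path assumed

console = Console()
# With no results, this prints the header row plus the
# "Waiting for benchmarks to start..." placeholder.
console.print(generate_benchmark_table([], confidence_level=0.95))
```

In the real CLI this table is presumably redrawn inside a `rich.live.Live` context as runs complete, which is what the `Spinner` cells are for.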
@@ -0,0 +1,152 @@
+ import json
+ from collections.abc import Callable, Iterator
+ from contextlib import contextmanager
+
+ import fsspec
+
+ from .models import (
+     BenchmarkCase,
+     BenchmarkRunResult,
+     BenchmarkStatistics,
+     CaseResult,
+     FailureResult,
+     NamedBenchmark,
+     Run,
+     RunConfig,
+     RunResult,
+     SuccessResult,
+ )
+ from .stats import aggregate_benchmark_statistics, calculate_run_stats
+ from .utils import create_sequential_run_directory
+
+
+ def _serialize_run(run: Run) -> dict:
+     result_dump = None
+     if hasattr(run.result, "model_dump"):
+         result_dump = run.result.model_dump()
+         if "stdout" in result_dump and isinstance(result_dump["stdout"], bytes):
+             result_dump["stdout"] = result_dump["stdout"].decode("utf-8", "ignore")
+         if "stderr" in result_dump and isinstance(result_dump["stderr"], bytes):
+             result_dump["stderr"] = result_dump["stderr"].decode("utf-8", "ignore")
+
+     return {
+         "result": result_dump,
+         "stats": run.stats.model_dump() if run.stats else None,
+     }
+
+
+ @contextmanager
+ def create_results_writer(
+     results_dir: str,
+     config: RunConfig,
+ ) -> Iterator[Callable[[BenchmarkRunResult], None]]:
+     fs, path = fsspec.url_to_fs(results_dir)
+     run_dir = create_sequential_run_directory(fs=fs, base_path=path)
+     written_runs = set()
+
+     with fs.open(f"{run_dir}/config.json", "w") as f:
+         json.dump(config.model_dump(), f, indent=4)
+
+     with fs.open(f"{run_dir}/results.jsonl", "w") as f:
+
+         def writer(result: BenchmarkRunResult) -> None:
+             benchmark_name = result.benchmark.name
+             for case_idx, case_result in enumerate(result.case_results):
+                 for run_idx, run in enumerate(case_result.runs):
+                     run_id = (benchmark_name, case_idx, run_idx)
+                     if run.result.type != "pending" and run_id not in written_runs:
+                         run_data = _serialize_run(run)
+                         output_record = {
+                             "benchmark_name": benchmark_name,
+                             "case_index": case_idx,
+                             "case_inputs": case_result.case.inputs,
+                             "run_index": run_idx,
+                             **run_data,
+                         }
+                         f.write(json.dumps(output_record) + "\n")
+                         written_runs.add(run_id)
+
+         yield writer
+
+
+ def load_results(run_path: str) -> tuple[list[BenchmarkRunResult], RunConfig]:
+     fs, path = fsspec.url_to_fs(run_path)
+
+     with fs.open(f"{path}/config.json", "r") as f:
+         config_dict = json.load(f)
+         config = RunConfig(**config_dict)
+
+     results_by_benchmark_case: dict[
+         tuple[str, int], list[Run]
+     ] = {}
+
+     with fs.open(f"{path}/results.jsonl", "r") as f:
+         for line in f:
+             record = json.loads(line)
+             benchmark_name = record["benchmark_name"]
+             case_index = record["case_index"]
+             case_inputs = record["case_inputs"]
+             run_index = record["run_index"]
+             run_data = record["result"]
+             stats_data = record["stats"]
+
+             run_result: RunResult
+             if run_data["type"] == "success":
+                 run_result = SuccessResult(
+                     comparison=run_data["comparison"],
+                     stdout=run_data["stdout"].encode("utf-8"),
+                     stderr=run_data["stderr"].encode("utf-8"),
+                     runtime=run_data["runtime"],
+                 )
+             elif run_data["type"] == "failure":
+                 run_result = FailureResult(
+                     error_message=run_data["error_message"],
+                     stdout=run_data["stdout"].encode("utf-8"),
+                     stderr=run_data["stderr"].encode("utf-8"),
+                     runtime=run_data["runtime"],
+                 )
+             else:
+                 # This should not happen with current serialization logic
+                 continue
+
+             run_stats = (
+                 BenchmarkStatistics(**stats_data) if stats_data else None
+             )
+             run = Run(result=run_result, stats=run_stats)
+
+             if (benchmark_name, case_index) not in results_by_benchmark_case:
+                 results_by_benchmark_case[(benchmark_name, case_index)] = []
+
+             # Ensure the list is long enough to insert at run_index
+             current_runs = results_by_benchmark_case[(benchmark_name, case_index)]
+             while len(current_runs) <= run_index:
+                 current_runs.append(None)  # type: ignore
+             current_runs[run_index] = run
+
+     benchmark_results: dict[str, BenchmarkRunResult] = {}
+
+     for (benchmark_name, case_index), runs in results_by_benchmark_case.items():
+         # NOTE: We don't have the original Benchmark object, so we create a dummy one.
+         # This is acceptable as it's only used for display purposes and not execution.
+         dummy_benchmark_case = BenchmarkCase(inputs=[], expectation=None)
+
+         # Filter out None runs and calculate stats for the case
+         successful_runs_in_case = [r.result for r in runs if r and r.result.type == "success"]
+         case_stats = calculate_run_stats(successful_runs_in_case, config.confidence_level)
+
+         case_result = CaseResult(case=dummy_benchmark_case, runs=runs, stats=case_stats)
+
+         if benchmark_name not in benchmark_results:
+             dummy_named_benchmark = NamedBenchmark(name=benchmark_name, benchmark=None)  # type: ignore
+             benchmark_results[benchmark_name] = BenchmarkRunResult(
+                 benchmark=dummy_named_benchmark, case_results=[], stats=None
+             )
+         benchmark_results[benchmark_name].case_results.append(case_result)
+
+     # Calculate overall benchmark statistics
+     for benchmark_run_result in benchmark_results.values():
+         all_case_stats = [cr.stats for cr in benchmark_run_result.case_results if cr.stats]
+         benchmark_run_result.stats = aggregate_benchmark_statistics(all_case_stats)
+
+     return list(benchmark_results.values()), config
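Together, `create_results_writer` and `load_results` round-trip a run directory: one `config.json` plus an append-only `results.jsonl` in which each completed run appears exactly once. A hedged usage sketch, assuming the module is importable as `verily.storage` and that `RunConfig` needs nothing beyond the `confidence_level` field referenced above:

```python
from verily.models import RunConfig  # RunConfig lives in .models per the imports above
from verily.storage import create_results_writer, load_results  # module path assumed

config = RunConfig(confidence_level=0.95)  # other RunConfig fields, if any, are not shown in this diff

with create_results_writer("results", config) as write_result:
    # The runner would call write_result(benchmark_run_result) as runs finish;
    # only non-pending runs not yet written are appended to results.jsonl.
    pass

# Later: rebuild display-ready results (with dummy Benchmark objects) from disk.
# The run directory name comes from create_sequential_run_directory, not shown here.
results, loaded_config = load_results("results/<run-dir>")
```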