ssrjson_benchmark-0.0.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ssrjson-benchmark might be problematic.
- ssrjson_benchmark/__init__.py +19 -0
- ssrjson_benchmark/__main__.py +97 -0
- ssrjson_benchmark/_files/MotionsQuestionsAnswersQuestions2016.json +1 -0
- ssrjson_benchmark/_files/apache.json +3532 -0
- ssrjson_benchmark/_files/canada.json +56532 -0
- ssrjson_benchmark/_files/ctm.json +48951 -0
- ssrjson_benchmark/_files/github.json +1320 -0
- ssrjson_benchmark/_files/instruments.json +7395 -0
- ssrjson_benchmark/_files/mesh.json +3602 -0
- ssrjson_benchmark/_files/simple_object.json +11 -0
- ssrjson_benchmark/_files/simple_object_zh.json +11 -0
- ssrjson_benchmark/_files/truenull.json +1 -0
- ssrjson_benchmark/_files/tweet.json +135 -0
- ssrjson_benchmark/_files/twitter.json +15195 -0
- ssrjson_benchmark/_ssrjson_benchmark.so +0 -0
- ssrjson_benchmark/benchmark_impl.py +826 -0
- ssrjson_benchmark/result_types.py +88 -0
- ssrjson_benchmark/template.md +11 -0
- ssrjson_benchmark-0.0.5.dist-info/METADATA +70 -0
- ssrjson_benchmark-0.0.5.dist-info/RECORD +23 -0
- ssrjson_benchmark-0.0.5.dist-info/WHEEL +6 -0
- ssrjson_benchmark-0.0.5.dist-info/licenses/LICENSE +21 -0
- ssrjson_benchmark-0.0.5.dist-info/top_level.txt +2 -0
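
The wheel ships its benchmark corpora as plain JSON files under ssrjson_benchmark/_files/, next to a compiled extension (_ssrjson_benchmark.so). A minimal sketch of enumerating those fixtures from an installed copy; the on-disk layout is inferred from the file list above, so treat it as an assumption rather than a supported API:

# Sketch only: assumes the wheel installs _files/*.json as regular package
# data next to __init__.py, as the file list above suggests.
import os
import pathlib

import ssrjson_benchmark

files_dir = pathlib.Path(
    os.path.dirname(os.path.abspath(ssrjson_benchmark.__file__))
) / "_files"
for p in sorted(files_dir.glob("*.json")):
    print(p.name, p.stat().st_size, "bytes")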
ssrjson_benchmark/__init__.py
@@ -0,0 +1,19 @@
+from .benchmark_impl import (
+    generate_report_pdf,
+    generate_report_markdown,
+    run_benchmark,
+)
+
+try:
+    from importlib.metadata import version
+
+    __version__ = version("ssrjson-benchmark")
+except Exception:
+    __version__ = "0.0.0"
+
+__all__ = [
+    "run_benchmark",
+    "generate_report_markdown",
+    "generate_report_pdf",
+    "__version__",
+]
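__init__.py re-exports the three public entry points from benchmark_impl. A minimal sketch of driving them directly from Python; the argument shapes are inferred from how __main__.py (below) calls them, not from any documented signature, so treat them as assumptions:

# Sketch only: argument shapes inferred from __main__.py below.
import pathlib

from ssrjson_benchmark import (
    generate_report_markdown,
    generate_report_pdf,
    run_benchmark,
)

files = sorted(pathlib.Path("benchmarks/").glob("*.json"))  # input corpora
process_bytes = int(0.1 * 1024**3)   # ~0.1 GiB processed per test
bin_process_bytes = 32 * 1024**2     # 32 MiB per read for binary formats
result, name = run_benchmark(files, process_bytes, bin_process_bytes)
generate_report_markdown(result, name.split("/")[-1], "reports/")
generate_report_pdf(result, name.split("/")[-1], "reports/")
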
ssrjson_benchmark/__main__.py
@@ -0,0 +1,97 @@
+def main():
+    import argparse
+    import json
+    import os
+    import pathlib
+    import sys
+
+    from .benchmark_impl import (
+        generate_report_markdown,
+        generate_report_pdf,
+        parse_file_result,
+        run_benchmark,
+    )
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "-f", "--file", help="record JSON file", required=False, default=None
+    )
+    parser.add_argument(
+        "-d",
+        "--in-dir",
+        help="Benchmark JSON files directory",
+        required=False,
+    )
+    parser.add_argument(
+        "-m",
+        "--markdown",
+        help="Generate Markdown report",
+        required=False,
+        action="store_true",
+    )
+    parser.add_argument(
+        "--no-pdf",
+        help="Don't generate PDF report",
+        required=False,
+        action="store_true",
+    )
+    parser.add_argument(
+        "--process-gigabytes",
+        help="Total process gigabytes per test, default 0.1 (float)",
+        required=False,
+        default=0.1,
+        type=float,
+    )
+    parser.add_argument(
+        "--bin-process-megabytes",
+        help="Maximum bytes to process per read for binary formats, default 32 (int)",
+        required=False,
+        default=32,
+        type=int,
+    )
+    parser.add_argument(
+        "--out-dir",
+        help="Output directory for reports",
+        required=False,
+        default=os.getcwd(),
+    )
+    args = parser.parse_args()
+    if args.file and args.no_pdf and not args.markdown:
+        print("Nothing to do.")
+        sys.exit(0)
+
+    _benchmark_files_dir = args.in_dir
+    if not _benchmark_files_dir:
+        _benchmark_files_dir = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)), "_files"
+        )
+    benchmark_files_dir = sorted(pathlib.Path(_benchmark_files_dir).glob("*.json"))
+    if not benchmark_files_dir:
+        print(f"No benchmark file found using given path: {_benchmark_files_dir}")
+        sys.exit(0)
+
+    if args.file:
+        with open(args.file, "rb") as f:
+            result_ = json.load(f)
+        result = parse_file_result(result_)
+        file = args.file.split("/")[-1]
+    else:
+        process_bytes = int(args.process_gigabytes * 1024 * 1024 * 1024)
+        bin_process_bytes = args.bin_process_megabytes * 1024 * 1024
+        if process_bytes <= 0 or bin_process_bytes <= 0:
+            print("process-gigabytes and bin-process-megabytes must be positive.")
+            sys.exit(1)
+        result, file = run_benchmark(
+            benchmark_files_dir, process_bytes, bin_process_bytes
+        )
+        file = file.split("/")[-1]
+
+    if args.markdown:
+        generate_report_markdown(result, file, args.out_dir)
+    if not args.no_pdf:
+        generate_report_pdf(result, file, args.out_dir)
+
+
+if __name__ == "__main__":
+    main()
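
Since the entry point lives in __main__.py and no console script appears in the file list, the natural invocation is presumably python -m ssrjson_benchmark; the flags below are taken verbatim from the argparse definitions above:

# Run the bundled benchmarks, writing Markdown and PDF reports to ./reports:
#   python -m ssrjson_benchmark --markdown --out-dir ./reports
#
# Re-render reports from a recorded result file, skipping the PDF:
#   python -m ssrjson_benchmark -f result.json --markdown --no-pdf
#
# Shrink the workload: ~0.05 GiB per test, 16 MiB per binary read:
#   python -m ssrjson_benchmark --process-gigabytes 0.05 --bin-process-megabytes 16

Note one quirk visible in the source: --bin-process-megabytes is interpreted in MiB (the value is multiplied by 1024 * 1024) even though its help text says "Maximum bytes".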