ssrjson-benchmark 0.0.2-cp39-cp39-win_amd64.whl → 0.0.4-cp39-cp39-win_amd64.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.



@@ -1,668 +0,0 @@
- from __future__ import annotations  # keep "str | bytes" annotations lazy on Python 3.9 (this wheel targets cp39)
- import importlib.util  # find_spec lives in the util submodule
- import io
- import sys
- import os
- import gc
- import json
- from collections import defaultdict
- from typing import Any, Callable, List
- import time
- import platform
- import re
- import pathlib
- import math
- from ssrjson_benchmark import _ssrjson_benchmark
- import matplotlib.pyplot as plt
- import matplotlib as mpl
-
- import orjson
- import ssrjson
-
-
- mpl.use("Agg")
- mpl.rcParams["svg.fonttype"] = "none"
-
-
- try:
-     from svglib.fonts import FontMap
-
-     font_map = FontMap()
-     font_map.register_default_fonts()
-     # Workaround: matplotlib uses weight 700 for bold, but svg2rlg treats 700 as normal.
-     font_map.register_font("Helvetica", weight="700", rlgFontName="Helvetica-Bold")
- except ImportError:
-     pass
-
- CUR_FILE = os.path.abspath(__file__)
- CUR_DIR = os.path.dirname(CUR_FILE)
- CWD = os.getcwd()
- _NS_IN_ONE_S = 1000000000
-
- PDF_HEADING_FONT = "Helvetica-Bold"
- PDF_TEXT_FONT = "Courier"
-
- # The baseline is the first entry of each category.
- LIBRARIES_COLORS = {"json": "#74c476", "orjson": "#6baed6", "ssrjson": "#fd8d3c"}
- LIBRARIES: dict[str, dict[str, Callable[[str | bytes], Any]]] = {
-     "dumps": {
-         "json.dumps": json.dumps,
-         "orjson.dumps+decode": lambda x: orjson.dumps(x).decode("utf-8"),
-         "ssrjson.dumps": ssrjson.dumps,
-     },
-     "dumps(indented2)": {
-         "json.dumps": lambda x: json.dumps(x, indent=2),
-         "orjson.dumps+decode": lambda x: orjson.dumps(
-             x, option=orjson.OPT_INDENT_2
-         ).decode("utf-8"),
-         "ssrjson.dumps": lambda x: ssrjson.dumps(x, indent=2),
-     },
-     "dumps_to_bytes": {
-         "json.dumps+encode": lambda x: json.dumps(x).encode("utf-8"),
-         "orjson.dumps": orjson.dumps,
-         "ssrjson.dumps_to_bytes": ssrjson.dumps_to_bytes,
-     },
-     "dumps_to_bytes(indented2)": {
-         "json.dumps+encode": lambda x: json.dumps(x, indent=2).encode("utf-8"),
-         "orjson.dumps": lambda x: orjson.dumps(x, option=orjson.OPT_INDENT_2),
-         "ssrjson.dumps_to_bytes": lambda x: ssrjson.dumps_to_bytes(x, indent=2),
-     },
-     "loads(str)": {
-         "json.loads": json.loads,
-         "orjson.loads": orjson.loads,
-         "ssrjson.loads": ssrjson.loads,
-     },
-     "loads(bytes)": {
-         "json.loads": json.loads,
-         "orjson.loads": orjson.loads,
-         "ssrjson.loads": ssrjson.loads,
-     },
- }
- CATEGORIES = LIBRARIES.keys()
-
- INDEXES = ["speed"]
-
-
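The comment above matters for reading the plots: the first entry of each LIBRARIES category is the baseline that every *_ratio value computed later in _run_benchmark is measured against. A minimal sketch of that relationship, with invented timings:

    # Hypothetical accumulated times in ns for one category (not real measurements).
    timings = {"json.dumps": 500, "orjson.dumps+decode": 125, "ssrjson.dumps": 100}
    baseline = timings["json.dumps"]  # the first entry acts as the 1.00x baseline
    ratios = {name: baseline / t for name, t in timings.items()}
    # ratios == {"json.dumps": 1.0, "orjson.dumps+decode": 4.0, "ssrjson.dumps": 5.0}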
- def gc_prepare():
-     """
-     Call collect once, and then disable automatic GC.
-     Return True if automatic GC was enabled.
-     """
-     gc.collect()
-     gc_was_enabled = gc.isenabled()
-     if gc_was_enabled:
-         gc.disable()
-     return gc_was_enabled
-
-
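gc_prepare is one half of the usual disable-GC-while-timing pattern; the caller is responsible for restoring collection afterwards, which every helper below does in a finally block. The intended call shape, with a hypothetical workload():

    was_enabled = gc_prepare()
    try:
        workload()  # hypothetical code timed without GC pauses
    finally:
        if was_enabled:
            gc.enable()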
- def benchmark(repeat_time: int, func, *args):
-     """
-     Run a repeated benchmark with automatic GC disabled.
-     Returns time used (ns). (Unlike benchmark_invalidate_dump_cache
-     below, this does not defeat orjson's utf-8 cache.)
-     """
-     gc_was_enabled = gc_prepare()
-     try:
-         # warm up
-         _ssrjson_benchmark.run_object_accumulate_benchmark(func, 100, args)
-         return _ssrjson_benchmark.run_object_accumulate_benchmark(
-             func, repeat_time, args
-         )
-     finally:
-         if gc_was_enabled:
-             gc.enable()
-
-
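This is exactly how _run_benchmark invokes the helper further down; a standalone sketch with a made-up payload (benchmark_unicode_arg below follows the same shape for str inputs):

    payload = b'{"key": [1, 2, 3]}'
    elapsed_ns = benchmark(1000, json.loads, payload)
    print(f"{elapsed_ns / _NS_IN_ONE_S:.6f} s for 1000 iterations")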
- def benchmark_unicode_arg(repeat_time: int, func, unicode: str, *args):
-     """
-     Run a repeated benchmark where the str argument is passed through
-     the unicode benchmark runner. Returns time used (ns).
-     """
-     gc_was_enabled = gc_prepare()
-     try:
-         # warm up
-         _ssrjson_benchmark.run_unicode_accumulate_benchmark(func, 100, unicode, args)
-         return _ssrjson_benchmark.run_unicode_accumulate_benchmark(
-             func, repeat_time, unicode, args
-         )
-     finally:
-         if gc_was_enabled:
-             gc.enable()
-
-
- def benchmark_invalidate_dump_cache(repeat_time: int, func, raw_bytes: bytes, *args):
-     """
-     orjson reuses a utf-8 cache when dumping the same input object,
-     so give every iteration a freshly parsed copy to invalidate it.
-     Returns time used (ns).
-     """
-     # prepare identical data, without sharing objects
-     data_warmup = [json.loads(raw_bytes) for _ in range(10)]
-     data = [json.loads(raw_bytes) for _ in range(repeat_time)]
-     # disable GC
-     gc_was_enabled = gc_prepare()
-     try:
-         # warm up
-         for i in range(10):
-             new_args = (data_warmup[i], *args)
-             _ssrjson_benchmark.run_object_benchmark(func, new_args)
-         # timed runs
-         total = 0
-         for i in range(repeat_time):
-             new_args = (data[i], *args)
-             total += _ssrjson_benchmark.run_object_benchmark(func, new_args)
-         return total
-     finally:
-         if gc_was_enabled:
-             gc.enable()
-
-
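The cache-busting trick is object identity: parsing the same bytes twice yields equal values but distinct objects, so orjson cannot reuse per-object state across iterations. A quick check of the property the function relies on:

    raw = b'{"a": "b"}'
    first, second = json.loads(raw), json.loads(raw)
    assert first == second and first is not second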
- def get_benchmark_files() -> list[pathlib.Path]:
-     return sorted(pathlib.Path(CUR_DIR, "_files").glob("*.json"))
-
-
- def _run_benchmark(
-     curfile_obj: defaultdict[str, Any],
-     repeat_times: int,
-     input_data: str | bytes,
-     mode: str,  # "dumps", etc.
- ):
-     print(f"Running benchmark for {mode}")
-     funcs = LIBRARIES[mode]
-     cur_obj = curfile_obj[mode]
-
-     def pick_benchmark_func() -> Callable:
-         if "dumps" in mode and "loads" not in mode:
-             return benchmark_invalidate_dump_cache
-         if isinstance(input_data, str) and "loads" in mode:
-             return benchmark_unicode_arg
-         return benchmark
-
-     # process = psutil.Process()
-
-     for name, func in funcs.items():
-         benchmark_func = pick_benchmark_func()
-
-         # t0 = time.perf_counter()
-         # cpu_times_before = process.cpu_times()
-         # ctx_before = process.num_ctx_switches()
-         # mem_before = process.memory_info().rss
-
-         speed = benchmark_func(repeat_times, func, input_data)
-
-         # End measuring
-         # t1 = time.perf_counter()
-         # cpu_times_after = process.cpu_times()
-         # ctx_after = process.num_ctx_switches()
-
-         # user_cpu = cpu_times_after.user - cpu_times_before.user
-         # system_cpu = cpu_times_after.system - cpu_times_before.system
-         # voluntary_ctx = ctx_after.voluntary - ctx_before.voluntary
-         # involuntary_ctx = ctx_after.involuntary - ctx_before.involuntary
-         # mem_after = process.memory_info().rss
-
-         cur_obj[name] = {
-             "speed": speed,
-             # "user_cpu": user_cpu,
-             # "system_cpu": system_cpu,
-             # "ctx_vol": voluntary_ctx,
-             # "ctx_invol": involuntary_ctx,
-             # "mem_diff": mem_after - mem_before,
-             # "wall_time": t1 - t0,
-         }
-
-     funcs_iter = iter(funcs.items())
-     baseline_name, _ = next(funcs_iter)
-     baseline_data = cur_obj[baseline_name]
-     for name, func in funcs_iter:
-         if name.startswith("ssrjson"):
-             # debug use: bytes per second
-             if "dumps" in mode:
-                 data_obj = json.loads(input_data)
-                 output = func(data_obj)
-                 if "bytes" in mode:
-                     size = len(output)
-                 else:
-                     _, size, _, _ = _ssrjson_benchmark.inspect_pyunicode(output)
-             else:
-                 size = (
-                     len(input_data)
-                     if isinstance(input_data, bytes)
-                     else _ssrjson_benchmark.inspect_pyunicode(input_data)[1]
-                 )
-             cur_obj["ssrjson_bytes_per_sec"] = (
-                 size * repeat_times / (cur_obj[name]["speed"] / _NS_IN_ONE_S)
-             )
-
-         for index in INDEXES:
-             basename = name.split(".")[0]
-             if baseline_data[index] == 0:
-                 cur_obj[f"{basename}_{index}_ratio"] = math.inf
-             else:
-                 cur_obj[f"{basename}_{index}_ratio"] = (
-                     baseline_data[index] / cur_obj[name][index]
-                 )
-
-
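ssrjson_bytes_per_sec is plain throughput: total bytes processed (size * repeat_times) divided by the accumulated time in seconds. For example, a 1 MiB document dumped 100 times in 0.05 s gives 104857600 / 0.05 = 2097152000 B/s, which plot_relative_ops later renders as about 1.95 GB/s after dividing by 1024**3.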
- def run_file_benchmark(
-     file: str | pathlib.Path, result: defaultdict[str, defaultdict[str, Any]], process_bytes: int
- ):
-     with open(file, "rb") as f:
-         raw_bytes = f.read()
-     raw = raw_bytes.decode("utf-8")
-     base_file_name = os.path.basename(file)
-     curfile_obj = result[base_file_name]
-     curfile_obj["byte_size"] = bytes_size = len(raw_bytes)
-     kind, str_size, is_ascii, _ = _ssrjson_benchmark.inspect_pyunicode(raw)
-     curfile_obj["pyunicode_size"] = str_size
-     curfile_obj["pyunicode_kind"] = kind
-     curfile_obj["pyunicode_is_ascii"] = is_ascii
-     repeat_times = int((process_bytes + bytes_size - 1) // bytes_size)
-
-     for mode in LIBRARIES.keys():
-         # pass str input for the str-oriented mode so that loads(str)
-         # actually measures str decoding instead of repeating loads(bytes)
-         input_data = raw if "str" in mode else raw_bytes
-         _run_benchmark(curfile_obj, repeat_times, input_data, mode)
-
-
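repeat_times is a ceiling division, so every fixture sees roughly process_bytes bytes of total work regardless of its size: with the default process_bytes of 100000000, a 50000-byte file runs (100000000 + 49999) // 50000 = 2000 iterations, while a 10000000-byte file runs 10.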
- def get_head_rev_name():
-     return (
-         getattr(ssrjson, "__version__", None) or getattr(ssrjson, "ssrjson").__version__
-     )
-
-
- def get_real_output_file_name():
-     rev = get_head_rev_name()
-     if not rev:
-         file = "benchmark_result.json"
-     else:
-         file = f"benchmark_result_{rev}.json"
-     return file
-
-
- def get_cpu_name() -> str:
-     cpuinfo_spec = importlib.util.find_spec("cpuinfo")
-     if cpuinfo_spec is not None:
-         import cpuinfo
-
-         cpu_name = cpuinfo.get_cpu_info().get("brand_raw", "UnknownCPU")
-     else:
-         # fallback
-         cpu_name: str = platform.processor()
-         if cpu_name.strip() == "":
-             # linux fallback
-             if os.path.exists("/proc/cpuinfo"):
-                 with open(file="/proc/cpuinfo", mode="r") as file:
-                     cpu_info_lines = file.readlines()
-                 for line in cpu_info_lines:
-                     if "model name" in line:
-                         cpu_name = re.sub(
-                             pattern=r"model name\s+:\s+", repl="", string=line
-                         )
-                         break
-             else:
-                 cpu_name = "UnknownCPU"
-     # merge nearby spaces
-     return re.sub(pattern=r"\s+", repl=" ", string=cpu_name).strip()
-
-
- def get_mem_total() -> str:
-     mem_total: int = 0  # kB, matching /proc/meminfo units
-     if platform.system() == "Linux":
-         with open(file="/proc/meminfo", mode="r") as file:
-             mem_info_lines = file.readlines()
-         for line in mem_info_lines:
-             if "MemTotal" in line:
-                 mem_total = int(re.sub(pattern=r"[^0-9]", repl="", string=line))
-                 break
-     elif platform.system() == "Windows":
-         import psutil
-
-         # psutil reports bytes; convert to kB so both branches share a unit
-         mem_total = psutil.virtual_memory().total // 1024
-     return f"{mem_total / (1024**2):.3f}GiB"
-
-
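A quick unit sanity check: on a 16 GiB machine, /proc/meminfo reports "MemTotal: 16777216 kB", and 16777216 / 1024**2 == 16.0, so the function returns "16.000GiB"; converting the psutil byte count to kB keeps the Windows branch on the same scale.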
- def get_ratio_color(ratio: float) -> str:
-     if ratio < 1:
-         return "#d63031"  # red (worse than baseline)
-     elif ratio == 1:
-         return "black"  # black (baseline)
-     elif ratio < 2:
-         return "#e67e22"  # orange (similar/slightly better)
-     elif ratio < 4:
-         return "#f39c12"  # amber (decent improvement)
-     elif ratio < 8:
-         return "#27ae60"  # green (good)
-     elif ratio < 16:
-         return "#2980b9"  # blue (great)
-     else:
-         return "#8e44ad"  # purple (exceptional)
-
-
- def plot_relative_ops(data: dict, doc_name: str, index_s: str) -> io.BytesIO:
-     libs = list(LIBRARIES_COLORS.keys())
-     colors = [LIBRARIES_COLORS[n] for n in libs]
-     n = len(CATEGORIES)
-     bar_width = 0.2
-     inner_pad = 0
-
-     fig, axs = plt.subplots(
-         1,
-         n,
-         figsize=(4 * n, 6),
-         sharey=False,
-         tight_layout=True,
-         gridspec_kw={"wspace": 0},
-     )
-
-     x_positions = [i * (bar_width + inner_pad) for i in range(len(libs))]
-
-     for ax, cat in zip(axs, CATEGORIES):
-         vals = [1.0] + [data[cat][f"{name}_{index_s}_ratio"] for name in libs[1:]]
-         gbps = (data[cat]["ssrjson_bytes_per_sec"]) / (1024**3)
-
-         for xi, val, col in zip(x_positions, vals, colors):
-             ax.bar(xi, val, width=bar_width, color=col)
-             ax.text(
-                 xi,
-                 val + 0.05,
-                 f"{val:.2f}x",
-                 ha="center",
-                 va="bottom",
-                 fontsize=9,
-                 color=get_ratio_color(val),
-             )
-
-         ssrjson_index = libs.index("ssrjson")
-         ax.text(
-             x_positions[ssrjson_index],
-             vals[ssrjson_index] / 2,
-             f"{gbps:.2f} GB/s",
-             ha="center",
-             va="center",
-             fontsize=10,
-             color="#2c3e50",
-             fontweight="bold",
-         )
-
-         # baseline line
-         ax.axhline(1.0, color="gray", linestyle="--", linewidth=1)
-         # height = 1.1 * max bar height
-         ax.set_ylim(0, max(vals + [1.0]) * 1.1)
-
-         # hide all ticks
-         ax.tick_params(
-             axis="both",
-             which="both",
-             left=False,
-             bottom=False,
-             labelleft=False,
-             labelbottom=False,
-         )
-
-         # and spines
-         for spine in ("left", "top", "right"):
-             ax.spines[spine].set_visible(False)
-
-         ax.set_xlabel(cat, fontsize=10, labelpad=6)
-
-     fig.suptitle(
-         doc_name,
-         fontsize=20,
-         fontweight="bold",
-         y=0.98,
-     )
-
-     # color legend
-     legend_elements = [
-         plt.Line2D([0], [0], color=col, lw=4, label=name)
-         for name, col in LIBRARIES_COLORS.items()
-     ]
-     fig.legend(
-         handles=legend_elements,
-         loc="upper right",
-         bbox_to_anchor=(0.98, 0.95),
-         ncol=len(libs),
-         fontsize=14,
-         frameon=False,
-     )
-
-     fig.text(
-         0.5,
-         0,
-         "Higher is better",
-         ha="center",
-         va="bottom",
-         fontsize=8,
-         style="italic",
-         color="#555555",
-     )
-
-     buf = io.BytesIO()
-     plt.savefig(buf, format="svg", bbox_inches="tight")
-     buf.seek(0)
-     plt.close(fig)
-     return buf
-
-
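plot_relative_ops reads only the *_ratio keys and ssrjson_bytes_per_sec, so it can be exercised standalone; a sketch with invented numbers:

    fake = {
        cat: {
            "orjson_speed_ratio": 2.0,
            "ssrjson_speed_ratio": 6.0,
            "ssrjson_bytes_per_sec": 2 * 1024**3,  # pretend 2 GiB/s
        }
        for cat in CATEGORIES
    }
    svg_buf = plot_relative_ops(fake, "example.json", "speed")
    pathlib.Path("example.svg").write_bytes(svg_buf.getvalue())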
- def draw_page_number(c: "canvas.Canvas", page_num: int):
-     from reportlab.lib.pagesizes import A4
-
-     width, _ = A4
-     c.setFont("Helvetica-Oblique", 8)  # italic
-     c.setFillColorRGB(0.5, 0.5, 0.5)  # grey
-     c.drawRightString(width - 40, 20, f"{page_num}")
-
-
- def generate_pdf_report(
-     figures: List[List[io.BytesIO]], header_text: str, output_pdf_path: str
- ) -> str:
-     from reportlab.pdfgen import canvas
-     from reportlab.graphics import renderPDF
-     from svglib.svglib import svg2rlg
-     from reportlab.lib.pagesizes import A4
-
-     c = canvas.Canvas(output_pdf_path, pagesize=A4)
-     width, height = A4
-
-     # heading info
-     heading = header_text.splitlines()
-     # the first line is the "# ..." header
-     header, heading_info = heading[0].removeprefix("#").strip(), heading[1:]
-     c.setFont(PDF_HEADING_FONT, 20)
-     text_obj = c.beginText(40, height - 50)
-     text_obj.textLine(header)
-     c.drawText(text_obj)
-
-     # Wrap heading_info lines if they overflow
-     max_width = width - 80  # 40 margin on both sides
-     wrapped_heading_info = []
-     for line in heading_info:
-         while c.stringWidth(line, PDF_TEXT_FONT, 10) > max_width:
-             # Estimate how many characters fit (Courier is monospaced)
-             split_idx = int(max_width // c.stringWidth(" ", PDF_TEXT_FONT, 10))
-             # Try to split at the nearest space before split_idx
-             space_idx = line.rfind(" ", 0, split_idx)
-             if space_idx == -1:
-                 space_idx = split_idx
-             wrapped_heading_info.append(line[:space_idx])
-             # TODO fixed indent
-             line = " " + line[space_idx:].lstrip()
-         wrapped_heading_info.append(line)
-     heading_info = wrapped_heading_info
-
-     c.setFont(PDF_TEXT_FONT, 10)
-     text_obj = c.beginText(40, height - 70)
-     for line in heading_info:
-         text_obj.textLine(line)
-     c.drawText(text_obj)
-
-     c.setFont("Helvetica-Oblique", 8)
-     text = "This report was generated by https://github.com/Nambers/ssrJSON-benchmark"
-     c.drawString(40, 20, text)
-     link_start = 40 + c.stringWidth("This report was generated by ")
-     link_end = link_start + c.stringWidth(
-         "https://github.com/Nambers/ssrJSON-benchmark"
-     )
-     text_height = 5  # Adjusted height to better fit the link area
-     c.linkURL(
-         "https://github.com/Nambers/ssrJSON-benchmark",
-         (link_start, 20, link_end, 20 + text_height),
-         relative=1,
-     )
-
-     header_lines = header_text.count("\n") + 1
-     header_height = header_lines * 14 + 10
-     # subheading spacing = 30
-     y_pos = height - header_height - 30
-     bottom_margin = 20
-     vertical_gap = 20
-
-     p = 0
-
-     for name, figs in zip(INDEXES, figures):
-         text_obj = c.beginText()
-         text_obj.setTextOrigin(40, y_pos)
-         text_obj.setFont(PDF_HEADING_FONT, 14)
-         text_obj.textLine(f"{name}")
-         c.drawText(text_obj)
-         c.bookmarkHorizontal(name, 0, y_pos + 20)
-         c.addOutlineEntry(name, name, level=0)
-         y_pos -= 20
-         for svg_io in figs:
-             svg_io.seek(0)
-             drawing = svg2rlg(svg_io, font_map=font_map)
-
-             avail_w = width - 80
-             scale = avail_w / drawing.width
-             drawing.width *= scale
-             drawing.height *= scale
-             drawing.scale(scale, scale)
-
-             img_h = drawing.height
-             # not enough space left on this page
-             if y_pos - img_h - vertical_gap < bottom_margin:
-                 draw_page_number(c, p)
-                 p += 1
-                 c.showPage()
-                 y_pos = height - bottom_margin
-
-             c.setStrokeColorRGB(0.9, 0.9, 0.9)
-             c.setLineWidth(0.4)
-             c.line(40, y_pos, width - 40, y_pos)
-
-             renderPDF.draw(drawing, c, 40, y_pos - img_h)
-             y_pos -= img_h + vertical_gap
-
-     draw_page_number(c, p)
-     c.save()
-     return output_pdf_path
-
-
- def generate_report(result: dict[str, dict[str, Any]], file: str, out_dir: str = CWD):
-     file = file.removesuffix(".json")
-     report_name = f"{file}.pdf"
-
-     figures = []
-
-     for index_s in INDEXES:
-         tmp = []
-         for bench_file in get_benchmark_files():
-             print(f"Processing {bench_file.name}")
-             tmp.append(
-                 plot_relative_ops(
-                     result[bench_file.name],
-                     bench_file.name,
-                     index_s,
-                 )
-             )
-         figures.append(tmp)
-
-     with open(os.path.join(CUR_DIR, "template.md"), "r") as f:
-         template = f.read()
-     template = template.format(
-         REV=file.removeprefix("benchmark_result_").removesuffix(".json"),
-         TIME=time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime()),
-         OS=f"{platform.system()} {platform.machine()} {platform.release()} {platform.version()}",
-         PYTHON=sys.version,
-         ORJSON_VER=orjson.__version__,
-         SIMD_FLAGS=ssrjson.get_current_features(),
-         CHIPSET=get_cpu_name(),
-         MEM=get_mem_total(),
-     )
-     out_path = generate_pdf_report(
-         figures,
-         header_text=template,
-         output_pdf_path=os.path.join(out_dir, report_name),
-     )
-     print(f"Report saved to {out_path}")
-
-
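template.md itself is not part of this diff; from the format() call it must define the placeholders REV, TIME, OS, PYTHON, ORJSON_VER, SIMD_FLAGS, CHIPSET and MEM, with a "# ..." first line that generate_pdf_report promotes to the PDF heading. A hypothetical minimal template consistent with that:

    # ssrJSON benchmark {REV}
    Generated: {TIME}
    OS: {OS}
    Python: {PYTHON}
    orjson: {ORJSON_VER}
    SIMD flags: {SIMD_FLAGS}
    CPU: {CHIPSET}
    Memory: {MEM}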
- def generate_report_markdown(
-     result: dict[str, dict[str, Any]], file: str, out_dir: str = CWD
- ):
-     file = file.removesuffix(".json")
-     report_name = f"{file}.md"
-     report_folder = os.path.join(out_dir, f"{file}_report")
-
-     # mkdir
-     if not os.path.exists(report_folder):
-         os.makedirs(report_folder)
-
-     with open(os.path.join(CUR_DIR, "template.md"), "r") as f:
-         template = f.read()
-     template = template.format(
-         REV=file.removeprefix("benchmark_result_").removesuffix(".json"),
-         TIME=time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime()),
-         OS=f"{platform.system()} {platform.machine()} {platform.release()} {platform.version()}",
-         PYTHON=sys.version,
-         ORJSON_VER=orjson.__version__,
-         SIMD_FLAGS=ssrjson.get_current_features(),
-         CHIPSET=get_cpu_name(),
-         MEM=get_mem_total(),
-     )
-
-     for index_s in INDEXES:
-         template += f"\n\n## {index_s}\n\n"
-         for bench_file in get_benchmark_files():
-             print(f"Processing {bench_file.name}")
-             with open(
-                 os.path.join(report_folder, bench_file.name + ".svg"), "wb"
-             ) as svg_file:
-                 svg_file.write(
-                     plot_relative_ops(
-                         result[bench_file.name],
-                         bench_file.name,
-                         index_s,
-                     ).getvalue()
-                 )
-             # add svg
-             template += f"![{bench_file.name}](./{bench_file.name}.svg)\n\n"
-
-     with open(os.path.join(report_folder, report_name), "w") as f:
-         f.write(template)
-     print(f"Report saved to {os.path.join(report_folder, report_name)}")
-
-
- def run_benchmark(process_bytes: int = 100_000_000):
-     file = get_real_output_file_name()
-     if os.path.exists(file):
-         os.remove(file)
-     result: defaultdict[str, defaultdict[str, Any]] = defaultdict(
-         lambda: defaultdict(dict)
-     )
-
-     for bench_file in get_benchmark_files():
-         run_file_benchmark(bench_file, result, process_bytes)
-     output_result = json.dumps(result, indent=4)
-
-     with open(file, "w", encoding="utf-8") as f:
-         f.write(output_result)
-     return result, file
-
-
- def run_benchmark_default():
-     """
-     Run the benchmark with default parameters and generate a PDF report.
-     """
-     j, file = run_benchmark()
-     file = os.path.basename(file)  # portable, unlike splitting on "/"
-     generate_report(j, file)
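
run_benchmark_default appears to be the module's entry point; a minimal driver under that assumption:

    # Hypothetical usage: runs every fixture in _files/, writes
    # benchmark_result_<version>.json, then renders the PDF report.
    from ssrjson_benchmark import run_benchmark_default

    if __name__ == "__main__":
        run_benchmark_default()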