ssrjson-benchmark 0.0.1__cp313-cp313-win_amd64.whl → 0.0.1b0__cp313-cp313-win_amd64.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.

Potentially problematic release.


@@ -0,0 +1,652 @@
+ import importlib.util
+ import io
+ import sys
+ import os
+ import gc
+ import json
+ from collections import defaultdict
+ from typing import Any, Callable, List
+ import time
+ import platform
+ import re
+ import pathlib
+ import math
+ from ssrjson_benchmark import _ssrjson_benchmark
+ import matplotlib.pyplot as plt
+ import matplotlib as mpl
+
+ import orjson
+ import ssrjson
+
+
+ mpl.use("Agg")
+ mpl.rcParams["svg.fonttype"] = "none"
+
+
+ try:
+     from svglib.fonts import FontMap
+
+     font_map = FontMap()
+     font_map.register_default_fonts()
+     # workaround: matplotlib uses weight 700 for bold, while svg2rlg treats 700 as normal
+     font_map.register_font("Helvetica", weight="700", rlgFontName="Helvetica-Bold")
+ except ImportError:
+     pass
+
+ CUR_FILE = os.path.abspath(__file__)
+ CUR_DIR = os.path.dirname(CUR_FILE)
+ CWD = os.getcwd()
+ _NS_IN_ONE_S = 1_000_000_000
+
+ PDF_HEADING_FONT = "Helvetica-Bold"
+ PDF_TEXT_FONT = "Courier"
+
+ # baseline is the first one.
+ LIBRARIES_COLORS = {"json": "#74c476", "orjson": "#6baed6", "ssrjson": "#fd8d3c"}
+ LIBRARIES: dict[str, dict[str, Callable[[str | bytes], Any]]] = {
+     "dumps": {
+         "json.dumps": json.dumps,
+         "orjson.dumps+decode": lambda x: orjson.dumps(x).decode("utf-8"),
+         "ssrjson.dumps": ssrjson.dumps,
+     },
+     "dumps(indented2)": {
+         "json.dumps": lambda x: json.dumps(x, indent=2),
+         "orjson.dumps+decode": lambda x: orjson.dumps(
+             x, option=orjson.OPT_INDENT_2
+         ).decode("utf-8"),
+         "ssrjson.dumps": lambda x: ssrjson.dumps(x, indent=2),
+     },
+     "dumps_to_bytes": {
+         "json.dumps+encode": lambda x: json.dumps(x).encode("utf-8"),
+         "orjson.dumps": orjson.dumps,
+         "ssrjson.dumps_to_bytes": ssrjson.dumps_to_bytes,
+     },
+     "dumps_to_bytes(indented2)": {
+         "json.dumps+encode": lambda x: json.dumps(x, indent=2).encode("utf-8"),
+         "orjson.dumps": lambda x: orjson.dumps(x, option=orjson.OPT_INDENT_2),
+         "ssrjson.dumps_to_bytes": lambda x: ssrjson.dumps_to_bytes(x, indent=2),
+     },
+     "loads(str)": {
+         "json.loads": json.loads,
+         "orjson.loads": orjson.loads,
+         "ssrjson.loads": ssrjson.loads,
+     },
+     "loads(bytes)": {
+         "json.loads": json.loads,
+         "orjson.loads": orjson.loads,
+         "ssrjson.loads": ssrjson.loads,
+     },
+ }
+ CATEGORIES = LIBRARIES.keys()
+
+ INDEXES = ["speed"]
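+ # Each INDEXES entry is a recorded metric; "speed" stores the accumulated wall
+ # time (ns) of the repeated calls, and the derived per-library ratios are
+ # baseline_time / library_time, so higher means faster than the baseline.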
+
+
+ def gc_prepare():
+     """
+     Call collect once, and then disable automatic GC.
+     Return True if automatic GC was enabled.
+     """
+     gc.collect()
+     gc_was_enabled = gc.isenabled()
+     if gc_was_enabled:
+         gc.disable()
+     return gc_was_enabled
+
+
+ def benchmark(repeat_time: int, func, *args):
+     """
+     Run a repeated benchmark with automatic GC disabled.
+     Returns time used (ns).
+     """
+     gc_was_enabled = gc_prepare()
+     try:
+         # warm up
+         _ssrjson_benchmark.run_object_accumulate_benchmark(func, 100, args)
+         return _ssrjson_benchmark.run_object_accumulate_benchmark(
+             func, repeat_time, args
+         )
+     finally:
+         if gc_was_enabled:
+             gc.enable()
+
+
+ def benchmark_unicode_arg(repeat_time: int, func, unicode: str, *args):
+     """
+     Run a repeated benchmark whose first argument is a str, with automatic
+     GC disabled. Returns time used (ns).
+     """
+     gc_was_enabled = gc_prepare()
+     try:
+         # warm up
+         _ssrjson_benchmark.run_unicode_accumulate_benchmark(func, 100, unicode, args)
+         return _ssrjson_benchmark.run_unicode_accumulate_benchmark(
+             func, repeat_time, unicode, args
+         )
+     finally:
+         if gc_was_enabled:
+             gc.enable()
+
+
+ def benchmark_invalidate_dump_cache(repeat_time: int, func, raw_bytes: bytes, *args):
+     """
+     orjson reuses a utf-8 cache when dumping the same input object, so give
+     every iteration its own freshly parsed copy to invalidate that cache.
+     Returns time used (ns).
+     """
+     # prepare identical data, without sharing objects
+     data_warmup = [json.loads(raw_bytes) for _ in range(10)]
+     data = [json.loads(raw_bytes) for _ in range(repeat_time)]
+     # disable GC
+     gc_was_enabled = gc_prepare()
+     try:
+         # warm up
+         for i in range(10):
+             new_args = (data_warmup[i], *args)
+             _ssrjson_benchmark.run_object_benchmark(func, new_args)
+         # timed runs
+         total = 0
+         for i in range(repeat_time):
+             new_args = (data[i], *args)
+             total += _ssrjson_benchmark.run_object_benchmark(func, new_args)
+         return total
+     finally:
+         if gc_was_enabled:
+             gc.enable()
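+
+
+ # Usage sketch (numbers purely illustrative): time 1000 dumps calls, one
+ # freshly parsed document per iteration, then derive the per-call cost:
+ #     total_ns = benchmark_invalidate_dump_cache(1000, ssrjson.dumps, raw_bytes)
+ #     per_call_ns = total_ns / 1000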
+
+
+ def get_benchmark_files() -> list[pathlib.Path]:
+     return sorted(pathlib.Path(CUR_DIR, "_files").glob("*.json"))
+
+
+ def _run_benchmark(
+     curfile_obj: defaultdict[str, Any],
+     repeat_times: int,
+     input_data: str | bytes,
+     mode: str,  # "dumps", etc.
+ ):
+     print(f"Running benchmark for {mode}")
+     funcs = LIBRARIES[mode]
+     cur_obj = curfile_obj[mode]
+
+     def pick_benchmark_func() -> Callable:
+         if "dumps" in mode and "loads" not in mode:
+             return benchmark_invalidate_dump_cache
+         if isinstance(input_data, str) and "loads" in mode:
+             return benchmark_unicode_arg
+         return benchmark
+
+     # process = psutil.Process()
+
+     for name, func in funcs.items():
+         benchmark_func = pick_benchmark_func()
+
+         # t0 = time.perf_counter()
+         # cpu_times_before = process.cpu_times()
+         # ctx_before = process.num_ctx_switches()
+         # mem_before = process.memory_info().rss
+
+         speed = benchmark_func(repeat_times, func, input_data)
+
+         # End measuring
+         # t1 = time.perf_counter()
+         # cpu_times_after = process.cpu_times()
+         # ctx_after = process.num_ctx_switches()
+
+         # user_cpu = cpu_times_after.user - cpu_times_before.user
+         # system_cpu = cpu_times_after.system - cpu_times_before.system
+         # voluntary_ctx = ctx_after.voluntary - ctx_before.voluntary
+         # involuntary_ctx = ctx_after.involuntary - ctx_before.involuntary
+         # mem_after = process.memory_info().rss
+
+         cur_obj[name] = {
+             "speed": speed,
+             # "user_cpu": user_cpu,
+             # "system_cpu": system_cpu,
+             # "ctx_vol": voluntary_ctx,
+             # "ctx_invol": involuntary_ctx,
+             # "mem_diff": mem_after - mem_before,
+             # "wall_time": t1 - t0,
+         }
+
+     funcs_iter = iter(funcs.items())
+     baseline_name, _ = next(funcs_iter)
+     baseline_data = cur_obj[baseline_name]
+     for name, func in funcs_iter:
+         if name.startswith("ssrjson"):
+             # debug use, bytes per sec
+             if "dumps" in mode:
+                 data_obj = json.loads(input_data)
+                 output = func(data_obj)
+                 if "bytes" in mode:
+                     size = len(output)
+                 else:
+                     _, size, _, _ = _ssrjson_benchmark.inspect_pyunicode(output)
+             else:
+                 size = (
+                     len(input_data)
+                     if isinstance(input_data, bytes)
+                     else _ssrjson_benchmark.inspect_pyunicode(input_data)[1]
+                 )
+             cur_obj["ssrjson_bytes_per_sec"] = ssrjson.dumps(
+                 size * repeat_times / (cur_obj[name]["speed"] / _NS_IN_ONE_S)
+             )
+         for index in INDEXES:
+             basename = name.split(".")[0]
+             if baseline_data[index] == 0:
+                 cur_obj[f"{basename}_{index}_ratio"] = math.inf
+             else:
+                 cur_obj[f"{basename}_{index}_ratio"] = (
+                     baseline_data[index] / cur_obj[name][index]
+                 )
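+
+
+ # Illustrative arithmetic: if json.dumps accumulates 8.0e9 ns on a workload
+ # and ssrjson.dumps accumulates 1.0e9 ns, then
+ # ssrjson_speed_ratio = 8.0e9 / 1.0e9 = 8.00, i.e. eight times the baseline.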
+
+
+ def run_file_benchmark(
+     file: str | os.PathLike,
+     result: defaultdict[str, defaultdict[str, Any]],
+     process_bytes: int,
+ ):
+     with open(file, "rb") as f:
+         raw_bytes = f.read()
+     raw = raw_bytes.decode("utf-8")
+     base_file_name = os.path.basename(file)
+     curfile_obj = result[base_file_name]
+     curfile_obj["byte_size"] = bytes_size = len(raw_bytes)
+     kind, str_size, is_ascii, _ = _ssrjson_benchmark.inspect_pyunicode(raw)
+     curfile_obj["pyunicode_size"] = str_size
+     curfile_obj["pyunicode_kind"] = kind
+     curfile_obj["pyunicode_is_ascii"] = is_ascii
+     # ceiling division: repeat until about process_bytes of input has been handled
+     repeat_times = int((process_bytes + bytes_size - 1) // bytes_size)
+
+     for mode in LIBRARIES.keys():
+         # feed the decoded str to "loads(str)"; every other mode gets the raw bytes
+         input_data = raw if mode == "loads(str)" else raw_bytes
+         _run_benchmark(curfile_obj, repeat_times, input_data, mode)
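+
+
+ # Illustrative sizing: a 2 KiB fixture with the default process_bytes of 1e8
+ # gives repeat_times = ceil(1e8 / 2048) = 48829 calls per library per mode.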
+
+
+ def get_head_rev_name():
+     return getattr(ssrjson, "__version__", ssrjson.ssrjson.__version__)
+
+
+ def get_real_output_file_name():
+     rev = get_head_rev_name()
+     if not rev:
+         file = "benchmark_result.json"
+     else:
+         file = f"benchmark_result_{rev}.json"
+     return file
+
+
+ def get_cpu_name() -> str:
+     cpuinfo_spec = importlib.util.find_spec("cpuinfo")
+     if cpuinfo_spec is not None:
+         import cpuinfo
+
+         cpu_name = cpuinfo.get_cpu_info().get("brand_raw", "UnknownCPU")
+     else:
+         # fallback
+         cpu_name: str = platform.processor()
+         if cpu_name.strip() == "":
+             # linux fallback
+             if os.path.exists("/proc/cpuinfo"):
+                 with open(file="/proc/cpuinfo", mode="r") as file:
+                     cpu_info_lines = file.readlines()
+                 for line in cpu_info_lines:
+                     if "model name" in line:
+                         cpu_name = re.sub(
+                             pattern=r"model name\s+:\s+", repl="", string=line
+                         )
+                         break
+             else:
+                 cpu_name = "UnknownCPU"
+     # merge nearby spaces
+     return re.sub(pattern=r"\s+", repl=" ", string=cpu_name).strip()
+
+
+ def get_mem_total() -> str:
+     mem_total: int = 0  # KiB, matching /proc/meminfo
+     if platform.system() == "Linux":
+         with open(file="/proc/meminfo", mode="r") as file:
+             mem_info_lines = file.readlines()
+         for line in mem_info_lines:
+             if "MemTotal" in line:
+                 mem_total = int(re.sub(pattern=r"[^0-9]", repl="", string=line))
+                 break
+     elif platform.system() == "Windows":
+         import psutil
+
+         # normalize the byte count to KiB so the GiB conversion below is correct
+         mem_total = psutil.virtual_memory().total // 1024
+     return f"{mem_total / (1024**2):.3f}GiB"
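+
+
+ # Sanity check of the unit handling: a 16 GiB machine reports roughly
+ # "MemTotal: 16777216 kB" on Linux, and 16777216 / 1024**2 = 16.000 -> "16.000GiB".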
+
+
+ def get_ratio_color(ratio: float) -> str:
+     if ratio < 1:
+         return "#d63031"  # red (worse than baseline)
+     elif ratio == 1:
+         return "black"  # black (baseline)
+     elif ratio < 2:
+         return "#e67e22"  # orange (similar/slightly better)
+     elif ratio < 4:
+         return "#f39c12"  # amber (decent improvement)
+     elif ratio < 8:
+         return "#27ae60"  # green (good)
+     elif ratio < 16:
+         return "#2980b9"  # blue (great)
+     else:
+         return "#8e44ad"  # purple (exceptional)
+
+
+ def plot_relative_ops(data: dict, doc_name: str, index_s: str) -> io.BytesIO:
+     libs = list(LIBRARIES_COLORS.keys())
+     colors = [LIBRARIES_COLORS[n] for n in libs]
+     n = len(CATEGORIES)
+     bar_width = 0.2
+     inner_pad = 0
+
+     fig, axs = plt.subplots(
+         1,
+         n,
+         figsize=(4 * n, 6),
+         sharey=False,
+         tight_layout=True,
+         gridspec_kw={"wspace": 0},
+     )
+
+     x_positions = [i * (bar_width + inner_pad) for i in range(len(libs))]
+
+     for ax, cat in zip(axs, CATEGORIES):
+         vals = [1.0] + [data[cat][f"{name}_{index_s}_ratio"] for name in libs[1:]]
+
+         for xi, val, col in zip(x_positions, vals, colors):
+             ax.bar(xi, val, width=bar_width, color=col)
+             ax.text(
+                 xi,
+                 val + 0.05,
+                 f"{val:.2f}x",
+                 ha="center",
+                 va="bottom",
+                 fontsize=9,
+                 color=get_ratio_color(val),
+             )
+
+         # baseline line
+         ax.axhline(1.0, color="gray", linestyle="--", linewidth=1)
+         # height = 1.1 * max bar height
+         ax.set_ylim(0, max(vals + [1.0]) * 1.1)
+
+         # hide all ticks
+         ax.tick_params(
+             axis="both",
+             which="both",
+             left=False,
+             bottom=False,
+             labelleft=False,
+             labelbottom=False,
+         )
+
+         # and spines
+         for spine in ("left", "top", "right"):
+             ax.spines[spine].set_visible(False)
+
+         ax.set_xlabel(cat, fontsize=10, labelpad=6)
+
+     fig.suptitle(
+         doc_name,
+         fontsize=20,
+         fontweight="bold",
+         y=0.98,
+     )
+
+     # color legend
+     legend_elements = [
+         plt.Line2D([0], [0], color=col, lw=4, label=name)
+         for name, col in LIBRARIES_COLORS.items()
+     ]
+     fig.legend(
+         handles=legend_elements,
+         loc="upper right",
+         bbox_to_anchor=(0.98, 0.95),
+         ncol=len(libs),
+         fontsize=14,
+         frameon=False,
+     )
+
+     fig.text(
+         0.5,
+         0,
+         "Higher is better",
+         ha="center",
+         va="bottom",
+         fontsize=8,
+         style="italic",
+         color="#555555",
+     )
+
+     buf = io.BytesIO()
+     plt.savefig(buf, format="svg", bbox_inches="tight")
+     buf.seek(0)
+     plt.close(fig)
+     return buf
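+
+
+ # Usage sketch (fixture name hypothetical):
+ #     buf = plot_relative_ops(result["sample.json"], "sample.json", "speed")
+ #     pathlib.Path("sample.svg").write_bytes(buf.getvalue())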
+
+
+ def draw_page_number(c: "canvas.Canvas", page_num: int):
+     from reportlab.lib.pagesizes import A4
+
+     width, _ = A4
+     c.setFont("Helvetica-Oblique", 8)  # italic
+     c.setFillColorRGB(0.5, 0.5, 0.5)  # grey
+     c.drawRightString(width - 40, 20, f"{page_num}")
+
+
+ def generate_pdf_report(
+     figures: List[List[io.BytesIO]], header_text: str, output_pdf_path: str
+ ) -> str:
+     from reportlab.pdfgen import canvas
+     from reportlab.graphics import renderPDF
+     from svglib.svglib import svg2rlg
+     from reportlab.lib.pagesizes import A4
+
+     c = canvas.Canvas(output_pdf_path, pagesize=A4)
+     width, height = A4
+
+     # heading info
+     heading = header_text.splitlines()
+     # the first line is the "# ..." markdown header
+     header, heading_info = heading[0].removeprefix("#").strip(), heading[1:]
+     c.setFont(PDF_HEADING_FONT, 20)
+     text_obj = c.beginText(40, height - 50)
+     text_obj.textLine(header)
+     c.drawText(text_obj)
+
+     # wrap heading_info lines if they overflow
+     max_width = width - 80  # 40 margin on both sides
+     wrapped_heading_info = []
+     for line in heading_info:
+         while c.stringWidth(line, PDF_TEXT_FONT, 10) > max_width:
+             # estimate how many characters fit (Courier is monospaced)
+             split_idx = int(max_width // c.stringWidth(" ", PDF_TEXT_FONT, 10))
+             # try to split at the nearest space before split_idx
+             space_idx = line.rfind(" ", 0, split_idx)
+             if space_idx == -1:
+                 space_idx = split_idx
+             wrapped_heading_info.append(line[:space_idx])
+             # TODO fixed indent
+             line = " " + line[space_idx:].lstrip()
+         wrapped_heading_info.append(line)
+     heading_info = wrapped_heading_info
+
+     c.setFont(PDF_TEXT_FONT, 10)
+     text_obj = c.beginText(40, height - 70)
+     for line in heading_info:
+         text_obj.textLine(line)
+     c.drawText(text_obj)
+
+     c.setFont("Helvetica-Oblique", 8)
+     text = "This report was generated by https://github.com/Nambers/ssrJSON-benchmark"
+     c.drawString(40, 20, text)
+     link_start = 40 + c.stringWidth("This report was generated by ")
+     link_end = link_start + c.stringWidth(
+         "https://github.com/Nambers/ssrJSON-benchmark"
+     )
+     text_height = 5  # adjusted height to better fit the link area
+     c.linkURL(
+         "https://github.com/Nambers/ssrJSON-benchmark",
+         (link_start, 20, link_end, 20 + text_height),
+         relative=1,
+     )
+
+     header_lines = header_text.count("\n") + 1
+     header_height = header_lines * 14 + 10
+     # subheading spacing = 30
+     y_pos = height - header_height - 30
+     bottom_margin = 20
+     vertical_gap = 20
+
+     p = 0
+
+     for name, figs in zip(INDEXES, figures):
+         text_obj = c.beginText()
+         text_obj.setTextOrigin(40, y_pos)
+         text_obj.setFont(PDF_HEADING_FONT, 14)
+         text_obj.textLine(f"{name}")
+         c.drawText(text_obj)
+         c.bookmarkHorizontal(name, 0, y_pos + 20)
+         c.addOutlineEntry(name, name, level=0)
+         y_pos -= 20
+         for svg_io in figs:
+             svg_io.seek(0)
+             drawing = svg2rlg(svg_io, font_map=font_map)
+
+             avail_w = width - 80
+             scale = avail_w / drawing.width
+             drawing.width *= scale
+             drawing.height *= scale
+             drawing.scale(scale, scale)
+
+             img_h = drawing.height
+             # not enough space left on this page
+             if y_pos - img_h - vertical_gap < bottom_margin:
+                 draw_page_number(c, p)
+                 p += 1
+                 c.showPage()
+                 y_pos = height - bottom_margin
+
+             c.setStrokeColorRGB(0.9, 0.9, 0.9)
+             c.setLineWidth(0.4)
+             c.line(40, y_pos, width - 40, y_pos)
+
+             renderPDF.draw(drawing, c, 40, y_pos - img_h)
+             y_pos -= img_h + vertical_gap
+
+     draw_page_number(c, p)
+     c.save()
+     return output_pdf_path
+
+
+ def generate_report(result: dict[str, dict[str, Any]], file: str, out_dir: str = CWD):
+     file = file.removesuffix(".json")
+     report_name = f"{file}.pdf"
+
+     figures = []
+
+     for index_s in INDEXES:
+         tmp = []
+         for bench_file in get_benchmark_files():
+             print(f"Processing {bench_file.name}")
+             tmp.append(
+                 plot_relative_ops(
+                     result[bench_file.name],
+                     bench_file.name,
+                     index_s,
+                 )
+             )
+         figures.append(tmp)
+
+     with open(os.path.join(CUR_DIR, "template.md"), "r") as f:
+         template = f.read()
+     template = template.format(
+         REV=file.removeprefix("benchmark_result_"),
+         TIME=time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime()),
+         OS=f"{platform.system()} {platform.machine()} {platform.release()} {platform.version()}",
+         PYTHON=sys.version,
+         ORJSON_VER=orjson.__version__,
+         SIMD_FLAGS=ssrjson.get_current_features(),
+         CHIPSET=get_cpu_name(),
+         MEM=get_mem_total(),
+     )
+     out_path = generate_pdf_report(
+         figures,
+         header_text=template,
+         output_pdf_path=os.path.join(out_dir, report_name),
+     )
+     print(f"Report saved to {out_path}")
+
+
+ def generate_report_markdown(
+     result: dict[str, dict[str, Any]], file: str, out_dir: str = CWD
+ ):
+     file = file.removesuffix(".json")
+     report_name = f"{file}.md"
+     report_folder = os.path.join(out_dir, f"{file}_report")
+
+     # mkdir
+     if not os.path.exists(report_folder):
+         os.makedirs(report_folder)
+
+     with open(os.path.join(CUR_DIR, "template.md"), "r") as f:
+         template = f.read()
+     template = template.format(
+         REV=file.removeprefix("benchmark_result_"),
+         TIME=time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime()),
+         OS=f"{platform.system()} {platform.machine()} {platform.release()} {platform.version()}",
+         PYTHON=sys.version,
+         ORJSON_VER=orjson.__version__,
+         SIMD_FLAGS=ssrjson.get_current_features(),
+         CHIPSET=get_cpu_name(),
+         MEM=get_mem_total(),
+     )
+
+     for index_s in INDEXES:
+         template += f"\n\n## {index_s}\n\n"
+         for bench_file in get_benchmark_files():
+             print(f"Processing {bench_file.name}")
+             with open(
+                 os.path.join(report_folder, bench_file.name + ".svg"), "wb"
+             ) as svg_file:
+                 svg_file.write(
+                     plot_relative_ops(
+                         result[bench_file.name],
+                         bench_file.name,
+                         index_s,
+                     ).getvalue()
+                 )
+             # add svg
+             template += f"![{bench_file.name}](./{bench_file.name}.svg)\n\n"
+
+     with open(os.path.join(report_folder, report_name), "w") as f:
+         f.write(template)
+     print(f"Report saved to {os.path.join(report_folder, report_name)}")
+
+
+ def run_benchmark(process_bytes: int = 10**8):
+     file = get_real_output_file_name()
+     if os.path.exists(file):
+         os.remove(file)
+     result: defaultdict[str, defaultdict[str, Any]] = defaultdict(
+         lambda: defaultdict(dict)
+     )
+
+     for bench_file in get_benchmark_files():
+         run_file_benchmark(bench_file, result, process_bytes)
+     output_result = json.dumps(result, indent=4)
+
+     with open(file, "w", encoding="utf-8") as f:
+         f.write(output_result)
+     return result, file
+
+
+ def run_benchmark_default():
+     """
+     Run the benchmark with default parameters and generate a PDF report.
+     """
+     j, file = run_benchmark()
+     file = os.path.basename(file)
+     generate_report(j, file)
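+
+
+ # Typical invocation (exact module path is an assumption):
+ #     python -c "from ssrjson_benchmark.benchmark import run_benchmark_default; run_benchmark_default()"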
@@ -0,0 +1,10 @@
+ # ssrJSON benchmark Report
+
+ REV: `{REV}`
+ Python: `{PYTHON}`
+ Orjson: `{ORJSON_VER}`
+ Generated time: {TIME}
+ OS: {OS}
+ SIMD flags: {SIMD_FLAGS}
+ Chipset: {CHIPSET}
+ Memory: {MEM}