fustor-benchmark 0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,61 @@
1
+ Metadata-Version: 2.4
2
+ Name: fustor-benchmark
3
+ Version: 0.2
4
+ Summary: Performance benchmarking tool for Fustor platform
5
+ Author-email: Huajin Wang <wanghuajin999@163.com>
6
+ Requires-Python: >=3.11
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: click>=8.1.7
9
+ Requires-Dist: requests>=2.31.0
10
+ Requires-Dist: pyyaml>=6.0.1
11
+ Requires-Dist: psutil>=5.9.0
12
+
13
+ # Fustor 性能基准测试工具 (Benchmark)
14
+
15
+ 该模块是 Fustor 平台的自动化压力测试和性能量化工具。它通过模拟大规模文件系统元数据,量化 Fusion API 相比于操作系统原生文件系统调用的性能优势。
16
+
17
+ ## 核心设计目标
18
+
19
+ 1. **量化优势**: 对比 Fusion 内存索引与 Linux 原生 `find` 命令在递归元数据检索下的延迟与吞吐量。
20
+ 2. **百万级规模**: 支持生成并同步超过 1,000,000 个文件的元数据。
21
+ 3. **全自动流程**: 自动编排 Registry、Fusion 和 Agent,实现一键式从环境部署到报告生成。
22
+
23
+ ## 目录结构规范与安全
24
+
25
+ 为了保护生产数据免受误删,Benchmark 实施了严格的路径校验:
26
+
27
+ * **路径白名单**: 压测主目录(`run-dir`)必须以 **`fustor-benchmark-run`** 结尾。
28
+ * **结构定义**:
29
+ * `{run-dir}/data/`: 存放生成的数百万个模拟文件。
30
+ * `{run-dir}/.fustor/`: 存放压测期间的独立配置文件、SQLite 数据库、日志以及最终报告。
31
+
32
+ ## 快速使用
33
+
34
+ ### 1. 数据生成
35
+ 构建一个包含 1000 个 UUID 目录,每个目录下 1000 个文件(总计 100 万文件)的测试集:
36
+ ```bash
37
+ uv run fustor-benchmark generate fustor-benchmark-run --num-dirs 1000
38
+ ```
39
+
40
+ ### 2. 执行压测
41
+ 运行全链路同步并执行并发性能对比:
42
+ ```bash
43
+ uv run fustor-benchmark run fustor-benchmark-run -d 5 -c 20 -n 100
44
+ ```
45
+ * `-d`: 探测深度。
46
+ * `-c`: 并发数。
47
+ * `-n`: 总请求次数。
48
+
49
+ ## 报告与指标
50
+
51
+ 压测完成后,将在 `fustor-benchmark-run/results/` 下生成以下产出:
52
+
53
+ 1. **`stress-find.html`**: 交互式可视化报表。
54
+ * **Latency Distribution**: 展示 Avg, P50, P95, P99 的柱状对比。
55
+ * **Latency Percentiles**: 展现延迟分布曲线。
56
+ * **Speedup Factor**: 自动计算 Fusion 相比 OS 的加速倍数。
57
+ 2. **`stress-find.json`**: 结构化的指标数据,包含所有原始延迟序列。
58
+
59
+ ## 安全保护说明
60
+
61
+ Benchmark 会在 `run-dir` 下执行 `shutil.rmtree` 操作以清理旧环境。**请务必确保指定的目录不包含任何重要业务数据**。如果尝试在非 `fustor-benchmark-run` 后缀目录下运行,程序将强制退出。
@@ -0,0 +1,49 @@
1
+ # Fustor 性能基准测试工具 (Benchmark)
2
+
3
+ 该模块是 Fustor 平台的自动化压力测试和性能量化工具。它通过模拟大规模文件系统元数据,量化 Fusion API 相比于操作系统原生文件系统调用的性能优势。
4
+
5
+ ## 核心设计目标
6
+
7
+ 1. **量化优势**: 对比 Fusion 内存索引与 Linux 原生 `find` 命令在递归元数据检索下的延迟与吞吐量。
8
+ 2. **百万级规模**: 支持生成并同步超过 1,000,000 个文件的元数据。
9
+ 3. **全自动流程**: 自动编排 Registry、Fusion 和 Agent,实现一键式从环境部署到报告生成。
10
+
11
+ ## 目录结构规范与安全
12
+
13
+ 为了保护生产数据免受误删,Benchmark 实施了严格的路径校验:
14
+
15
+ * **路径白名单**: 压测主目录(`run-dir`)必须以 **`fustor-benchmark-run`** 结尾。
16
+ * **结构定义**:
17
+ * `{run-dir}/data/`: 存放生成的数百万个模拟文件。
18
+ * `{run-dir}/.fustor/`: 存放压测期间的独立配置文件、SQLite 数据库、日志以及最终报告。
19
+
20
+ ## 快速使用
21
+
22
+ ### 1. 数据生成
23
+ 构建一个包含 1000 个 UUID 目录,每个目录下 1000 个文件(总计 100 万文件)的测试集:
24
+ ```bash
25
+ uv run fustor-benchmark generate fustor-benchmark-run --num-dirs 1000
26
+ ```
27
+
28
+ ### 2. 执行压测
29
+ 运行全链路同步并执行并发性能对比:
30
+ ```bash
31
+ uv run fustor-benchmark run fustor-benchmark-run -d 5 -c 20 -n 100
32
+ ```
33
+ * `-d`: 探测深度。
34
+ * `-c`: 并发数。
35
+ * `-n`: 总请求次数。
36
+
37
+ ## 报告与指标
38
+
39
+ 压测完成后,将在 `fustor-benchmark-run/results/` 下生成以下产出:
40
+
41
+ 1. **`stress-find.html`**: 交互式可视化报表。
42
+ * **Latency Distribution**: 展示 Avg, P50, P95, P99 的柱状对比。
43
+ * **Latency Percentiles**: 展现延迟分布曲线。
44
+ * **Speedup Factor**: 自动计算 Fusion 相比 OS 的加速倍数。
45
+ 2. **`stress-find.json`**: 结构化的指标数据,包含所有原始延迟序列。
46
+
47
+ ## 安全保护说明
48
+
49
+ Benchmark 会在 `run-dir` 下执行 `shutil.rmtree` 操作以清理旧环境。**请务必确保指定的目录不包含任何重要业务数据**。如果尝试在非 `fustor-benchmark-run` 后缀目录下运行,程序将强制退出。
@@ -0,0 +1,30 @@
1
+ [project]
2
+ name = "fustor-benchmark"
3
+ dynamic = ["version"]
4
+ description = "Performance benchmarking tool for Fustor platform"
5
+ readme = "README.md"
6
+ requires-python = ">=3.11"
7
+ dependencies = [
8
+ "click>=8.1.7",
9
+ "requests>=2.31.0",
10
+ "pyyaml>=6.0.1",
11
+ "psutil>=5.9.0",
12
+ ]
13
+ [[project.authors]]
14
+ name = "Huajin Wang"
15
+ email = "wanghuajin999@163.com"
16
+
17
+ [build-system]
18
+ requires = ["setuptools>=61.0", "setuptools-scm>=8.0"]
19
+ build-backend = "setuptools.build_meta"
20
+
21
+ [tool.setuptools_scm]
22
+ root = ".."
23
+ version_scheme = "post-release"
24
+ local_scheme = "dirty-tag"
25
+
26
+ [project.scripts]
27
+ fustor-benchmark = "fustor_benchmark.cli:cli"
28
+
29
+ [tool.setuptools.packages.find]
30
+ where = ["src"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
File without changes
@@ -0,0 +1,40 @@
1
+ import click
2
+ import os
3
+ from .generator import DataGenerator
4
+ from .runner import BenchmarkRunner
5
+
6
@click.group()
def cli():
    """Fustor Benchmark Tool"""
    # Root command group; subcommands are attached via @cli.command() below.
10
+
11
@cli.command()
@click.argument("run-dir", type=click.Path(exists=False))
@click.option("--num-dirs", default=1000, help="Number of UUID directories")
@click.option("--num-subdirs", default=4, help="Number of subdirectories per UUID directory")
@click.option("--files-per-subdir", default=250, help="Files per subdirectory")
def generate(run_dir, num_dirs, num_subdirs, files_per_subdir):
    """Generate benchmark dataset"""
    # The dataset always lives in the "data" subdirectory of the run dir.
    data_root = os.path.join(run_dir, "data")
    DataGenerator(data_root).generate(num_dirs, num_subdirs, files_per_subdir)
20
+
21
@cli.command()
@click.argument("run-dir", type=click.Path(exists=True))
@click.option("--concurrency", "-c", default=20, help="Number of concurrent workers")
@click.option("--requests", "-n", default=200, help="Total number of requests to run")
@click.option("--target-depth", "-d", default=5, help="Depth of target directories for benchmarking")
@click.option("--force-gen", is_flag=True, help="Force regeneration of test data")
@click.option("--skip-gen/--no-skip-gen", default=True, help="Skip generation if data exists (default: True)")
def run(run_dir, concurrency, requests, target_depth, force_gen, skip_gen):
    """Run the full benchmark suite"""
    abs_run_dir = os.path.abspath(run_dir)
    runner = BenchmarkRunner(run_dir=abs_run_dir)
    # If force_gen is False and skip_gen is True, we treat it as custom_target=True (skip generation).
    # BUG FIX: `--skip-gen` was declared `is_flag=True, default=True`, so passing the
    # flag changed nothing and it could never be disabled; the paired
    # `--no-skip-gen` switch makes the option actually controllable while keeping
    # `--skip-gen` valid for existing invocations.
    custom_target = skip_gen and not force_gen
    runner.run(concurrency=concurrency, reqs=requests, target_depth=target_depth, force_gen=force_gen, custom_target=custom_target)
35
+
36
# NOTE: `@cli.command()` already registers `generate` and `run` on the group,
# so the previous explicit `cli.add_command(...)` calls were redundant no-ops
# (re-adding an already-attached command) and have been removed.

if __name__ == "__main__":
    cli()
@@ -0,0 +1,62 @@
1
+ import os
2
+ import uuid
3
+ import time
4
+ import shutil
5
+ import click
6
+ from concurrent.futures import ThreadPoolExecutor
7
+
8
class DataGenerator:
    """Builds a large synthetic file tree used as the benchmark dataset.

    Layout produced under ``base_dir``:
    ``upload/submit/{c1}/{c2}/{uuid}/sub_N/data_XXXX.dat`` where c1/c2 are the
    first two characters of the UUID (fan-out buckets).
    """

    def __init__(self, base_dir: str):
        # Root of the generated dataset (typically <run-dir>/data).
        self.base_dir = os.path.abspath(base_dir)
        # All generated files live below upload/submit to mimic the ingest layout.
        self.submit_dir = os.path.join(self.base_dir, "upload/submit")

    def _create_batch(self, args):
        """
        Creates subdirectories and files within a specific UUID directory.
        args: (uuid_path, num_subdirs, files_per_subdir) — packed as a single
        tuple so this method can be mapped over an executor.
        """
        uuid_path, num_subdirs, files_per_subdir = args
        try:
            for s in range(num_subdirs):
                sub_path = os.path.join(uuid_path, f"sub_{s}")
                os.makedirs(sub_path, exist_ok=True)
                for i in range(files_per_subdir):
                    # Create empty dummy files; only metadata matters for the benchmark.
                    file_path = os.path.join(sub_path, f"data_{i:04d}.dat")
                    with open(file_path, "w"):
                        pass
        except Exception as e:
            # Best-effort: report and continue; one failed batch must not abort the run.
            print(f"Error generating data in {uuid_path}: {e}")

    def generate(self, num_uuids: int = 1000, num_subdirs: int = 4, files_per_subdir: int = 250):
        """Generate the dataset.

        Returns base_dir on success, or None when the safety check rejects the
        target directory.
        """
        # Safety Check: Only allow operations in directories ending with 'fustor-benchmark-run'.
        # Note: self.base_dir is typically run-dir/data, so we check the parent run-dir.
        # This protects arbitrary directories from the rmtree below.
        run_dir = os.path.dirname(self.base_dir)
        if not run_dir.endswith("fustor-benchmark-run"):
            click.echo(click.style(f"FATAL: Operation denied. Target run-dir '{run_dir}' must end with 'fustor-benchmark-run' for safety.", fg="red", bold=True))
            return

        if os.path.exists(self.base_dir):
            click.echo(f"Cleaning up old data in {self.base_dir}...")
            shutil.rmtree(self.base_dir)

        total_files = num_uuids * num_subdirs * files_per_subdir
        # BUG FIX: these messages previously hard-coded "1000 UUID directories"
        # and "{250 files}" regardless of the actual parameters.
        click.echo(f"Generating {total_files:,} files in {num_uuids} UUID directories...")
        click.echo(f"Structure: {self.submit_dir}/{{c1}}/{{c2}}/{{uuid}}/sub_X/{{{files_per_subdir} files}}")

        tasks = []
        for _ in range(num_uuids):
            uid = str(uuid.uuid4())
            # Target path at depth 5 (relative to base_dir/data):
            # 1:upload / 2:submit / 3:c1 / 4:c2 / 5:uuid
            path = os.path.join(self.submit_dir, uid[0], uid[1], uid)
            tasks.append((path, num_subdirs, files_per_subdir))

        start_gen = time.time()
        # Using a high worker count for I/O bound file creation.
        # BUG FIX: os.cpu_count() can return None; fall back to a sane default.
        workers = (os.cpu_count() or 4) * 8
        with ThreadPoolExecutor(max_workers=workers) as executor:
            list(executor.map(self._create_batch, tasks))

        duration = time.time() - start_gen
        # Guard against a zero-length duration for tiny task lists.
        rate = total_files / duration if duration > 0 else float(total_files)
        click.echo(f"Generation Complete: {duration:.2f}s (Average: {rate:.1f} files/sec)")
        return self.base_dir
@@ -0,0 +1,419 @@
1
+ import time
2
+ import subprocess
3
+ import requests
4
+ import click
5
+ import statistics
6
+ import random
7
+ import os
8
+ import json
9
+ from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
10
+ from .generator import DataGenerator
11
+ from .services import ServiceManager
12
+
13
def run_find_recursive_metadata_task(args):
    """
    Simulates a recursive metadata retrieval: `find <dir> -type f -ls`
    This matches the recursive nature of Fusion's tree API for a subdirectory.

    args is a (data_dir, subdir) tuple; the return value is the wall-clock
    duration in seconds of the `find` invocation. All command output is
    discarded — only timing matters for the benchmark.
    """
    root, relative = args
    probe = os.path.join(root, relative.lstrip('/'))

    # Recursive search for all descendant files with metadata; output suppressed.
    t0 = time.time()
    subprocess.run(
        ["find", probe, "-type", "f", "-ls"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return time.time() - t0
27
+
28
+ class BenchmarkRunner:
29
+ def __init__(self, run_dir):
30
+ self.run_dir = os.path.abspath(run_dir) # Ensure absolute path
31
+ # Derived directories
32
+ self.data_dir = os.path.join(self.run_dir, "data")
33
+ self.env_dir = os.path.join(self.run_dir, ".fustor")
34
+
35
+ self.services = ServiceManager(self.run_dir)
36
+ self.generator = DataGenerator(self.data_dir)
37
+
38
+ def _calculate_stats(self, latencies, total_time, count):
39
+ """Calculate rich statistics from a list of latencies (in seconds)."""
40
+ if not latencies:
41
+ return {
42
+ "qps": 0, "avg": 0, "min": 0, "max": 0, "stddev": 0,
43
+ "p50": 0, "p95": 0, "p99": 0
44
+ }
45
+
46
+ # Convert to milliseconds for presentation
47
+ l_ms = sorted([l * 1000 for l in latencies])
48
+ qps = count / total_time
49
+
50
+ # Calculate percentiles
51
+ qs = statistics.quantiles(l_ms, n=100) if len(l_ms) >= 2 else [l_ms[0]] * 100
52
+
53
+ return {
54
+ "qps": qps,
55
+ "avg": statistics.mean(l_ms),
56
+ "min": min(l_ms),
57
+ "max": max(l_ms),
58
+ "stddev": statistics.stdev(l_ms) if len(l_ms) >= 2 else 0,
59
+ "p50": statistics.median(l_ms),
60
+ "p95": qs[94],
61
+ "p99": qs[98],
62
+ "raw": l_ms # Keep raw data for charting
63
+ }
64
+
65
    def _discover_leaf_targets_via_api(self, api_key: str, depth: int):
        """Finds directories at the specified depth relative to data_dir using Fusion API.

        Returns a list of absolute directory paths. Falls back to ["/"] when
        the API call fails, returns non-200, or yields no directory at the
        requested relative depth.
        """
        # Calculate the depth of the data_dir itself (prefix_depth)
        # e.g., /home/user/data -> ['home', 'user', 'data'] -> depth 3
        prefix_depth = len(self.data_dir.strip('/').split('/')) if self.data_dir != '/' else 0
        max_fetch_depth = depth + prefix_depth

        click.echo(f"Discovering target directories at depth {depth} (prefix: {prefix_depth}, total: {max_fetch_depth}) via Fusion API...")

        fusion_url = f"http://localhost:{self.services.fusion_port}"
        headers = {"X-API-Key": api_key}

        try:
            # Fetch the tree with exact required depth
            res = requests.get(
                f"{fusion_url}/views/fs/tree",
                params={"path": "/", "max_depth": max_fetch_depth, "only_path": "true"},
                headers=headers,
                timeout=30
            )
            if res.status_code != 200:
                return ["/"]

            tree_data = res.json()
            targets = []

            # Determine the mount point node (the one matching self.data_dir)
            # and start walking depth-counting from there.
            # NOTE(review): assumes each tree node carries 'path', optional
            # 'children' (either a dict keyed by name or a list), and
            # 'content_type' — confirm against the Fusion tree API schema.
            def find_and_walk(node, current_rel_depth, inside_mount):
                path = node.get('path', '')

                # Check if this node is our data_dir (mount point)
                if not inside_mount:
                    if os.path.abspath(path) == os.path.abspath(self.data_dir):
                        inside_mount = True
                        current_rel_depth = 0
                    else:
                        # Continue searching for the mount point in children
                        children = node.get('children', {})
                        if isinstance(children, dict):
                            for child in children.values(): find_and_walk(child, 0, False)
                        elif isinstance(children, list):
                            for child in children: find_and_walk(child, 0, False)
                        return

                # If we are here, we are at or inside the mount point.
                # Collect only directories at exactly the requested relative depth.
                if current_rel_depth == depth:
                    if node.get('content_type') == 'directory':
                        targets.append(path)
                    return

                # Recurse further down
                children = node.get('children', {})
                if isinstance(children, dict):
                    for child in children.values(): find_and_walk(child, current_rel_depth + 1, True)
                elif isinstance(children, list):
                    for child in children: find_and_walk(child, current_rel_depth + 1, True)

            find_and_walk(tree_data, 0, False)
        except Exception as e:
            # Any failure (network, JSON decoding, schema) degrades to benchmarking the root.
            click.echo(click.style(f"Discovery error: {e}. Falling back to root.", fg="yellow"))
            return ["/"]

        if not targets:
            click.echo(click.style(f"No targets found at relative depth {depth}. (Check if data is synced)", fg="yellow"))
            targets = ["/"]
        else:
            # Echo one sampled path so the operator can sanity-check the depth math.
            example_path = random.choice(targets)
            click.echo(f" [Check] Example target path at relative depth {depth}: '{example_path}'")

        click.echo(f"Discovered {len(targets)} candidate directories via API.")
        return targets
137
+
138
+ def run_concurrent_baseline(self, targets, concurrency=20, requests_count=100):
139
+ click.echo(f"Running Concurrent OS Baseline (Recursive find -ls): {concurrency} workers, {requests_count} requests...")
140
+ # Since targets are now absolute paths from Fusion, we extract the relative part
141
+ # to join with local data_dir if needed, but here find needs absolute paths.
142
+ tasks = [(self.data_dir, t) for t in [random.choice(targets) for _ in range(requests_count)]]
143
+ latencies = []
144
+ start_total = time.time()
145
+ with ProcessPoolExecutor(max_workers=concurrency) as executor:
146
+ futures = [executor.submit(run_find_recursive_metadata_task, t) for t in tasks]
147
+ for f in as_completed(futures): latencies.append(f.result())
148
+ total_time = time.time() - start_total
149
+ return self._calculate_stats(latencies, total_time, requests_count)
150
+
151
+ def _run_single_fusion_req(self, url, headers, path):
152
+ start = time.time()
153
+ try:
154
+ res = requests.get(f"{url}/views/fs/tree", params={"path": path}, headers=headers, timeout=10)
155
+ if res.status_code != 200: return None
156
+ except Exception: return None
157
+ return time.time() - start
158
+
159
+ def run_concurrent_fusion(self, api_key, targets, concurrency=20, requests_count=100):
160
+ click.echo(f"Running Concurrent Fusion API (Recursive Tree): {concurrency} workers, {requests_count} requests...")
161
+ url = f"http://localhost:{self.services.fusion_port}"
162
+ headers = {"X-API-Key": api_key}
163
+ tasks = [random.choice(targets) for _ in range(requests_count)]
164
+ latencies = []
165
+ start_total = time.time()
166
+ with ThreadPoolExecutor(max_workers=concurrency) as executor:
167
+ futures = [executor.submit(self._run_single_fusion_req, url, headers, t) for t in tasks]
168
+ for f in as_completed(futures):
169
+ res = f.result()
170
+ if res is not None: latencies.append(res)
171
+ total_time = time.time() - start_total
172
+ return self._calculate_stats(latencies, total_time, requests_count)
173
+
174
+ def wait_for_sync(self, api_key: str):
175
+ click.echo("Waiting for Fusion readiness (Alternating between API status and Agent logs)...")
176
+ fusion_url = f"http://localhost:{self.services.fusion_port}"
177
+ headers = {"X-API-Key": api_key}
178
+ start_wait = time.time()
179
+ loop_count = 0
180
+ while True:
181
+ elapsed = time.time() - start_wait
182
+ if loop_count % 2 == 0:
183
+ is_ok, log_msg = self.services.check_agent_logs()
184
+ if not is_ok: raise RuntimeError(f"Agent reported error during sync: {log_msg}")
185
+ if int(elapsed) % 30 < 5: click.echo(f" [Agent] Status: {log_msg}")
186
+ try:
187
+ res = requests.get(f"{fusion_url}/views/fs/tree", params={"path": "/"}, headers=headers, timeout=5)
188
+ if res.status_code == 200:
189
+ click.echo(f" [Fusion] READY (200 OK) after {elapsed:.1f}s.")
190
+ break
191
+ elif res.status_code == 503:
192
+ if int(elapsed) % 5 == 0:
193
+ click.echo(f" [Fusion] Still syncing... (Elapsed: {int(elapsed)}s)")
194
+ else: raise RuntimeError(f" [Fusion] Unexpected API response: {res.status_code}")
195
+ except requests.ConnectionError: pass
196
+ except Exception as e: click.echo(f" [Fusion] Warning: Connection glitch ({e})")
197
+ loop_count += 1
198
+ time.sleep(5)
199
+ click.echo("Sync complete. Proceeding to benchmark.")
200
+
201
    def generate_html_report(self, results, output_path):
        """Generates a rich HTML report with charts using Chart.js.

        `results` is the dict assembled in run(): per-side stats dicts under
        "os" and "fusion" (as produced by _calculate_stats) plus run metadata.
        The template below is plain string substitution via .replace() on
        distinct {{placeholder}} markers — no templating engine involved.
        """
        template = """
        <!DOCTYPE html>
        <html>
        <head>
            <meta charset="utf-8">
            <title>Fustor Benchmark Report</title>
            <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
            <style>
                body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; margin: 40px; background: #f4f7f6; color: #333; }
                .container { max-width: 1100px; margin: auto; background: white; padding: 40px; border-radius: 12px; box-shadow: 0 4px 20px rgba(0,0,0,0.08); }
                h1 { color: #2c3e50; border-bottom: 2px solid #eee; padding-bottom: 15px; margin-top: 0; }
                .summary { display: flex; justify-content: space-between; margin: 30px -10px; }
                .stat-card { background: #fff; padding: 20px; border-radius: 10px; flex: 1; margin: 0 10px; text-align: center; border: 1px solid #eee; border-top: 5px solid #3498db; transition: transform 0.2s; }
                .stat-card:hover { transform: translateY(-5px); }
                .stat-card.fusion { border-top-color: #2ecc71; }
                .stat-card.gain { border-top-color: #f1c40f; }
                .stat-value { font-size: 28px; font-weight: bold; margin: 10px 0; color: #2c3e50; }
                .stat-label { font-size: 14px; color: #7f8c8d; text-transform: uppercase; letter-spacing: 1px; }
                .chart-row { display: flex; gap: 20px; margin-bottom: 40px; }
                .chart-box { flex: 1; background: #fff; padding: 20px; border-radius: 10px; border: 1px solid #eee; }
                .chart-container { position: relative; height: 350px; width: 100%; }
                table { width: 100%; border-collapse: collapse; margin-top: 20px; font-size: 15px; }
                th, td { text-align: left; padding: 15px; border-bottom: 1px solid #eee; }
                th { background: #f8f9fa; color: #2c3e50; font-weight: 600; }
                .winning { color: #2ecc71; font-weight: bold; }
                .info-bar { background: #e8f4fd; padding: 15px; border-radius: 8px; margin-bottom: 30px; display: flex; gap: 20px; font-size: 14px; }
            </style>
        </head>
        <body>
        <div class="container">
            <h1>Fustor Performance Benchmark</h1>

            <div class="info-bar">
                <span>📅 Time: <strong>{{timestamp}}</strong></span>
                <span>📂 Target Depth: <strong>{{depth}}</strong></span>
                <span>🚀 Requests: <strong>{{reqs}}</strong></span>
                <span>👥 Concurrency: <strong>{{concurrency}}</strong></span>
            </div>

            <div class="summary">
                <div class="stat-card">
                    <div class="stat-label">OS Baseline (Avg)</div>
                    <div class="stat-value">{{os_avg}} ms</div>
                </div>
                <div class="stat-card fusion">
                    <div class="stat-label">Fusion API (Avg)</div>
                    <div class="stat-value">{{fusion_avg}} ms</div>
                </div>
                <div class="stat-card gain">
                    <div class="stat-label">Speedup Factor</div>
                    <div class="stat-value" style="color: #2ecc71">{{gain}}x</div>
                </div>
            </div>

            <div class="chart-row">
                <div class="chart-box">
                    <h3>Latency Distribution (Bar)</h3>
                    <div class="chart-container">
                        <canvas id="barChart"></canvas>
                    </div>
                </div>
                <div class="chart-box">
                    <h3>Latency Percentiles (Line)</h3>
                    <div class="chart-container">
                        <canvas id="lineChart"></canvas>
                    </div>
                </div>
            </div>

            <h2>Detailed Metrics Comparison</h2>
            <table>
                <thead>
                    <tr>
                        <th>Metric (ms)</th>
                        <th>OS (find -ls)</th>
                        <th>Fusion API</th>
                        <th>Improvement</th>
                    </tr>
                </thead>
                <tbody>
                    <tr><td>Average Latency</td><td>{{os_avg}}</td><td>{{fusion_avg}}</td><td class="winning">{{gain_avg}}x faster</td></tr>
                    <tr><td>Median (P50)</td><td>{{os_p50}}</td><td>{{fusion_p50}}</td><td class="winning">{{gain_p50}}x faster</td></tr>
                    <tr><td>P95 Latency</td><td>{{os_p95}}</td><td>{{fusion_p95}}</td><td class="winning">{{gain_p95}}x faster</td></tr>
                    <tr><td>P99 Latency</td><td>{{os_p99}}</td><td>{{fusion_p99}}</td><td class="winning">{{gain_p99}}x faster</td></tr>
                    <tr><td>Throughput (QPS)</td><td>{{os_qps}}</td><td>{{fusion_qps}}</td><td class="winning">{{gain_qps}}x higher</td></tr>
                    <tr><td>Min / Max</td><td>{{os_min}} / {{os_max}}</td><td>{{fusion_min}} / {{fusion_max}}</td><td>-</td></tr>
                </tbody>
            </table>
        </div>

        <script>
            // Bar Chart
            new Chart(document.getElementById('barChart'), {
                type: 'bar',
                data: {
                    labels: ['Average', 'Median', 'P95', 'P99'],
                    datasets: [
                        { label: 'OS Baseline', data: [{{os_avg}}, {{os_p50}}, {{os_p95}}, {{os_p99}}], backgroundColor: '#3498db' },
                        { label: 'Fusion API', data: [{{fusion_avg}}, {{fusion_p50}}, {{fusion_p95}}, {{fusion_p99}}], backgroundColor: '#2ecc71' }
                    ]
                },
                options: { responsive: true, maintainAspectRatio: false, scales: { y: { beginAtZero: true } } }
            });

            // Percentile Line Chart
            new Chart(document.getElementById('lineChart'), {
                type: 'line',
                data: {
                    labels: ['Min', 'P50', 'P75', 'P90', 'P95', 'P99', 'Max'],
                    datasets: [
                        { label: 'OS Baseline', data: [{{os_min}}, {{os_p50}}, {{os_p75}}, {{os_p90}}, {{os_p95}}, {{os_p99}}, {{os_max}}], borderColor: '#3498db', fill: false, tension: 0.1 },
                        { label: 'Fusion API', data: [{{fusion_min}}, {{fusion_p50}}, {{fusion_p75}}, {{fusion_p90}}, {{fusion_p95}}, {{fusion_p99}}, {{fusion_max}}], borderColor: '#2ecc71', fill: false, tension: 0.1 }
                    ]
                },
                options: { responsive: true, maintainAspectRatio: false, scales: { y: { type: 'logarithmic', title: { display: true, text: 'Latency (ms) - Log Scale' } } } }
            });
        </script>
        </body>
        </html>
        """
        # Calculate speedups (0 when Fusion average is 0, avoiding division by zero).
        g_avg = results['os']['avg'] / results['fusion']['avg'] if results['fusion']['avg'] > 0 else 0

        # Helper for percentiles in line chart.
        # Nearest-rank approximation over the pre-sorted "raw" samples; the index
        # is clamped so p=100 maps to the last element.
        def get_p(stats, p):
            raw = stats['raw']
            idx = int(len(raw) * p / 100)
            return raw[min(idx, len(raw)-1)]

        # Placeholder substitution. Note the markers are chosen so that no marker
        # is a substring of another (e.g. "{{gain}}" does not match "{{gain_avg}}"),
        # so replacement order is safe.
        html = template.replace("{{timestamp}}", results['timestamp']) \
            .replace("{{depth}}", str(results['depth'])) \
            .replace("{{reqs}}", str(results['requests'])) \
            .replace("{{concurrency}}", str(results['concurrency'])) \
            .replace("{{os_avg}}", f"{results['os']['avg']:.2f}") \
            .replace("{{fusion_avg}}", f"{results['fusion']['avg']:.2f}") \
            .replace("{{os_p50}}", f"{results['os']['p50']:.2f}") \
            .replace("{{fusion_p50}}", f"{results['fusion']['p50']:.2f}") \
            .replace("{{os_p95}}", f"{results['os']['p95']:.2f}") \
            .replace("{{fusion_p95}}", f"{results['fusion']['p95']:.2f}") \
            .replace("{{os_p99}}", f"{results['os']['p99']:.2f}") \
            .replace("{{fusion_p99}}", f"{results['fusion']['p99']:.2f}") \
            .replace("{{os_qps}}", f"{results['os']['qps']:.1f}") \
            .replace("{{fusion_qps}}", f"{results['fusion']['qps']:.1f}") \
            .replace("{{os_min}}", f"{results['os']['min']:.2f}") \
            .replace("{{os_max}}", f"{results['os']['max']:.2f}") \
            .replace("{{fusion_min}}", f"{results['fusion']['min']:.2f}") \
            .replace("{{fusion_max}}", f"{results['fusion']['max']:.2f}") \
            .replace("{{os_p75}}", f"{get_p(results['os'], 75):.2f}") \
            .replace("{{os_p90}}", f"{get_p(results['os'], 90):.2f}") \
            .replace("{{fusion_p75}}", f"{get_p(results['fusion'], 75):.2f}") \
            .replace("{{fusion_p90}}", f"{get_p(results['fusion'], 90):.2f}") \
            .replace("{{gain}}", f"{g_avg:.1f}") \
            .replace("{{gain_avg}}", f"{g_avg:.1f}") \
            .replace("{{gain_p50}}", f"{results['os']['p50']/results['fusion']['p50']:.1f}" if results['fusion']['p50'] > 0 else "N/A") \
            .replace("{{gain_p95}}", f"{results['os']['p95']/results['fusion']['p95']:.1f}" if results['fusion']['p95'] > 0 else "N/A") \
            .replace("{{gain_p99}}", f"{results['os']['p99']/results['fusion']['p99']:.1f}" if results['fusion']['p99'] > 0 else "N/A") \
            .replace("{{gain_qps}}", f"{results['fusion']['qps']/results['os']['qps']:.1f}" if results['os']['qps'] > 0 else "N/A")

        with open(output_path, "w") as f: f.write(html)
362
+
363
    def run(self, concurrency=20, reqs=200, target_depth=5, force_gen=False, custom_target=False):
        """Orchestrate the full benchmark end-to-end.

        Steps: (1) ensure the dataset exists, (2) start registry, fusion and
        agent, (3) wait for sync, (4) discover target directories at
        `target_depth`, (5) run the OS and Fusion benchmarks, (6) write the
        JSON and HTML reports under <run_dir>/results. Services are always
        torn down via the finally block, even on failure.
        """
        if not custom_target:
            # Reuse an existing dataset unless regeneration was forced.
            if os.path.exists(self.data_dir) and not force_gen: click.echo(f"Data directory '{self.data_dir}' exists. Skipping generation.")
            else: self.generator.generate()
        else: click.echo(f"Benchmarking target directory: {self.data_dir}")

        try:
            # Bring up the service stack in dependency order.
            self.services.setup_env()
            self.services.start_registry(); api_key = self.services.configure_system()
            self.services.start_fusion(); self.services.start_agent(api_key)
            time.sleep(2)
            # Early health check so misconfiguration fails fast instead of
            # hanging in wait_for_sync.
            is_ok, msg = self.services.check_agent_logs()
            if not is_ok: raise RuntimeError(f"Agent failed to initialize correctly: {msg}")
            click.echo("Agent health check passed.")

            self.wait_for_sync(api_key)
            targets = self._discover_leaf_targets_via_api(api_key, target_depth)

            # Run benchmarks
            os_stats = self.run_concurrent_baseline(targets, concurrency, reqs)
            fusion_stats = self.run_concurrent_fusion(api_key, targets, concurrency, reqs)

            # Prepare results object
            final_results = {
                "depth": target_depth, "requests": reqs, "concurrency": concurrency,
                "target_directory_count": len(targets),
                "os": os_stats, "fusion": fusion_stats,
                "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
            }

            # Save JSON and HTML to results directory
            results_dir = os.path.join(self.run_dir, "results")
            os.makedirs(results_dir, exist_ok=True)

            json_path = os.path.join(results_dir, "stress-find.json")
            html_path = os.path.join(results_dir, "stress-find.html")

            with open(json_path, "w") as f: json.dump(final_results, f, indent=2)
            self.generate_html_report(final_results, html_path)

            # Output Scorecard to console
            click.echo("\n" + "="*60)
            click.echo(f"RECURSIVE METADATA RETRIEVAL PERFORMANCE (DEPTH {target_depth})")
            click.echo(f"Target Directories Found: {len(targets)}")
            click.echo("="*60)
            click.echo(f"{ 'Metric (ms)':<25} | {'OS (find -ls)':<18} | {'Fusion API':<18}")
            click.echo("-" * 65)
            click.echo(f"{ 'Avg Latency':<25} | {os_stats['avg']:10.2f} ms | {fusion_stats['avg']:10.2f} ms")
            click.echo(f"{ 'P50 Latency':<25} | {os_stats['p50']:10.2f} ms | {fusion_stats['p50']:10.2f} ms")
            click.echo(f"{ 'P99 Latency':<25} | {os_stats['p99']:10.2f} ms | {fusion_stats['p99']:10.2f} ms")
            click.echo(f"{ 'Throughput (QPS)':<25} | {os_stats['qps']:10.1f} | {fusion_stats['qps']:10.1f}")
            click.echo("-" * 65)
            click.echo(click.style(f"\nJSON results saved to: {json_path}", fg="cyan"))
            click.echo(click.style(f"Visual HTML report saved to: {html_path}", fg="green", bold=True))

        finally:
            # Always tear down spawned services, even when the benchmark failed.
            self.services.stop_all()
@@ -0,0 +1,226 @@
import os
import shutil
import signal
import subprocess
import sys
import time

import click
import requests
import yaml
10
class ServiceManager:
    """Orchestrates the benchmark service stack: Registry, Fusion and Agent.

    Every run gets an isolated environment under ``{run_dir}/.fustor``
    (bootstrap config, SQLite databases, per-service logs), so nothing outside
    the benchmark directory is read or written.  Spawned child processes and
    their log file handles are tracked so :meth:`stop_all` can tear the whole
    stack down cleanly.
    """

    def __init__(self, run_dir: str):
        self.run_dir = os.path.abspath(run_dir)
        # Data directory whose file metadata the Agent monitors and syncs.
        self.data_dir = os.path.join(self.run_dir, "data")
        # Isolated environment home for this run (exported as FUSTOR_HOME).
        self.env_dir = os.path.join(self.run_dir, ".fustor")

        self.registry_port = 18101
        self.fusion_port = 18102
        self.agent_port = 18100
        # Child processes spawned by the start_* methods.
        self.processes = []
        # Log file handles opened for child processes; closed in stop_all().
        self._log_files = []

        # Paths
        self.venv_bin = os.path.abspath(".venv/bin")  # Assuming run from repo root

    def setup_env(self):
        """(Re)create the isolated FUSTOR_HOME and write its bootstrap ``.env``.

        Destructive: recursively deletes any previous ``{run_dir}/.fustor``.
        Exits the process unless ``run_dir`` ends with ``fustor-benchmark-run``,
        protecting unrelated directories from accidental deletion.
        """
        # Safety Check: Only allow operations in directories ending with 'fustor-benchmark-run'
        if not self.run_dir.endswith("fustor-benchmark-run"):
            click.echo(click.style(f"FATAL: Environment setup denied. Target run-dir '{self.run_dir}' must end with 'fustor-benchmark-run' for safety.", fg="red", bold=True))
            sys.exit(1)

        if os.path.exists(self.env_dir):
            shutil.rmtree(self.env_dir)
        os.makedirs(self.env_dir, exist_ok=True)

        # Generate a random shared secret for internal service-to-service auth.
        import secrets
        self.client_token = secrets.token_urlsafe(32)

        # Bootstrap configuration consumed by the Registry and Fusion services.
        with open(os.path.join(self.env_dir, ".env"), "w") as f:
            f.write(f"FUSTOR_REGISTRY_DB_URL=sqlite+aiosqlite:///{self.env_dir}/registry.db\n")
            f.write(f"FUSTOR_FUSION_REGISTRY_URL=http://localhost:{self.registry_port}\n")
            f.write(f"FUSTOR_REGISTRY_CLIENT_TOKEN={self.client_token}\n")

    def _wait_for_service(self, url: str, name: str, timeout: int = 30):
        """Poll ``url`` until any HTTP response arrives or ``timeout`` elapses.

        Returns True once the endpoint answers (any status code), False on
        timeout.  Only request-level failures are swallowed while polling;
        the previous bare ``except`` also ate KeyboardInterrupt/SystemExit.
        """
        click.echo(f"Waiting for {name} at {url}...")
        start = time.time()
        while time.time() - start < timeout:
            try:
                requests.get(url, timeout=1)
                click.echo(f"{name} is up.")
                return True
            except requests.RequestException:
                # Service not accepting connections yet; retry shortly.
                time.sleep(0.5)
        click.echo(f"Error: {name} failed to start.")
        return False

    def _spawn(self, cmd, log_name: str, env):
        """Launch a child process with stdout/stderr redirected to a log file.

        The process and the log handle are both tracked for stop_all().
        """
        log_file = open(os.path.join(self.env_dir, log_name), "w")
        self._log_files.append(log_file)
        p = subprocess.Popen(cmd, env=env, stdout=log_file, stderr=subprocess.STDOUT)
        self.processes.append(p)
        return p

    def start_registry(self):
        """Start the Registry service and block until /health answers.

        Requires setup_env() to have run first (uses ``self.client_token``).
        Raises RuntimeError if the service does not come up within the timeout.
        """
        cmd = [
            f"{self.venv_bin}/fustor-registry", "start",
            "-p", str(self.registry_port)
        ]
        env = os.environ.copy()
        env["FUSTOR_HOME"] = self.env_dir
        env["FUSTOR_REGISTRY_CLIENT_TOKEN"] = self.client_token

        self._spawn(cmd, "registry.log", env)

        if not self._wait_for_service(f"http://localhost:{self.registry_port}/health", "Registry"):
            raise RuntimeError("Registry start failed")

    def configure_system(self):
        """Log in to the Registry, create a datastore and an API key.

        Returns the generated API key string (also stored on ``self.api_key``).
        Raises RuntimeError on any HTTP-level or unexpected failure; inner
        failures are wrapped so callers see a single exception type.
        """
        reg_url = f"http://localhost:{self.registry_port}/v1"
        click.echo("Logging in to Registry...")
        try:
            # Default bootstrap credentials of a freshly initialized Registry.
            res = requests.post(f"{reg_url}/auth/login", data={
                "username": "admin@admin.com",
                "password": "admin"
            })
            if res.status_code != 200:
                raise RuntimeError(f"Login failed: {res.text}")

            token = res.json()["access_token"]
            headers = {"Authorization": f"Bearer {token}"}

            click.echo("Creating Datastore...")
            res = requests.post(f"{reg_url}/datastores/", json={
                "name": "BenchmarkDS", "description": "Auto-generated"
            }, headers=headers)
            if res.status_code not in (200, 201):
                raise RuntimeError(f"DS creation failed: {res.text}")
            ds_id = res.json()["id"]

            click.echo("Creating API Key...")
            res = requests.post(f"{reg_url}/keys/", json={
                "datastore_id": ds_id, "name": "bench-key"
            }, headers=headers)
            if res.status_code not in (200, 201):
                raise RuntimeError(f"API Key creation failed: {res.text}")

            self.api_key = res.json()["key"]
            click.echo(f"API Key generated: {self.api_key[:8]}...")

            return self.api_key
        except Exception as e:
            raise RuntimeError(f"Failed to configure system: {e}") from e

    def start_fusion(self):
        """Start the Fusion service and block until it accepts HTTP requests.

        Raises RuntimeError if it does not respond within 30 seconds.
        """
        cmd = [
            f"{self.venv_bin}/fustor-fusion", "start",
            "-p", str(self.fusion_port)
        ]
        env = os.environ.copy()
        env["FUSTOR_HOME"] = self.env_dir
        env["FUSTOR_FUSION_REGISTRY_URL"] = f"http://localhost:{self.registry_port}"
        env["FUSTOR_REGISTRY_CLIENT_TOKEN"] = self.client_token

        self._spawn(cmd, "fusion.log", env)

        click.echo(f"Waiting for Fusion at http://localhost:{self.fusion_port}...")
        start = time.time()
        while time.time() - start < 30:
            try:
                requests.get(f"http://localhost:{self.fusion_port}/", timeout=1)
                click.echo("Fusion is up.")
                return
            except requests.RequestException:
                # Previously only ConnectionError was caught, so a read Timeout
                # would abort the whole wait loop instead of retrying.
                time.sleep(0.5)
        raise RuntimeError("Fusion start failed")

    def start_agent(self, api_key: str):
        """Write the Agent sync config and start the Agent service.

        The config wires a filesystem source watching ``self.data_dir`` to a
        Fusion pusher authenticated with ``api_key``, joined by one sync rule.
        """
        config = {
            "sources": {
                "bench-fs": {
                    "driver": "fs",
                    "uri": self.data_dir,
                    "credential": {"user": "admin"},
                    "disabled": False,
                    "is_transient": True,
                    "max_queue_size": 100000,
                    "max_retries": 1,
                    "driver_params": {"min_monitoring_window_days": 1}
                }
            },
            "pushers": {
                "bench-fusion": {
                    "driver": "fusion",
                    "endpoint": f"http://127.0.0.1:{self.fusion_port}",
                    "credential": {"key": api_key},
                    "disabled": False,
                    "batch_size": 1000,
                    "max_retries": 10,
                    "retry_delay_sec": 5
                }
            },
            "syncs": {
                "bench-sync": {
                    "source": "bench-fs",
                    "pusher": "bench-fusion",
                    "disabled": False
                }
            }
        }
        with open(os.path.join(self.env_dir, "agent-config.yaml"), "w") as f:
            yaml.dump(config, f)

        cmd = [
            f"{self.venv_bin}/fustor-agent", "start",
            "-p", str(self.agent_port)
        ]
        env = os.environ.copy()
        env["FUSTOR_HOME"] = self.env_dir

        self._spawn(cmd, "agent.log", env)

        self._wait_for_service(f"http://localhost:{self.agent_port}/", "Agent")

    def check_agent_logs(self, lines: int = 100):
        """Scan the tail of the Agent log for error/success markers.

        Returns a ``(healthy, message)`` tuple.  ``healthy`` is False when an
        error keyword is found or the log file does not exist yet; otherwise
        True — including while the Agent is still starting up.
        """
        log_path = os.path.join(self.env_dir, "agent.log")
        if not os.path.exists(log_path):
            return False, "Log file not found yet"

        try:
            with open(log_path, "r") as f:
                content = f.readlines()[-lines:]

            error_keywords = ["ERROR", "Exception", "Traceback", "404 -", "failed to start", "ConfigurationError", "崩溃"]
            success_keywords = ["initiated successfully", "Uvicorn running", "Application startup complete"]

            has_error = False
            error_msg = ""
            has_success = False

            for line in content:
                if any(kw in line for kw in error_keywords):
                    has_error = True
                    error_msg = line.strip()  # remember the last matching error line
                if any(kw in line for kw in success_keywords):
                    has_success = True

            if has_error:
                return False, f"Detected Error: {error_msg}"

            if not has_success:
                return True, "Starting up... (no success signal yet)"

            return True, "OK (Success signals detected)"
        except Exception as e:
            # Treat an unreadable log as non-fatal; the caller keeps polling.
            return True, f"Could not read log: {e}"

    def stop_all(self):
        """Terminate all spawned services and close their log file handles."""
        click.echo("Stopping all services...")
        for p in self.processes:
            try:
                p.terminate()
                p.wait(timeout=5)
            except subprocess.TimeoutExpired:
                # Graceful shutdown timed out; force-kill and reap the child
                # (the old code killed without waiting, leaving zombies).
                p.kill()
                p.wait()
            except OSError:
                pass  # process already gone
        self.processes = []
        # Close log handles so file descriptors are not leaked across runs.
        for log_file in self._log_files:
            try:
                log_file.close()
            except OSError:
                pass
        self._log_files = []
@@ -0,0 +1,61 @@
1
+ Metadata-Version: 2.4
2
+ Name: fustor-benchmark
3
+ Version: 0.2
4
+ Summary: Performance benchmarking tool for Fustor platform
5
+ Author-email: Huajin Wang <wanghuajin999@163.com>
6
+ Requires-Python: >=3.11
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: click>=8.1.7
9
+ Requires-Dist: requests>=2.31.0
10
+ Requires-Dist: pyyaml>=6.0.1
11
+ Requires-Dist: psutil>=5.9.0
12
+
13
+ # Fustor 性能基准测试工具 (Benchmark)
14
+
15
+ 该模块是 Fustor 平台的自动化压力测试和性能量化工具。它通过模拟大规模文件系统元数据,量化 Fusion API 相比于操作系统原生文件系统调用的性能优势。
16
+
17
+ ## 核心设计目标
18
+
19
+ 1. **量化优势**: 对比 Fusion 内存索引与 Linux 原生 `find` 命令在递归元数据检索下的延迟与吞吐量。
20
+ 2. **百万级规模**: 支持生成并同步超过 1,000,000 个文件的元数据。
21
+ 3. **全自动流程**: 自动编排 Registry、Fusion 和 Agent,实现一键式从环境部署到报告生成。
22
+
23
+ ## 目录结构规范与安全
24
+
25
+ 为了保护生产数据免受误删,Benchmark 实施了严格的路径校验:
26
+
27
+ * **路径白名单**: 压测主目录(`run-dir`)必须以 **`fustor-benchmark-run`** 结尾。
28
+ * **结构定义**:
29
+ * `{run-dir}/data/`: 存放生成的数百万个模拟文件。
30
+ * `{run-dir}/.fustor/`: 存放压测期间的独立配置文件、SQLite 数据库、日志以及最终报告。
31
+
32
+ ## 快速使用
33
+
34
+ ### 1. 数据生成
35
+ 构建一个包含 1000 个 UUID 目录,每个目录下 1000 个文件(总计 100 万文件)的测试集:
36
+ ```bash
37
+ uv run fustor-benchmark generate fustor-benchmark-run --num-dirs 1000
38
+ ```
39
+
40
+ ### 2. 执行压测
41
+ 运行全链路同步并执行并发性能对比:
42
+ ```bash
43
+ uv run fustor-benchmark run fustor-benchmark-run -d 5 -c 20 -n 100
44
+ ```
45
+ * `-d`: 探测深度。
46
+ * `-c`: 并发数。
47
+ * `-n`: 总请求次数。
48
+
49
+ ## 报告与指标
50
+
51
+ 压测完成后,将在 `fustor-benchmark-run/results/` 下生成以下产出:
52
+
53
+ 1. **`stress-find.html`**: 交互式可视化报表。
54
+ * **Latency Distribution**: 展示 Avg, P50, P95, P99 的柱状对比。
55
+ * **Latency Percentiles**: 展现延迟分布曲线。
56
+ * **Speedup Factor**: 自动计算 Fusion 相比 OS 的加速倍数。
57
+ 2. **`stress-find.json`**: 结构化的指标数据,包含所有原始延迟序列。
58
+
59
+ ## 安全保护说明
60
+
61
+ Benchmark 会在 `run-dir` 下执行 `shutil.rmtree` 操作以清理旧环境。**请务必确保指定的目录不包含任何重要业务数据**。如果尝试在非 `fustor-benchmark-run` 后缀目录下运行,程序将强制退出。
@@ -0,0 +1,13 @@
1
+ README.md
2
+ pyproject.toml
3
+ src/fustor_benchmark/__init__.py
4
+ src/fustor_benchmark/cli.py
5
+ src/fustor_benchmark/generator.py
6
+ src/fustor_benchmark/runner.py
7
+ src/fustor_benchmark/services.py
8
+ src/fustor_benchmark.egg-info/PKG-INFO
9
+ src/fustor_benchmark.egg-info/SOURCES.txt
10
+ src/fustor_benchmark.egg-info/dependency_links.txt
11
+ src/fustor_benchmark.egg-info/entry_points.txt
12
+ src/fustor_benchmark.egg-info/requires.txt
13
+ src/fustor_benchmark.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ fustor-benchmark = fustor_benchmark.cli:cli
@@ -0,0 +1,4 @@
1
+ click>=8.1.7
2
+ requests>=2.31.0
3
+ pyyaml>=6.0.1
4
+ psutil>=5.9.0
@@ -0,0 +1 @@
1
+ fustor_benchmark