resp-benchmark 0.1.4.tar.gz → 0.1.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of resp-benchmark might be problematic. Click here for more details.

Files changed (25)
  1. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/.github/workflows/CI.yml +0 -3
  2. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/Cargo.lock +1 -1
  3. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/Cargo.toml +1 -1
  4. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/PKG-INFO +13 -7
  5. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/README.md +11 -6
  6. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/pyproject.toml +1 -0
  7. resp_benchmark-0.1.5/python/resp_benchmark/__init__.py +1 -0
  8. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/python/resp_benchmark/cli.py +4 -4
  9. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/python/resp_benchmark/wrapper.py +2 -30
  10. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/auto_connection.rs +7 -5
  11. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/bench.rs +5 -7
  12. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/lib.rs +1 -12
  13. resp_benchmark-0.1.4/python/resp_benchmark/__init__.py +0 -1
  14. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/.gitignore +0 -0
  15. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/LICENSE +0 -0
  16. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/python/resp_benchmark/cores.py +0 -0
  17. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/rustfmt.toml +0 -0
  18. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/async_flag.rs +0 -0
  19. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/client.rs +0 -0
  20. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/command/distribution.rs +0 -0
  21. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/command/mod.rs +0 -0
  22. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/command/parser.rs +0 -0
  23. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/command/placeholder.rs +0 -0
  24. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/histogram.rs +0 -0
  25. {resp_benchmark-0.1.4 → resp_benchmark-0.1.5}/src/shared_context.rs +0 -0
@@ -2,9 +2,6 @@ name: CI
2
2
 
3
3
  on:
4
4
  push:
5
- branches:
6
- - main
7
- - master
8
5
  tags:
9
6
  - '*'
10
7
  pull_request:
@@ -671,7 +671,7 @@ dependencies = [
671
671
 
672
672
  [[package]]
673
673
  name = "resp-benchmark"
674
- version = "0.1.4"
674
+ version = "0.1.5"
675
675
  dependencies = [
676
676
  "awaitgroup",
677
677
  "colored",
@@ -1,6 +1,6 @@
1
1
  [package]
2
2
  name = "resp-benchmark"
3
- version = "0.1.4"
3
+ version = "0.1.5"
4
4
  edition = "2021"
5
5
 
6
6
  # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -1,18 +1,22 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: resp-benchmark
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Classifier: Programming Language :: Rust
5
5
  Classifier: Programming Language :: Python :: Implementation :: CPython
6
6
  Classifier: Programming Language :: Python :: Implementation :: PyPy
7
7
  Requires-Dist: pydantic
8
8
  Requires-Dist: redis
9
9
  License-File: LICENSE
10
+ Summary: resp-benchmark is a benchmark tool for testing databases that support the RESP protocol, such as Redis, Valkey, and Tair.
10
11
  Requires-Python: >=3.8
11
12
  Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
12
13
 
13
14
  # resp-benchmark
14
15
 
15
- [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/your_username/resp-benchmark/blob/main/LICENSE)
16
+ [![Python - Version](https://img.shields.io/badge/python-%3E%3D3.8-brightgreen)](https://www.python.org/doc/versions/)
17
+ [![PyPI - Version](https://img.shields.io/pypi/v/resp-benchmark?color=%231772b4)](https://pypi.org/project/resp-benchmark/)
18
+ [![PyPI - Downloads](https://img.shields.io/pypi/dw/resp-benchmark?color=%231ba784)](https://pypi.org/project/resp-benchmark/)
19
+ [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/tair-opensource/resp-benchmark/blob/main/LICENSE)
16
20
 
17
21
  resp-benchmark is a benchmark tool for testing databases that support the RESP protocol,
18
22
  such as [Redis](https://github.com/redis/redis), [Valkey](https://github.com/valkey-io/valkey),
@@ -65,17 +69,19 @@ Supported placeholders include:
65
69
  ### Benchmarking zset
66
70
 
67
71
  ```shell
68
- # 1. Load data
69
- resp-benchmark --load -n 1000000 -P 10 "ZADD {key sequence 1000} {rand 1000} {value 8}"
70
- # 2. Benchmark
71
- resp-benchmark "ZRANGEBYSCORE {key uniform 1000} {range 1000 10}"
72
+ # Load data
73
+ resp-benchmark --load -P 10 -c 256 -n 10007000 "ZADD {key sequence 1000} {rand 70000} {key sequence 10007}"
74
+ # Benchmark ZSCORE
75
+ resp-benchmark -s 10 "ZSCORE {key uniform 1000} {key uniform 10007}"
76
+ # Benchmark ZRANGEBYSCORE
77
+ resp-benchmark -s 10 "ZRANGEBYSCORE {key uniform 1000} {range 70000 10}"
72
78
  ```
73
79
 
74
80
  ### Benchmarking Lua Scripts
75
81
 
76
82
  ```shell
77
83
  redis-cli 'SCRIPT LOAD "return redis.call('\''SET'\'', KEYS[1], ARGV[1])"'
78
- resp-benchmark "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
84
+ resp-benchmark -s 10 "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
79
85
  ```
80
86
 
81
87
  ## Differences with redis-benchmark
@@ -1,6 +1,9 @@
1
1
  # resp-benchmark
2
2
 
3
- [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/your_username/resp-benchmark/blob/main/LICENSE)
3
+ [![Python - Version](https://img.shields.io/badge/python-%3E%3D3.8-brightgreen)](https://www.python.org/doc/versions/)
4
+ [![PyPI - Version](https://img.shields.io/pypi/v/resp-benchmark?color=%231772b4)](https://pypi.org/project/resp-benchmark/)
5
+ [![PyPI - Downloads](https://img.shields.io/pypi/dw/resp-benchmark?color=%231ba784)](https://pypi.org/project/resp-benchmark/)
6
+ [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/tair-opensource/resp-benchmark/blob/main/LICENSE)
4
7
 
5
8
  resp-benchmark is a benchmark tool for testing databases that support the RESP protocol,
6
9
  such as [Redis](https://github.com/redis/redis), [Valkey](https://github.com/valkey-io/valkey),
@@ -53,17 +56,19 @@ Supported placeholders include:
53
56
  ### Benchmarking zset
54
57
 
55
58
  ```shell
56
- # 1. Load data
57
- resp-benchmark --load -n 1000000 -P 10 "ZADD {key sequence 1000} {rand 1000} {value 8}"
58
- # 2. Benchmark
59
- resp-benchmark "ZRANGEBYSCORE {key uniform 1000} {range 1000 10}"
59
+ # Load data
60
+ resp-benchmark --load -P 10 -c 256 -n 10007000 "ZADD {key sequence 1000} {rand 70000} {key sequence 10007}"
61
+ # Benchmark ZSCORE
62
+ resp-benchmark -s 10 "ZSCORE {key uniform 1000} {key uniform 10007}"
63
+ # Benchmark ZRANGEBYSCORE
64
+ resp-benchmark -s 10 "ZRANGEBYSCORE {key uniform 1000} {range 70000 10}"
60
65
  ```
61
66
 
62
67
  ### Benchmarking Lua Scripts
63
68
 
64
69
  ```shell
65
70
  redis-cli 'SCRIPT LOAD "return redis.call('\''SET'\'', KEYS[1], ARGV[1])"'
66
- resp-benchmark "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
71
+ resp-benchmark -s 10 "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
67
72
  ```
68
73
 
69
74
  ## Differences with redis-benchmark
@@ -4,6 +4,7 @@ build-backend = "maturin"
4
4
 
5
5
  [project]
6
6
  name = "resp-benchmark"
7
+ description = "resp-benchmark is a benchmark tool for testing databases that support the RESP protocol, such as Redis, Valkey, and Tair."
7
8
  requires-python = ">=3.8"
8
9
  classifiers = [
9
10
  "Programming Language :: Rust",
@@ -0,0 +1 @@
1
+ from .wrapper import Benchmark, Result
@@ -14,10 +14,10 @@ def parse_args():
14
14
  parser.add_argument("-p", metavar="port", type=int, default=6379, help="Server port (default 6379)")
15
15
  parser.add_argument("-u", metavar="username", type=str, default="", help="Used to send ACL style \"AUTH username pass\". Needs -a.")
16
16
  parser.add_argument("-a", metavar="password", type=str, default="", help="Password for Redis Auth")
17
- parser.add_argument("-c", metavar="clients", type=int, default=50, help="Number of parallel connections (default 50)")
18
- parser.add_argument("--cores", type=str, default=f"", help="Comma-separated list of CPU cores to use.")
19
- parser.add_argument("--cluster", action="store_true", help="Enable cluster mode.")
20
- parser.add_argument("-n", metavar="requests", type=int, default=100000, help="Total number of requests (default 100000), 0 for unlimited.")
17
+ parser.add_argument("-c", metavar="clients", type=int, default=0, help="Number of parallel connections (0 for auto, default: 0)")
18
+ parser.add_argument("--cores", type=str, default=f"", help="Comma-separated list of CPU cores to use (default all)")
19
+ parser.add_argument("--cluster", action="store_true", help="Use cluster mode (default false)")
20
+ parser.add_argument("-n", metavar="requests", type=int, default=0, help="Total number of requests (default 0), 0 for unlimited.")
21
21
  parser.add_argument("-s", metavar="seconds", type=int, default=0, help="Total time in seconds (default 0), 0 for unlimited.")
22
22
  parser.add_argument("-P", metavar="pipeline", type=int, default=1, help="Pipeline <numreq> requests. Default 1 (no pipeline).")
23
23
  # parser.add_argument("--tls", action="store_true", help="Use TLS for connection (default false)")
@@ -8,23 +8,6 @@ import redis
8
8
  from .cores import parse_cores_string
9
9
 
10
10
 
11
- @dataclass
12
- class ResultPoint:
13
- """
14
- Represents a single data point in benchmark results.
15
-
16
- Attributes:
17
- timestamp_second (int): Unix timestamp in seconds.
18
- qps (float): Queries per second at this timestamp.
19
- avg_latency_ms (float): Average latency in milliseconds at this timestamp.
20
- p99_latency_ms (float): 99th percentile latency in milliseconds at this timestamp.
21
- """
22
- timestamp_second: int
23
- qps: float
24
- avg_latency_ms: float
25
- p99_latency_ms: float
26
-
27
-
28
11
  @dataclass
29
12
  class Result:
30
13
  """
@@ -34,12 +17,10 @@ class Result:
34
17
  qps (float): Average queries per second.
35
18
  avg_latency_ms (float): Average latency in milliseconds.
36
19
  p99_latency_ms (float): 99th percentile latency in milliseconds.
37
- per_second_data (List[ResultPoint]): List of per-second data points.
38
20
  """
39
21
  qps: float
40
22
  avg_latency_ms: float
41
23
  p99_latency_ms: float
42
- per_second_data: List[ResultPoint]
43
24
 
44
25
 
45
26
  class Benchmark:
@@ -83,9 +64,9 @@ class Benchmark:
83
64
  def bench(
84
65
  self,
85
66
  command: str,
86
- connections: int = 32,
67
+ connections: int = 0,
87
68
  pipeline: int = 1,
88
- count: int = 100000,
69
+ count: int = 0,
89
70
  seconds: int = 0,
90
71
  quiet: bool = False,
91
72
  ) -> Result:
@@ -125,15 +106,6 @@ class Benchmark:
125
106
  qps=ret.qps,
126
107
  avg_latency_ms=ret.avg_latency_ms,
127
108
  p99_latency_ms=ret.p99_latency_ms,
128
- per_second_data=[
129
- ResultPoint(
130
- timestamp_second=point.timestamp_second,
131
- qps=point.qps,
132
- avg_latency_ms=point.avg_latency_ms,
133
- p99_latency_ms=point.p99_latency_ms,
134
- )
135
- for point in ret.per_second_data
136
- ],
137
109
  )
138
110
 
139
111
  return result
@@ -1,6 +1,7 @@
1
1
  use std::sync::atomic::AtomicU64;
2
2
  use std::sync::Arc;
3
3
  use tokio::sync::Notify;
4
+ use crate::histogram::Histogram;
4
5
 
5
6
  const MAX_CONN: u64 = if cfg!(target_os = "macos") { 64 } else { 1024 }; // 1024 is enough for most cases
6
7
 
@@ -104,11 +105,12 @@ impl AutoConnection {
104
105
  pub fn active_conn(&self) -> u64 {
105
106
  self.limiters.iter().map(|limiter| limiter.get_active_conn()).sum()
106
107
  }
108
+ #[allow(dead_code)]
107
109
  pub fn target_conn(&self) -> u64 {
108
110
  self.limiters.iter().map(|limiter| limiter.get_target_conn()).sum()
109
111
  }
110
112
 
111
- pub fn adjust(&mut self, cnt: u64) {
113
+ pub fn adjust(&mut self, h: &Histogram) {
112
114
  if self.ready {
113
115
  return;
114
116
  }
@@ -117,12 +119,12 @@ impl AutoConnection {
117
119
  if elapsed < 0.5 {
118
120
  return;
119
121
  }
120
- let qps = (cnt - self.last_cnt) as f64 / elapsed;
122
+ let qps = (h.cnt() - self.last_cnt) as f64 / elapsed;
121
123
  let need_add_conn;
122
- if qps >= self.last_qps * 1.5 || elapsed >= 3f64 {
124
+ if qps >= self.last_qps * 2.0 || elapsed >= 3f64 {
123
125
  if self.last_qps == 0.0 {
124
126
  need_add_conn = 1; // at least 1 connection
125
- } else if qps > self.last_qps * 1.1 {
127
+ } else if qps > self.last_qps * 1.3 {
126
128
  need_add_conn = self.active_conn();
127
129
  } else {
128
130
  self.ready = true;
@@ -136,7 +138,7 @@ impl AutoConnection {
136
138
  self.inx = (self.inx + 1) % self.limiters.len();
137
139
  }
138
140
  self.last_qps = qps;
139
- self.last_cnt = cnt;
141
+ self.last_cnt = h.cnt();
140
142
  self.instant = std::time::Instant::now();
141
143
  return;
142
144
  }
@@ -88,8 +88,7 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
88
88
  {
89
89
  let cnt = histogram.cnt();
90
90
  let qps = (cnt - log_last_cnt) as f64 / log_instance.elapsed().as_secs_f64();
91
- let active_conn: u64 = auto_connection.active_conn();
92
- let target_conn: u64 = auto_connection.target_conn();
91
+ let conn: u64 = auto_connection.active_conn();
93
92
  if auto_connection.ready {
94
93
  result.qps = (cnt - overall_cnt_overhead) as f64 / overall_time.elapsed().as_secs_f64();
95
94
  }
@@ -97,7 +96,7 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
97
96
  if load {
98
97
  print!("\r\x1B[2KData loading qps: {:.0}, {:.2}%", qps, histogram.cnt() as f64 / case.count as f64 * 100f64);
99
98
  } else {
100
- print!("\r\x1B[2Kqps: {:.0}(overall {:.0}), active_conn: {}, target_conn: {}, {}", qps, result.qps, active_conn, target_conn, histogram);
99
+ print!("\r\x1B[2Kqps: {:.0}(overall {:.0}), conn: {}, {}", qps, result.qps, conn, histogram);
101
100
  }
102
101
  }
103
102
  std::io::stdout().flush().unwrap();
@@ -105,8 +104,7 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
105
104
  log_instance = std::time::Instant::now();
106
105
  }
107
106
  if !auto_connection.ready {
108
- let cnt = histogram.cnt();
109
- auto_connection.adjust(cnt);
107
+ auto_connection.adjust(&histogram);
110
108
  if auto_connection.ready {
111
109
  overall_cnt_overhead = histogram.cnt();
112
110
  overall_time = std::time::Instant::now();
@@ -114,11 +112,11 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
114
112
  }
115
113
  }
116
114
  }
117
- let active_conn: u64 = auto_connection.active_conn();
115
+ let conn: u64 = auto_connection.active_conn();
118
116
  if load {
119
117
  print!("\r\x1B[2KData loaded, qps: {:.0}, time elapsed: {:.2}s\n", result.qps, overall_time.elapsed().as_secs_f64());
120
118
  } else {
121
- print!("\r\x1B[2Kqps: {:.0}, conn: {}, {}\n", result.qps, active_conn, histogram)
119
+ print!("\r\x1B[2Kqps: {:.0}, conn: {}, {}\n", result.qps, conn, histogram)
122
120
  };
123
121
  result.avg_latency_ms = histogram.avg() as f64 / 1_000.0;
124
122
  result.p99_latency_ms = histogram.percentile(0.99) as f64 / 1_000.0;
@@ -18,14 +18,6 @@ fn _resp_benchmark_rust_lib(m: &Bound<'_, PyModule>) -> PyResult<()> {
18
18
  Ok(())
19
19
  }
20
20
 
21
- #[pyclass]
22
- #[derive(Clone)]
23
- struct ResultPoint {
24
- #[pyo3(get, set)] pub timestamp_second: i64,
25
- #[pyo3(get, set)] pub qps: f64,
26
- #[pyo3(get, set)] pub avg_latency_ms: f64,
27
- #[pyo3(get, set)] pub p99_latency_ms: f64,
28
- }
29
21
 
30
22
  #[pyclass]
31
23
  #[derive(Default)]
@@ -33,7 +25,6 @@ struct BenchmarkResult {
33
25
  #[pyo3(get, set)] pub qps: f64,
34
26
  #[pyo3(get, set)] pub avg_latency_ms: f64,
35
27
  #[pyo3(get, set)] pub p99_latency_ms: f64,
36
- #[pyo3(get, set)] pub per_second_data: Vec<ResultPoint>,
37
28
  }
38
29
 
39
30
  #[pyfunction]
@@ -56,10 +47,8 @@ fn benchmark(
56
47
  ) -> PyResult<BenchmarkResult> {
57
48
  assert!(cores.len() > 0);
58
49
  if load {
59
- assert_ne!(connections, 0);
60
- assert_ne!(count, 0);
50
+ assert_ne!(count, 0, "count must be greater than 0");
61
51
  }
62
- assert!(count != 0 || seconds != 0);
63
52
 
64
53
  let _ = ctrlc::set_handler(move || {
65
54
  std::process::exit(0);
@@ -1 +0,0 @@
1
- from .wrapper import Benchmark, Result, ResultPoint
File without changes