resp-benchmark 0.1.4.tar.gz → 0.1.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (25)
  1. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/.github/workflows/CI.yml +0 -3
  2. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/Cargo.lock +1 -1
  3. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/Cargo.toml +1 -1
  4. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/PKG-INFO +13 -7
  5. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/README.md +11 -6
  6. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/pyproject.toml +1 -0
  7. resp_benchmark-0.1.6/python/resp_benchmark/__init__.py +1 -0
  8. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/python/resp_benchmark/cli.py +4 -4
  9. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/python/resp_benchmark/wrapper.py +2 -30
  10. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/auto_connection.rs +7 -5
  11. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/bench.rs +17 -15
  12. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/command/mod.rs +11 -3
  13. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/command/parser.rs +4 -6
  14. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/lib.rs +1 -12
  15. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/shared_context.rs +3 -1
  16. resp_benchmark-0.1.4/python/resp_benchmark/__init__.py +0 -1
  17. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/.gitignore +0 -0
  18. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/LICENSE +0 -0
  19. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/python/resp_benchmark/cores.py +0 -0
  20. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/rustfmt.toml +0 -0
  21. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/async_flag.rs +0 -0
  22. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/client.rs +0 -0
  23. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/command/distribution.rs +0 -0
  24. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/command/placeholder.rs +0 -0
  25. {resp_benchmark-0.1.4 → resp_benchmark-0.1.6}/src/histogram.rs +0 -0
--- resp_benchmark-0.1.4/.github/workflows/CI.yml
+++ resp_benchmark-0.1.6/.github/workflows/CI.yml
@@ -2,9 +2,6 @@ name: CI
 
 on:
   push:
-    branches:
-      - main
-      - master
     tags:
       - '*'
   pull_request:
--- resp_benchmark-0.1.4/Cargo.lock
+++ resp_benchmark-0.1.6/Cargo.lock
@@ -671,7 +671,7 @@ dependencies = [
 
 [[package]]
 name = "resp-benchmark"
-version = "0.1.4"
+version = "0.1.6"
 dependencies = [
  "awaitgroup",
  "colored",
--- resp_benchmark-0.1.4/Cargo.toml
+++ resp_benchmark-0.1.6/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "resp-benchmark"
-version = "0.1.4"
+version = "0.1.6"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
--- resp_benchmark-0.1.4/PKG-INFO
+++ resp_benchmark-0.1.6/PKG-INFO
@@ -1,18 +1,22 @@
 Metadata-Version: 2.3
 Name: resp-benchmark
-Version: 0.1.4
+Version: 0.1.6
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Dist: pydantic
 Requires-Dist: redis
 License-File: LICENSE
+Summary: resp-benchmark is a benchmark tool for testing databases that support the RESP protocol, such as Redis, Valkey, and Tair.
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
 
 # resp-benchmark
 
-[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/your_username/resp-benchmark/blob/main/LICENSE)
+[![Python - Version](https://img.shields.io/badge/python-%3E%3D3.8-brightgreen)](https://www.python.org/doc/versions/)
+[![PyPI - Version](https://img.shields.io/pypi/v/resp-benchmark?color=%231772b4)](https://pypi.org/project/resp-benchmark/)
+[![PyPI - Downloads](https://img.shields.io/pypi/dw/resp-benchmark?color=%231ba784)](https://pypi.org/project/resp-benchmark/)
+[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/tair-opensource/resp-benchmark/blob/main/LICENSE)
 
 resp-benchmark is a benchmark tool for testing databases that support the RESP protocol,
 such as [Redis](https://github.com/redis/redis), [Valkey](https://github.com/valkey-io/valkey),
@@ -65,17 +69,19 @@ Supported placeholders include:
 ### Benchmarking zset
 
 ```shell
-# 1. Load data
-resp-benchmark --load -n 1000000 -P 10 "ZADD {key sequence 1000} {rand 1000} {value 8}"
-# 2. Benchmark
-resp-benchmark "ZRANGEBYSCORE {key uniform 1000} {range 1000 10}"
+# Load data
+resp-benchmark --load -P 10 -c 256 -n 10007000 "ZADD {key sequence 1000} {rand 70000} {key sequence 10007}"
+# Benchmark ZSCORE
+resp-benchmark -s 10 "ZSCORE {key uniform 1000} {key uniform 10007}"
+# Benchmark ZRANGEBYSCORE
+resp-benchmark -s 10 "ZRANGEBYSCORE {key uniform 1000} {range 70000 10}"
 ```
 
 ### Benchmarking Lua Scripts
 
 ```shell
 redis-cli 'SCRIPT LOAD "return redis.call('\''SET'\'', KEYS[1], ARGV[1])"'
-resp-benchmark "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
+resp-benchmark -s 10 "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
 ```
 
 ## Differences with redis-benchmark
--- resp_benchmark-0.1.4/README.md
+++ resp_benchmark-0.1.6/README.md
@@ -1,6 +1,9 @@
 # resp-benchmark
 
-[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/your_username/resp-benchmark/blob/main/LICENSE)
+[![Python - Version](https://img.shields.io/badge/python-%3E%3D3.8-brightgreen)](https://www.python.org/doc/versions/)
+[![PyPI - Version](https://img.shields.io/pypi/v/resp-benchmark?color=%231772b4)](https://pypi.org/project/resp-benchmark/)
+[![PyPI - Downloads](https://img.shields.io/pypi/dw/resp-benchmark?color=%231ba784)](https://pypi.org/project/resp-benchmark/)
+[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/tair-opensource/resp-benchmark/blob/main/LICENSE)
 
 resp-benchmark is a benchmark tool for testing databases that support the RESP protocol,
 such as [Redis](https://github.com/redis/redis), [Valkey](https://github.com/valkey-io/valkey),
@@ -53,17 +56,19 @@ Supported placeholders include:
 ### Benchmarking zset
 
 ```shell
-# 1. Load data
-resp-benchmark --load -n 1000000 -P 10 "ZADD {key sequence 1000} {rand 1000} {value 8}"
-# 2. Benchmark
-resp-benchmark "ZRANGEBYSCORE {key uniform 1000} {range 1000 10}"
+# Load data
+resp-benchmark --load -P 10 -c 256 -n 10007000 "ZADD {key sequence 1000} {rand 70000} {key sequence 10007}"
+# Benchmark ZSCORE
+resp-benchmark -s 10 "ZSCORE {key uniform 1000} {key uniform 10007}"
+# Benchmark ZRANGEBYSCORE
+resp-benchmark -s 10 "ZRANGEBYSCORE {key uniform 1000} {range 70000 10}"
 ```
 
 ### Benchmarking Lua Scripts
 
 ```shell
 redis-cli 'SCRIPT LOAD "return redis.call('\''SET'\'', KEYS[1], ARGV[1])"'
-resp-benchmark "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
+resp-benchmark -s 10 "EVALSHA d8f2fad9f8e86a53d2a6ebd960b33c4972cacc37 1 {key uniform 100000} {value 64}"
 ```
 
 ## Differences with redis-benchmark
--- resp_benchmark-0.1.4/pyproject.toml
+++ resp_benchmark-0.1.6/pyproject.toml
@@ -4,6 +4,7 @@ build-backend = "maturin"
 
 [project]
 name = "resp-benchmark"
+description = "resp-benchmark is a benchmark tool for testing databases that support the RESP protocol, such as Redis, Valkey, and Tair."
 requires-python = ">=3.8"
 classifiers = [
     "Programming Language :: Rust",
--- /dev/null
+++ resp_benchmark-0.1.6/python/resp_benchmark/__init__.py
@@ -0,0 +1 @@
+from .wrapper import Benchmark, Result
--- resp_benchmark-0.1.4/python/resp_benchmark/cli.py
+++ resp_benchmark-0.1.6/python/resp_benchmark/cli.py
@@ -14,10 +14,10 @@ def parse_args():
     parser.add_argument("-p", metavar="port", type=int, default=6379, help="Server port (default 6379)")
     parser.add_argument("-u", metavar="username", type=str, default="", help="Used to send ACL style \"AUTH username pass\". Needs -a.")
     parser.add_argument("-a", metavar="password", type=str, default="", help="Password for Redis Auth")
-    parser.add_argument("-c", metavar="clients", type=int, default=50, help="Number of parallel connections (default 50)")
-    parser.add_argument("--cores", type=str, default=f"", help="Comma-separated list of CPU cores to use.")
-    parser.add_argument("--cluster", action="store_true", help="Enable cluster mode.")
-    parser.add_argument("-n", metavar="requests", type=int, default=100000, help="Total number of requests (default 100000), 0 for unlimited.")
+    parser.add_argument("-c", metavar="clients", type=int, default=0, help="Number of parallel connections (0 for auto, default: 0)")
+    parser.add_argument("--cores", type=str, default=f"", help="Comma-separated list of CPU cores to use (default all)")
+    parser.add_argument("--cluster", action="store_true", help="Use cluster mode (default false)")
+    parser.add_argument("-n", metavar="requests", type=int, default=0, help="Total number of requests (default 0), 0 for unlimited.")
     parser.add_argument("-s", metavar="seconds", type=int, default=0, help="Total time in seconds (default 0), 0 for unlimited.")
     parser.add_argument("-P", metavar="pipeline", type=int, default=1, help="Pipeline <numreq> requests. Default 1 (no pipeline).")
     # parser.add_argument("--tls", action="store_true", help="Use TLS for connection (default false)")
--- resp_benchmark-0.1.4/python/resp_benchmark/wrapper.py
+++ resp_benchmark-0.1.6/python/resp_benchmark/wrapper.py
@@ -8,23 +8,6 @@ import redis
 from .cores import parse_cores_string
 
 
-@dataclass
-class ResultPoint:
-    """
-    Represents a single data point in benchmark results.
-
-    Attributes:
-        timestamp_second (int): Unix timestamp in seconds.
-        qps (float): Queries per second at this timestamp.
-        avg_latency_ms (float): Average latency in milliseconds at this timestamp.
-        p99_latency_ms (float): 99th percentile latency in milliseconds at this timestamp.
-    """
-    timestamp_second: int
-    qps: float
-    avg_latency_ms: float
-    p99_latency_ms: float
-
-
 @dataclass
 class Result:
     """
@@ -34,12 +17,10 @@ class Result:
         qps (float): Average queries per second.
         avg_latency_ms (float): Average latency in milliseconds.
         p99_latency_ms (float): 99th percentile latency in milliseconds.
-        per_second_data (List[ResultPoint]): List of per-second data points.
     """
     qps: float
     avg_latency_ms: float
     p99_latency_ms: float
-    per_second_data: List[ResultPoint]
 
 
 class Benchmark:
@@ -83,9 +64,9 @@ class Benchmark:
     def bench(
         self,
         command: str,
-        connections: int = 32,
+        connections: int = 0,
         pipeline: int = 1,
-        count: int = 100000,
+        count: int = 0,
         seconds: int = 0,
         quiet: bool = False,
     ) -> Result:
@@ -125,15 +106,6 @@
             qps=ret.qps,
             avg_latency_ms=ret.avg_latency_ms,
             p99_latency_ms=ret.p99_latency_ms,
-            per_second_data=[
-                ResultPoint(
-                    timestamp_second=point.timestamp_second,
-                    qps=point.qps,
-                    avg_latency_ms=point.avg_latency_ms,
-                    p99_latency_ms=point.p99_latency_ms,
-                )
-                for point in ret.per_second_data
-            ],
         )
 
         return result
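
With `ResultPoint` and `per_second_data` gone, `bench()` now returns only the aggregate `Result`, and the new defaults (`connections=0` for auto-tuning, `count=0` for unlimited requests) make time-bounded runs the normal mode. A minimal usage sketch of the 0.1.6 Python API, assuming a server at 127.0.0.1:6379 and a `Benchmark(host=..., port=...)` constructor, which this diff does not show:

```python
from resp_benchmark import Benchmark  # 0.1.6 no longer exports ResultPoint

bm = Benchmark(host="127.0.0.1", port=6379)  # constructor args assumed

# connections=0 lets the auto-connection logic pick the client count;
# count=0 means no request limit, so bound the run with seconds instead.
result = bm.bench("SET {key uniform 100000} {value 64}", seconds=10)
print(result.qps, result.avg_latency_ms, result.p99_latency_ms)
```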

--- resp_benchmark-0.1.4/src/auto_connection.rs
+++ resp_benchmark-0.1.6/src/auto_connection.rs
@@ -1,6 +1,7 @@
 use std::sync::atomic::AtomicU64;
 use std::sync::Arc;
 use tokio::sync::Notify;
+use crate::histogram::Histogram;
 
 const MAX_CONN: u64 = if cfg!(target_os = "macos") { 64 } else { 1024 }; // 1024 is enough for most cases
 
@@ -104,11 +105,12 @@ impl AutoConnection {
     pub fn active_conn(&self) -> u64 {
         self.limiters.iter().map(|limiter| limiter.get_active_conn()).sum()
     }
+    #[allow(dead_code)]
     pub fn target_conn(&self) -> u64 {
         self.limiters.iter().map(|limiter| limiter.get_target_conn()).sum()
     }
 
-    pub fn adjust(&mut self, cnt: u64) {
+    pub fn adjust(&mut self, h: &Histogram) {
         if self.ready {
             return;
         }
@@ -117,12 +119,12 @@
         if elapsed < 0.5 {
             return;
         }
-        let qps = (cnt - self.last_cnt) as f64 / elapsed;
+        let qps = (h.cnt() - self.last_cnt) as f64 / elapsed;
         let need_add_conn;
-        if qps >= self.last_qps * 1.5 || elapsed >= 3f64 {
+        if qps >= self.last_qps * 2.0 || elapsed >= 3f64 {
             if self.last_qps == 0.0 {
                 need_add_conn = 1; // at least 1 connection
-            } else if qps > self.last_qps * 1.1 {
+            } else if qps > self.last_qps * 1.3 {
                 need_add_conn = self.active_conn();
             } else {
                 self.ready = true;
@@ -136,7 +138,7 @@
             self.inx = (self.inx + 1) % self.limiters.len();
         }
         self.last_qps = qps;
-        self.last_cnt = cnt;
+        self.last_cnt = h.cnt();
         self.instant = std::time::Instant::now();
         return;
     }
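
The auto-tuning change reads the request count straight from the shared `Histogram` and loosens the ramp thresholds: a window now has to double throughput (2.0×, up from 1.5×) to be re-evaluated early, and must still beat the previous rate by 1.3× (up from 1.1×) to justify doubling the connection count. A rough Python restatement of the 0.1.6 decision rule, with the function name and return convention invented for illustration:

```python
from typing import Optional

def adjust_step(qps: float, last_qps: float, active_conn: int,
                elapsed: float) -> Optional[int]:
    """How many connections to add this window; None once throughput plateaus.

    Illustrative restatement of AutoConnection::adjust in 0.1.6.
    """
    if elapsed < 0.5:
        return 0  # sampling window too short; keep waiting
    if qps >= last_qps * 2.0 or elapsed >= 3.0:
        if last_qps == 0.0:
            return 1  # bootstrap with at least one connection
        if qps > last_qps * 1.3:
            return active_conn  # still scaling: double the connections
        return None  # gains flattened out: connection count is "ready"
    return 0  # not enough signal yet; evaluate again next tick
```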

--- resp_benchmark-0.1.4/src/bench.rs
+++ resp_benchmark-0.1.6/src/bench.rs
@@ -46,7 +46,11 @@ async fn run_commands_on_single_thread(limiter: Arc<ConnLimiter>, config: Client
         // prepare pipeline
         let mut p = Vec::new();
         for _ in 0..pipeline_cnt {
-            p.push(cmd.gen_cmd());
+            if context.is_loading {
+                p.push(cmd.gen_cmd_with_lock());
+            } else {
+                p.push(cmd.gen_cmd());
+            }
         }
         let instant = std::time::Instant::now();
         client.run_commands(p).await;
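
During `--load`, each pipelined command is now generated under the command's lock via `gen_cmd_with_lock()`. The diff does not state why, but a plausible reading is that stateful placeholders such as `sequence` mutate shared state, and holding the lock for the whole command keeps concurrent workers from interleaving mid-command during a load run. A toy Python illustration of that locking pattern, with all names invented:

```python
import threading

class SequencePlaceholder:
    """Toy stand-in for a shared 'key sequence N' placeholder."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.i = 0
        self.lock = threading.Lock()

    def gen_cmd_with_lock(self, template: str) -> str:
        # Hold the lock while the whole command is assembled, in the spirit
        # of gen_cmd_with_lock(): another worker cannot advance the sequence
        # halfway through this command's arguments.
        with self.lock:
            cmd = template.format(key=f"key_{self.i:06d}")
            self.i = (self.i + 1) % self.n
            return cmd
```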

@@ -60,7 +64,7 @@
     local.await;
 }
 
-fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: SharedContext, mut wg: WaitGroup, load: bool, quiet: bool) -> BenchmarkResult {
+fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: SharedContext, mut wg: WaitGroup, quiet: bool) -> BenchmarkResult {
     let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap();
     let mut result = BenchmarkResult::default();
 
@@ -88,16 +92,15 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
         {
             let cnt = histogram.cnt();
             let qps = (cnt - log_last_cnt) as f64 / log_instance.elapsed().as_secs_f64();
-            let active_conn: u64 = auto_connection.active_conn();
-            let target_conn: u64 = auto_connection.target_conn();
+            let conn: u64 = auto_connection.active_conn();
             if auto_connection.ready {
                 result.qps = (cnt - overall_cnt_overhead) as f64 / overall_time.elapsed().as_secs_f64();
             }
             if !quiet {
-                if load {
-                    print!("\r\x1B[2KData loading qps: {:.0}, {:.2}%", qps, histogram.cnt() as f64 / case.count as f64 * 100f64);
+                if context.is_loading {
+                    println!("\x1B[F\x1B[2KData loading qps: {:.0}, {:.2}%", qps, histogram.cnt() as f64 / case.count as f64 * 100f64);
                 } else {
-                    print!("\r\x1B[2Kqps: {:.0}(overall {:.0}), active_conn: {}, target_conn: {}, {}", qps, result.qps, active_conn, target_conn, histogram);
+                    println!("\x1B[F\x1B[2Kqps: {:.0}(overall {:.0}), conn: {}, {}", qps, result.qps, conn, histogram);
                 }
             }
             std::io::stdout().flush().unwrap();
@@ -105,8 +108,7 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
             log_instance = std::time::Instant::now();
         }
         if !auto_connection.ready {
-            let cnt = histogram.cnt();
-            auto_connection.adjust(cnt);
+            auto_connection.adjust(&histogram);
             if auto_connection.ready {
                 overall_cnt_overhead = histogram.cnt();
                 overall_time = std::time::Instant::now();
@@ -114,11 +116,11 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
             }
         }
     }
-    let active_conn: u64 = auto_connection.active_conn();
-    if load {
-        print!("\r\x1B[2KData loaded, qps: {:.0}, time elapsed: {:.2}s\n", result.qps, overall_time.elapsed().as_secs_f64());
+    let conn: u64 = auto_connection.active_conn();
+    if context.is_loading {
+        println!("\x1B[F\x1B[2KData loaded, qps: {:.0}, time elapsed: {:.2}s\n", result.qps, overall_time.elapsed().as_secs_f64());
     } else {
-        print!("\r\x1B[2Kqps: {:.0}, conn: {}, {}\n", result.qps, active_conn, histogram)
+        println!("\x1B[F\x1B[2Kqps: {:.0}, conn: {}, {}\n", result.qps, conn, histogram)
     };
     result.avg_latency_ms = histogram.avg() as f64 / 1_000.0;
     result.p99_latency_ms = histogram.percentile(0.99) as f64 / 1_000.0;
@@ -141,7 +143,7 @@ pub fn do_benchmark(client_config: ClientConfig, cores: Vec<u16>, case: Case, lo
     let mut thread_handlers = Vec::new();
     let wg = WaitGroup::new();
     let core_ids = core_affinity::get_core_ids().unwrap();
-    let context = SharedContext::new(case.count, case.seconds);
+    let context = SharedContext::new(case.count, case.seconds, load);
     for inx in 0..cores.len() {
         let client_config = client_config.clone();
         let case = case.clone();
@@ -161,7 +163,7 @@ pub fn do_benchmark(client_config: ClientConfig, cores: Vec<u16>, case: Case, lo
     }
 
     // log thread
-    let result = wait_finish(&case, auto_connection, context, wg, load, quiet);
+    let result = wait_finish(&case, auto_connection, context, wg, quiet);
 
     // join all threads
     for thread_handler in thread_handlers {

--- resp_benchmark-0.1.4/src/command/mod.rs
+++ resp_benchmark-0.1.6/src/command/mod.rs
@@ -33,22 +33,30 @@ impl Command {
     }
     pub fn gen_cmd(&mut self) -> redis::Cmd {
         let mut cmd = redis::Cmd::new();
+        let mut cmd_str = String::new();
         for ph in self.argv.iter_mut() {
             for arg in ph.gen() {
-                cmd.arg(arg);
+                cmd_str.push_str(&arg);
             }
         }
+        for word in cmd_str.split_whitespace() {
+            cmd.arg(word);
+        }
         cmd
     }
     #[allow(dead_code)]
     pub fn gen_cmd_with_lock(&mut self) -> redis::Cmd {
-        let mut cmd = redis::Cmd::new();
         let _lock = self.lock.lock().unwrap();
+        let mut cmd = redis::Cmd::new();
+        let mut cmd_str = String::new();
         for ph in self.argv.iter_mut() {
             for arg in ph.gen() {
-                cmd.arg(arg);
+                cmd_str.push_str(&arg);
             }
         }
+        for word in cmd_str.split_whitespace() {
+            cmd.arg(word);
+        }
         cmd
     }
     pub fn to_string(&self) -> String {
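
Both generators now concatenate the placeholder output into a single string and re-split it with `split_whitespace()` before adding arguments. Combined with the parser change below, which stops trimming whitespace around fragments, this lets a placeholder live inside a larger token, which is exactly what the updated test `aa test_{key sequence 100} bbb` exercises. A small Python sketch of the join-then-split behavior, with invented fragment values:

```python
# Fragments as the placeholder pipeline might emit them (invented values).
# Literal text keeps its spaces; "test_" has no trailing space, so it fuses
# with the generated key into one argument.
fragments = ["aa ", "test_", "key_042", " bbb"]

args = "".join(fragments).split()  # Python's split() ~ Rust's split_whitespace()
assert args == ["aa", "test_key_042", "bbb"]
```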

--- resp_benchmark-0.1.4/src/command/parser.rs
+++ resp_benchmark-0.1.6/src/command/parser.rs
@@ -3,7 +3,6 @@ use nom::{
     sequence::delimited,
     branch::alt,
     bytes::complete::{is_not, tag},
-    character::complete::multispace0,
     multi::many0,
     combinator::{map, all_consuming},
 };
@@ -13,16 +12,15 @@ fn parse_string(input: &str) -> IResult<&str, PlaceholderEnum> {
     let s = alt((
         delimited(tag("\""), is_not("\""), tag("\"")),
         delimited(tag("\'"), is_not("\'"), tag("\'")),
-        delimited(multispace0, is_not("{ "), multispace0)
-    ));
+        is_not("{")
+    ));
     map(s, PlaceholderEnum::new_string)(input)
 }
 
 
 fn parse_placeholder(input: &str) -> IResult<&str, PlaceholderEnum> {
     let inner = delimited(tag("{"), is_not("}"), tag("}"));
-    let eat_whitespace = delimited(multispace0, inner, multispace0);
-    map(eat_whitespace, PlaceholderEnum::new)(input)
+    map(inner, PlaceholderEnum::new)(input)
 }
 
 
@@ -36,7 +34,7 @@ mod tests {
 
     #[test]
     fn test_root() {
-        let (nm, args) = match parse_all("aa {key sequence 100} bbb") {
+        let (nm, args) = match parse_all("aa test_{key sequence 100} bbb") {
             Ok((nm, args)) => (nm, args),
             Err(e) => {
                 println!("Error: {:?}", e);

--- resp_benchmark-0.1.4/src/lib.rs
+++ resp_benchmark-0.1.6/src/lib.rs
@@ -18,14 +18,6 @@ fn _resp_benchmark_rust_lib(m: &Bound<'_, PyModule>) -> PyResult<()> {
     Ok(())
 }
 
-#[pyclass]
-#[derive(Clone)]
-struct ResultPoint {
-    #[pyo3(get, set)] pub timestamp_second: i64,
-    #[pyo3(get, set)] pub qps: f64,
-    #[pyo3(get, set)] pub avg_latency_ms: f64,
-    #[pyo3(get, set)] pub p99_latency_ms: f64,
-}
 
 #[pyclass]
 #[derive(Default)]
@@ -33,7 +25,6 @@ struct BenchmarkResult {
     #[pyo3(get, set)] pub qps: f64,
     #[pyo3(get, set)] pub avg_latency_ms: f64,
     #[pyo3(get, set)] pub p99_latency_ms: f64,
-    #[pyo3(get, set)] pub per_second_data: Vec<ResultPoint>,
 }
 
 #[pyfunction]
@@ -56,10 +47,8 @@ fn benchmark(
 ) -> PyResult<BenchmarkResult> {
     assert!(cores.len() > 0);
     if load {
-        assert_ne!(connections, 0);
-        assert_ne!(count, 0);
+        assert_ne!(count, 0, "count must be greater than 0");
     }
-    assert!(count != 0 || seconds != 0);
 
     let _ = ctrlc::set_handler(move || {
         std::process::exit(0);

--- resp_benchmark-0.1.4/src/shared_context.rs
+++ resp_benchmark-0.1.6/src/shared_context.rs
@@ -8,6 +8,7 @@ use std::time::Instant;
 
 #[derive(Clone)]
 pub struct SharedContext {
+    pub is_loading: bool,
     // limit by max_count
     current_count: Arc<AtomicU64>,
     max_count: u64,
@@ -24,8 +25,9 @@
 }
 
 impl SharedContext {
-    pub fn new(max_count: u64, max_seconds: u64) -> Self {
+    pub fn new(max_count: u64, max_seconds: u64, is_loading: bool) -> Self {
         SharedContext {
+            is_loading,
             current_count: Arc::new(AtomicU64::new(0)),
             max_count,
             instant: Arc::new(RwLock::new(None)),

--- resp_benchmark-0.1.4/python/resp_benchmark/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .wrapper import Benchmark, Result, ResultPoint