resp-benchmark 0.1.5__tar.gz → 0.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of resp-benchmark has been flagged as potentially problematic; consult the package registry's advisory page for details.

Files changed (24):
  1. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/Cargo.lock +1 -1
  2. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/Cargo.toml +1 -1
  3. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/PKG-INFO +1 -1
  4. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/bench.rs +14 -10
  5. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/command/mod.rs +11 -3
  6. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/command/parser.rs +4 -6
  7. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/shared_context.rs +3 -1
  8. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/.github/workflows/CI.yml +0 -0
  9. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/.gitignore +0 -0
  10. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/LICENSE +0 -0
  11. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/README.md +0 -0
  12. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/pyproject.toml +0 -0
  13. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/python/resp_benchmark/__init__.py +0 -0
  14. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/python/resp_benchmark/cli.py +0 -0
  15. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/python/resp_benchmark/cores.py +0 -0
  16. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/python/resp_benchmark/wrapper.py +0 -0
  17. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/rustfmt.toml +0 -0
  18. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/async_flag.rs +0 -0
  19. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/auto_connection.rs +0 -0
  20. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/client.rs +0 -0
  21. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/command/distribution.rs +0 -0
  22. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/command/placeholder.rs +0 -0
  23. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/histogram.rs +0 -0
  24. {resp_benchmark-0.1.5 → resp_benchmark-0.1.6}/src/lib.rs +0 -0
@@ -671,7 +671,7 @@ dependencies = [
671
671
 
672
672
  [[package]]
673
673
  name = "resp-benchmark"
674
- version = "0.1.5"
674
+ version = "0.1.6"
675
675
  dependencies = [
676
676
  "awaitgroup",
677
677
  "colored",
@@ -1,6 +1,6 @@
1
1
  [package]
2
2
  name = "resp-benchmark"
3
- version = "0.1.5"
3
+ version = "0.1.6"
4
4
  edition = "2021"
5
5
 
6
6
  # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: resp-benchmark
3
- Version: 0.1.5
3
+ Version: 0.1.6
4
4
  Classifier: Programming Language :: Rust
5
5
  Classifier: Programming Language :: Python :: Implementation :: CPython
6
6
  Classifier: Programming Language :: Python :: Implementation :: PyPy
@@ -46,7 +46,11 @@ async fn run_commands_on_single_thread(limiter: Arc<ConnLimiter>, config: Client
46
46
  // prepare pipeline
47
47
  let mut p = Vec::new();
48
48
  for _ in 0..pipeline_cnt {
49
- p.push(cmd.gen_cmd());
49
+ if context.is_loading {
50
+ p.push(cmd.gen_cmd_with_lock());
51
+ } else {
52
+ p.push(cmd.gen_cmd());
53
+ }
50
54
  }
51
55
  let instant = std::time::Instant::now();
52
56
  client.run_commands(p).await;
@@ -60,7 +64,7 @@ async fn run_commands_on_single_thread(limiter: Arc<ConnLimiter>, config: Client
60
64
  local.await;
61
65
  }
62
66
 
63
- fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: SharedContext, mut wg: WaitGroup, load: bool, quiet: bool) -> BenchmarkResult {
67
+ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: SharedContext, mut wg: WaitGroup, quiet: bool) -> BenchmarkResult {
64
68
  let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap();
65
69
  let mut result = BenchmarkResult::default();
66
70
 
@@ -93,10 +97,10 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
93
97
  result.qps = (cnt - overall_cnt_overhead) as f64 / overall_time.elapsed().as_secs_f64();
94
98
  }
95
99
  if !quiet {
96
- if load {
97
- print!("\r\x1B[2KData loading qps: {:.0}, {:.2}%", qps, histogram.cnt() as f64 / case.count as f64 * 100f64);
100
+ if context.is_loading {
101
+ println!("\x1B[F\x1B[2KData loading qps: {:.0}, {:.2}%", qps, histogram.cnt() as f64 / case.count as f64 * 100f64);
98
102
  } else {
99
- print!("\r\x1B[2Kqps: {:.0}(overall {:.0}), conn: {}, {}", qps, result.qps, conn, histogram);
103
+ println!("\x1B[F\x1B[2Kqps: {:.0}(overall {:.0}), conn: {}, {}", qps, result.qps, conn, histogram);
100
104
  }
101
105
  }
102
106
  std::io::stdout().flush().unwrap();
@@ -113,10 +117,10 @@ fn wait_finish(case: &Case, mut auto_connection: AutoConnection, mut context: Sh
113
117
  }
114
118
  }
115
119
  let conn: u64 = auto_connection.active_conn();
116
- if load {
117
- print!("\r\x1B[2KData loaded, qps: {:.0}, time elapsed: {:.2}s\n", result.qps, overall_time.elapsed().as_secs_f64());
120
+ if context.is_loading {
121
+ println!("\x1B[F\x1B[2KData loaded, qps: {:.0}, time elapsed: {:.2}s\n", result.qps, overall_time.elapsed().as_secs_f64());
118
122
  } else {
119
- print!("\r\x1B[2Kqps: {:.0}, conn: {}, {}\n", result.qps, conn, histogram)
123
+ println!("\x1B[F\x1B[2Kqps: {:.0}, conn: {}, {}\n", result.qps, conn, histogram)
120
124
  };
121
125
  result.avg_latency_ms = histogram.avg() as f64 / 1_000.0;
122
126
  result.p99_latency_ms = histogram.percentile(0.99) as f64 / 1_000.0;
@@ -139,7 +143,7 @@ pub fn do_benchmark(client_config: ClientConfig, cores: Vec<u16>, case: Case, lo
139
143
  let mut thread_handlers = Vec::new();
140
144
  let wg = WaitGroup::new();
141
145
  let core_ids = core_affinity::get_core_ids().unwrap();
142
- let context = SharedContext::new(case.count, case.seconds);
146
+ let context = SharedContext::new(case.count, case.seconds, load);
143
147
  for inx in 0..cores.len() {
144
148
  let client_config = client_config.clone();
145
149
  let case = case.clone();
@@ -159,7 +163,7 @@ pub fn do_benchmark(client_config: ClientConfig, cores: Vec<u16>, case: Case, lo
159
163
  }
160
164
 
161
165
  // log thread
162
- let result = wait_finish(&case, auto_connection, context, wg, load, quiet);
166
+ let result = wait_finish(&case, auto_connection, context, wg, quiet);
163
167
 
164
168
  // join all threads
165
169
  for thread_handler in thread_handlers {
@@ -33,22 +33,30 @@ impl Command {
33
33
  }
34
34
  pub fn gen_cmd(&mut self) -> redis::Cmd {
35
35
  let mut cmd = redis::Cmd::new();
36
+ let mut cmd_str = String::new();
36
37
  for ph in self.argv.iter_mut() {
37
38
  for arg in ph.gen() {
38
- cmd.arg(arg);
39
+ cmd_str.push_str(&arg);
39
40
  }
40
41
  }
42
+ for word in cmd_str.split_whitespace() {
43
+ cmd.arg(word);
44
+ }
41
45
  cmd
42
46
  }
43
47
  #[allow(dead_code)]
44
48
  pub fn gen_cmd_with_lock(&mut self) -> redis::Cmd {
45
- let mut cmd = redis::Cmd::new();
46
49
  let _lock = self.lock.lock().unwrap();
50
+ let mut cmd = redis::Cmd::new();
51
+ let mut cmd_str = String::new();
47
52
  for ph in self.argv.iter_mut() {
48
53
  for arg in ph.gen() {
49
- cmd.arg(arg);
54
+ cmd_str.push_str(&arg);
50
55
  }
51
56
  }
57
+ for word in cmd_str.split_whitespace() {
58
+ cmd.arg(word);
59
+ }
52
60
  cmd
53
61
  }
54
62
  pub fn to_string(&self) -> String {
@@ -3,7 +3,6 @@ use nom::{
3
3
  sequence::delimited,
4
4
  branch::alt,
5
5
  bytes::complete::{is_not, tag},
6
- character::complete::multispace0,
7
6
  multi::many0,
8
7
  combinator::{map, all_consuming},
9
8
  };
@@ -13,16 +12,15 @@ fn parse_string(input: &str) -> IResult<&str, PlaceholderEnum> {
13
12
  let s = alt((
14
13
  delimited(tag("\""), is_not("\""), tag("\"")),
15
14
  delimited(tag("\'"), is_not("\'"), tag("\'")),
16
- delimited(multispace0, is_not("{ "), multispace0)
17
- ));
15
+ is_not("{")
16
+ ));
18
17
  map(s, PlaceholderEnum::new_string)(input)
19
18
  }
20
19
 
21
20
 
22
21
  fn parse_placeholder(input: &str) -> IResult<&str, PlaceholderEnum> {
23
22
  let inner = delimited(tag("{"), is_not("}"), tag("}"));
24
- let eat_whitespace = delimited(multispace0, inner, multispace0);
25
- map(eat_whitespace, PlaceholderEnum::new)(input)
23
+ map(inner, PlaceholderEnum::new)(input)
26
24
  }
27
25
 
28
26
 
@@ -36,7 +34,7 @@ mod tests {
36
34
 
37
35
  #[test]
38
36
  fn test_root() {
39
- let (nm, args) = match parse_all("aa {key sequence 100} bbb") {
37
+ let (nm, args) = match parse_all("aa test_{key sequence 100} bbb") {
40
38
  Ok((nm, args)) => (nm, args),
41
39
  Err(e) => {
42
40
  println!("Error: {:?}", e);
@@ -8,6 +8,7 @@ use std::time::Instant;
8
8
 
9
9
  #[derive(Clone)]
10
10
  pub struct SharedContext {
11
+ pub is_loading: bool,
11
12
  // limit by max_count
12
13
  current_count: Arc<AtomicU64>,
13
14
  max_count: u64,
@@ -24,8 +25,9 @@ pub struct SharedContext {
24
25
  }
25
26
 
26
27
  impl SharedContext {
27
- pub fn new(max_count: u64, max_seconds: u64) -> Self {
28
+ pub fn new(max_count: u64, max_seconds: u64, is_loading: bool) -> Self {
28
29
  SharedContext {
30
+ is_loading,
29
31
  current_count: Arc::new(AtomicU64::new(0)),
30
32
  max_count,
31
33
  instant: Arc::new(RwLock::new(None)),
File without changes
File without changes