redis-benchmarks-specification 0.1.291__py3-none-any.whl → 0.1.292__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of redis-benchmarks-specification might be problematic. Click here for more details.

Files changed (227) hide show
  1. redis_benchmarks_specification/__runner__/args.py +6 -0
  2. redis_benchmarks_specification/__runner__/remote_profiling.py +17 -2
  3. redis_benchmarks_specification/__runner__/runner.py +619 -57
  4. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetall-50-fields-100B-values.yml +21 -4
  5. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-20-fields-with-1B-values-pipeline-30.yml +15 -3
  6. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values.yml +16 -3
  7. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-100B-values.yml +16 -3
  8. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values.yml +15 -3
  9. redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-hash-50-fields-with-10000B-values.yml +16 -3
  10. redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-with-10B-values-pipeline-50.yml +5 -3
  11. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values-pipeline-10.yml +5 -3
  12. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values.yml +5 -3
  13. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values-pipeline-10.yml +6 -3
  14. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values.yml +5 -3
  15. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +10 -4
  16. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml +14 -4
  17. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-expire-use-case.yml +14 -4
  18. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-psetex-expire-use-case.yml +12 -4
  19. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-setex-expire-use-case.yml +12 -4
  20. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-1KiB-expire-use-case.yml +13 -4
  21. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-4KiB-expire-use-case.yml +14 -4
  22. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-bitmap-getbit-pipeline-10.yml +10 -4
  23. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-exists-pipeline-10.yml +10 -4
  24. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expire-pipeline-10.yml +10 -4
  25. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expireat-pipeline-10.yml +10 -4
  26. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-pexpire-pipeline-10.yml +10 -4
  27. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-count-500-pipeline-10.yml +10 -4
  28. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-500-pipeline-10.yml +11 -4
  29. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-5000-pipeline-10.yml +11 -4
  30. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-pipeline-10.yml +11 -4
  31. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-pipeline-10.yml +10 -4
  32. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-type-pipeline-10.yml +10 -4
  33. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-touch-pipeline-10.yml +10 -4
  34. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-ttl-pipeline-10.yml +10 -4
  35. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hexists.yml +15 -5
  36. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hget-hgetall-hkeys-hvals-with-100B-values.yml +14 -4
  37. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hgetall-50-fields-10B-values.yml +22 -4
  38. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrby.yml +11 -4
  39. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrbyfloat.yml +11 -4
  40. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hmget-5-fields-with-100B-values-pipeline-10.yml +13 -4
  41. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-transactions-multi-exec-pipeline-20.yml +9 -3
  42. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-100B-values.yml +12 -4
  43. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-10B-values.yml +12 -4
  44. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-1KiB-values.yml +12 -4
  45. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-rpoplpush-with-10B-values.yml +11 -4
  46. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values-pipeline-10.yml +6 -3
  47. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values.yml +5 -3
  48. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-hmset-5-fields-with-1000B-values.yml +5 -3
  49. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-rpush-with-10B-values.yml +4 -3
  50. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-100B-values.yml +4 -3
  51. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values-pipeline-10.yml +5 -3
  52. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values.yml +4 -3
  53. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-1KiB-values.yml +4 -3
  54. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits-pipeline-10.yml +30 -3
  55. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits.yml +30 -3
  56. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-pipeline-10.yml +13 -3
  57. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements.yml +12 -3
  58. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values-pipeline-10.yml +5 -3
  59. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values.yml +5 -3
  60. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values-pipeline-10.yml +6 -3
  61. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values.yml +5 -3
  62. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values-pipeline-10.yml +4 -3
  63. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values.yml +4 -4
  64. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-10.yml +5 -3
  65. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100-nokeyprefix.yml +5 -4
  66. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100.yml +5 -3
  67. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-50.yml +5 -3
  68. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-500.yml +5 -3
  69. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values.yml +4 -3
  70. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values.yml +4 -3
  71. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-20KiB-values.yml +4 -4
  72. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-listpack-with-100-elements-double-score.yml +63 -3
  73. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-double-score.yml +7 -3
  74. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-int-score.yml +6 -3
  75. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B-pipeline-10.yml +12 -4
  76. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B.yml +11 -4
  77. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-decr.yml +10 -4
  78. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B-pipeline-10.yml +10 -4
  79. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B.yml +10 -4
  80. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-10.yml +10 -4
  81. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +10 -4
  82. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100.yml +10 -4
  83. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-50.yml +10 -4
  84. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-500.yml +10 -4
  85. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B.yml +10 -4
  86. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB-pipeline-10.yml +10 -4
  87. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB.yml +11 -5
  88. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B-pipeline-10.yml +9 -4
  89. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B.yml +9 -4
  90. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incr-pipeline-10.yml +4 -3
  91. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby-pipeline-10.yml +4 -3
  92. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby.yml +4 -3
  93. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat-pipeline-10.yml +4 -3
  94. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat.yml +4 -3
  95. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-int-encoding-strlen-pipeline-10.yml +9 -4
  96. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mget-1KiB.yml +10 -4
  97. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire-pipeline-10.yml +12 -4
  98. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire.yml +12 -4
  99. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-pipeline-10.yml +11 -4
  100. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B.yml +10 -4
  101. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB-pipeline-10.yml +10 -4
  102. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB.yml +9 -4
  103. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B-pipeline-10.yml +11 -4
  104. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B.yml +10 -4
  105. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B-pipeline-10.yml +11 -4
  106. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B.yml +10 -4
  107. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-with-expiration-240B-400_conns.yml +12 -4
  108. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-set-with-ex-100B-pipeline-10.yml +10 -3
  109. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setex-100B-pipeline-10.yml +10 -3
  110. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B-pipeline-10.yml +11 -4
  111. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B.yml +11 -4
  112. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-100M-bits-bitmap-bitcount.yml +12 -5
  113. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-1Billion-bits-bitmap-bitcount.yml +12 -5
  114. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geopos.yml +9 -4
  115. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geosearch-fromlonlat-withcoord.yml +10 -5
  116. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist-pipeline-10.yml +7 -3
  117. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist.yml +7 -3
  118. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash-pipeline-10.yml +6 -3
  119. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash.yml +4 -2
  120. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos-pipeline-10.yml +6 -3
  121. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos.yml +4 -2
  122. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-bybox.yml +7 -3
  123. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-pipeline-10.yml +7 -3
  124. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat.yml +7 -3
  125. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall-pipeline-10.yml +256 -4
  126. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall.yml +254 -3
  127. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-100B-values-cursor-count-1000.yml +260 -4
  128. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values-cursor-count-100.yml +260 -4
  129. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values.yml +259 -4
  130. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-50-fields-10B-values.yml +23 -4
  131. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements-pipeline-10.yml +8 -4
  132. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements.yml +6 -3
  133. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-7bit-uint-lrange-all-elements-pipeline-10.yml +15 -4
  134. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-lrange-all-elements-pipeline-10.yml +23 -4
  135. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-llen-pipeline-10.yml +23 -4
  136. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements-pipeline-10.yml +23 -4
  137. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements.yml +21 -3
  138. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-integer.yml +8 -2
  139. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string-pipeline-10.yml +10 -3
  140. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string.yml +8 -2
  141. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-integer.yml +12 -3
  142. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-string.yml +12 -3
  143. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-integer.yml +8 -2
  144. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-string.yml +8 -2
  145. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements-pipeline-10.yml +173 -4
  146. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements.yml +171 -3
  147. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-2K-elements-quicklist-lrange-all-elements-longs.yml +228 -3
  148. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-hash-1K-fields-with-5B-values.yml +254 -3
  149. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-float-score.yml +8 -3
  150. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-hexa-score.yml +8 -3
  151. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-pfadd-4KB-values-pipeline-10.yml +4 -3
  152. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers-pipeline-10.yml +8 -4
  153. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers.yml +6 -3
  154. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smismember.yml +9 -4
  155. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-is-a-member.yml +24 -4
  156. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-not-a-member.yml +24 -4
  157. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smembers.yml +21 -4
  158. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smismember.yml +24 -4
  159. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sscan.yml +21 -4
  160. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-sismember-50pct-chance.yml +9 -3
  161. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-srem-50pct-chance.yml +9 -3
  162. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-smembers.yml +171 -4
  163. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan-cursor-count-100.yml +173 -5
  164. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan.yml +171 -4
  165. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1M-elements-sismember-50pct-chance.yml +8 -3
  166. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-200K-elements-sadd-constant.yml +10 -3
  167. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-2M-elements-sadd-increasing.yml +4 -2
  168. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zincrby-1M-elements-pipeline-1.yml +9 -4
  169. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-100K-elements-pipeline-1.yml +9 -4
  170. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-10M-elements-pipeline-1.yml +10 -4
  171. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-1M-elements-pipeline-1.yml +9 -4
  172. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrem-5M-elements-pipeline-1.yml +16 -4
  173. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-1.yml +10 -4
  174. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-10.yml +10 -4
  175. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrank-1M-elements-pipeline-1.yml +9 -4
  176. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements-long-scores.yml +12 -4
  177. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements.yml +11 -4
  178. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrange-all-elements.yml +37 -4
  179. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements-long-scores.yml +37 -4
  180. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements.yml +37 -4
  181. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zscan.yml +35 -3
  182. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zrange-all-elements.yml +293 -4
  183. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zscan.yml +291 -3
  184. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zcard-pipeline-10.yml +7 -3
  185. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zremrangebyscore-pipeline-10.yml +10 -4
  186. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-5-elements.yml +8 -3
  187. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-withscores-5-elements-pipeline-10.yml +10 -4
  188. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zscore-pipeline-10.yml +9 -4
  189. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-1K-elements.yml +10 -4
  190. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-300K-elements.yml +12 -4
  191. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-listpack-zrank-100-elements-pipeline-1.yml +21 -4
  192. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-eval-hset-expire.yml +6 -3
  193. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-evalsha-hset-expire.yml +9 -7
  194. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml +26 -4
  195. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sinter.yml +26 -4
  196. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml +26 -4
  197. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries-pipeline-10.yml +17 -13
  198. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries.yml +17 -13
  199. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunion.yml +405 -5
  200. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunionstore.yml +405 -5
  201. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values-pipeline-10.yml +5 -3
  202. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values.yml +5 -3
  203. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-400_conns.yml +20 -14
  204. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-40_conns.yml +20 -14
  205. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-2000_conns.yml +11 -4
  206. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-400_conns.yml +11 -4
  207. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-40_conns.yml +11 -4
  208. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-400_conns.yml +20 -14
  209. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-2000_conns.yml +11 -4
  210. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-400_conns.yml +11 -4
  211. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-5200_conns.yml +11 -4
  212. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-50-50-with-512B-values-with-expiration-pipeline-10-400_conns.yml +9 -4
  213. redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello-pipeline-10.yml +4 -3
  214. redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello.yml +4 -3
  215. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-connection-ping-pipeline-10.yml +2 -2
  216. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml +19 -15
  217. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml +19 -15
  218. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml +19 -15
  219. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml +19 -15
  220. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-publish-1K-channels-10B-no-subscribers.yml +4 -3
  221. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-server-time-pipeline-10.yml +2 -2
  222. redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml +0 -3
  223. {redis_benchmarks_specification-0.1.291.dist-info → redis_benchmarks_specification-0.1.292.dist-info}/METADATA +1 -1
  224. {redis_benchmarks_specification-0.1.291.dist-info → redis_benchmarks_specification-0.1.292.dist-info}/RECORD +227 -227
  225. {redis_benchmarks_specification-0.1.291.dist-info → redis_benchmarks_specification-0.1.292.dist-info}/LICENSE +0 -0
  226. {redis_benchmarks_specification-0.1.291.dist-info → redis_benchmarks_specification-0.1.292.dist-info}/WHEEL +0 -0
  227. {redis_benchmarks_specification-0.1.291.dist-info → redis_benchmarks_specification-0.1.292.dist-info}/entry_points.txt +0 -0
@@ -4,6 +4,7 @@ import logging
4
4
  import math
5
5
  import os
6
6
  import shutil
7
+ import signal
7
8
  import subprocess
8
9
  import sys
9
10
  import tempfile
@@ -66,6 +67,23 @@ from redis_benchmarks_specification.__runner__.args import create_client_runner_
66
67
  from redis_benchmarks_specification.__runner__.remote_profiling import RemoteProfiler
67
68
 
68
69
 
70
+ # Global flag to track if user wants to exit
71
+ _exit_requested = False
72
+
73
+
74
+ def signal_handler(signum, frame):
75
+ """Handle Ctrl+C signal to exit gracefully"""
76
+ global _exit_requested
77
+ if not _exit_requested:
78
+ _exit_requested = True
79
+ logging.info("Ctrl+C detected. Exiting after current test completes...")
80
+ print("\nCtrl+C detected. Exiting after current test completes...")
81
+ else:
82
+ logging.info("Ctrl+C detected again. Force exiting...")
83
+ print("\nForce exiting...")
84
+ sys.exit(1)
85
+
86
+
69
87
  def run_local_command_with_timeout(command_str, timeout_seconds, description="command"):
70
88
  """
71
89
  Run a local command with timeout support.
@@ -203,6 +221,176 @@ def extract_expected_benchmark_duration(
203
221
  return 30
204
222
 
205
223
 
224
def detect_object_encoding(redis_conn, dbconfig):
    """
    Detect object encoding by scanning 1% of the dataset.

    Args:
        redis_conn: Redis connection
        dbconfig: Database configuration containing keyspace info

    Returns:
        Dict with encoding information
    """
    try:
        # How many keys are we dealing with overall?
        total_keys = redis_conn.dbsize()
        logging.debug(f"Object encoding detection: DBSIZE reports {total_keys} keys")

        if total_keys == 0:
            logging.warning("No keys found in database for encoding detection")
            return {
                "encoding": "unknown",
                "confidence": 0.0,
                "sample_size": 0,
                "total_keys": 0,
                "encoding_distribution": {},
            }

        # Pick a scanning strategy: exhaustive for small keyspaces,
        # a bounded 1% sample (10..1000 keys) for large ones.
        scan_all_keys = total_keys <= 1000
        if scan_all_keys:
            sample_size = total_keys
            logging.info(f"Scanning all {total_keys} keys (small dataset - complete analysis)")
        else:
            sample_size = max(10, min(1000, int(total_keys * 0.01)))
            logging.info(f"Sampling {sample_size} keys out of {total_keys} total keys ({(sample_size/total_keys)*100:.2f}%)")

        encoding_counts = {}
        scanned_keys = []
        cursor = 0

        if scan_all_keys:
            # Full cursor walk over the keyspace.
            while True:
                cursor, batch = redis_conn.scan(cursor=cursor, count=100)
                scanned_keys.extend(batch)
                if cursor == 0:
                    break
        else:
            # Keep scanning until the sample target is met or the cursor wraps.
            while len(scanned_keys) < sample_size:
                remaining = sample_size - len(scanned_keys)
                cursor, batch = redis_conn.scan(cursor=cursor, count=min(100, remaining))
                scanned_keys.extend(batch)
                if cursor == 0:
                    break
            # SCAN may overshoot the requested count; trim to the target.
            scanned_keys = scanned_keys[:sample_size]

        logging.debug(f"SCAN completed: found {len(scanned_keys)} keys, cursor ended at {cursor}")

        # Defensive fallback: some servers/proxies may report keys via DBSIZE
        # that SCAN does not surface — retry with KEYS on small datasets only.
        if len(scanned_keys) == 0 and total_keys > 0:
            logging.warning(f"SCAN found no keys but DBSIZE reports {total_keys} keys. Trying KEYS fallback.")
            try:
                if total_keys <= 1000:
                    all_keys = redis_conn.keys('*')
                    scanned_keys = all_keys[:sample_size] if not scan_all_keys else all_keys
                    logging.info(f"KEYS fallback found {len(scanned_keys)} keys")
                else:
                    logging.error(f"Cannot use KEYS fallback for large dataset ({total_keys} keys)")
            except Exception as e:
                logging.error(f"KEYS fallback failed: {e}")

        if len(scanned_keys) == 0:
            # Give up: nothing to sample despite a non-empty DBSIZE.
            logging.error(f"No keys found for encoding detection despite DBSIZE={total_keys}")
            return {
                "encoding": "unknown",
                "confidence": 0.0,
                "sample_size": 0,
                "total_keys": total_keys,
                "encoding_distribution": {},
                "is_complete_scan": scan_all_keys,
                "error": "No keys found by SCAN or KEYS commands",
            }

        # Query OBJECT ENCODING per sampled key and tally the answers.
        successful_encodings = 0
        for idx, key in enumerate(scanned_keys):
            try:
                encoding = redis_conn.object("ENCODING", key)
                if isinstance(encoding, bytes):
                    encoding = encoding.decode('utf-8')
                elif encoding is None:
                    # Key vanished (expired/deleted) between SCAN and OBJECT.
                    logging.debug(f"Key '{key}' returned None encoding (key may have expired)")
                    continue

                encoding_counts[encoding] = encoding_counts.get(encoding, 0) + 1
                successful_encodings += 1

                # A few sample datapoints for debugging.
                if idx < 3:
                    logging.debug(f"Key '{key}' has encoding '{encoding}'")
            except Exception as e:
                logging.warning(f"Failed to get encoding for key {key}: {e}")
                continue

        logging.debug(f"Successfully got encoding for {successful_encodings}/{len(scanned_keys)} keys")

        if not encoding_counts:
            logging.warning(f"No object encodings detected! Scanned {len(scanned_keys)} keys, successful encodings: {successful_encodings}")
            return {
                "encoding": "unknown",
                "confidence": 0.0,
                "sample_size": 0,
                "total_keys": total_keys,
                "encoding_distribution": {},
                "is_complete_scan": scan_all_keys,
            }

        # Dominant encoding plus its share of the successful samples.
        total_sampled = sum(encoding_counts.values())
        dominant_encoding = max(encoding_counts.items(), key=lambda item: item[1])
        confidence = dominant_encoding[1] / total_sampled

        encoding_distribution = {
            name: (hits / total_sampled) * 100
            for name, hits in encoding_counts.items()
        }

        result = {
            "encoding": dominant_encoding[0],
            "confidence": confidence,
            "sample_size": total_sampled,
            "total_keys": total_keys,
            "encoding_distribution": encoding_distribution,
            "is_complete_scan": scan_all_keys,
        }

        scan_type = "complete scan" if scan_all_keys else "sample"
        logging.info(f"Object encoding analysis ({scan_type}): {dominant_encoding[0]} ({confidence*100:.1f}% confidence)")
        if len(encoding_counts) > 1:
            logging.info(f"Encoding distribution: {encoding_distribution}")

        return result

    except Exception as e:
        # Never let encoding analysis break the benchmark run.
        logging.error(f"Failed to detect object encoding: {e}")
        return {
            "encoding": "error",
            "confidence": 0.0,
            "sample_size": 0,
            "total_keys": 0,
            "encoding_distribution": {},
            "error": str(e),
        }
391
+ }
392
+
393
+
206
394
  def run_multiple_clients(
207
395
  benchmark_config,
208
396
  docker_client,
@@ -631,6 +819,9 @@ def run_multiple_clients(
631
819
 
632
820
 
633
821
  def main():
822
+ # Register signal handler for graceful exit on Ctrl+C
823
+ signal.signal(signal.SIGINT, signal_handler)
824
+
634
825
  _, _, project_version = populate_with_poetry_data()
635
826
  project_name_suffix = "redis-benchmarks-spec-client-runner"
636
827
  project_name = f"{project_name_suffix} (solely client)"
@@ -1087,7 +1278,10 @@ def process_self_contained_coordinator_stream(
1087
1278
  total_test_suite_runs = 0
1088
1279
  dry_run_count = 0
1089
1280
  dry_run_tests = [] # Track test names for dry run output
1281
+ memory_results = [] # Track memory results for memory comparison mode
1282
+ loaded_datasets = set() # Track datasets that have been loaded (for memory comparison mode)
1090
1283
  dry_run = args.dry_run
1284
+ memory_comparison_only = args.memory_comparison_only
1091
1285
  dry_run_include_preload = args.dry_run_include_preload
1092
1286
  defaults_filename = args.defaults_filename
1093
1287
  override_test_runs = args.override_test_runs
@@ -1100,7 +1294,49 @@ def process_self_contained_coordinator_stream(
1100
1294
  _,
1101
1295
  ) = get_defaults(defaults_filename)
1102
1296
 
1297
+ # For memory comparison mode, analyze datasets before starting
1298
+ if memory_comparison_only:
1299
+ unique_datasets = set()
1300
+ total_tests_with_datasets = 0
1301
+
1302
+ logging.info("Analyzing datasets for memory comparison mode...")
1303
+ for test_file in testsuite_spec_files:
1304
+ if defaults_filename in test_file:
1305
+ continue
1306
+ try:
1307
+ with open(test_file, "r") as stream:
1308
+ benchmark_config = yaml.safe_load(stream)
1309
+
1310
+ if "dbconfig" in benchmark_config:
1311
+ # Skip load tests (keyspacelen = 0) in memory comparison mode
1312
+ keyspacelen = benchmark_config["dbconfig"].get("check", {}).get("keyspacelen", None)
1313
+ if keyspacelen is not None and keyspacelen == 0:
1314
+ logging.debug(f"Skipping load test {test_file} (keyspacelen=0)")
1315
+ continue
1316
+
1317
+ dataset_name = benchmark_config["dbconfig"].get("dataset_name")
1318
+ if dataset_name:
1319
+ unique_datasets.add(dataset_name)
1320
+ total_tests_with_datasets += 1
1321
+
1322
+ except Exception as e:
1323
+ logging.warning(f"Error analyzing {test_file}: {e}")
1324
+
1325
+ logging.info(f"Memory comparison mode analysis:")
1326
+ logging.info(f" Total tests with datasets: {total_tests_with_datasets}")
1327
+ logging.info(f" Unique datasets to load: {len(unique_datasets)}")
1328
+ logging.info(f" Dataset ingestion savings: {total_tests_with_datasets - len(unique_datasets)} skipped loads")
1329
+ logging.info(f" Load tests skipped: Tests with keyspacelen=0 are automatically excluded")
1330
+
1331
+ if len(unique_datasets) > 0:
1332
+ logging.info(f" Unique datasets: {', '.join(sorted(unique_datasets))}")
1333
+
1103
1334
  for test_file in tqdm.tqdm(testsuite_spec_files):
1335
+ # Check if user requested exit via Ctrl+C
1336
+ if _exit_requested:
1337
+ logging.info("Exit requested by user. Stopping test execution.")
1338
+ break
1339
+
1104
1340
  if defaults_filename in test_file:
1105
1341
  continue
1106
1342
  client_containers = []
@@ -1110,6 +1346,11 @@ def process_self_contained_coordinator_stream(
1110
1346
  None, None, stream, ""
1111
1347
  )
1112
1348
 
1349
+ # Check if user requested exit via Ctrl+C
1350
+ if _exit_requested:
1351
+ logging.info(f"Exit requested by user. Skipping test {test_name}.")
1352
+ break
1353
+
1113
1354
  if tls_enabled:
1114
1355
  test_name = test_name + "-tls"
1115
1356
  logging.info(
@@ -1188,28 +1429,17 @@ def process_self_contained_coordinator_stream(
1188
1429
  git_version = detected_info["github_version"]
1189
1430
  logging.info(f"Auto-detected github_version: {git_version}")
1190
1431
 
1191
- # Auto-detect git hash from server info if available
1192
- if git_hash == "NA":
1193
- try:
1194
- server_info = r.info("server")
1195
- redis_git_sha1 = server_info.get("redis_git_sha1", "")
1196
- redis_build_id = server_info.get("redis_build_id", "")
1197
-
1198
- # Use git_sha1 if available and not empty/zero
1199
- if redis_git_sha1 and redis_git_sha1 not in ("", "0", "00000000"):
1200
- git_hash = redis_git_sha1
1201
- logging.info(f"Auto-detected git_hash from redis_git_sha1: {git_hash}")
1202
- # Fallback to build_id if git_sha1 is not available
1203
- elif redis_build_id and redis_build_id not in ("", "0"):
1204
- git_hash = redis_build_id
1205
- logging.info(f"Auto-detected git_hash from redis_build_id: {git_hash}")
1206
- except Exception as e:
1207
- logging.warning(f"Failed to auto-detect git hash: {e}")
1432
+ # Auto-detect git hash if it's the default value
1433
+ if git_hash == "NA" and detected_info["github_hash"] != "unknown":
1434
+ git_hash = detected_info["github_hash"]
1435
+ logging.info(f"Auto-detected git_hash: {git_hash}")
1208
1436
 
1209
1437
  # Update tf_github_org and tf_github_repo with detected values
1210
1438
  tf_github_org = github_org
1211
1439
  tf_github_repo = github_repo
1212
1440
  redis_conns = [r]
1441
+
1442
+
1213
1443
  if oss_cluster_api_enabled:
1214
1444
  redis_conns = []
1215
1445
  logging.info("updating redis connections from cluster slots")
@@ -1287,6 +1517,15 @@ def process_self_contained_coordinator_stream(
1287
1517
  for conn in redis_conns:
1288
1518
  conn.flushall()
1289
1519
 
1520
+ # Send MEMORY PURGE after FLUSHALL for memory comparison mode
1521
+ if memory_comparison_only:
1522
+ try:
1523
+ logging.info("Sending MEMORY PURGE after FLUSHALL at test start")
1524
+ for conn in redis_conns:
1525
+ conn.execute_command("MEMORY", "PURGE")
1526
+ except Exception as e:
1527
+ logging.warning(f"MEMORY PURGE failed after FLUSHALL at test start: {e}")
1528
+
1290
1529
  benchmark_required_memory = get_benchmark_required_memory(
1291
1530
  benchmark_config
1292
1531
  )
@@ -1437,6 +1676,20 @@ def process_self_contained_coordinator_stream(
1437
1676
  )
1438
1677
  continue
1439
1678
 
1679
+ # For memory comparison mode, only run tests with dbconfig
1680
+ if memory_comparison_only and "dbconfig" not in benchmark_config:
1681
+ logging.warning(
1682
+ "Skipping test {} in memory comparison mode as it does not contain dbconfig".format(
1683
+ test_name
1684
+ )
1685
+ )
1686
+ delete_temporary_files(
1687
+ temporary_dir_client=temporary_dir_client,
1688
+ full_result_path=None,
1689
+ benchmark_tool_global=benchmark_tool_global,
1690
+ )
1691
+ continue
1692
+
1440
1693
  if dry_run is True:
1441
1694
  dry_run_count = dry_run_count + 1
1442
1695
  dry_run_tests.append(test_name)
@@ -1448,46 +1701,68 @@ def process_self_contained_coordinator_stream(
1448
1701
  continue
1449
1702
  if "dbconfig" in benchmark_config:
1450
1703
  if "preload_tool" in benchmark_config["dbconfig"]:
1451
- # Get timeout buffer for preload
1452
- buffer_timeout = getattr(
1453
- args,
1454
- "timeout_buffer",
1455
- getattr(args, "container_timeout_buffer", 60),
1456
- )
1457
-
1458
- res = data_prepopulation_step(
1459
- benchmark_config,
1460
- benchmark_tool_workdir,
1461
- client_cpuset_cpus,
1462
- docker_client,
1463
- git_hash,
1464
- port,
1465
- temporary_dir_client,
1466
- test_name,
1467
- host,
1468
- tls_enabled,
1469
- tls_skip_verify,
1470
- test_tls_cert,
1471
- test_tls_key,
1472
- test_tls_cacert,
1473
- resp_version,
1474
- args.benchmark_local_install,
1475
- password,
1476
- oss_cluster_api_enabled,
1477
- unix_socket,
1478
- buffer_timeout,
1479
- args,
1480
- )
1481
- if res is False:
1482
- logging.warning(
1483
- "Skipping this test given preload result was false"
1704
+ # Check if this dataset has already been loaded (for memory comparison mode)
1705
+ dataset_name = benchmark_config["dbconfig"].get("dataset_name")
1706
+ skip_preload = False
1707
+
1708
+ if memory_comparison_only and dataset_name:
1709
+ if dataset_name in loaded_datasets:
1710
+ logging.info(f"Skipping preload for dataset '{dataset_name}' - already loaded")
1711
+ skip_preload = True
1712
+ else:
1713
+ logging.info(f"Loading dataset '{dataset_name}' for the first time")
1714
+ loaded_datasets.add(dataset_name)
1715
+
1716
+ if not skip_preload:
1717
+ # Get timeout buffer for preload
1718
+ buffer_timeout = getattr(
1719
+ args,
1720
+ "timeout_buffer",
1721
+ getattr(args, "container_timeout_buffer", 60),
1484
1722
  )
1485
- delete_temporary_files(
1486
- temporary_dir_client=temporary_dir_client,
1487
- full_result_path=None,
1488
- benchmark_tool_global=benchmark_tool_global,
1723
+
1724
+ res = data_prepopulation_step(
1725
+ benchmark_config,
1726
+ benchmark_tool_workdir,
1727
+ client_cpuset_cpus,
1728
+ docker_client,
1729
+ git_hash,
1730
+ port,
1731
+ temporary_dir_client,
1732
+ test_name,
1733
+ host,
1734
+ tls_enabled,
1735
+ tls_skip_verify,
1736
+ test_tls_cert,
1737
+ test_tls_key,
1738
+ test_tls_cacert,
1739
+ resp_version,
1740
+ args.benchmark_local_install,
1741
+ password,
1742
+ oss_cluster_api_enabled,
1743
+ unix_socket,
1744
+ buffer_timeout,
1745
+ args,
1489
1746
  )
1490
- continue
1747
+ if res is False:
1748
+ logging.warning(
1749
+ "Skipping this test given preload result was false"
1750
+ )
1751
+ delete_temporary_files(
1752
+ temporary_dir_client=temporary_dir_client,
1753
+ full_result_path=None,
1754
+ benchmark_tool_global=benchmark_tool_global,
1755
+ )
1756
+ continue
1757
+ # Send MEMORY PURGE before preload for memory comparison mode (if FLUSHALL wasn't already done)
1758
+ if memory_comparison_only and not args.flushall_on_every_test_start:
1759
+ try:
1760
+ logging.info("Sending MEMORY PURGE before preload for memory comparison mode")
1761
+ for conn in redis_conns:
1762
+ conn.execute_command("MEMORY", "PURGE")
1763
+ except Exception as e:
1764
+ logging.warning(f"MEMORY PURGE failed before preload: {e}")
1765
+
1491
1766
  execute_init_commands(
1492
1767
  benchmark_config, r, dbconfig_keyname="dbconfig"
1493
1768
  )
@@ -1506,6 +1781,169 @@ def process_self_contained_coordinator_stream(
1506
1781
  redis_conns,
1507
1782
  )
1508
1783
 
1784
+ # For memory comparison mode, collect memory stats after preload and skip client benchmark
1785
+ if memory_comparison_only:
1786
+ # Skip load tests (keyspacelen = 0) in memory comparison mode
1787
+ keyspacelen = benchmark_config.get("dbconfig", {}).get("check", {}).get("keyspacelen", None)
1788
+ if keyspacelen is not None and keyspacelen == 0:
1789
+ logging.info(f"Skipping load test {test_name} in memory comparison mode (keyspacelen=0)")
1790
+ delete_temporary_files(
1791
+ temporary_dir_client=temporary_dir_client,
1792
+ full_result_path=None,
1793
+ benchmark_tool_global=benchmark_tool_global,
1794
+ )
1795
+ continue
1796
+
1797
+ # Handle dry run for memory comparison mode
1798
+ if dry_run:
1799
+ dry_run_count = dry_run_count + 1
1800
+ dry_run_tests.append(test_name)
1801
+ logging.info(f"[DRY RUN] Would collect memory stats for test {test_name}")
1802
+
1803
+ # Add dataset info to dry run output
1804
+ dataset_name = benchmark_config.get("dbconfig", {}).get("dataset_name")
1805
+ if dataset_name:
1806
+ logging.info(f"[DRY RUN] Dataset: {dataset_name}")
1807
+
1808
+ delete_temporary_files(
1809
+ temporary_dir_client=temporary_dir_client,
1810
+ full_result_path=None,
1811
+ benchmark_tool_global=benchmark_tool_global,
1812
+ )
1813
+ continue
1814
+
1815
+ logging.info(f"Collecting memory stats for test {test_name}")
1816
+ try:
1817
+ # Use raw command to avoid parsing issues with some Redis versions
1818
+ memory_stats_raw = r.execute_command("MEMORY", "STATS")
1819
+ # Convert list response to dict
1820
+ memory_stats = {}
1821
+ for i in range(0, len(memory_stats_raw), 2):
1822
+ key = memory_stats_raw[i].decode() if isinstance(memory_stats_raw[i], bytes) else str(memory_stats_raw[i])
1823
+ value = memory_stats_raw[i + 1]
1824
+ if isinstance(value, bytes):
1825
+ try:
1826
+ value = float(value.decode())
1827
+ except ValueError:
1828
+ value = value.decode()
1829
+ memory_stats[key] = value
1830
+ except Exception as e:
1831
+ logging.error(f"Failed to collect memory stats: {e}")
1832
+ # Fallback to basic memory info
1833
+ info = r.info("memory")
1834
+ memory_stats = {
1835
+ "total.allocated": info.get("used_memory", 0),
1836
+ "dataset.bytes": info.get("used_memory_dataset", 0),
1837
+ "keys.count": r.dbsize(),
1838
+ "keys.bytes-per-key": 0,
1839
+ "dataset.percentage": 0,
1840
+ "overhead.total": 0,
1841
+ "fragmentation": info.get("mem_fragmentation_ratio", 1.0),
1842
+ "fragmentation.bytes": 0,
1843
+ "allocator.allocated": info.get("used_memory", 0),
1844
+ "allocator.resident": info.get("used_memory_rss", 0),
1845
+ "allocator-fragmentation.ratio": 1.0,
1846
+ }
1847
+
1848
+ # Detect object encoding by scanning 1% of the dataset
1849
+ object_encoding_info = detect_object_encoding(r, benchmark_config.get("dbconfig", {}))
1850
+ logging.info(f"Object encoding detection: {object_encoding_info.get('encoding', 'unknown')} "
1851
+ f"({object_encoding_info.get('confidence', 0)*100:.1f}% confidence)")
1852
+
1853
+ # Extract key memory metrics
1854
+ memory_result = {
1855
+ "test_name": test_name,
1856
+ "total_allocated": memory_stats.get("total.allocated", 0),
1857
+ "dataset_bytes": memory_stats.get("dataset.bytes", 0),
1858
+ "keys_count": memory_stats.get("keys.count", 0),
1859
+ "keys_bytes_per_key": memory_stats.get("keys.bytes-per-key", 0),
1860
+ "dataset_percentage": memory_stats.get("dataset.percentage", 0),
1861
+ "overhead_total": memory_stats.get("overhead.total", 0),
1862
+ "fragmentation": memory_stats.get("fragmentation", 0),
1863
+ "fragmentation_bytes": memory_stats.get("fragmentation.bytes", 0),
1864
+ "allocator_allocated": memory_stats.get("allocator.allocated", 0),
1865
+ "allocator_resident": memory_stats.get("allocator.resident", 0),
1866
+ "allocator_fragmentation_ratio": memory_stats.get("allocator-fragmentation.ratio", 0),
1867
+ # Object encoding information
1868
+ "object_encoding": object_encoding_info.get("encoding", "unknown"),
1869
+ "encoding_confidence": object_encoding_info.get("confidence", 0.0),
1870
+ "encoding_sample_size": object_encoding_info.get("sample_size", 0),
1871
+ "encoding_distribution": object_encoding_info.get("encoding_distribution", {}),
1872
+ "encoding_is_complete_scan": object_encoding_info.get("is_complete_scan", False),
1873
+ }
1874
+ memory_results.append(memory_result)
1875
+
1876
+ # Push memory metrics to datasink
1877
+ if datasink_push_results_redistimeseries:
1878
+ memory_metrics_dict = {
1879
+ "memory.total_allocated": memory_result["total_allocated"],
1880
+ "memory.dataset_bytes": memory_result["dataset_bytes"],
1881
+ "memory.keys_count": memory_result["keys_count"],
1882
+ "memory.keys_bytes_per_key": memory_result["keys_bytes_per_key"],
1883
+ "memory.dataset_percentage": memory_result["dataset_percentage"],
1884
+ "memory.overhead_total": memory_result["overhead_total"],
1885
+ "memory.fragmentation": memory_result["fragmentation"],
1886
+ "memory.fragmentation_bytes": memory_result["fragmentation_bytes"],
1887
+ "memory.allocator_allocated": memory_result["allocator_allocated"],
1888
+ "memory.allocator_resident": memory_result["allocator_resident"],
1889
+ "memory.allocator_fragmentation_ratio": memory_result["allocator_fragmentation_ratio"],
1890
+ "memory.encoding_confidence": memory_result["encoding_confidence"],
1891
+ "memory.encoding_sample_size": memory_result["encoding_sample_size"],
1892
+ }
1893
+
1894
+ # Add object encoding to metadata
1895
+ metadata["object_encoding"] = memory_result["object_encoding"]
1896
+ metadata["encoding_confidence"] = f"{memory_result['encoding_confidence']:.3f}"
1897
+ metadata["encoding_sample_size"] = str(memory_result["encoding_sample_size"])
1898
+ metadata["encoding_scan_type"] = "complete" if memory_result.get("encoding_is_complete_scan", False) else "sample"
1899
+
1900
+ # Add encoding distribution to metadata if multiple encodings found
1901
+ if len(memory_result["encoding_distribution"]) > 1:
1902
+ for enc, percentage in memory_result["encoding_distribution"].items():
1903
+ metadata[f"encoding_dist_{enc}"] = f"{percentage:.1f}%"
1904
+
1905
+ exporter_datasink_common(
1906
+ benchmark_config,
1907
+ 0, # benchmark_duration_seconds = 0 for memory only
1908
+ build_variant_name,
1909
+ datapoint_time_ms,
1910
+ dataset_load_duration_seconds,
1911
+ datasink_conn,
1912
+ datasink_push_results_redistimeseries,
1913
+ git_branch,
1914
+ git_version,
1915
+ metadata,
1916
+ redis_conns,
1917
+ memory_metrics_dict,
1918
+ running_platform,
1919
+ args.deployment_name,
1920
+ args.deployment_type,
1921
+ test_name,
1922
+ tf_github_org,
1923
+ tf_github_repo,
1924
+ tf_triggering_env,
1925
+ topology_spec_name,
1926
+ default_metrics,
1927
+ git_hash,
1928
+ )
1929
+
1930
+ # Send MEMORY PURGE after memory comparison (if FLUSHALL at test end is not enabled)
1931
+ if not args.flushall_on_every_test_end:
1932
+ try:
1933
+ logging.info("Sending MEMORY PURGE after memory comparison")
1934
+ for conn in redis_conns:
1935
+ conn.execute_command("MEMORY", "PURGE")
1936
+ except Exception as e:
1937
+ logging.warning(f"MEMORY PURGE failed after memory comparison: {e}")
1938
+
1939
+ logging.info(f"Memory comparison completed for test {test_name}")
1940
+ delete_temporary_files(
1941
+ temporary_dir_client=temporary_dir_client,
1942
+ full_result_path=None,
1943
+ benchmark_tool_global=benchmark_tool_global,
1944
+ )
1945
+ continue
1946
+
1509
1947
  if dry_run_include_preload is True:
1510
1948
  dry_run_count = dry_run_count + 1
1511
1949
  dry_run_tests.append(test_name)
@@ -2070,6 +2508,19 @@ def process_self_contained_coordinator_stream(
2070
2508
  for r in redis_conns:
2071
2509
  r.flushall()
2072
2510
 
2511
+ # Send MEMORY PURGE after FLUSHALL for memory comparison mode
2512
+ if memory_comparison_only:
2513
+ try:
2514
+ logging.info("Sending MEMORY PURGE after FLUSHALL at test end")
2515
+ for r in redis_conns:
2516
+ r.execute_command("MEMORY", "PURGE")
2517
+ except Exception as e:
2518
+ logging.warning(f"MEMORY PURGE failed after FLUSHALL at test end: {e}")
2519
+
2520
+ except KeyboardInterrupt:
2521
+ logging.info("KeyboardInterrupt caught. Exiting...")
2522
+ print("\nKeyboardInterrupt caught. Exiting...")
2523
+ break
2073
2524
  except:
2074
2525
  logging.critical(
2075
2526
  "Some unexpected exception was caught "
@@ -2080,6 +2531,11 @@ def process_self_contained_coordinator_stream(
2080
2531
  traceback.print_exc(file=sys.stdout)
2081
2532
  print("-" * 60)
2082
2533
  test_result = False
2534
+
2535
+ # Check if user requested exit via Ctrl+C
2536
+ if _exit_requested:
2537
+ logging.info("Exit requested by user. Stopping after exception.")
2538
+ break
2083
2539
  # tear-down
2084
2540
  logging.info("Tearing down setup")
2085
2541
  for container in client_containers:
@@ -2114,6 +2570,11 @@ def process_self_contained_coordinator_stream(
2114
2570
  benchmark_tool_global=benchmark_tool_global,
2115
2571
  )
2116
2572
 
2573
+ # Check if user requested exit via Ctrl+C
2574
+ if _exit_requested:
2575
+ logging.info("Exit requested by user. Printing summary before exit.")
2576
+ print("\nExecution stopped by user request. Printing summary of completed tests...")
2577
+
2117
2578
  # Print Redis server information section before results
2118
2579
  if len(results_matrix) > 0:
2119
2580
  # Get redis_conns from the first test context (we need to pass it somehow)
@@ -2136,6 +2597,10 @@ def process_self_contained_coordinator_stream(
2136
2597
  )
2137
2598
  writer.write_table()
2138
2599
 
2600
+ # Add note if execution was stopped early
2601
+ if _exit_requested:
2602
+ print("\n(Note: Execution was stopped early by user request - showing results for completed tests only)")
2603
+
2139
2604
  if client_aggregated_results_folder != "":
2140
2605
  os.makedirs(client_aggregated_results_folder, exist_ok=True)
2141
2606
  dest_fpath = f"{client_aggregated_results_folder}/aggregate-results.csv"
@@ -2148,15 +2613,112 @@ def process_self_contained_coordinator_stream(
2148
2613
  )
2149
2614
  csv_writer.dump(dest_fpath)
2150
2615
 
2616
+ # Print memory comparison summary if in memory comparison mode
2617
+ if memory_comparison_only and memory_results:
2618
+ logging.info("\n" + "="*80)
2619
+ logging.info("MEMORY COMPARISON SUMMARY")
2620
+ logging.info("="*80)
2621
+ logging.info(f"Total unique datasets loaded: {len(loaded_datasets)}")
2622
+ if loaded_datasets:
2623
+ logging.info(f"Datasets: {', '.join(sorted(loaded_datasets))}")
2624
+ logging.info("="*80)
2625
+
2626
+ # Create memory summary table
2627
+ memory_headers = [
2628
+ "Test Name",
2629
+ "Total Allocated",
2630
+ "Dataset Bytes",
2631
+ "Keys Count",
2632
+ "Bytes/Key",
2633
+ "Dataset %",
2634
+ "Overhead",
2635
+ "Fragmentation",
2636
+ "Alloc Fragmentation",
2637
+ "Object Encoding",
2638
+ "Encoding Confidence",
2639
+ "Scan Type"
2640
+ ]
2641
+
2642
+ memory_matrix = []
2643
+ for result in memory_results:
2644
+ # Convert bytes to human readable format
2645
+ total_mb = result["total_allocated"] / (1024 * 1024)
2646
+ dataset_mb = result["dataset_bytes"] / (1024 * 1024)
2647
+ overhead_mb = result["overhead_total"] / (1024 * 1024)
2648
+
2649
+ memory_matrix.append([
2650
+ result["test_name"],
2651
+ f"{total_mb:.1f}MB",
2652
+ f"{dataset_mb:.1f}MB",
2653
+ f"{result['keys_count']:,}",
2654
+ f"{result['keys_bytes_per_key']:.0f}B",
2655
+ f"{result['dataset_percentage']:.1f}%",
2656
+ f"{overhead_mb:.1f}MB",
2657
+ f"{result['fragmentation']:.2f}",
2658
+ f"{result['allocator_fragmentation_ratio']:.3f}",
2659
+ result.get("object_encoding", "unknown"),
2660
+ f"{result.get('encoding_confidence', 0.0)*100:.1f}%",
2661
+ "complete" if result.get("encoding_is_complete_scan", False) else "sample"
2662
+ ])
2663
+
2664
+ memory_writer = MarkdownTableWriter(
2665
+ table_name="Memory Usage Summary",
2666
+ headers=memory_headers,
2667
+ value_matrix=memory_matrix,
2668
+ )
2669
+ memory_writer.write_table()
2670
+
2151
2671
  if dry_run is True:
2672
+ mode_description = "memory comparison" if memory_comparison_only else "benchmark"
2152
2673
  logging.info(
2153
- "Number of tests that would have been run: {}".format(dry_run_count)
2674
+ "Number of tests that would have been run ({}): {}".format(mode_description, dry_run_count)
2154
2675
  )
2676
+ if _exit_requested:
2677
+ logging.info("(Note: Execution was stopped early by user request)")
2155
2678
  if dry_run_tests:
2156
- logging.info("Tests that would be run:")
2679
+ logging.info(f"Tests that would be run ({mode_description} mode):")
2680
+ for test in dry_run_tests:
2681
+ logging.info(f" - {test}")
2157
2682
  final_test_regex = "|".join(dry_run_tests)
2158
2683
  logging.info(f"Final test regex: {final_test_regex}")
2159
2684
 
2685
+ # For memory comparison mode, show dataset analysis
2686
+ if memory_comparison_only:
2687
+ unique_datasets = set()
2688
+ tests_with_datasets = 0
2689
+
2690
+ for test_file in testsuite_spec_files:
2691
+ if defaults_filename in test_file:
2692
+ continue
2693
+ try:
2694
+ with open(test_file, "r") as stream:
2695
+ benchmark_config = yaml.safe_load(stream)
2696
+
2697
+ test_name = extract_test_name_from_test_configuration_file(test_file)
2698
+ if test_name in dry_run_tests and "dbconfig" in benchmark_config:
2699
+ # Skip load tests in dry run analysis too
2700
+ keyspacelen = benchmark_config["dbconfig"].get("check", {}).get("keyspacelen", None)
2701
+ if keyspacelen is not None and keyspacelen == 0:
2702
+ continue
2703
+
2704
+ dataset_name = benchmark_config["dbconfig"].get("dataset_name")
2705
+ if dataset_name:
2706
+ unique_datasets.add(dataset_name)
2707
+ tests_with_datasets += 1
2708
+
2709
+ except Exception as e:
2710
+ logging.debug(f"Error analyzing {test_file} for dry run: {e}")
2711
+
2712
+ if tests_with_datasets > 0:
2713
+ logging.info(f"\nMemory comparison analysis:")
2714
+ logging.info(f" Tests with datasets: {tests_with_datasets}")
2715
+ logging.info(f" Unique datasets: {len(unique_datasets)}")
2716
+ logging.info(f" Dataset ingestion savings: {tests_with_datasets - len(unique_datasets)} skipped loads")
2717
+ if unique_datasets:
2718
+ logging.info(f" Datasets that would be loaded:")
2719
+ for dataset in sorted(unique_datasets):
2720
+ logging.info(f" - {dataset}")
2721
+
2160
2722
 
2161
2723
  def get_maxmemory(r):
2162
2724
  memory_info = r.info("memory")