redis_benchmarks_specification-0.2.42-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (336)
  1. redis_benchmarks_specification/__api__/Readme.md +7 -0
  2. redis_benchmarks_specification/__api__/__init__.py +5 -0
  3. redis_benchmarks_specification/__api__/api.py +87 -0
  4. redis_benchmarks_specification/__api__/app.py +191 -0
  5. redis_benchmarks_specification/__builder__/Readme.md +7 -0
  6. redis_benchmarks_specification/__builder__/__init__.py +5 -0
  7. redis_benchmarks_specification/__builder__/builder.py +1010 -0
  8. redis_benchmarks_specification/__builder__/schema.py +23 -0
  9. redis_benchmarks_specification/__cli__/__init__.py +5 -0
  10. redis_benchmarks_specification/__cli__/args.py +226 -0
  11. redis_benchmarks_specification/__cli__/cli.py +624 -0
  12. redis_benchmarks_specification/__cli__/stats.py +1304 -0
  13. redis_benchmarks_specification/__common__/__init__.py +0 -0
  14. redis_benchmarks_specification/__common__/builder_schema.py +256 -0
  15. redis_benchmarks_specification/__common__/env.py +96 -0
  16. redis_benchmarks_specification/__common__/github.py +280 -0
  17. redis_benchmarks_specification/__common__/package.py +28 -0
  18. redis_benchmarks_specification/__common__/runner.py +485 -0
  19. redis_benchmarks_specification/__common__/spec.py +143 -0
  20. redis_benchmarks_specification/__common__/suppress_warnings.py +20 -0
  21. redis_benchmarks_specification/__common__/timeseries.py +1621 -0
  22. redis_benchmarks_specification/__compare__/__init__.py +5 -0
  23. redis_benchmarks_specification/__compare__/args.py +240 -0
  24. redis_benchmarks_specification/__compare__/compare.py +3322 -0
  25. redis_benchmarks_specification/__init__.py +15 -0
  26. redis_benchmarks_specification/__runner__/__init__.py +5 -0
  27. redis_benchmarks_specification/__runner__/args.py +334 -0
  28. redis_benchmarks_specification/__runner__/remote_profiling.py +535 -0
  29. redis_benchmarks_specification/__runner__/runner.py +3837 -0
  30. redis_benchmarks_specification/__self_contained_coordinator__/__init__.py +5 -0
  31. redis_benchmarks_specification/__self_contained_coordinator__/args.py +210 -0
  32. redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py +27 -0
  33. redis_benchmarks_specification/__self_contained_coordinator__/build_info.py +61 -0
  34. redis_benchmarks_specification/__self_contained_coordinator__/clients.py +58 -0
  35. redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py +17 -0
  36. redis_benchmarks_specification/__self_contained_coordinator__/docker.py +108 -0
  37. redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py +19 -0
  38. redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py +96 -0
  39. redis_benchmarks_specification/__self_contained_coordinator__/runners.py +740 -0
  40. redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +2554 -0
  41. redis_benchmarks_specification/__setups__/__init__.py +0 -0
  42. redis_benchmarks_specification/__setups__/topologies.py +17 -0
  43. redis_benchmarks_specification/__spec__/__init__.py +5 -0
  44. redis_benchmarks_specification/__spec__/args.py +78 -0
  45. redis_benchmarks_specification/__spec__/cli.py +259 -0
  46. redis_benchmarks_specification/__watchdog__/__init__.py +5 -0
  47. redis_benchmarks_specification/__watchdog__/args.py +54 -0
  48. redis_benchmarks_specification/__watchdog__/watchdog.py +175 -0
  49. redis_benchmarks_specification/commands/__init__.py +0 -0
  50. redis_benchmarks_specification/commands/commands.py +15 -0
  51. redis_benchmarks_specification/setups/builders/gcc:15.2.0-amd64-debian-bookworm-default.yml +20 -0
  52. redis_benchmarks_specification/setups/builders/gcc:15.2.0-arm64-debian-bookworm-default.yml +20 -0
  53. redis_benchmarks_specification/setups/platforms/aws-ec2-1node-c5.4xlarge.yml +27 -0
  54. redis_benchmarks_specification/setups/topologies/topologies.yml +153 -0
  55. redis_benchmarks_specification/test-suites/defaults.yml +32 -0
  56. redis_benchmarks_specification/test-suites/generate.py +114 -0
  57. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpire-5-fields-10B-values.yml +43 -0
  58. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpire-50-fields-10B-values.yml +53 -0
  59. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpireat-5-fields-10B-values.yml +43 -0
  60. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpireat-50-fields-10B-values.yml +53 -0
  61. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetall-50-fields-100B-values.yml +52 -0
  62. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-5-fields-10B-values.yml +43 -0
  63. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-50-fields-10B-values.yml +53 -0
  64. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-persist-50-fields-10B-values.yml +53 -0
  65. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpire-5-fields-10B-values.yml +43 -0
  66. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpire-50-fields-10B-values.yml +53 -0
  67. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpireat-5-fields-10B-values.yml +43 -0
  68. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpireat-50-fields-10B-values.yml +53 -0
  69. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-htll-50-fields-10B-values.yml +53 -0
  70. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-1000B-values-expiration.yml +35 -0
  71. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-expiration.yml +34 -0
  72. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-long-expiration.yml +35 -0
  73. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-short-expiration.yml +35 -0
  74. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-20-fields-with-1B-values-pipeline-30.yml +43 -0
  75. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-1000B-values-expiration.yml +36 -0
  76. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-expiration.yml +35 -0
  77. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-long-expiration.yml +36 -0
  78. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-short-expiration.yml +36 -0
  79. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values-expiration.yml +45 -0
  80. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values.yml +44 -0
  81. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-100B-values.yml +44 -0
  82. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-expiration.yml +44 -0
  83. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-long-expiration.yml +45 -0
  84. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-short-expiration.yml +45 -0
  85. redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values.yml +43 -0
  86. redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-hash-50-fields-with-10000B-values.yml +44 -0
  87. redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50.yml +39 -0
  88. redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-with-10B-values-pipeline-50.yml +33 -0
  89. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values-pipeline-10.yml +33 -0
  90. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values.yml +33 -0
  91. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values-pipeline-10.yml +34 -0
  92. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values.yml +33 -0
  93. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +38 -0
  94. redis_benchmarks_specification/test-suites/memtier_benchmark-1Kkeys-hash-listpack-500-fields-update-20-fields-with-1B-to-64B-values.yml +75 -0
  95. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml +50 -0
  96. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-expire-use-case.yml +50 -0
  97. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-psetex-expire-use-case.yml +43 -0
  98. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-setex-expire-use-case.yml +43 -0
  99. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-1KiB-expire-use-case.yml +49 -0
  100. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-4KiB-expire-use-case.yml +50 -0
  101. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-bitmap-getbit-pipeline-10.yml +42 -0
  102. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-exists-pipeline-10.yml +41 -0
  103. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expire-pipeline-10.yml +41 -0
  104. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expireat-pipeline-10.yml +41 -0
  105. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-pexpire-pipeline-10.yml +41 -0
  106. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-count-500-pipeline-10.yml +41 -0
  107. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-500-pipeline-10.yml +42 -0
  108. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-5000-pipeline-10.yml +42 -0
  109. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-pipeline-10.yml +42 -0
  110. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-pipeline-10.yml +41 -0
  111. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-type-pipeline-10.yml +41 -0
  112. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-touch-pipeline-10.yml +41 -0
  113. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-ttl-pipeline-10.yml +41 -0
  114. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hexists.yml +45 -0
  115. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hget-hgetall-hkeys-hvals-with-100B-values.yml +48 -0
  116. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hgetall-50-fields-10B-values.yml +53 -0
  117. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrby.yml +42 -0
  118. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrbyfloat.yml +42 -0
  119. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-10-fields-with-10B-values-with-expiration-pipeline-10.yml +45 -0
  120. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-5-fields-with-100B-values-with-expiration-pipeline-10.yml +44 -0
  121. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-5-fields-with-10B-values-with-expiration-pipeline-10.yml +44 -0
  122. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-50-fields-with-10B-values-with-expiration-pipeline-10.yml +54 -0
  123. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hmget-5-fields-with-100B-values-pipeline-10.yml +44 -0
  124. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-transactions-multi-exec-pipeline-20.yml +43 -0
  125. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-100B-values.yml +44 -0
  126. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-10B-values.yml +44 -0
  127. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-1KiB-values.yml +44 -0
  128. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-rpoplpush-with-10B-values.yml +42 -0
  129. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values-pipeline-10.yml +34 -0
  130. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values.yml +33 -0
  131. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-50-fields-with-10B-values-long-expiration-pipeline-10.yml +46 -0
  132. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-hmset-5-fields-with-1000B-values.yml +33 -0
  133. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-rpush-with-10B-values.yml +32 -0
  134. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-100B-values.yml +32 -0
  135. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values-pipeline-10.yml +33 -0
  136. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values.yml +32 -0
  137. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-1KiB-values.yml +32 -0
  138. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits-pipeline-10.yml +58 -0
  139. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits.yml +58 -0
  140. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-pipeline-10.yml +41 -0
  141. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements.yml +40 -0
  142. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values-pipeline-10.yml +33 -0
  143. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values.yml +33 -0
  144. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values-pipeline-10.yml +34 -0
  145. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values.yml +33 -0
  146. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values-pipeline-10.yml +32 -0
  147. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values.yml +35 -0
  148. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-10.yml +33 -0
  149. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100-nokeyprefix.yml +29 -0
  150. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100.yml +33 -0
  151. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-50.yml +33 -0
  152. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-500.yml +33 -0
  153. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values.yml +32 -0
  154. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values-pipeline-10.yml +32 -0
  155. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values.yml +32 -0
  156. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-20KiB-values.yml +35 -0
  157. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-listpack-with-100-elements-double-score.yml +91 -0
  158. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-double-score.yml +35 -0
  159. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-int-score.yml +34 -0
  160. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B-pipeline-10.yml +43 -0
  161. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B.yml +42 -0
  162. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-decr.yml +41 -0
  163. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B-pipeline-10.yml +41 -0
  164. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B.yml +41 -0
  165. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-10.yml +41 -0
  166. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +38 -0
  167. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100.yml +41 -0
  168. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-50.yml +41 -0
  169. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-500.yml +41 -0
  170. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B.yml +41 -0
  171. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB-pipeline-10.yml +41 -0
  172. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB.yml +41 -0
  173. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B-pipeline-10.yml +40 -0
  174. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B.yml +40 -0
  175. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incr-pipeline-10.yml +30 -0
  176. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby-pipeline-10.yml +30 -0
  177. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby.yml +30 -0
  178. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat-pipeline-10.yml +30 -0
  179. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat.yml +30 -0
  180. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-int-encoding-strlen-pipeline-10.yml +40 -0
  181. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mget-1KiB.yml +41 -0
  182. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire-pipeline-10.yml +45 -0
  183. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire.yml +45 -0
  184. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-pipeline-10.yml +43 -0
  185. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B.yml +42 -0
  186. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB-pipeline-10.yml +42 -0
  187. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB.yml +41 -0
  188. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B-pipeline-10.yml +43 -0
  189. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B.yml +42 -0
  190. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B-pipeline-10.yml +43 -0
  191. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B.yml +42 -0
  192. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-with-expiration-240B-400_conns.yml +47 -0
  193. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-set-with-ex-100B-pipeline-10.yml +41 -0
  194. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setex-100B-pipeline-10.yml +41 -0
  195. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-1.yml +43 -0
  196. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml +43 -0
  197. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-4KiB-pipeline-1.yml +43 -0
  198. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-4KiB-pipeline-10.yml +43 -0
  199. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-512B-pipeline-1.yml +43 -0
  200. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-512B-pipeline-10.yml +43 -0
  201. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B-pipeline-10.yml +42 -0
  202. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B.yml +42 -0
  203. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-100M-bits-bitmap-bitcount.yml +45 -0
  204. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-1Billion-bits-bitmap-bitcount.yml +45 -0
  205. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geopos.yml +38 -0
  206. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geosearch-fromlonlat-withcoord.yml +39 -0
  207. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist-pipeline-10.yml +36 -0
  208. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist.yml +36 -0
  209. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash-pipeline-10.yml +35 -0
  210. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash.yml +34 -0
  211. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos-pipeline-10.yml +35 -0
  212. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos.yml +34 -0
  213. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-bybox.yml +36 -0
  214. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-pipeline-10.yml +36 -0
  215. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat.yml +36 -0
  216. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall-pipeline-10.yml +285 -0
  217. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall.yml +284 -0
  218. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-100B-values-cursor-count-1000.yml +291 -0
  219. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values-cursor-count-100.yml +291 -0
  220. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values.yml +290 -0
  221. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-50-fields-10B-values.yml +54 -0
  222. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements-pipeline-10.yml +37 -0
  223. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements.yml +36 -0
  224. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-7bit-uint-lrange-all-elements-pipeline-10.yml +44 -0
  225. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-lrange-all-elements-pipeline-10.yml +52 -0
  226. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-llen-pipeline-10.yml +52 -0
  227. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements-pipeline-10.yml +52 -0
  228. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements.yml +51 -0
  229. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-integer.yml +41 -0
  230. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string-pipeline-10.yml +42 -0
  231. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string.yml +41 -0
  232. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-integer.yml +45 -0
  233. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-string.yml +45 -0
  234. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-integer.yml +41 -0
  235. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-string.yml +41 -0
  236. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements-pipeline-10.yml +202 -0
  237. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements.yml +201 -0
  238. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-2K-elements-quicklist-lrange-all-elements-longs.yml +258 -0
  239. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-hash-1K-fields-with-5B-values.yml +282 -0
  240. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-float-score.yml +36 -0
  241. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-hexa-score.yml +36 -0
  242. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-pfadd-4KB-values-pipeline-10.yml +32 -0
  243. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers-pipeline-10.yml +37 -0
  244. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers.yml +36 -0
  245. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smismember.yml +38 -0
  246. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-is-a-member.yml +53 -0
  247. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-not-a-member.yml +53 -0
  248. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smembers.yml +50 -0
  249. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smismember.yml +54 -0
  250. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sscan.yml +50 -0
  251. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-sismember-50pct-chance.yml +41 -0
  252. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-srem-50pct-chance.yml +40 -0
  253. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-smembers.yml +200 -0
  254. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan-cursor-count-100.yml +201 -0
  255. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan.yml +200 -0
  256. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1M-elements-sismember-50pct-chance.yml +40 -0
  257. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-200K-elements-sadd-constant.yml +41 -0
  258. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-2M-elements-sadd-increasing.yml +32 -0
  259. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zincrby-1M-elements-pipeline-1.yml +40 -0
  260. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-100K-elements-pipeline-1.yml +40 -0
  261. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-10M-elements-pipeline-1.yml +41 -0
  262. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-1M-elements-pipeline-1.yml +40 -0
  263. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrem-5M-elements-pipeline-1.yml +47 -0
  264. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-1.yml +41 -0
  265. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-10.yml +41 -0
  266. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrank-1M-elements-pipeline-1.yml +40 -0
  267. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements-long-scores.yml +41 -0
  268. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements.yml +40 -0
  269. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrange-all-elements.yml +66 -0
  270. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements-long-scores.yml +66 -0
  271. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements.yml +66 -0
  272. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zscan.yml +65 -0
  273. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zrange-all-elements.yml +322 -0
  274. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zscan.yml +321 -0
  275. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zcard-pipeline-10.yml +39 -0
  276. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zremrangebyscore-pipeline-10.yml +41 -0
  277. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-5-elements.yml +40 -0
  278. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-withscores-5-elements-pipeline-10.yml +41 -0
  279. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zscore-pipeline-10.yml +40 -0
  280. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-1K-elements.yml +41 -0
  281. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-300K-elements.yml +43 -0
  282. redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-listpack-zrank-100-elements-pipeline-1.yml +50 -0
  283. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-eval-hset-expire.yml +37 -0
  284. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-evalsha-hset-expire.yml +41 -0
  285. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml +57 -0
  286. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sinter.yml +57 -0
  287. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml +57 -0
  288. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries-pipeline-10.yml +46 -0
  289. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries.yml +46 -0
  290. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunion.yml +434 -0
  291. redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunionstore.yml +434 -0
  292. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values-pipeline-10.yml +37 -0
  293. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values.yml +37 -0
  294. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-400_conns.yml +45 -0
  295. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-40_conns.yml +45 -0
  296. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-2000_conns.yml +46 -0
  297. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-400_conns.yml +46 -0
  298. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-40_conns.yml +46 -0
  299. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-400_conns.yml +45 -0
  300. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-2000_conns.yml +46 -0
  301. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-400_conns.yml +46 -0
  302. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-5200_conns.yml +46 -0
  303. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-50-50-with-512B-values-with-expiration-pipeline-10-400_conns.yml +43 -0
  304. redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello-pipeline-10.yml +32 -0
  305. redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello.yml +32 -0
  306. redis_benchmarks_specification/test-suites/memtier_benchmark-multiple-hll-pfcount-100B-values.yml +34 -0
  307. redis_benchmarks_specification/test-suites/memtier_benchmark-multiple-hll-pfmerge-100B-values.yml +34 -0
  308. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-connection-ping-pipeline-10.yml +29 -0
  309. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml +40 -0
  310. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml +40 -0
  311. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml +40 -0
  312. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml +40 -0
  313. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-publish-1K-channels-10B-no-subscribers.yml +30 -0
  314. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-server-time-pipeline-10.yml +29 -0
  315. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-10.yml +68 -0
  316. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-100.yml +69 -0
  317. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-1000.yml +68 -0
  318. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-rate-limiting-lua-100k-sessions.yml +64 -0
  319. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-realtime-analytics-membership-pipeline-10.yml +56 -0
  320. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-realtime-analytics-membership.yml +56 -0
  321. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-hash-100k-sessions.yml +108 -0
  322. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-json-100k-sessions.yml +109 -0
  323. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-string-100k-sessions.yml +98 -0
  324. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-100k-sessions.yml +205 -0
  325. redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml +205 -0
  326. redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xread-count-100.yml +36 -0
  327. redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xreadgroup-count-100-noack.yml +38 -0
  328. redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xreadgroup-count-100.yml +38 -0
  329. redis_benchmarks_specification/test-suites/memtier_benchmark-stream-concurrent-xadd-xreadgroup-70-30.yml +50 -0
  330. redis_benchmarks_specification/test-suites/template.txt +18 -0
  331. redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml +41 -0
  332. redis_benchmarks_specification-0.2.42.dist-info/LICENSE +201 -0
  333. redis_benchmarks_specification-0.2.42.dist-info/METADATA +434 -0
  334. redis_benchmarks_specification-0.2.42.dist-info/RECORD +336 -0
  335. redis_benchmarks_specification-0.2.42.dist-info/WHEEL +4 -0
  336. redis_benchmarks_specification-0.2.42.dist-info/entry_points.txt +10 -0
redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py
@@ -0,0 +1,2554 @@
+# Import warning suppression first
+from redis_benchmarks_specification.__common__.suppress_warnings import *
+
+import datetime
+import json
+import logging
+import pathlib
+import shutil
+import subprocess
+import tempfile
+import threading
+import traceback
+import re
+import docker
+import docker.errors
+import redis
+import os
+from pathlib import Path
+import sys
+import time
+import base64
+from http.server import HTTPServer, BaseHTTPRequestHandler
+from urllib.parse import urlparse, parse_qs
+
+from docker.models.containers import Container
+from redis_benchmarks_specification.__self_contained_coordinator__.post_processing import (
+    post_process_vector_db,
+)
+from redisbench_admin.profilers.profilers_local import (
+    check_compatible_system_and_kernel_and_prepare_profile,
+)
+
+from redis_benchmarks_specification.__common__.env import (
+    LOG_FORMAT,
+    LOG_DATEFMT,
+    LOG_LEVEL,
+    REDIS_HEALTH_CHECK_INTERVAL,
+    REDIS_SOCKET_TIMEOUT,
+    REDIS_BINS_EXPIRE_SECS,
+)
+from redis_benchmarks_specification.__common__.github import (
+    check_github_available_and_actionable,
+    check_benchmark_running_comment,
+    update_comment_if_needed,
+    create_new_pr_comment,
+    generate_benchmark_started_pr_comment,
+    check_regression_comment,
+)
+from redis_benchmarks_specification.__common__.package import (
+    get_version_string,
+    populate_with_poetry_data,
+)
+from redis_benchmarks_specification.__common__.runner import (
+    extract_testsuites,
+    reset_commandstats,
+    exporter_datasink_common,
+    execute_init_commands,
+)
+from redis_benchmarks_specification.__common__.timeseries import (
+    datasink_profile_tabular_data,
+)
+from redis_benchmarks_specification.__compare__.compare import (
+    compute_regression_table,
+    prepare_regression_comment,
+    extract_default_branch_and_metric,
+)
+from redis_benchmarks_specification.__runner__.runner import (
+    print_results_table_stdout,
+    prepare_memtier_benchmark_parameters,
+    validate_benchmark_metrics,
+)
+from redis_benchmarks_specification.__self_contained_coordinator__.args import (
+    create_self_contained_coordinator_args,
+)
+from redis_benchmarks_specification.__self_contained_coordinator__.runners import (
+    build_runners_consumer_group_create,
+    get_runners_consumer_group_name,
+    clear_pending_messages_for_consumer,
+    reset_consumer_group_to_latest,
+)
+from redis_benchmarks_specification.__setups__.topologies import get_topologies
+
+
+from redisbench_admin.profilers.profilers_local import (
+    local_profilers_platform_checks,
+    profilers_start_if_required,
+    profilers_stop_if_required,
+)
+from redisbench_admin.run.common import (
+    get_start_time_vars,
+    prepare_benchmark_parameters,
+)
+from redisbench_admin.run.grafana import generate_artifacts_table_grafana_redis
+
+from redisbench_admin.run.run import calculate_client_tool_duration_and_check
+from redisbench_admin.utils.benchmark_config import (
+    get_final_benchmark_config,
+    get_defaults,
+)
+from redisbench_admin.utils.local import get_local_run_full_filename
+from redisbench_admin.utils.results import post_process_benchmark_results
+
+from redis_benchmarks_specification.__common__.env import (
+    STREAM_KEYNAME_NEW_BUILD_EVENTS,
+    get_arch_specific_stream_name,
+    S3_BUCKET_NAME,
+)
+from redis_benchmarks_specification.__common__.spec import (
+    extract_build_variant_variations,
+    extract_client_cpu_limit,
+    extract_client_tool,
+    extract_client_container_image,
+    extract_redis_dbconfig_parameters,
+    extract_redis_configuration_from_topology,
+)
+from redis_benchmarks_specification.__self_contained_coordinator__.artifacts import (
+    restore_build_artifacts_from_test_details,
+)
+from redis_benchmarks_specification.__self_contained_coordinator__.build_info import (
+    extract_build_info_from_streamdata,
+)
+
+# Global variables for HTTP server control
+_reset_queue_requested = False
+_exclusive_hardware = False
+_http_auth_username = None
+_http_auth_password = None
+_flush_timestamp = None
+
+
+class CoordinatorHTTPHandler(BaseHTTPRequestHandler):
+    """HTTP request handler for coordinator endpoints"""
+
+    def log_message(self, format, *args):
+        """Override to use our logging system"""
+        logging.info(f"HTTP {format % args}")
+
+    def _authenticate(self):
+        """Check if the request is authenticated"""
+        global _http_auth_username, _http_auth_password
+
+        # Check for Authorization header
+        auth_header = self.headers.get("Authorization")
+        if not auth_header:
+            return False
+
+        # Parse Basic auth
+        try:
+            if not auth_header.startswith("Basic "):
+                return False
+
+            # Decode base64 credentials
+            encoded_credentials = auth_header[6:]  # Remove 'Basic ' prefix
+            decoded_credentials = base64.b64decode(encoded_credentials).decode("utf-8")
+            username, password = decoded_credentials.split(":", 1)
+
+            # Verify credentials
+            return username == _http_auth_username and password == _http_auth_password
+
+        except Exception as e:
+            logging.warning(f"Authentication error: {e}")
+            return False
+
+    def _send_auth_required(self):
+        """Send 401 Unauthorized response"""
+        self.send_response(401)
+        self.send_header(
+            "WWW-Authenticate", 'Basic realm="Redis Benchmarks Coordinator"'
+        )
+        self.send_header("Content-type", "application/json")
+        self.end_headers()
+        response = {
+            "error": "Authentication required",
+            "message": "Please provide valid credentials using Basic authentication",
+        }
+        self.wfile.write(json.dumps(response).encode())
+
+    def do_GET(self):
+        """Handle GET requests"""
+        # Check authentication
+        if not self._authenticate():
+            self._send_auth_required()
+            return
+
+        parsed_path = urlparse(self.path)
+
+        if parsed_path.path == "/ping":
+            self.send_response(200)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            response = {
+                "status": "healthy",
+                "timestamp": datetime.datetime.utcnow().isoformat(),
+                "service": "redis-benchmarks-self-contained-coordinator",
+            }
+            self.wfile.write(json.dumps(response).encode())
+
+        elif parsed_path.path == "/containers":
+            # Check for stuck containers
+            stuck_containers = self._check_stuck_containers()
+
+            self.send_response(200)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            response = {
+                "status": "success",
+                "stuck_containers": stuck_containers,
+                "total_stuck": len(stuck_containers),
+                "timestamp": datetime.datetime.utcnow().isoformat(),
+            }
+            self.wfile.write(json.dumps(response).encode())
+
+        else:
+            self.send_response(404)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            self.wfile.write(json.dumps({"error": "Not found"}).encode())
+
+    def do_POST(self):
+        """Handle POST requests"""
+        # Check authentication
+        if not self._authenticate():
+            self._send_auth_required()
+            return
+
+        global _reset_queue_requested, _flush_timestamp
+
+        parsed_path = urlparse(self.path)
+
+        if parsed_path.path == "/reset-queue":
+            try:
+                # Read request body
+                content_length = int(self.headers.get("Content-Length", 0))
+                if content_length > 0:
+                    post_data = self.rfile.read(content_length)
+                    try:
+                        request_data = json.loads(post_data.decode())
+                    except json.JSONDecodeError:
+                        request_data = {}
+                else:
+                    request_data = {}
+
+                # Set the reset flag
+                _reset_queue_requested = True
+                logging.info("Queue reset requested via HTTP endpoint")
+
+                self.send_response(200)
+                self.send_header("Content-type", "application/json")
+                self.end_headers()
+                response = {
+                    "status": "success",
+                    "message": "Queue reset requested",
+                    "timestamp": datetime.datetime.utcnow().isoformat(),
+                }
+                self.wfile.write(json.dumps(response).encode())
+
+            except Exception as e:
+                logging.error(f"Error handling reset-queue request: {e}")
+                self.send_response(500)
+                self.send_header("Content-type", "application/json")
+                self.end_headers()
+                self.wfile.write(json.dumps({"error": str(e)}).encode())
+
+        elif parsed_path.path == "/flush":
+            try:
+                # Read request body (optional)
+                content_length = int(self.headers.get("Content-Length", 0))
+                if content_length > 0:
+                    post_data = self.rfile.read(content_length)
+                    try:
+                        request_data = json.loads(post_data.decode())
+                    except json.JSONDecodeError:
+                        request_data = {}
+                else:
+                    request_data = {}
+
+                # Record flush timestamp
+                flush_time = datetime.datetime.utcnow()
+                _flush_timestamp = flush_time
+
+                logging.info(
+                    "Flush requested via HTTP endpoint - stopping all containers and processes"
+                )
+
+                # Perform flush cleanup
+                self._perform_flush_cleanup()
+
+                self.send_response(200)
+                self.send_header("Content-type", "application/json")
+                self.end_headers()
+                response = {
+                    "status": "success",
+                    "message": "Flush completed - all containers stopped and processes killed",
+                    "flush_timestamp": flush_time.isoformat(),
+                    "timestamp": datetime.datetime.utcnow().isoformat(),
+                }
+                self.wfile.write(json.dumps(response).encode())
+
+            except Exception as e:
+                logging.error(f"Error during flush operation: {e}")
+                self.send_response(500)
+                self.send_header("Content-type", "application/json")
+                self.end_headers()
+                response = {
+                    "status": "error",
+                    "message": f"Flush failed: {str(e)}",
+                    "timestamp": datetime.datetime.utcnow().isoformat(),
+                }
+                self.wfile.write(json.dumps(response).encode())
+
+        else:
+            self.send_response(404)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            self.wfile.write(json.dumps({"error": "Not found"}).encode())
+
+    def _perform_flush_cleanup(self):
+        """Perform flush cleanup: stop all containers and kill memtier processes"""
+        import subprocess
+
+        # Kill all memtier processes
+        try:
+            logging.info("Killing all memtier_benchmark processes")
+            result = subprocess.run(
+                ["pkill", "-f", "memtier_benchmark"], capture_output=True, text=True
+            )
+            if result.returncode == 0:
+                logging.info("Successfully killed memtier_benchmark processes")
+            else:
+                logging.info("No memtier_benchmark processes found to kill")
+
+            result = subprocess.run(
+                ["pkill", "-f", "memtier"], capture_output=True, text=True
+            )
+            if result.returncode == 0:
+                logging.info("Successfully killed memtier processes")
+            else:
+                logging.info("No memtier processes found to kill")
+        except Exception as e:
+            logging.warning(f"Error killing memtier processes: {e}")
+
+        # Stop all Docker containers with force if needed
+        try:
+            logging.info("Stopping all Docker containers")
+            client = docker.from_env()
+            containers = client.containers.list()
+
+            if not containers:
+                logging.info("No running containers found")
+                return
+
+            logging.info(f"Found {len(containers)} running containers")
+
+            for container in containers:
+                try:
+                    # Get container info
+                    created_time = container.attrs["Created"]
+                    uptime = (
+                        datetime.datetime.now(datetime.timezone.utc)  # aware, to match the parsed "Created" time
+                        - datetime.datetime.fromisoformat(
+                            created_time.replace("Z", "+00:00")
+                        )
+                    )
+
+                    logging.info(
+                        f"Stopping container: {container.name} ({container.id[:12]}) - uptime: {uptime}"
+                    )
+
+                    # Try graceful stop first
+                    container.stop(timeout=10)
+                    logging.info(f"Successfully stopped container: {container.name}")
+
+                except Exception as e:
+                    logging.warning(f"Error stopping container {container.name}: {e}")
+                    try:
+                        # Force kill if graceful stop failed
+                        logging.info(f"Force killing container: {container.name}")
+                        container.kill()
+                        logging.info(
+                            f"Successfully force killed container: {container.name}"
+                        )
+                    except Exception as e2:
+                        logging.error(
+                            f"Failed to force kill container {container.name}: {e2}"
+                        )
+
+        except Exception as e:
+            logging.warning(f"Error accessing Docker client: {e}")
+
+        logging.info("Flush cleanup completed")
+
+    def _check_stuck_containers(self, max_hours=2):
+        """Check for containers running longer than max_hours and return info"""
+        try:
+            client = docker.from_env()
+            containers = client.containers.list()
+            stuck_containers = []
+
+            for container in containers:
+                try:
+                    created_time = container.attrs["Created"]
+                    uptime = (
+                        datetime.datetime.now(datetime.timezone.utc)  # aware, to match the parsed "Created" time
+                        - datetime.datetime.fromisoformat(
+                            created_time.replace("Z", "+00:00")
+                        )
+                    )
+                    uptime_hours = uptime.total_seconds() / 3600
+
+                    if uptime_hours > max_hours:
+                        stuck_containers.append(
+                            {
+                                "name": container.name,
+                                "id": container.id[:12],
+                                "image": (
+                                    container.image.tags[0]
+                                    if container.image.tags
+                                    else "unknown"
+                                ),
+                                "uptime_hours": round(uptime_hours, 2),
+                                "status": container.status,
+                            }
+                        )
+                except Exception as e:
+                    logging.warning(f"Error checking container {container.name}: {e}")
+
+            return stuck_containers
+        except Exception as e:
+            logging.warning(f"Error accessing Docker client: {e}")
+            return []
+
+
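
A note on the uptime arithmetic above: rewriting the trailing `Z` of Docker's `Created` string to `+00:00` makes `fromisoformat()` return a timezone-aware datetime, so the "now" operand has to be aware too; subtracting an aware datetime from a naive `utcnow()` raises `TypeError`. A minimal standalone sketch of the pattern, using a hypothetical `Created` value (real Docker timestamps can carry nanosecond precision, which `fromisoformat()` may reject and would need truncating first):

    import datetime

    # Hypothetical Docker "Created" value, truncated to microseconds for fromisoformat().
    created = "2024-01-01T10:00:00.123456Z"

    created_at = datetime.datetime.fromisoformat(created.replace("Z", "+00:00"))  # aware, UTC
    now = datetime.datetime.now(datetime.timezone.utc)                            # aware, UTC
    uptime_hours = (now - created_at).total_seconds() / 3600
    print(round(uptime_hours, 2))
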
+def start_http_server(port=8080):
+    """Start the HTTP server in a separate thread"""
+
+    def run_server():
+        try:
+            server = HTTPServer(("0.0.0.0", port), CoordinatorHTTPHandler)
+            logging.info(f"Starting HTTP server on port {port}")
+            logging.info(f"Available endpoints:")
+            logging.info(f"  GET  /ping - Health check")
+            logging.info(f"  GET  /containers - Check for stuck containers")
+            logging.info(
+                f"  POST /reset-queue - Reset pending streams and skip running tests"
+            )
+            logging.info(
+                f"  POST /flush - Stop all containers and processes, ignore work before flush time"
+            )
+            server.serve_forever()
+        except Exception as e:
+            logging.error(f"HTTP server error: {e}")
+
+    server_thread = threading.Thread(target=run_server, daemon=True)
+    server_thread.start()
+    return server_thread
+
+
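
Since the handler demands Basic credentials on every endpoint, and main() below only starts the server when both --http-auth-username and --http-auth-password are supplied, a client has to send an Authorization header. A minimal client sketch using only the standard library; the host, port, and credentials are illustrative placeholders (8080 is the function's default port):

    import base64
    import json
    import urllib.request

    # Placeholder values; substitute the coordinator's --http-port and auth flags.
    base = "http://localhost:8080"
    token = base64.b64encode(b"user:secret").decode()
    headers = {"Authorization": "Basic " + token}

    # GET /ping - health check
    req = urllib.request.Request(base + "/ping", headers=headers)
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp))  # {"status": "healthy", ...}

    # POST /reset-queue - request a queue reset (JSON body is optional)
    req = urllib.request.Request(
        base + "/reset-queue", data=b"{}", headers=headers, method="POST"
    )
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp))  # {"status": "success", "message": "Queue reset requested", ...}
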
+def cleanup_system_processes():
+    """Clean up memtier processes and docker containers for exclusive hardware mode"""
+    global _exclusive_hardware
+
+    if not _exclusive_hardware:
+        return
+
+    logging.info("Exclusive hardware mode: Cleaning up system processes")
+
+    try:
+        # Kill all memtier_benchmark processes
+        logging.info("Killing all memtier_benchmark processes")
+        subprocess.run(["pkill", "-f", "memtier_benchmark"], check=False)
+
+        # Stop all docker containers
+        logging.info("Stopping all docker containers")
+        docker_client = docker.from_env()
+        containers = docker_client.containers.list()
+        for container in containers:
+            try:
+                logging.info(
+                    f"Stopping container: {container.name} ({container.id[:12]})"
+                )
+                container.stop(timeout=10)
+                container.remove(force=True)
+            except Exception as e:
+                logging.warning(f"Error stopping container {container.id[:12]}: {e}")
+
+        # Wait a moment for cleanup to complete
+        time.sleep(2)
+        logging.info("System cleanup completed")
+
+    except Exception as e:
+        logging.error(f"Error during system cleanup: {e}")
+
+
+def print_directory_logs(directory_path, description=""):
+    """Print all .log files in a directory for debugging purposes."""
+    if not os.path.exists(directory_path):
+        logging.warning(f"Directory {directory_path} does not exist")
+        return
+
+    logging.info(
+        f"Printing all .log files in {description} directory: {directory_path}"
+    )
+    try:
+        for root, dirs, files in os.walk(directory_path):
+            for file in files:
+                # Only process .log files
+                if not file.endswith(".log"):
+                    continue
+
+                file_path = os.path.join(root, file)
+                logging.info(f"Found log file: {file_path}")
+                try:
+                    # Try to read and print the log file content
+                    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
+                        content = f.read()
+                        if content.strip():  # Only print non-empty files
+                            logging.info(f"Content of {file_path}:")
+                            logging.info("-" * 40)
+                            logging.info(content)
+                            logging.info("-" * 40)
+                        else:
+                            logging.info(f"Log file {file_path} is empty")
+                except Exception as e:
+                    logging.warning(f"Could not read log file {file_path}: {e}")
+    except Exception as e:
+        logging.error(f"Error walking directory {directory_path}: {e}")
+
+
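
print_directory_logs() is a debugging helper with no return value: it walks a directory tree and echoes every non-empty *.log file into the coordinator's own log. A usage sketch with a hypothetical path:

    # Hypothetical directory; in practice this would be a benchmark run's working dir.
    print_directory_logs("/tmp/benchmark-run", "client output")
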
+from redis_benchmarks_specification.__self_contained_coordinator__.cpuset import (
+    extract_db_cpu_limit,
+    generate_cpuset_cpus,
+)
+from redis_benchmarks_specification.__self_contained_coordinator__.clients import (
+    prepare_vector_db_benchmark_parameters,
+)
+from redis_benchmarks_specification.__self_contained_coordinator__.docker import (
+    generate_standalone_redis_server_args,
+)
+
+
541
+ def main():
542
+ global _exclusive_hardware, _http_auth_username, _http_auth_password
543
+
544
+ _, _, project_version = populate_with_poetry_data()
545
+ project_name = "redis-benchmarks-spec runner(self-contained)"
546
+ parser = create_self_contained_coordinator_args(
547
+ get_version_string(project_name, project_version)
548
+ )
549
+ args = parser.parse_args()
550
+
551
+ # Configure logging first, before any logging calls
552
+ if args.logname is not None:
553
+ print("Writting log to {}".format(args.logname))
554
+ logging.basicConfig(
555
+ filename=args.logname,
556
+ filemode="a",
557
+ format=LOG_FORMAT,
558
+ datefmt=LOG_DATEFMT,
559
+ level=LOG_LEVEL,
560
+ )
561
+ else:
562
+ # logging settings
563
+ logging.basicConfig(
564
+ format=LOG_FORMAT,
565
+ level=LOG_LEVEL,
566
+ datefmt=LOG_DATEFMT,
567
+ )
568
+
569
+ # Set global exclusive hardware flag
570
+ _exclusive_hardware = args.exclusive_hardware
571
+ if _exclusive_hardware:
572
+ logging.info("Exclusive hardware mode enabled")
573
+
574
+ # Set HTTP authentication credentials and start server only if credentials are provided
575
+ _http_auth_username = args.http_auth_username
576
+ _http_auth_password = args.http_auth_password
577
+
578
+ if _http_auth_username and _http_auth_password:
579
+ logging.info(
580
+ "Starting HTTP server with authentication on port {}".format(args.http_port)
581
+ )
582
+ start_http_server(args.http_port)
583
+ else:
584
+ logging.info("HTTP server disabled - no authentication credentials provided")
585
+ logging.info(get_version_string(project_name, project_version))
586
+ topologies_folder = os.path.abspath(args.setups_folder + "/topologies")
587
+ logging.info("Using topologies folder dir {}".format(topologies_folder))
588
+ topologies_files = pathlib.Path(topologies_folder).glob("*.yml")
589
+ topologies_files = [str(x) for x in topologies_files]
590
+ logging.info(
591
+ "Reading topologies specifications from: {}".format(
592
+ " ".join([str(x) for x in topologies_files])
593
+ )
594
+ )
595
+ topologies_map = get_topologies(topologies_files[0])
596
+ testsuite_spec_files = extract_testsuites(args)
597
+
598
+ logging.info(
599
+ "Reading event streams from: {}:{} with user {}".format(
600
+ args.event_stream_host, args.event_stream_port, args.event_stream_user
601
+ )
602
+ )
603
+ try:
604
+ gh_event_conn = redis.StrictRedis(
605
+ host=args.event_stream_host,
606
+ port=args.event_stream_port,
607
+ decode_responses=False, # don't decode: the stream carries binary archives
608
+ password=args.event_stream_pass,
609
+ username=args.event_stream_user,
610
+ health_check_interval=REDIS_HEALTH_CHECK_INTERVAL,
611
+ socket_connect_timeout=REDIS_SOCKET_TIMEOUT,
612
+ socket_keepalive=True,
613
+ )
614
+ gh_event_conn.ping()
615
+ except redis.exceptions.ConnectionError as e:
616
+ logging.error(
617
+ "Unable to connect to redis available at: {}:{} to read the event streams".format(
618
+ args.event_stream_host, args.event_stream_port
619
+ )
620
+ )
621
+ logging.error("Error message {}".format(e.__str__()))
622
+ exit(1)
623
+ datasink_conn = None
624
+ if args.datasink_push_results_redistimeseries:
625
+ logging.info(
626
+ "Checking redistimeseries datasink connection is available at: {}:{} to push the timeseries data".format(
627
+ args.datasink_redistimeseries_host, args.datasink_redistimeseries_port
628
+ )
629
+ )
630
+ try:
631
+ datasink_conn = redis.StrictRedis(
632
+ host=args.datasink_redistimeseries_host,
633
+ port=args.datasink_redistimeseries_port,
634
+ decode_responses=True,
635
+ password=args.datasink_redistimeseries_pass,
636
+ username=args.datasink_redistimeseries_user,
637
+ health_check_interval=REDIS_HEALTH_CHECK_INTERVAL,
638
+ socket_connect_timeout=REDIS_SOCKET_TIMEOUT,
639
+ socket_keepalive=True,
640
+ )
641
+ datasink_conn.ping()
642
+ except redis.exceptions.ConnectionError as e:
643
+ logging.error(
644
+ "Unable to connect to redis available at: {}:{}".format(
645
+ args.datasink_redistimeseries_host,
646
+ args.datasink_redistimeseries_port,
647
+ )
648
+ )
649
+ logging.error("Error message {}".format(e.__str__()))
650
+ exit(1)
651
+
652
+ logging.info("checking build spec requirements")
653
+ running_platform = args.platform_name
654
+ build_runners_consumer_group_create(gh_event_conn, running_platform, args.arch)
655
+
656
+ # Clear pending messages and reset consumer group position by default (unless explicitly skipped)
657
+ if not args.skip_clear_pending_on_startup:
658
+ consumer_pos = args.consumer_pos
659
+ logging.info(
660
+ "Clearing pending messages and resetting consumer group position on startup (default behavior)"
661
+ )
662
+ clear_pending_messages_for_consumer(
663
+ gh_event_conn, running_platform, consumer_pos, args.arch
664
+ )
665
+ reset_consumer_group_to_latest(gh_event_conn, running_platform, args.arch)
666
+ else:
667
+ logging.info(
668
+ "Skipping pending message cleanup and consumer group reset as requested"
669
+ )
670
+
671
+ stream_id = None
672
+ docker_client = docker.from_env()
673
+ home = str(Path.home())
674
+ cpuset_start_pos = args.cpuset_start_pos
675
+ logging.info("Start CPU pinning at position {}".format(cpuset_start_pos))
676
+ redis_proc_start_port = args.redis_proc_start_port
677
+ logging.info("Redis Processes start port: {}".format(redis_proc_start_port))
678
+
679
+ priority_lower_limit = args.tests_priority_lower_limit
680
+ priority_upper_limit = args.tests_priority_upper_limit
681
+
682
+ logging.info(
683
+ f"Using priority for test filters [{priority_lower_limit},{priority_upper_limit}]"
684
+ )
685
+
686
+ default_baseline_branch, default_metrics_str = extract_default_branch_and_metric(
687
+ args.defaults_filename
688
+ )
689
+
690
+ # TODO: confirm we do have enough cores to run the spec
691
+ # available_cpus = args.cpu_count
692
+ datasink_push_results_redistimeseries = args.datasink_push_results_redistimeseries
693
+ grafana_profile_dashboard = args.grafana_profile_dashboard
694
+
695
+ defaults_filename = args.defaults_filename
696
+ get_defaults_result = get_defaults(defaults_filename)
697
+ # Handle variable number of return values from get_defaults
698
+ if len(get_defaults_result) >= 3:
699
+ default_metrics = get_defaults_result[2]
700
+ else:
701
+ default_metrics = []
702
+ logging.warning(
703
+ "get_defaults returned fewer values than expected, using empty default_metrics"
704
+ )
705
+
706
+ # Consumer id
707
+ consumer_pos = args.consumer_pos
708
+ logging.info("Consumer pos {}".format(consumer_pos))
709
+
710
+ # Arch
711
+ arch = args.arch
712
+ logging.info("Running for arch: {}".format(arch))
713
+
714
+ # Github token
715
+ github_token = args.github_token
716
+ if github_token is not None:
717
+ logging.info("Detected GITHUB token. will push PR comments with updates")
718
+
719
+ # Docker air gap usage
720
+ docker_air_gap = args.docker_air_gap
721
+ if docker_air_gap:
722
+ logging.info(
723
+ "Using docker in an air-gapped way. Restoring running images from redis keys."
724
+ )
725
+
726
+ profilers_list = []
727
+ profilers_enabled = args.enable_profilers
728
+ if profilers_enabled:
729
+ profilers_list = args.profilers.split(",")
730
+ res = check_compatible_system_and_kernel_and_prepare_profile(args)
731
+ if res is False:
732
+ logging.error(
733
+ "Requested for the following profilers to be enabled but something went wrong: {}.".format(
734
+ " ".join(profilers_list)
735
+ )
736
+ )
737
+ exit(1)
738
+
739
+ override_memtier_test_time = args.override_memtier_test_time
740
+ if override_memtier_test_time > 0:
741
+ logging.info(
742
+ "Overriding memtier benchmark --test-time to {} seconds".format(
743
+ override_memtier_test_time
744
+ )
745
+ )
746
+ logging.info("Entering blocking read waiting for work.")
747
+ if stream_id is None:
748
+ stream_id = args.consumer_start_id
749
+ while True:
750
+ _, stream_id, _, _ = self_contained_coordinator_blocking_read(
751
+ gh_event_conn,
752
+ datasink_push_results_redistimeseries,
753
+ docker_client,
754
+ home,
755
+ stream_id,
756
+ datasink_conn,
757
+ testsuite_spec_files,
758
+ topologies_map,
759
+ running_platform,
760
+ profilers_enabled,
761
+ profilers_list,
762
+ grafana_profile_dashboard,
763
+ cpuset_start_pos,
764
+ redis_proc_start_port,
765
+ consumer_pos,
766
+ docker_air_gap,
767
+ override_memtier_test_time,
768
+ default_metrics,
769
+ arch,
770
+ github_token,
771
+ priority_lower_limit,
772
+ priority_upper_limit,
773
+ default_baseline_branch,
774
+ default_metrics_str,
775
+ )
776
+
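main() then blocks indefinitely in the read loop above. For intuition, a sketch of how a producer could enqueue work for this coordinator; the stream key shown is hypothetical, the real one comes from get_arch_specific_stream_name():

import redis

r = redis.StrictRedis()
# XREADGROUP delivers each entry to exactly one consumer in the group,
# so several coordinators can share one arch-specific stream safely
r.xadd(
    "benchmark_events:amd64",  # hypothetical stream name
    {"git_hash": "abc123", "run_image": "redis:7.2"},
)
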
777
+
778
+ def check_health(container):
779
+ logging.info(container.attrs["State"])
780
+ health_status = container.attrs["State"].get("Health", {}).get("Status")
781
+ return health_status
782
+
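Two caveats when calling check_health(): container.attrs is a cached snapshot, so poll with reload(), and the Health key only exists for images that define a HEALTHCHECK. A sketch:

import docker

client = docker.from_env()
container = client.containers.run("redis:7.2", detach=True)
container.reload()  # refresh the cached attrs snapshot
status = check_health(container)  # None unless the image has a HEALTHCHECK
print(status)
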
783
+
784
+ def self_contained_coordinator_blocking_read(
785
+ github_event_conn,
786
+ datasink_push_results_redistimeseries,
787
+ docker_client,
788
+ home,
789
+ stream_id,
790
+ datasink_conn,
791
+ testsuite_spec_files,
792
+ topologies_map,
793
+ platform_name,
794
+ profilers_enabled,
795
+ profilers_list,
796
+ grafana_profile_dashboard="",
797
+ cpuset_start_pos=0,
798
+ redis_proc_start_port=6379,
799
+ consumer_pos=1,
800
+ docker_air_gap=False,
801
+ override_test_time=1,
802
+ default_metrics=None,
803
+ arch="amd64",
804
+ github_token=None,
805
+ priority_lower_limit=0,
806
+ priority_upper_limit=10000,
807
+ default_baseline_branch="unstable",
808
+ default_metrics_str="ALL_STATS.Totals.Ops/sec",
809
+ docker_keep_env=False,
810
+ restore_build_artifacts_default=True,
811
+ ):
812
+ num_process_streams = 0
813
+ num_process_test_suites = 0
814
+ overall_result = False
815
+ consumer_name = "{}-self-contained-proc#{}".format(
816
+ get_runners_consumer_group_name(platform_name), consumer_pos
817
+ )
818
+ logging.info(
819
+ "Consuming from group {}. Consumer id {}".format(
820
+ get_runners_consumer_group_name(platform_name), consumer_name
821
+ )
822
+ )
823
+ # Use architecture-specific stream
824
+ arch_specific_stream = get_arch_specific_stream_name(arch)
825
+ logging.info(
826
+ f"Reading work from architecture-specific stream: {arch_specific_stream}"
827
+ )
828
+ newTestInfo = github_event_conn.xreadgroup(
829
+ get_runners_consumer_group_name(platform_name),
830
+ consumer_name,
831
+ {arch_specific_stream: stream_id},
832
+ count=1,
833
+ block=0,
834
+ )
835
+ logging.info(f"New test info: {newTestInfo}")
836
+ if len(newTestInfo[0]) < 2 or len(newTestInfo[0][1]) < 1:
837
+ stream_id = ">"
838
+ else:
839
+ # Create args object with topology parameter
840
+ class Args:
841
+ def __init__(self):
842
+ self.topology = ""
843
+
844
+ args = Args()
845
+
846
+ (
847
+ stream_id,
848
+ overall_result,
849
+ total_test_suite_runs,
850
+ ) = process_self_contained_coordinator_stream(
851
+ github_event_conn,
852
+ datasink_push_results_redistimeseries,
853
+ docker_client,
854
+ home,
855
+ newTestInfo,
856
+ datasink_conn,
857
+ testsuite_spec_files,
858
+ topologies_map,
859
+ platform_name,
860
+ profilers_enabled,
861
+ profilers_list,
862
+ grafana_profile_dashboard,
863
+ cpuset_start_pos,
864
+ redis_proc_start_port,
865
+ docker_air_gap,
866
+ "defaults.yml",
867
+ override_test_time,
868
+ default_metrics,
869
+ arch,
870
+ github_token,
871
+ priority_lower_limit,
872
+ priority_upper_limit,
873
+ default_baseline_branch,
874
+ default_metrics_str,
875
+ docker_keep_env,
876
+ restore_build_artifacts_default,
877
+ args,
878
+ )
879
+ num_process_streams = num_process_streams + 1
880
+ num_process_test_suites = num_process_test_suites + total_test_suite_runs
881
+
882
+ # Always acknowledge the message, even if it was filtered out
883
+ arch_specific_stream = get_arch_specific_stream_name(arch)
884
+ ack_reply = github_event_conn.xack(
885
+ arch_specific_stream,
886
+ get_runners_consumer_group_name(platform_name),
887
+ stream_id,
888
+ )
889
+ if isinstance(ack_reply, bytes):
890
+ ack_reply = ack_reply.decode()
891
+ if ack_reply == "1" or ack_reply == 1:
892
+ if overall_result is True:
893
+ logging.info(
894
+ "Successfully acknowledged BENCHMARK variation stream with id {} (processed).".format(
895
+ stream_id
896
+ )
897
+ )
898
+ else:
899
+ logging.info(
900
+ "Successfully acknowledged BENCHMARK variation stream with id {} (filtered/skipped).".format(
901
+ stream_id
902
+ )
903
+ )
904
+ else:
905
+ logging.error(
906
+ "Unable to acknowledge build variation stream with id {}. XACK reply {}".format(
907
+ stream_id, ack_reply
908
+ )
909
+ )
910
+ return overall_result, stream_id, num_process_streams, num_process_test_suites
911
+
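The newTestInfo[0][1][0] indexing above follows the shape of XREADGROUP replies, roughly [[stream_name, [(message_id, fields), ...]], ...]. For example:

# with decode_responses=False the client returns bytes throughout
reply = [[b"stream", [(b"1700000000000-0", {b"git_hash": b"abc123"})]]]
message_id, test_details = reply[0][1][0]
assert message_id == b"1700000000000-0"
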
912
+
913
+ #
914
+ # def prepare_memtier_benchmark_parameters(
915
+ # clientconfig,
916
+ # full_benchmark_path,
917
+ # port,
918
+ # server,
919
+ # local_benchmark_output_filename,
920
+ # oss_cluster_api_enabled,
921
+ # ):
922
+ # benchmark_command = [
923
+ # full_benchmark_path,
924
+ # "--port",
925
+ # "{}".format(port),
926
+ # "--server",
927
+ # "{}".format(server),
928
+ # "--json-out-file",
929
+ # local_benchmark_output_filename,
930
+ # ]
931
+ # if oss_cluster_api_enabled is True:
932
+ # benchmark_command.append("--cluster-mode")
933
+ # benchmark_command_str = " ".join(benchmark_command)
934
+ # if "arguments" in clientconfig:
935
+ # benchmark_command_str = benchmark_command_str + " " + clientconfig["arguments"]
936
+ #
937
+ # return None, benchmark_command_str
938
+
939
+
940
+ def process_self_contained_coordinator_stream(
941
+ github_event_conn,
942
+ datasink_push_results_redistimeseries,
943
+ docker_client,
944
+ home,
945
+ newTestInfo,
946
+ datasink_conn,
947
+ testsuite_spec_files,
948
+ topologies_map,
949
+ running_platform,
950
+ profilers_enabled=False,
951
+ profilers_list=[],
952
+ grafana_profile_dashboard="",
953
+ cpuset_start_pos=0,
954
+ redis_proc_start_port=6379,
955
+ default_docker_air_gap=False,
956
+ defaults_filename="defaults.yml",
957
+ override_test_time=0,
958
+ default_metrics=[],
959
+ arch="amd64",
960
+ github_token=None,
961
+ priority_lower_limit=0,
962
+ priority_upper_limit=10000,
963
+ default_baseline_branch="unstable",
964
+ default_metrics_str="ALL_STATS.Totals.Ops/sec",
965
+ docker_keep_env=False,
966
+ restore_build_artifacts_default=True,
967
+ args=None,
968
+ redis_password="redis_coordinator_password_2024",
969
+ ):
970
+ stream_id = "n/a"
971
+ overall_result = False
972
+ total_test_suite_runs = 0
973
+ # github updates
974
+ is_actionable_pr = False
975
+ contains_benchmark_run_comment = False
976
+ github_pr = None
977
+ old_benchmark_run_comment_body = ""
978
+ pr_link = ""
979
+ regression_comment = None
980
+ pull_request = None
981
+ auto_approve_github = True
982
+ # defaults
983
+ default_github_org = "redis"
984
+ default_github_repo = "redis"
985
+ restore_build_artifacts = restore_build_artifacts_default
986
+
987
+ try:
988
+ stream_id, testDetails = newTestInfo[0][1][0]
989
+ stream_id = stream_id.decode()
990
+ logging.info("Received work . Stream id {}.".format(stream_id))
991
+
992
+ if b"run_image" in testDetails:
993
+ (
994
+ build_variant_name,
995
+ metadata,
996
+ build_artifacts,
997
+ git_hash,
998
+ git_branch,
999
+ git_version,
1000
+ run_image,
1001
+ use_git_timestamp,
1002
+ git_timestamp_ms,
1003
+ run_arch,
1004
+ ) = extract_build_info_from_streamdata(testDetails)
1005
+
1006
+ # Check if this work should be ignored due to flush
1007
+ global _flush_timestamp
1008
+ if (
1009
+ _flush_timestamp is not None
1010
+ and use_git_timestamp
1011
+ and git_timestamp_ms is not None
1012
+ ):
1013
+ # Convert flush timestamp to milliseconds for comparison
1014
+ flush_timestamp_ms = int(_flush_timestamp.timestamp() * 1000)
1015
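+ # e.g. a flush issued at 2024-01-02 00:00:00 UTC yields
+ # int(1704153600.0 * 1000) == 1704153600000; any commit whose
+ # git_timestamp_ms predates that value is skipped below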
+ if git_timestamp_ms < flush_timestamp_ms:
1016
+ logging.info(
1017
+ f"Ignoring work with git_timestamp_ms {git_timestamp_ms} "
1018
+ f"(before flush timestamp {flush_timestamp_ms}). Stream id: {stream_id}"
1019
+ )
1020
+ return stream_id, False, 0
1021
+
1022
+ tf_github_org = default_github_org
1023
+ if b"github_org" in testDetails:
1024
+ tf_github_org = testDetails[b"github_org"].decode()
1025
+ logging.info(
1026
+ f"detected a github_org definition on the streamdata: {tf_github_org}. Overriding the default one: {default_github_org}"
1027
+ )
1028
+ tf_github_repo = default_github_repo
1029
+ if b"github_repo" in testDetails:
1030
+ tf_github_repo = testDetails[b"github_repo"].decode()
1031
+ logging.info(
1032
+ f"detected a github_org definition on the streamdata: {tf_github_repo}. Overriding the default one: {default_github_repo}"
1033
+ )
1034
+
1035
+ mnt_point = "/mnt/redis/"
1036
+ if b"mnt_point" in testDetails:
1037
+ mnt_point = testDetails[b"mnt_point"].decode()
1038
+ logging.info(
1039
+ f"detected a mnt_point definition on the streamdata: {mnt_point}."
1040
+ )
1041
+
1042
+ executable = f"{mnt_point}redis-server"
1043
+ if b"executable" in testDetails:
1044
+ executable = testDetails[b"executable"].decode()
1045
+ logging.info(
1046
+ f"detected a executable definition on the streamdata: {executable}."
1047
+ )
1048
+
1049
+ server_name = "redis"
1050
+ if b"server_name" in testDetails:
1051
+ server_name = testDetails[b"server_name"].decode()
1052
+ logging.info(
1053
+ f"detected a server_name definition on the streamdata: {server_name}."
1054
+ )
1055
+ new_executable = f"{mnt_point}{server_name}-server"
1056
+ logging.info(
1057
+ f"changing executable from {executable} to {new_executable}"
1058
+ )
1059
+ executable = new_executable
1060
+
1061
+ if b"restore_build_artifacts" in testDetails:
1062
+ restore_build_artifacts = (
1063
+ testDetails[b"restore_build_artifacts"].decode().lower()
1064
+ in ("1", "true", "yes") # bool() of any non-empty string is True
+ )
1065
+ logging.info(
1066
+ f"detected a restore_build_artifacts config {restore_build_artifacts} overriding the default just for this test"
1067
+ )
1068
+
1069
+ test_docker_air_gap = default_docker_air_gap
1070
+ # check if we override the docker air gap on this test details
1071
+ if b"docker_air_gap" in testDetails:
1072
+ test_docker_air_gap = testDetails[b"docker_air_gap"].decode().lower() in ("1", "true", "yes")
1073
+ logging.info(
1074
+ f"detected a docker air gap config {test_docker_air_gap} overriding the default of {default_docker_air_gap} just for this test"
1075
+ )
1076
+
1077
+ if b"priority_upper_limit" in testDetails:
1078
+ stream_priority_upper_limit = int(
1079
+ testDetails[b"priority_upper_limit"].decode()
1080
+ )
1081
+ logging.info(
1082
+ f"detected a priority_upper_limit definition on the streamdata {stream_priority_upper_limit}. will replace the default upper limit of {priority_upper_limit}"
1083
+ )
1084
+ priority_upper_limit = stream_priority_upper_limit
1085
+
1086
+ if b"priority_lower_limit" in testDetails:
1087
+ stream_priority_lower_limit = int(
1088
+ testDetails[b"priority_lower_limit"].decode()
1089
+ )
1090
+ logging.info(
1091
+ f"detected a priority_lower_limit definition on the streamdata {stream_priority_lower_limit}. will replace the default lower limit of {priority_lower_limit}"
1092
+ )
1093
+ priority_lower_limit = stream_priority_lower_limit
1094
+
1095
+ if b"pull_request" in testDetails:
1096
+ pull_request = testDetails[b"pull_request"].decode()
1097
+ logging.info(
1098
+ f"detected a pull_request definition on the streamdata {pull_request}"
1099
+ )
1100
+ verbose = True
1101
+ fn = check_benchmark_running_comment
1102
+ (
1103
+ contains_benchmark_run_comment,
1104
+ github_pr,
1105
+ is_actionable_pr,
1106
+ old_benchmark_run_comment_body,
1107
+ pr_link,
1108
+ benchmark_run_comment,
1109
+ ) = check_github_available_and_actionable(
1110
+ fn, github_token, pull_request, "redis", "redis", verbose
1111
+ )
1112
+
1113
+ tests_regexp = ".*"
1114
+ if b"tests_regexp" in testDetails:
1115
+ tests_regexp = testDetails[b"tests_regexp"].decode()
1116
+ logging.info(
1117
+ f"detected a regexp definition on the streamdata {tests_regexp}"
1118
+ )
1119
+
1120
+ command_groups_regexp = None
1121
+ if b"tests_groups_regexp" in testDetails:
1122
+ command_groups_regexp = testDetails[b"tests_groups_regexp"].decode()
1123
+ logging.info(
1124
+ f"detected a command groups regexp definition on the streamdata {command_groups_regexp}"
1125
+ )
1126
+
1127
+ command_regexp = None
1128
+ if b"command_regexp" in testDetails:
1129
+ command_regexp = testDetails[b"command_regexp"].decode()
1130
+ logging.info(
1131
+ f"detected a command regexp definition on the streamdata {command_regexp}"
1132
+ )
1133
+
1134
+ skip_test = False
1135
+ if b"platform" in testDetails:
1136
+ platform = testDetails[b"platform"]
1137
+ # Decode bytes to string for proper comparison
1138
+ platform_str = (
1139
+ platform.decode() if isinstance(platform, bytes) else platform
1140
+ )
1141
+ if running_platform != platform_str:
1142
+ skip_test = True
1143
+ logging.info(
1144
+ "skipping stream_id {} given plaform {}!={}".format(
1145
+ stream_id, running_platform, platform_str
1146
+ )
1147
+ )
1148
+
1149
+ if run_arch != arch:
1150
+ skip_test = True
1151
+ logging.info(
1152
+ "skipping stream_id {} given arch {}!={}".format(
1153
+ stream_id, run_arch, arch
1154
+ )
1155
+ )
1156
+
1157
+ if skip_test is False:
1158
+ overall_result = True
1159
+ profiler_dashboard_links = []
1160
+ if test_docker_air_gap:
1161
+ airgap_key = "docker:air-gap:{}".format(run_image)
1162
+ logging.info(
1163
+ "Restoring docker image: {} from {}".format(
1164
+ run_image, airgap_key
1165
+ )
1166
+ )
1167
+ airgap_docker_image_bin = github_event_conn.get(airgap_key)
1168
+ images_loaded = docker_client.images.load(airgap_docker_image_bin)
1169
+ logging.info("Successfully loaded images {}".format(images_loaded))
1170
+
1171
+ stream_time_ms = stream_id.split("-")[0]
1172
+ zset_running_platform_benchmarks = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{running_platform}:zset"
1173
+ res = github_event_conn.zadd(
1174
+ zset_running_platform_benchmarks,
1175
+ {stream_id: stream_time_ms},
1176
+ )
1177
+ logging.info(
1178
+ f"Added stream with id {stream_id} to zset {zset_running_platform_benchmarks}. res={res}"
1179
+ )
1180
+
1181
+ stream_test_list_pending = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_pending"
1182
+ stream_test_list_running = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_running"
1183
+ stream_test_list_failed = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_failed"
1184
+ stream_test_list_completed = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{stream_id}:{running_platform}:tests_completed"
1185
+
1186
+ filtered_test_files = filter_test_files(
1187
+ defaults_filename,
1188
+ priority_lower_limit,
1189
+ priority_upper_limit,
1190
+ tests_regexp,
1191
+ testsuite_spec_files,
1192
+ command_groups_regexp,
1193
+ command_regexp,
1194
+ )
1195
+
1196
+ logging.info(
1197
+ f"Adding {len(filtered_test_files)} tests to pending test list"
1198
+ )
1199
+
1200
+ # Use pipeline for efficient bulk operations
1201
+ pipeline = github_event_conn.pipeline()
1202
+ test_names_added = []
1203
+
1204
+ for test_file in filtered_test_files:
1205
+ with open(test_file, "r") as stream:
1206
+ (
1207
+ _,
1208
+ benchmark_config,
1209
+ test_name,
1210
+ ) = get_final_benchmark_config(None, None, stream, "")
1211
+ pipeline.lpush(stream_test_list_pending, test_name)
1212
+ test_names_added.append(test_name)
1213
+ logging.debug(
1214
+ f"Queued test named {test_name} for addition to pending test list"
1215
+ )
1216
+
1217
+ # Set expiration and execute pipeline
1218
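+ # the pipeline ships every queued LPUSH plus the EXPIRE in a single
+ # round trip, instead of one network hop per test file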
+ pipeline.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
1219
+ pipeline.execute()
1220
+
1221
+ logging.info(
1222
+ f"Successfully added {len(test_names_added)} tests to pending test list in key {stream_test_list_pending}"
1223
+ )
1224
+
1225
+ pending_tests = len(filtered_test_files)
1226
+ failed_tests = 0
1227
+ benchmark_suite_start_datetime = datetime.datetime.utcnow()
1228
+ # update on github if needed
1229
+ if is_actionable_pr:
1230
+ comment_body = generate_benchmark_started_pr_comment(
1231
+ stream_id,
1232
+ pending_tests,
1233
+ len(filtered_test_files),
1234
+ failed_tests,
1235
+ benchmark_suite_start_datetime,
1236
+ 0,
1237
+ )
1238
+ if contains_benchmark_run_comment:
1239
+ update_comment_if_needed(
1240
+ auto_approve_github,
1241
+ comment_body,
1242
+ old_benchmark_run_comment_body,
1243
+ benchmark_run_comment,
1244
+ verbose,
1245
+ )
1246
+ else:
1247
+ benchmark_run_comment = create_new_pr_comment(
1248
+ auto_approve_github, comment_body, github_pr, pr_link
1249
+ )
1250
+
1251
+ for test_file in filtered_test_files:
1252
+ # Check if queue reset was requested
1253
+ global _reset_queue_requested
1254
+ if _reset_queue_requested:
1255
+ logging.info(
1256
+ "Queue reset requested. Skipping remaining tests and clearing queues."
1257
+ )
1258
+ # Clear all pending tests from the queue
1259
+ github_event_conn.delete(stream_test_list_pending)
1260
+ github_event_conn.delete(stream_test_list_running)
1261
+ logging.info("Cleared pending and running test queues")
1262
+ _reset_queue_requested = False
1263
+ break
1264
+
1265
+ # Clean up system processes if in exclusive hardware mode
1266
+ cleanup_system_processes()
1267
+
1268
+ redis_containers = []
1269
+ client_containers = []
1270
+ with open(test_file, "r") as stream:
1271
+ (
1272
+ _,
1273
+ benchmark_config,
1274
+ test_name,
1275
+ ) = get_final_benchmark_config(None, None, stream, "")
1276
+ github_event_conn.lrem(stream_test_list_pending, 1, test_name)
1277
+ github_event_conn.lpush(stream_test_list_running, test_name)
1278
+ github_event_conn.expire(
1279
+ stream_test_list_running, REDIS_BINS_EXPIRE_SECS
1280
+ )
1281
+ logging.debug(
1282
+ f"Added test named {test_name} to the running test list in key {stream_test_list_running}"
1283
+ )
1284
+ (
1285
+ _,
1286
+ _,
1287
+ redis_configuration_parameters,
1288
+ _,
1289
+ _,
1290
+ ) = extract_redis_dbconfig_parameters(
1291
+ benchmark_config, "dbconfig"
1292
+ )
1293
+ build_variants = extract_build_variant_variations(
1294
+ benchmark_config
1295
+ )
1296
+ if build_variants is not None:
1297
+ logging.info("Detected build variant filter")
1298
+ if build_variant_name not in build_variants:
1299
+ logging.info(
1300
+ "Skipping {} given it's not part of build-variants for this test-suite {}".format(
1301
+ build_variant_name, build_variants
1302
+ )
1303
+ )
1304
+ continue
1305
+ else:
1306
+ logging.info(
1307
+ "Running build variant {} given it's present on the build-variants spec {}".format(
1308
+ build_variant_name, build_variants
1309
+ )
1310
+ )
1311
+ for topology_spec_name in benchmark_config["redis-topologies"]:
1312
+ setup_name = topology_spec_name
1313
+ setup_type = "oss-standalone"
1314
+
1315
+ # Filter by topology if specified
1316
+ if (
1317
+ args is not None
1318
+ and args.topology
1319
+ and topology_spec_name != args.topology
1320
+ ):
1321
+ logging.info(
1322
+ f"Skipping topology {topology_spec_name} as it doesn't match the requested topology {args.topology}"
1323
+ )
1324
+ continue
1325
+
1326
+ if topology_spec_name in topologies_map:
1327
+ topology_spec = topologies_map[topology_spec_name]
1328
+ setup_type = topology_spec["type"]
1329
+ logging.info(
1330
+ f"Running topology named {topology_spec_name} of type {setup_type}"
1331
+ )
1332
+ test_result = False
1333
+ redis_container = None
1334
+ try:
1335
+ current_cpu_pos = cpuset_start_pos
1336
+ ceil_db_cpu_limit = extract_db_cpu_limit(
1337
+ topologies_map, topology_spec_name
1338
+ )
1339
+ redis_arguments = (
1340
+ extract_redis_configuration_from_topology(
1341
+ topologies_map, topology_spec_name
1342
+ )
1343
+ )
1344
+ temporary_dir = tempfile.mkdtemp(dir=home)
1345
+ temporary_dir_client = tempfile.mkdtemp(dir=home)
1346
+ logging.info(
1347
+ "Using local temporary dir to persist redis build artifacts. Path: {}".format(
1348
+ temporary_dir
1349
+ )
1350
+ )
1351
+ logging.info(
1352
+ "Using local temporary dir to persist client output files. Path: {}".format(
1353
+ temporary_dir_client
1354
+ )
1355
+ )
1356
+
1357
+ tf_triggering_env = "ci"
1358
+ github_actor = "{}-{}".format(
1359
+ tf_triggering_env, running_platform
1360
+ )
1361
+ dso = server_name
1362
+ profilers_artifacts_matrix = []
1363
+
1364
+ collection_summary_str = ""
1365
+ if profilers_enabled:
1366
+ collection_summary_str = (
1367
+ local_profilers_platform_checks(
1368
+ dso,
1369
+ github_actor,
1370
+ git_branch,
1371
+ tf_github_repo,
1372
+ git_hash,
1373
+ )
1374
+ )
1375
+ logging.info(
1376
+ "Using the following collection summary string for profiler description: {}".format(
1377
+ collection_summary_str
1378
+ )
1379
+ )
1380
+ if restore_build_artifacts:
1381
+ restore_build_artifacts_from_test_details(
1382
+ build_artifacts,
1383
+ github_event_conn,
1384
+ temporary_dir,
1385
+ testDetails,
1386
+ )
1387
+
1388
+ command = generate_standalone_redis_server_args(
1389
+ executable,
1390
+ redis_proc_start_port,
1391
+ mnt_point,
1392
+ redis_configuration_parameters,
1393
+ redis_arguments,
1394
+ redis_password,
1395
+ )
1396
+ command_str = " ".join(command)
1397
+ db_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus(
1398
+ ceil_db_cpu_limit, current_cpu_pos
1399
+ )
1400
+ redis_container = start_redis_container(
1401
+ command_str,
1402
+ db_cpuset_cpus,
1403
+ docker_client,
1404
+ mnt_point,
1405
+ redis_containers,
1406
+ run_image,
1407
+ temporary_dir,
1408
+ )
1409
+
1410
+ r = redis.StrictRedis(
1411
+ port=redis_proc_start_port, password=redis_password
1412
+ )
1413
+ r.ping()
1414
+ redis_conns = [r]
1415
+ reset_commandstats(redis_conns)
1416
+ redis_pids = []
1417
+ redis_info = r.info()
1418
+ first_redis_pid = redis_info.get("process_id")
1419
+ if first_redis_pid is None:
1420
+ logging.warning(
1421
+ "Redis process_id not found in INFO command"
1422
+ )
1423
+ first_redis_pid = "unknown"
1424
+ if git_hash is None and "redis_git_sha1" in redis_info:
1425
+ git_hash = redis_info["redis_git_sha1"]
1426
+ if (
1427
+ git_hash == "" or git_hash == 0
1428
+ ) and "redis_build_id" in redis_info:
1429
+ git_hash = redis_info["redis_build_id"]
1430
+ logging.info(
1431
+ f"Given git_hash was None, we've collected that info from the server reply. git_hash={git_hash}"
1432
+ )
1433
+
1434
+ server_version_keyname = f"{server_name}_version"
1435
+ if (
1436
+ git_version is None
1437
+ and server_version_keyname in redis_info
1438
+ ):
1439
+ git_version = redis_info[server_version_keyname]
1440
+ logging.info(
1441
+ f"Given git_version was None, we've collected that info from the server reply key named {server_version_keyname}. git_version={git_version}"
1442
+ )
1443
+ redis_pids.append(first_redis_pid)
1444
+ ceil_client_cpu_limit = extract_client_cpu_limit(
1445
+ benchmark_config
1446
+ )
1447
+ (
1448
+ client_cpuset_cpus,
1449
+ current_cpu_pos,
1450
+ ) = generate_cpuset_cpus(
1451
+ ceil_client_cpu_limit, current_cpu_pos
1452
+ )
1453
+ client_mnt_point = "/mnt/client/"
1454
+ benchmark_tool_workdir = client_mnt_point
1455
+
1456
+ if "preload_tool" in benchmark_config["dbconfig"]:
1457
+ data_prepopulation_step(
1458
+ benchmark_config,
1459
+ benchmark_tool_workdir,
1460
+ client_cpuset_cpus,
1461
+ docker_client,
1462
+ git_hash,
1463
+ redis_proc_start_port,
1464
+ temporary_dir,
1465
+ test_name,
1466
+ redis_password,
1467
+ )
1468
+
1469
+ execute_init_commands(
1470
+ benchmark_config, r, dbconfig_keyname="dbconfig"
1471
+ )
1472
+
1473
+ benchmark_tool = extract_client_tool(benchmark_config)
1474
+ # backwards compatible
1475
+ if benchmark_tool is None:
1476
+ benchmark_tool = "redis-benchmark"
1477
+ if benchmark_tool == "vector_db_benchmark":
1478
+ full_benchmark_path = "python /code/run.py"
1479
+ else:
1480
+ full_benchmark_path = "/usr/local/bin/{}".format(
1481
+ benchmark_tool
1482
+ )
1483
+
1484
+ # setup the benchmark
1485
+ (
1486
+ start_time,
1487
+ start_time_ms,
1488
+ start_time_str,
1489
+ ) = get_start_time_vars()
1490
+ local_benchmark_output_filename = (
1491
+ get_local_run_full_filename(
1492
+ start_time_str,
1493
+ git_hash,
1494
+ test_name,
1495
+ setup_name,
1496
+ )
1497
+ )
1498
+ logging.info(
1499
+ "Will store benchmark json output to local file {}".format(
1500
+ local_benchmark_output_filename
1501
+ )
1502
+ )
1503
+ if "memtier_benchmark" in benchmark_tool:
1504
+ # prepare the benchmark command
1505
+ (
1506
+ _,
1507
+ benchmark_command_str,
1508
+ arbitrary_command,
1509
+ ) = prepare_memtier_benchmark_parameters(
1510
+ benchmark_config["clientconfig"],
1511
+ full_benchmark_path,
1512
+ redis_proc_start_port,
1513
+ "localhost",
1514
+ redis_password,
1515
+ local_benchmark_output_filename,
1516
+ False,
1517
+ False,
1518
+ False,
1519
+ None,
1520
+ None,
1521
+ None,
1522
+ None,
1523
+ override_test_time,
1524
+ )
1525
+ elif "vector_db_benchmark" in benchmark_tool:
1526
+ (
1527
+ _,
1528
+ benchmark_command_str,
1529
+ ) = prepare_vector_db_benchmark_parameters(
1530
+ benchmark_config["clientconfig"],
1531
+ full_benchmark_path,
1532
+ redis_proc_start_port,
1533
+ "localhost",
1534
+ None,
1535
+ client_mnt_point,
1536
+ )
1537
+ else:
1538
+ (
1539
+ benchmark_command,
1540
+ benchmark_command_str,
1541
+ ) = prepare_benchmark_parameters(
1542
+ benchmark_config,
1543
+ full_benchmark_path,
1544
+ redis_proc_start_port,
1545
+ "localhost",
1546
+ local_benchmark_output_filename,
1547
+ False,
1548
+ benchmark_tool_workdir,
1549
+ False,
1550
+ )
1551
+
1552
+ client_container_image = extract_client_container_image(
1553
+ benchmark_config
1554
+ )
1555
+ profiler_call_graph_mode = "dwarf"
1556
+ profiler_frequency = 99
1557
+ # start the profile
1558
+ (
1559
+ profiler_name,
1560
+ profilers_map,
1561
+ ) = profilers_start_if_required(
1562
+ profilers_enabled,
1563
+ profilers_list,
1564
+ redis_pids,
1565
+ setup_name,
1566
+ start_time_str,
1567
+ test_name,
1568
+ profiler_frequency,
1569
+ profiler_call_graph_mode,
1570
+ )
1571
+
1572
+ logging.info(
1573
+ "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format(
1574
+ client_container_image,
1575
+ client_cpuset_cpus,
1576
+ benchmark_command_str,
1577
+ )
1578
+ )
1579
+ # run the benchmark
1580
+ benchmark_start_time = datetime.datetime.now()
1581
+
1582
+ # Calculate container timeout
1583
+ container_timeout = 300 # 5 minutes default
1584
+ buffer_timeout = 60 # Default buffer
1585
+
1586
+ # Try to extract test time from command and add buffer
1587
+
1589
+ test_time_match = re.search(
1590
+ r"--?test-time[=\s]+(\d+)", benchmark_command_str
1591
+ )
1592
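+ # e.g. "--test-time 120" and "--test-time=120" both match,
+ # giving a 180s timeout with the default 60s buffer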
+ if test_time_match:
1593
+ test_time = int(test_time_match.group(1))
1594
+ container_timeout = test_time + buffer_timeout
1595
+ logging.info(
1596
+ f"Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
1597
+ )
1598
+ else:
1599
+ logging.info(
1600
+ f"Using default container timeout: {container_timeout}s"
1601
+ )
1602
+
1603
+ try:
1604
+ # Start container with detach=True to enable timeout handling
1605
+ container = docker_client.containers.run(
1606
+ image=client_container_image,
1607
+ volumes={
1608
+ temporary_dir_client: {
1609
+ "bind": client_mnt_point,
1610
+ "mode": "rw",
1611
+ },
1612
+ },
1613
+ auto_remove=False, # Don't auto-remove so we can get logs if timeout
1614
+ privileged=True,
1615
+ working_dir=benchmark_tool_workdir,
1616
+ command=benchmark_command_str,
1617
+ network_mode="host",
1618
+ detach=True, # Detach to enable timeout
1619
+ cpuset_cpus=client_cpuset_cpus,
1620
+ )
1621
+
1622
+ logging.info(
1623
+ f"Started container {container.name} ({container.id[:12]}) with {container_timeout}s timeout"
1624
+ )
1625
+
1626
+ # Wait for container with timeout
1627
+ try:
1628
+ result = container.wait(
1629
+ timeout=container_timeout
1630
+ )
1631
+ client_container_stdout = container.logs(
1632
+ stdout=True, stderr=False
1633
+ ).decode("utf-8")
1634
+ container_stderr = container.logs(
1635
+ stdout=False, stderr=True
1636
+ ).decode("utf-8")
1637
+
1638
+ # Check exit code
1639
+ if result["StatusCode"] != 0:
1640
+ logging.error(
1641
+ f"Container exited with code {result['StatusCode']}"
1642
+ )
1643
+ logging.error(
1644
+ f"Container stderr: {container_stderr}"
1645
+ )
1646
+ raise docker.errors.ContainerError(
1647
+ container,
1648
+ result["StatusCode"],
1649
+ benchmark_command_str,
1650
+ client_container_stdout,
1651
+ container_stderr,
1652
+ )
1653
+
1654
+ logging.info(
1655
+ f"Container {container.name} completed successfully"
1656
+ )
1657
+
1658
+ except Exception as timeout_error:
1659
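+ # docker-py surfaces wait() deadlines as requests-level timeout
+ # exceptions, hence the string match rather than a concrete type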
+ if "timeout" in str(timeout_error).lower():
1660
+ logging.error(
1661
+ f"Container {container.name} timed out after {container_timeout}s"
1662
+ )
1663
+ # Get logs before killing
1664
+ try:
1665
+ timeout_logs = container.logs(
1666
+ stdout=True, stderr=True
1667
+ ).decode("utf-8")
1668
+ logging.error(
1669
+ f"Container logs before timeout: {timeout_logs}"
1670
+ )
1671
+ except Exception:
1672
+ pass
1673
+ # Kill the container
1674
+ container.kill()
1675
+ raise Exception(
1676
+ f"Container timed out after {container_timeout} seconds"
1677
+ )
1678
+ else:
1679
+ raise timeout_error
1680
+ finally:
1681
+ # Clean up container
1682
+ try:
1683
+ container.remove(force=True)
1684
+ except Exception:
1685
+ pass
1686
+ except docker.errors.ContainerError as e:
1687
+ logging.info(
1688
+ "stdout: {}".format(
1689
+ e.container.logs(stdout=True)
1690
+ )
1691
+ )
1692
+ logging.info(
1693
+ "stderr: {}".format(
1694
+ e.container.logs(stderr=True)
1695
+ )
1696
+ )
1697
+ raise e
1698
+
1699
+ benchmark_end_time = datetime.datetime.now()
1700
+ benchmark_duration_seconds = (
1701
+ calculate_client_tool_duration_and_check(
1702
+ benchmark_end_time, benchmark_start_time
1703
+ )
1704
+ )
1705
+ logging.info(
1706
+ "output {}".format(client_container_stdout)
1707
+ )
1708
+
1709
+ (
1710
+ _,
1711
+ overall_tabular_data_map,
1712
+ ) = profilers_stop_if_required(
1713
+ datasink_push_results_redistimeseries,
1714
+ benchmark_duration_seconds,
1715
+ collection_summary_str,
1716
+ dso,
1717
+ tf_github_org,
1718
+ tf_github_repo,
1719
+ profiler_name,
1720
+ profilers_artifacts_matrix,
1721
+ profilers_enabled,
1722
+ profilers_map,
1723
+ redis_pids,
1724
+ S3_BUCKET_NAME,
1725
+ test_name,
1726
+ )
1727
+ if (
1728
+ profilers_enabled
1729
+ and datasink_push_results_redistimeseries
1730
+ ):
1731
+ datasink_profile_tabular_data(
1732
+ git_branch,
1733
+ tf_github_org,
1734
+ tf_github_repo,
1735
+ git_hash,
1736
+ overall_tabular_data_map,
1737
+ github_event_conn,
1738
+ setup_name,
1739
+ start_time_ms,
1740
+ start_time_str,
1741
+ test_name,
1742
+ tf_triggering_env,
1743
+ )
1744
+ if len(profilers_artifacts_matrix) == 0:
1745
+ logging.error(
1746
+ "No profiler artifact was retrieved"
1747
+ )
1748
+ else:
1749
+ profilers_artifacts = []
1750
+ for line in profilers_artifacts_matrix:
1751
+ artifact_name = line[2]
1752
+ s3_link = line[4]
1753
+ profilers_artifacts.append(
1754
+ {
1755
+ "artifact_name": artifact_name,
1756
+ "s3_link": s3_link,
1757
+ }
1758
+ )
1759
+ https_link = (
1760
+ generate_artifacts_table_grafana_redis(
1761
+ datasink_push_results_redistimeseries,
1762
+ grafana_profile_dashboard,
1763
+ profilers_artifacts,
1764
+ datasink_conn,
1765
+ setup_name,
1766
+ start_time_ms,
1767
+ start_time_str,
1768
+ test_name,
1769
+ tf_github_org,
1770
+ tf_github_repo,
1771
+ git_hash,
1772
+ git_branch,
1773
+ )
1774
+ )
1775
+ profiler_dashboard_links.append(
1776
+ [
1777
+ setup_name,
1778
+ test_name,
1779
+ " {} ".format(https_link),
1780
+ ]
1781
+ )
1782
+ logging.info(
1783
+ "Published new profile info for this testcase. Access it via: {}".format(
1784
+ https_link
1785
+ )
1786
+ )
1787
+
1788
+ # Delete all the perf artifacts, now that they are uploaded to S3.
1789
+ # The .script and .script.mainthread files are not part of the artifacts_matrix and thus have to be deleted separately
1790
+ line = profilers_artifacts_matrix[0]
1791
+ logging.info(
1792
+ "Deleting perf file {}".format(
1793
+ line[3].split(".")[0]
1794
+ + ".out.script.mainthread"
1795
+ )
1796
+ )
1797
+ os.remove(
1798
+ line[3].split(".")[0]
1799
+ + ".out.script.mainthread"
1800
+ )
1801
+ logging.info(
1802
+ "Deleteing perf file {}".format(
1803
+ line[3].split(".")[0] + ".out.script"
1804
+ )
1805
+ )
1806
+ os.remove(line[3].split(".")[0] + ".out.script")
1807
+ for line in profilers_artifacts_matrix:
1808
+ logging.info(
1809
+ "Deleting perf file {}".format(line[3])
1810
+ )
1811
+ os.remove(line[3])
1812
+
1813
+ datapoint_time_ms = start_time_ms
1814
+ if (
1815
+ use_git_timestamp is True
1816
+ and git_timestamp_ms is not None
1817
+ ):
1818
+ datapoint_time_ms = git_timestamp_ms
1819
+ if "vector_db_benchmark" in benchmark_tool:
1820
+ results_dict = post_process_vector_db(
1821
+ temporary_dir_client
1822
+ )
1823
+
1824
+ # Validate benchmark metrics for vector-db-benchmark
1825
+ is_valid, validation_error = (
1826
+ validate_benchmark_metrics(
1827
+ results_dict,
1828
+ test_name,
1829
+ benchmark_config,
1830
+ default_metrics,
1831
+ )
1832
+ )
1833
+ if not is_valid:
1834
+ logging.error(
1835
+ f"Test {test_name} failed metric validation: {validation_error}"
1836
+ )
1837
+ test_result = False
1838
+ failed_tests += 1
1839
+ continue
1840
+ else:
1841
+ post_process_benchmark_results(
1842
+ benchmark_tool,
1843
+ local_benchmark_output_filename,
1844
+ datapoint_time_ms,
1845
+ start_time_str,
1846
+ client_container_stdout,
1847
+ None,
1848
+ )
1849
+ full_result_path = local_benchmark_output_filename
1850
+ if "memtier_benchmark" in benchmark_tool:
1851
+ full_result_path = "{}/{}".format(
1852
+ temporary_dir_client,
1853
+ local_benchmark_output_filename,
1854
+ )
1855
+ logging.info(
1856
+ "Reading results json from {}".format(
1857
+ full_result_path
1858
+ )
1859
+ )
1860
+
1861
+ with open(
1862
+ full_result_path,
1863
+ "r",
1864
+ ) as json_file:
1865
+ results_dict = json.load(json_file)
1866
+
1867
+ # Validate benchmark metrics
1868
+ is_valid, validation_error = (
1869
+ validate_benchmark_metrics(
1870
+ results_dict,
1871
+ test_name,
1872
+ benchmark_config,
1873
+ default_metrics,
1874
+ )
1875
+ )
1876
+ if not is_valid:
1877
+ logging.error(
1878
+ f"Test {test_name} failed metric validation: {validation_error}"
1879
+ )
1880
+ test_result = False
1881
+ failed_tests += 1
1882
+ continue
1883
+
1884
+ print_results_table_stdout(
1885
+ benchmark_config,
1886
+ default_metrics,
1887
+ results_dict,
1888
+ setup_type,
1889
+ test_name,
1890
+ None,
1891
+ )
1892
+
1893
+ dataset_load_duration_seconds = 0
1894
+ try:
1895
+ exporter_datasink_common(
1896
+ benchmark_config,
1897
+ benchmark_duration_seconds,
1898
+ build_variant_name,
1899
+ datapoint_time_ms,
1900
+ dataset_load_duration_seconds,
1901
+ datasink_conn,
1902
+ datasink_push_results_redistimeseries,
1903
+ git_branch,
1904
+ git_version,
1905
+ metadata,
1906
+ redis_conns,
1907
+ results_dict,
1908
+ running_platform,
1909
+ setup_name,
1910
+ setup_type,
1911
+ test_name,
1912
+ tf_github_org,
1913
+ tf_github_repo,
1914
+ tf_triggering_env,
1915
+ topology_spec_name,
1916
+ default_metrics,
1917
+ git_hash,
1918
+ )
1919
+ r.shutdown(save=False)
1920
+
1921
+ except redis.exceptions.ConnectionError as e:
1922
+ logging.critical(
1923
+ "Some unexpected exception was caught during metric fetching. Skipping it..."
1924
+ )
1925
+ logging.critical(
1926
+ f"Exception type: {type(e).__name__}"
1927
+ )
1928
+ logging.critical(f"Exception message: {str(e)}")
1929
+ logging.critical("Traceback details:")
1930
+ logging.critical(traceback.format_exc())
1931
+ print("-" * 60)
1932
+ traceback.print_exc(file=sys.stdout)
1933
+ print("-" * 60)
1934
+
1935
+ test_result = True
1936
+ total_test_suite_runs = total_test_suite_runs + 1
1937
+
1938
+ except Exception as e:
1939
+ logging.critical(
1940
+ "Some unexpected exception was caught during local work. Failing test...."
1941
+ )
1942
+ logging.critical(f"Exception type: {type(e).__name__}")
1943
+ logging.critical(f"Exception message: {str(e)}")
1944
+ logging.critical("Traceback details:")
1945
+ logging.critical(traceback.format_exc())
1946
+ print("-" * 60)
1947
+ traceback.print_exc(file=sys.stdout)
1948
+ print("-" * 60)
1949
+ if redis_container is not None:
1950
+ logging.critical("Printing redis container log....")
1951
+
1952
+ print("-" * 60)
1953
+ try:
1954
+ print(
1955
+ redis_container.logs(
1956
+ stdout=True, stderr=True
1957
+ )
1958
+ )
1959
+ redis_container.stop()
1960
+ redis_container.remove()
1961
+ except docker.errors.NotFound:
1962
+ logging.info(
1963
+ "When trying to fetch logs from DB container with id {} and image {} it was already stopped".format(
1964
+ redis_container.id,
1965
+ redis_container.image,
1966
+ )
1967
+ )
1968
+ pass
1969
+
1970
+ print("-" * 60)
1971
+
1972
+ # Print all log files in the temporary directories for debugging
1973
+ logging.critical(
1974
+ "Printing all files in temporary directories for debugging..."
1975
+ )
1976
+ try:
1977
+ print_directory_logs(temporary_dir, "Redis server")
1978
+ print_directory_logs(temporary_dir_client, "Client")
1979
+ except Exception as log_error:
1980
+ logging.error(
1981
+ f"Failed to print directory logs: {log_error}"
1982
+ )
1983
+
1984
+ test_result = False
1985
+ # tear-down
1986
+ logging.info("Tearing down setup")
1987
+ if docker_keep_env is False:
1988
+ for redis_container in redis_containers:
1989
+ try:
1990
+ redis_container.stop()
1991
+ redis_container.remove()
1992
+ except docker.errors.NotFound:
1993
+ logging.info(
1994
+ "When trying to stop DB container with id {} and image {} it was already stopped".format(
1995
+ redis_container.id,
1996
+ redis_container.image,
1997
+ )
1998
+ )
1999
+ pass
2000
+
2001
+ for client_container in client_containers:
2002
+ if isinstance(client_container, Container):
2003
+ try:
2004
+ client_container.stop()
2005
+ client_container.remove()
2006
+ except docker.errors.NotFound:
2007
+ logging.info(
2008
+ "When trying to stop Client container with id {} and image {} it was already stopped".format(
2009
+ client_container.id,
2010
+ client_container.image,
2011
+ )
2012
+ )
2013
+ pass
2014
+
2015
+ # Only remove temporary directories if test passed
2016
+ if test_result:
2017
+ logging.info(
2018
+ "Test passed. Removing temporary dirs {} and {}".format(
2019
+ temporary_dir, temporary_dir_client
2020
+ )
2021
+ )
2022
+ shutil.rmtree(temporary_dir, ignore_errors=True)
2023
+ shutil.rmtree(
2024
+ temporary_dir_client, ignore_errors=True
2025
+ )
2026
+ else:
2027
+ logging.warning(
2028
+ "Test failed. Preserving temporary dirs for debugging: {} and {}".format(
2029
+ temporary_dir, temporary_dir_client
2030
+ )
2031
+ )
2032
+ # Print all log files in the temporary directories for debugging
2033
+ print_directory_logs(temporary_dir, "Redis server")
2034
+ print_directory_logs(temporary_dir_client, "Client")
2035
+
2036
+ overall_result &= test_result
2037
+
2038
+ # Clean up system processes after test completion if in exclusive hardware mode
2039
+ cleanup_system_processes()
2040
+
2041
+ github_event_conn.lrem(stream_test_list_running, 1, test_name)
2042
+ github_event_conn.lpush(stream_test_list_completed, test_name)
2043
+ github_event_conn.expire(
2044
+ stream_test_list_completed, REDIS_BINS_EXPIRE_SECS
2045
+ )
2046
+ if test_result is False:
2047
+ github_event_conn.lpush(stream_test_list_failed, test_name)
2048
+ failed_tests = failed_tests + 1
2049
+ logging.warning(
2050
+ f"updating key {stream_test_list_failed} with the failed test: {test_name}. Total failed tests {failed_tests}."
2051
+ )
2052
+ pending_tests = pending_tests - 1
2053
+
2054
+ benchmark_suite_end_datetime = datetime.datetime.utcnow()
2055
+ benchmark_suite_duration = (
2056
+ benchmark_suite_end_datetime - benchmark_suite_start_datetime
2057
+ )
2058
+ benchmark_suite_duration_secs = (
2059
+ benchmark_suite_duration.total_seconds()
2060
+ )
2061
+
2062
+ # update on github if needed
2063
+ if is_actionable_pr:
2064
+ comment_body = generate_benchmark_started_pr_comment(
2065
+ stream_id,
2066
+ pending_tests,
2067
+ len(filtered_test_files),
2068
+ failed_tests,
2069
+ benchmark_suite_start_datetime,
2070
+ benchmark_suite_duration_secs,
2071
+ )
2072
+ update_comment_if_needed(
2073
+ auto_approve_github,
2074
+ comment_body,
2075
+ old_benchmark_run_comment_body,
2076
+ benchmark_run_comment,
2077
+ verbose,
2078
+ )
2079
+ logging.info(
2080
+ f"Updated github comment with latest test info {benchmark_run_comment.html_url}"
2081
+ )
2082
+
2083
+ ###########################
2084
+ # regression part
2085
+ ###########################
2086
+ fn = check_regression_comment
2087
+ (
2088
+ contains_regression_comment,
2089
+ github_pr,
2090
+ is_actionable_pr,
2091
+ old_regression_comment_body,
2092
+ pr_link,
2093
+ regression_comment,
2094
+ ) = check_github_available_and_actionable(
2095
+ fn,
2096
+ github_token,
2097
+ pull_request,
2098
+ tf_github_org,
2099
+ tf_github_repo,
2100
+ verbose,
2101
+ )
2102
+ logging.info("Preparing regression info for the data available")
2103
+ print_improvements_only = False
2104
+ print_regressions_only = False
2105
+ skip_unstable = False
2106
+ regressions_percent_lower_limit = 10.0
2107
+ simplify_table = False
2108
+ testname_regex = ""
2109
+ test = ""
2110
+ last_n_baseline = 1
2111
+ last_n_comparison = 31
2112
+ use_metric_context_path = False
2113
+ baseline_tag = None
2114
+ baseline_deployment_name = "oss-standalone"
2115
+ comparison_deployment_name = "oss-standalone"
2116
+ metric_name = "ALL_STATS.Totals.Ops/sec"
2117
+ metric_mode = "higher-better"
2118
+ to_date = datetime.datetime.utcnow()
2119
+ from_date = to_date - datetime.timedelta(days=180)
2120
+ baseline_branch = default_baseline_branch
2121
+ comparison_tag = git_version
2122
+ comparison_branch = git_branch
2123
+ to_ts_ms = None
2124
+ from_ts_ms = None
2125
+
2126
+ (
2127
+ detected_regressions,
2128
+ table_output,
2129
+ improvement_list,
2130
+ regressions_list,
2131
+ total_stable,
2132
+ total_unstable,
2133
+ total_comparison_points,
2134
+ ) = compute_regression_table(
2135
+ datasink_conn,
2136
+ tf_github_org,
2137
+ tf_github_repo,
2138
+ tf_triggering_env,
2139
+ metric_name,
2140
+ comparison_branch,
2141
+ baseline_branch,
2142
+ None, # we only compare by branch on CI automation
2143
+ None, # we only compare by branch on CI automation
2144
+ baseline_deployment_name,
2145
+ comparison_deployment_name,
2146
+ print_improvements_only,
2147
+ print_regressions_only,
2148
+ skip_unstable,
2149
+ regressions_percent_lower_limit,
2150
+ simplify_table,
2151
+ test,
2152
+ testname_regex,
2153
+ verbose,
2154
+ last_n_baseline,
2155
+ last_n_comparison,
2156
+ metric_mode,
2157
+ from_date,
2158
+ from_ts_ms,
2159
+ to_date,
2160
+ to_ts_ms,
2161
+ use_metric_context_path,
2162
+ running_platform,
2163
+ )
2164
+ total_regressions = len(regressions_list)
2165
+ total_improvements = len(improvement_list)
2166
+ auto_approve = True
2167
+ grafana_link_base = "https://benchmarksredisio.grafana.net/d/1fWbtb7nz/experimental-oss-spec-benchmarks"
2168
+ try:
2169
+ prepare_regression_comment(
2170
+ auto_approve,
2171
+ baseline_branch,
2172
+ baseline_tag,
2173
+ comparison_branch,
2174
+ comparison_tag,
2175
+ contains_regression_comment,
2176
+ github_pr,
2177
+ grafana_link_base,
2178
+ is_actionable_pr,
2179
+ old_regression_comment_body,
2180
+ pr_link,
2181
+ regression_comment,
2182
+ datasink_conn,
2183
+ running_platform,
2184
+ table_output,
2185
+ tf_github_org,
2186
+ tf_github_repo,
2187
+ tf_triggering_env,
2188
+ total_comparison_points,
2189
+ total_improvements,
2190
+ total_regressions,
2191
+ total_stable,
2192
+ total_unstable,
2193
+ verbose,
2194
+ regressions_percent_lower_limit,
2195
+ regressions_list,
2196
+ )
2197
+ except Exception as e:
2198
+ logging.error(
2199
+ "Failed to produce regression comment but continuing... Error: {}".format(
2200
+ e.__str__()
2201
+ )
2202
+ )
2203
+ logging.debug(
2204
+ f"Added test named {test_name} to the completed test list in key {stream_test_list_completed}"
2205
+ )
2206
+ else:
2207
+ logging.error("Missing run image information within received message.")
2208
+
2209
+ except Exception as e:
2210
+ logging.critical(
2211
+ "Some unexpected exception was caught "
2212
+ "during local work on stream {}. Failing test....".format(stream_id)
2213
+ )
2214
+ logging.critical(f"Exception type: {type(e).__name__}")
2215
+ logging.critical(f"Exception message: {str(e)}")
2216
+ logging.critical("Traceback details:")
2217
+ logging.critical(traceback.format_exc())
2218
+ print("-" * 60)
2219
+ traceback.print_exc(file=sys.stdout)
2220
+ print("-" * 60)
2221
+ overall_result = False
2222
+ return stream_id, overall_result, total_test_suite_runs
2223
+
2224
+
2225
+ def start_redis_container(
2226
+ command_str,
2227
+ db_cpuset_cpus,
2228
+ docker_client,
2229
+ mnt_point,
2230
+ redis_containers,
2231
+ run_image,
2232
+ temporary_dir,
2233
+ auto_remove=False,
2234
+ ):
2235
+ logging.info(
2236
+ "Running redis-server on docker image {} (cpuset={}) with the following args: {}".format(
2237
+ run_image, db_cpuset_cpus, command_str
2238
+ )
2239
+ )
2240
+ volumes = {}
2241
+ working_dir = "/"
2242
+ if mnt_point != "":
2243
+ volumes = {
2244
+ temporary_dir: {
2245
+ "bind": mnt_point,
2246
+ "mode": "rw",
2247
+ },
2248
+ }
2249
+ logging.info(f"setting volume as follow: {volumes}. working_dir={mnt_point}")
2250
+ working_dir = mnt_point
2251
+ redis_container = docker_client.containers.run(
2252
+ image=run_image,
2253
+ volumes=volumes,
2254
+ auto_remove=auto_remove,
2255
+ privileged=True,
2256
+ working_dir=working_dir,
2257
+ command=command_str,
2258
+ network_mode="host",
2259
+ detach=True,
2260
+ cpuset_cpus=db_cpuset_cpus,
2261
+ pid_mode="host",
2262
+ publish_all_ports=True,
2263
+ )
2264
+ time.sleep(5)
2265
+ redis_containers.append(redis_container)
2266
+ return redis_container
2267
+
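A hypothetical invocation of the helper above (image tag, cpuset and paths are illustrative, and the scratch dir is assumed to already hold a restored redis-server binary):

import docker

containers = []
redis_container = start_redis_container(
    "/mnt/redis/redis-server --port 6379 --protected-mode no",  # command_str
    "0,1",  # db_cpuset_cpus
    docker.from_env(),
    "/mnt/redis/",  # mnt_point, bound to the scratch dir below
    containers,
    "ubuntu:22.04",  # run_image
    "/tmp/redis-scratch",  # temporary_dir holding the build artifacts
)
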
2268
+
2269
+ def filter_test_files(
2270
+ defaults_filename,
2271
+ priority_lower_limit,
2272
+ priority_upper_limit,
2273
+ tests_regexp,
2274
+ testsuite_spec_files,
2275
+ command_groups_regexp=None,
2276
+ command_regexp=None,
2277
+ ):
2278
+ filtered_test_files = []
2279
+ for test_file in testsuite_spec_files:
2280
+ if defaults_filename in test_file:
2281
+ continue
2282
+
2283
+ if tests_regexp != ".*":
2284
+ logging.debug(
2285
+ "Filtering all tests via a regular expression: {}".format(tests_regexp)
2286
+ )
2287
+ tags_regex_string = re.compile(tests_regexp)
2288
+
2289
+ match_obj = re.search(tags_regex_string, test_file)
2290
+ if match_obj is None:
2291
+ logging.debug(
2292
+ "Skipping {} given it does not match regex {}".format(
2293
+ test_file, tests_regexp
2294
+ )
2295
+ )
2296
+ continue
2297
+
2298
+ with open(test_file, "r") as stream:
2299
+ (
2300
+ result,
2301
+ benchmark_config,
2302
+ test_name,
2303
+ ) = get_final_benchmark_config(None, None, stream, "")
2304
+ if result is False:
2305
+ logging.error(
2306
+ "Skipping {} given there were errors while calling get_final_benchmark_config()".format(
2307
+ test_file
2308
+ )
2309
+ )
2310
+ continue
2311
+
2312
+ if command_groups_regexp is not None:
2313
+ logging.debug(
2314
+ "Filtering all test command groups via a regular expression: {}".format(
2315
+ command_groups_regexp
2316
+ )
2317
+ )
2318
+ if "tested-groups" in benchmark_config:
2319
+ command_groups = benchmark_config["tested-groups"]
2320
+ logging.debug(
2321
+ f"The file {test_file} (test name = {test_name}) contains the following groups: {command_groups}"
2322
+ )
2323
+ groups_regex_string = re.compile(command_groups_regexp)
2324
+ found = False
2325
+ for command_group in command_groups:
2326
+ match_obj = re.search(groups_regex_string, command_group)
2327
+ if match_obj is not None:
2328
+ found = True
2329
+ logging.debug(f"found the command group {command_group}")
2330
+ if found is False:
2331
+ logging.info(
2332
+ f"Skipping {test_file} given the following groups: {command_groups} does not match command group regex {command_groups_regexp}"
2333
+ )
2334
+ continue
2335
+ else:
2336
+ logging.debug(
2337
+ f"The file {test_file} (test name = {test_name}) does not contain the property 'tested-groups'. Cannot filter based uppon groups..."
2338
+ )
2339
+
2340
+ # Filter by command regex if specified
2341
+ if command_regexp is not None and command_regexp != ".*":
2342
+ if "tested-commands" in benchmark_config:
2343
+ tested_commands = benchmark_config["tested-commands"]
2344
+ command_regex_compiled = re.compile(command_regexp, re.IGNORECASE)
2345
+ found = False
2346
+ for command in tested_commands:
2347
+ if re.search(command_regex_compiled, command):
2348
+ found = True
2349
+ logging.info(
2350
+ f"found the command {command} matching regex {command_regexp}"
2351
+ )
2352
+ break
2353
+ if found is False:
2354
+ logging.info(
2355
+ f"Skipping {test_file} given the following commands: {tested_commands} does not match command regex {command_regexp}"
2356
+ )
2357
+ continue
2358
+ else:
2359
+ logging.warning(
2360
+ f"The file {test_file} (test name = {test_name}) does not contain the property 'tested-commands'. Cannot filter based upon commands..."
2361
+ )
2362
+
2363
+ if "priority" in benchmark_config:
2364
+ priority = benchmark_config["priority"]
2365
+
2366
+ if priority is not None:
2367
+ if priority > priority_upper_limit:
2368
+ logging.warning(
2369
+ "Skipping test {} giving the priority limit ({}) is above the priority value ({})".format(
2370
+ test_name, priority_upper_limit, priority
2371
+ )
2372
+ )
2373
+
2374
+ continue
2375
+ if priority < priority_lower_limit:
2376
+ logging.warning(
2377
+ "Skipping test {} giving the priority limit ({}) is bellow the priority value ({})".format(
2378
+ test_name, priority_lower_limit, priority
2379
+ )
2380
+ )
2381
+
2382
+ continue
2383
+ logging.info(
2384
+ "Test {} priority ({}) is within the priority limit [{},{}]".format(
2385
+ test_name,
2386
+ priority,
2387
+ priority_lower_limit,
2388
+ priority_upper_limit,
2389
+ )
2390
+ )
2391
+ filtered_test_files.append(test_file)
2392
+ return filtered_test_files
2393
+
2394
+
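For illustration only, a hypothetical call to `filter_test_files` (the file names, limits, and regexes below are invented, and the spec files would have to exist on disk for the YAML parsing step to succeed) might look like:

```python
# Hypothetical usage sketch: keep only priority 0-100 tests whose spec
# declares the "string" command group and a command matching "GET".
testsuite_spec_files = [
    "test-suites/defaults.yml",
    "test-suites/memtier_benchmark-1key-string-get.yml",
    "test-suites/memtier_benchmark-1key-list-lpush.yml",
]
filtered = filter_test_files(
    defaults_filename="defaults.yml",
    priority_lower_limit=0,
    priority_upper_limit=100,
    tests_regexp=".*",
    testsuite_spec_files=testsuite_spec_files,
    command_groups_regexp="string",
    command_regexp="GET",
)
```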
2395
+ def data_prepopulation_step(
+     benchmark_config,
+     benchmark_tool_workdir,
+     client_cpuset_cpus,
+     docker_client,
+     git_hash,
+     port,
+     temporary_dir,
+     test_name,
+     redis_password,
+ ):
+     # setup the benchmark
+     (
+         start_time,
+         start_time_ms,
+         start_time_str,
+     ) = get_start_time_vars()
+     local_benchmark_output_filename = get_local_run_full_filename(
+         start_time_str,
+         git_hash,
+         "preload__" + test_name,
+         "oss-standalone",
+     )
+     preload_image = extract_client_container_image(
+         benchmark_config["dbconfig"], "preload_tool"
+     )
+     preload_tool = extract_client_tool(benchmark_config["dbconfig"], "preload_tool")
+     full_benchmark_path = "/usr/local/bin/{}".format(preload_tool)
+     client_mnt_point = "/mnt/client/"
+     if "memtier_benchmark" in preload_tool:
+         (
+             _,
+             preload_command_str,
+             _,
+         ) = prepare_memtier_benchmark_parameters(
+             benchmark_config["dbconfig"]["preload_tool"],
+             full_benchmark_path,
+             port,
+             "localhost",
+             redis_password,
+             local_benchmark_output_filename,
+             False,
+         )
+
+         logging.info(
+             "Using docker image {} as benchmark PRELOAD image (cpuset={}) with the following args: {}".format(
+                 preload_image,
+                 client_cpuset_cpus,
+                 preload_command_str,
+             )
+         )
+         # run the benchmark
+         preload_start_time = datetime.datetime.now()
+
+         # Set the preload timeout (preloading can take longer than the benchmark itself)
+         preload_timeout = 1800  # 30 minutes default for data loading
+         logging.info(f"Starting preload container with {preload_timeout}s timeout")
+
+         try:
+             # Start the container detached so we can enforce the timeout ourselves
+             container = docker_client.containers.run(
+                 image=preload_image,
+                 volumes={
+                     temporary_dir: {
+                         "bind": client_mnt_point,
+                         "mode": "rw",
+                     },
+                 },
+                 auto_remove=False,  # don't auto-remove so we can still fetch logs on timeout
+                 privileged=True,
+                 working_dir=benchmark_tool_workdir,
+                 command=preload_command_str,
+                 network_mode="host",
+                 detach=True,  # detach to enable timeout handling
+                 cpuset_cpus=client_cpuset_cpus,
+             )
+
+             logging.info(
+                 f"Started preload container {container.name} ({container.id[:12]}) with {preload_timeout}s timeout"
+             )
+
+             # Wait for the container to finish, with a timeout
+             try:
+                 result = container.wait(timeout=preload_timeout)
+                 client_container_stdout = container.logs(
+                     stdout=True, stderr=False
+                 ).decode("utf-8")
+                 container_stderr = container.logs(stdout=False, stderr=True).decode(
+                     "utf-8"
+                 )
+
+                 # Check the exit code
+                 if result["StatusCode"] != 0:
+                     logging.error(
+                         f"Preload container exited with code {result['StatusCode']}"
+                     )
+                     logging.error(f"Preload container stderr: {container_stderr}")
+                     raise docker.errors.ContainerError(
+                         container,
+                         result["StatusCode"],
+                         preload_command_str,
+                         preload_image,
+                         container_stderr,
+                     )
+
+                 logging.info(
+                     f"Preload container {container.name} completed successfully"
+                 )
+
+             except Exception as timeout_error:
+                 if "timeout" in str(timeout_error).lower():
+                     logging.error(
+                         f"Preload container {container.name} timed out after {preload_timeout}s"
+                     )
+                     # Fetch the logs before killing the container
+                     try:
+                         timeout_logs = container.logs(stdout=True, stderr=True).decode(
+                             "utf-8"
+                         )
+                         logging.error(
+                             f"Preload container logs before timeout: {timeout_logs}"
+                         )
+                     except Exception:
+                         pass
+                     # Kill the container
+                     container.kill()
+                     raise Exception(
+                         f"Preload container timed out after {preload_timeout} seconds"
+                     )
+                 else:
+                     raise
+             finally:
+                 # Clean up the container
+                 try:
+                     container.remove(force=True)
+                 except Exception:
+                     pass
+         except Exception as e:
+             logging.error(f"Preload container failed: {e}")
+             raise
+
+         preload_end_time = datetime.datetime.now()
+         preload_duration_seconds = calculate_client_tool_duration_and_check(
+             preload_end_time, preload_start_time, "Preload", False
+         )
+         logging.info(
+             "Tool took {} seconds to load data. Output: {}".format(
+                 preload_duration_seconds,
+                 client_container_stdout,
+             )
+         )
+
+
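The timeout branch above detects a wait timeout by searching the exception message, which works but is brittle. docker-py documents that `Container.wait(timeout=...)` raises `requests.exceptions.ReadTimeout` when the timeout is exceeded (a `ConnectionError` can also surface depending on the transport), so a sketch of catching those types explicitly could look like this; `wait_with_timeout` is a hypothetical helper, not part of this package:

```python
import requests


def wait_with_timeout(container, timeout_secs):
    # Wait on a detached container, killing it if the deadline passes.
    try:
        return container.wait(timeout=timeout_secs)
    except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
        # docker-py surfaces an exceeded wait timeout as a requests exception
        container.kill()
        raise TimeoutError(
            f"container {container.name} exceeded {timeout_secs}s"
        )
```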
2548
+ def get_benchmark_specs(testsuites_folder):
+     files = pathlib.Path(testsuites_folder).glob("*.yml")
+     files = [str(x) for x in files]
+     logging.info(
+         "Running all specified benchmarks: {}".format(" ".join(files))
+     )
+     return files
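
Note that `Path.glob("*.yml")` only matches files directly under `testsuites_folder`. If spec files were ever nested in subdirectories, a recursive variant using `rglob` would be needed; the helper below is a hypothetical alternative for illustration, not part of this package:

```python
import logging
import pathlib


def get_benchmark_specs_recursive(testsuites_folder):
    # rglob descends into subdirectories, unlike the non-recursive glob("*.yml")
    files = [str(p) for p in pathlib.Path(testsuites_folder).rglob("*.yml")]
    logging.info("Discovered {} benchmark spec files".format(len(files)))
    return files
```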