redis_benchmarks_specification-0.2.42-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- redis_benchmarks_specification/__api__/Readme.md +7 -0
- redis_benchmarks_specification/__api__/__init__.py +5 -0
- redis_benchmarks_specification/__api__/api.py +87 -0
- redis_benchmarks_specification/__api__/app.py +191 -0
- redis_benchmarks_specification/__builder__/Readme.md +7 -0
- redis_benchmarks_specification/__builder__/__init__.py +5 -0
- redis_benchmarks_specification/__builder__/builder.py +1010 -0
- redis_benchmarks_specification/__builder__/schema.py +23 -0
- redis_benchmarks_specification/__cli__/__init__.py +5 -0
- redis_benchmarks_specification/__cli__/args.py +226 -0
- redis_benchmarks_specification/__cli__/cli.py +624 -0
- redis_benchmarks_specification/__cli__/stats.py +1304 -0
- redis_benchmarks_specification/__common__/__init__.py +0 -0
- redis_benchmarks_specification/__common__/builder_schema.py +256 -0
- redis_benchmarks_specification/__common__/env.py +96 -0
- redis_benchmarks_specification/__common__/github.py +280 -0
- redis_benchmarks_specification/__common__/package.py +28 -0
- redis_benchmarks_specification/__common__/runner.py +485 -0
- redis_benchmarks_specification/__common__/spec.py +143 -0
- redis_benchmarks_specification/__common__/suppress_warnings.py +20 -0
- redis_benchmarks_specification/__common__/timeseries.py +1621 -0
- redis_benchmarks_specification/__compare__/__init__.py +5 -0
- redis_benchmarks_specification/__compare__/args.py +240 -0
- redis_benchmarks_specification/__compare__/compare.py +3322 -0
- redis_benchmarks_specification/__init__.py +15 -0
- redis_benchmarks_specification/__runner__/__init__.py +5 -0
- redis_benchmarks_specification/__runner__/args.py +334 -0
- redis_benchmarks_specification/__runner__/remote_profiling.py +535 -0
- redis_benchmarks_specification/__runner__/runner.py +3837 -0
- redis_benchmarks_specification/__self_contained_coordinator__/__init__.py +5 -0
- redis_benchmarks_specification/__self_contained_coordinator__/args.py +210 -0
- redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py +27 -0
- redis_benchmarks_specification/__self_contained_coordinator__/build_info.py +61 -0
- redis_benchmarks_specification/__self_contained_coordinator__/clients.py +58 -0
- redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py +17 -0
- redis_benchmarks_specification/__self_contained_coordinator__/docker.py +108 -0
- redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py +19 -0
- redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py +96 -0
- redis_benchmarks_specification/__self_contained_coordinator__/runners.py +740 -0
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +2554 -0
- redis_benchmarks_specification/__setups__/__init__.py +0 -0
- redis_benchmarks_specification/__setups__/topologies.py +17 -0
- redis_benchmarks_specification/__spec__/__init__.py +5 -0
- redis_benchmarks_specification/__spec__/args.py +78 -0
- redis_benchmarks_specification/__spec__/cli.py +259 -0
- redis_benchmarks_specification/__watchdog__/__init__.py +5 -0
- redis_benchmarks_specification/__watchdog__/args.py +54 -0
- redis_benchmarks_specification/__watchdog__/watchdog.py +175 -0
- redis_benchmarks_specification/commands/__init__.py +0 -0
- redis_benchmarks_specification/commands/commands.py +15 -0
- redis_benchmarks_specification/setups/builders/gcc:15.2.0-amd64-debian-bookworm-default.yml +20 -0
- redis_benchmarks_specification/setups/builders/gcc:15.2.0-arm64-debian-bookworm-default.yml +20 -0
- redis_benchmarks_specification/setups/platforms/aws-ec2-1node-c5.4xlarge.yml +27 -0
- redis_benchmarks_specification/setups/topologies/topologies.yml +153 -0
- redis_benchmarks_specification/test-suites/defaults.yml +32 -0
- redis_benchmarks_specification/test-suites/generate.py +114 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpire-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpire-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpireat-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpireat-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetall-50-fields-100B-values.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-persist-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpire-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpire-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpireat-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpireat-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-htll-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-1000B-values-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-expiration.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-long-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-short-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-20-fields-with-1B-values-pipeline-30.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-1000B-values-expiration.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-long-expiration.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-short-expiration.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values-expiration.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-100B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-expiration.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-long-expiration.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-short-expiration.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-hash-50-fields-with-10000B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50.yml +39 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-with-10B-values-pipeline-50.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values-pipeline-10.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Kkeys-hash-listpack-500-fields-update-20-fields-with-1B-to-64B-values.yml +75 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-expire-use-case.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-psetex-expire-use-case.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-setex-expire-use-case.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-1KiB-expire-use-case.yml +49 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-4KiB-expire-use-case.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-bitmap-getbit-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-exists-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expire-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expireat-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-pexpire-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-count-500-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-500-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-5000-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-type-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-touch-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-ttl-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hexists.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hget-hgetall-hkeys-hvals-with-100B-values.yml +48 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hgetall-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrby.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrbyfloat.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-10-fields-with-10B-values-with-expiration-pipeline-10.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-5-fields-with-100B-values-with-expiration-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-5-fields-with-10B-values-with-expiration-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-50-fields-with-10B-values-with-expiration-pipeline-10.yml +54 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hmget-5-fields-with-100B-values-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-transactions-multi-exec-pipeline-20.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-100B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-10B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-1KiB-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-rpoplpush-with-10B-values.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values-pipeline-10.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-50-fields-with-10B-values-long-expiration-pipeline-10.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-hmset-5-fields-with-1000B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-rpush-with-10B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-100B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-1KiB-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits-pipeline-10.yml +58 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits.yml +58 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values-pipeline-10.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100-nokeyprefix.yml +29 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-50.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-500.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-20KiB-values.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-listpack-with-100-elements-double-score.yml +91 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-double-score.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-int-score.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-decr.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-50.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-500.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B-pipeline-10.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incr-pipeline-10.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby-pipeline-10.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat-pipeline-10.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-int-encoding-strlen-pipeline-10.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mget-1KiB.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire-pipeline-10.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-with-expiration-240B-400_conns.yml +47 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-set-with-ex-100B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setex-100B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-1.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-4KiB-pipeline-1.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-4KiB-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-512B-pipeline-1.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-512B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-100M-bits-bitmap-bitcount.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-1Billion-bits-bitmap-bitcount.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geopos.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geosearch-fromlonlat-withcoord.yml +39 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist-pipeline-10.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash-pipeline-10.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos-pipeline-10.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-bybox.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-pipeline-10.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall-pipeline-10.yml +285 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall.yml +284 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-100B-values-cursor-count-1000.yml +291 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values-cursor-count-100.yml +291 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values.yml +290 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-50-fields-10B-values.yml +54 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements-pipeline-10.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-7bit-uint-lrange-all-elements-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-lrange-all-elements-pipeline-10.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-llen-pipeline-10.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements-pipeline-10.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements.yml +51 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-integer.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-integer.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-string.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-integer.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-string.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements-pipeline-10.yml +202 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements.yml +201 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-2K-elements-quicklist-lrange-all-elements-longs.yml +258 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-hash-1K-fields-with-5B-values.yml +282 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-float-score.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-hexa-score.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-pfadd-4KB-values-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers-pipeline-10.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smismember.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-is-a-member.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-not-a-member.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smembers.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smismember.yml +54 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sscan.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-sismember-50pct-chance.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-srem-50pct-chance.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-smembers.yml +200 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan-cursor-count-100.yml +201 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan.yml +200 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1M-elements-sismember-50pct-chance.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-200K-elements-sadd-constant.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-2M-elements-sadd-increasing.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zincrby-1M-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-100K-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-10M-elements-pipeline-1.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-1M-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrem-5M-elements-pipeline-1.yml +47 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-1.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrank-1M-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements-long-scores.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrange-all-elements.yml +66 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements-long-scores.yml +66 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements.yml +66 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zscan.yml +65 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zrange-all-elements.yml +322 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zscan.yml +321 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zcard-pipeline-10.yml +39 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zremrangebyscore-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-5-elements.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-withscores-5-elements-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zscore-pipeline-10.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-1K-elements.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-300K-elements.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-listpack-zrank-100-elements-pipeline-1.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-eval-hset-expire.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-evalsha-hset-expire.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml +57 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sinter.yml +57 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml +57 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries-pipeline-10.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunion.yml +434 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunionstore.yml +434 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values-pipeline-10.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-400_conns.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-40_conns.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-2000_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-400_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-40_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-400_conns.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-2000_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-400_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-5200_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-50-50-with-512B-values-with-expiration-pipeline-10-400_conns.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-multiple-hll-pfcount-100B-values.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-multiple-hll-pfmerge-100B-values.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-connection-ping-pipeline-10.yml +29 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-publish-1K-channels-10B-no-subscribers.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-server-time-pipeline-10.yml +29 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-10.yml +68 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-100.yml +69 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-1000.yml +68 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-rate-limiting-lua-100k-sessions.yml +64 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-realtime-analytics-membership-pipeline-10.yml +56 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-realtime-analytics-membership.yml +56 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-hash-100k-sessions.yml +108 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-json-100k-sessions.yml +109 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-string-100k-sessions.yml +98 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-100k-sessions.yml +205 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml +205 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xread-count-100.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xreadgroup-count-100-noack.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xreadgroup-count-100.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-concurrent-xadd-xreadgroup-70-30.yml +50 -0
- redis_benchmarks_specification/test-suites/template.txt +18 -0
- redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml +41 -0
- redis_benchmarks_specification-0.2.42.dist-info/LICENSE +201 -0
- redis_benchmarks_specification-0.2.42.dist-info/METADATA +434 -0
- redis_benchmarks_specification-0.2.42.dist-info/RECORD +336 -0
- redis_benchmarks_specification-0.2.42.dist-info/WHEEL +4 -0
- redis_benchmarks_specification-0.2.42.dist-info/entry_points.txt +10 -0
redis_benchmarks_specification/__runner__/runner.py (new file)
@@ -0,0 +1,3837 @@
# Import warning suppression first
from redis_benchmarks_specification.__common__.suppress_warnings import *

import datetime
import json
import logging
import math
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import traceback
from pathlib import Path
import re
import tqdm
from urllib.parse import urlparse
import docker
import redis
from docker.models.containers import Container
from pytablewriter import CsvTableWriter, MarkdownTableWriter
from redisbench_admin.profilers.profilers_local import (
    check_compatible_system_and_kernel_and_prepare_profile,
)
from redisbench_admin.run.common import (
    get_start_time_vars,
    merge_default_and_config_metrics,
    prepare_benchmark_parameters,
)

from redis_benchmarks_specification.__common__.runner import (
    export_redis_metrics,
)

from redisbench_admin.profilers.profilers_local import (
    local_profilers_platform_checks,
    profilers_start_if_required,
    profilers_stop_if_required,
)
from redisbench_admin.run.common import (
    dbconfig_keyspacelen_check,
)
from redisbench_admin.run.metrics import extract_results_table
from redisbench_admin.run.run import calculate_client_tool_duration_and_check
from redisbench_admin.utils.benchmark_config import (
    get_final_benchmark_config,
    get_defaults,
)
from redisbench_admin.utils.local import get_local_run_full_filename
from redisbench_admin.utils.results import post_process_benchmark_results

from redis_benchmarks_specification.__common__.env import (
    LOG_DATEFMT,
    LOG_FORMAT,
    LOG_LEVEL,
    REDIS_HEALTH_CHECK_INTERVAL,
    REDIS_SOCKET_TIMEOUT,
    S3_BUCKET_NAME,
)
from redis_benchmarks_specification.__common__.package import (
    get_version_string,
    populate_with_poetry_data,
)
from redis_benchmarks_specification.__common__.runner import (
    extract_testsuites,
    exporter_datasink_common,
    reset_commandstats,
    execute_init_commands,
    export_redis_metrics,
)
from redis_benchmarks_specification.__common__.spec import (
    extract_client_container_image,
    extract_client_cpu_limit,
    extract_client_tool,
    extract_client_configs,
    extract_client_container_images,
    extract_client_tools,
)
from redis_benchmarks_specification.__runner__.args import create_client_runner_args
from redis_benchmarks_specification.__runner__.remote_profiling import RemoteProfiler


# Global flag to track if user wants to exit
_exit_requested = False


def signal_handler(signum, frame):
    """Handle Ctrl+C signal to exit gracefully"""
    global _exit_requested
    if not _exit_requested:
        _exit_requested = True
        logging.info("Ctrl+C detected. Exiting after current test completes...")
        print("\nCtrl+C detected. Exiting after current test completes...")
    else:
        logging.info("Ctrl+C detected again. Force exiting...")
        print("\nForce exiting...")
        sys.exit(1)
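
# Editor's note: illustrative sketch, not part of the packaged runner.py.
# The handler above only takes effect once it is registered for SIGINT, e.g.:
#
#     signal.signal(signal.SIGINT, signal_handler)
#
# The runner presumably performs an equivalent registration during startup
# (not shown in this excerpt).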


def parse_redis_uri(uri):
    """
    Parse Redis URI and extract connection parameters.

    Args:
        uri (str): Redis URI in format redis://user:password@host:port/dbnum
                   or rediss://user:password@host:port/dbnum for TLS

    Returns:
        dict: Dictionary containing parsed connection parameters
    """
    if not uri:
        return {}

    try:
        parsed = urlparse(uri)

        # Extract connection parameters
        params = {}

        # Host (required)
        if parsed.hostname:
            params["host"] = parsed.hostname

        # Port (optional, defaults to 6379)
        if parsed.port:
            params["port"] = parsed.port

        # Username and password
        if parsed.username:
            params["username"] = parsed.username
        if parsed.password:
            params["password"] = parsed.password

        # Database number
        if parsed.path and len(parsed.path) > 1:  # path starts with '/'
            try:
                params["db"] = int(parsed.path[1:])  # Remove leading '/'
            except ValueError:
                logging.warning(f"Invalid database number in URI: {parsed.path[1:]}")

        # TLS detection
        if parsed.scheme == "rediss":
            params["tls_enabled"] = True
        elif parsed.scheme == "redis":
            params["tls_enabled"] = False
        else:
            logging.warning(
                f"Unknown scheme in URI: {parsed.scheme}. Assuming non-TLS."
            )
            params["tls_enabled"] = False

        logging.info(
            f"Parsed Redis URI: host={params.get('host', 'N/A')}, "
            f"port={params.get('port', 'N/A')}, "
            f"username={params.get('username', 'N/A')}, "
            f"db={params.get('db', 'N/A')}, "
            f"tls={params.get('tls_enabled', False)}"
        )

        return params

    except Exception as e:
        logging.error(f"Failed to parse Redis URI '{uri}': {e}")
        return {}
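
# Editor's note: illustrative example, not part of the packaged runner.py.
# Host, port and credentials below are hypothetical placeholders:
#
#     parse_redis_uri("rediss://user:secret@db.example.com:6390/2")
#     # -> {"host": "db.example.com", "port": 6390, "username": "user",
#     #     "password": "secret", "db": 2, "tls_enabled": True}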


def validate_benchmark_metrics(
    results_dict, test_name, benchmark_config=None, default_metrics=None
):
    """
    Validate benchmark metrics to ensure they contain reasonable values.
    Fails the test if critical metrics indicate something is wrong.

    Args:
        results_dict: Dictionary containing benchmark results
        test_name: Name of the test being validated
        benchmark_config: Benchmark configuration (optional, contains tested-commands)
        default_metrics: Default metrics configuration (unused, for compatibility)

    Returns:
        tuple: (is_valid, error_message)
    """
    try:
        # Get tested commands from config if available
        tested_commands = []
        if benchmark_config and "tested-commands" in benchmark_config:
            tested_commands = [
                cmd.lower() for cmd in benchmark_config["tested-commands"]
            ]

        # Define validation rules
        throughput_patterns = [
            "ops/sec",
            "qps",
            "totals.ops/sec",
            "all_stats.totals.ops/sec",
        ]

        latency_patterns = ["p50", "p95", "p99", "p999", "percentile"]

        validation_errors = []

        def check_nested_dict(data, path=""):
            """Recursively check nested dictionary for metrics"""
            if isinstance(data, dict):
                for key, value in data.items():
                    current_path = f"{path}.{key}" if path else key
                    check_nested_dict(value, current_path)
            elif isinstance(data, (int, float)):
                metric_path_lower = path.lower()

                # Skip Waits metrics as they can legitimately be 0
                if "waits" in metric_path_lower:
                    return

                # Skip general latency metrics that can legitimately be 0
                # Only validate specific percentile latencies (p50, p95, etc.)
                if any(
                    pattern in metric_path_lower
                    for pattern in [
                        "average latency",
                        "totals.latency",
                        "all_stats.totals.latency",
                    ]
                ):
                    return

                # Skip operation-specific metrics for operations not being tested
                # For example, skip Gets.Ops/sec if only SET commands are tested
                if tested_commands:
                    skip_metric = False
                    operation_types = [
                        "gets",
                        "sets",
                        "hgets",
                        "hsets",
                        "lpush",
                        "rpush",
                        "sadd",
                    ]
                    for op_type in operation_types:
                        if (
                            op_type in metric_path_lower
                            and op_type not in tested_commands
                        ):
                            skip_metric = True
                            break
                    if skip_metric:
                        return

                # Check throughput metrics
                for pattern in throughput_patterns:
                    if pattern in metric_path_lower:
                        if data <= 10:  # Below 10 QPS threshold
                            validation_errors.append(
                                f"Throughput metric '{path}' has invalid value: {data} "
                                f"(below 10 QPS threshold)"
                            )
                        break

                # Check latency metrics
                for pattern in latency_patterns:
                    if pattern in metric_path_lower:
                        if data <= 0.0:  # Invalid latency
                            validation_errors.append(
                                f"Latency metric '{path}' has invalid value: {data} "
                                f"(should be > 0.0)"
                            )
                        break

        # Validate the results dictionary
        check_nested_dict(results_dict)

        if validation_errors:
            error_msg = f"Test {test_name} failed metric validation:\n" + "\n".join(
                validation_errors
            )
            logging.error(error_msg)
            return False, error_msg

        logging.info(f"Test {test_name} passed metric validation")
        return True, None

    except Exception as e:
        logging.warning(f"Error during metric validation for test {test_name}: {e}")
        # Don't fail the test if validation itself fails
        return True, None
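
# Editor's note: illustrative example, not part of the packaged runner.py.
# A memtier-style result whose throughput collapsed to zero fails validation,
# while a positive p50 latency passes:
#
#     ok, err = validate_benchmark_metrics(
#         {"ALL STATS": {"Totals": {"Ops/sec": 0.0, "p50": 0.35}}},
#         "memtier_benchmark-example-test",
#         benchmark_config={"tested-commands": ["set"]},
#     )
#     # ok is False; err reports 'ALL STATS.Totals.Ops/sec' as below the
#     # 10 QPS threshold.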


def run_local_command_with_timeout(command_str, timeout_seconds, description="command"):
    """
    Run a local command with timeout support.

    Args:
        command_str: The command string to execute
        timeout_seconds: Timeout in seconds
        description: Description for logging

    Returns:
        tuple: (success, stdout, stderr)
    """
    try:
        logging.info(
            f"Running {description} with {timeout_seconds}s timeout: {command_str}"
        )

        # Use shell=True to support complex command strings with pipes, etc.
        process = subprocess.Popen(
            command_str,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )

        try:
            stdout, stderr = process.communicate(timeout=timeout_seconds)
            return_code = process.returncode

            if return_code == 0:
                logging.info(f"{description} completed successfully")
                return True, stdout, stderr
            else:
                logging.error(f"{description} failed with return code {return_code}")
                logging.error(f"stderr: {stderr}")
                return False, stdout, stderr

        except subprocess.TimeoutExpired:
            logging.error(f"{description} timed out after {timeout_seconds} seconds")
            process.kill()
            try:
                stdout, stderr = process.communicate(
                    timeout=5
                )  # Give 5 seconds to cleanup
            except subprocess.TimeoutExpired:
                stdout, stderr = "", "Process killed due to timeout"
            return False, stdout, f"Timeout after {timeout_seconds} seconds. {stderr}"

    except Exception as e:
        logging.error(f"Error running {description}: {e}")
        return False, "", str(e)
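
# Editor's note: illustrative example, not part of the packaged runner.py:
#
#     success, out, err = run_local_command_with_timeout(
#         "redis-cli -h 127.0.0.1 ping", 10, description="redis-cli ping"
#     )
#     # success is True only if the command exits with code 0 before the
#     # 10-second timeout; on timeout the process is killed and err carries
#     # a "Timeout after 10 seconds" message.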


def calculate_process_timeout(command_str, buffer_timeout):
    """
    Calculate timeout for a process based on test-time parameter and buffer.

    Args:
        command_str: The command string to analyze
        buffer_timeout: Buffer time to add to test-time

    Returns:
        int: Timeout in seconds
    """
    default_timeout = 300  # 5 minutes default
    run_count = 1
    if "run-count" in command_str:
        # Try to extract the run count
        # Handle both --run-count and -run-count forms
        run_count_match = re.search(r"--?run-count[=\s]+(\d+)", command_str)
        if run_count_match:
            run_count = int(run_count_match.group(1))
            logging.info(f"Detected run count of: {run_count}")
    run_count_match = re.search(r"-?x[=\s]+(\d+)", command_str)
    if run_count_match:
        run_count = int(run_count_match.group(1))
        logging.info(f"Detected run count (from -x) of: {run_count}")

    if "test-time" in command_str:
        # Try to extract test time and add buffer
        # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench)
        test_time_match = re.search(r"--?test-time[=\s]+(\d+)", command_str)
        if test_time_match:
            test_time = int(test_time_match.group(1))
            timeout = (test_time + buffer_timeout) * run_count
            logging.info(
                f"Set process timeout to {timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer) x {run_count} runs)"
            )
            return timeout

    logging.info(f"Using default process timeout: {default_timeout}s")
    return default_timeout
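
# Editor's worked example, not part of the packaged runner.py (the 60s buffer
# is an arbitrary illustration):
#
#     calculate_process_timeout(
#         "memtier_benchmark --test-time 180 --run-count 3", 60
#     )
#     # -> (180 + 60) * 3 = 720 seconds
#
# A command string without a test-time flag falls back to the 300s default.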


def parse_size(size):
    units = {
        "B": 1,
        "KB": 2**10,
        "MB": 2**20,
        "GB": 2**30,
        "TB": 2**40,
        "": 1,
        "KIB": 10**3,
        "MIB": 10**6,
        "GIB": 10**9,
        "TIB": 10**12,
        "K": 2**10,
        "M": 2**20,
        "G": 2**30,
        "T": 2**40,
        "KI": 10**3,
        "MI": 10**6,
        "GI": 10**9,
        "TI": 10**12,
    }
    m = re.match(r"^([\d\.]+)\s*([a-zA-Z]{0,3})$", str(size).strip())
    number, unit = float(m.group(1)), m.group(2).upper()
    return int(number * units[unit])
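
# Editor's worked examples, not part of the packaged runner.py. Note that in
# the table above the short units ("KB", "K", ...) map to binary multiples
# while the "Ki"/"KiB"-style units map to decimal multiples:
#
#     parse_size("512")     # -> 512
#     parse_size("4KB")     # -> 4 * 2**10 = 4096
#     parse_size("1.5 M")   # -> int(1.5 * 2**20) = 1572864
#     parse_size("2GiB")    # -> 2 * 10**9 = 2000000000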


def extract_expected_benchmark_duration(
    benchmark_command_str, override_memtier_test_time
):
    """
    Extract expected benchmark duration from command string or override.

    Args:
        benchmark_command_str: The benchmark command string
        override_memtier_test_time: Override test time value

    Returns:
        Expected duration in seconds, or 30 as default
    """
    if override_memtier_test_time > 0:
        return override_memtier_test_time

    # Try to extract test-time from command string
    if "test-time" in benchmark_command_str:
        # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench)
        test_time_match = re.search(r"--?test-time[=\s]+(\d+)", benchmark_command_str)
        if test_time_match:
            return int(test_time_match.group(1))

    # Default duration if not found
    return 30
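
# Editor's note: illustrative examples, not part of the packaged runner.py:
#
#     extract_expected_benchmark_duration("memtier_benchmark --test-time 120", 0)
#     # -> 120 (parsed from the command string)
#     extract_expected_benchmark_duration("memtier_benchmark -n 100000", 0)
#     # -> 30 (default when no override and no test-time flag are present)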


def detect_object_encoding(redis_conn, dbconfig):
    """
    Detect object encoding by scanning 1% of the dataset.

    Args:
        redis_conn: Redis connection
        dbconfig: Database configuration containing keyspace info

    Returns:
        Dict with encoding information
    """
    try:
        # Get total key count
        total_keys = redis_conn.dbsize()
        logging.debug(f"Object encoding detection: DBSIZE reports {total_keys} keys")

        if total_keys == 0:
            logging.warning("No keys found in database for encoding detection")
            return {
                "encoding": "unknown",
                "confidence": 0.0,
                "sample_size": 0,
                "total_keys": 0,
                "encoding_distribution": {},
            }

        # Determine scanning strategy based on dataset size
        if total_keys <= 1000:
            # For small datasets, scan all keys for complete accuracy
            sample_size = total_keys
            scan_all_keys = True
            logging.info(
                f"Scanning all {total_keys} keys (small dataset - complete analysis)"
            )
        else:
            # For large datasets, sample 1% (minimum 10, maximum 1000)
            sample_size = max(10, min(1000, int(total_keys * 0.01)))
            scan_all_keys = False
            logging.info(
                f"Sampling {sample_size} keys out of {total_keys} total keys ({(sample_size/total_keys)*100:.2f}%)"
            )

        # Use SCAN to get keys
        encoding_counts = {}
        scanned_keys = []
        cursor = 0

        if scan_all_keys:
            # Scan all keys in the database
            while True:
                cursor, keys = redis_conn.scan(cursor=cursor, count=100)
                scanned_keys.extend(keys)

                # Break if we've completed a full scan
                if cursor == 0:
                    break
        else:
            # Sample keys until we reach our target sample size
            while len(scanned_keys) < sample_size:
                cursor, keys = redis_conn.scan(
                    cursor=cursor, count=min(100, sample_size - len(scanned_keys))
                )
                scanned_keys.extend(keys)

                # Break if we've completed a full scan
                if cursor == 0:
                    break

            # Limit to our target sample size
            scanned_keys = scanned_keys[:sample_size]

        logging.debug(
            f"SCAN completed: found {len(scanned_keys)} keys, cursor ended at {cursor}"
        )

        # If SCAN didn't find any keys but we know there are keys, try KEYS command as fallback
        if len(scanned_keys) == 0 and total_keys > 0:
            logging.warning(
                f"SCAN found no keys but DBSIZE reports {total_keys} keys. Trying KEYS fallback."
            )
            try:
                # Use KEYS * as fallback (only for small datasets to avoid blocking)
                if total_keys <= 1000:
                    all_keys = redis_conn.keys("*")
                    scanned_keys = (
                        all_keys[:sample_size] if not scan_all_keys else all_keys
                    )
                    logging.info(f"KEYS fallback found {len(scanned_keys)} keys")
                else:
                    logging.error(
                        f"Cannot use KEYS fallback for large dataset ({total_keys} keys)"
                    )
            except Exception as e:
                logging.error(f"KEYS fallback failed: {e}")

        # Final check: if we still have no keys, return early
        if len(scanned_keys) == 0:
            logging.error(
                f"No keys found for encoding detection despite DBSIZE={total_keys}"
            )
            return {
                "encoding": "unknown",
                "confidence": 0.0,
                "sample_size": 0,
                "total_keys": total_keys,
                "encoding_distribution": {},
                "is_complete_scan": scan_all_keys,
                "error": "No keys found by SCAN or KEYS commands",
            }

        # Get encoding for each sampled key
        successful_encodings = 0
        for i, key in enumerate(scanned_keys):
            try:
                # Query the key's encoding via OBJECT ENCODING through the redis-py client
                encoding = redis_conn.object("ENCODING", key)
                if isinstance(encoding, bytes):
                    encoding = encoding.decode("utf-8")
                elif encoding is None:
                    # Key might have expired or been deleted
                    logging.debug(
                        f"Key '{key}' returned None encoding (key may have expired)"
                    )
                    continue

                encoding_counts[encoding] = encoding_counts.get(encoding, 0) + 1
                successful_encodings += 1

                # Log first few keys for debugging
                if i < 3:
                    logging.debug(f"Key '{key}' has encoding '{encoding}'")

            except Exception as e:
                logging.warning(f"Failed to get encoding for key {key}: {e}")
                continue

        logging.debug(
            f"Successfully got encoding for {successful_encodings}/{len(scanned_keys)} keys"
        )

        if not encoding_counts:
            logging.warning(
                f"No object encodings detected! Scanned {len(scanned_keys)} keys, successful encodings: {successful_encodings}"
            )
            return {
                "encoding": "unknown",
                "confidence": 0.0,
                "sample_size": 0,
                "total_keys": total_keys,
                "encoding_distribution": {},
                "is_complete_scan": scan_all_keys,
            }

        # Determine dominant encoding
        total_sampled = sum(encoding_counts.values())
        dominant_encoding = max(encoding_counts.items(), key=lambda x: x[1])
        confidence = dominant_encoding[1] / total_sampled

        # Calculate encoding distribution percentages
        encoding_distribution = {
            enc: (count / total_sampled) * 100 for enc, count in encoding_counts.items()
        }

        result = {
            "encoding": dominant_encoding[0],
            "confidence": confidence,
            "sample_size": total_sampled,
            "total_keys": total_keys,
            "encoding_distribution": encoding_distribution,
            "is_complete_scan": scan_all_keys,
        }

        scan_type = "complete scan" if scan_all_keys else "sample"
        logging.info(
            f"Object encoding analysis ({scan_type}): {dominant_encoding[0]} ({confidence*100:.1f}% confidence)"
        )
        if len(encoding_counts) > 1:
            logging.info(f"Encoding distribution: {encoding_distribution}")

        return result

    except Exception as e:
        logging.error(f"Failed to detect object encoding: {e}")
        return {
            "encoding": "error",
            "confidence": 0.0,
            "sample_size": 0,
            "total_keys": 0,
            "encoding_distribution": {},
            "error": str(e),
        }
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
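# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the packaged file): how the encoding
# detection helper above could be exercised. The wrapper name used here is
# hypothetical; only the shape of the returned dict (encoding, confidence,
# sample_size, total_keys, encoding_distribution) comes from the code above.
#
#   import redis
#   conn = redis.StrictRedis(host="localhost", port=6379)
#   info = detect_object_encoding(conn)  # hypothetical name for the helper above
#   print(info["encoding"], f"{info['confidence'] * 100:.1f}% confidence")
#   print(info["encoding_distribution"])
# ---------------------------------------------------------------------------
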
def run_multiple_clients(
    benchmark_config,
    docker_client,
    temporary_dir_client,
    client_mnt_point,
    benchmark_tool_workdir,
    client_cpuset_cpus,
    port,
    host,
    password,
    oss_cluster_api_enabled,
    tls_enabled,
    tls_skip_verify,
    test_tls_cert,
    test_tls_key,
    test_tls_cacert,
    resp_version,
    override_memtier_test_time,
    override_test_runs,
    unix_socket,
    args,
):
    """
    Run multiple client configurations simultaneously and aggregate results.
    Returns aggregated stdout and list of individual results.
    """
    client_configs = extract_client_configs(benchmark_config)
    client_images = extract_client_container_images(benchmark_config)
    client_tools = extract_client_tools(benchmark_config)

    if not client_configs:
        raise ValueError("No client configurations found")

    containers = []
    results = []

    # Start all containers simultaneously (detached)
    for client_index, (client_config, client_tool, client_image) in enumerate(
        zip(client_configs, client_tools, client_images)
    ):
        try:
            local_benchmark_output_filename = f"benchmark_output_{client_index}.json"

            # Prepare benchmark command for this client
            if "memtier_benchmark" in client_tool:
                # Set benchmark path based on local install option
                if args.benchmark_local_install:
                    full_benchmark_path = getattr(
                        args, "memtier_bin_path", "memtier_benchmark"
                    )
                else:
                    full_benchmark_path = f"/usr/local/bin/{client_tool}"

                (
                    _,
                    benchmark_command_str,
                    arbitrary_command,
                ) = prepare_memtier_benchmark_parameters(
                    client_config,
                    full_benchmark_path,
                    port,
                    host,
                    password,
                    local_benchmark_output_filename,
                    oss_cluster_api_enabled,
                    tls_enabled,
                    tls_skip_verify,
                    test_tls_cert,
                    test_tls_key,
                    test_tls_cacert,
                    resp_version,
                    override_memtier_test_time,
                    override_test_runs,
                    unix_socket,
                )
            elif "pubsub-sub-bench" in client_tool:
                (
                    _,
                    benchmark_command_str,
                    arbitrary_command,
                ) = prepare_pubsub_sub_bench_parameters(
                    client_config,
                    client_tool,
                    port,
                    host,
                    password,
                    local_benchmark_output_filename,
                    oss_cluster_api_enabled,
                    tls_enabled,
                    tls_skip_verify,
                    test_tls_cert,
                    test_tls_key,
                    test_tls_cacert,
                    resp_version,
                    override_memtier_test_time,
                    unix_socket,
                    None,  # username
                )
            elif "vector-db-benchmark" in client_tool:
                (
                    _,
                    benchmark_command_str,
                    arbitrary_command,
                    client_env_vars,
                ) = prepare_vector_db_benchmark_parameters(
                    client_config,
                    client_tool,
                    port,
                    host,
                    password,
                    local_benchmark_output_filename,
                    oss_cluster_api_enabled,
                    tls_enabled,
                    tls_skip_verify,
                    test_tls_cert,
                    test_tls_key,
                    test_tls_cacert,
                    resp_version,
                    override_memtier_test_time,
                    unix_socket,
                    None,  # username
                )
            else:
                # Handle other benchmark tools
                (
                    benchmark_command,
                    benchmark_command_str,
                ) = prepare_benchmark_parameters(
                    {**benchmark_config, "clientconfig": client_config},
                    client_tool,
                    port,
                    host,
                    local_benchmark_output_filename,
                    False,
                    benchmark_tool_workdir,
                    False,
                )

            # Calculate container timeout
            container_timeout = 300  # 5 minutes default
            # Use new timeout_buffer argument, fallback to container_timeout_buffer for backward compatibility
            buffer_timeout = getattr(
                args, "timeout_buffer", getattr(args, "container_timeout_buffer", 60)
            )
            if "test-time" in benchmark_command_str:
                # Try to extract test time and add buffer
                import re

                # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench)
                test_time_match = re.search(
                    r"--?test-time[=\s]+(\d+)", benchmark_command_str
                )
                if test_time_match:
                    test_time = int(test_time_match.group(1))
                    container_timeout = test_time + buffer_timeout
                    logging.info(
                        f"Client {client_index}: Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
                    )

            logging.info(
                f"Starting client {client_index} with docker image {client_image} (cpuset={client_cpuset_cpus}) with args: {benchmark_command_str}"
            )

            # Start container (detached)
            # Set working directory based on tool
            working_dir = benchmark_tool_workdir
            if "vector-db-benchmark" in client_tool:
                working_dir = "/app"  # vector-db-benchmark needs to run from /app

            # Prepare container arguments
            volumes = {
                temporary_dir_client: {
                    "bind": client_mnt_point,
                    "mode": "rw",
                },
            }

            # For vector-db-benchmark, also mount the results directory
            if "vector-db-benchmark" in client_tool:
                volumes[temporary_dir_client] = {
                    "bind": "/app/results",
                    "mode": "rw",
                }

            container_kwargs = {
                "image": client_image,
                "volumes": volumes,
                "auto_remove": False,
                "privileged": True,
                "working_dir": working_dir,
                "command": benchmark_command_str,
                "network_mode": "host",
                "detach": True,
                "cpuset_cpus": client_cpuset_cpus,
            }

            # Only add user for non-vector-db-benchmark tools to avoid permission issues
            if "vector-db-benchmark" not in client_tool:
                container_kwargs["user"] = f"{os.getuid()}:{os.getgid()}"

            # Add environment variables for vector-db-benchmark
            if "vector-db-benchmark" in client_tool:
                try:
                    container_kwargs["environment"] = client_env_vars
                except NameError:
                    # client_env_vars not defined, skip environment variables
                    pass

            container = docker_client.containers.run(**container_kwargs)

            containers.append(
                {
                    "container": container,
                    "client_index": client_index,
                    "client_tool": client_tool,
                    "client_image": client_image,
                    "benchmark_command_str": benchmark_command_str,
                    "timeout": container_timeout,
                }
            )

        except Exception as e:
            error_msg = f"Error starting client {client_index}: {e}"
            logging.error(error_msg)
            logging.error(f"Image: {client_image}, Tool: {client_tool}")
            logging.error(f"Command: {benchmark_command_str}")
            # Fail fast on container startup errors
            raise RuntimeError(f"Failed to start client {client_index}: {e}")

    # Wait for all containers to complete
    logging.info(f"Waiting for {len(containers)} containers to complete...")

    for container_info in containers:
        container = container_info["container"]
        client_index = container_info["client_index"]
        client_tool = container_info["client_tool"]
        client_image = container_info["client_image"]
        benchmark_command_str = container_info["benchmark_command_str"]

        try:
            # Wait for container to complete
            exit_code = container.wait(timeout=container_info["timeout"])
            client_stdout = container.logs().decode("utf-8")

            # Check if container succeeded
            if exit_code.get("StatusCode", 1) != 0:
                logging.error(
                    f"Client {client_index} failed with exit code: {exit_code}"
                )
                logging.error(f"Client {client_index} stdout/stderr:")
                logging.error(client_stdout)
                # Fail fast on container execution errors
                raise RuntimeError(
                    f"Client {client_index} ({client_tool}) failed with exit code {exit_code}"
                )

            logging.info(
                f"Client {client_index} completed successfully with exit code: {exit_code}"
            )

            results.append(
                {
                    "client_index": client_index,
                    "stdout": client_stdout,
                    "config": client_configs[client_index],
                    "tool": client_tool,
                    "image": client_image,
                }
            )

        except Exception as e:
            # Get logs even if wait failed
            try:
                client_stdout = container.logs().decode("utf-8")
                logging.error(f"Client {client_index} logs:")
                logging.error(client_stdout)
            except:
                logging.error(f"Could not retrieve logs for client {client_index}")

            raise RuntimeError(f"Client {client_index} ({client_tool}) failed: {e}")

        finally:
            # Clean up container
            try:
                container.remove(force=True)
            except Exception as cleanup_error:
                logging.warning(f"Client {client_index} cleanup error: {cleanup_error}")

    logging.info(f"Successfully completed {len(containers)} client configurations")

    # Aggregate results by reading JSON output files
    aggregated_stdout = ""
    successful_results = [r for r in results if "error" not in r]

    if successful_results:
        # Try to read and aggregate JSON output files

        aggregated_json = {}
        memtier_json = None
        pubsub_json = None
        vector_json = None

        for result in successful_results:
            client_index = result["client_index"]
            tool = result["tool"]

            # Look for JSON output file
            json_filename = f"benchmark_output_{client_index}.json"
            json_filepath = os.path.join(temporary_dir_client, json_filename)

            if os.path.exists(json_filepath):
                try:
                    with open(json_filepath, "r") as f:
                        client_json = json.load(f)

                    if "memtier_benchmark" in tool:
                        # Store memtier JSON
                        memtier_json = client_json
                        logging.info(
                            f"Successfully read memtier JSON output from client {client_index}"
                        )
                    elif "pubsub-sub-bench" in tool:
                        # Store pubsub JSON
                        pubsub_json = client_json
                        logging.info(
                            f"Successfully read pubsub-sub-bench JSON output from client {client_index}"
                        )
                    elif "vector-db-benchmark" in tool:
                        # For vector-db-benchmark, look for summary JSON file
                        summary_files = [
                            f
                            for f in os.listdir(temporary_dir_client)
                            if f.endswith("-summary.json")
                        ]
                        if summary_files:
                            summary_filepath = os.path.join(
                                temporary_dir_client, summary_files[0]
                            )
                            try:
                                with open(summary_filepath, "r") as f:
                                    vector_json = json.load(f)
                                logging.info(
                                    f"Successfully read vector-db-benchmark JSON output from {summary_files[0]}"
                                )
                            except Exception as e:
                                logging.warning(
                                    f"Failed to read vector-db-benchmark JSON from {summary_files[0]}: {e}"
                                )
                        else:
                            logging.warning(
                                f"No vector-db-benchmark summary JSON file found for client {client_index}"
                            )

                    logging.info(
                        f"Successfully read JSON output from client {client_index} ({tool})"
                    )

                except Exception as e:
                    logging.warning(
                        f"Failed to read JSON from client {client_index}: {e}"
                    )
                    # Fall back to stdout
                    pass
            else:
                logging.warning(
                    f"JSON output file not found for client {client_index}: {json_filepath}"
                )

        # Merge JSON outputs from all tools
        if memtier_json and pubsub_json and vector_json:
            # Use memtier as base and add other metrics
            aggregated_json = memtier_json.copy()
            aggregated_json.update(pubsub_json)
            aggregated_json.update(vector_json)
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info(
                "Using merged JSON results from memtier, pubsub-sub-bench, and vector-db-benchmark clients"
            )
        elif memtier_json and pubsub_json:
            # Use memtier as base and add pubsub metrics
            aggregated_json = memtier_json.copy()
            aggregated_json.update(pubsub_json)
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info(
                "Using merged JSON results from memtier and pubsub-sub-bench clients"
            )
        elif memtier_json and vector_json:
            # Use memtier as base and add vector metrics
            aggregated_json = memtier_json.copy()
            aggregated_json.update(vector_json)
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info(
                "Using merged JSON results from memtier and vector-db-benchmark clients"
            )
        elif pubsub_json and vector_json:
            # Use pubsub as base and add vector metrics
            aggregated_json = pubsub_json.copy()
            aggregated_json.update(vector_json)
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info(
                "Using merged JSON results from pubsub-sub-bench and vector-db-benchmark clients"
            )
        elif memtier_json:
            # Only memtier available
            aggregated_json = memtier_json
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info("Using JSON results from memtier client only")
        elif pubsub_json:
            # Only pubsub available
            aggregated_json = pubsub_json
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info("Using JSON results from pubsub-sub-bench client only")
        elif vector_json:
            # Only vector-db-benchmark available
            aggregated_json = vector_json
            aggregated_stdout = json.dumps(aggregated_json, indent=2)
            logging.info("Using JSON results from vector-db-benchmark client only")
        else:
            # Fall back to concatenated stdout
            aggregated_stdout = "\n".join([r["stdout"] for r in successful_results])
            logging.warning(
                "No JSON results found, falling back to concatenated stdout"
            )

    return aggregated_stdout, results

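# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the packaged file): the aggregation above
# merges per-tool JSON via dict.copy() followed by dict.update(), so when two
# tools report the same top-level key the later update wins (pubsub overrides
# memtier, vector-db-benchmark overrides both). With made-up keys:
#
#   memtier_json = {"ALL STATS": {}, "runtime-s": 60}
#   pubsub_json = {"MessageRate": 12000, "runtime-s": 61}
#   merged = memtier_json.copy()
#   merged.update(pubsub_json)   # merged["runtime-s"] is now 61
# ---------------------------------------------------------------------------
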
def main():
    # Register signal handler for graceful exit on Ctrl+C
    signal.signal(signal.SIGINT, signal_handler)

    _, _, project_version = populate_with_poetry_data()
    project_name_suffix = "redis-benchmarks-spec-client-runner"
    project_name = f"{project_name_suffix} (solely client)"
    parser = create_client_runner_args(
        get_version_string(project_name, project_version)
    )
    args = parser.parse_args()

    run_client_runner_logic(args, project_name, project_name_suffix, project_version)

def run_client_runner_logic(args, project_name, project_name_suffix, project_version):
    if args.logname is not None:
        print(f"Writting log to {args.logname}")
        logging.basicConfig(
            filename=args.logname,
            filemode="a",
            format=LOG_FORMAT,
            datefmt=LOG_DATEFMT,
            level=LOG_LEVEL,
        )
    else:
        # logging settings
        logging.basicConfig(
            format=LOG_FORMAT,
            level=LOG_LEVEL,
            datefmt=LOG_DATEFMT,
        )
    logging.info(get_version_string(project_name, project_version))
    testsuite_spec_files = extract_testsuites(args)
    datasink_conn = None
    if args.datasink_push_results_redistimeseries:
        logging.info(
            "Checking redistimeseries datasink connection is available at: {}:{} to push the timeseries data".format(
                args.datasink_redistimeseries_host, args.datasink_redistimeseries_port
            )
        )
        try:
            datasink_conn = redis.StrictRedis(
                host=args.datasink_redistimeseries_host,
                port=args.datasink_redistimeseries_port,
                decode_responses=True,
                password=args.datasink_redistimeseries_pass,
                username=args.datasink_redistimeseries_user,
                health_check_interval=REDIS_HEALTH_CHECK_INTERVAL,
                socket_connect_timeout=REDIS_SOCKET_TIMEOUT,
                socket_keepalive=True,
            )
            datasink_conn.ping()
            datasink_conn.client_setname(project_name_suffix)
        except redis.exceptions.ConnectionError as e:
            logging.error(
                "Unable to connect to redis available at: {}:{}".format(
                    args.datasink_redistimeseries_host,
                    args.datasink_redistimeseries_port,
                )
            )
            logging.error(f"Error message {e.__str__()}")
            exit(1)
    running_platform = args.platform_name
    tls_enabled = args.tls
    tls_skip_verify = args.tls_skip_verify
    tls_cert = args.cert
    tls_key = args.key
    tls_cacert = args.cacert
    resp_version = args.resp
    client_aggregated_results_folder = args.client_aggregated_results_folder
    preserve_temporary_client_dirs = args.preserve_temporary_client_dirs

    docker_client = docker.from_env()
    home = str(Path.home())
    profilers_list = []
    profilers_enabled = args.enable_profilers
    if profilers_enabled:
        profilers_list = args.profilers.split(",")
        res = check_compatible_system_and_kernel_and_prepare_profile(args)
        if res is False:
            logging.error(
                "Requested for the following profilers to be enabled but something went wrong: {}.".format(
                    " ".join(profilers_list)
                )
            )
            exit(1)
    override_memtier_test_time = args.override_memtier_test_time
    if override_memtier_test_time > 0:
        logging.info(
            "Overriding memtier benchmark --test-time to {} seconds".format(
                override_memtier_test_time
            )
        )
    logging.info("Running the benchmark specs.")
    process_self_contained_coordinator_stream(
        args,
        args.datasink_push_results_redistimeseries,
        docker_client,
        home,
        None,
        datasink_conn,
        testsuite_spec_files,
        {},
        running_platform,
        profilers_enabled,
        profilers_list,
        tls_enabled,
        tls_skip_verify,
        tls_cert,
        tls_key,
        tls_cacert,
        client_aggregated_results_folder,
        preserve_temporary_client_dirs,
        resp_version,
        override_memtier_test_time,
    )

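# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the packaged file): the datasink check in
# run_client_runner_logic above boils down to a redis-py connection plus PING
# and CLIENT SETNAME. A standalone equivalent, with hypothetical host/port:
#
#   import redis
#   ds = redis.StrictRedis(host="localhost", port=6379, decode_responses=True)
#   ds.ping()
#   ds.client_setname("redis-benchmarks-spec-client-runner")
# ---------------------------------------------------------------------------
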
def prepare_memtier_benchmark_parameters(
    clientconfig,
    full_benchmark_path,
    port,
    server,
    password,
    local_benchmark_output_filename,
    oss_cluster_api_enabled=False,
    tls_enabled=False,
    tls_skip_verify=False,
    tls_cert=None,
    tls_key=None,
    tls_cacert=None,
    resp_version=None,
    override_memtier_test_time=0,
    override_test_runs=1,
    unix_socket="",
):
    arbitrary_command = False
    benchmark_command = [
        full_benchmark_path,
        "--json-out-file",
        local_benchmark_output_filename,
    ]
    if unix_socket != "":
        benchmark_command.extend(["--unix-socket", unix_socket])
        logging.info(f"Using UNIX SOCKET to connect {unix_socket}")
    else:
        benchmark_command.extend(
            [
                "--port",
                f"{port}",
                "--server",
                f"{server}",
            ]
        )
    if password is not None:
        benchmark_command.extend(["--authenticate", password])
    if tls_enabled:
        benchmark_command.append("--tls")
        if tls_cert is not None and tls_cert != "":
            benchmark_command.extend(["--cert", tls_cert])
        if tls_key is not None and tls_key != "":
            benchmark_command.extend(["--key", tls_key])
        if tls_cacert is not None and tls_cacert != "":
            benchmark_command.extend(["--cacert", tls_cacert])
        if tls_skip_verify:
            benchmark_command.append("--tls-skip-verify")

    if resp_version:
        tool = clientconfig["tool"]
        if tool == "memtier_benchmark":
            if resp_version == "3":
                benchmark_command.extend(["--protocol", "resp{}".format(resp_version)])
        elif tool == "redis-benchmark":
            if resp_version == "3":
                benchmark_command.append("-3")

    if oss_cluster_api_enabled is True:
        benchmark_command.append("--cluster-mode")
    logging.info(f"Preparing the benchmark parameters. {benchmark_command}.")
    benchmark_command_str = " ".join(benchmark_command)
    if "arguments" in clientconfig:
        benchmark_command_str = benchmark_command_str + " " + clientconfig["arguments"]

    if "--command" in benchmark_command_str:
        arbitrary_command = True

    if override_test_runs > 1:
        benchmark_command_str = re.sub(
            "--run-count\\s\\d+",
            "--run-count={}".format(override_test_runs),
            benchmark_command_str,
        )
        benchmark_command_str = re.sub(
            "--run-count=\\d+",
            "--run-count={}".format(override_test_runs),
            benchmark_command_str,
        )
        benchmark_command_str = re.sub(
            '--run-count="\\d+"',
            "--run-count={}".format(override_test_runs),
            benchmark_command_str,
        )
        # short
        benchmark_command_str = re.sub(
            "-x\\s\\d+",
            "-x={}".format(override_test_runs),
            benchmark_command_str,
        )
        benchmark_command_str = re.sub(
            "-x=\\d+",
            "-x={}".format(override_test_runs),
            benchmark_command_str,
        )
        benchmark_command_str = re.sub(
            '-x="\\d+"',
            "-x={}".format(override_test_runs),
            benchmark_command_str,
        )
        if (
            len(
                re.findall(
                    "--run-count={}".format(override_test_runs),
                    benchmark_command_str,
                )
            )
            == 0
            and len(
                re.findall(
                    "-x={}".format(override_test_runs),
                    benchmark_command_str,
                )
            )
            == 0
        ):
            logging.info("adding --run-count option to benchmark run. ")
            benchmark_command_str = (
                benchmark_command_str
                + " "
                + "--run-count={}".format(override_test_runs)
            )

    if override_memtier_test_time > 0:
        benchmark_command_str = re.sub(
            "--test-time\\s\\d+",
            "--test-time={}".format(override_memtier_test_time),
            benchmark_command_str,
        )
        benchmark_command_str = re.sub(
            "--test-time=\\d+",
            "--test-time={}".format(override_memtier_test_time),
            benchmark_command_str,
        )
        benchmark_command_str = re.sub(
            '--test-time="\\d+"',
            "--test-time={}".format(override_memtier_test_time),
            benchmark_command_str,
        )

    return None, benchmark_command_str, arbitrary_command

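# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the packaged file): for a hypothetical
# clientconfig such as
#   {"tool": "memtier_benchmark", "arguments": "--test-time 60 --pipeline 10"}
# and host/port localhost:6379, the function above produces a command string
# roughly like
#   /usr/local/bin/memtier_benchmark --json-out-file benchmark_output_0.json
#       --port 6379 --server localhost --test-time 60 --pipeline 10
# (the exact string depends on the TLS, cluster and override options passed).
# ---------------------------------------------------------------------------
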
def prepare_vector_db_benchmark_parameters(
    clientconfig,
    full_benchmark_path,
    port,
    server,
    password,
    local_benchmark_output_filename,
    oss_cluster_api_enabled=False,
    tls_enabled=False,
    tls_skip_verify=False,
    tls_cert=None,
    tls_key=None,
    tls_cacert=None,
    resp_version=None,
    override_test_time=0,
    unix_socket="",
    username=None,
):
    """
    Prepare vector-db-benchmark command parameters
    """
    arbitrary_command = False

    benchmark_command = [
        "/app/run.py",
        "--host",
        f"{server}",
    ]

    # Add port as environment variable (vector-db-benchmark uses env vars)
    env_vars = {}
    if port is not None:
        env_vars["REDIS_PORT"] = str(port)
    if password is not None:
        env_vars["REDIS_AUTH"] = password
    if username is not None:
        env_vars["REDIS_USER"] = username

    # Add engines parameter
    engines = clientconfig.get("engines", "vectorsets-fp32-default")
    benchmark_command.extend(["--engines", engines])

    # Add datasets parameter
    datasets = clientconfig.get("datasets", "random-100")
    benchmark_command.extend(["--datasets", datasets])

    # Add other optional parameters
    if "parallels" in clientconfig:
        benchmark_command.extend(["--parallels", str(clientconfig["parallels"])])

    if "queries" in clientconfig:
        benchmark_command.extend(["--queries", str(clientconfig["queries"])])

    if "timeout" in clientconfig:
        benchmark_command.extend(["--timeout", str(clientconfig["timeout"])])

    # Add custom arguments if specified
    if "arguments" in clientconfig:
        benchmark_command_str = (
            " ".join(benchmark_command) + " " + clientconfig["arguments"]
        )
    else:
        benchmark_command_str = " ".join(benchmark_command)

    return benchmark_command, benchmark_command_str, arbitrary_command, env_vars

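# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the packaged file): for a hypothetical
# clientconfig such as
#   {"engines": "vectorsets-fp32-default", "datasets": "random-100", "queries": 1000}
# the function above returns, roughly,
#   command string: /app/run.py --host localhost --engines vectorsets-fp32-default
#                   --datasets random-100 --queries 1000
#   env_vars: {"REDIS_PORT": "6379"} (plus REDIS_AUTH / REDIS_USER when set)
# ---------------------------------------------------------------------------
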
def prepare_pubsub_sub_bench_parameters(
    clientconfig,
    full_benchmark_path,
    port,
    server,
    password,
    local_benchmark_output_filename,
    oss_cluster_api_enabled=False,
    tls_enabled=False,
    tls_skip_verify=False,
    tls_cert=None,
    tls_key=None,
    tls_cacert=None,
    resp_version=None,
    override_test_time=0,
    unix_socket="",
    username=None,
):
    """
    Prepare pubsub-sub-bench command parameters
    """
    arbitrary_command = False

    benchmark_command = [
        # full_benchmark_path,
        "-json-out-file",
        local_benchmark_output_filename,
    ]

    # Connection parameters
    if unix_socket != "":
        # pubsub-sub-bench doesn't support unix sockets directly
        # Fall back to host/port
        logging.warning(
            "pubsub-sub-bench doesn't support unix sockets, using host/port"
        )
        benchmark_command.extend(["-host", server, "-port", str(port)])
    else:
        benchmark_command.extend(["-host", server, "-port", str(port)])

    # Authentication
    if username and password:
        # ACL style authentication
        benchmark_command.extend(["-user", username, "-a", password])
    elif password:
        # Password-only authentication
        benchmark_command.extend(["-a", password])

    # TLS support (if the tool supports it in future versions)
    if tls_enabled:
        logging.warning("pubsub-sub-bench TLS support not implemented yet")

    # RESP version
    if resp_version:
        if resp_version == "3":
            benchmark_command.extend(["-resp", "3"])
        elif resp_version == "2":
            benchmark_command.extend(["-resp", "2"])

    # Cluster mode
    if oss_cluster_api_enabled:
        benchmark_command.append("-oss-cluster-api-distribute-subscribers")

    logging.info(f"Preparing pubsub-sub-bench parameters: {benchmark_command}")
    benchmark_command_str = " ".join(benchmark_command)

    # Append user-defined arguments from YAML
    user_arguments = ""
    if "arguments" in clientconfig:
        user_arguments = clientconfig["arguments"]

    # Test time override - handle after user arguments to avoid conflicts
    if override_test_time and override_test_time > 0:
        # Remove any existing -test-time from user arguments
        import re

        user_arguments = re.sub(r"-test-time\s+\d+", "", user_arguments)
        # Add our override test time
        benchmark_command_str = (
            benchmark_command_str + " -test-time " + str(override_test_time)
        )
        logging.info(f"Applied test-time override: {override_test_time}s")

    # Add cleaned user arguments
    if user_arguments.strip():
        benchmark_command_str = benchmark_command_str + " " + user_arguments.strip()

    return benchmark_command, benchmark_command_str, arbitrary_command

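# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the packaged file): for a hypothetical
# clientconfig such as {"arguments": "-clients 8 -test-time 30"} and
# override_test_time=60, the function above strips the user-supplied
# -test-time and appends the override, yielding roughly
#   -json-out-file benchmark_output_0.json -host localhost -port 6379
#       -test-time 60 -clients 8
# ---------------------------------------------------------------------------
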
def process_self_contained_coordinator_stream(
    args,
    datasink_push_results_redistimeseries,
    docker_client,
    home,
    newTestInfo,
    datasink_conn,
    testsuite_spec_files,
    topologies_map,
    running_platform,
    profilers_enabled=False,
    profilers_list=[],
    tls_enabled=False,
    tls_skip_verify=False,
    tls_cert=None,
    tls_key=None,
    tls_cacert=None,
    client_aggregated_results_folder="",
    preserve_temporary_client_dirs=False,
    resp_version=None,
    override_memtier_test_time=0,
    used_memory_check_fail=False,
):
    def delete_temporary_files(
        temporary_dir_client, full_result_path, benchmark_tool_global
    ):
        if preserve_temporary_client_dirs is True:
            logging.info(f"Preserving temporary client dir {temporary_dir_client}")
        else:
            if benchmark_tool_global and "redis-benchmark" in benchmark_tool_global:
                if full_result_path is not None:
                    os.remove(full_result_path)
                    logging.info("Removing temporary JSON file")
            shutil.rmtree(temporary_dir_client, ignore_errors=True)
            logging.info(f"Removing temporary client dir {temporary_dir_client}")

    overall_result = True
    results_matrix = []
    total_test_suite_runs = 0
    dry_run_count = 0
    dry_run_tests = []  # Track test names for dry run output
    memory_results = []  # Track memory results for memory comparison mode
    loaded_datasets = (
        set()
    )  # Track datasets that have been loaded (for memory comparison mode)
    dry_run = args.dry_run
    memory_comparison_only = args.memory_comparison_only
    dry_run_include_preload = args.dry_run_include_preload
    defaults_filename = args.defaults_filename
    override_test_runs = args.override_test_runs
    get_defaults_result = get_defaults(defaults_filename)
    # Handle variable number of return values from get_defaults
    if len(get_defaults_result) >= 3:
        default_metrics = get_defaults_result[2]
    else:
        default_metrics = []
        logging.warning(
            "get_defaults returned fewer values than expected, using empty default_metrics"
        )

    # For memory comparison mode, analyze datasets before starting
    if memory_comparison_only:
        unique_datasets = set()
        total_tests_with_datasets = 0

        logging.info("Analyzing datasets for memory comparison mode...")
        for test_file in testsuite_spec_files:
            if defaults_filename in test_file:
                continue
            try:
                with open(test_file, "r") as stream:
                    benchmark_config = yaml.safe_load(stream)

                if "dbconfig" in benchmark_config:
                    # Skip load tests (keyspacelen = 0) in memory comparison mode
                    keyspacelen = (
                        benchmark_config["dbconfig"]
                        .get("check", {})
                        .get("keyspacelen", None)
                    )
                    if keyspacelen is not None and keyspacelen == 0:
                        logging.debug(f"Skipping load test {test_file} (keyspacelen=0)")
                        continue

                    dataset_name = benchmark_config["dbconfig"].get("dataset_name")
                    if dataset_name:
                        unique_datasets.add(dataset_name)
                        total_tests_with_datasets += 1

            except Exception as e:
                logging.warning(f"Error analyzing {test_file}: {e}")

        logging.info(f"Memory comparison mode analysis:")
        logging.info(f" Total tests with datasets: {total_tests_with_datasets}")
        logging.info(f" Unique datasets to load: {len(unique_datasets)}")
        logging.info(
            f" Dataset ingestion savings: {total_tests_with_datasets - len(unique_datasets)} skipped loads"
        )
        logging.info(
            f" Load tests skipped: Tests with keyspacelen=0 are automatically excluded"
        )

        if len(unique_datasets) > 0:
            logging.info(f" Unique datasets: {', '.join(sorted(unique_datasets))}")

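    # -----------------------------------------------------------------------
    # Illustrative sketch (not part of the packaged file): the analysis above
    # only reads two paths from each test spec, dbconfig.dataset_name and
    # dbconfig.check.keyspacelen. A hypothetical spec counted as one unique
    # dataset:
    #
    #   dbconfig:
    #     dataset_name: "hash-1M-keys"
    #     check:
    #       keyspacelen: 1000000
    #
    # A spec with keyspacelen: 0 is treated as a load test and skipped.
    # -----------------------------------------------------------------------
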
for test_file in tqdm.tqdm(testsuite_spec_files):
|
|
1585
|
+
# Check if user requested exit via Ctrl+C
|
|
1586
|
+
if _exit_requested:
|
|
1587
|
+
logging.info("Exit requested by user. Stopping test execution.")
|
|
1588
|
+
break
|
|
1589
|
+
|
|
1590
|
+
if defaults_filename in test_file:
|
|
1591
|
+
continue
|
|
1592
|
+
client_containers = []
|
|
1593
|
+
|
|
1594
|
+
with open(test_file, "r") as stream:
|
|
1595
|
+
_, benchmark_config, test_name = get_final_benchmark_config(
|
|
1596
|
+
None, None, stream, ""
|
|
1597
|
+
)
|
|
1598
|
+
|
|
1599
|
+
# Use override topology if provided, otherwise use all topologies from config
|
|
1600
|
+
if hasattr(args, "override_topology") and args.override_topology:
|
|
1601
|
+
benchmark_topologies = [args.override_topology]
|
|
1602
|
+
logging.info(f"Using override topology: {args.override_topology}")
|
|
1603
|
+
else:
|
|
1604
|
+
benchmark_topologies = benchmark_config["redis-topologies"]
|
|
1605
|
+
logging.info(
|
|
1606
|
+
f"Running for a total of {len(benchmark_topologies)} topologies: {benchmark_topologies}"
|
|
1607
|
+
)
|
|
1608
|
+
|
|
1609
|
+
# Check if user requested exit via Ctrl+C
|
|
1610
|
+
if _exit_requested:
|
|
1611
|
+
logging.info(f"Exit requested by user. Skipping test {test_name}.")
|
|
1612
|
+
break
|
|
1613
|
+
|
|
1614
|
+
# Filter by command regex if specified
|
|
1615
|
+
if hasattr(args, "commands_regex") and args.commands_regex != ".*":
|
|
1616
|
+
if "tested-commands" in benchmark_config:
|
|
1617
|
+
tested_commands = benchmark_config["tested-commands"]
|
|
1618
|
+
command_regex_compiled = re.compile(
|
|
1619
|
+
args.commands_regex, re.IGNORECASE
|
|
1620
|
+
)
|
|
1621
|
+
command_match = False
|
|
1622
|
+
for command in tested_commands:
|
|
1623
|
+
if re.search(command_regex_compiled, command):
|
|
1624
|
+
command_match = True
|
|
1625
|
+
logging.info(
|
|
1626
|
+
f"Including test {test_name} (matches command: {command})"
|
|
1627
|
+
)
|
|
1628
|
+
break
|
|
1629
|
+
if not command_match:
|
|
1630
|
+
logging.info(
|
|
1631
|
+
f"Skipping test {test_name} (commands: {tested_commands} do not match regex: {args.commands_regex})"
|
|
1632
|
+
)
|
|
1633
|
+
continue
|
|
1634
|
+
else:
|
|
1635
|
+
logging.warning(
|
|
1636
|
+
f"Test {test_name} does not contain 'tested-commands' property. Cannot filter by commands."
|
|
1637
|
+
)
|
|
1638
|
+
|
|
1639
|
+
if tls_enabled:
|
|
1640
|
+
test_name = test_name + "-tls"
|
|
1641
|
+
logging.info(
|
|
1642
|
+
"Given that TLS is enabled, appending -tls to the testname: {}.".format(
|
|
1643
|
+
test_name
|
|
1644
|
+
)
|
|
1645
|
+
)
|
|
1646
|
+
|
|
1647
|
+
for topology_spec_name in benchmark_topologies:
|
|
1648
|
+
test_result = False
|
|
1649
|
+
benchmark_tool_global = ""
|
|
1650
|
+
full_result_path = None
|
|
1651
|
+
try:
|
|
1652
|
+
current_cpu_pos = args.cpuset_start_pos
|
|
1653
|
+
temporary_dir_client = tempfile.mkdtemp(dir=home)
|
|
1654
|
+
|
|
1655
|
+
# These will be updated after auto-detection
|
|
1656
|
+
tf_github_org = args.github_org
|
|
1657
|
+
tf_github_repo = args.github_repo
|
|
1658
|
+
tf_triggering_env = args.platform_name
|
|
1659
|
+
setup_type = args.setup_type
|
|
1660
|
+
priority_upper_limit = args.tests_priority_upper_limit
|
|
1661
|
+
priority_lower_limit = args.tests_priority_lower_limit
|
|
1662
|
+
git_hash = "NA"
|
|
1663
|
+
git_version = args.github_version
|
|
1664
|
+
build_variant_name = "NA"
|
|
1665
|
+
git_branch = None
|
|
1666
|
+
|
|
1667
|
+
# Parse URI if provided, otherwise use individual arguments
|
|
1668
|
+
if hasattr(args, "uri") and args.uri:
|
|
1669
|
+
uri_params = parse_redis_uri(args.uri)
|
|
1670
|
+
port = uri_params.get("port", args.db_server_port)
|
|
1671
|
+
host = uri_params.get("host", args.db_server_host)
|
|
1672
|
+
password = uri_params.get("password", args.db_server_password)
|
|
1673
|
+
# Override TLS setting from URI if specified
|
|
1674
|
+
if "tls_enabled" in uri_params:
|
|
1675
|
+
tls_enabled = uri_params["tls_enabled"]
|
|
1676
|
+
if tls_enabled:
|
|
1677
|
+
test_name = test_name + "-tls"
|
|
1678
|
+
logging.info(
|
|
1679
|
+
"TLS enabled via URI. Appending -tls to testname."
|
|
1680
|
+
)
|
|
1681
|
+
# Note: username and db are handled by redis-py automatically when using URI
|
|
1682
|
+
logging.info(
|
|
1683
|
+
f"Using connection parameters from URI: host={host}, port={port}, tls={tls_enabled}"
|
|
1684
|
+
)
|
|
1685
|
+
else:
|
|
1686
|
+
port = args.db_server_port
|
|
1687
|
+
host = args.db_server_host
|
|
1688
|
+
password = args.db_server_password
|
|
1689
|
+
logging.info(
|
|
1690
|
+
f"Using individual connection arguments: host={host}, port={port}"
|
|
1691
|
+
)
|
|
1692
|
+
|
|
1693
|
+
unix_socket = args.unix_socket
|
|
1694
|
+
oss_cluster_api_enabled = args.cluster_mode
|
|
1695
|
+
ssl_cert_reqs = "required"
|
|
1696
|
+
if tls_skip_verify:
|
|
1697
|
+
ssl_cert_reqs = None
|
|
1698
|
+
|
|
1699
|
+
# Create Redis connection - use URI if provided, otherwise use individual parameters
|
|
1700
|
+
if hasattr(args, "uri") and args.uri:
|
|
1701
|
+
# Use URI connection (redis-py handles URI parsing automatically)
|
|
1702
|
+
redis_params = {}
|
|
1703
|
+
|
|
1704
|
+
# Only add SSL parameters if TLS is enabled
|
|
1705
|
+
if tls_enabled:
|
|
1706
|
+
redis_params["ssl_cert_reqs"] = ssl_cert_reqs
|
|
1707
|
+
redis_params["ssl_check_hostname"] = False
|
|
1708
|
+
if tls_key is not None and tls_key != "":
|
|
1709
|
+
redis_params["ssl_keyfile"] = tls_key
|
|
1710
|
+
if tls_cert is not None and tls_cert != "":
|
|
1711
|
+
redis_params["ssl_certfile"] = tls_cert
|
|
1712
|
+
if tls_cacert is not None and tls_cacert != "":
|
|
1713
|
+
redis_params["ssl_ca_certs"] = tls_cacert
|
|
1714
|
+
|
|
1715
|
+
r = redis.StrictRedis.from_url(args.uri, **redis_params)
|
|
1716
|
+
logging.info(f"Connected to Redis using URI: {args.uri}")
|
|
1717
|
+
else:
|
|
1718
|
+
# Use individual connection parameters
|
|
1719
|
+
redis_params = {
|
|
1720
|
+
"host": host,
|
|
1721
|
+
"port": port,
|
|
1722
|
+
"password": password,
|
|
1723
|
+
"ssl": tls_enabled,
|
|
1724
|
+
"ssl_cert_reqs": ssl_cert_reqs,
|
|
1725
|
+
"ssl_check_hostname": False,
|
|
1726
|
+
}
|
|
1727
|
+
|
|
1728
|
+
# Only add SSL certificate parameters if they are provided
|
|
1729
|
+
if tls_enabled:
|
|
1730
|
+
if tls_key is not None and tls_key != "":
|
|
1731
|
+
redis_params["ssl_keyfile"] = tls_key
|
|
1732
|
+
if tls_cert is not None and tls_cert != "":
|
|
1733
|
+
redis_params["ssl_certfile"] = tls_cert
|
|
1734
|
+
if tls_cacert is not None and tls_cacert != "":
|
|
1735
|
+
redis_params["ssl_ca_certs"] = tls_cacert
|
|
1736
|
+
|
|
1737
|
+
r = redis.StrictRedis(**redis_params)
|
|
1738
|
+
logging.info(
|
|
1739
|
+
f"Connected to Redis using individual parameters: {host}:{port}"
|
|
1740
|
+
)
|
|
1741
|
+
setup_name = topology_spec_name
|
|
1742
|
+
r.ping()
|
|
1743
|
+
|
|
1744
|
+
# Auto-detect server information if not explicitly provided
|
|
1745
|
+
from redis_benchmarks_specification.__runner__.remote_profiling import (
|
|
1746
|
+
extract_server_info_for_args,
|
|
1747
|
+
extract_server_metadata_for_timeseries,
|
|
1748
|
+
)
|
|
1749
|
+
|
|
1750
|
+
detected_info = extract_server_info_for_args(r)
|
|
1751
|
+
server_metadata = extract_server_metadata_for_timeseries(r)
|
|
1752
|
+
|
|
1753
|
+
# Use detected values if arguments weren't explicitly provided
|
|
1754
|
+
github_org = args.github_org
|
|
1755
|
+
github_repo = args.github_repo
|
|
1756
|
+
|
|
1757
|
+
# Auto-detect github_org if it's the default value
|
|
1758
|
+
if (
|
|
1759
|
+
args.github_org == "redis"
|
|
1760
|
+
and detected_info["github_org"] != "redis"
|
|
1761
|
+
):
|
|
1762
|
+
github_org = detected_info["github_org"]
|
|
1763
|
+
logging.info(f"Auto-detected github_org: {github_org}")
|
|
1764
|
+
|
|
1765
|
+
# Auto-detect github_repo if it's the default value
|
|
1766
|
+
if (
|
|
1767
|
+
args.github_repo == "redis"
|
|
1768
|
+
and detected_info["github_repo"] != "redis"
|
|
1769
|
+
):
|
|
1770
|
+
github_repo = detected_info["github_repo"]
|
|
1771
|
+
logging.info(f"Auto-detected github_repo: {github_repo}")
|
|
1772
|
+
|
|
1773
|
+
# Auto-detect version if it's the default value
|
|
1774
|
+
if (
|
|
1775
|
+
args.github_version == "NA"
|
|
1776
|
+
and detected_info["github_version"] != "unknown"
|
|
1777
|
+
):
|
|
1778
|
+
git_version = detected_info["github_version"]
|
|
1779
|
+
logging.info(f"Auto-detected github_version: {git_version}")
|
|
1780
|
+
|
|
1781
|
+
# Auto-detect git hash if it's the default value
|
|
1782
|
+
if (git_hash is None or git_hash == "NA") and detected_info[
|
|
1783
|
+
"github_hash"
|
|
1784
|
+
] != "unknown":
|
|
1785
|
+
git_hash = detected_info["github_hash"]
|
|
1786
|
+
logging.info(f"Auto-detected git_hash: {git_hash}")
|
|
1787
|
+
|
|
1788
|
+
# Update tf_github_org and tf_github_repo with detected values
|
|
1789
|
+
tf_github_org = github_org
|
|
1790
|
+
tf_github_repo = github_repo
|
|
1791
|
+
redis_conns = [r]
|
|
1792
|
+
|
|
1793
|
+
if oss_cluster_api_enabled:
|
|
1794
|
+
redis_conns = []
|
|
1795
|
+
logging.info("updating redis connections from cluster slots")
|
|
1796
|
+
slots = r.cluster("slots")
|
|
1797
|
+
for slot in slots:
|
|
1798
|
+
# Master for slot range represented as nested networking information starts at pos 2
|
|
1799
|
+
# example: [0, 5460, [b'127.0.0.1', 30001, b'eccd21c2e7e9b7820434080d2e394cb8f2a7eff2', []]]
|
|
1800
|
+
slot_network_info = slot[2]
|
|
1801
|
+
prefered_endpoint = slot_network_info[0]
|
|
1802
|
+
prefered_port = slot_network_info[1]
|
|
1803
|
+
# Build shard connection parameters
|
|
1804
|
+
shard_params = {
|
|
1805
|
+
"host": prefered_endpoint,
|
|
1806
|
+
"port": prefered_port,
|
|
1807
|
+
"password": password,
|
|
1808
|
+
"ssl": tls_enabled,
|
|
1809
|
+
"ssl_cert_reqs": ssl_cert_reqs,
|
|
1810
|
+
"ssl_check_hostname": False,
|
|
1811
|
+
}
|
|
1812
|
+
|
|
1813
|
+
# Only add SSL certificate parameters if they are provided
|
|
1814
|
+
if tls_enabled:
|
|
1815
|
+
if tls_key is not None and tls_key != "":
|
|
1816
|
+
shard_params["ssl_keyfile"] = tls_key
|
|
1817
|
+
if tls_cert is not None and tls_cert != "":
|
|
1818
|
+
shard_params["ssl_certfile"] = tls_cert
|
|
1819
|
+
if tls_cacert is not None and tls_cacert != "":
|
|
1820
|
+
shard_params["ssl_ca_certs"] = tls_cacert
|
|
1821
|
+
|
|
1822
|
+
shard_conn = redis.StrictRedis(**shard_params)
|
|
1823
|
+
redis_conns.append(shard_conn)
|
|
1824
|
+
logging.info(
|
|
1825
|
+
"There are a total of {} shards".format(len(redis_conns))
|
|
1826
|
+
)
|
|
1827
|
+
setup_name = "oss-cluster"
|
|
1828
|
+
|
|
1829
|
+
redis_pids = []
|
|
1830
|
+
for conn in redis_conns:
|
|
1831
|
+
redis_info = conn.info()
|
|
1832
|
+
redis_pid = redis_info.get("process_id")
|
|
1833
|
+
if redis_pid is not None:
|
|
1834
|
+
redis_pids.append(redis_pid)
|
|
1835
|
+
else:
|
|
1836
|
+
logging.warning(
|
|
1837
|
+
"Redis process_id not found in INFO command, skipping PID collection for this connection"
|
|
1838
|
+
)
|
|
1839
|
+
|
|
1840
|
+
# Check if all tested commands are supported by this Redis instance
|
|
1841
|
+
supported_commands = get_supported_redis_commands(redis_conns)
|
|
1842
|
+
commands_supported, unsupported_commands = (
|
|
1843
|
+
check_test_command_support(benchmark_config, supported_commands)
|
|
1844
|
+
)
|
|
1845
|
+
|
|
1846
|
+
if not commands_supported:
|
|
1847
|
+
logging.warning(
|
|
1848
|
+
f"Skipping test {test_name} due to unsupported commands: {unsupported_commands}"
|
|
1849
|
+
)
|
|
1850
|
+
delete_temporary_files(
|
|
1851
|
+
temporary_dir_client=temporary_dir_client,
|
|
1852
|
+
full_result_path=None,
|
|
1853
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
1854
|
+
)
|
|
1855
|
+
continue
|
|
1856
|
+
|
|
1857
|
+
github_actor = f"{tf_triggering_env}-{running_platform}"
|
|
1858
|
+
dso = "redis-server"
|
|
1859
|
+
profilers_artifacts_matrix = []
|
|
1860
|
+
|
|
1861
|
+
collection_summary_str = ""
|
|
1862
|
+
if profilers_enabled:
|
|
1863
|
+
collection_summary_str = local_profilers_platform_checks(
|
|
1864
|
+
dso,
|
|
1865
|
+
github_actor,
|
|
1866
|
+
git_branch,
|
|
1867
|
+
tf_github_repo,
|
|
1868
|
+
git_hash,
|
|
1869
|
+
)
|
|
1870
|
+
logging.info(
|
|
1871
|
+
"Using the following collection summary string for profiler description: {}".format(
|
|
1872
|
+
collection_summary_str
|
|
1873
|
+
)
|
|
1874
|
+
)
|
|
1875
|
+
|
|
1876
|
+
ceil_client_cpu_limit = extract_client_cpu_limit(benchmark_config)
|
|
1877
|
+
client_cpuset_cpus, current_cpu_pos = generate_cpuset_cpus(
|
|
1878
|
+
ceil_client_cpu_limit, current_cpu_pos
|
|
1879
|
+
)
|
|
1880
|
+
if args.flushall_on_every_test_start:
|
|
1881
|
+
logging.info("Sending FLUSHALL to the DB")
|
|
1882
|
+
for conn in redis_conns:
|
|
1883
|
+
conn.flushall()
|
|
1884
|
+
|
|
1885
|
+
# Send MEMORY PURGE after FLUSHALL for memory comparison mode
|
|
1886
|
+
if memory_comparison_only:
|
|
1887
|
+
try:
|
|
1888
|
+
logging.info(
|
|
1889
|
+
"Sending MEMORY PURGE after FLUSHALL at test start"
|
|
1890
|
+
)
|
|
1891
|
+
for conn in redis_conns:
|
|
1892
|
+
conn.execute_command("MEMORY", "PURGE")
|
|
1893
|
+
except Exception as e:
|
|
1894
|
+
logging.warning(
|
|
1895
|
+
f"MEMORY PURGE failed after FLUSHALL at test start: {e}"
|
|
1896
|
+
)
|
|
1897
|
+
|
|
1898
|
+
benchmark_required_memory = get_benchmark_required_memory(
|
|
1899
|
+
benchmark_config
|
|
1900
|
+
)
|
|
1901
|
+
maxmemory = 0
|
|
1902
|
+
if args.maxmemory > 0:
|
|
1903
|
+
maxmemory = args.maxmemory
|
|
1904
|
+
else:
|
|
1905
|
+
for conn in redis_conns:
|
|
1906
|
+
maxmemory = maxmemory + get_maxmemory(conn)
|
|
1907
|
+
|
|
1908
|
+
# Only perform memory check if we have valid maxmemory information
|
|
1909
|
+
if maxmemory > 0 and benchmark_required_memory > maxmemory:
|
|
1910
|
+
logging.warning(
|
|
1911
|
+
"Skipping test {} given maxmemory of server is bellow the benchmark required memory: {} < {}".format(
|
|
1912
|
+
test_name, maxmemory, benchmark_required_memory
|
|
1913
|
+
)
|
|
1914
|
+
)
|
|
1915
|
+
delete_temporary_files(
|
|
1916
|
+
temporary_dir_client=temporary_dir_client,
|
|
1917
|
+
full_result_path=None,
|
|
1918
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
1919
|
+
)
|
|
1920
|
+
continue
|
|
1921
|
+
elif maxmemory == 0 and benchmark_required_memory > 0:
|
|
1922
|
+
logging.warning(
|
|
1923
|
+
"Cannot enforce memory checks for test {} - maxmemory information unavailable. Proceeding with test.".format(
|
|
1924
|
+
test_name
|
|
1925
|
+
)
|
|
1926
|
+
)
|
|
1927
|
+
|
|
1928
|
+
reset_commandstats(redis_conns)
|
|
1929
|
+
|
|
1930
|
+
client_mnt_point = "/mnt/client/"
|
|
1931
|
+
benchmark_tool_workdir = client_mnt_point
|
|
1932
|
+
|
|
1933
|
+
metadata = {}
|
|
1934
|
+
# Add server metadata from Redis INFO SERVER
|
|
1935
|
+
metadata.update(server_metadata)
|
|
1936
|
+
|
|
1937
|
+
# Add connection mode metadata
|
|
1938
|
+
if tls_enabled:
|
|
1939
|
+
metadata["conn_mode"] = "TLS"
|
|
1940
|
+
metadata["tls"] = "true"
|
|
1941
|
+
else:
|
|
1942
|
+
metadata["conn_mode"] = "PLAINTEXT"
|
|
1943
|
+
|
|
1944
|
+
# Add deployment metadata
|
|
1945
|
+
metadata["deployment_type"] = args.deployment_type
|
|
1946
|
+
metadata["deployment_name"] = args.deployment_name
|
|
1947
|
+
|
|
1948
|
+
# Add core count if specified
|
|
1949
|
+
if args.core_count is not None:
|
|
1950
|
+
metadata["core_count"] = str(args.core_count)
|
|
1951
|
+
|
|
1952
|
+
test_tls_cacert = None
|
|
1953
|
+
test_tls_cert = None
|
|
1954
|
+
test_tls_key = None
|
|
1955
|
+
if tls_enabled:
|
|
1956
|
+
if tls_cert is not None and tls_cert != "":
|
|
1957
|
+
_, test_tls_cert = cp_to_workdir(
|
|
1958
|
+
temporary_dir_client, tls_cert
|
|
1959
|
+
)
|
|
1960
|
+
if tls_cacert is not None and tls_cacert != "":
|
|
1961
|
+
_, test_tls_cacert = cp_to_workdir(
|
|
1962
|
+
temporary_dir_client, tls_cacert
|
|
1963
|
+
)
|
|
1964
|
+
if tls_key is not None and tls_key != "":
|
|
1965
|
+
_, test_tls_key = cp_to_workdir(
|
|
1966
|
+
temporary_dir_client, tls_key
|
|
1967
|
+
)
|
|
1968
|
+
priority = None
|
|
1969
|
+
if "priority" in benchmark_config:
|
|
1970
|
+
priority = benchmark_config["priority"]
|
|
1971
|
+
|
|
1972
|
+
if priority is not None:
|
|
1973
|
+
if priority > priority_upper_limit:
|
|
1974
|
+
logging.warning(
|
|
1975
|
+
"Skipping test {} giving the priority limit ({}) is above the priority value ({})".format(
|
|
1976
|
+
test_name, priority_upper_limit, priority
|
|
1977
|
+
)
|
|
1978
|
+
)
|
|
1979
|
+
delete_temporary_files(
|
|
1980
|
+
temporary_dir_client=temporary_dir_client,
|
|
1981
|
+
full_result_path=None,
|
|
1982
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
1983
|
+
)
|
|
1984
|
+
continue
|
|
1985
|
+
if priority < priority_lower_limit:
|
|
1986
|
+
logging.warning(
|
|
1987
|
+
"Skipping test {} giving the priority limit ({}) is bellow the priority value ({})".format(
|
|
1988
|
+
test_name, priority_lower_limit, priority
|
|
1989
|
+
)
|
|
1990
|
+
)
|
|
1991
|
+
delete_temporary_files(
|
|
1992
|
+
temporary_dir_client=temporary_dir_client,
|
|
1993
|
+
full_result_path=None,
|
|
1994
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
1995
|
+
)
|
|
1996
|
+
continue
|
|
1997
|
+
logging.debug(
|
|
1998
|
+
"Test {} priority ({}) is within the priority limit [{},{}]".format(
|
|
1999
|
+
test_name,
|
|
2000
|
+
priority,
|
|
2001
|
+
priority_lower_limit,
|
|
2002
|
+
priority_upper_limit,
|
|
2003
|
+
)
|
|
2004
|
+
)
|
|
2005
|
+
if "dbconfig" in benchmark_config:
|
|
2006
|
+
if "dataset" in benchmark_config["dbconfig"]:
|
|
2007
|
+
if args.run_tests_with_dataset is False:
|
|
2008
|
+
logging.warning(
|
|
2009
|
+
"Skipping test {} giving it implies dataset preload".format(
|
|
2010
|
+
test_name
|
|
2011
|
+
)
|
|
2012
|
+
)
|
|
2013
|
+
delete_temporary_files(
|
|
2014
|
+
temporary_dir_client=temporary_dir_client,
|
|
2015
|
+
full_result_path=None,
|
|
2016
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2017
|
+
)
|
|
2018
|
+
continue
|
|
2019
|
+
if "preload_tool" in benchmark_config["dbconfig"]:
|
|
2020
|
+
if args.skip_tests_with_preload_via_tool is True:
|
|
2021
|
+
logging.warning(
|
|
2022
|
+
"Skipping test {} giving it implies dataset preload via tool".format(
|
|
2023
|
+
test_name
|
|
2024
|
+
)
|
|
2025
|
+
)
|
|
2026
|
+
delete_temporary_files(
|
|
2027
|
+
temporary_dir_client=temporary_dir_client,
|
|
2028
|
+
full_result_path=None,
|
|
2029
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2030
|
+
)
|
|
2031
|
+
continue
|
|
2032
|
+
|
|
2033
|
+
# Check if we should skip tests without dataset
|
|
2034
|
+
has_dataset = "preload_tool" in benchmark_config.get(
|
|
2035
|
+
"dbconfig", {}
|
|
2036
|
+
)
|
|
2037
|
+
if args.skip_tests_without_dataset is True and not has_dataset:
|
|
2038
|
+
logging.warning(
|
|
2039
|
+
"Skipping test {} as it does not contain a dataset".format(
|
|
2040
|
+
test_name
|
|
2041
|
+
)
|
|
2042
|
+
)
|
|
2043
|
+
delete_temporary_files(
|
|
2044
|
+
temporary_dir_client=temporary_dir_client,
|
|
2045
|
+
full_result_path=None,
|
|
2046
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2047
|
+
)
|
|
2048
|
+
continue
|
|
2049
|
+
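For reference, the dbconfig fields inspected by the skip logic above look roughly like the fragment below (key names mirror the checks; the values are invented for illustration). The runner options run_tests_with_dataset, skip_tests_with_preload_via_tool and skip_tests_without_dataset then decide whether such a test is executed:

```python
# Hypothetical test-spec fragment, not taken from a real test file.
example_benchmark_config = {
    "dbconfig": {
        "dataset": "https://example.com/datasets/keyspace.rdb",
        "dataset_name": "keyspace-1M",
        "preload_tool": {
            "tool": "memtier_benchmark",
            "arguments": "--key-maximum 1000000 --ratio 1:0",
        },
        "check": {"keyspacelen": 1000000},
    },
}

has_dataset = "preload_tool" in example_benchmark_config.get("dbconfig", {})
print(has_dataset)  # True -> would not be skipped by --skip-tests-without-dataset
```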
|
|
2050
|
+
# For memory comparison mode, only run tests with dbconfig
|
|
2051
|
+
if (
|
|
2052
|
+
memory_comparison_only
|
|
2053
|
+
and "dbconfig" not in benchmark_config
|
|
2054
|
+
):
|
|
2055
|
+
logging.warning(
|
|
2056
|
+
"Skipping test {} in memory comparison mode as it does not contain dbconfig".format(
|
|
2057
|
+
test_name
|
|
2058
|
+
)
|
|
2059
|
+
)
|
|
2060
|
+
delete_temporary_files(
|
|
2061
|
+
temporary_dir_client=temporary_dir_client,
|
|
2062
|
+
full_result_path=None,
|
|
2063
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2064
|
+
)
|
|
2065
|
+
continue
|
|
2066
|
+
|
|
2067
|
+
if dry_run is True:
|
|
2068
|
+
dry_run_count = dry_run_count + 1
|
|
2069
|
+
dry_run_tests.append(test_name)
|
|
2070
|
+
delete_temporary_files(
|
|
2071
|
+
temporary_dir_client=temporary_dir_client,
|
|
2072
|
+
full_result_path=None,
|
|
2073
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2074
|
+
)
|
|
2075
|
+
continue
|
|
2076
|
+
if "dbconfig" in benchmark_config:
|
|
2077
|
+
if "preload_tool" in benchmark_config["dbconfig"]:
|
|
2078
|
+
# Check if this dataset has already been loaded (for memory comparison mode)
|
|
2079
|
+
dataset_name = benchmark_config["dbconfig"].get(
|
|
2080
|
+
"dataset_name"
|
|
2081
|
+
)
|
|
2082
|
+
skip_preload = False
|
|
2083
|
+
|
|
2084
|
+
if memory_comparison_only and dataset_name:
|
|
2085
|
+
if dataset_name in loaded_datasets:
|
|
2086
|
+
logging.info(
|
|
2087
|
+
f"Skipping preload for dataset '{dataset_name}' - already loaded"
|
|
2088
|
+
)
|
|
2089
|
+
skip_preload = True
|
|
2090
|
+
continue
|
|
2091
|
+
else:
|
|
2092
|
+
logging.info(
|
|
2093
|
+
f"Loading dataset '{dataset_name}' for the first time"
|
|
2094
|
+
)
|
|
2095
|
+
loaded_datasets.add(dataset_name)
|
|
2096
|
+
|
|
2097
|
+
if not skip_preload:
|
|
2098
|
+
# Get timeout buffer for preload
|
|
2099
|
+
buffer_timeout = getattr(
|
|
2100
|
+
args,
|
|
2101
|
+
"timeout_buffer",
|
|
2102
|
+
getattr(args, "container_timeout_buffer", 60),
|
|
2103
|
+
)
|
|
2104
|
+
|
|
2105
|
+
res = data_prepopulation_step(
|
|
2106
|
+
benchmark_config,
|
|
2107
|
+
benchmark_tool_workdir,
|
|
2108
|
+
client_cpuset_cpus,
|
|
2109
|
+
docker_client,
|
|
2110
|
+
git_hash,
|
|
2111
|
+
port,
|
|
2112
|
+
temporary_dir_client,
|
|
2113
|
+
test_name,
|
|
2114
|
+
host,
|
|
2115
|
+
tls_enabled,
|
|
2116
|
+
tls_skip_verify,
|
|
2117
|
+
test_tls_cert,
|
|
2118
|
+
test_tls_key,
|
|
2119
|
+
test_tls_cacert,
|
|
2120
|
+
resp_version,
|
|
2121
|
+
args.benchmark_local_install,
|
|
2122
|
+
password,
|
|
2123
|
+
oss_cluster_api_enabled,
|
|
2124
|
+
unix_socket,
|
|
2125
|
+
buffer_timeout,
|
|
2126
|
+
args,
|
|
2127
|
+
)
|
|
2128
|
+
if res is False:
|
|
2129
|
+
logging.warning(
|
|
2130
|
+
"Skipping this test given preload result was false"
|
|
2131
|
+
)
|
|
2132
|
+
delete_temporary_files(
|
|
2133
|
+
temporary_dir_client=temporary_dir_client,
|
|
2134
|
+
full_result_path=None,
|
|
2135
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2136
|
+
)
|
|
2137
|
+
continue
|
|
2138
|
+
# Send MEMORY PURGE before preload for memory comparison mode (if FLUSHALL wasn't already done)
|
|
2139
|
+
if memory_comparison_only and not args.flushall_on_every_test_start:
|
|
2140
|
+
try:
|
|
2141
|
+
logging.info(
|
|
2142
|
+
"Sending MEMORY PURGE before preload for memory comparison mode"
|
|
2143
|
+
)
|
|
2144
|
+
for conn in redis_conns:
|
|
2145
|
+
conn.execute_command("MEMORY", "PURGE")
|
|
2146
|
+
except Exception as e:
|
|
2147
|
+
logging.warning(f"MEMORY PURGE failed before preload: {e}")
|
|
2148
|
+
|
|
2149
|
+
execute_init_commands(
|
|
2150
|
+
benchmark_config, r, dbconfig_keyname="dbconfig"
|
|
2151
|
+
)
|
|
2152
|
+
|
|
2153
|
+
used_memory_check(
|
|
2154
|
+
test_name,
|
|
2155
|
+
benchmark_required_memory,
|
|
2156
|
+
redis_conns,
|
|
2157
|
+
"start of benchmark",
|
|
2158
|
+
used_memory_check_fail,
|
|
2159
|
+
)
|
|
2160
|
+
|
|
2161
|
+
logging.info("Checking if there is a keyspace check being enforced")
|
|
2162
|
+
dbconfig_keyspacelen_check(
|
|
2163
|
+
benchmark_config,
|
|
2164
|
+
redis_conns,
|
|
2165
|
+
)
|
|
2166
|
+
|
|
2167
|
+
# For memory comparison mode, collect memory stats after preload and skip client benchmark
|
|
2168
|
+
if memory_comparison_only:
|
|
2169
|
+
# Initialize timing variables for memory comparison mode
|
|
2170
|
+
(
|
|
2171
|
+
start_time,
|
|
2172
|
+
start_time_ms,
|
|
2173
|
+
start_time_str,
|
|
2174
|
+
) = get_start_time_vars()
|
|
2175
|
+
dataset_load_duration_seconds = (
|
|
2176
|
+
0 # No dataset loading time for memory comparison
|
|
2177
|
+
)
|
|
2178
|
+
|
|
2179
|
+
# Skip load tests (keyspacelen = 0) in memory comparison mode
|
|
2180
|
+
keyspacelen = (
|
|
2181
|
+
benchmark_config.get("dbconfig", {})
|
|
2182
|
+
.get("check", {})
|
|
2183
|
+
.get("keyspacelen", None)
|
|
2184
|
+
)
|
|
2185
|
+
if keyspacelen is not None and keyspacelen == 0:
|
|
2186
|
+
logging.info(
|
|
2187
|
+
f"Skipping load test {test_name} in memory comparison mode (keyspacelen=0)"
|
|
2188
|
+
)
|
|
2189
|
+
delete_temporary_files(
|
|
2190
|
+
temporary_dir_client=temporary_dir_client,
|
|
2191
|
+
full_result_path=None,
|
|
2192
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2193
|
+
)
|
|
2194
|
+
continue
|
|
2195
|
+
|
|
2196
|
+
# Handle dry run for memory comparison mode
|
|
2197
|
+
if dry_run:
|
|
2198
|
+
dry_run_count = dry_run_count + 1
|
|
2199
|
+
dry_run_tests.append(test_name)
|
|
2200
|
+
logging.info(
|
|
2201
|
+
f"[DRY RUN] Would collect memory stats for test {test_name}"
|
|
2202
|
+
)
|
|
2203
|
+
|
|
2204
|
+
# Add dataset info to dry run output
|
|
2205
|
+
dataset_name = benchmark_config.get("dbconfig", {}).get(
|
|
2206
|
+
"dataset_name"
|
|
2207
|
+
)
|
|
2208
|
+
if dataset_name:
|
|
2209
|
+
logging.info(f"[DRY RUN] Dataset: {dataset_name}")
|
|
2210
|
+
|
|
2211
|
+
delete_temporary_files(
|
|
2212
|
+
temporary_dir_client=temporary_dir_client,
|
|
2213
|
+
full_result_path=None,
|
|
2214
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2215
|
+
)
|
|
2216
|
+
continue
|
|
2217
|
+
|
|
2218
|
+
logging.info(f"Collecting memory stats for test {test_name}")
|
|
2219
|
+
try:
|
|
2220
|
+
# Use raw command to avoid parsing issues with some Redis versions
|
|
2221
|
+
memory_stats_raw = r.execute_command("MEMORY", "STATS")
|
|
2222
|
+
# Convert list response to dict
|
|
2223
|
+
memory_stats = {}
|
|
2224
|
+
for i in range(0, len(memory_stats_raw), 2):
|
|
2225
|
+
key = (
|
|
2226
|
+
memory_stats_raw[i].decode()
|
|
2227
|
+
if isinstance(memory_stats_raw[i], bytes)
|
|
2228
|
+
else str(memory_stats_raw[i])
|
|
2229
|
+
)
|
|
2230
|
+
value = memory_stats_raw[i + 1]
|
|
2231
|
+
if isinstance(value, bytes):
|
|
2232
|
+
try:
|
|
2233
|
+
value = float(value.decode())
|
|
2234
|
+
except ValueError:
|
|
2235
|
+
value = value.decode()
|
|
2236
|
+
memory_stats[key] = value
|
|
2237
|
+
except Exception as e:
|
|
2238
|
+
logging.error(f"Failed to collect memory stats: {e}")
|
|
2239
|
+
# Fallback to basic memory info
|
|
2240
|
+
info = r.info("memory")
|
|
2241
|
+
memory_stats = {
|
|
2242
|
+
"total.allocated": info.get("used_memory", 0),
|
|
2243
|
+
"dataset.bytes": info.get("used_memory_dataset", 0),
|
|
2244
|
+
"keys.count": r.dbsize(),
|
|
2245
|
+
"keys.bytes-per-key": 0,
|
|
2246
|
+
"dataset.percentage": 0,
|
|
2247
|
+
"overhead.total": 0,
|
|
2248
|
+
"fragmentation": info.get(
|
|
2249
|
+
"mem_fragmentation_ratio", 1.0
|
|
2250
|
+
),
|
|
2251
|
+
"fragmentation.bytes": 0,
|
|
2252
|
+
"allocator.allocated": info.get("used_memory", 0),
|
|
2253
|
+
"allocator.resident": info.get("used_memory_rss", 0),
|
|
2254
|
+
"allocator-fragmentation.ratio": 1.0,
|
|
2255
|
+
}
|
|
2256
|
+
|
|
2257
|
+
# Detect object encoding by scanning 1% of the dataset
|
|
2258
|
+
object_encoding_info = detect_object_encoding(
|
|
2259
|
+
r, benchmark_config.get("dbconfig", {})
|
|
2260
|
+
)
|
|
2261
|
+
logging.info(
|
|
2262
|
+
f"Object encoding detection: {object_encoding_info.get('encoding', 'unknown')} "
|
|
2263
|
+
f"({object_encoding_info.get('confidence', 0)*100:.1f}% confidence)"
|
|
2264
|
+
)
|
|
2265
|
+
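The detect_object_encoding() helper is not shown in this hunk. As a hedged sketch of the general idea (sampling part of the keyspace with SCAN and tallying OBJECT ENCODING per key), it could look like the following; the function name and exact return shape are assumptions, chosen to match the fields read below:

```python
from collections import Counter

import redis


def sample_object_encodings(conn: redis.Redis, sample_size: int = 1000) -> dict:
    """Sample part of the keyspace and summarize the dominant object encoding."""
    encodings = Counter()
    sampled = 0
    for key in conn.scan_iter(count=100):
        encoding = conn.object("encoding", key)
        if isinstance(encoding, bytes):
            encoding = encoding.decode()
        encodings[encoding] += 1
        sampled += 1
        if sampled >= sample_size:
            break
    total = max(sampled, 1)
    top_encoding, top_count = (encodings.most_common(1) or [("unknown", 0)])[0]
    return {
        "encoding": top_encoding,
        "confidence": top_count / total,
        "sample_size": sampled,
        "encoding_distribution": {e: 100.0 * n / total for e, n in encodings.items()},
        "is_complete_scan": sampled >= conn.dbsize(),
    }
```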
|
|
2266
|
+
# Extract key memory metrics
|
|
2267
|
+
memory_result = {
|
|
2268
|
+
"test_name": test_name,
|
|
2269
|
+
"total_allocated": memory_stats.get("total.allocated", 0),
|
|
2270
|
+
"dataset_bytes": memory_stats.get("dataset.bytes", 0),
|
|
2271
|
+
"keys_count": memory_stats.get("keys.count", 0),
|
|
2272
|
+
"keys_bytes_per_key": memory_stats.get(
|
|
2273
|
+
"keys.bytes-per-key", 0
|
|
2274
|
+
),
|
|
2275
|
+
"dataset_percentage": memory_stats.get(
|
|
2276
|
+
"dataset.percentage", 0
|
|
2277
|
+
),
|
|
2278
|
+
"overhead_total": memory_stats.get("overhead.total", 0),
|
|
2279
|
+
"fragmentation": memory_stats.get("fragmentation", 0),
|
|
2280
|
+
"fragmentation_bytes": memory_stats.get(
|
|
2281
|
+
"fragmentation.bytes", 0
|
|
2282
|
+
),
|
|
2283
|
+
"allocator_allocated": memory_stats.get(
|
|
2284
|
+
"allocator.allocated", 0
|
|
2285
|
+
),
|
|
2286
|
+
"allocator_resident": memory_stats.get(
|
|
2287
|
+
"allocator.resident", 0
|
|
2288
|
+
),
|
|
2289
|
+
"allocator_fragmentation_ratio": memory_stats.get(
|
|
2290
|
+
"allocator-fragmentation.ratio", 0
|
|
2291
|
+
),
|
|
2292
|
+
# Object encoding information
|
|
2293
|
+
"object_encoding": object_encoding_info.get(
|
|
2294
|
+
"encoding", "unknown"
|
|
2295
|
+
),
|
|
2296
|
+
"encoding_confidence": object_encoding_info.get(
|
|
2297
|
+
"confidence", 0.0
|
|
2298
|
+
),
|
|
2299
|
+
"encoding_sample_size": object_encoding_info.get(
|
|
2300
|
+
"sample_size", 0
|
|
2301
|
+
),
|
|
2302
|
+
"encoding_distribution": object_encoding_info.get(
|
|
2303
|
+
"encoding_distribution", {}
|
|
2304
|
+
),
|
|
2305
|
+
"encoding_is_complete_scan": object_encoding_info.get(
|
|
2306
|
+
"is_complete_scan", False
|
|
2307
|
+
),
|
|
2308
|
+
}
|
|
2309
|
+
memory_results.append(memory_result)
|
|
2310
|
+
|
|
2311
|
+
# Push memory metrics to datasink
|
|
2312
|
+
if datasink_push_results_redistimeseries:
|
|
2313
|
+
memory_metrics_dict = {
|
|
2314
|
+
"memory.total_allocated": memory_result[
|
|
2315
|
+
"total_allocated"
|
|
2316
|
+
],
|
|
2317
|
+
"memory.dataset_bytes": memory_result["dataset_bytes"],
|
|
2318
|
+
"memory.keys_count": memory_result["keys_count"],
|
|
2319
|
+
"memory.keys_bytes_per_key": memory_result[
|
|
2320
|
+
"keys_bytes_per_key"
|
|
2321
|
+
],
|
|
2322
|
+
"memory.dataset_percentage": memory_result[
|
|
2323
|
+
"dataset_percentage"
|
|
2324
|
+
],
|
|
2325
|
+
"memory.overhead_total": memory_result[
|
|
2326
|
+
"overhead_total"
|
|
2327
|
+
],
|
|
2328
|
+
"memory.fragmentation": memory_result["fragmentation"],
|
|
2329
|
+
"memory.fragmentation_bytes": memory_result[
|
|
2330
|
+
"fragmentation_bytes"
|
|
2331
|
+
],
|
|
2332
|
+
"memory.allocator_allocated": memory_result[
|
|
2333
|
+
"allocator_allocated"
|
|
2334
|
+
],
|
|
2335
|
+
"memory.allocator_resident": memory_result[
|
|
2336
|
+
"allocator_resident"
|
|
2337
|
+
],
|
|
2338
|
+
"memory.allocator_fragmentation_ratio": memory_result[
|
|
2339
|
+
"allocator_fragmentation_ratio"
|
|
2340
|
+
],
|
|
2341
|
+
"memory.encoding_confidence": memory_result[
|
|
2342
|
+
"encoding_confidence"
|
|
2343
|
+
],
|
|
2344
|
+
"memory.encoding_sample_size": memory_result[
|
|
2345
|
+
"encoding_sample_size"
|
|
2346
|
+
],
|
|
2347
|
+
}
|
|
2348
|
+
|
|
2349
|
+
# Add object encoding to metadata
|
|
2350
|
+
metadata["object_encoding"] = memory_result[
|
|
2351
|
+
"object_encoding"
|
|
2352
|
+
]
|
|
2353
|
+
metadata["encoding_confidence"] = (
|
|
2354
|
+
f"{memory_result['encoding_confidence']:.3f}"
|
|
2355
|
+
)
|
|
2356
|
+
metadata["encoding_sample_size"] = str(
|
|
2357
|
+
memory_result["encoding_sample_size"]
|
|
2358
|
+
)
|
|
2359
|
+
metadata["encoding_scan_type"] = (
|
|
2360
|
+
"complete"
|
|
2361
|
+
if memory_result.get("encoding_is_complete_scan", False)
|
|
2362
|
+
else "sample"
|
|
2363
|
+
)
|
|
2364
|
+
|
|
2365
|
+
# Add encoding distribution to metadata if multiple encodings found
|
|
2366
|
+
if len(memory_result["encoding_distribution"]) > 1:
|
|
2367
|
+
for enc, percentage in memory_result[
|
|
2368
|
+
"encoding_distribution"
|
|
2369
|
+
].items():
|
|
2370
|
+
metadata[f"encoding_dist_{enc}"] = (
|
|
2371
|
+
f"{percentage:.1f}%"
|
|
2372
|
+
)
|
|
2373
|
+
|
|
2374
|
+
# Set datapoint_time_ms for memory comparison mode
|
|
2375
|
+
datapoint_time_ms = start_time_ms
|
|
2376
|
+
# retention period of 7 days, in milliseconds
|
|
2377
|
+
expire_redis_metrics_ms = 7 * 24 * 60 * 60 * 1000
|
|
2378
|
+
metadata["metric-type"] = "memory-stats"
|
|
2379
|
+
|
|
2380
|
+
# Debug: Check git_hash value and memory metrics before export
|
|
2381
|
+
logging.info(
|
|
2382
|
+
f"DEBUG: About to export memory metrics with git_hash='{git_hash}', type={type(git_hash)}"
|
|
2383
|
+
)
|
|
2384
|
+
logging.info(
|
|
2385
|
+
f"DEBUG: memory_metrics_dict has {len(memory_metrics_dict)} items: {list(memory_metrics_dict.keys())}"
|
|
2386
|
+
)
|
|
2387
|
+
logging.info(
|
|
2388
|
+
f"DEBUG: Sample values: {dict(list(memory_metrics_dict.items())[:3])}"
|
|
2389
|
+
)
|
|
2390
|
+
export_redis_metrics(
|
|
2391
|
+
git_version,
|
|
2392
|
+
datapoint_time_ms,
|
|
2393
|
+
memory_metrics_dict,
|
|
2394
|
+
datasink_conn,
|
|
2395
|
+
setup_name,
|
|
2396
|
+
setup_type,
|
|
2397
|
+
test_name,
|
|
2398
|
+
git_branch,
|
|
2399
|
+
tf_github_org,
|
|
2400
|
+
tf_github_repo,
|
|
2401
|
+
tf_triggering_env,
|
|
2402
|
+
metadata,
|
|
2403
|
+
expire_redis_metrics_ms,
|
|
2404
|
+
git_hash,
|
|
2405
|
+
running_platform,
|
|
2406
|
+
)
|
|
2407
|
+
|
|
2408
|
+
exporter_datasink_common(
|
|
2409
|
+
benchmark_config,
|
|
2410
|
+
0, # benchmark_duration_seconds = 0 for memory only
|
|
2411
|
+
build_variant_name,
|
|
2412
|
+
datapoint_time_ms,
|
|
2413
|
+
dataset_load_duration_seconds,
|
|
2414
|
+
datasink_conn,
|
|
2415
|
+
datasink_push_results_redistimeseries,
|
|
2416
|
+
git_branch,
|
|
2417
|
+
git_version,
|
|
2418
|
+
metadata,
|
|
2419
|
+
redis_conns,
|
|
2420
|
+
memory_metrics_dict,
|
|
2421
|
+
running_platform,
|
|
2422
|
+
args.deployment_name,
|
|
2423
|
+
args.deployment_type,
|
|
2424
|
+
test_name,
|
|
2425
|
+
tf_github_org,
|
|
2426
|
+
tf_github_repo,
|
|
2427
|
+
tf_triggering_env,
|
|
2428
|
+
topology_spec_name,
|
|
2429
|
+
default_metrics,
|
|
2430
|
+
git_hash,
|
|
2431
|
+
False,
|
|
2432
|
+
True,
|
|
2433
|
+
)
|
|
2434
|
+
|
|
2435
|
+
# Send MEMORY PURGE after memory comparison (if FLUSHALL at test end is not enabled)
|
|
2436
|
+
if not args.flushall_on_every_test_end:
|
|
2437
|
+
try:
|
|
2438
|
+
logging.info(
|
|
2439
|
+
"Sending MEMORY PURGE after memory comparison"
|
|
2440
|
+
)
|
|
2441
|
+
for conn in redis_conns:
|
|
2442
|
+
conn.execute_command("MEMORY", "PURGE")
|
|
2443
|
+
except Exception as e:
|
|
2444
|
+
logging.warning(
|
|
2445
|
+
f"MEMORY PURGE failed after memory comparison: {e}"
|
|
2446
|
+
)
|
|
2447
|
+
|
|
2448
|
+
logging.info(
|
|
2449
|
+
f"Memory comparison completed for test {test_name}"
|
|
2450
|
+
)
|
|
2451
|
+
delete_temporary_files(
|
|
2452
|
+
temporary_dir_client=temporary_dir_client,
|
|
2453
|
+
full_result_path=None,
|
|
2454
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2455
|
+
)
|
|
2456
|
+
continue
|
|
2457
|
+
|
|
2458
|
+
if dry_run_include_preload is True:
|
|
2459
|
+
dry_run_count = dry_run_count + 1
|
|
2460
|
+
dry_run_tests.append(test_name)
|
|
2461
|
+
delete_temporary_files(
|
|
2462
|
+
temporary_dir_client=temporary_dir_client,
|
|
2463
|
+
full_result_path=None,
|
|
2464
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2465
|
+
)
|
|
2466
|
+
continue
|
|
2467
|
+
|
|
2468
|
+
benchmark_tool = extract_client_tool(benchmark_config)
|
|
2469
|
+
benchmark_tool_global = benchmark_tool
|
|
2470
|
+
# backwards compatible
|
|
2471
|
+
if benchmark_tool is None:
|
|
2472
|
+
benchmark_tool = "redis-benchmark"
|
|
2473
|
+
|
|
2474
|
+
# Set benchmark path based on local install option
|
|
2475
|
+
if (
|
|
2476
|
+
args.benchmark_local_install
|
|
2477
|
+
and "memtier_benchmark" in benchmark_tool
|
|
2478
|
+
):
|
|
2479
|
+
full_benchmark_path = getattr(
|
|
2480
|
+
args, "memtier_bin_path", "memtier_benchmark"
|
|
2481
|
+
)
|
|
2482
|
+
else:
|
|
2483
|
+
full_benchmark_path = f"/usr/local/bin/{benchmark_tool}"
|
|
2484
|
+
|
|
2485
|
+
# setup the benchmark
|
|
2486
|
+
(
|
|
2487
|
+
start_time,
|
|
2488
|
+
start_time_ms,
|
|
2489
|
+
start_time_str,
|
|
2490
|
+
) = get_start_time_vars()
|
|
2491
|
+
local_benchmark_output_filename = get_local_run_full_filename(
|
|
2492
|
+
start_time_str,
|
|
2493
|
+
git_hash,
|
|
2494
|
+
test_name,
|
|
2495
|
+
setup_name,
|
|
2496
|
+
)
|
|
2497
|
+
logging.info(
|
|
2498
|
+
"Will store benchmark json output to local file {}".format(
|
|
2499
|
+
local_benchmark_output_filename
|
|
2500
|
+
)
|
|
2501
|
+
)
|
|
2502
|
+
arbitrary_command = False
|
|
2503
|
+
|
|
2504
|
+
# Check if we have multiple client configurations
|
|
2505
|
+
client_configs = extract_client_configs(benchmark_config)
|
|
2506
|
+
is_multiple_clients = len(client_configs) > 1
|
|
2507
|
+
|
|
2508
|
+
if is_multiple_clients:
|
|
2509
|
+
logging.info(
|
|
2510
|
+
f"Running test with {len(client_configs)} client configurations"
|
|
2511
|
+
)
|
|
2512
|
+
else:
|
|
2513
|
+
# Legacy single client mode - prepare benchmark parameters
|
|
2514
|
+
client_container_image = extract_client_container_image(
|
|
2515
|
+
benchmark_config
|
|
2516
|
+
)
|
|
2517
|
+
benchmark_tool = extract_client_tool(benchmark_config)
|
|
2518
|
+
|
|
2519
|
+
# Prepare benchmark command for single client
|
|
2520
|
+
if "memtier_benchmark" in benchmark_tool:
|
|
2521
|
+
(
|
|
2522
|
+
_,
|
|
2523
|
+
benchmark_command_str,
|
|
2524
|
+
arbitrary_command,
|
|
2525
|
+
) = prepare_memtier_benchmark_parameters(
|
|
2526
|
+
benchmark_config["clientconfig"],
|
|
2527
|
+
full_benchmark_path,
|
|
2528
|
+
port,
|
|
2529
|
+
host,
|
|
2530
|
+
password,
|
|
2531
|
+
local_benchmark_output_filename,
|
|
2532
|
+
oss_cluster_api_enabled,
|
|
2533
|
+
tls_enabled,
|
|
2534
|
+
tls_skip_verify,
|
|
2535
|
+
test_tls_cert,
|
|
2536
|
+
test_tls_key,
|
|
2537
|
+
test_tls_cacert,
|
|
2538
|
+
resp_version,
|
|
2539
|
+
override_memtier_test_time,
|
|
2540
|
+
override_test_runs,
|
|
2541
|
+
unix_socket,
|
|
2542
|
+
)
|
|
2543
|
+
elif "pubsub-sub-bench" in benchmark_tool:
|
|
2544
|
+
(
|
|
2545
|
+
_,
|
|
2546
|
+
benchmark_command_str,
|
|
2547
|
+
arbitrary_command,
|
|
2548
|
+
) = prepare_pubsub_sub_bench_parameters(
|
|
2549
|
+
benchmark_config["clientconfig"],
|
|
2550
|
+
full_benchmark_path,
|
|
2551
|
+
port,
|
|
2552
|
+
host,
|
|
2553
|
+
password,
|
|
2554
|
+
local_benchmark_output_filename,
|
|
2555
|
+
oss_cluster_api_enabled,
|
|
2556
|
+
tls_enabled,
|
|
2557
|
+
tls_skip_verify,
|
|
2558
|
+
test_tls_cert,
|
|
2559
|
+
test_tls_key,
|
|
2560
|
+
test_tls_cacert,
|
|
2561
|
+
resp_version,
|
|
2562
|
+
override_memtier_test_time,
|
|
2563
|
+
unix_socket,
|
|
2564
|
+
None, # username
|
|
2565
|
+
)
|
|
2566
|
+
elif "vector-db-benchmark" in benchmark_tool:
|
|
2567
|
+
(
|
|
2568
|
+
_,
|
|
2569
|
+
benchmark_command_str,
|
|
2570
|
+
arbitrary_command,
|
|
2571
|
+
env_vars,
|
|
2572
|
+
) = prepare_vector_db_benchmark_parameters(
|
|
2573
|
+
benchmark_config["clientconfig"],
|
|
2574
|
+
full_benchmark_path,
|
|
2575
|
+
port,
|
|
2576
|
+
host,
|
|
2577
|
+
password,
|
|
2578
|
+
local_benchmark_output_filename,
|
|
2579
|
+
oss_cluster_api_enabled,
|
|
2580
|
+
tls_enabled,
|
|
2581
|
+
tls_skip_verify,
|
|
2582
|
+
test_tls_cert,
|
|
2583
|
+
test_tls_key,
|
|
2584
|
+
test_tls_cacert,
|
|
2585
|
+
resp_version,
|
|
2586
|
+
override_memtier_test_time,
|
|
2587
|
+
unix_socket,
|
|
2588
|
+
None, # username
|
|
2589
|
+
)
|
|
2590
|
+
else:
|
|
2591
|
+
# prepare the benchmark command for other tools
|
|
2592
|
+
(
|
|
2593
|
+
benchmark_command,
|
|
2594
|
+
benchmark_command_str,
|
|
2595
|
+
) = prepare_benchmark_parameters(
|
|
2596
|
+
benchmark_config,
|
|
2597
|
+
full_benchmark_path,
|
|
2598
|
+
port,
|
|
2599
|
+
host,
|
|
2600
|
+
local_benchmark_output_filename,
|
|
2601
|
+
False,
|
|
2602
|
+
benchmark_tool_workdir,
|
|
2603
|
+
False,
|
|
2604
|
+
)
|
|
2605
|
+
profiler_call_graph_mode = "dwarf"
|
|
2606
|
+
profiler_frequency = 99
|
|
2607
|
+
|
|
2608
|
+
# start the profile
|
|
2609
|
+
(
|
|
2610
|
+
profiler_name,
|
|
2611
|
+
profilers_map,
|
|
2612
|
+
) = profilers_start_if_required(
|
|
2613
|
+
profilers_enabled,
|
|
2614
|
+
profilers_list,
|
|
2615
|
+
redis_pids,
|
|
2616
|
+
setup_name,
|
|
2617
|
+
start_time_str,
|
|
2618
|
+
test_name,
|
|
2619
|
+
profiler_frequency,
|
|
2620
|
+
profiler_call_graph_mode,
|
|
2621
|
+
)
|
|
2622
|
+
|
|
2623
|
+
# start remote profiling if enabled
|
|
2624
|
+
remote_profiler = None
|
|
2625
|
+
if args.enable_remote_profiling:
|
|
2626
|
+
try:
|
|
2627
|
+
remote_profiler = RemoteProfiler(
|
|
2628
|
+
args.remote_profile_host,
|
|
2629
|
+
args.remote_profile_port,
|
|
2630
|
+
args.remote_profile_output_dir,
|
|
2631
|
+
args.remote_profile_username,
|
|
2632
|
+
args.remote_profile_password,
|
|
2633
|
+
)
|
|
2634
|
+
|
|
2635
|
+
# Extract expected benchmark duration
|
|
2636
|
+
expected_duration = extract_expected_benchmark_duration(
|
|
2637
|
+
benchmark_command_str, override_memtier_test_time
|
|
2638
|
+
)
|
|
2639
|
+
|
|
2640
|
+
# Start remote profiling
|
|
2641
|
+
profiling_started = remote_profiler.start_profiling(
|
|
2642
|
+
redis_conns[0] if redis_conns else None,
|
|
2643
|
+
test_name,
|
|
2644
|
+
expected_duration,
|
|
2645
|
+
)
|
|
2646
|
+
|
|
2647
|
+
if profiling_started:
|
|
2648
|
+
logging.info(
|
|
2649
|
+
f"Started remote profiling for test: {test_name}"
|
|
2650
|
+
)
|
|
2651
|
+
else:
|
|
2652
|
+
logging.warning(
|
|
2653
|
+
f"Failed to start remote profiling for test: {test_name}"
|
|
2654
|
+
)
|
|
2655
|
+
remote_profiler = None
|
|
2656
|
+
|
|
2657
|
+
except Exception as e:
|
|
2658
|
+
logging.error(f"Error starting remote profiling: {e}")
|
|
2659
|
+
remote_profiler = None
|
|
2660
|
+
|
|
2661
|
+
# run the benchmark
|
|
2662
|
+
benchmark_start_time = datetime.datetime.now()
|
|
2663
|
+
|
|
2664
|
+
if is_multiple_clients:
|
|
2665
|
+
# Run multiple client configurations
|
|
2666
|
+
logging.info(
|
|
2667
|
+
"Running multiple client configurations simultaneously"
|
|
2668
|
+
)
|
|
2669
|
+
client_container_stdout, client_results = run_multiple_clients(
|
|
2670
|
+
benchmark_config,
|
|
2671
|
+
docker_client,
|
|
2672
|
+
temporary_dir_client,
|
|
2673
|
+
client_mnt_point,
|
|
2674
|
+
benchmark_tool_workdir,
|
|
2675
|
+
client_cpuset_cpus,
|
|
2676
|
+
port,
|
|
2677
|
+
host,
|
|
2678
|
+
password,
|
|
2679
|
+
oss_cluster_api_enabled,
|
|
2680
|
+
tls_enabled,
|
|
2681
|
+
tls_skip_verify,
|
|
2682
|
+
test_tls_cert,
|
|
2683
|
+
test_tls_key,
|
|
2684
|
+
test_tls_cacert,
|
|
2685
|
+
resp_version,
|
|
2686
|
+
override_memtier_test_time,
|
|
2687
|
+
override_test_runs,
|
|
2688
|
+
unix_socket,
|
|
2689
|
+
args,
|
|
2690
|
+
)
|
|
2691
|
+
logging.info(
|
|
2692
|
+
f"Completed {len(client_results)} client configurations"
|
|
2693
|
+
)
|
|
2694
|
+
else:
|
|
2695
|
+
# Legacy single client execution
|
|
2696
|
+
if args.benchmark_local_install:
|
|
2697
|
+
logging.info("Running memtier benchmark outside of docker")
|
|
2698
|
+
benchmark_command_str = (
|
|
2699
|
+
"taskset -c "
|
|
2700
|
+
+ client_cpuset_cpus
|
|
2701
|
+
+ " "
|
|
2702
|
+
+ benchmark_command_str
|
|
2703
|
+
)
|
|
2704
|
+
|
|
2705
|
+
# Calculate timeout for local process
|
|
2706
|
+
buffer_timeout = getattr(
|
|
2707
|
+
args,
|
|
2708
|
+
"timeout_buffer",
|
|
2709
|
+
getattr(args, "container_timeout_buffer", 60),
|
|
2710
|
+
)
|
|
2711
|
+
process_timeout = calculate_process_timeout(
|
|
2712
|
+
benchmark_command_str, buffer_timeout
|
|
2713
|
+
)
|
|
2714
|
+
|
|
2715
|
+
# Run with timeout
|
|
2716
|
+
success, client_container_stdout, stderr = (
|
|
2717
|
+
run_local_command_with_timeout(
|
|
2718
|
+
benchmark_command_str,
|
|
2719
|
+
process_timeout,
|
|
2720
|
+
"memtier benchmark",
|
|
2721
|
+
)
|
|
2722
|
+
)
|
|
2723
|
+
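run_local_command_with_timeout() is imported from the runner helpers and is not shown in this hunk. A minimal sketch of what such a wrapper typically does, assuming it returns the (success, stdout, stderr) triple unpacked above:

```python
import subprocess


def run_local_command_with_timeout(command: str, timeout_seconds: int, label: str):
    """Run a shell command, returning (success, stdout, stderr) even on timeout."""
    try:
        proc = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=timeout_seconds,
        )
        return proc.returncode == 0, proc.stdout, proc.stderr
    except subprocess.TimeoutExpired as exc:
        partial = exc.stdout if exc.stdout is not None else ""
        if isinstance(partial, bytes):
            partial = partial.decode("utf-8", errors="replace")
        return False, partial, f"{label} timed out after {timeout_seconds}s"
```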
|
|
2724
|
+
if not success:
|
|
2725
|
+
logging.error(f"Memtier benchmark failed: {stderr}")
|
|
2726
|
+
# Clean up database after failure (timeout or error)
|
|
2727
|
+
if (
|
|
2728
|
+
args.flushall_on_every_test_end
|
|
2729
|
+
or args.flushall_on_every_test_start
|
|
2730
|
+
):
|
|
2731
|
+
logging.warning(
|
|
2732
|
+
"Benchmark failed - cleaning up database with FLUSHALL"
|
|
2733
|
+
)
|
|
2734
|
+
try:
|
|
2735
|
+
for r in redis_conns:
|
|
2736
|
+
r.flushall()
|
|
2737
|
+
except Exception as e:
|
|
2738
|
+
logging.error(
|
|
2739
|
+
f"FLUSHALL failed after benchmark failure: {e}"
|
|
2740
|
+
)
|
|
2741
|
+
# Continue with the test but log the failure
|
|
2742
|
+
client_container_stdout = f"ERROR: {stderr}"
|
|
2743
|
+
|
|
2744
|
+
move_command = "mv {} {}".format(
|
|
2745
|
+
local_benchmark_output_filename, temporary_dir_client
|
|
2746
|
+
)
|
|
2747
|
+
os.system(move_command)
|
|
2748
|
+
else:
|
|
2749
|
+
logging.info(
|
|
2750
|
+
"Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format(
|
|
2751
|
+
client_container_image,
|
|
2752
|
+
client_cpuset_cpus,
|
|
2753
|
+
benchmark_command_str,
|
|
2754
|
+
)
|
|
2755
|
+
)
|
|
2756
|
+
|
|
2757
|
+
# Use explicit container management for single client
|
|
2758
|
+
# Set working directory based on tool
|
|
2759
|
+
working_dir = benchmark_tool_workdir
|
|
2760
|
+
if "vector-db-benchmark" in benchmark_tool:
|
|
2761
|
+
working_dir = (
|
|
2762
|
+
"/app" # vector-db-benchmark needs to run from /app
|
|
2763
|
+
)
|
|
2764
|
+
|
|
2765
|
+
# Prepare volumes
|
|
2766
|
+
volumes = {
|
|
2767
|
+
temporary_dir_client: {
|
|
2768
|
+
"bind": client_mnt_point,
|
|
2769
|
+
"mode": "rw",
|
|
2770
|
+
},
|
|
2771
|
+
}
|
|
2772
|
+
|
|
2773
|
+
# For vector-db-benchmark, also mount the results directory
|
|
2774
|
+
if "vector-db-benchmark" in benchmark_tool:
|
|
2775
|
+
volumes[temporary_dir_client] = {
|
|
2776
|
+
"bind": "/app/results",
|
|
2777
|
+
"mode": "rw",
|
|
2778
|
+
}
|
|
2779
|
+
|
|
2780
|
+
container_kwargs = {
|
|
2781
|
+
"image": client_container_image,
|
|
2782
|
+
"volumes": volumes,
|
|
2783
|
+
"auto_remove": False,
|
|
2784
|
+
"privileged": True,
|
|
2785
|
+
"working_dir": working_dir,
|
|
2786
|
+
"command": benchmark_command_str,
|
|
2787
|
+
"network_mode": "host",
|
|
2788
|
+
"detach": True,
|
|
2789
|
+
"cpuset_cpus": client_cpuset_cpus,
|
|
2790
|
+
}
|
|
2791
|
+
|
|
2792
|
+
# Only add user for non-vector-db-benchmark tools to avoid permission issues
|
|
2793
|
+
if "vector-db-benchmark" not in benchmark_tool:
|
|
2794
|
+
container_kwargs["user"] = (
|
|
2795
|
+
f"{os.getuid()}:{os.getgid()}"
|
|
2796
|
+
)
|
|
2797
|
+
|
|
2798
|
+
# Add environment variables for vector-db-benchmark
|
|
2799
|
+
if "vector-db-benchmark" in benchmark_tool:
|
|
2800
|
+
try:
|
|
2801
|
+
container_kwargs["environment"] = env_vars
|
|
2802
|
+
except NameError:
|
|
2803
|
+
# env_vars not defined, skip environment variables
|
|
2804
|
+
pass
|
|
2805
|
+
|
|
2806
|
+
container = docker_client.containers.run(**container_kwargs)
|
|
2807
|
+
|
|
2808
|
+
# Wait for container and get output
|
|
2809
|
+
try:
|
|
2810
|
+
exit_code = container.wait()
|
|
2811
|
+
client_container_stdout = container.logs().decode(
|
|
2812
|
+
"utf-8"
|
|
2813
|
+
)
|
|
2814
|
+
logging.info(
|
|
2815
|
+
f"Single client completed with exit code: {exit_code}"
|
|
2816
|
+
)
|
|
2817
|
+
except Exception as wait_error:
|
|
2818
|
+
logging.error(f"Single client wait error: {wait_error}")
|
|
2819
|
+
client_container_stdout = container.logs().decode(
|
|
2820
|
+
"utf-8"
|
|
2821
|
+
)
|
|
2822
|
+
finally:
|
|
2823
|
+
# Clean up container
|
|
2824
|
+
try:
|
|
2825
|
+
container.remove(force=True)
|
|
2826
|
+
except Exception as cleanup_error:
|
|
2827
|
+
logging.warning(
|
|
2828
|
+
f"Single client cleanup error: {cleanup_error}"
|
|
2829
|
+
)
|
|
2830
|
+
|
|
2831
|
+
benchmark_end_time = datetime.datetime.now()
|
|
2832
|
+
benchmark_duration_seconds = (
|
|
2833
|
+
calculate_client_tool_duration_and_check(
|
|
2834
|
+
benchmark_end_time, benchmark_start_time
|
|
2835
|
+
)
|
|
2836
|
+
)
|
|
2837
|
+
(
|
|
2838
|
+
_,
|
|
2839
|
+
overall_tabular_data_map,
|
|
2840
|
+
) = profilers_stop_if_required(
|
|
2841
|
+
datasink_push_results_redistimeseries,
|
|
2842
|
+
benchmark_duration_seconds,
|
|
2843
|
+
collection_summary_str,
|
|
2844
|
+
dso,
|
|
2845
|
+
tf_github_org,
|
|
2846
|
+
tf_github_repo,
|
|
2847
|
+
profiler_name,
|
|
2848
|
+
profilers_artifacts_matrix,
|
|
2849
|
+
profilers_enabled,
|
|
2850
|
+
profilers_map,
|
|
2851
|
+
redis_pids,
|
|
2852
|
+
S3_BUCKET_NAME,
|
|
2853
|
+
test_name,
|
|
2854
|
+
)
|
|
2855
|
+
|
|
2856
|
+
# wait for remote profiling completion
|
|
2857
|
+
if remote_profiler is not None:
|
|
2858
|
+
try:
|
|
2859
|
+
logging.info("Waiting for remote profiling to complete...")
|
|
2860
|
+
profiling_success = remote_profiler.wait_for_completion(
|
|
2861
|
+
timeout=60
|
|
2862
|
+
)
|
|
2863
|
+
if profiling_success:
|
|
2864
|
+
logging.info("Remote profiling completed successfully")
|
|
2865
|
+
else:
|
|
2866
|
+
logging.warning(
|
|
2867
|
+
"Remote profiling did not complete successfully"
|
|
2868
|
+
)
|
|
2869
|
+
except Exception as e:
|
|
2870
|
+
logging.error(
|
|
2871
|
+
f"Error waiting for remote profiling completion: {e}"
|
|
2872
|
+
)
|
|
2873
|
+
|
|
2874
|
+
logging.info("Printing client tool stdout output")
|
|
2875
|
+
if client_container_stdout:
|
|
2876
|
+
print("=== Container Output ===")
|
|
2877
|
+
print(client_container_stdout)
|
|
2878
|
+
print("=== End Container Output ===")
|
|
2879
|
+
else:
|
|
2880
|
+
logging.warning("No container output captured")
|
|
2881
|
+
|
|
2882
|
+
used_memory_check(
|
|
2883
|
+
test_name,
|
|
2884
|
+
benchmark_required_memory,
|
|
2885
|
+
redis_conns,
|
|
2886
|
+
"end of benchmark",
|
|
2887
|
+
used_memory_check_fail,
|
|
2888
|
+
)
|
|
2889
|
+
datapoint_time_ms = start_time_ms
|
|
2890
|
+
|
|
2891
|
+
post_process_benchmark_results(
|
|
2892
|
+
benchmark_tool,
|
|
2893
|
+
local_benchmark_output_filename,
|
|
2894
|
+
datapoint_time_ms,
|
|
2895
|
+
start_time_str,
|
|
2896
|
+
client_container_stdout,
|
|
2897
|
+
None,
|
|
2898
|
+
)
|
|
2899
|
+
# Check if we have multi-client results with aggregated JSON
|
|
2900
|
+
if (
|
|
2901
|
+
is_multiple_clients
|
|
2902
|
+
and client_container_stdout.strip().startswith("{")
|
|
2903
|
+
):
|
|
2904
|
+
# Use aggregated JSON from multi-client runner
|
|
2905
|
+
logging.info(
|
|
2906
|
+
"Using aggregated JSON results from multi-client execution"
|
|
2907
|
+
)
|
|
2908
|
+
results_dict = json.loads(client_container_stdout)
|
|
2909
|
+
|
|
2910
|
+
# Validate benchmark metrics
|
|
2911
|
+
is_valid, validation_error = validate_benchmark_metrics(
|
|
2912
|
+
results_dict, test_name, benchmark_config, default_metrics
|
|
2913
|
+
)
|
|
2914
|
+
if not is_valid:
|
|
2915
|
+
logging.error(
|
|
2916
|
+
f"Test {test_name} failed metric validation: {validation_error}"
|
|
2917
|
+
)
|
|
2918
|
+
test_result = False
|
|
2919
|
+
delete_temporary_files(
|
|
2920
|
+
temporary_dir_client=temporary_dir_client,
|
|
2921
|
+
full_result_path=full_result_path,
|
|
2922
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
2923
|
+
)
|
|
2924
|
+
continue
|
|
2925
|
+
|
|
2926
|
+
# Print results table for multi-client
|
|
2927
|
+
print_results_table_stdout(
|
|
2928
|
+
benchmark_config,
|
|
2929
|
+
default_metrics,
|
|
2930
|
+
results_dict,
|
|
2931
|
+
setup_type,
|
|
2932
|
+
test_name,
|
|
2933
|
+
)
|
|
2934
|
+
# Add results to overall summary table
|
|
2935
|
+
prepare_overall_total_test_results(
|
|
2936
|
+
benchmark_config,
|
|
2937
|
+
default_metrics,
|
|
2938
|
+
results_dict,
|
|
2939
|
+
test_name,
|
|
2940
|
+
results_matrix,
|
|
2941
|
+
redis_conns,
|
|
2942
|
+
setup_name,
|
|
2943
|
+
)
|
|
2944
|
+
else:
|
|
2945
|
+
# Single client - read from file as usual
|
|
2946
|
+
full_result_path = local_benchmark_output_filename
|
|
2947
|
+
if "memtier_benchmark" in benchmark_tool:
|
|
2948
|
+
full_result_path = "{}/{}".format(
|
|
2949
|
+
temporary_dir_client, local_benchmark_output_filename
|
|
2950
|
+
)
|
|
2951
|
+
elif "pubsub-sub-bench" in benchmark_tool:
|
|
2952
|
+
full_result_path = "{}/{}".format(
|
|
2953
|
+
temporary_dir_client, local_benchmark_output_filename
|
|
2954
|
+
)
|
|
2955
|
+
elif "vector-db-benchmark" in benchmark_tool:
|
|
2956
|
+
# For vector-db-benchmark, look for summary JSON file
|
|
2957
|
+
summary_files = [
|
|
2958
|
+
f
|
|
2959
|
+
for f in os.listdir(temporary_dir_client)
|
|
2960
|
+
if f.endswith("-summary.json")
|
|
2961
|
+
]
|
|
2962
|
+
if summary_files:
|
|
2963
|
+
full_result_path = os.path.join(
|
|
2964
|
+
temporary_dir_client, summary_files[0]
|
|
2965
|
+
)
|
|
2966
|
+
logging.info(
|
|
2967
|
+
f"Found vector-db-benchmark summary file: {summary_files[0]}"
|
|
2968
|
+
)
|
|
2969
|
+
else:
|
|
2970
|
+
logging.warning(
|
|
2971
|
+
"No vector-db-benchmark summary JSON file found"
|
|
2972
|
+
)
|
|
2973
|
+
# Create empty results dict to avoid crash
|
|
2974
|
+
results_dict = {}
|
|
2975
|
+
|
|
2976
|
+
logging.info(f"Reading results json from {full_result_path}")
|
|
2977
|
+
|
|
2978
|
+
if (
|
|
2979
|
+
"vector-db-benchmark" in benchmark_tool
|
|
2980
|
+
and not os.path.exists(full_result_path)
|
|
2981
|
+
):
|
|
2982
|
+
# Handle case where vector-db-benchmark didn't produce results
|
|
2983
|
+
results_dict = {}
|
|
2984
|
+
logging.warning(
|
|
2985
|
+
"Vector-db-benchmark did not produce results file"
|
|
2986
|
+
)
|
|
2987
|
+
else:
|
|
2988
|
+
with open(
|
|
2989
|
+
full_result_path,
|
|
2990
|
+
"r",
|
|
2991
|
+
) as json_file:
|
|
2992
|
+
results_dict = json.load(json_file)
|
|
2993
|
+
|
|
2994
|
+
# Validate benchmark metrics
|
|
2995
|
+
is_valid, validation_error = validate_benchmark_metrics(
|
|
2996
|
+
results_dict, test_name, benchmark_config, default_metrics
|
|
2997
|
+
)
|
|
2998
|
+
if not is_valid:
|
|
2999
|
+
logging.error(
|
|
3000
|
+
f"Test {test_name} failed metric validation: {validation_error}"
|
|
3001
|
+
)
|
|
3002
|
+
test_result = False
|
|
3003
|
+
delete_temporary_files(
|
|
3004
|
+
temporary_dir_client=temporary_dir_client,
|
|
3005
|
+
full_result_path=full_result_path,
|
|
3006
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
3007
|
+
)
|
|
3008
|
+
continue
|
|
3009
|
+
|
|
3010
|
+
print_results_table_stdout(
|
|
3011
|
+
benchmark_config,
|
|
3012
|
+
default_metrics,
|
|
3013
|
+
results_dict,
|
|
3014
|
+
setup_type,
|
|
3015
|
+
test_name,
|
|
3016
|
+
None,
|
|
3017
|
+
)
|
|
3018
|
+
prepare_overall_total_test_results(
|
|
3019
|
+
benchmark_config,
|
|
3020
|
+
default_metrics,
|
|
3021
|
+
results_dict,
|
|
3022
|
+
test_name,
|
|
3023
|
+
results_matrix,
|
|
3024
|
+
redis_conns,
|
|
3025
|
+
setup_name,
|
|
3026
|
+
)
|
|
3027
|
+
|
|
3028
|
+
dataset_load_duration_seconds = 0
|
|
3029
|
+
|
|
3030
|
+
exporter_datasink_common(
|
|
3031
|
+
benchmark_config,
|
|
3032
|
+
benchmark_duration_seconds,
|
|
3033
|
+
build_variant_name,
|
|
3034
|
+
datapoint_time_ms,
|
|
3035
|
+
dataset_load_duration_seconds,
|
|
3036
|
+
datasink_conn,
|
|
3037
|
+
datasink_push_results_redistimeseries,
|
|
3038
|
+
git_branch,
|
|
3039
|
+
git_version,
|
|
3040
|
+
metadata,
|
|
3041
|
+
redis_conns,
|
|
3042
|
+
results_dict,
|
|
3043
|
+
running_platform,
|
|
3044
|
+
args.deployment_name,
|
|
3045
|
+
args.deployment_type,
|
|
3046
|
+
test_name,
|
|
3047
|
+
tf_github_org,
|
|
3048
|
+
tf_github_repo,
|
|
3049
|
+
tf_triggering_env,
|
|
3050
|
+
topology_spec_name,
|
|
3051
|
+
default_metrics,
|
|
3052
|
+
git_hash,
|
|
3053
|
+
)
|
|
3054
|
+
test_result = True
|
|
3055
|
+
total_test_suite_runs = total_test_suite_runs + 1
|
|
3056
|
+
|
|
3057
|
+
if args.flushall_on_every_test_end:
|
|
3058
|
+
logging.info("Sending FLUSHALL to the DB")
|
|
3059
|
+
for r in redis_conns:
|
|
3060
|
+
r.flushall()
|
|
3061
|
+
|
|
3062
|
+
# Send MEMORY PURGE after FLUSHALL for memory comparison mode
|
|
3063
|
+
if memory_comparison_only:
|
|
3064
|
+
try:
|
|
3065
|
+
logging.info(
|
|
3066
|
+
"Sending MEMORY PURGE after FLUSHALL at test end"
|
|
3067
|
+
)
|
|
3068
|
+
for r in redis_conns:
|
|
3069
|
+
r.execute_command("MEMORY", "PURGE")
|
|
3070
|
+
except Exception as e:
|
|
3071
|
+
logging.warning(
|
|
3072
|
+
f"MEMORY PURGE failed after FLUSHALL at test end: {e}"
|
|
3073
|
+
)
|
|
3074
|
+
|
|
3075
|
+
except KeyboardInterrupt:
|
|
3076
|
+
logging.info("KeyboardInterrupt caught. Exiting...")
|
|
3077
|
+
print("\nKeyboardInterrupt caught. Exiting...")
|
|
3078
|
+
break
|
|
3079
|
+
except:
|
|
3080
|
+
logging.critical(
|
|
3081
|
+
"Some unexpected exception was caught "
|
|
3082
|
+
"during local work. Failing test...."
|
|
3083
|
+
)
|
|
3084
|
+
logging.critical(sys.exc_info()[0])
|
|
3085
|
+
print("-" * 60)
|
|
3086
|
+
traceback.print_exc(file=sys.stdout)
|
|
3087
|
+
print("-" * 60)
|
|
3088
|
+
test_result = False
|
|
3089
|
+
|
|
3090
|
+
# Clean up database after exception to prevent contamination of next test
|
|
3091
|
+
if (
|
|
3092
|
+
args.flushall_on_every_test_end
|
|
3093
|
+
or args.flushall_on_every_test_start
|
|
3094
|
+
):
|
|
3095
|
+
logging.warning(
|
|
3096
|
+
"Exception caught - cleaning up database with FLUSHALL"
|
|
3097
|
+
)
|
|
3098
|
+
try:
|
|
3099
|
+
for r in redis_conns:
|
|
3100
|
+
r.flushall()
|
|
3101
|
+
except Exception as e:
|
|
3102
|
+
logging.error(f"FLUSHALL failed after exception: {e}")
|
|
3103
|
+
|
|
3104
|
+
# Check if user requested exit via Ctrl+C
|
|
3105
|
+
if _exit_requested:
|
|
3106
|
+
logging.info(
|
|
3107
|
+
"Exit requested by user. Stopping after exception."
|
|
3108
|
+
)
|
|
3109
|
+
break
|
|
3110
|
+
# tear-down
|
|
3111
|
+
logging.info("Tearing down setup")
|
|
3112
|
+
for container in client_containers:
|
|
3113
|
+
if type(container) == Container:
|
|
3114
|
+
try:
|
|
3115
|
+
container.stop()
|
|
3116
|
+
except docker.errors.NotFound:
|
|
3117
|
+
logging.info(
|
|
3118
|
+
"When trying to stop Client container with id {} and image {} it was already stopped".format(
|
|
3119
|
+
container.id, container.image
|
|
3120
|
+
)
|
|
3121
|
+
)
|
|
3122
|
+
pass
|
|
3123
|
+
|
|
3124
|
+
if client_aggregated_results_folder != "":
|
|
3125
|
+
os.makedirs(client_aggregated_results_folder, exist_ok=True)
|
|
3126
|
+
dest_fpath = "{}/{}".format(
|
|
3127
|
+
client_aggregated_results_folder,
|
|
3128
|
+
local_benchmark_output_filename,
|
|
3129
|
+
)
|
|
3130
|
+
# Safety check: ensure full_result_path exists before copying
|
|
3131
|
+
if full_result_path is None:
|
|
3132
|
+
logging.error(
|
|
3133
|
+
f"Cannot preserve results: full_result_path is None for test {test_name}. "
|
|
3134
|
+
f"This may indicate a missing benchmark tool handler in the result path construction."
|
|
3135
|
+
)
|
|
3136
|
+
elif not os.path.exists(full_result_path):
|
|
3137
|
+
logging.error(
|
|
3138
|
+
f"Cannot preserve results: file does not exist at {full_result_path} for test {test_name}"
|
|
3139
|
+
)
|
|
3140
|
+
else:
|
|
3141
|
+
logging.info(
|
|
3142
|
+
"Preserving local results file {} into {}".format(
|
|
3143
|
+
full_result_path, dest_fpath
|
|
3144
|
+
)
|
|
3145
|
+
)
|
|
3146
|
+
shutil.copy(full_result_path, dest_fpath)
|
|
3147
|
+
overall_result &= test_result
|
|
3148
|
+
|
|
3149
|
+
delete_temporary_files(
|
|
3150
|
+
temporary_dir_client=temporary_dir_client,
|
|
3151
|
+
full_result_path=full_result_path,
|
|
3152
|
+
benchmark_tool_global=benchmark_tool_global,
|
|
3153
|
+
)
|
|
3154
|
+
|
|
3155
|
+
# Check if user requested exit via Ctrl+C
|
|
3156
|
+
if _exit_requested:
|
|
3157
|
+
logging.info("Exit requested by user. Printing summary before exit.")
|
|
3158
|
+
print(
|
|
3159
|
+
"\nExecution stopped by user request. Printing summary of completed tests..."
|
|
3160
|
+
)
|
|
3161
|
+
|
|
3162
|
+
# Print Redis server information section before results
|
|
3163
|
+
if len(results_matrix) > 0:
|
|
3164
|
+
# Get redis_conns from the first test context (we need to pass it somehow)
|
|
3165
|
+
# For now, try to get it from the current context if available
|
|
3166
|
+
try:
|
|
3167
|
+
print_redis_info_section(redis_conns)
|
|
3168
|
+
except Exception as e:
|
|
3169
|
+
logging.info(f"Could not connect to Redis for server info: {e}")
|
|
3170
|
+
|
|
3171
|
+
table_name = "Results for entire test-suite"
|
|
3172
|
+
results_matrix_headers = [
|
|
3173
|
+
"Test Name",
|
|
3174
|
+
"Metric JSON Path",
|
|
3175
|
+
"Metric Value",
|
|
3176
|
+
]
|
|
3177
|
+
writer = MarkdownTableWriter(
|
|
3178
|
+
table_name=table_name,
|
|
3179
|
+
headers=results_matrix_headers,
|
|
3180
|
+
value_matrix=results_matrix,
|
|
3181
|
+
)
|
|
3182
|
+
writer.write_table()
|
|
3183
|
+
|
|
3184
|
+
# Add note if execution was stopped early
|
|
3185
|
+
if _exit_requested:
|
|
3186
|
+
print(
|
|
3187
|
+
"\n(Note: Execution was stopped early by user request - showing results for completed tests only)"
|
|
3188
|
+
)
|
|
3189
|
+
|
|
3190
|
+
if client_aggregated_results_folder != "":
|
|
3191
|
+
os.makedirs(client_aggregated_results_folder, exist_ok=True)
|
|
3192
|
+
dest_fpath = f"{client_aggregated_results_folder}/aggregate-results.csv"
|
|
3193
|
+
logging.info(f"Storing an aggregated results CSV into {full_result_path}")
|
|
3194
|
+
|
|
3195
|
+
csv_writer = CsvTableWriter(
|
|
3196
|
+
table_name=table_name,
|
|
3197
|
+
headers=results_matrix_headers,
|
|
3198
|
+
value_matrix=results_matrix,
|
|
3199
|
+
)
|
|
3200
|
+
csv_writer.dump(dest_fpath)
|
|
3201
|
+
|
|
3202
|
+
# Print memory comparison summary if in memory comparison mode
|
|
3203
|
+
if memory_comparison_only and memory_results:
|
|
3204
|
+
logging.info("\n" + "=" * 80)
|
|
3205
|
+
logging.info("MEMORY COMPARISON SUMMARY")
|
|
3206
|
+
logging.info("=" * 80)
|
|
3207
|
+
logging.info(f"Total unique datasets loaded: {len(loaded_datasets)}")
|
|
3208
|
+
if loaded_datasets:
|
|
3209
|
+
logging.info(f"Datasets: {', '.join(sorted(loaded_datasets))}")
|
|
3210
|
+
logging.info("=" * 80)
|
|
3211
|
+
|
|
3212
|
+
# Create memory summary table
|
|
3213
|
+
memory_headers = [
|
|
3214
|
+
"Test Name",
|
|
3215
|
+
"Total Allocated",
|
|
3216
|
+
"Dataset Bytes",
|
|
3217
|
+
"Keys Count",
|
|
3218
|
+
"Bytes/Key",
|
|
3219
|
+
"Dataset %",
|
|
3220
|
+
"Overhead",
|
|
3221
|
+
"Fragmentation",
|
|
3222
|
+
"Alloc Fragmentation",
|
|
3223
|
+
"Object Encoding",
|
|
3224
|
+
"Encoding Confidence",
|
|
3225
|
+
"Scan Type",
|
|
3226
|
+
]
|
|
3227
|
+
|
|
3228
|
+
memory_matrix = []
|
|
3229
|
+
for result in memory_results:
|
|
3230
|
+
# Convert bytes to human readable format
|
|
3231
|
+
total_mb = result["total_allocated"] / (1024 * 1024)
|
|
3232
|
+
dataset_mb = result["dataset_bytes"] / (1024 * 1024)
|
|
3233
|
+
overhead_mb = result["overhead_total"] / (1024 * 1024)
|
|
3234
|
+
|
|
3235
|
+
memory_matrix.append(
|
|
3236
|
+
[
|
|
3237
|
+
result["test_name"],
|
|
3238
|
+
f"{total_mb:.1f}MB",
|
|
3239
|
+
f"{dataset_mb:.1f}MB",
|
|
3240
|
+
f"{result['keys_count']:,}",
|
|
3241
|
+
f"{result['keys_bytes_per_key']:.0f}B",
|
|
3242
|
+
f"{result['dataset_percentage']:.1f}%",
|
|
3243
|
+
f"{overhead_mb:.1f}MB",
|
|
3244
|
+
f"{result['fragmentation']:.2f}",
|
|
3245
|
+
f"{result['allocator_fragmentation_ratio']:.3f}",
|
|
3246
|
+
result.get("object_encoding", "unknown"),
|
|
3247
|
+
f"{result.get('encoding_confidence', 0.0)*100:.1f}%",
|
|
3248
|
+
(
|
|
3249
|
+
"complete"
|
|
3250
|
+
if result.get("encoding_is_complete_scan", False)
|
|
3251
|
+
else "sample"
|
|
3252
|
+
),
|
|
3253
|
+
]
|
|
3254
|
+
)
|
|
3255
|
+
|
|
3256
|
+
memory_writer = MarkdownTableWriter(
|
|
3257
|
+
table_name="Memory Usage Summary",
|
|
3258
|
+
headers=memory_headers,
|
|
3259
|
+
value_matrix=memory_matrix,
|
|
3260
|
+
)
|
|
3261
|
+
memory_writer.write_table()
|
|
3262
|
+
|
|
3263
|
+
if dry_run is True:
|
|
3264
|
+
mode_description = (
|
|
3265
|
+
"memory comparison" if memory_comparison_only else "benchmark"
|
|
3266
|
+
)
|
|
3267
|
+
logging.info(
|
|
3268
|
+
"Number of tests that would have been run ({}): {}".format(
|
|
3269
|
+
mode_description, dry_run_count
|
|
3270
|
+
)
|
|
3271
|
+
)
|
|
3272
|
+
if _exit_requested:
|
|
3273
|
+
logging.info("(Note: Execution was stopped early by user request)")
|
|
3274
|
+
if dry_run_tests:
|
|
3275
|
+
logging.info(f"Tests that would be run ({mode_description} mode):")
|
|
3276
|
+
for test in dry_run_tests:
|
|
3277
|
+
logging.info(f" - {test}")
|
|
3278
|
+
final_test_regex = "|".join(dry_run_tests)
|
|
3279
|
+
logging.info(f"Final test regex: {final_test_regex}")
|
|
3280
|
+
|
|
3281
|
+
# For memory comparison mode, show dataset analysis
|
|
3282
|
+
if memory_comparison_only:
|
|
3283
|
+
unique_datasets = set()
|
|
3284
|
+
tests_with_datasets = 0
|
|
3285
|
+
|
|
3286
|
+
for test_file in testsuite_spec_files:
|
|
3287
|
+
if defaults_filename in test_file:
|
|
3288
|
+
continue
|
|
3289
|
+
try:
|
|
3290
|
+
with open(test_file, "r") as stream:
|
|
3291
|
+
benchmark_config = yaml.safe_load(stream)
|
|
3292
|
+
|
|
3293
|
+
test_name = extract_test_name_from_test_configuration_file(
|
|
3294
|
+
test_file
|
|
3295
|
+
)
|
|
3296
|
+
if (
|
|
3297
|
+
test_name in dry_run_tests
|
|
3298
|
+
and "dbconfig" in benchmark_config
|
|
3299
|
+
):
|
|
3300
|
+
# Skip load tests in dry run analysis too
|
|
3301
|
+
keyspacelen = (
|
|
3302
|
+
benchmark_config["dbconfig"]
|
|
3303
|
+
.get("check", {})
|
|
3304
|
+
.get("keyspacelen", None)
|
|
3305
|
+
)
|
|
3306
|
+
if keyspacelen is not None and keyspacelen == 0:
|
|
3307
|
+
continue
|
|
3308
|
+
|
|
3309
|
+
dataset_name = benchmark_config["dbconfig"].get(
|
|
3310
|
+
"dataset_name"
|
|
3311
|
+
)
|
|
3312
|
+
if dataset_name:
|
|
3313
|
+
unique_datasets.add(dataset_name)
|
|
3314
|
+
tests_with_datasets += 1
|
|
3315
|
+
|
|
3316
|
+
except Exception as e:
|
|
3317
|
+
logging.debug(f"Error analyzing {test_file} for dry run: {e}")
|
|
3318
|
+
|
|
3319
|
+
if tests_with_datasets > 0:
|
|
3320
|
+
logging.info(f"\nMemory comparison analysis:")
|
|
3321
|
+
logging.info(f" Tests with datasets: {tests_with_datasets}")
|
|
3322
|
+
logging.info(f" Unique datasets: {len(unique_datasets)}")
|
|
3323
|
+
logging.info(
|
|
3324
|
+
f" Dataset ingestion savings: {tests_with_datasets - len(unique_datasets)} skipped loads"
|
|
3325
|
+
)
|
|
3326
|
+
if unique_datasets:
|
|
3327
|
+
logging.info(f" Datasets that would be loaded:")
|
|
3328
|
+
for dataset in sorted(unique_datasets):
|
|
3329
|
+
logging.info(f" - {dataset}")
|
|
3330
|
+
|
|
3331
|
+
|
|
3332
|
+
def get_maxmemory(r):
|
|
3333
|
+
memory_info = r.info("memory")
|
|
3334
|
+
|
|
3335
|
+
# Check if maxmemory key exists in Redis memory info
|
|
3336
|
+
if "maxmemory" not in memory_info:
|
|
3337
|
+
logging.warning(
|
|
3338
|
+
"maxmemory not present in Redis memory info. Cannot enforce memory checks."
|
|
3339
|
+
)
|
|
3340
|
+
return 0
|
|
3341
|
+
|
|
3342
|
+
maxmemory = int(memory_info["maxmemory"])
|
|
3343
|
+
if maxmemory == 0:
|
|
3344
|
+
total_system_memory = int(memory_info["total_system_memory"])
|
|
3345
|
+
logging.info(" Using total system memory as max {}".format(total_system_memory))
|
|
3346
|
+
maxmemory = total_system_memory
|
|
3347
|
+
else:
|
|
3348
|
+
logging.info(" Detected redis maxmemory config value {}".format(maxmemory))
|
|
3349
|
+
|
|
3350
|
+
return maxmemory
|
|
3351
|
+
|
|
3352
|
+
|
|
3353
|
+
def get_benchmark_required_memory(benchmark_config):
|
|
3354
|
+
benchmark_required_memory = 0
|
|
3355
|
+
if "dbconfig" in benchmark_config:
|
|
3356
|
+
if "resources" in benchmark_config["dbconfig"]:
|
|
3357
|
+
resources = benchmark_config["dbconfig"]["resources"]
|
|
3358
|
+
if "requests" in resources:
|
|
3359
|
+
resources_requests = benchmark_config["dbconfig"]["resources"][
|
|
3360
|
+
"requests"
|
|
3361
|
+
]
|
|
3362
|
+
if "memory" in resources_requests:
|
|
3363
|
+
benchmark_required_memory = resources_requests["memory"]
|
|
3364
|
+
benchmark_required_memory = int(
|
|
3365
|
+
parse_size(benchmark_required_memory)
|
|
3366
|
+
)
|
|
3367
|
+
logging.info(
|
|
3368
|
+
"Benchmark required memory: {} Bytes".format(
|
|
3369
|
+
benchmark_required_memory
|
|
3370
|
+
)
|
|
3371
|
+
)
|
|
3372
|
+
return benchmark_required_memory
|
|
3373
|
+
|
|
3374
|
+
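As a concrete, made-up illustration of the fields read by get_benchmark_required_memory(), a spec declaring a 1 GB requirement would look like this; parse_size is assumed to accept human-readable sizes such as "1g":

```python
example_benchmark_config = {
    "dbconfig": {
        "resources": {
            "requests": {"memory": "1g"},
        },
    },
}

# get_benchmark_required_memory(example_benchmark_config) would then log and
# return the parsed byte count (the exact value depends on how parse_size
# interprets "1g").
```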
|
|
3375
|
+
def used_memory_check(
|
|
3376
|
+
test_name,
|
|
3377
|
+
benchmark_required_memory,
|
|
3378
|
+
redis_conns,
|
|
3379
|
+
stage,
|
|
3380
|
+
used_memory_check_fail=False,
|
|
3381
|
+
):
|
|
3382
|
+
used_memory = 0
|
|
3383
|
+
for conn in redis_conns:
|
|
3384
|
+
info_mem = conn.info("memory")
|
|
3385
|
+
if "used_memory" in info_mem:
|
|
3386
|
+
used_memory = used_memory + info_mem["used_memory"]
|
|
3387
|
+
else:
|
|
3388
|
+
logging.warning(
|
|
3389
|
+
"used_memory not present in Redis memory info. Cannot enforce memory checks."
|
|
3390
|
+
)
|
|
3391
|
+
used_memory_gb = int(math.ceil(float(used_memory) / 1024.0 / 1024.0 / 1024.0))
|
|
3392
|
+
logging.info("Benchmark used memory at {}: {}g".format(stage, used_memory_gb))
|
|
3393
|
+
if used_memory > benchmark_required_memory:
|
|
3394
|
+
logging.error(
|
|
3395
|
+
"The benchmark {} specified a dbconfig resource request of memory ({}) bellow the REAL MEMORY USAGE OF: {}. FIX IT!.".format(
|
|
3396
|
+
test_name, benchmark_required_memory, used_memory_gb
|
|
3397
|
+
)
|
|
3398
|
+
)
|
|
3399
|
+
if used_memory_check_fail:
|
|
3400
|
+
exit(1)
|
|
3401
|
+
|
|
3402
|
+
|
|
3403
|
+
def cp_to_workdir(benchmark_tool_workdir, srcfile):
|
|
3404
|
+
head, filename = os.path.split(srcfile)
|
|
3405
|
+
dstfile = f"{benchmark_tool_workdir}/{filename}"
|
|
3406
|
+
shutil.copyfile(srcfile, dstfile)
|
|
3407
|
+
logging.info(
|
|
3408
|
+
f"Copying to workdir the following file {srcfile}. Final workdir file {dstfile}"
|
|
3409
|
+
)
|
|
3410
|
+
return dstfile, filename
|
|
3411
|
+
|
|
3412
|
+
|
|
3413
|
+
def print_results_table_stdout(
|
|
3414
|
+
benchmark_config,
|
|
3415
|
+
default_metrics,
|
|
3416
|
+
results_dict,
|
|
3417
|
+
setup_name,
|
|
3418
|
+
test_name,
|
|
3419
|
+
cpu_usage=None,
|
|
3420
|
+
):
|
|
3421
|
+
# check which metrics to extract
|
|
3422
|
+
(
|
|
3423
|
+
_,
|
|
3424
|
+
metrics,
|
|
3425
|
+
) = merge_default_and_config_metrics(
|
|
3426
|
+
benchmark_config,
|
|
3427
|
+
default_metrics,
|
|
3428
|
+
None,
|
|
3429
|
+
)
|
|
3430
|
+
table_name = f"Results for {test_name} test-case on {setup_name} topology"
|
|
3431
|
+
results_matrix_headers = [
|
|
3432
|
+
"Metric JSON Path",
|
|
3433
|
+
"Metric Value",
|
|
3434
|
+
]
|
|
3435
|
+
results_matrix = extract_results_table(metrics, results_dict)
|
|
3436
|
+
|
|
3437
|
+
# Use resolved metric name for precision_summary metrics, otherwise use original path
|
|
3438
|
+
def get_display_name(x):
|
|
3439
|
+
# For precision_summary metrics with wildcards, construct the resolved path
|
|
3440
|
+
if (
|
|
3441
|
+
len(x) > 1
|
|
3442
|
+
and isinstance(x[0], str)
|
|
3443
|
+
and "precision_summary" in x[0]
|
|
3444
|
+
and "*" in x[0]
|
|
3445
|
+
):
|
|
3446
|
+
|
|
3447
|
+
# Look for the precision level in the cleaned metrics logs
|
|
3448
|
+
# We need to find the corresponding cleaned metric to get the precision level
|
|
3449
|
+
# For now, let's extract it from the time series logs that we know are working
|
|
3450
|
+
# The pattern is: replace "*" with the actual precision level
|
|
3451
|
+
|
|
3452
|
+
# Since we know from logs that the precision level is available,
|
|
3453
|
+
# let's reconstruct it from the metric context path (x[1]) if available
|
|
3454
|
+
if (
|
|
3455
|
+
len(x) > 1
|
|
3456
|
+
and isinstance(x[1], str)
|
|
3457
|
+
and x[1].startswith("'")
|
|
3458
|
+
and x[1].endswith("'")
|
|
3459
|
+
):
|
|
3460
|
+
precision_level = x[1] # This should be something like "'1.0000'"
|
|
3461
|
+
resolved_path = x[0].replace("*", precision_level)
|
|
3462
|
+
return resolved_path
|
|
3463
|
+
|
|
3464
|
+
return x[0] # Use original path
|
|
3465
|
+
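A small worked example of the wildcard resolution performed above (the metric path and precision key are invented for illustration):

```python
metric_path = "$.precision_summary.*.recall"
precision_level = "'0.9500'"  # quoted precision key, as carried in x[1]
resolved = metric_path.replace("*", precision_level)
print(resolved)  # $.precision_summary.'0.9500'.recall
```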
|
|
3466
|
+
results_matrix = [[get_display_name(x), f"{x[3]:.3f}"] for x in results_matrix]
|
|
3467
|
+
writer = MarkdownTableWriter(
|
|
3468
|
+
table_name=table_name,
|
|
3469
|
+
headers=results_matrix_headers,
|
|
3470
|
+
value_matrix=results_matrix,
|
|
3471
|
+
)
|
|
3472
|
+
writer.write_table()
|
|
3473
|
+
|
|
3474
|
+
|
|
3475
|
+
def print_redis_info_section(redis_conns):
    """Print Redis server information as a separate section"""
    if redis_conns is not None and len(redis_conns) > 0:
        try:
            redis_info = redis_conns[0].info()
            server_name = "redis"
            if "server_name" in redis_info:
                server_name = redis_info["server_name"]

            print("\n# Redis Server Information")
            redis_info_data = [
                [
                    f"{server_name} version",
                    redis_info.get(f"{server_name}_version", "unknown"),
                ],
                ["redis version", redis_info.get("redis_version", "unknown")],
                ["io_threads_active", redis_info.get("io_threads_active", "unknown")],
                [
                    f"{server_name} Git SHA1",
                    redis_info.get("redis_git_sha1", "unknown"),
                ],
                [
                    f"{server_name} Git Dirty",
                    str(redis_info.get("redis_git_dirty", "unknown")),
                ],
                [
                    f"{server_name} Build ID",
                    redis_info.get("redis_build_id", "unknown"),
                ],
                [f"{server_name} Mode", redis_info.get("redis_mode", "unknown")],
                ["OS", redis_info.get("os", "unknown")],
                ["Arch Bits", str(redis_info.get("arch_bits", "unknown"))],
                ["GCC Version", redis_info.get("gcc_version", "unknown")],
                ["Process ID", str(redis_info.get("process_id", "unknown"))],
                ["TCP Port", str(redis_info.get("tcp_port", "unknown"))],
                [
                    "Uptime (seconds)",
                    str(redis_info.get("uptime_in_seconds", "unknown")),
                ],
            ]

            from pytablewriter import MarkdownTableWriter

            writer = MarkdownTableWriter(
                table_name="",
                headers=["Property", "Value"],
                value_matrix=redis_info_data,
            )
            writer.write_table()

            logging.info(
                f"Displayed Redis server information: Redis {redis_info.get('redis_version', 'unknown')}"
            )
        except Exception as e:
            logging.warning(f"Failed to collect Redis server information: {e}")


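A hedged usage sketch for the section printer above: it only needs a list of connections that expose .info(), so a plain redis-py client works; the host and port are placeholders, and the import of the helper is assumed to come from this runner module:

# Illustrative usage sketch (not part of the package source); connection details are placeholders.
import redis

conns = [redis.Redis(host="localhost", port=6379)]
print_redis_info_section(conns)  # prints a Markdown "Property | Value" table to stdout
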
def get_supported_redis_commands(redis_conns):
    """Get list of supported Redis commands from the server"""
    if redis_conns is not None and len(redis_conns) > 0:
        try:
            # Execute COMMAND to get all supported commands
            commands_info = redis_conns[0].execute_command("COMMAND")
            logging.info(
                f"COMMAND response type: {type(commands_info)}, length: {len(commands_info) if hasattr(commands_info, '__len__') else 'N/A'}"
            )

            # Extract command names
            supported_commands = set()

            if isinstance(commands_info, dict):
                # COMMAND response is a dict with command names as keys
                for cmd_name in commands_info.keys():
                    if isinstance(cmd_name, bytes):
                        cmd_name = cmd_name.decode("utf-8")
                    supported_commands.add(str(cmd_name).upper())
            elif isinstance(commands_info, (list, tuple)):
                # Fallback for list format (first element of each command info array)
                for cmd_info in commands_info:
                    if isinstance(cmd_info, (list, tuple)) and len(cmd_info) > 0:
                        cmd_name = cmd_info[0]
                        if isinstance(cmd_name, bytes):
                            cmd_name = cmd_name.decode("utf-8")
                        supported_commands.add(str(cmd_name).upper())

            logging.info(
                f"Retrieved {len(supported_commands)} supported Redis commands"
            )

            # Handle case where COMMAND returns 0 commands (likely not supported)
            if len(supported_commands) == 0:
                logging.warning(
                    "COMMAND returned 0 commands - likely not supported by this Redis instance"
                )
                return None

            # Log some sample commands for debugging
            if supported_commands:
                sample_commands = sorted(list(supported_commands))[:10]
                logging.info(f"Sample commands: {sample_commands}")

                # Check specifically for vector commands
                vector_commands = [
                    cmd for cmd in supported_commands if cmd.startswith("V")
                ]
                if vector_commands:
                    logging.info(f"Vector commands found: {sorted(vector_commands)}")

            return supported_commands
        except Exception as e:
            logging.warning(f"Failed to get supported Redis commands: {e}")
            logging.warning("Proceeding without command validation")
            return None
    return None


def check_test_command_support(benchmark_config, supported_commands):
    """Check if all tested-commands in the benchmark config are supported"""
    if supported_commands is None or len(supported_commands) == 0:
        logging.warning(
            "No supported commands list available (COMMAND not supported or returned 0 commands), skipping command check"
        )
        return True, []

    if "tested-commands" not in benchmark_config:
        logging.info("No tested-commands specified in benchmark config")
        return True, []

    tested_commands = benchmark_config["tested-commands"]
    unsupported_commands = []

    for cmd in tested_commands:
        cmd_upper = cmd.upper()
        if cmd_upper not in supported_commands:
            unsupported_commands.append(cmd)

    if unsupported_commands:
        logging.warning(f"Unsupported commands found: {unsupported_commands}")
        return False, unsupported_commands
    else:
        logging.info(f"All tested commands are supported: {tested_commands}")
        return True, []


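A minimal sketch tying the two helpers above together, assuming a benchmark config dict carrying a tested-commands list; the command names and connection details here are only examples:

# Illustrative sketch (not part of the package source).
import redis

conns = [redis.Redis(host="localhost", port=6379)]
supported = get_supported_redis_commands(conns)  # None if COMMAND is unavailable
benchmark_config = {"tested-commands": ["set", "get", "lpush"]}
ok, missing = check_test_command_support(benchmark_config, supported)
if not ok:
    print(f"skipping test, unsupported commands: {missing}")
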
def prepare_overall_total_test_results(
    benchmark_config,
    default_metrics,
    results_dict,
    test_name,
    overall_results_matrix,
    redis_conns=None,
    topology=None,
):
    # check which metrics to extract
    (
        _,
        metrics,
    ) = merge_default_and_config_metrics(
        benchmark_config,
        default_metrics,
        None,
    )
    current_test_results_matrix = extract_results_table(metrics, results_dict)

    # Use the same display name logic as in the individual test results
    def get_overall_display_name(x):
        # For precision_summary metrics with wildcards, construct the resolved path
        if (
            len(x) > 1
            and isinstance(x[0], str)
            and "precision_summary" in x[0]
            and "*" in x[0]
        ):
            # Reconstruct resolved path from metric context path (x[1]) if available
            if (
                len(x) > 1
                and isinstance(x[1], str)
                and x[1].startswith("'")
                and x[1].endswith("'")
            ):
                precision_level = x[1]  # This should be something like "'1.0000'"
                resolved_path = x[0].replace("*", precision_level)
                return resolved_path

        return x[0]  # Use original path

    # Include topology in the test name if provided
    test_name_with_topology = test_name
    if topology:
        test_name_with_topology = f"{topology}-{test_name}"

    current_test_results_matrix = [
        [test_name_with_topology, get_overall_display_name(x), f"{x[3]:.3f}"]
        for x in current_test_results_matrix
    ]
    overall_results_matrix.extend(current_test_results_matrix)


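For context, a small sketch of how the overall matrix grows: each call to prepare_overall_total_test_results appends one [test, metric, value] row per extracted metric, with the topology folded into the test name. The test names and metric rows below are hypothetical:

# Illustrative sketch (not part of the package source); rows are hypothetical.
overall_results_matrix = []
# after a call with test_name="memtier-1Mkeys-string-get" and topology="oss-standalone",
# the matrix might contain rows such as:
#   ["oss-standalone-memtier-1Mkeys-string-get", "ALL_STATS.Totals.Ops/sec", "184321.000"]
# a second test appends its own rows to the same list, e.g.:
#   ["oss-standalone-memtier-1Mkeys-string-set", "ALL_STATS.Totals.Ops/sec", "141205.000"]
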
def data_prepopulation_step(
    benchmark_config,
    benchmark_tool_workdir,
    client_cpuset_cpus,
    docker_client,
    git_hash,
    port,
    temporary_dir,
    test_name,
    host,
    tls_enabled=False,
    tls_skip_verify=False,
    tls_cert=None,
    tls_key=None,
    tls_cacert=None,
    resp_version=None,
    benchmark_local_install=False,
    password=None,
    oss_cluster_api_enabled=False,
    unix_socket="",
    timeout_buffer=60,
    args=None,
):
    result = True
    # setup the benchmark
    (
        start_time,
        start_time_ms,
        start_time_str,
    ) = get_start_time_vars()
    local_benchmark_output_filename = get_local_run_full_filename(
        start_time_str,
        git_hash,
        "preload__" + test_name,
        "oss-standalone",
    )
    preload_image = extract_client_container_image(
        benchmark_config["dbconfig"], "preload_tool"
    )
    preload_tool = extract_client_tool(benchmark_config["dbconfig"], "preload_tool")

    # Set preload tool path based on local install option
    if benchmark_local_install and "memtier_benchmark" in preload_tool and args:
        full_benchmark_path = getattr(args, "memtier_bin_path", "memtier_benchmark")
    else:
        full_benchmark_path = f"/usr/local/bin/{preload_tool}"
    client_mnt_point = "/mnt/client/"

    if "memtier_benchmark" in preload_tool:
        override_memtier_test_time_preload = 0
        (
            _,
            preload_command_str,
            arbitrary_command,
        ) = prepare_memtier_benchmark_parameters(
            benchmark_config["dbconfig"]["preload_tool"],
            full_benchmark_path,
            port,
            host,
            password,
            local_benchmark_output_filename,
            oss_cluster_api_enabled,
            tls_enabled,
            tls_skip_verify,
            tls_cert,
            tls_key,
            tls_cacert,
            resp_version,
            override_memtier_test_time_preload,
            1,
            unix_socket,
        )

        # run the benchmark
        preload_start_time = datetime.datetime.now()

        if benchmark_local_install:
            logging.info("Running memtier benchmark outside of docker")

            preload_command_str = (
                "taskset -c " + client_cpuset_cpus + " " + preload_command_str
            )

            # Calculate timeout for preload process
            process_timeout = calculate_process_timeout(
                preload_command_str, timeout_buffer
            )

            # Run with timeout
            success, client_container_stdout, stderr = run_local_command_with_timeout(
                preload_command_str, process_timeout, "memtier preload"
            )

            if not success:
                logging.error(f"Memtier preload failed: {stderr}")
                result = False
                return result

            move_command = "mv {} {}".format(
                local_benchmark_output_filename, temporary_dir
            )
            os.system(move_command)

        else:
            logging.info(
                "Using docker image {} as benchmark PRELOAD image (cpuset={}) with the following args: {}".format(
                    preload_image,
                    client_cpuset_cpus,
                    preload_command_str,
                )
            )
            # Use explicit container management for preload tool
            container = docker_client.containers.run(
                image=preload_image,
                volumes={
                    temporary_dir: {
                        "bind": client_mnt_point,
                        "mode": "rw",
                    },
                },
                auto_remove=False,
                privileged=True,
                working_dir=benchmark_tool_workdir,
                command=preload_command_str,
                network_mode="host",
                detach=True,
                cpuset_cpus=client_cpuset_cpus,
            )

            # Wait for preload container and get output
            try:
                exit_code = container.wait()
                client_container_stdout = container.logs().decode("utf-8")
                logging.info(f"Preload tool completed with exit code: {exit_code}")
            except Exception as wait_error:
                logging.error(f"Preload tool wait error: {wait_error}")
                client_container_stdout = container.logs().decode("utf-8")
            finally:
                # Clean up container
                try:
                    container.remove(force=True)
                except Exception as cleanup_error:
                    logging.warning(f"Preload tool cleanup error: {cleanup_error}")

        preload_end_time = datetime.datetime.now()
        preload_duration_seconds = calculate_client_tool_duration_and_check(
            preload_end_time, preload_start_time, "Preload", False
        )
        logging.info(
            "Tool {} seconds to load data. Output {}".format(
                preload_duration_seconds,
                client_container_stdout,
            )
        )
    return result


def generate_cpuset_cpus(ceil_db_cpu_limit, current_cpu_pos):
    previous_cpu_pos = current_cpu_pos
    current_cpu_pos = current_cpu_pos + int(ceil_db_cpu_limit)
    db_cpuset_cpus = ",".join(
        [str(x) for x in range(previous_cpu_pos, current_cpu_pos)]
    )
    return db_cpuset_cpus, current_cpu_pos