redis-benchmarks-specification 0.2.42__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis_benchmarks_specification/__api__/Readme.md +7 -0
- redis_benchmarks_specification/__api__/__init__.py +5 -0
- redis_benchmarks_specification/__api__/api.py +87 -0
- redis_benchmarks_specification/__api__/app.py +191 -0
- redis_benchmarks_specification/__builder__/Readme.md +7 -0
- redis_benchmarks_specification/__builder__/__init__.py +5 -0
- redis_benchmarks_specification/__builder__/builder.py +1010 -0
- redis_benchmarks_specification/__builder__/schema.py +23 -0
- redis_benchmarks_specification/__cli__/__init__.py +5 -0
- redis_benchmarks_specification/__cli__/args.py +226 -0
- redis_benchmarks_specification/__cli__/cli.py +624 -0
- redis_benchmarks_specification/__cli__/stats.py +1304 -0
- redis_benchmarks_specification/__common__/__init__.py +0 -0
- redis_benchmarks_specification/__common__/builder_schema.py +256 -0
- redis_benchmarks_specification/__common__/env.py +96 -0
- redis_benchmarks_specification/__common__/github.py +280 -0
- redis_benchmarks_specification/__common__/package.py +28 -0
- redis_benchmarks_specification/__common__/runner.py +485 -0
- redis_benchmarks_specification/__common__/spec.py +143 -0
- redis_benchmarks_specification/__common__/suppress_warnings.py +20 -0
- redis_benchmarks_specification/__common__/timeseries.py +1621 -0
- redis_benchmarks_specification/__compare__/__init__.py +5 -0
- redis_benchmarks_specification/__compare__/args.py +240 -0
- redis_benchmarks_specification/__compare__/compare.py +3322 -0
- redis_benchmarks_specification/__init__.py +15 -0
- redis_benchmarks_specification/__runner__/__init__.py +5 -0
- redis_benchmarks_specification/__runner__/args.py +334 -0
- redis_benchmarks_specification/__runner__/remote_profiling.py +535 -0
- redis_benchmarks_specification/__runner__/runner.py +3837 -0
- redis_benchmarks_specification/__self_contained_coordinator__/__init__.py +5 -0
- redis_benchmarks_specification/__self_contained_coordinator__/args.py +210 -0
- redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py +27 -0
- redis_benchmarks_specification/__self_contained_coordinator__/build_info.py +61 -0
- redis_benchmarks_specification/__self_contained_coordinator__/clients.py +58 -0
- redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py +17 -0
- redis_benchmarks_specification/__self_contained_coordinator__/docker.py +108 -0
- redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py +19 -0
- redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py +96 -0
- redis_benchmarks_specification/__self_contained_coordinator__/runners.py +740 -0
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +2554 -0
- redis_benchmarks_specification/__setups__/__init__.py +0 -0
- redis_benchmarks_specification/__setups__/topologies.py +17 -0
- redis_benchmarks_specification/__spec__/__init__.py +5 -0
- redis_benchmarks_specification/__spec__/args.py +78 -0
- redis_benchmarks_specification/__spec__/cli.py +259 -0
- redis_benchmarks_specification/__watchdog__/__init__.py +5 -0
- redis_benchmarks_specification/__watchdog__/args.py +54 -0
- redis_benchmarks_specification/__watchdog__/watchdog.py +175 -0
- redis_benchmarks_specification/commands/__init__.py +0 -0
- redis_benchmarks_specification/commands/commands.py +15 -0
- redis_benchmarks_specification/setups/builders/gcc:15.2.0-amd64-debian-bookworm-default.yml +20 -0
- redis_benchmarks_specification/setups/builders/gcc:15.2.0-arm64-debian-bookworm-default.yml +20 -0
- redis_benchmarks_specification/setups/platforms/aws-ec2-1node-c5.4xlarge.yml +27 -0
- redis_benchmarks_specification/setups/topologies/topologies.yml +153 -0
- redis_benchmarks_specification/test-suites/defaults.yml +32 -0
- redis_benchmarks_specification/test-suites/generate.py +114 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpire-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpire-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpireat-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hexpireat-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetall-50-fields-100B-values.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hgetex-persist-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpire-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpire-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpireat-5-fields-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-hpexpireat-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-hash-htll-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-1000B-values-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-expiration.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-long-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-1-fields-with-10B-values-short-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-20-fields-with-1B-values-pipeline-30.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-1000B-values-expiration.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-expiration.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-long-expiration.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-5-fields-with-10B-values-short-expiration.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values-expiration.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-1000B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-100B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-expiration.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-long-expiration.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values-short-expiration.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-hash-50-fields-with-10000B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50.yml +39 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-with-10B-values-pipeline-50.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values-pipeline-10.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Kkeys-hash-listpack-500-fields-update-20-fields-with-1B-to-64B-values.yml +75 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-expire-use-case.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-psetex-expire-use-case.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-setex-expire-use-case.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-1KiB-expire-use-case.yml +49 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-4KiB-expire-use-case.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-bitmap-getbit-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-exists-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expire-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-expireat-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-pexpire-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-count-500-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-500-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-5000-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-type-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-touch-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-ttl-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hexists.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hget-hgetall-hkeys-hvals-with-100B-values.yml +48 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hgetall-50-fields-10B-values.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrby.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hincrbyfloat.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-10-fields-with-10B-values-with-expiration-pipeline-10.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-5-fields-with-100B-values-with-expiration-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-5-fields-with-10B-values-with-expiration-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hkeys-50-fields-with-10B-values-with-expiration-pipeline-10.yml +54 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-hmget-5-fields-with-100B-values-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-hash-transactions-multi-exec-pipeline-20.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-100B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-10B-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-lpop-rpop-with-1KiB-values.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-list-rpoplpush-with-10B-values.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values-pipeline-10.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-5-fields-with-1000B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-50-fields-with-10B-values-long-expiration-pipeline-10.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-hash-hmset-5-fields-with-1000B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-rpush-with-10B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-100B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-10B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-list-with-1KiB-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits-pipeline-10.yml +58 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits.yml +58 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-1-fields-with-100B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values-pipeline-10.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-stream-5-fields-with-100B-values.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-100B-values.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-10.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100-nokeyprefix.yml +29 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-100.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-50.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values-pipeline-500.yml +33 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-10B-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-1KiB-values.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-string-with-20KiB-values.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-listpack-with-100-elements-double-score.yml +91 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-double-score.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-zset-with-10-elements-int-score.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-append-1-100B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-decr.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-100B.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-100.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-50.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B-pipeline-500.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-10B.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-1KiB.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B-pipeline-10.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-get-32B.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incr-pipeline-10.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby-pipeline-10.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrby.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat-pipeline-10.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-incrbyfloat.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-int-encoding-strlen-pipeline-10.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mget-1KiB.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire-pipeline-10.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-expire.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-100B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-1KB.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-32B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-512B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-with-expiration-240B-400_conns.yml +47 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-set-with-ex-100B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setex-100B-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-1.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-4KiB-pipeline-1.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-4KiB-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-512B-pipeline-1.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-512B-pipeline-10.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-100M-bits-bitmap-bitcount.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-1Billion-bits-bitmap-bitcount.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geopos.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-2-elements-geosearch-fromlonlat-withcoord.yml +39 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist-pipeline-10.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geodist.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash-pipeline-10.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geohash.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos-pipeline-10.yml +35 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geopos.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-bybox.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat-pipeline-10.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-geo-60M-elements-geosearch-fromlonlat.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall-pipeline-10.yml +285 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-1K-fields-hgetall.yml +284 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-100B-values-cursor-count-1000.yml +291 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values-cursor-count-100.yml +291 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values.yml +290 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-50-fields-10B-values.yml +54 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements-pipeline-10.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10-elements-lrange-all-elements.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-7bit-uint-lrange-all-elements-pipeline-10.yml +44 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-int-lrange-all-elements-pipeline-10.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-llen-pipeline-10.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements-pipeline-10.yml +52 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-100-elements-lrange-all-elements.yml +51 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-integer.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string-pipeline-10.yml +42 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lindex-string.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-integer.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-linsert-lrem-string.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-integer.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-10K-elements-lpos-string.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements-pipeline-10.yml +202 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-1K-elements-lrange-all-elements.yml +201 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-list-2K-elements-quicklist-lrange-all-elements-longs.yml +258 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-hash-1K-fields-with-5B-values.yml +282 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-float-score.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-load-zset-with-5-elements-parsing-hexa-score.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-pfadd-4KB-values-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers-pipeline-10.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smembers.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10-elements-smismember.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-is-a-member.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sismember-not-a-member.yml +53 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smembers.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-smismember.yml +54 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-100-elements-sscan.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-sismember-50pct-chance.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-10M-elements-srem-50pct-chance.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-smembers.yml +200 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan-cursor-count-100.yml +201 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan.yml +200 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1M-elements-sismember-50pct-chance.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-200K-elements-sadd-constant.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-2M-elements-sadd-increasing.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zincrby-1M-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-100K-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-10M-elements-pipeline-1.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrank-1M-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrem-5M-elements-pipeline-1.yml +47 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-1.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrangebyscore-256K-elements-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zrevrank-1M-elements-pipeline-1.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements-long-scores.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-10-elements-zrange-all-elements.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrange-all-elements.yml +66 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements-long-scores.yml +66 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zrangebyscore-all-elements.yml +66 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-100-elements-zscan.yml +65 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zrange-all-elements.yml +322 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zscan.yml +321 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zcard-pipeline-10.yml +39 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zremrangebyscore-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-5-elements.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zrevrange-withscores-5-elements-pipeline-10.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1M-elements-zscore-pipeline-10.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-1K-elements.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-600K-elements-zrangestore-300K-elements.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-listpack-zrank-100-elements-pipeline-1.yml +50 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-eval-hset-expire.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-lua-evalsha-hset-expire.yml +41 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sdiff.yml +57 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sinter.yml +57 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-elements-sunion.yml +57 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries-pipeline-10.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunion.yml +434 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-zset-300-elements-skiplist-encoded-zunionstore.yml +434 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values-pipeline-10.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-load-string-with-512B-values.yml +37 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-400_conns.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-40_conns.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-2000_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-400_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-40_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-400_conns.yml +45 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-2000_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-400_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-5200_conns.yml +46 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-50-50-with-512B-values-with-expiration-pipeline-10-400_conns.yml +43 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello-pipeline-10.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-connection-hello.yml +32 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-multiple-hll-pfcount-100B-values.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-multiple-hll-pfmerge-100B-values.yml +34 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-connection-ping-pipeline-10.yml +29 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml +40 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-publish-1K-channels-10B-no-subscribers.yml +30 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-server-time-pipeline-10.yml +29 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-10.yml +68 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-100.yml +69 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-leaderboard-top-1000.yml +68 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-rate-limiting-lua-100k-sessions.yml +64 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-realtime-analytics-membership-pipeline-10.yml +56 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-realtime-analytics-membership.yml +56 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-hash-100k-sessions.yml +108 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-json-100k-sessions.yml +109 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-caching-string-100k-sessions.yml +98 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-100k-sessions.yml +205 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml +205 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xread-count-100.yml +36 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xreadgroup-count-100-noack.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-10M-entries-xreadgroup-count-100.yml +38 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-stream-concurrent-xadd-xreadgroup-70-30.yml +50 -0
- redis_benchmarks_specification/test-suites/template.txt +18 -0
- redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml +41 -0
- redis_benchmarks_specification-0.2.42.dist-info/LICENSE +201 -0
- redis_benchmarks_specification-0.2.42.dist-info/METADATA +434 -0
- redis_benchmarks_specification-0.2.42.dist-info/RECORD +336 -0
- redis_benchmarks_specification-0.2.42.dist-info/WHEEL +4 -0
- redis_benchmarks_specification-0.2.42.dist-info/entry_points.txt +10 -0
|
@@ -0,0 +1,1304 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
import redis
|
|
6
|
+
import oyaml as yaml
|
|
7
|
+
import csv
|
|
8
|
+
|
|
9
|
+
from redis_benchmarks_specification.__common__.runner import get_benchmark_specs
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# logging settings
|
|
13
|
+
logging.basicConfig(
|
|
14
|
+
format="%(asctime)s %(levelname)-4s %(message)s",
|
|
15
|
+
level=logging.INFO,
|
|
16
|
+
datefmt="%Y-%m-%d %H:%M:%S",
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def clean_number(value):
    """Cleans and converts numeric values from CSV, handling B (billion), M (million), K (thousand).

    Suffixes are matched case-insensitively, so "2.5M" and "2.5m" both parse
    to 2_500_000 (the original implementation only recognized uppercase
    suffixes and logged lowercase ones as invalid).

    Returns the value as an int, or 0 if the value cannot be parsed.
    """
    # Scale factors keyed by (uppercased) suffix character.
    multipliers = {"B": 1_000_000_000, "M": 1_000_000, "K": 1_000}
    try:
        value = value.replace(",", "").strip()  # Remove commas and spaces

        # Determine the scale factor from a trailing suffix, if any.
        multiplier = 1
        if value and value[-1].upper() in multipliers:
            multiplier = multipliers[value[-1].upper()]
            value = value[:-1]  # Drop the suffix character

        return int(float(value) * multiplier)  # Convert to full number
    except ValueError:
        logging.error(f"Skipping invalid count value: {value}")
        return 0  # Default to 0 if invalid
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def clean_percentage(value):
    """Parse percentage values like '17.810220866%'"""
    # Strip the percent sign and surrounding whitespace before parsing.
    cleaned = value.replace("%", "").strip()
    try:
        return float(cleaned)
    except ValueError:
        logging.error(f"Skipping invalid percentage value: {cleaned}")
        return 0.0
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def format_number_with_suffix(value):
    """Format large numbers with B/M/K suffixes for readability"""
    # Check thresholds from largest to smallest so the biggest applicable
    # suffix wins; values below 1K are returned unformatted.
    for divisor, suffix in (
        (1_000_000_000, "B"),
        (1_000_000, "M"),
        (1_000, "K"),
    ):
        if value >= divisor:
            return f"{value / divisor:.1f}{suffix}"
    return str(value)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def get_arg_value(args, flag, default):
    """Extract integer values safely from CLI arguments"""
    # Guard clause: flag absent -> default straight away.
    if flag not in args:
        return default  # Return default if not found or invalid
    try:
        # The value is the token following the flag; tolerate a leading '='.
        raw = args[args.index(flag) + 1]
        return int(raw.lstrip("=").strip())  # Convert to integer safely
    except (IndexError, ValueError):
        logging.error(f"Failed to extract {flag}, using default: {default}")
    return default  # Return default if not found or invalid
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def generate_stats_cli_command_logic(args, project_name, project_version):
|
|
79
|
+
logging.info(
|
|
80
|
+
"Using: {project_name} {project_version}".format(
|
|
81
|
+
project_name=project_name, project_version=project_version
|
|
82
|
+
)
|
|
83
|
+
)
|
|
84
|
+
commands_json_file = os.path.abspath(args.commands_json_file)
|
|
85
|
+
include_modules = args.commandstats_csv_include_modules
|
|
86
|
+
logging.info("Reading commands.json file from {}".format(commands_json_file))
|
|
87
|
+
commands_json = {}
|
|
88
|
+
tracked_commands_json = {}
|
|
89
|
+
groups_json = {}
|
|
90
|
+
total_commands = 0
|
|
91
|
+
total_tracked_commands = 0
|
|
92
|
+
with open(commands_json_file, "r") as groups_json_file_fd:
|
|
93
|
+
commands_json = json.load(groups_json_file_fd)
|
|
94
|
+
total_commands = len(commands_json.keys())
|
|
95
|
+
groups_json_file = os.path.abspath(args.groups_json_file)
|
|
96
|
+
logging.info("Reading groups.json file from {}".format(groups_json_file))
|
|
97
|
+
with open(groups_json_file, "r") as groups_json_file_fd:
|
|
98
|
+
groups_json = json.load(groups_json_file_fd)
|
|
99
|
+
testsuites_folder = os.path.abspath(args.test_suites_folder)
|
|
100
|
+
logging.info("Using test-suites folder dir {}".format(testsuites_folder))
|
|
101
|
+
testsuite_spec_files = get_benchmark_specs(testsuites_folder)
|
|
102
|
+
logging.info(
|
|
103
|
+
"There are a total of {} test-suites being run in folder {}".format(
|
|
104
|
+
len(testsuite_spec_files), testsuites_folder
|
|
105
|
+
)
|
|
106
|
+
)
|
|
107
|
+
priority_json = {}
|
|
108
|
+
if args.commands_priority_file != "":
|
|
109
|
+
with open(args.commands_priority_file, "r") as fd:
|
|
110
|
+
logging.info(
|
|
111
|
+
"Reading {} file with priority by commandstats".format(
|
|
112
|
+
args.commands_priority_file
|
|
113
|
+
)
|
|
114
|
+
)
|
|
115
|
+
priority_json = json.load(fd)
|
|
116
|
+
tracked_groups = []
|
|
117
|
+
tracked_groups_hist = {}
|
|
118
|
+
|
|
119
|
+
# ACL category tracking for benchmark YAML files
|
|
120
|
+
benchmark_read_commands = {}
|
|
121
|
+
benchmark_write_commands = {}
|
|
122
|
+
benchmark_fast_commands = {}
|
|
123
|
+
benchmark_slow_commands = {}
|
|
124
|
+
benchmark_total_command_count = 0
|
|
125
|
+
|
|
126
|
+
# Group-based read/write tracking for benchmarks
|
|
127
|
+
benchmark_group_read = {} # group -> count
|
|
128
|
+
benchmark_group_write = {} # group -> count
|
|
129
|
+
benchmark_group_total = {} # group -> total count
|
|
130
|
+
|
|
131
|
+
# ACL category tracking for commandstats CSV
|
|
132
|
+
csv_read_commands = {}
|
|
133
|
+
csv_write_commands = {}
|
|
134
|
+
csv_fast_commands = {}
|
|
135
|
+
csv_slow_commands = {}
|
|
136
|
+
csv_total_command_count = 0
|
|
137
|
+
|
|
138
|
+
# Group-based read/write tracking for CSV
|
|
139
|
+
csv_group_read = {} # group -> count
|
|
140
|
+
csv_group_write = {} # group -> count
|
|
141
|
+
csv_group_total = {} # group -> total count
|
|
142
|
+
|
|
143
|
+
# Percentage validation tracking
|
|
144
|
+
csv_provided_percentages = {} # command -> provided percentage
|
|
145
|
+
csv_original_counts = {} # command -> original count from CSV
|
|
146
|
+
|
|
147
|
+
override_enabled = args.override_tests
|
|
148
|
+
fail_on_required_diff = args.fail_on_required_diff
|
|
149
|
+
overall_result = True
|
|
150
|
+
test_names = []
|
|
151
|
+
pipelines = {}
|
|
152
|
+
connections = {}
|
|
153
|
+
data_sizes = {}
|
|
154
|
+
defaults_filename = args.defaults_filename
|
|
155
|
+
|
|
156
|
+
for test_file in testsuite_spec_files:
|
|
157
|
+
if defaults_filename in test_file:
|
|
158
|
+
continue
|
|
159
|
+
benchmark_config = {}
|
|
160
|
+
requires_override = False
|
|
161
|
+
test_result = True
|
|
162
|
+
tested_groups_match_origin = True
|
|
163
|
+
|
|
164
|
+
with open(test_file, "r") as stream:
|
|
165
|
+
|
|
166
|
+
try:
|
|
167
|
+
benchmark_config = yaml.safe_load(stream)
|
|
168
|
+
test_name = benchmark_config["name"]
|
|
169
|
+
if test_name in test_names:
|
|
170
|
+
logging.error(
|
|
171
|
+
"Duplicate testname detected! {} is already present in {}".format(
|
|
172
|
+
test_name, test_names
|
|
173
|
+
)
|
|
174
|
+
)
|
|
175
|
+
test_result = False
|
|
176
|
+
|
|
177
|
+
# Validate client configuration format
|
|
178
|
+
has_clientconfig = "clientconfig" in benchmark_config
|
|
179
|
+
has_clientconfigs = "clientconfigs" in benchmark_config
|
|
180
|
+
|
|
181
|
+
if has_clientconfig and has_clientconfigs:
|
|
182
|
+
logging.error(
|
|
183
|
+
"Test {} has both 'clientconfig' and 'clientconfigs'. Only one format is allowed.".format(
|
|
184
|
+
test_name
|
|
185
|
+
)
|
|
186
|
+
)
|
|
187
|
+
test_result = False
|
|
188
|
+
elif not has_clientconfig and not has_clientconfigs:
|
|
189
|
+
logging.error(
|
|
190
|
+
"Test {} is missing client configuration. Must have either 'clientconfig' or 'clientconfigs'.".format(
|
|
191
|
+
test_name
|
|
192
|
+
)
|
|
193
|
+
)
|
|
194
|
+
test_result = False
|
|
195
|
+
|
|
196
|
+
test_names.append(test_name)
|
|
197
|
+
group = ""
|
|
198
|
+
is_memtier = False
|
|
199
|
+
|
|
200
|
+
## defaults
|
|
201
|
+
pipeline_size = 1
|
|
202
|
+
clients = 50
|
|
203
|
+
threads = 4
|
|
204
|
+
data_size = 32
|
|
205
|
+
|
|
206
|
+
if "memtier" in test_name:
|
|
207
|
+
is_memtier = True
|
|
208
|
+
tested_groups = []
|
|
209
|
+
origin_tested_groups = []
|
|
210
|
+
if "tested-groups" in benchmark_config:
|
|
211
|
+
origin_tested_groups = benchmark_config["tested-groups"]
|
|
212
|
+
origin_tested_commands = []
|
|
213
|
+
|
|
214
|
+
tested_commands = []
|
|
215
|
+
if "tested-commands" in benchmark_config:
|
|
216
|
+
origin_tested_commands = benchmark_config["tested-commands"]
|
|
217
|
+
else:
|
|
218
|
+
logging.warn("dont have test commands in {}".format(test_name))
|
|
219
|
+
|
|
220
|
+
for tested_command in origin_tested_commands:
|
|
221
|
+
tested_commands.append(tested_command.lower())
|
|
222
|
+
if is_memtier:
|
|
223
|
+
# Handle both clientconfig and clientconfigs formats
|
|
224
|
+
if "clientconfigs" in benchmark_config:
|
|
225
|
+
# For multiple configs, use the first one for stats analysis
|
|
226
|
+
# TODO: Consider aggregating stats from all configs
|
|
227
|
+
arguments = benchmark_config["clientconfigs"][0]["arguments"]
|
|
228
|
+
arg_list = (
|
|
229
|
+
benchmark_config["clientconfigs"][0]["arguments"]
|
|
230
|
+
.replace('"', "")
|
|
231
|
+
.split()
|
|
232
|
+
)
|
|
233
|
+
else:
|
|
234
|
+
# Legacy single clientconfig format
|
|
235
|
+
arguments = benchmark_config["clientconfig"]["arguments"]
|
|
236
|
+
arg_list = (
|
|
237
|
+
benchmark_config["clientconfig"]["arguments"]
|
|
238
|
+
.replace('"', "")
|
|
239
|
+
.split()
|
|
240
|
+
)
|
|
241
|
+
|
|
242
|
+
data_size = get_arg_value(arg_list, "--data-size", data_size)
|
|
243
|
+
data_size = get_arg_value(arg_list, "-d", data_size)
|
|
244
|
+
|
|
245
|
+
# Extract values using the safer parsing function
|
|
246
|
+
pipeline_size = get_arg_value(arg_list, "--pipeline", pipeline_size)
|
|
247
|
+
pipeline_size = get_arg_value(
|
|
248
|
+
arg_list, "-P", pipeline_size
|
|
249
|
+
) # Support short form
|
|
250
|
+
|
|
251
|
+
# Extract values using the safer parsing function
|
|
252
|
+
clients = get_arg_value(arg_list, "--clients", clients)
|
|
253
|
+
clients = get_arg_value(
|
|
254
|
+
arg_list, "-c", clients
|
|
255
|
+
) # Support short form
|
|
256
|
+
|
|
257
|
+
threads = get_arg_value(arg_list, "--threads", threads)
|
|
258
|
+
threads = get_arg_value(
|
|
259
|
+
arg_list, "-t", threads
|
|
260
|
+
) # Support short form
|
|
261
|
+
|
|
262
|
+
arguments_split = arguments.split("--command")
|
|
263
|
+
|
|
264
|
+
if len(arguments_split) == 1:
|
|
265
|
+
# this means no arbitrary command is being used so we default to memtier default group, which is 'string'
|
|
266
|
+
tested_groups.append("string")
|
|
267
|
+
|
|
268
|
+
for command_part in arguments_split[1:]:
|
|
269
|
+
command_part = command_part.strip()
|
|
270
|
+
command_p = command_part.split(" ", 1)[0]
|
|
271
|
+
command = command_p.replace(" ", "")
|
|
272
|
+
command = command.replace("=", "")
|
|
273
|
+
command = command.replace('"', "")
|
|
274
|
+
command = command.replace("'", "")
|
|
275
|
+
if "-key-pattern" in command:
|
|
276
|
+
continue
|
|
277
|
+
# Skip command-ratio and other memtier arguments that start with -
|
|
278
|
+
if command.startswith("-"):
|
|
279
|
+
continue
|
|
280
|
+
command = command.lower()
|
|
281
|
+
if command not in tested_commands:
|
|
282
|
+
tested_commands.append(command)
|
|
283
|
+
command_json = {}
|
|
284
|
+
if command in commands_json:
|
|
285
|
+
command_json = commands_json[command]
|
|
286
|
+
elif command.upper() in commands_json:
|
|
287
|
+
command_json = commands_json[command.upper()]
|
|
288
|
+
else:
|
|
289
|
+
logging.error(
|
|
290
|
+
"command {} not in commands.json".format(command)
|
|
291
|
+
)
|
|
292
|
+
if command not in tracked_commands_json:
|
|
293
|
+
tracked_commands_json[command] = command_json
|
|
294
|
+
|
|
295
|
+
# Only process if command_json has group information
|
|
296
|
+
if "group" in command_json:
|
|
297
|
+
group = command_json["group"]
|
|
298
|
+
if group not in tested_groups:
|
|
299
|
+
|
|
300
|
+
tested_groups.append(group)
|
|
301
|
+
if group not in tracked_groups:
|
|
302
|
+
tracked_groups.append(group)
|
|
303
|
+
tracked_groups_hist[group] = 0
|
|
304
|
+
tracked_groups_hist[group] = tracked_groups_hist[group] + 1
|
|
305
|
+
|
|
306
|
+
# Track ACL categories for read/write and fast/slow analysis
|
|
307
|
+
if "acl_categories" in command_json:
|
|
308
|
+
acl_categories = command_json["acl_categories"]
|
|
309
|
+
benchmark_total_command_count += 1
|
|
310
|
+
|
|
311
|
+
# Track total by group (all commands)
|
|
312
|
+
if group not in benchmark_group_total:
|
|
313
|
+
benchmark_group_total[group] = 0
|
|
314
|
+
benchmark_group_total[group] += 1
|
|
315
|
+
|
|
316
|
+
# Track read/write commands
|
|
317
|
+
is_read = False
|
|
318
|
+
is_write = False
|
|
319
|
+
|
|
320
|
+
if "@read" in acl_categories:
|
|
321
|
+
is_read = True
|
|
322
|
+
elif "@write" in acl_categories:
|
|
323
|
+
is_write = True
|
|
324
|
+
elif "_ro" in command.lower():
|
|
325
|
+
# Commands with _ro suffix are read-only (like EVALSHA_RO)
|
|
326
|
+
is_read = True
|
|
327
|
+
elif "@pubsub" in acl_categories:
|
|
328
|
+
# Pubsub commands: SUBSCRIBE/UNSUBSCRIBE are read, PUBLISH is write
|
|
329
|
+
if command.lower() in [
|
|
330
|
+
"subscribe",
|
|
331
|
+
"unsubscribe",
|
|
332
|
+
"psubscribe",
|
|
333
|
+
"punsubscribe",
|
|
334
|
+
]:
|
|
335
|
+
is_read = True
|
|
336
|
+
else:
|
|
337
|
+
is_write = (
|
|
338
|
+
True # PUBLISH and other pubsub commands
|
|
339
|
+
)
|
|
340
|
+
else:
|
|
341
|
+
# Commands without explicit read/write ACL but not _ro are assumed write
|
|
342
|
+
# This covers cases like EVALSHA which can modify data
|
|
343
|
+
is_write = True
|
|
344
|
+
|
|
345
|
+
if is_read:
|
|
346
|
+
if command not in benchmark_read_commands:
|
|
347
|
+
benchmark_read_commands[command] = 0
|
|
348
|
+
benchmark_read_commands[command] += 1
|
|
349
|
+
|
|
350
|
+
# Track by group
|
|
351
|
+
if group not in benchmark_group_read:
|
|
352
|
+
benchmark_group_read[group] = 0
|
|
353
|
+
benchmark_group_read[group] += 1
|
|
354
|
+
|
|
355
|
+
elif is_write:
|
|
356
|
+
if command not in benchmark_write_commands:
|
|
357
|
+
benchmark_write_commands[command] = 0
|
|
358
|
+
benchmark_write_commands[command] += 1
|
|
359
|
+
|
|
360
|
+
# Track by group
|
|
361
|
+
if group not in benchmark_group_write:
|
|
362
|
+
benchmark_group_write[group] = 0
|
|
363
|
+
benchmark_group_write[group] += 1
|
|
364
|
+
|
|
365
|
+
# Track fast/slow commands
|
|
366
|
+
if "@fast" in acl_categories:
|
|
367
|
+
if command not in benchmark_fast_commands:
|
|
368
|
+
benchmark_fast_commands[command] = 0
|
|
369
|
+
benchmark_fast_commands[command] += 1
|
|
370
|
+
elif "@slow" in acl_categories:
|
|
371
|
+
if command not in benchmark_slow_commands:
|
|
372
|
+
benchmark_slow_commands[command] = 0
|
|
373
|
+
benchmark_slow_commands[command] += 1
|
|
374
|
+
|
|
375
|
+
# Calculate total connections
|
|
376
|
+
total_connections = clients * threads
|
|
377
|
+
|
|
378
|
+
if pipeline_size not in pipelines:
|
|
379
|
+
pipelines[pipeline_size] = 0
|
|
380
|
+
pipelines[pipeline_size] = pipelines[pipeline_size] + 1
|
|
381
|
+
|
|
382
|
+
if total_connections not in connections:
|
|
383
|
+
connections[total_connections] = 0
|
|
384
|
+
connections[total_connections] = connections[total_connections] + 1
|
|
385
|
+
|
|
386
|
+
if data_size not in data_sizes:
|
|
387
|
+
data_sizes[data_size] = 0
|
|
388
|
+
data_sizes[data_size] = data_sizes[data_size] + 1
|
|
389
|
+
|
|
390
|
+
if sorted(tested_commands) != sorted(origin_tested_commands):
|
|
391
|
+
requires_override = True
|
|
392
|
+
benchmark_config["tested-commands"] = tested_commands
|
|
393
|
+
logging.warn(
|
|
394
|
+
"there is a difference between specified test-commands in the yaml (name={}) and the ones we've detected {}!={}".format(
|
|
395
|
+
test_name,
|
|
396
|
+
sorted(origin_tested_commands),
|
|
397
|
+
sorted(tested_commands),
|
|
398
|
+
)
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
priority = None
|
|
402
|
+
# maximum priority of all tested commands
|
|
403
|
+
priority_json_value = None
|
|
404
|
+
for command in tested_commands:
|
|
405
|
+
if command in priority_json:
|
|
406
|
+
priority_v = priority_json[command]
|
|
407
|
+
if priority_json_value is None:
|
|
408
|
+
priority_json_value = priority_v
|
|
409
|
+
if priority_v > priority_json_value:
|
|
410
|
+
priority_json_value = priority_v
|
|
411
|
+
|
|
412
|
+
if "priority" in benchmark_config:
|
|
413
|
+
priority = benchmark_config["priority"]
|
|
414
|
+
else:
|
|
415
|
+
if priority_json_value is not None:
|
|
416
|
+
requires_override = True
|
|
417
|
+
logging.warn(
|
|
418
|
+
"dont have priority in {}, but the commands in the test have max priority of {}".format(
|
|
419
|
+
test_name, priority_json_value
|
|
420
|
+
)
|
|
421
|
+
)
|
|
422
|
+
priority = priority_json_value
|
|
423
|
+
if priority is not None:
|
|
424
|
+
benchmark_config["priority"] = priority
|
|
425
|
+
|
|
426
|
+
resources = {}
|
|
427
|
+
if "resources" in benchmark_config["dbconfig"]:
|
|
428
|
+
resources = benchmark_config["dbconfig"]["resources"]
|
|
429
|
+
else:
|
|
430
|
+
benchmark_config["dbconfig"]["resources"] = resources
|
|
431
|
+
|
|
432
|
+
resources_requests = {}
|
|
433
|
+
if "requests" in resources:
|
|
434
|
+
resources_requests = benchmark_config["dbconfig"]["resources"][
|
|
435
|
+
"requests"
|
|
436
|
+
]
|
|
437
|
+
else:
|
|
438
|
+
benchmark_config["dbconfig"]["resources"][
|
|
439
|
+
"requests"
|
|
440
|
+
] = resources_requests
|
|
441
|
+
|
|
442
|
+
if "memory" not in resources_requests:
|
|
443
|
+
benchmark_config["dbconfig"]["resources"]["requests"][
|
|
444
|
+
"memory"
|
|
445
|
+
] = "1g"
|
|
446
|
+
requires_override = True
|
|
447
|
+
logging.warn(
|
|
448
|
+
"dont have resources.requests.memory in {}. Setting 1GB default".format(
|
|
449
|
+
test_name
|
|
450
|
+
)
|
|
451
|
+
)
|
|
452
|
+
|
|
453
|
+
if sorted(tested_groups) != sorted(origin_tested_groups):
|
|
454
|
+
tested_groups_match_origin = False
|
|
455
|
+
benchmark_config["tested-groups"] = tested_groups
|
|
456
|
+
logging.warn(
|
|
457
|
+
"there is a difference between specified test-groups in the yaml (name={}) and the ones we've detected {}!={}".format(
|
|
458
|
+
test_name,
|
|
459
|
+
sorted(origin_tested_groups),
|
|
460
|
+
sorted(tested_groups),
|
|
461
|
+
)
|
|
462
|
+
)
|
|
463
|
+
|
|
464
|
+
except Exception as e:
|
|
465
|
+
logging.error(
|
|
466
|
+
"while loading file {} and error was returned: {}".format(
|
|
467
|
+
test_file, e.__str__()
|
|
468
|
+
)
|
|
469
|
+
)
|
|
470
|
+
test_result = False
|
|
471
|
+
pass
|
|
472
|
+
|
|
473
|
+
if requires_override:
|
|
474
|
+
test_result = False
|
|
475
|
+
overall_result &= test_result
|
|
476
|
+
|
|
477
|
+
if not tested_groups_match_origin:
|
|
478
|
+
if len(tested_groups) > 0:
|
|
479
|
+
overall_result = False
|
|
480
|
+
else:
|
|
481
|
+
logging.warn(
|
|
482
|
+
"difference between specified and detected test-groups was ignored since command info is not available in this benchmark version"
|
|
483
|
+
)
|
|
484
|
+
|
|
485
|
+
if (requires_override or not tested_groups_match_origin) and override_enabled:
|
|
486
|
+
logging.info(
|
|
487
|
+
"Saving a new version of the file {} with the overrided data".format(
|
|
488
|
+
test_file
|
|
489
|
+
)
|
|
490
|
+
)
|
|
491
|
+
with open(test_file, "w") as file:
|
|
492
|
+
yaml.dump(benchmark_config, file, sort_keys=False, width=100000)
|
|
493
|
+
total_tracked_commands_pct = "n/a"
|
|
494
|
+
|
|
495
|
+
module_names = {
|
|
496
|
+
"ft": "redisearch",
|
|
497
|
+
"search": "redisearch",
|
|
498
|
+
"_ft": "redisearch",
|
|
499
|
+
"graph": "redisgraph",
|
|
500
|
+
"ts": "redistimeseries",
|
|
501
|
+
"timeseries": "redistimeseries",
|
|
502
|
+
"json": "redisjson",
|
|
503
|
+
"bf": "redisbloom",
|
|
504
|
+
"cf": "redisbloom",
|
|
505
|
+
"topk": "redisbloom",
|
|
506
|
+
"cms": "redisbloom",
|
|
507
|
+
"tdigest": "redisbloom",
|
|
508
|
+
}
|
|
509
|
+
|
|
510
|
+
group_usage_calls = {}
|
|
511
|
+
group_usage_usecs = {}
|
|
512
|
+
|
|
513
|
+
if args.commandstats_csv != "":
|
|
514
|
+
logging.info(
|
|
515
|
+
"Reading commandstats csv {} to determine commands/test coverage".format(
|
|
516
|
+
args.commandstats_csv
|
|
517
|
+
)
|
|
518
|
+
)
|
|
519
|
+
from csv import reader
|
|
520
|
+
|
|
521
|
+
rows = []
|
|
522
|
+
priority = {}
|
|
523
|
+
priority_usecs = {}
|
|
524
|
+
|
|
525
|
+
# open file in read mode
|
|
526
|
+
total_count = 0
|
|
527
|
+
total_usecs = 0
|
|
528
|
+
total_tracked_count = 0
|
|
529
|
+
with open(
|
|
530
|
+
args.commandstats_csv, "r", encoding="utf8", errors="ignore"
|
|
531
|
+
) as read_obj:
|
|
532
|
+
# pass the file object to reader() to get the reader object
|
|
533
|
+
csv_reader = reader(x.replace("\0", "") for x in read_obj)
|
|
534
|
+
# Iterate over each row in the csv using reader object
|
|
535
|
+
for row in csv_reader:
|
|
536
|
+
if len(row) <= 2:
|
|
537
|
+
continue
|
|
538
|
+
if "cmdstat_" not in row[0]:
|
|
539
|
+
continue
|
|
540
|
+
# row variable is a list that represents a row in csv
|
|
541
|
+
cmdstat = row[0]
|
|
542
|
+
cmdstat = cmdstat.lower()
|
|
543
|
+
if "cmdstat_" not in cmdstat:
|
|
544
|
+
continue
|
|
545
|
+
cmdstat = cmdstat.replace("cmdstat_", "")
|
|
546
|
+
count = clean_number(row[1])
|
|
547
|
+
usecs = None
|
|
548
|
+
if len(row) > 2:
|
|
549
|
+
usecs = clean_number(row[2])
|
|
550
|
+
total_usecs += usecs
|
|
551
|
+
|
|
552
|
+
# Parse percentage and original count if available
|
|
553
|
+
provided_percentage = None
|
|
554
|
+
original_count = None
|
|
555
|
+
if len(row) > 3:
|
|
556
|
+
provided_percentage = clean_percentage(row[3])
|
|
557
|
+
if len(row) > 4:
|
|
558
|
+
original_count = clean_number(row[4])
|
|
559
|
+
|
|
560
|
+
if count == 0:
|
|
561
|
+
continue
|
|
562
|
+
tracked = False
|
|
563
|
+
module = False
|
|
564
|
+
cmd = cmdstat.upper()
|
|
565
|
+
group = "n/a"
|
|
566
|
+
deprecated = False
|
|
567
|
+
if "." in cmdstat:
|
|
568
|
+
module = True
|
|
569
|
+
cmd_module_prefix = cmdstat.split(".")[0]
|
|
570
|
+
if cmd_module_prefix in module_names:
|
|
571
|
+
group = module_names[cmd_module_prefix]
|
|
572
|
+
else:
|
|
573
|
+
logging.error(
|
|
574
|
+
"command with a module prefix does not have module name {}".format(
|
|
575
|
+
cmd_module_prefix
|
|
576
|
+
)
|
|
577
|
+
)
|
|
578
|
+
if cmd in commands_json:
|
|
579
|
+
command_json = commands_json[cmd]
|
|
580
|
+
group = command_json["group"]
|
|
581
|
+
if "deprecated_since" in command_json:
|
|
582
|
+
deprecated = True
|
|
583
|
+
|
|
584
|
+
# Track ACL categories for commandstats CSV data
|
|
585
|
+
if "acl_categories" in command_json:
|
|
586
|
+
acl_categories = command_json["acl_categories"]
|
|
587
|
+
|
|
588
|
+
# Use original count if available, otherwise use parsed count
|
|
589
|
+
tracking_count = (
|
|
590
|
+
original_count if original_count is not None else count
|
|
591
|
+
)
|
|
592
|
+
csv_total_command_count += tracking_count
|
|
593
|
+
|
|
594
|
+
# Track total by group (all commands)
|
|
595
|
+
if group not in csv_group_total:
|
|
596
|
+
csv_group_total[group] = 0
|
|
597
|
+
csv_group_total[group] += tracking_count
|
|
598
|
+
|
|
599
|
+
# Track read/write commands
|
|
600
|
+
is_read = False
|
|
601
|
+
is_write = False
|
|
602
|
+
|
|
603
|
+
if "@read" in acl_categories:
|
|
604
|
+
is_read = True
|
|
605
|
+
elif "@write" in acl_categories:
|
|
606
|
+
is_write = True
|
|
607
|
+
elif "_ro" in cmd.lower():
|
|
608
|
+
# Commands with _ro suffix are read-only (like EVALSHA_RO)
|
|
609
|
+
is_read = True
|
|
610
|
+
elif "@pubsub" in acl_categories:
|
|
611
|
+
# Pubsub commands: SUBSCRIBE/UNSUBSCRIBE are read, PUBLISH is write
|
|
612
|
+
if cmd.lower() in [
|
|
613
|
+
"subscribe",
|
|
614
|
+
"unsubscribe",
|
|
615
|
+
"psubscribe",
|
|
616
|
+
"punsubscribe",
|
|
617
|
+
]:
|
|
618
|
+
is_read = True
|
|
619
|
+
else:
|
|
620
|
+
is_write = True # PUBLISH and other pubsub commands
|
|
621
|
+
else:
|
|
622
|
+
# Commands without explicit read/write ACL but not _ro are assumed write
|
|
623
|
+
# This covers cases like EVALSHA which can modify data
|
|
624
|
+
is_write = True
|
|
625
|
+
|
|
626
|
+
if is_read:
|
|
627
|
+
if cmd.lower() not in csv_read_commands:
|
|
628
|
+
csv_read_commands[cmd.lower()] = 0
|
|
629
|
+
csv_read_commands[cmd.lower()] += tracking_count
|
|
630
|
+
|
|
631
|
+
# Track by group
|
|
632
|
+
if group not in csv_group_read:
|
|
633
|
+
csv_group_read[group] = 0
|
|
634
|
+
csv_group_read[group] += tracking_count
|
|
635
|
+
|
|
636
|
+
elif is_write:
|
|
637
|
+
if cmd.lower() not in csv_write_commands:
|
|
638
|
+
csv_write_commands[cmd.lower()] = 0
|
|
639
|
+
csv_write_commands[cmd.lower()] += tracking_count
|
|
640
|
+
|
|
641
|
+
# Track by group
|
|
642
|
+
if group not in csv_group_write:
|
|
643
|
+
csv_group_write[group] = 0
|
|
644
|
+
csv_group_write[group] += tracking_count
|
|
645
|
+
|
|
646
|
+
# Track fast/slow commands
|
|
647
|
+
if "@fast" in acl_categories:
|
|
648
|
+
if cmd.lower() not in csv_fast_commands:
|
|
649
|
+
csv_fast_commands[cmd.lower()] = 0
|
|
650
|
+
csv_fast_commands[cmd.lower()] += tracking_count
|
|
651
|
+
elif "@slow" in acl_categories:
|
|
652
|
+
if cmd.lower() not in csv_slow_commands:
|
|
653
|
+
csv_slow_commands[cmd.lower()] = 0
|
|
654
|
+
csv_slow_commands[cmd.lower()] += tracking_count
|
|
655
|
+
|
|
656
|
+
if module is False or include_modules:
|
|
657
|
+
# Use original count if available and different from parsed count
|
|
658
|
+
final_count = count
|
|
659
|
+
if original_count is not None and original_count != count:
|
|
660
|
+
logging.warning(
|
|
661
|
+
f"Using original count for {cmd}: {original_count:,} instead of parsed {count:,}"
|
|
662
|
+
)
|
|
663
|
+
final_count = original_count
|
|
664
|
+
|
|
665
|
+
priority[cmd.lower()] = final_count
|
|
666
|
+
if type(usecs) == int:
|
|
667
|
+
priority_usecs[cmd.lower()] = usecs
|
|
668
|
+
|
|
669
|
+
# Store percentage and original count for validation
|
|
670
|
+
if provided_percentage is not None:
|
|
671
|
+
csv_provided_percentages[cmd.lower()] = provided_percentage
|
|
672
|
+
if original_count is not None:
|
|
673
|
+
csv_original_counts[cmd.lower()] = original_count
|
|
674
|
+
|
|
675
|
+
if cmdstat in tracked_commands_json:
|
|
676
|
+
tracked = True
|
|
677
|
+
if module is False or include_modules:
|
|
678
|
+
row = [cmdstat, group, count, usecs, tracked, deprecated]
|
|
679
|
+
rows.append(row)
|
|
680
|
+
if group not in group_usage_calls:
|
|
681
|
+
group_usage_calls[group] = {}
|
|
682
|
+
group_usage_calls[group]["call"] = 0
|
|
683
|
+
if group not in group_usage_usecs:
|
|
684
|
+
group_usage_usecs[group] = {}
|
|
685
|
+
group_usage_usecs[group]["usecs"] = 0
|
|
686
|
+
if type(count) == int:
|
|
687
|
+
group_usage_calls[group]["call"] = (
|
|
688
|
+
group_usage_calls[group]["call"] + count
|
|
689
|
+
)
|
|
690
|
+
if type(usecs) == int:
|
|
691
|
+
group_usage_usecs[group]["usecs"] = (
|
|
692
|
+
group_usage_usecs[group]["usecs"] + usecs
|
|
693
|
+
)
|
|
694
|
+
if group == "n/a":
|
|
695
|
+
logging.warn("Unable to detect group in {}".format(cmd))
|
|
696
|
+
|
|
697
|
+
priority_list = sorted(((priority[cmd], cmd) for cmd in priority), reverse=True)
|
|
698
|
+
|
|
699
|
+
priority_json = {}
|
|
700
|
+
top_10_missing = []
|
|
701
|
+
top_30_missing = []
|
|
702
|
+
top_50_missing = []
|
|
703
|
+
# first pass on count
|
|
704
|
+
for x in priority_list:
|
|
705
|
+
count = x[0]
|
|
706
|
+
total_count += count
|
|
707
|
+
|
|
708
|
+
for group_name, group in group_usage_calls.items():
|
|
709
|
+
call = group["call"]
|
|
710
|
+
pct = call / total_count
|
|
711
|
+
group["pct"] = pct
|
|
712
|
+
|
|
713
|
+
for group_name, group in group_usage_usecs.items():
|
|
714
|
+
usecs = group["usecs"]
|
|
715
|
+
pct = usecs / total_usecs
|
|
716
|
+
group["pct"] = pct
|
|
717
|
+
|
|
718
|
+
for pos, x in enumerate(priority_list, 1):
|
|
719
|
+
count = x[0]
|
|
720
|
+
cmd = x[1]
|
|
721
|
+
priority_json[cmd] = pos
|
|
722
|
+
pct = count / total_count
|
|
723
|
+
if cmd not in tracked_commands_json:
|
|
724
|
+
if pos <= 10:
|
|
725
|
+
top_10_missing.append(cmd)
|
|
726
|
+
if pos <= 30:
|
|
727
|
+
top_30_missing.append(cmd)
|
|
728
|
+
if pos <= 50:
|
|
729
|
+
top_50_missing.append(cmd)
|
|
730
|
+
else:
|
|
731
|
+
total_tracked_count += count
|
|
732
|
+
|
|
733
|
+
if args.commands_priority_file != "":
|
|
734
|
+
with open(args.commands_priority_file, "w") as fd:
|
|
735
|
+
logging.info(
|
|
736
|
+
"Updating {} file with priority by commandstats".format(
|
|
737
|
+
args.commands_priority_file
|
|
738
|
+
)
|
|
739
|
+
)
|
|
740
|
+
json.dump(priority_json, fd, indent=True)
|
|
741
|
+
|
|
742
|
+
if args.group_csv != "":
|
|
743
|
+
header = [
|
|
744
|
+
"group",
|
|
745
|
+
"count",
|
|
746
|
+
"usecs",
|
|
747
|
+
"usec_per_call",
|
|
748
|
+
"% count",
|
|
749
|
+
"% usecs",
|
|
750
|
+
]
|
|
751
|
+
with open(args.group_csv, "w", encoding="UTF8", newline="") as f:
|
|
752
|
+
writer = csv.writer(f)
|
|
753
|
+
|
|
754
|
+
# write the header
|
|
755
|
+
writer.writerow(header)
|
|
756
|
+
for group_name, group_usage_info in group_usage_calls.items():
|
|
757
|
+
count = group_usage_info["call"]
|
|
758
|
+
call_pct = group_usage_info["pct"]
|
|
759
|
+
usecs = group_usage_usecs[group_name]["usecs"]
|
|
760
|
+
usecs_pct = group_usage_usecs[group_name]["pct"]
|
|
761
|
+
usecs_per_call = usecs / count
|
|
762
|
+
|
|
763
|
+
writer.writerow(
|
|
764
|
+
[group_name, count, usecs, usecs_per_call, call_pct, usecs_pct]
|
|
765
|
+
)
|
|
766
|
+
|
|
767
|
+
if args.summary_csv != "":
|
|
768
|
+
header = [
|
|
769
|
+
"command",
|
|
770
|
+
"group",
|
|
771
|
+
"count",
|
|
772
|
+
"usecs",
|
|
773
|
+
"tracked",
|
|
774
|
+
"deprecated",
|
|
775
|
+
"usec_per_call",
|
|
776
|
+
"% count",
|
|
777
|
+
"% usecs",
|
|
778
|
+
"diff count usecs",
|
|
779
|
+
]
|
|
780
|
+
|
|
781
|
+
with open(args.summary_csv, "w", encoding="UTF8", newline="") as f:
|
|
782
|
+
writer = csv.writer(f)
|
|
783
|
+
|
|
784
|
+
# write the header
|
|
785
|
+
writer.writerow(header)
|
|
786
|
+
for row in rows:
|
|
787
|
+
# write the data
|
|
788
|
+
count = row[2]
|
|
789
|
+
usec = row[3]
|
|
790
|
+
pct = count / total_count
|
|
791
|
+
pct_usec = "n/a"
|
|
792
|
+
usec_per_call = "n/a"
|
|
793
|
+
diff_pct = "n/a"
|
|
794
|
+
if usec is not None:
|
|
795
|
+
pct_usec = usec / total_usecs
|
|
796
|
+
usec_per_call = float(usec) / float(count)
|
|
797
|
+
diff_pct = pct_usec - pct
|
|
798
|
+
row.append(usec_per_call)
|
|
799
|
+
row.append(pct)
|
|
800
|
+
row.append(pct_usec)
|
|
801
|
+
row.append(diff_pct)
|
|
802
|
+
writer.writerow(row)
|
|
803
|
+
|
|
804
|
+
if total_tracked_count > 0:
|
|
805
|
+
total_tracked_commands_pct = "{0:.3g} %".format(
|
|
806
|
+
total_tracked_count / total_count * 100.0
|
|
807
|
+
)
|
|
808
|
+
|
|
809
|
+
logging.info("Total commands: {}".format(total_commands))
|
|
810
|
+
total_tracked_commands = len(tracked_commands_json.keys())
|
|
811
|
+
logging.info("Total tracked commands: {}".format(total_tracked_commands))
|
|
812
|
+
logging.info(
|
|
813
|
+
"Total tracked commands pct: {}".format(total_tracked_commands_pct)
|
|
814
|
+
)
|
|
815
|
+
all_groups = groups_json.keys()
|
|
816
|
+
total_groups = len(all_groups)
|
|
817
|
+
logging.info("Total groups: {}".format(total_groups))
|
|
818
|
+
total_tracked_groups = len(tracked_groups)
|
|
819
|
+
logging.info("Total tracked groups: {}".format(total_tracked_groups))
|
|
820
|
+
logging.info(
|
|
821
|
+
"Total untracked groups: {}".format(total_groups - total_tracked_groups)
|
|
822
|
+
)
|
|
823
|
+
logging.info("Printing untracked groups:")
|
|
824
|
+
for group_name in all_groups:
|
|
825
|
+
if group_name not in tracked_groups:
|
|
826
|
+
logging.info(" - {}".format(group_name))
|
|
827
|
+
logging.info("Top 10 fully tracked?: {}".format(len(top_10_missing) == 0))
|
|
828
|
+
logging.info("Top 30 fully tracked?: {}".format(len(top_30_missing) == 0))
|
|
829
|
+
if len(top_30_missing) > 0:
|
|
830
|
+
logging.info(
|
|
831
|
+
f"\t\tTotal missing for Top 30: {len(top_30_missing)}. {top_30_missing}"
|
|
832
|
+
)
|
|
833
|
+
|
|
834
|
+
logging.info("Top 50 fully tracked?: {}".format(len(top_50_missing) == 0))
|
|
835
|
+
if len(top_50_missing) > 0:
|
|
836
|
+
logging.info(
|
|
837
|
+
f"\t\tTotal missing for Top 50: {len(top_50_missing)}. {top_50_missing}"
|
|
838
|
+
)
|
|
839
|
+
|
|
840
|
+
if overall_result is False and fail_on_required_diff:
|
|
841
|
+
logging.error(
|
|
842
|
+
"Failing given there were changes required to be made and --fail-on-required-diff was enabled"
|
|
843
|
+
)
|
|
844
|
+
exit(1)
|
|
845
|
+
|
|
846
|
+
if args.push_stats_redis:
|
|
847
|
+
logging.info(
|
|
848
|
+
"Pushing stats to redis at: {}:{}".format(args.redis_host, args.redis_port)
|
|
849
|
+
)
|
|
850
|
+
conn = redis.StrictRedis(
|
|
851
|
+
host=args.redis_host,
|
|
852
|
+
port=args.redis_port,
|
|
853
|
+
password=args.redis_pass,
|
|
854
|
+
username=args.redis_user,
|
|
855
|
+
decode_responses=False,
|
|
856
|
+
)
|
|
857
|
+
|
|
858
|
+
tested_groups_key = "gh/redis/redis:set:tested_groups"
|
|
859
|
+
tested_commands_key = "gh/redis/redis:set:tested_commands"
|
|
860
|
+
for group in tracked_groups:
|
|
861
|
+
conn.sadd(tested_groups_key, group)
|
|
862
|
+
for command in list(tracked_commands_json.keys()):
|
|
863
|
+
conn.sadd(tested_commands_key, command)
|
|
864
|
+
|
|
865
|
+
logging.info(f"There is a total of : {len(tracked_groups)} tracked command groups.")
|
|
866
|
+
logging.info(
|
|
867
|
+
f"There is a total of : {len(list(tracked_commands_json.keys()))} tracked commands."
|
|
868
|
+
)
|
|
869
|
+
|
|
870
|
+
# ACL Category Analysis Summary
|
|
871
|
+
logging.info("=" * 80)
|
|
872
|
+
logging.info("ACL CATEGORY ANALYSIS SUMMARY")
|
|
873
|
+
logging.info("=" * 80)
|
|
874
|
+
|
|
875
|
+
# Benchmark YAML files analysis
|
|
876
|
+
if benchmark_total_command_count > 0:
|
|
877
|
+
logging.info("BENCHMARK TEST SUITES ANALYSIS (from YAML files):")
|
|
878
|
+
logging.info("-" * 50)
|
|
879
|
+
|
|
880
|
+
# Calculate read/write percentages for benchmarks
|
|
881
|
+
benchmark_read_count = sum(benchmark_read_commands.values())
|
|
882
|
+
benchmark_write_count = sum(benchmark_write_commands.values())
|
|
883
|
+
benchmark_rw_count = benchmark_read_count + benchmark_write_count
|
|
884
|
+
|
|
885
|
+
if benchmark_rw_count > 0:
|
|
886
|
+
read_percentage = (benchmark_read_count / benchmark_rw_count) * 100
|
|
887
|
+
write_percentage = (benchmark_write_count / benchmark_rw_count) * 100
|
|
888
|
+
|
|
889
|
+
logging.info(f"READ/WRITE COMMAND DISTRIBUTION:")
|
|
890
|
+
logging.info(
|
|
891
|
+
f" Read commands: {benchmark_read_count:6d} ({read_percentage:5.1f}%)"
|
|
892
|
+
)
|
|
893
|
+
logging.info(
|
|
894
|
+
f" Write commands: {benchmark_write_count:6d} ({write_percentage:5.1f}%)"
|
|
895
|
+
)
|
|
896
|
+
logging.info(f" Total R/W: {benchmark_rw_count:6d} (100.0%)")
|
|
897
|
+
else:
|
|
898
|
+
logging.info("No read/write commands detected in benchmark ACL categories")
|
|
899
|
+
|
|
900
|
+
# Calculate fast/slow percentages for benchmarks
|
|
901
|
+
benchmark_fast_count = sum(benchmark_fast_commands.values())
|
|
902
|
+
benchmark_slow_count = sum(benchmark_slow_commands.values())
|
|
903
|
+
benchmark_fs_count = benchmark_fast_count + benchmark_slow_count
|
|
904
|
+
|
|
905
|
+
if benchmark_fs_count > 0:
|
|
906
|
+
fast_percentage = (benchmark_fast_count / benchmark_fs_count) * 100
|
|
907
|
+
slow_percentage = (benchmark_slow_count / benchmark_fs_count) * 100
|
|
908
|
+
|
|
909
|
+
logging.info(f"")
|
|
910
|
+
logging.info(f"FAST/SLOW COMMAND DISTRIBUTION:")
|
|
911
|
+
logging.info(
|
|
912
|
+
f" Fast commands: {benchmark_fast_count:6d} ({fast_percentage:5.1f}%)"
|
|
913
|
+
)
|
|
914
|
+
logging.info(
|
|
915
|
+
f" Slow commands: {benchmark_slow_count:6d} ({slow_percentage:5.1f}%)"
|
|
916
|
+
)
|
|
917
|
+
logging.info(f" Total F/S: {benchmark_fs_count:6d} (100.0%)")
|
|
918
|
+
else:
|
|
919
|
+
logging.info("No fast/slow commands detected in benchmark ACL categories")
|
|
920
|
+
|
|
921
|
+
# Group breakdown for benchmarks
|
|
922
|
+
if benchmark_group_total:
|
|
923
|
+
logging.info("")
|
|
924
|
+
logging.info("READ/WRITE BREAKDOWN BY COMMAND GROUP:")
|
|
925
|
+
|
|
926
|
+
# Calculate total calls across all groups
|
|
927
|
+
total_all_calls = sum(benchmark_group_total.values())
|
|
928
|
+
|
|
929
|
+
# Create list of groups with their total calls for sorting
|
|
930
|
+
group_data = []
|
|
931
|
+
for group, total_group in benchmark_group_total.items():
|
|
932
|
+
read_count = benchmark_group_read.get(group, 0)
|
|
933
|
+
write_count = benchmark_group_write.get(group, 0)
|
|
934
|
+
group_data.append((group, read_count, write_count, total_group))
|
|
935
|
+
|
|
936
|
+
# Sort by total calls (descending)
|
|
937
|
+
group_data.sort(key=lambda x: x[3], reverse=True)
|
|
938
|
+
|
|
939
|
+
total_read_all = 0
|
|
940
|
+
total_write_all = 0
|
|
941
|
+
|
|
942
|
+
for group, read_count, write_count, total_group in group_data:
|
|
943
|
+
group_pct = (total_group / total_all_calls) * 100
|
|
944
|
+
read_pct = (read_count / total_group) * 100 if total_group > 0 else 0
|
|
945
|
+
write_pct = (write_count / total_group) * 100 if total_group > 0 else 0
|
|
946
|
+
|
|
947
|
+
read_formatted = format_number_with_suffix(read_count)
|
|
948
|
+
write_formatted = format_number_with_suffix(write_count)
|
|
949
|
+
|
|
950
|
+
logging.info(
|
|
951
|
+
f" {group.upper():>12} ({group_pct:4.1f}%): {read_formatted:>8} read ({read_pct:5.1f}%), {write_formatted:>8} write ({write_pct:5.1f}%)"
|
|
952
|
+
)
|
|
953
|
+
|
|
954
|
+
total_read_all += read_count
|
|
955
|
+
total_write_all += write_count
|
|
956
|
+
|
|
957
|
+
# Add total row
|
|
958
|
+
if group_data:
|
|
959
|
+
total_read_pct = (total_read_all / total_all_calls) * 100
|
|
960
|
+
total_write_pct = (total_write_all / total_all_calls) * 100
|
|
961
|
+
total_read_formatted = format_number_with_suffix(total_read_all)
|
|
962
|
+
total_write_formatted = format_number_with_suffix(total_write_all)
|
|
963
|
+
|
|
964
|
+
logging.info(
|
|
965
|
+
f" {'TOTAL':>12} (100.0%): {total_read_formatted:>8} read ({total_read_pct:5.1f}%), {total_write_formatted:>8} write ({total_write_pct:5.1f}%)"
|
|
966
|
+
)
|
|
967
|
+
else:
|
|
968
|
+
logging.info(
|
|
969
|
+
"BENCHMARK TEST SUITES ANALYSIS: No commands with ACL categories found"
|
|
970
|
+
)
|
|
971
|
+
|
|
972
|
+
# CommandStats CSV analysis
|
|
973
|
+
if csv_total_command_count > 0:
|
|
974
|
+
logging.info("")
|
|
975
|
+
logging.info("COMMANDSTATS CSV ANALYSIS (actual Redis usage):")
|
|
976
|
+
logging.info("-" * 50)
|
|
977
|
+
|
|
978
|
+
# Calculate read/write percentages for CSV data
|
|
979
|
+
csv_read_count = sum(csv_read_commands.values())
|
|
980
|
+
csv_write_count = sum(csv_write_commands.values())
|
|
981
|
+
csv_rw_count = csv_read_count + csv_write_count
|
|
982
|
+
|
|
983
|
+
if csv_rw_count > 0:
|
|
984
|
+
read_percentage = (csv_read_count / csv_rw_count) * 100
|
|
985
|
+
write_percentage = (csv_write_count / csv_rw_count) * 100
|
|
986
|
+
|
|
987
|
+
logging.info(f"READ/WRITE COMMAND DISTRIBUTION:")
|
|
988
|
+
logging.info(
|
|
989
|
+
f" Read commands: {csv_read_count:8d} ({read_percentage:5.1f}%)"
|
|
990
|
+
)
|
|
991
|
+
logging.info(
|
|
992
|
+
f" Write commands: {csv_write_count:8d} ({write_percentage:5.1f}%)"
|
|
993
|
+
)
|
|
994
|
+
logging.info(f" Total R/W: {csv_rw_count:8d} (100.0%)")
|
|
995
|
+
else:
|
|
996
|
+
logging.info("No read/write commands detected in CSV ACL categories")
|
|
997
|
+
|
|
998
|
+
# Calculate fast/slow percentages for CSV data
|
|
999
|
+
csv_fast_count = sum(csv_fast_commands.values())
|
|
1000
|
+
csv_slow_count = sum(csv_slow_commands.values())
|
|
1001
|
+
csv_fs_count = csv_fast_count + csv_slow_count
|
|
1002
|
+
|
|
1003
|
+
if csv_fs_count > 0:
|
|
1004
|
+
fast_percentage = (csv_fast_count / csv_fs_count) * 100
|
|
1005
|
+
slow_percentage = (csv_slow_count / csv_fs_count) * 100
|
|
1006
|
+
|
|
1007
|
+
logging.info(f"")
|
|
1008
|
+
logging.info(f"FAST/SLOW COMMAND DISTRIBUTION:")
|
|
1009
|
+
logging.info(
|
|
1010
|
+
f" Fast commands: {csv_fast_count:8d} ({fast_percentage:5.1f}%)"
|
|
1011
|
+
)
|
|
1012
|
+
logging.info(
|
|
1013
|
+
f" Slow commands: {csv_slow_count:8d} ({slow_percentage:5.1f}%)"
|
|
1014
|
+
)
|
|
1015
|
+
logging.info(f" Total F/S: {csv_fs_count:8d} (100.0%)")
|
|
1016
|
+
else:
|
|
1017
|
+
logging.info("No fast/slow commands detected in CSV ACL categories")
|
|
1018
|
+
|
|
1019
|
+
# Group breakdown for CSV data
|
|
1020
|
+
if csv_group_total:
|
|
1021
|
+
logging.info("")
|
|
1022
|
+
logging.info("READ/WRITE BREAKDOWN BY COMMAND GROUP:")
|
|
1023
|
+
|
|
1024
|
+
# Calculate total calls across all groups
|
|
1025
|
+
total_all_calls = sum(csv_group_total.values())
|
|
1026
|
+
|
|
1027
|
+
# Create list of groups with their total calls for sorting
|
|
1028
|
+
group_data = []
|
|
1029
|
+
for group, total_group in csv_group_total.items():
|
|
1030
|
+
read_count = csv_group_read.get(group, 0)
|
|
1031
|
+
write_count = csv_group_write.get(group, 0)
|
|
1032
|
+
group_data.append((group, read_count, write_count, total_group))
|
|
1033
|
+
|
|
1034
|
+
# Sort by total calls (descending)
|
|
1035
|
+
group_data.sort(key=lambda x: x[3], reverse=True)
|
|
1036
|
+
|
|
1037
|
+
total_read_all = 0
|
|
1038
|
+
total_write_all = 0
|
|
1039
|
+
|
|
1040
|
+
for group, read_count, write_count, total_group in group_data:
|
|
1041
|
+
group_pct = (total_group / total_all_calls) * 100
|
|
1042
|
+
read_pct = (read_count / total_group) * 100 if total_group > 0 else 0
|
|
1043
|
+
write_pct = (write_count / total_group) * 100 if total_group > 0 else 0
|
|
1044
|
+
|
|
1045
|
+
read_formatted = format_number_with_suffix(read_count)
|
|
1046
|
+
write_formatted = format_number_with_suffix(write_count)
|
|
1047
|
+
|
|
1048
|
+
logging.info(
|
|
1049
|
+
f" {group.upper():>12} ({group_pct:4.1f}%): {read_formatted:>8} read ({read_pct:5.1f}%), {write_formatted:>8} write ({write_pct:5.1f}%)"
|
|
1050
|
+
)
|
|
1051
|
+
|
|
1052
|
+
total_read_all += read_count
|
|
1053
|
+
total_write_all += write_count
|
|
1054
|
+
|
|
1055
|
+
# Add total row
|
|
1056
|
+
if group_data:
|
|
1057
|
+
total_read_pct = (total_read_all / total_all_calls) * 100
|
|
1058
|
+
total_write_pct = (total_write_all / total_all_calls) * 100
|
|
1059
|
+
total_read_formatted = format_number_with_suffix(total_read_all)
|
|
1060
|
+
total_write_formatted = format_number_with_suffix(total_write_all)
|
|
1061
|
+
|
|
1062
|
+
logging.info(
|
|
1063
|
+
f" {'TOTAL':>12} (100.0%): {total_read_formatted:>8} read ({total_read_pct:5.1f}%), {total_write_formatted:>8} write ({total_write_pct:5.1f}%)"
|
|
1064
|
+
)
|
|
1065
|
+
|
|
1066
|
+
# Validate parsing accuracy by comparing with provided percentages
|
|
1067
|
+
if csv_provided_percentages and csv_original_counts:
|
|
1068
|
+
logging.info("")
|
|
1069
|
+
logging.info("PARSING VALIDATION:")
|
|
1070
|
+
logging.info("-" * 30)
|
|
1071
|
+
|
|
1072
|
+
# Calculate total from original counts
|
|
1073
|
+
total_original = sum(csv_original_counts.values())
|
|
1074
|
+
total_provided_percentage = sum(csv_provided_percentages.values())
|
|
1075
|
+
|
|
1076
|
+
logging.info(f"Total original count: {total_original:,}")
|
|
1077
|
+
logging.info(
|
|
1078
|
+
f"Sum of provided percentages: {total_provided_percentage:.6f}%"
|
|
1079
|
+
)
|
|
1080
|
+
|
|
1081
|
+
# Check if our billion parsing matches original counts
|
|
1082
|
+
parsing_errors = 0
|
|
1083
|
+
for cmd in csv_original_counts:
|
|
1084
|
+
if cmd in priority: # priority contains our parsed values
|
|
1085
|
+
parsed_value = priority[cmd]
|
|
1086
|
+
original_value = csv_original_counts[cmd]
|
|
1087
|
+
if parsed_value != original_value:
|
|
1088
|
+
parsing_errors += 1
|
|
1089
|
+
logging.warning(
|
|
1090
|
+
f"Parsing mismatch for {cmd}: parsed={parsed_value:,} vs original={original_value:,}"
|
|
1091
|
+
)
|
|
1092
|
+
|
|
1093
|
+
if parsing_errors == 0:
|
|
1094
|
+
logging.info("✓ All billion/million/thousand parsing is accurate")
|
|
1095
|
+
else:
|
|
1096
|
+
logging.warning(f"✗ Found {parsing_errors} parsing errors")
|
|
1097
|
+
|
|
1098
|
+
# Validate percentage calculation
|
|
1099
|
+
if abs(total_provided_percentage - 100.0) < 0.001:
|
|
1100
|
+
logging.info("✓ Provided percentages sum to 100%")
|
|
1101
|
+
else:
|
|
1102
|
+
logging.warning(
|
|
1103
|
+
f"✗ Provided percentages sum to {total_provided_percentage:.6f}% (not 100%)"
|
|
1104
|
+
)
|
|
1105
|
+
else:
|
|
1106
|
+
logging.info("")
|
|
1107
|
+
logging.info(
|
|
1108
|
+
"COMMANDSTATS CSV ANALYSIS: No CSV file provided or no commands found"
|
|
1109
|
+
)
|
|
1110
|
+
|
|
1111
|
+
logging.info("=" * 80)
|
|
1112
|
+
# Save pipeline count to CSV
|
|
1113
|
+
csv_filename = "memtier_pipeline_count.csv"
|
|
1114
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1115
|
+
fieldnames = ["pipeline", "count"]
|
|
1116
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1117
|
+
writer.writeheader()
|
|
1118
|
+
for pipeline_size in sorted(pipelines.keys()):
|
|
1119
|
+
writer.writerow(
|
|
1120
|
+
{"pipeline": pipeline_size, "count": pipelines[pipeline_size]}
|
|
1121
|
+
)
|
|
1122
|
+
|
|
1123
|
+
logging.info(f"Pipeline count data saved to {csv_filename}")
|
|
1124
|
+
|
|
1125
|
+
csv_filename = "memtier_connection_count.csv"
|
|
1126
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1127
|
+
fieldnames = ["connections", "count"]
|
|
1128
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1129
|
+
writer.writeheader()
|
|
1130
|
+
|
|
1131
|
+
# Sort connections dictionary by keys before writing
|
|
1132
|
+
for connection_count in sorted(connections.keys()):
|
|
1133
|
+
writer.writerow(
|
|
1134
|
+
{
|
|
1135
|
+
"connections": connection_count,
|
|
1136
|
+
"count": connections[connection_count],
|
|
1137
|
+
}
|
|
1138
|
+
)
|
|
1139
|
+
|
|
1140
|
+
logging.info(f"Sorted connection count data saved to {csv_filename}")
|
|
1141
|
+
|
|
1142
|
+
csv_filename = "memtier_data_size_histogram.csv"
|
|
1143
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1144
|
+
fieldnames = ["data_size", "count"]
|
|
1145
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1146
|
+
writer.writeheader()
|
|
1147
|
+
|
|
1148
|
+
# Sort connections dictionary by keys before writing
|
|
1149
|
+
for data_size in sorted(data_sizes.keys()):
|
|
1150
|
+
writer.writerow(
|
|
1151
|
+
{
|
|
1152
|
+
"data_size": data_size,
|
|
1153
|
+
"count": data_sizes[data_size],
|
|
1154
|
+
}
|
|
1155
|
+
)
|
|
1156
|
+
|
|
1157
|
+
logging.info(f"Sorted data size count data saved to {csv_filename}")
|
|
1158
|
+
|
|
1159
|
+
csv_filename = "memtier_groups_histogram.csv"
|
|
1160
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1161
|
+
fieldnames = ["group", "count"]
|
|
1162
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1163
|
+
writer.writeheader()
|
|
1164
|
+
|
|
1165
|
+
# Sort connections dictionary by keys before writing
|
|
1166
|
+
for group in sorted(tracked_groups_hist.keys()):
|
|
1167
|
+
writer.writerow(
|
|
1168
|
+
{
|
|
1169
|
+
"group": group,
|
|
1170
|
+
"count": tracked_groups_hist[group],
|
|
1171
|
+
}
|
|
1172
|
+
)
|
|
1173
|
+
|
|
1174
|
+
logging.info(f"Sorted command groups count data saved to {csv_filename}")
|
|
1175
|
+
|
|
1176
|
+
# Save ACL category data to CSV files
|
|
1177
|
+
|
|
1178
|
+
# Benchmark data CSV files
|
|
1179
|
+
csv_filename = "benchmark_acl_read_write_commands.csv"
|
|
1180
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1181
|
+
fieldnames = ["command", "type", "count"]
|
|
1182
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1183
|
+
writer.writeheader()
|
|
1184
|
+
|
|
1185
|
+
for command, count in sorted(benchmark_read_commands.items()):
|
|
1186
|
+
writer.writerow({"command": command, "type": "read", "count": count})
|
|
1187
|
+
for command, count in sorted(benchmark_write_commands.items()):
|
|
1188
|
+
writer.writerow({"command": command, "type": "write", "count": count})
|
|
1189
|
+
|
|
1190
|
+
logging.info(f"Benchmark ACL read/write commands data saved to {csv_filename}")
|
|
1191
|
+
|
|
1192
|
+
csv_filename = "benchmark_acl_fast_slow_commands.csv"
|
|
1193
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1194
|
+
fieldnames = ["command", "type", "count"]
|
|
1195
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1196
|
+
writer.writeheader()
|
|
1197
|
+
|
|
1198
|
+
for command, count in sorted(benchmark_fast_commands.items()):
|
|
1199
|
+
writer.writerow({"command": command, "type": "fast", "count": count})
|
|
1200
|
+
for command, count in sorted(benchmark_slow_commands.items()):
|
|
1201
|
+
writer.writerow({"command": command, "type": "slow", "count": count})
|
|
1202
|
+
|
|
1203
|
+
logging.info(f"Benchmark ACL fast/slow commands data saved to {csv_filename}")
|
|
1204
|
+
|
|
1205
|
+
# CommandStats CSV data files
|
|
1206
|
+
csv_filename = "commandstats_acl_read_write_commands.csv"
|
|
1207
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1208
|
+
fieldnames = ["command", "type", "count"]
|
|
1209
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1210
|
+
writer.writeheader()
|
|
1211
|
+
|
|
1212
|
+
for command, count in sorted(csv_read_commands.items()):
|
|
1213
|
+
writer.writerow({"command": command, "type": "read", "count": count})
|
|
1214
|
+
for command, count in sorted(csv_write_commands.items()):
|
|
1215
|
+
writer.writerow({"command": command, "type": "write", "count": count})
|
|
1216
|
+
|
|
1217
|
+
logging.info(f"CommandStats ACL read/write commands data saved to {csv_filename}")
|
|
1218
|
+
|
|
1219
|
+
csv_filename = "commandstats_acl_fast_slow_commands.csv"
|
|
1220
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1221
|
+
fieldnames = ["command", "type", "count"]
|
|
1222
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1223
|
+
writer.writeheader()
|
|
1224
|
+
|
|
1225
|
+
for command, count in sorted(csv_fast_commands.items()):
|
|
1226
|
+
writer.writerow({"command": command, "type": "fast", "count": count})
|
|
1227
|
+
for command, count in sorted(csv_slow_commands.items()):
|
|
1228
|
+
writer.writerow({"command": command, "type": "slow", "count": count})
|
|
1229
|
+
|
|
1230
|
+
logging.info(f"CommandStats ACL fast/slow commands data saved to {csv_filename}")
|
|
1231
|
+
|
|
1232
|
+
# Save group breakdown data to CSV files
|
|
1233
|
+
|
|
1234
|
+
# Benchmark group breakdown
|
|
1235
|
+
csv_filename = "benchmark_group_read_write_breakdown.csv"
|
|
1236
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1237
|
+
fieldnames = [
|
|
1238
|
+
"group",
|
|
1239
|
+
"read_count",
|
|
1240
|
+
"write_count",
|
|
1241
|
+
"total_count",
|
|
1242
|
+
"read_percentage",
|
|
1243
|
+
"write_percentage",
|
|
1244
|
+
]
|
|
1245
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1246
|
+
writer.writeheader()
|
|
1247
|
+
|
|
1248
|
+
all_groups = set(benchmark_group_read.keys()) | set(
|
|
1249
|
+
benchmark_group_write.keys()
|
|
1250
|
+
)
|
|
1251
|
+
for group in sorted(all_groups):
|
|
1252
|
+
read_count = benchmark_group_read.get(group, 0)
|
|
1253
|
+
write_count = benchmark_group_write.get(group, 0)
|
|
1254
|
+
total_count = read_count + write_count
|
|
1255
|
+
read_pct = (read_count / total_count * 100) if total_count > 0 else 0
|
|
1256
|
+
write_pct = (write_count / total_count * 100) if total_count > 0 else 0
|
|
1257
|
+
|
|
1258
|
+
writer.writerow(
|
|
1259
|
+
{
|
|
1260
|
+
"group": group,
|
|
1261
|
+
"read_count": read_count,
|
|
1262
|
+
"write_count": write_count,
|
|
1263
|
+
"total_count": total_count,
|
|
1264
|
+
"read_percentage": round(read_pct, 2),
|
|
1265
|
+
"write_percentage": round(write_pct, 2),
|
|
1266
|
+
}
|
|
1267
|
+
)
|
|
1268
|
+
|
|
1269
|
+
logging.info(f"Benchmark group read/write breakdown saved to {csv_filename}")
|
|
1270
|
+
|
|
1271
|
+
# CommandStats group breakdown
|
|
1272
|
+
csv_filename = "commandstats_group_read_write_breakdown.csv"
|
|
1273
|
+
with open(csv_filename, "w", newline="") as csvfile:
|
|
1274
|
+
fieldnames = [
|
|
1275
|
+
"group",
|
|
1276
|
+
"read_count",
|
|
1277
|
+
"write_count",
|
|
1278
|
+
"total_count",
|
|
1279
|
+
"read_percentage",
|
|
1280
|
+
"write_percentage",
|
|
1281
|
+
]
|
|
1282
|
+
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
|
|
1283
|
+
writer.writeheader()
|
|
1284
|
+
|
|
1285
|
+
all_groups = set(csv_group_read.keys()) | set(csv_group_write.keys())
|
|
1286
|
+
for group in sorted(all_groups):
|
|
1287
|
+
read_count = csv_group_read.get(group, 0)
|
|
1288
|
+
write_count = csv_group_write.get(group, 0)
|
|
1289
|
+
total_count = read_count + write_count
|
|
1290
|
+
read_pct = (read_count / total_count * 100) if total_count > 0 else 0
|
|
1291
|
+
write_pct = (write_count / total_count * 100) if total_count > 0 else 0
|
|
1292
|
+
|
|
1293
|
+
writer.writerow(
|
|
1294
|
+
{
|
|
1295
|
+
"group": group,
|
|
1296
|
+
"read_count": read_count,
|
|
1297
|
+
"write_count": write_count,
|
|
1298
|
+
"total_count": total_count,
|
|
1299
|
+
"read_percentage": round(read_pct, 2),
|
|
1300
|
+
"write_percentage": round(write_pct, 2),
|
|
1301
|
+
}
|
|
1302
|
+
)
|
|
1303
|
+
|
|
1304
|
+
logging.info(f"CommandStats group read/write breakdown saved to {csv_filename}")
|