redisbench-admin 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- redisbench_admin/compare/compare.py +3 -3
- redisbench_admin/deploy/deploy.py +1 -9
- redisbench_admin/export/export.py +1 -7
- redisbench_admin/profilers/perf.py +24 -24
- redisbench_admin/run/ann/pkg/.dockerignore +2 -0
- redisbench_admin/run/ann/pkg/.git +1 -0
- redisbench_admin/run/ann/pkg/.github/workflows/benchmarks.yml +100 -0
- redisbench_admin/run/ann/pkg/.gitignore +21 -0
- redisbench_admin/run/ann/pkg/LICENSE +21 -0
- redisbench_admin/run/ann/pkg/README.md +157 -0
- redisbench_admin/run/ann/pkg/algos.yaml +1294 -0
- redisbench_admin/run/ann/pkg/algosP.yaml +67 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/__init__.py +2 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/__init__.py +0 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/annoy.py +26 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/balltree.py +22 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/base.py +36 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/bruteforce.py +110 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/ckdtree.py +17 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/datasketch.py +29 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/definitions.py +187 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/diskann.py +190 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/dolphinnpy.py +31 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/dummy_algo.py +25 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/elasticsearch.py +107 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/elastiknn.py +124 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/faiss.py +124 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/faiss_gpu.py +61 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/faiss_hnsw.py +39 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/flann.py +27 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/hnswlib.py +36 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/kdtree.py +22 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/kgraph.py +39 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/lshf.py +25 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/milvus.py +99 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/mrpt.py +41 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/n2.py +28 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/nearpy.py +48 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/nmslib.py +74 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/onng_ngt.py +100 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/opensearchknn.py +107 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/panng_ngt.py +79 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/pinecone.py +39 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/puffinn.py +45 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/pynndescent.py +115 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/qg_ngt.py +102 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/redisearch.py +90 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/rpforest.py +20 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/scann.py +34 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/sptag.py +28 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/subprocess.py +246 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/vald.py +149 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/vecsim-hnsw.py +43 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/algorithms/vespa.py +47 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/constants.py +1 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/data.py +48 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/datasets.py +620 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/distance.py +53 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/main.py +325 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/plotting/__init__.py +2 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/plotting/metrics.py +183 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/plotting/plot_variants.py +17 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/plotting/utils.py +165 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/results.py +71 -0
- redisbench_admin/run/ann/pkg/ann_benchmarks/runner.py +333 -0
- redisbench_admin/run/ann/pkg/create_dataset.py +12 -0
- redisbench_admin/run/ann/pkg/create_hybrid_dataset.py +147 -0
- redisbench_admin/run/ann/pkg/create_text_to_image_ds.py +117 -0
- redisbench_admin/run/ann/pkg/create_website.py +272 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile +11 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.annoy +5 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.datasketch +4 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.diskann +29 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.diskann_pq +31 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.dolphinn +5 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.elasticsearch +45 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.elastiknn +61 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.faiss +18 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.flann +10 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.hnswlib +10 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.kgraph +6 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.mih +4 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.milvus +27 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.mrpt +4 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.n2 +5 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.nearpy +5 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.ngt +13 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.nmslib +10 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.opensearchknn +43 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.puffinn +6 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.pynndescent +4 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.redisearch +18 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.rpforest +5 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.scann +5 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.scipy +4 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.sklearn +4 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.sptag +30 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.vald +8 -0
- redisbench_admin/run/ann/pkg/install/Dockerfile.vespa +17 -0
- redisbench_admin/run/ann/pkg/install.py +70 -0
- redisbench_admin/run/ann/pkg/logging.conf +34 -0
- redisbench_admin/run/ann/pkg/multirun.py +298 -0
- redisbench_admin/run/ann/pkg/plot.py +159 -0
- redisbench_admin/run/ann/pkg/protocol/bf-runner +10 -0
- redisbench_admin/run/ann/pkg/protocol/bf-runner.py +204 -0
- redisbench_admin/run/ann/pkg/protocol/ext-add-query-metric.md +51 -0
- redisbench_admin/run/ann/pkg/protocol/ext-batch-queries.md +77 -0
- redisbench_admin/run/ann/pkg/protocol/ext-prepared-queries.md +77 -0
- redisbench_admin/run/ann/pkg/protocol/ext-query-parameters.md +47 -0
- redisbench_admin/run/ann/pkg/protocol/specification.md +194 -0
- redisbench_admin/run/ann/pkg/requirements.txt +14 -0
- redisbench_admin/run/ann/pkg/requirements_py38.txt +11 -0
- redisbench_admin/run/ann/pkg/results/fashion-mnist-784-euclidean.png +0 -0
- redisbench_admin/run/ann/pkg/results/gist-960-euclidean.png +0 -0
- redisbench_admin/run/ann/pkg/results/glove-100-angular.png +0 -0
- redisbench_admin/run/ann/pkg/results/glove-25-angular.png +0 -0
- redisbench_admin/run/ann/pkg/results/lastfm-64-dot.png +0 -0
- redisbench_admin/run/ann/pkg/results/mnist-784-euclidean.png +0 -0
- redisbench_admin/run/ann/pkg/results/nytimes-256-angular.png +0 -0
- redisbench_admin/run/ann/pkg/results/sift-128-euclidean.png +0 -0
- redisbench_admin/run/ann/pkg/run.py +12 -0
- redisbench_admin/run/ann/pkg/run_algorithm.py +3 -0
- redisbench_admin/run/ann/pkg/templates/chartjs.template +102 -0
- redisbench_admin/run/ann/pkg/templates/detail_page.html +23 -0
- redisbench_admin/run/ann/pkg/templates/general.html +58 -0
- redisbench_admin/run/ann/pkg/templates/latex.template +30 -0
- redisbench_admin/run/ann/pkg/templates/summary.html +60 -0
- redisbench_admin/run/ann/pkg/test/__init__.py +0 -0
- redisbench_admin/run/ann/pkg/test/test-jaccard.py +19 -0
- redisbench_admin/run/ann/pkg/test/test-metrics.py +99 -0
- redisbench_admin/run/common.py +6 -24
- redisbench_admin/run/run.py +7 -3
- redisbench_admin/run_async/async_terraform.py +2 -10
- redisbench_admin/run_async/render_files.py +3 -3
- redisbench_admin/run_local/run_local.py +12 -12
- redisbench_admin/run_remote/run_remote.py +17 -15
- redisbench_admin/run_remote/standalone.py +5 -1
- redisbench_admin/run_remote/terraform.py +1 -5
- redisbench_admin/utils/remote.py +10 -9
- {redisbench_admin-0.11.1.dist-info → redisbench_admin-0.11.3.dist-info}/METADATA +3 -2
- redisbench_admin-0.11.3.dist-info/RECORD +242 -0
- redisbench_admin-0.11.1.dist-info/RECORD +0 -116
- {redisbench_admin-0.11.1.dist-info → redisbench_admin-0.11.3.dist-info}/LICENSE +0 -0
- {redisbench_admin-0.11.1.dist-info → redisbench_admin-0.11.3.dist-info}/WHEEL +0 -0
- {redisbench_admin-0.11.1.dist-info → redisbench_admin-0.11.3.dist-info}/entry_points.txt +0 -0
Binary file (no textual diff)
Binary file (no textual diff)
Binary file (no textual diff)
redisbench_admin/run/ann/pkg/run.py
ADDED
@@ -0,0 +1,12 @@
+import os
+import pathlib
+
+from ann_benchmarks.main import main
+from multiprocessing import freeze_support
+
+if __name__ == "__main__":
+    workdir = pathlib.Path(__file__).parent.absolute()
+    print("Changing the workdir to {}".format(workdir))
+    os.chdir(workdir)
+    freeze_support()
+    main()
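The new run.py entry point above only pins the working directory to the vendored package folder before delegating to ann_benchmarks.main.main(), so relative paths (algos.yaml, the data/ cache, results/) resolve no matter where the caller starts the process. A minimal sketch of driving this wrapper from Python, assuming the upstream ann-benchmarks CLI flags (--dataset, --algorithm, --local) keep their usual meaning; the flag values are illustrative:

```python
import subprocess
import sys
from pathlib import Path

# Location of the vendored ann-benchmarks runner inside an installed redisbench-admin tree;
# adjust to your environment.
ANN_PKG = Path("redisbench_admin/run/ann/pkg")

# Invoke the wrapper as a child process; the wrapper itself chdirs into ANN_PKG,
# so algos.yaml and the dataset cache are resolved relative to the vendored package.
subprocess.run(
    [
        sys.executable,
        str(ANN_PKG / "run.py"),
        "--dataset", "glove-100-angular",  # assumed upstream flag
        "--algorithm", "redisearch",       # assumed upstream flag
        "--local",                         # assumed upstream flag: run without Docker
    ],
    check=True,
)
```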
redisbench_admin/run/ann/pkg/templates/chartjs.template
ADDED
@@ -0,0 +1,102 @@
+<h3>{{xlabel}}/{{ylabel}}</h3>
+<div id="{{ xlabel }}{{ ylabel }}{{ label }}">
+  <canvas id="chart{{ xlabel }}{{ ylabel }}{{ label }}" width="800" height="600"></canvas>
+  <script>
+    var ctx = document.getElementById("chart{{ xlabel }}{{ ylabel }}{{ label }}");
+    var chart = new Chart(ctx, {
+      {% if not render_all_points %}
+      type: "line",
+      {% else %}
+      type: "bubble",
+      {% endif %}
+      data: { datasets: [
+        {% for run in data_points %}
+        {
+          label: "{{ run["name"] }}",
+          fill: false,
+          pointStyle: "{{ linestyle[run["name"]][3] }}",
+          borderColor: "{{ linestyle[run["name"]][0] }}",
+          data: [
+            {% for (x, y), l in zip(run["coords"], run["labels"]) %}
+            { x: {{ x }} , y: {{ y }}, label: "{{ l }}" },
+            {% endfor %}
+          ]
+        },
+        {% endfor %}
+      ]},
+      options: {
+        responsive: false,
+        title:{
+          display:true,
+          text: '{{ plot_label }}'
+        },
+        scales: {
+          xAxes: [{
+            display: true,
+            type: 'linear',
+            max: '1',
+            position: 'bottom',
+            scaleLabel: {
+              display: true,
+              labelString: ' {{ xlabel }} '
+            }
+          }],
+          yAxes: [{
+            display: true,
+            type: 'logarithmic',
+            scaleLabel: {
+              display: true,
+              labelString: ' {{ ylabel }} '
+            }
+          }]
+        }
+      }
+    });
+    function pushOrConcat(base, toPush) {
+      if (toPush) {
+        if (Chart.helpers.isArray(toPush)) {
+          // base = base.concat(toPush);
+          Array.prototype.push.apply(base, toPush);
+        } else {
+          base.push(toPush);
+        }
+      }
+
+      return base;
+    }
+    Chart.Tooltip.prototype.getFooter = function(tooltipItem, data) {
+      var me = this;
+      var callbacks = me._options.callbacks;
+      var item = tooltipItem[0];
+
+      var beforeFooter = callbacks.beforeFooter.apply(me, arguments);
+      var footer = "Parameters: " + data.datasets[item.datasetIndex].data[item.index].label || '';
+      var afterFooter = callbacks.afterFooter.apply(me, arguments);
+
+      var lines = [];
+      lines = pushOrConcat(lines, beforeFooter);
+      lines = pushOrConcat(lines, footer);
+      lines = pushOrConcat(lines, afterFooter);
+
+      return lines;
+    }
+
+  </script>
+</div>
+{% if args.latex %}
+<div class="row">
+  <div class="col-md-4 text-center">
+    <button type="button" id="button_{{button_label}}" class="btn btn-default" >Toggle latex code</button>
+  </div>
+</div>
+<script>
+  $("#button_{{button_label}}").click(function() {
+    $("#plot_{{button_label}}").toggle();
+  });
+</script>
+<div id="plot_{{button_label}}" style="display:none">
+  <pre>
+  {{latex_code}}
+  </pre>
+</div>
+{% endif %}
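Note that the template above calls Python's zip() inside a {% for %} loop, which stock Jinja2 does not expose; the renderer has to inject it into the environment. A minimal rendering sketch, assuming (as in upstream ann-benchmarks' create_website.py) that the templates are rendered with Jinja2; all variable values here are illustrative, not taken from the diff:

```python
from types import SimpleNamespace
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("redisbench_admin/run/ann/pkg/templates"))
env.globals["zip"] = zip  # the template iterates zip(run["coords"], run["labels"])

template = env.get_template("chartjs.template")
html = template.render(
    xlabel="Recall",
    ylabel="Queries per second (1/s)",
    label="glove-100-angular",
    plot_label="Recall / QPS tradeoff",
    render_all_points=False,
    data_points=[
        {"name": "redisearch-hnsw", "coords": [(0.9, 1500.0)], "labels": ["M=16, efC=200"]}
    ],
    # 4-tuple per algorithm: index 0 is used as the border color, index 3 as the point style.
    linestyle={"redisearch-hnsw": ("#1f77b4", None, None, "circle")},
    args=SimpleNamespace(latex=False),
    button_label="glove",
    latex_code="",
)
print(html[:200])
```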
redisbench_admin/run/ann/pkg/templates/detail_page.html
ADDED
@@ -0,0 +1,23 @@
+{% extends "general.html" %}
+{% block content %}
+<div class="container">
+  {% for item in plot_data.keys() %}
+    {% if item=="normal" %}
+      {% if batch %}
+        <h2>Plots for {{title}} in batch mode</h2>
+      {% else %}
+        <h2>Plots for {{title}}</h2>
+      {% endif %}
+    {% elif item=="scatter" and args.scatter %}
+      {% if batch %}
+        <h2>Scatterplots for {{title}} in batch mode</h2>
+      {% else %}
+        <h2>Scatterplots for {{title}}</h2>
+      {% endif %}
+    {% endif %}
+    {% for plot in plot_data[item] %}
+      {{ plot }}
+    {% endfor %}
+    <hr />
+  {% endfor %}
+{% endblock %}
redisbench_admin/run/ann/pkg/templates/general.html
ADDED
@@ -0,0 +1,58 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1">
+  <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
+  <title>{{ title }}</title>
+  <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.5.0/Chart.js"></script>
+  <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js"></script>
+  <!-- Include all compiled plugins (below), or include individual files as needed -->
+  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
+  <!-- Bootstrap -->
+  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
+  <style>
+    body { padding-top: 50px; }
+  </style>
+  <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
+  <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
+  <!--[if lt IE 9]>
+    <script src="https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js"></script>
+    <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
+  <![endif]-->
+</head>
+<body>
+
+  <nav class="navbar navbar-inverse navbar-fixed-top">
+    <div class="container">
+      <div class="navbar-header">
+        <a class="navbar-brand" href="index.html">ANN Benchmarks</a>
+      </div>
+      <div id="navbar" class="collapse navbar-collapse">
+        <ul class="nav navbar-nav">
+          <li class="active"><a href="index.html">Home</a></li>
+        </ul>
+        <ul class="nav navbar-nav">
+          <li class="active"><a href="index.html#datasets">Datasets</a></li>
+        </ul>
+        <ul class="nav navbar-nav">
+          <li class="active"><a href="index.html#algorithms">Algorithms</a></li>
+        </ul>
+        <ul class="nav navbar-nav">
+          <li class="active"><a href="index.html#contact">Contact</a></li>
+        </ul>
+      </div><!--/.nav-collapse -->
+    </div>
+  </nav>
+
+  {% block content %} {% endblock %}
+
+  <div id="contact">
+    <h2>Contact</h2>
+    <p>ANN-Benchmarks has been developed by Martin Aumueller (maau@itu.dk), Erik Bernhardsson (mail@erikbern.com), and Alec Faitfull (alef@itu.dk). Please use
+    <a href="https://github.com/erikbern/ann-benchmarks/">Github</a> to submit your implementation or improvements.</p>
+  </div>
+</div>
+</body>
+</html>
redisbench_admin/run/ann/pkg/templates/latex.template
ADDED
@@ -0,0 +1,30 @@
+
+\begin{figure}
+  \centering
+  \begin{tikzpicture}
+    \begin{axis}[
+        xlabel={ {{xlabel}} },
+        ylabel={ {{ylabel}} },
+        ymode = log,
+        yticklabel style={/pgf/number format/fixed,
+                          /pgf/number format/precision=3},
+        legend style = { anchor=west},
+        cycle list name = black white
+    ]
+    {% for algo in plot_data %}
+      {% if algo.scatter %}
+        \addplot [only marks] coordinates {
+      {% else %}
+        \addplot coordinates {
+      {% endif %}
+        {% for coord in algo.coords %}
+          ({{ coord[0]}}, {{ coord[1] }})
+        {% endfor %}
+        };
+        \addlegendentry{ {{algo.name}} };
+    {% endfor %}
+    \end{axis}
+  \end{tikzpicture}
+  \caption{ {{caption}} }
+  \label{}
+\end{figure}
redisbench_admin/run/ann/pkg/templates/summary.html
ADDED
@@ -0,0 +1,60 @@
+{% extends "general.html" %}
+{% block content %}
+<div class="container">
+  <h1>Info</h1>
+  <p>ANN-Benchmarks is a benchmarking environment for approximate nearest neighbor algorithms search. This website contains the current benchmarking results. Please visit <a href="http://github.com/erikbern/ann-benchmarks/">http://github.com/erikbern/ann-benchmarks/</a> to get an overview over evaluated data sets and algorithms. Make a pull request on <a href="http://github.com/erikbern/ann-benchmarks/">Github</a> to add your own code or improvements to the
+  benchmarking system.
+  </p>
+  <div id="results">
+  <h1>Benchmarking Results</h1>
+  <p>Results are split by distance measure and dataset. In the bottom, you can find an overview of an algorithm's performance on all datasets. Each dataset is annoted
+  by <em>(k = ...)</em>, the number of nearest neighbors an algorithm was supposed to return. The plot shown depicts <em>Recall</em> (the fraction
+  of true nearest neighbors found, on average over all queries) against <em>Queries per second</em>. Clicking on a plot reveils detailled interactive plots, including
+  approximate recall, index size, and build time.</p>
+  {% for type in ['non-batch', 'batch'] %}
+    {% if len(dataset_with_distances[type]) > 0 %}
+      {% if type == 'batch' %}
+        <h2>Benchmarks for Batched Queries</h2>
+      {% else %}
+        <h2>Benchmarks for Single Queries</h2>
+      {% endif %}
+
+      <h2 id ="datasets">Results by Dataset</h2>
+      {% for distance_data in dataset_with_distances[type] %}
+        <h3>Distance: {{ distance_data.name }} </h3>
+        {% for entry in distance_data.entries %}
+          <a href="./{{ entry.name }}.html">
+            <div class="row" id="{{entry.name}}">
+              <div class = "col-md-4 bg-success">
+                <h4>{{entry.desc}}</h4>
+              </div>
+              <div class = "col-md-8">
+                <img class = "img-responsive" src="{{ entry.name }}.png" />
+              </div>
+            </div>
+          </a>
+          <hr />
+        {% endfor %}
+      {% endfor %}
+      <h2 id="algorithms">Results by Algorithm</h2>
+      <ul class="list-inline"><b>Algorithms:</b>
+        {% for algo in algorithms[type].keys() %}
+          <li><a href="#{{algo}}">{{algo}}</a></li>
+        {% endfor %}
+      </ul>
+      {% for algo in algorithms[type].keys()%}
+        <a href="./{{ algo }}.html">
+          <div class="row" id="{{algo}}">
+            <div class = "col-md-4 bg-success">
+              <h4>{{algo}}</h4>
+            </div>
+            <div class = "col-md-8">
+              <img class = "img-responsive" src="{{ algo }}.png" />
+            </div>
+          </div>
+        </a>
+        <hr />
+      {% endfor %}
+    {% endif %}
+  {% endfor %}
+{% endblock %}
redisbench_admin/run/ann/pkg/test/__init__.py
ADDED
File without changes
redisbench_admin/run/ann/pkg/test/test-jaccard.py
ADDED
@@ -0,0 +1,19 @@
+import unittest
+import numpy
+from ann_benchmarks.distance import jaccard
+
+class TestJaccard(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def test_similarity(self):
+        a = [1, 2, 3, 4]
+        b = []
+        c = [1, 2]
+        d = [5, 6]
+
+        self.assertAlmostEqual(jaccard(a, b), 0.0)
+        self.assertAlmostEqual(jaccard(a, a), 1.0)
+        self.assertAlmostEqual(jaccard(a, c), 0.5)
+        self.assertAlmostEqual(jaccard(c, d), 0.0)
+
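The vendored test above pins down the contract of ann_benchmarks.distance.jaccard: identical inputs score 1.0, empty or disjoint inputs score 0.0, and a half-overlap scores 0.5. A minimal sketch of a function satisfying exactly those assertions (not necessarily the implementation shipped in distance.py):

```python
def jaccard(a, b):
    """Jaccard similarity |A ∩ B| / |A ∪ B|; defined as 0.0 when either input is empty."""
    if len(a) == 0 or len(b) == 0:
        return 0.0
    set_a, set_b = set(a), set(b)
    return len(set_a & set_b) / len(set_a | set_b)
```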
redisbench_admin/run/ann/pkg/test/test-metrics.py
ADDED
@@ -0,0 +1,99 @@
+import unittest
+from ann_benchmarks.plotting.metrics import (
+    knn, queries_per_second, index_size, build_time, candidates,
+    epsilon, rel)
+
+
+class DummyMetric():
+
+    def __init__(self):
+        self.attrs = {}
+        self.d = {}
+
+    def __getitem__(self, key):
+        return self.d.get(key, None)
+
+    def __setitem__(self, key, value):
+        self.d[key] = value
+
+    def __contains__(self, key):
+        return key in self.d
+
+    def create_group(self, name):
+        self.d[name] = DummyMetric()
+        return self.d[name]
+
+
+class TestMetrics(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_recall(self):
+        exact_queries = [[0.1, 0.25]]
+        run1 = [[]]
+        run2 = [[0.2, 0.3]]
+        run3 = [[0.2]]
+        run4 = [[0.2, 0.25]]
+
+        self.assertAlmostEqual(
+            knn(exact_queries, run1, 2, DummyMetric()).attrs['mean'], 0.0)
+        self.assertAlmostEqual(
+            knn(exact_queries, run2, 2, DummyMetric()).attrs['mean'], 0.5)
+        self.assertAlmostEqual(
+            knn(exact_queries, run3, 2, DummyMetric()).attrs['mean'], 0.5)
+        self.assertAlmostEqual(
+            knn(exact_queries, run4, 2, DummyMetric()).attrs['mean'], 1.0)
+
+    def test_epsilon_recall(self):
+        exact_queries = [[0.05, 0.08, 0.24, 0.3]]
+        run1 = [[]]
+        run2 = [[0.1, 0.2, 0.55, 0.7]]
+
+        self.assertAlmostEqual(
+            epsilon(exact_queries, run1, 4, DummyMetric(), 1).attrs['mean'],
+            0.0)
+
+        self.assertAlmostEqual(
+            epsilon(exact_queries, run2, 4,
+                    DummyMetric(), 0.0001).attrs['mean'],
+            0.5)
+        # distance can be off by factor (1 + 1) * 0.3 = 0.6 => recall .75
+        self.assertAlmostEqual(
+            epsilon(exact_queries, run2, 4, DummyMetric(), 1).attrs['mean'],
+            0.75)
+        # distance can be off by factor (1 + 2) * 0.3 = 0.9 => recall 1
+        self.assertAlmostEqual(
+            epsilon(exact_queries, run2, 4, DummyMetric(), 2).attrs['mean'],
+            1.0)
+
+    def test_relative(self):
+        exact_queries = [[0.1, 0.2, 0.25, 0.3]]
+        run1 = []
+        run2 = [[0.1, 0.2, 0.25, 0.3]]
+        run3 = [[0.1, 0.2, 0.55, 0.9]]
+
+        self.assertAlmostEqual(
+            rel(exact_queries, run1, DummyMetric()), float("inf"))
+        self.assertAlmostEqual(rel(exact_queries, run2, DummyMetric()), 1)
+        # total distance exact: 0.85, total distance run3: 1.75
+        self.assertAlmostEqual(rel(exact_queries, run3, DummyMetric()),
+                               1.75 / 0.85)
+
+    def test_queries_per_second(self):
+        self.assertAlmostEqual(
+            queries_per_second([], {"best_search_time": 0.01}),
+            100)
+
+    def test_index_size(self):
+        self.assertEqual(index_size([], {"index_size": 100}), 100)
+
+    def test_build_time(self):
+        self.assertEqual(build_time([], {"build_time": 100}), 100)
+
+    def test_candidates(self):
+        self.assertEqual(candidates([], {"candidates": 10}), 10)
+
+
+if __name__ == '__main__':
+    unittest.main()
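The simpler metric tests above also fix the calling convention: each metric takes the ground-truth distances plus a per-run attribute dict and returns a scalar. A minimal sketch consistent with those assertions (the shipped ann_benchmarks.plotting.metrics module is richer, covering the knn/epsilon recall cases that need the DummyMetric cache):

```python
# Sketches that satisfy the assertions in test-metrics.py; not the shipped implementations.
def queries_per_second(true_nn_distances, run_attrs):
    # best_search_time of 0.01 s -> 100 queries per second, as the test expects.
    return 1.0 / run_attrs["best_search_time"]

def index_size(true_nn_distances, run_attrs):
    return run_attrs["index_size"]

def build_time(true_nn_distances, run_attrs):
    return run_attrs["build_time"]

def candidates(true_nn_distances, run_attrs):
    return run_attrs["candidates"]
```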
redisbench_admin/run/common.py
CHANGED
@@ -206,10 +206,7 @@ def prepare_benchmark_parameters_specif_tooling(
 if isremote is True:
 benchmark_tool = "/tmp/{}".format(benchmark_tool)
 input_data_file = "/tmp/input.data"
-(
-command_arr,
-command_str,
-) = prepare_tsbs_benchmark_command(
+(command_arr, command_str,) = prepare_tsbs_benchmark_command(
 benchmark_tool,
 server_private_ip,
 server_plaintext_port,
@@ -221,10 +218,7 @@ def prepare_benchmark_parameters_specif_tooling(
 cluster_api_enabled,
 )
 if "memtier_benchmark" in benchmark_tool:
-(
-command_arr,
-command_str,
-) = prepare_memtier_benchmark_command(
+(command_arr, command_str,) = prepare_memtier_benchmark_command(
 benchmark_tool,
 server_private_ip,
 server_plaintext_port,
@@ -242,10 +236,7 @@ def prepare_benchmark_parameters_specif_tooling(
 ann_path = stdout[0].strip() + "/run/ann/pkg/multirun.py"
 logging.info("Remote ann-benchmark path: {}".format(ann_path))

-(
-command_arr,
-command_str,
-) = prepare_ann_benchmark_command(
+(command_arr, command_str,) = prepare_ann_benchmark_command(
 server_private_ip,
 server_plaintext_port,
 cluster_api_enabled,
@@ -259,10 +250,7 @@ def prepare_benchmark_parameters_specif_tooling(
 if isremote is True:
 benchmark_tool = "/tmp/{}".format(benchmark_tool)
 input_data_file = "/tmp/input.data"
-(
-command_arr,
-command_str,
-) = prepare_ftsb_benchmark_command(
+(command_arr, command_str,) = prepare_ftsb_benchmark_command(
 benchmark_tool,
 server_private_ip,
 server_plaintext_port,
@@ -279,10 +267,7 @@ def prepare_benchmark_parameters_specif_tooling(
 if isremote is True:
 benchmark_tool = "/tmp/{}".format(benchmark_tool)
 input_data_file = "/tmp/input.data"
-(
-command_arr,
-command_str,
-) = prepare_aibench_benchmark_command(
+(command_arr, command_str,) = prepare_aibench_benchmark_command(
 benchmark_tool,
 server_private_ip,
 server_plaintext_port,
@@ -778,10 +763,7 @@ def print_results_table_stdout(
 metric_names=[],
 ):
 # check which metrics to extract
-(
-_,
-metrics,
-) = merge_default_and_config_metrics(
+(_, metrics,) = merge_default_and_config_metrics(
 benchmark_config,
 default_metrics,
 None,
redisbench_admin/run/run.py
CHANGED
@@ -57,9 +57,13 @@ def define_benchmark_plan(benchmark_definitions, default_specs):
 benchmark_runs_plan[benchmark_type] = {}

 # extract dataset-name
-
-
-
+(
+benchmark_contains_dbconfig,
+dataset_name,
+_,
+_,
+_,
+) = extract_redis_dbconfig_parameters(benchmark_config, "dbconfig")
 logging.info(
 f"Benchmark contains specific dbconfig on test {test_name}: {benchmark_contains_dbconfig}"
 )
redisbench_admin/run_async/async_terraform.py
CHANGED
@@ -114,11 +114,7 @@ class TerraformClass:
 def async_runner_setup(
 self,
 ):
-(
-remote_setup,
-deployment_type,
-remote_id,
-) = fetch_remote_setup_from_config(
+(remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
 [{"type": "async", "setup": "runner"}],
 "https://github.com/RedisLabsModules/testing-infrastructure.git",
 "master",
redisbench_admin/run_remote/terraform.py
CHANGED
@@ -233,11 +229,7 @@ def terraform_spin_or_reuse_env(
 tf_override_name=None,
 tf_folder_path=None,
 ):
-(
-remote_setup,
-deployment_type,
-remote_id,
-) = fetch_remote_setup_from_config(
+(remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
 benchmark_config["remote"],
 "https://github.com/RedisLabsModules/testing-infrastructure.git",
 "master",
redisbench_admin/run_async/render_files.py
CHANGED
@@ -28,9 +28,9 @@ WantedBy=multi-user.target
 argv.append("--private_key")
 argv.append("/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem")
 else:
-argv[
-
-
+argv[
+argv.index(args.private_key)
+] = "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
 if len(args.module_path) != 0:
 argv[argv.index(args.module_path[0])] = (
 "/home/ubuntu/work_dir/tests/benchmarks/"
redisbench_admin/run_local/run_local.py
CHANGED
@@ -680,17 +680,17 @@ def commandstats_latencystats_process_name(
 branch = variant_labels_dict["branch"]

 if version is not None:
-variant_labels_dict[
-"
-)
-variant_labels_dict[
-"
-)
+variant_labels_dict[
+"command_and_metric_and_version"
+] = "{} - {} - {}".format(command, metric, version)
+variant_labels_dict[
+"command_and_metric_and_setup_and_version"
+] = "{} - {} - {} - {}".format(command, metric, setup_name, version)

 if branch is not None:
-variant_labels_dict[
-"
-)
-variant_labels_dict[
-"
-)
+variant_labels_dict[
+"command_and_metric_and_branch"
+] = "{} - {} - {}".format(command, metric, branch)
+variant_labels_dict[
+"command_and_metric_and_setup_and_branch"
+] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)
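The hunk above is a formatting-only change, but it documents the per-command variant-label scheme used for exporting command/latency stats. An illustrative sketch of the labels it produces, with hypothetical values for command, metric, setup and version:

```python
# Hypothetical inputs; only the string format itself comes from the diff above.
command, metric, setup_name, version = "GET", "p50", "oss-standalone", "7.2.0"

variant_labels_dict = {}
variant_labels_dict[
    "command_and_metric_and_version"
] = "{} - {} - {}".format(command, metric, version)
variant_labels_dict[
    "command_and_metric_and_setup_and_version"
] = "{} - {} - {} - {}".format(command, metric, setup_name, version)

print(variant_labels_dict["command_and_metric_and_setup_and_version"])
# GET - p50 - oss-standalone - 7.2.0
```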
redisbench_admin/run_remote/run_remote.py
CHANGED
@@ -346,7 +346,10 @@ def run_remote_command_logic(args, project_name, project_version):

 # map from setup name to overall target-tables ( if any target is defined )
 overall_tables[setup_name] = {}
+total_benchmarks = len(benchmarks_map.keys())
+import tqdm

+pbar = tqdm.tqdm(total=total_benchmarks, unit="benchmarks")
 for test_name, benchmark_config in benchmarks_map.items():
 if return_code != 0 and args.fail_fast:
 logging.warning(
@@ -371,9 +374,7 @@ def run_remote_command_logic(args, project_name, project_version):
 continue
 remote_perf = None
 logging.info(
-"Repetition {} of {}. Running test {}"
-repetition, BENCHMARK_REPETITIONS, test_name
-)
+f"Repetition {repetition} of {BENCHMARK_REPETITIONS}. Running test {test_name}. Total benchmarks {total_benchmarks}"
 )
 (
 setup_name,
@@ -1092,6 +1093,7 @@ def run_remote_command_logic(args, project_name, project_version):
 f"Test {test_name} does not have remote config. Skipping test."
 )

+pbar.update()
 if len(benchmark_artifacts_links) > 0:
 writer = MarkdownTableWriter(
 table_name=benchmark_artifacts_table_name,
@@ -1374,20 +1376,20 @@ def commandstats_latencystats_process_name(
 branch = variant_labels_dict["branch"]

 if version is not None:
-variant_labels_dict[
-"
-)
-variant_labels_dict[
-"
-)
+variant_labels_dict[
+"command_and_metric_and_version"
+] = "{} - {} - {}".format(command, metric, version)
+variant_labels_dict[
+"command_and_metric_and_setup_and_version"
+] = "{} - {} - {} - {}".format(command, metric, setup_name, version)

 if branch is not None:
-variant_labels_dict[
-"
-)
-variant_labels_dict[
-"
-)
+variant_labels_dict[
+"command_and_metric_and_branch"
+] = "{} - {} - {}".format(command, metric, branch)
+variant_labels_dict[
+"command_and_metric_and_setup_and_branch"
+] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)


 def shutdown_remote_redis(redis_conns, ssh_tunnel):