PostBOUND 0.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- postbound/__init__.py +211 -0
- postbound/_base.py +6 -0
- postbound/_bench.py +1012 -0
- postbound/_core.py +1153 -0
- postbound/_hints.py +1373 -0
- postbound/_jointree.py +1079 -0
- postbound/_pipelines.py +1121 -0
- postbound/_qep.py +1986 -0
- postbound/_stages.py +876 -0
- postbound/_validation.py +734 -0
- postbound/db/__init__.py +72 -0
- postbound/db/_db.py +2348 -0
- postbound/db/_duckdb.py +785 -0
- postbound/db/mysql.py +1195 -0
- postbound/db/postgres.py +4216 -0
- postbound/experiments/__init__.py +12 -0
- postbound/experiments/analysis.py +674 -0
- postbound/experiments/benchmarking.py +54 -0
- postbound/experiments/ceb.py +877 -0
- postbound/experiments/interactive.py +105 -0
- postbound/experiments/querygen.py +334 -0
- postbound/experiments/workloads.py +980 -0
- postbound/optimizer/__init__.py +92 -0
- postbound/optimizer/__init__.pyi +73 -0
- postbound/optimizer/_cardinalities.py +369 -0
- postbound/optimizer/_joingraph.py +1150 -0
- postbound/optimizer/dynprog.py +1825 -0
- postbound/optimizer/enumeration.py +432 -0
- postbound/optimizer/native.py +539 -0
- postbound/optimizer/noopt.py +54 -0
- postbound/optimizer/presets.py +147 -0
- postbound/optimizer/randomized.py +650 -0
- postbound/optimizer/tonic.py +1479 -0
- postbound/optimizer/ues.py +1607 -0
- postbound/qal/__init__.py +343 -0
- postbound/qal/_qal.py +9678 -0
- postbound/qal/formatter.py +1089 -0
- postbound/qal/parser.py +2344 -0
- postbound/qal/relalg.py +4257 -0
- postbound/qal/transform.py +2184 -0
- postbound/shortcuts.py +70 -0
- postbound/util/__init__.py +46 -0
- postbound/util/_errors.py +33 -0
- postbound/util/collections.py +490 -0
- postbound/util/dataframe.py +71 -0
- postbound/util/dicts.py +330 -0
- postbound/util/jsonize.py +68 -0
- postbound/util/logging.py +106 -0
- postbound/util/misc.py +168 -0
- postbound/util/networkx.py +401 -0
- postbound/util/numbers.py +438 -0
- postbound/util/proc.py +107 -0
- postbound/util/stats.py +37 -0
- postbound/util/system.py +48 -0
- postbound/util/typing.py +35 -0
- postbound/vis/__init__.py +5 -0
- postbound/vis/fdl.py +69 -0
- postbound/vis/graphs.py +48 -0
- postbound/vis/optimizer.py +538 -0
- postbound/vis/plots.py +84 -0
- postbound/vis/tonic.py +70 -0
- postbound/vis/trees.py +105 -0
- postbound-0.19.0.dist-info/METADATA +355 -0
- postbound-0.19.0.dist-info/RECORD +67 -0
- postbound-0.19.0.dist-info/WHEEL +5 -0
- postbound-0.19.0.dist-info/licenses/LICENSE.txt +202 -0
- postbound-0.19.0.dist-info/top_level.txt +1 -0
postbound/vis/plots.py
ADDED
@@ -0,0 +1,84 @@
from __future__ import annotations

import math
from collections.abc import Callable

import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.axis import Axis
from matplotlib.figure import Figure
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec

Plotter = Callable[[str, pd.DataFrame, Axis], None]


def make_grid_plot(
    data: pd.DataFrame,
    *,
    plot_func: Plotter,
    label_col: str = "label",
    ncols: int = 4,
    base_width: int = 5,
    base_height: int = 3,
) -> tuple[Figure, Axis]:
    labels = data[label_col].unique()
    nrows = math.ceil(len(labels) / ncols)
    fig, ax = plt.subplots(
        ncols=ncols, nrows=nrows, figsize=(ncols * base_width, nrows * base_height)
    )
    current_col, current_row = 0, 0

    for label in labels:  # label is accessed using @ syntax in data.query()
        current_ax = ax[current_row][current_col] if nrows > 1 else ax[current_col]
        current_samples = data.query(f"{label_col} == @label")

        plot_func(label, current_samples, current_ax)

        current_col = (current_col + 1) % ncols
        current_row = current_row + 1 if current_col == 0 else current_row

    # Hide the leftover axes if the last row is not completely filled.
    extra_cols = range(ncols - len(labels) % ncols) if len(labels) % ncols != 0 else []
    for extra_col in extra_cols:
        ax[current_row][ncols - extra_col - 1].axis("off")

    return fig, ax


def make_facetted_grid_plot(
    data: pd.DataFrame,
    *,
    upper_plotter: Plotter,
    lower_plotter: Plotter,
    label_col: str = "label",
    ncols: int = 4,
    base_width: int = 5,
    base_height: int = 3,
    grid_wspace: float = 0.4,
    grid_hspace: float = 0.6,
) -> Figure:
    labels = data[label_col].unique()
    nrows = math.ceil(len(labels) / ncols)
    fig = plt.figure(
        constrained_layout=True, figsize=(ncols * base_width, nrows * base_height)
    )
    parent_gridspec = GridSpec(
        nrows, ncols, figure=fig, wspace=grid_wspace, hspace=grid_hspace
    )

    for i, label in enumerate(labels):
        current_gridspec = GridSpecFromSubplotSpec(
            2, 1, subplot_spec=parent_gridspec[i], wspace=0.1, hspace=0.1
        )
        current_samples = data.query(f"{label_col} == @label")

        upper_ax = plt.Subplot(fig, current_gridspec[0])
        upper_plotter(label, current_samples, upper_ax)
        upper_ax.set_xlabel("")
        plt.setp(upper_ax.get_xticklabels(), visible=False)
        fig.add_subplot(upper_ax)

        lower_ax = plt.Subplot(fig, current_gridspec[1], sharex=upper_ax)
        lower_plotter(label, current_samples, lower_ax)
        fig.add_subplot(lower_ax)

    return fig
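
A minimal usage sketch for `make_grid_plot` (not part of the package; the dataframe and the plotter callback are hypothetical, any callable matching the `Plotter` alias works):

```python
import pandas as pd
from matplotlib.axis import Axis

from postbound.vis.plots import make_grid_plot

# Hypothetical samples: two runtime measurements for each of three queries.
data = pd.DataFrame(
    {
        "label": ["q1", "q1", "q2", "q2", "q3", "q3"],
        "runtime": [1.2, 1.4, 0.8, 0.9, 2.1, 2.3],
    }
)


def runtime_plotter(label: str, samples: pd.DataFrame, ax: Axis) -> None:
    # Each grid cell receives exactly the samples that belong to one label.
    ax.bar(range(len(samples)), samples["runtime"])
    ax.set_title(label)


fig, ax = make_grid_plot(data, plot_func=runtime_plotter, ncols=3)
fig.savefig("grid.svg")
```
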
postbound/vis/tonic.py
ADDED
@@ -0,0 +1,70 @@
from __future__ import annotations

import random
from typing import Optional

import graphviz as gv

from ..optimizer import tonic


def _unique_node_identifier(identifier: tonic.QepsIdentifier) -> str:
    return str(hash((identifier, random.random())))


def _render_subquery_path(
    qeps: tonic.QEPsNode, current_node: str, current_graph: gv.Digraph
) -> None:
    for identifier, qeps_child in qeps.child_nodes.items():
        child_node = _make_node_label(identifier, qeps_child)
        node_identifier = _unique_node_identifier(identifier)
        current_graph.node(node_identifier, label=child_node, style="dashed")
        current_graph.edge(current_node, node_identifier, style="dashed")
        _render_subquery_path(qeps_child, node_identifier, current_graph)


def _make_node_label(identifier: tonic.QepsIdentifier, node: tonic.QEPsNode) -> str:
    cost_str = (
        "["
        + ", ".join(
            f"{operator.value}={cost}" for operator, cost in node.operator_costs.items()
        )
        + "]"
        if node.operator_costs
        else ""
    )
    label = str(identifier)
    return label + "\n" + cost_str


def plot_tonic_qeps(
    qeps: tonic.QEPsNode | tonic.QEPSynopsis,
    *,
    _current_node: Optional[str] = None,
    _current_graph: Optional[gv.Digraph] = None,
) -> gv.Digraph:
    if not _current_graph:
        _current_graph = gv.Digraph()

    if isinstance(qeps, tonic.QEPSynopsis):
        _current_node = "∅"
        _current_graph.node(_current_node, style="dotted")
        qeps = qeps.root

    if qeps.subquery_root:
        _render_subquery_path(qeps.subquery_root, _current_node, _current_graph)

    for identifier, qeps_child in qeps.child_nodes.items():
        child_node = _make_node_label(identifier, qeps_child)
        node_identifier = _unique_node_identifier(identifier)
        if qeps_child.subquery_root:
            _current_graph.node(node_identifier, label=child_node, style="dashed")
        else:
            _current_graph.node(node_identifier, label=child_node)
        if _current_node:
            _current_graph.edge(_current_node, node_identifier)
        plot_tonic_qeps(
            qeps_child, _current_node=node_identifier, _current_graph=_current_graph
        )

    return _current_graph
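
A usage sketch (not part of the package), assuming a trained TONIC synopsis is already at hand, e.g. from a training run:

```python
import graphviz as gv

from postbound.optimizer import tonic
from postbound.vis.tonic import plot_tonic_qeps


def render_synopsis(synopsis: tonic.QEPSynopsis) -> None:
    # Render the QEP-S tree to qeps.svg via Graphviz.
    graph: gv.Digraph = plot_tonic_qeps(synopsis)
    graph.render("qeps", format="svg", cleanup=True)
```
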
postbound/vis/trees.py
ADDED
@@ -0,0 +1,105 @@
"""Provides generic utilities to transform arbitrary graph-like structures into Graphviz objects."""

from __future__ import annotations

from collections.abc import Callable, Sequence
from typing import Optional

import graphviz as gv

from .._base import T


def _gv_escape(node: T, node_id_generator: Callable[[T], int] = hash) -> str:
    """Generates a unique identifier of a specific node.

    Parameters
    ----------
    node : T
        The node to generate the identifier for.
    node_id_generator : Callable[[T], int], optional
        Callback to compute the numeric identifier of the node. Defaults to the hash function.

    Returns
    -------
    str
        The identifier.
    """
    return str(node_id_generator(node))


def plot_tree(
    node: T,
    label_generator: Callable[[T], tuple[str, dict]],
    child_supplier: Callable[[T], Sequence[T]],
    *,
    escape_labels: bool = True,
    out_path: str = "",
    out_format: str = "svg",
    node_id_generator: Callable[[T], int] = hash,
    _graph: Optional[gv.Graph] = None,
    **kwargs,
) -> gv.Graph:
    """Transforms an arbitrary tree into a Graphviz graph. The tree traversal is achieved via callback functions.

    Start the traversal at the root node.

    Parameters
    ----------
    node : T
        The node to plot.
    label_generator : Callable[[T], tuple[str, dict]]
        Callback function to generate labels of the nodes in the graph. The dictionary can contain additional formatting
        attributes (e.g. bold font). Consult the Graphviz documentation for allowed values.
    child_supplier : Callable[[T], Sequence[T]]
        Provides the children of the current node.
    escape_labels : bool, optional
        Whether to escape the labels of the nodes. Defaults to True. If set to False, the labels will be rendered as-is and all
        HTML-like tags will be interpreted as such.
    out_path : str, optional
        An optional file path to store the graph at. If empty, the graph will only be provided as a Graphviz object.
    out_format : str, optional
        The output format of the graph. Defaults to SVG and will only be used if the graph should be stored to disk (according
        to `out_path`).
    node_id_generator : Callable[[T], int], optional
        Callback function to generate unique identifiers for the nodes. Defaults to the hash function of the nodes.
        These identifiers are only used internally to identify the different nodes in the graph.
    _graph : Optional[gv.Graph], optional
        Internal parameter used for state-management within the plotting function. Do not set this parameter yourself!

    Returns
    -------
    gv.Graph
        The resulting Graphviz graph.

    See Also
    --------
    gv.Dot.node
    gv.Dot.edge

    References
    ----------

    .. Graphviz project: https://graphviz.org/
    """
    initial = _graph is None
    _graph = gv.Graph(**kwargs) if initial else _graph
    label, params = label_generator(node)
    if escape_labels:
        label = gv.escape(label)
    node_key = _gv_escape(node, node_id_generator=node_id_generator)
    _graph.node(node_key, label=label, **params)

    for child in child_supplier(node):
        child_key = _gv_escape(child, node_id_generator=node_id_generator)
        _graph.edge(node_key, child_key)
        _graph = plot_tree(
            child,
            label_generator,
            child_supplier,
            escape_labels=escape_labels,
            node_id_generator=node_id_generator,
            _graph=_graph,
        )

    if initial and out_path:
        _graph.render(out_path, format=out_format, cleanup=True)
    return _graph
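
A self-contained sketch of the callback protocol that `plot_tree` expects (not part of the package; the `Node` class is made up for illustration):

```python
from dataclasses import dataclass

from postbound.vis.trees import plot_tree


# A tiny hand-rolled tree; frozen dataclasses are hashable, which matches
# the default node_id_generator=hash.
@dataclass(frozen=True)
class Node:
    name: str
    children: tuple["Node", ...] = ()


tree = Node("HashJoin", (Node("scan(a)"), Node("scan(b)")))

graph = plot_tree(
    tree,
    lambda node: (node.name, {}),  # label plus extra Graphviz node attributes
    lambda node: node.children,  # child supplier drives the traversal
    out_path="join-tree",  # also renders join-tree.svg if Graphviz is installed
)
```
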
postbound-0.19.0.dist-info/METADATA
ADDED
@@ -0,0 +1,355 @@
Metadata-Version: 2.4
Name: PostBOUND
Version: 0.19.0
Summary: PostBOUND is a research framework to prototype and benchmark database query optimizers
Author-email: Rico Bergmann <rico.bergmann1@tu-dresden.de>
License-Expression: Apache-2.0
Project-URL: Homepage, https://github.com/rbergm/PostBOUND
Classifier: Programming Language :: Python :: 3
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Science/Research
Classifier: Topic :: Database :: Database Engines/Servers
Classifier: Topic :: Scientific/Engineering
Classifier: Operating System :: OS Independent
Requires-Python: >=3.12
Description-Content-Type: text/markdown
License-File: LICENSE.txt
Requires-Dist: lazy-loader~=0.4
Requires-Dist: levenshtein~=0.27
Requires-Dist: natsort~=8.4
Requires-Dist: networkx~=3.5
Requires-Dist: numpy~=2.3
Requires-Dist: pandas~=2.3
Requires-Dist: pglast~=7.10
Requires-Dist: psycopg[binary]~=3.2
Requires-Dist: tqdm~=4.67
Provides-Extra: vis
Requires-Dist: matplotlib~=3.10; extra == "vis"
Requires-Dist: seaborn~=0.13; extra == "vis"
Requires-Dist: graphviz~=0.21; extra == "vis"
Provides-Extra: mysql
Requires-Dist: mysql-connector-python~=9.5; extra == "mysql"
Dynamic: license-file

# PostBOUND




<p align="center">
  <img src="docs/figures/postbound-logo.svg" style="width: 150px; margin: 15px;">
</p>

PostBOUND is a Python framework for studying query optimization in database systems.
At a high level, PostBOUND has the following goals and features:

- 🏃♀️ **Rapid prototyping:** PostBOUND allows researchers to implement specific phases of the optimization process, such
  as cardinality estimation or join ordering. Researchers can focus precisely on the parts that they are studying and use
  _reasonable_ defaults for the rest. See [🧑🏫 Example](#-example) for how this looks in practice.
- 🧰 **No boilerplate:** To ease the implementation of new optimizers, PostBOUND provides a large toolbox of supporting
  functionality, such as query parsing, join graph construction, relational algebra, and database statistics.
- 📊 **Transparent benchmarks:** Once a new optimizer prototype is completed, the benchmarking tools allow it to be compared
  against other optimization strategies in a transparent and reproducible way. As a core design principle, PostBOUND executes
  queries on an actual database system such as PostgreSQL or DuckDB, rather than on research systems or artificial "lab"
  comparisons. See [💡 Essentials](#-essentials) for more information.
- 🔋 **Batteries included:** In addition to the Python package, PostBOUND provides many utilities to set up databases
  and load commonly used benchmarks (e.g., JOB, Stats, and Stack).

| **[💻 Installation](https://postbound.readthedocs.io/en/latest/setup.html)** | **[📖 Documentation](https://postbound.readthedocs.io/en/latest/)** | **[🧑🏫 Examples](https://github.com/rbergm/PostBOUND/tree/main/examples)** |


## ⚡️ Quick Start

An installation of PostBOUND consists of two parts: the PostBOUND framework itself and a running database instance
(such as PostgreSQL or DuckDB) that is used to actually execute the optimized queries (see [💡 Essentials](#-essentials)
below).

The easiest way to install PostBOUND is via pip:

```sh
pip install postbound
```

Afterwards, you can [connect](https://postbound.readthedocs.io/en/latest/10minutes.html#database-connection) it to a
Postgres server running [pg_hint_plan](https://github.com/ossc-db/pg_hint_plan).
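
For instance (a sketch; the config file name is illustrative and follows the psycopg connection-file convention used in the example below):

```python
import postbound as pb

# Assumes a psycopg-style connection config file, e.g. created during the database setup.
pg = pb.postgres.connect(config_file=".psycopg_connection")
```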

If you prefer a more integrated setup, we provide a Docker image that contains PostBOUND as well as a readily configured
Postgres server or DuckDB installation.
You can build the Docker image with the following command:

```sh
docker build -t postbound --build-arg TIMEZONE=$(cat /etc/timezone) .
```

Once the image is built, you can create any number of containers with different setups.
For example, to create a container with a local Postgres instance (using [pg_lab](https://github.com/rbergm/pg_lab)) and
set up the Stats benchmark, use the following command:

```sh
docker run -dt \
    --shm-size 4G \
    --name postbound \
    --env USE_PGLAB=true \
    --env OPTIMIZE_PG_CONFIG=true \
    --env SETUP_DUCKDB=false \
    --env SETUP_STATS=true \
    --env SETUP_JOB=false \
    --env SETUP_STACK=false \
    --volume $PWD/vol-postbound:/postbound \
    --volume $PWD/vol-pglab:/pg_lab \
    --publish 5432:5432 \
    --publish 8888:8888 \
    postbound
```

All supported configuration options are listed under [Docker options](#-docker-options).
See [Essentials](#-essentials) for why a database instance is necessary.
For Postgres, adjust the amount of shared memory depending on your machine.
Note that the initial start of the container will take a substantial amount of time.
This is because the container needs to compile a fresh Postgres server from source, download and import workloads, etc.
Use `docker logs -f postbound` to monitor the startup process.

> [!TIP]
> Shared memory is used by Postgres for its internal caching and is therefore paramount for good server performance.
> The general recommendation is to set it to at least 1/4 of the available RAM.

The Postgres server will be available at port 5432 from the host machine (using the user _postbound_ with the same
password).
You can also create a local DuckDB installation by setting `SETUP_DUCKDB` to _true_.
If you plan on using Jupyter for data analysis, also publish port 8888.
The volume mountpoints provide all internal files from PostBOUND and pg_lab (if used).

You can connect to the PostBOUND container using the usual

```sh
docker exec -it postbound /bin/bash
```

The shell environment is set up to have PostBOUND available in a fresh Python virtual environment (which is activated by
default).
Furthermore, all Postgres and DuckDB utilities are available on the _PATH_ (if the respective systems have been built during
the setup).

> [!TIP]
> If you want to install PostBOUND directly on your machine, the
> [documentation](https://postbound.readthedocs.io/en/latest/setup.html) provides a detailed setup guide.


## 💡 Essentials

As a central design decision, PostBOUND is not integrated into a specific database system.
Instead, it is implemented as a Python framework operating on top of a running database instance.
In the end, all query plans generated by PostBOUND should be executed on a real-world database system.
This decision was made to ensure that the optimization strategies are actually useful in practice; we treat the execution
time as the ultimate measure of optimization quality.

However, this decision means that we need a way to ensure that the optimization decisions made within the framework are
actually used when executing the query on the target database.
This is achieved via query hints, which typically encode the optimization decisions in comment blocks within the query.

In the case of Postgres, this interaction roughly looks like this:

<p align="center">
  <img src="docs/figures/postbound-pg-interaction.svg" style="width: 600px; margin: 15px;">
</p>

Users implement their optimization strategies in terms of
[optimization pipelines](https://postbound.readthedocs.io/en/latest/core/optimization.html).
For an incoming SQL query, the pipeline computes an abstract representation of the resulting query plan without user
intervention.
This plan is automatically converted into equivalent hints for the target database system, such as Postgres or DuckDB.
Afterwards, the hints ensure that the database system executes the query plan that was originally computed in the
optimization pipeline.

Depending on the actual database system, the hints might differ in syntax as well as semantics.
Generally speaking, PostBOUND figures out which hints to use on its own, without user intervention.
When targeting PostgreSQL, PostBOUND relies on either [pg_hint_plan](https://github.com/ossc-db/pg_hint_plan) or
[pg_lab](https://github.com/rbergm/pg_lab) to provide the necessary hinting functionality.
For DuckDB, PostBOUND uses [quacklab](https://github.com/rbergm/quacklab) hints to guide the optimizer.
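
To give a rough idea (this block is illustrative, not PostBOUND's verbatim output), a pg_hint_plan-style comment block that pins a join order and the physical operators looks roughly like this:

```sql
/*+ Leading(((a b) c))
    HashJoin(a b)
    NestLoop(a b c)
    SeqScan(a) IndexScan(b) */
SELECT *
FROM a, b, c
WHERE a.id = b.a_id AND b.id = c.b_id;
```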

> [!NOTE]
> PostBOUND's database interaction is designed to be independent of a specific system (such as PostgreSQL, Oracle, ...).
> However, the current implementation is most complete for PostgreSQL and DuckDB, with limited support for MySQL.
> This is due to practical reasons, mostly our own time budget and the popularity of the two systems in the optimizer research
> community.


## 🧑🏫 Example

The typical end-to-end workflow using PostBOUND looks like this:

1. **Implement your new optimization strategy.** To do so, you need to figure out which parts of the optimization process you
   want to customize and which optimization pipeline is the most appropriate. In a nutshell, an optimization pipeline is
   a mental model of how the optimizer works. Commonly used pipelines include the textbook-style pipeline (i.e., using a plan
   enumerator, a cost model, and a cardinality estimator) and the multi-stage pipeline, which first computes a join order and
   afterwards selects the best physical operators. The pipeline determines which interfaces can be implemented.
2. Select the **target database system** and the **benchmark** that should be used for the evaluation.
3. Optionally, select different optimization strategies that you want to compare against.
4. Use the **benchmarking tools** to execute the workload against the target database system.

For example, a random join order optimizer could be implemented like this:

```python
import random

import postbound as pb


# Step 1: define our optimization strategy.
# In this example we develop a simple join order optimizer that
# selects a linear join order at random.
# We delegate most of the actual work to the pre-defined join graph
# that keeps track of free tables.
class RandomJoinOrderOptimizer(pb.JoinOrderOptimization):
    def optimize_join_order(self, query: pb.SqlQuery) -> pb.LogicalJoinTree:
        join_tree = pb.LogicalJoinTree()
        join_graph = pb.opt.JoinGraph(query)

        while join_graph.contains_free_tables():
            candidate_tables = [
                path.target_table for path in join_graph.available_join_paths()
            ]
            next_table = random.choice(candidate_tables)

            join_tree = join_tree.join_with(next_table)
            join_graph.mark_joined(next_table)

        return join_tree

    def describe(self) -> pb.util.jsondict:
        return {"name": "random-join-order"}


# Step 2: connect to the target database, load the workload and
# set up the optimization pipeline.
# In our case, we evaluate the Join Order Benchmark on Postgres.
pg_imdb = pb.postgres.connect(config_file=".psycopg_connection_job")
job = pb.workloads.job()

optimization_pipeline = (
    pb.MultiStageOptimizationPipeline(pg_imdb)
    .use(RandomJoinOrderOptimizer())
    .build()
)

# (Step 3): in this example we just compare against the native Postgres optimizer.
# Therefore, we do not need to set up any additional optimizers.

# Step 4: execute the workload.
# We use the QueryPreparationService to prewarm the database buffer and run all
# queries as EXPLAIN ANALYZE.
query_prep = pb.bench.QueryPreparation(
    prewarm=True, analyze=True, preparatory_statements=["SET geqo TO off;"]
)
native_results = pb.bench.execute_workload(
    job,
    on=pg_imdb,
    query_preparation=query_prep,
    workload_repetitions=3,
    progressive_output="job-results-native.csv",
    logger="tqdm",
)
optimized_results = pb.bench.execute_workload(
    job,
    on=optimization_pipeline,
    query_preparation=query_prep,
    workload_repetitions=3,
    progressive_output="job-results-optimized.csv",
    logger="tqdm",
)
```


## 🤬 Issues

Does something feel wrong or broken, or is a part of PostBOUND poorly documented or otherwise unclear?
Please don't hesitate to file an issue or open a pull request!
PostBOUND is not one-off software, but an ongoing research project.
We are always happy to improve both PostBOUND and its documentation, and we feel that the user experience (specifically,
_your_ user experience) is a very important part of this.


## 🫶 Reference

If you find our work useful, please cite the following paper:

```bibtex
@inproceedings{bergmann2025elephant,
  author    = {Rico Bergmann and
               Claudio Hartmann and
               Dirk Habich and
               Wolfgang Lehner},
  title     = {An Elephant Under the Microscope: Analyzing the Interaction of Optimizer
               Components in PostgreSQL},
  journal   = {Proc. {ACM} Manag. Data},
  volume    = {3},
  number    = {1},
  pages     = {9:1--9:28},
  year      = {2025},
  url       = {https://doi.org/10.1145/3709659},
  doi       = {10.1145/3709659},
  timestamp = {Tue, 01 Apr 2025 19:03:19 +0200},
  biburl    = {https://dblp.org/rec/journals/pacmmod/BergmannHHL25.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

---


## 📖 Documentation

A detailed documentation of PostBOUND is available [here](https://postbound.readthedocs.io/en/latest/).


## 🐳 Docker options

The following options can be used when starting the Docker container as `--env` parameters (with the exception of _TIMEZONE_,
which must be specified as a `--build-arg` when creating the image).

| Argument | Allowed values | Description | Default |
|----------|----------------|-------------|---------|
| `TIMEZONE` | Any valid timezone identifier | Timezone of the Docker container (and hence the Postgres server). It is probably best to just use the value of `cat /etc/timezone`. | `UTC` |
| `USERNAME` | Any valid UNIX username | The username within the Docker container. This will also be the Postgres user and password. | `postbound` |
| `SETUP_POSTGRES` | `true` or `false` | Whether to include a Postgres server in the setup. By default, this is a vanilla Postgres server with the latest minor release. However, this can be customized with `USE_PGLAB` and `PGVER`. | `true` |
| `USE_PGLAB` | `true` or `false` | Whether to initialize a [pg_lab](https://github.com/rbergm/pg_lab) server instead of a normal Postgres server. pg_lab provides advanced hinting capabilities and offers additional extension points for the query optimizer. | `false` |
| `OPTIMIZE_PG_CONFIG` | `true` or `false` | Whether the Postgres configuration parameters should be set automatically based on your hardware platform. Rules are based on [PGTune](https://pgtune.leopard.in.ua/) by [le0pard](https://github.com/le0pard). | `false` |
| `PG_DISK_TYPE` | `SSD` or `HDD` | In case the Postgres server is configured automatically (see `OPTIMIZE_PG_CONFIG`), this indicates the kind of storage for the actual database. In turn, this influences the relative cost of sequential access and index-based access for the query optimizer. | `SSD` |
| `PGVER` | 16, 17, ... | The Postgres version to use. Notice that pg_lab supports fewer versions. This value is passed to the `postgres-setup.sh` script of the Postgres tooling (either under `db-support` or from pg_lab), which provides the most up-to-date list of supported versions. | 17 |
| `SETUP_DUCKDB` | `true` or `false` | Whether DuckDB support should be added to PostBOUND. If enabled, a [DuckDB version with hinting support](https://github.com/rbergm/quacklab) will be compiled and images for all selected benchmarks will be created. Please be aware that during testing we noticed that creating an optimized build of DuckDB takes a lot of time on some platforms (think a couple of hours). | `false` |
| `SETUP_IMDB` | `true` or `false` | Whether an [IMDB](https://doi.org/10.14778/2850583.2850594) instance should be created as part of the setup. If a Postgres server is included in the setup, PostBOUND can connect to the database using the `.psycopg_connection_job` config file. For DuckDB, the database will be located at `/postbound/imdb.duckdb`. | `false` |
| `SETUP_STATS` | `true` or `false` | Whether a [Stats](https://doi.org/10.14778/3503585.3503586) instance should be created as part of the setup. If a Postgres server is included in the setup, PostBOUND can connect to the database using the `.psycopg_connection_stats` config file. For DuckDB, the database will be located at `/postbound/stats.duckdb`. | `false` |
| `SETUP_STACK` | `true` or `false` | Whether a [Stack](https://doi.org/10.1145/3448016.3452838) instance should be created as part of the Postgres setup. If a Postgres server is included in the setup, PostBOUND can connect to the database using the `.psycopg_connection_stack` config file. DuckDB is currently not supported. | `false` |

The PostBOUND source code is located at `/postbound`. If pg_lab is being used, the corresponding files are located at `/pg_lab`.
The container automatically exposes the Postgres port 5432 and provides volume mountpoints at `/postbound` and `/pg_lab`.
These mountpoints can be used as backups or to easily ingest data into the container.
If the pg_lab mountpoint points to an existing (i.e., non-empty) directory, the setup assumes that this is already a valid
pg_lab installation and skips the corresponding setup.

> [!TIP]
> pg_lab provides advanced hinting support (e.g., for materialization or cardinality hints for base tables) and offers
> additional extension points for the query optimizer (e.g., hooks for the different cost functions).
> If pg_lab is not used, the Postgres server will set up pg_hint_plan instead.


## 📑 Repo Structure

The repository is structured as follows.
The `postbound` directory contains the actual source code; all other folders are concerned with "supporting" aspects
(which are nevertheless important).
Almost all of the subdirectories contain further READMEs that explain their purpose and structure in more detail.

| Folder | Description |
| ------------- | ----------- |
| `postbound` | Contains the source code of the PostBOUND framework |
| `docs` | Contains the high-level documentation as well as infrastructure to export the source code documentation |
| `examples` | Contains general examples for typical usage scenarios. These should be run from the root directory, e.g. as `python3 -m examples.example-01-basic-workflow` |
| `tests` | Contains the unit tests and integration tests for the framework implementation. These should also be run from the root directory, e.g. as `python3 -m unittest tests` |
| `db-support` | Contains utilities to set up instances of the respective database systems and system-specific scripts to import popular benchmarks for them |
| `workloads` | Contains the raw SQL queries of some popular benchmarks |
| `tools` | Provides various other utilities that are not directly concerned with specific database systems, but rather with common problems encountered when benchmarking query optimizers |