duckdb 1.1.4-dev11.0 → 1.1.4-dev14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/binding.gyp +1 -0
- package/package.json +1 -1
- package/src/duckdb/extension/core_functions/function_list.cpp +1 -0
- package/src/duckdb/extension/core_functions/include/core_functions/scalar/map_functions.hpp +9 -0
- package/src/duckdb/extension/core_functions/scalar/date/current.cpp +1 -0
- package/src/duckdb/extension/core_functions/scalar/generic/can_implicitly_cast.cpp +2 -2
- package/src/duckdb/extension/core_functions/scalar/generic/typeof.cpp +1 -1
- package/src/duckdb/extension/core_functions/scalar/list/flatten.cpp +91 -61
- package/src/duckdb/extension/core_functions/scalar/map/map_extract.cpp +89 -8
- package/src/duckdb/extension/icu/icu-current.cpp +63 -0
- package/src/duckdb/extension/icu/icu-makedate.cpp +43 -39
- package/src/duckdb/extension/icu/icu-timezone.cpp +63 -63
- package/src/duckdb/extension/icu/icu_extension.cpp +2 -0
- package/src/duckdb/extension/icu/include/icu-casts.hpp +39 -0
- package/src/duckdb/extension/icu/include/icu-current.hpp +17 -0
- package/src/duckdb/extension/icu/third_party/icu/stubdata/stubdata.cpp +1 -1
- package/src/duckdb/extension/json/json_functions/json_structure.cpp +3 -1
- package/src/duckdb/extension/parquet/column_writer.cpp +26 -18
- package/src/duckdb/extension/parquet/include/parquet_reader.hpp +0 -6
- package/src/duckdb/extension/parquet/include/parquet_writer.hpp +15 -1
- package/src/duckdb/extension/parquet/include/resizable_buffer.hpp +1 -0
- package/src/duckdb/extension/parquet/parquet_extension.cpp +67 -15
- package/src/duckdb/extension/parquet/parquet_reader.cpp +5 -3
- package/src/duckdb/extension/parquet/parquet_writer.cpp +5 -6
- package/src/duckdb/src/catalog/catalog.cpp +21 -8
- package/src/duckdb/src/catalog/catalog_search_path.cpp +17 -1
- package/src/duckdb/src/catalog/catalog_set.cpp +1 -1
- package/src/duckdb/src/catalog/default/default_functions.cpp +0 -3
- package/src/duckdb/src/catalog/dependency_list.cpp +7 -0
- package/src/duckdb/src/common/adbc/adbc.cpp +1 -56
- package/src/duckdb/src/common/arrow/arrow_converter.cpp +3 -2
- package/src/duckdb/src/common/arrow/arrow_type_extension.cpp +58 -28
- package/src/duckdb/src/common/arrow/schema_metadata.cpp +1 -1
- package/src/duckdb/src/common/compressed_file_system.cpp +6 -2
- package/src/duckdb/src/common/enum_util.cpp +26 -22
- package/src/duckdb/src/common/error_data.cpp +3 -2
- package/src/duckdb/src/common/gzip_file_system.cpp +8 -8
- package/src/duckdb/src/common/local_file_system.cpp +2 -2
- package/src/duckdb/src/common/multi_file_reader.cpp +1 -1
- package/src/duckdb/src/common/random_engine.cpp +4 -1
- package/src/duckdb/src/common/serializer/memory_stream.cpp +23 -19
- package/src/duckdb/src/common/serializer/serializer.cpp +1 -1
- package/src/duckdb/src/common/types/bit.cpp +1 -1
- package/src/duckdb/src/common/types/column/column_data_allocator.cpp +0 -5
- package/src/duckdb/src/common/types/column/column_data_collection.cpp +4 -1
- package/src/duckdb/src/common/types/data_chunk.cpp +2 -1
- package/src/duckdb/src/common/types/row/tuple_data_segment.cpp +0 -4
- package/src/duckdb/src/common/types.cpp +1 -1
- package/src/duckdb/src/execution/index/art/art.cpp +52 -42
- package/src/duckdb/src/execution/index/art/leaf.cpp +4 -9
- package/src/duckdb/src/execution/index/art/node.cpp +13 -13
- package/src/duckdb/src/execution/index/art/prefix.cpp +21 -16
- package/src/duckdb/src/execution/index/bound_index.cpp +6 -8
- package/src/duckdb/src/execution/index/fixed_size_allocator.cpp +39 -34
- package/src/duckdb/src/execution/index/fixed_size_buffer.cpp +2 -1
- package/src/duckdb/src/execution/index/unbound_index.cpp +10 -0
- package/src/duckdb/src/execution/operator/aggregate/physical_streaming_window.cpp +62 -44
- package/src/duckdb/src/execution/operator/csv_scanner/scanner/column_count_scanner.cpp +26 -0
- package/src/duckdb/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp +69 -40
- package/src/duckdb/src/execution/operator/csv_scanner/sniffer/dialect_detection.cpp +3 -7
- package/src/duckdb/src/execution/operator/csv_scanner/sniffer/header_detection.cpp +11 -5
- package/src/duckdb/src/execution/operator/csv_scanner/sniffer/type_detection.cpp +4 -0
- package/src/duckdb/src/execution/operator/csv_scanner/state_machine/csv_state_machine_cache.cpp +8 -8
- package/src/duckdb/src/execution/operator/csv_scanner/util/csv_error.cpp +36 -12
- package/src/duckdb/src/execution/operator/csv_scanner/util/csv_reader_options.cpp +12 -9
- package/src/duckdb/src/execution/operator/join/physical_hash_join.cpp +0 -1
- package/src/duckdb/src/execution/operator/persistent/physical_copy_database.cpp +29 -1
- package/src/duckdb/src/execution/operator/persistent/physical_delete.cpp +58 -10
- package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp +58 -35
- package/src/duckdb/src/execution/operator/schema/physical_create_art_index.cpp +2 -1
- package/src/duckdb/src/execution/radix_partitioned_hashtable.cpp +9 -4
- package/src/duckdb/src/execution/sample/reservoir_sample.cpp +7 -6
- package/src/duckdb/src/function/compression_config.cpp +4 -0
- package/src/duckdb/src/function/function_binder.cpp +1 -1
- package/src/duckdb/src/function/scalar/system/write_log.cpp +2 -2
- package/src/duckdb/src/function/table/arrow/arrow_duck_schema.cpp +15 -2
- package/src/duckdb/src/function/table/arrow_conversion.cpp +10 -10
- package/src/duckdb/src/function/table/copy_csv.cpp +8 -5
- package/src/duckdb/src/function/table/read_csv.cpp +21 -4
- package/src/duckdb/src/function/table/sniff_csv.cpp +7 -0
- package/src/duckdb/src/function/table/system/duckdb_extensions.cpp +4 -0
- package/src/duckdb/src/function/table/system/duckdb_secret_types.cpp +71 -0
- package/src/duckdb/src/function/table/system_functions.cpp +1 -0
- package/src/duckdb/src/function/table/table_scan.cpp +120 -36
- package/src/duckdb/src/function/table/version/pragma_version.cpp +4 -4
- package/src/duckdb/src/function/window/window_aggregate_function.cpp +6 -1
- package/src/duckdb/src/function/window/window_boundaries_state.cpp +135 -11
- package/src/duckdb/src/function/window/window_segment_tree.cpp +50 -22
- package/src/duckdb/src/function/window/window_token_tree.cpp +4 -3
- package/src/duckdb/src/include/duckdb/catalog/catalog.hpp +4 -0
- package/src/duckdb/src/include/duckdb/catalog/catalog_search_path.hpp +2 -0
- package/src/duckdb/src/include/duckdb/catalog/dependency_list.hpp +1 -0
- package/src/duckdb/src/include/duckdb/common/arrow/arrow_type_extension.hpp +4 -2
- package/src/duckdb/src/include/duckdb/common/enum_util.hpp +8 -8
- package/src/duckdb/src/include/duckdb/common/multi_file_reader.hpp +0 -2
- package/src/duckdb/src/include/duckdb/common/serializer/deserializer.hpp +8 -3
- package/src/duckdb/src/include/duckdb/common/serializer/memory_stream.hpp +6 -1
- package/src/duckdb/src/include/duckdb/common/serializer/serialization_data.hpp +25 -0
- package/src/duckdb/src/include/duckdb/common/serializer/serializer.hpp +9 -3
- package/src/duckdb/src/include/duckdb/common/types/selection_vector.hpp +1 -1
- package/src/duckdb/src/include/duckdb/execution/index/art/art.hpp +11 -14
- package/src/duckdb/src/include/duckdb/execution/index/art/prefix.hpp +5 -4
- package/src/duckdb/src/include/duckdb/execution/index/bound_index.hpp +21 -10
- package/src/duckdb/src/include/duckdb/execution/index/fixed_size_allocator.hpp +6 -5
- package/src/duckdb/src/include/duckdb/execution/index/fixed_size_buffer.hpp +37 -32
- package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/base_scanner.hpp +36 -1
- package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/column_count_scanner.hpp +3 -0
- package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/sniffer/csv_sniffer.hpp +2 -0
- package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/state_machine_options.hpp +5 -5
- package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/string_value_scanner.hpp +5 -30
- package/src/duckdb/src/include/duckdb/execution/reservoir_sample.hpp +7 -1
- package/src/duckdb/src/include/duckdb/function/scalar_function.hpp +3 -3
- package/src/duckdb/src/include/duckdb/function/table/arrow/arrow_duck_schema.hpp +1 -0
- package/src/duckdb/src/include/duckdb/function/table/system_functions.hpp +4 -0
- package/src/duckdb/src/include/duckdb/function/window/window_boundaries_state.hpp +2 -2
- package/src/duckdb/src/include/duckdb/logging/logger.hpp +40 -119
- package/src/duckdb/src/include/duckdb/logging/logging.hpp +0 -2
- package/src/duckdb/src/include/duckdb/main/config.hpp +5 -0
- package/src/duckdb/src/include/duckdb/main/connection.hpp +0 -8
- package/src/duckdb/src/include/duckdb/main/connection_manager.hpp +2 -1
- package/src/duckdb/src/include/duckdb/main/extension.hpp +1 -0
- package/src/duckdb/src/include/duckdb/main/extension_entries.hpp +11 -7
- package/src/duckdb/src/include/duckdb/main/extension_helper.hpp +1 -0
- package/src/duckdb/src/include/duckdb/main/secret/secret.hpp +2 -0
- package/src/duckdb/src/include/duckdb/main/secret/secret_manager.hpp +3 -0
- package/src/duckdb/src/include/duckdb/main/settings.hpp +10 -0
- package/src/duckdb/src/include/duckdb/parser/constraint.hpp +9 -0
- package/src/duckdb/src/include/duckdb/parser/expression/window_expression.hpp +36 -9
- package/src/duckdb/src/include/duckdb/parser/parsed_data/create_view_info.hpp +2 -1
- package/src/duckdb/src/include/duckdb/parser/query_node/set_operation_node.hpp +8 -2
- package/src/duckdb/src/include/duckdb/planner/binder.hpp +4 -0
- package/src/duckdb/src/include/duckdb/planner/expression/bound_parameter_data.hpp +9 -1
- package/src/duckdb/src/include/duckdb/planner/filter/constant_filter.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/filter/in_filter.hpp +0 -2
- package/src/duckdb/src/include/duckdb/planner/filter/optional_filter.hpp +4 -4
- package/src/duckdb/src/include/duckdb/planner/table_filter.hpp +1 -1
- package/src/duckdb/src/include/duckdb/storage/data_table.hpp +14 -10
- package/src/duckdb/src/include/duckdb/storage/index_storage_info.hpp +4 -0
- package/src/duckdb/src/include/duckdb/storage/single_file_block_manager.hpp +6 -1
- package/src/duckdb/src/include/duckdb/storage/storage_info.hpp +7 -2
- package/src/duckdb/src/include/duckdb/storage/storage_manager.hpp +9 -0
- package/src/duckdb/src/include/duckdb/storage/storage_options.hpp +2 -0
- package/src/duckdb/src/include/duckdb/storage/string_uncompressed.hpp +4 -3
- package/src/duckdb/src/include/duckdb/storage/table/column_data.hpp +2 -0
- package/src/duckdb/src/include/duckdb/storage/table/table_index_list.hpp +6 -4
- package/src/duckdb/src/include/duckdb/storage/table/table_statistics.hpp +1 -1
- package/src/duckdb/src/include/duckdb/storage/write_ahead_log.hpp +2 -0
- package/src/duckdb/src/include/duckdb/transaction/local_storage.hpp +2 -0
- package/src/duckdb/src/include/duckdb/transaction/meta_transaction.hpp +1 -1
- package/src/duckdb/src/logging/logger.cpp +8 -66
- package/src/duckdb/src/main/attached_database.cpp +3 -1
- package/src/duckdb/src/main/client_context.cpp +4 -2
- package/src/duckdb/src/main/config.cpp +20 -2
- package/src/duckdb/src/main/connection.cpp +2 -29
- package/src/duckdb/src/main/connection_manager.cpp +5 -3
- package/src/duckdb/src/main/database.cpp +2 -2
- package/src/duckdb/src/main/extension/extension_helper.cpp +4 -5
- package/src/duckdb/src/main/extension/extension_install.cpp +23 -10
- package/src/duckdb/src/main/extension/extension_load.cpp +6 -7
- package/src/duckdb/src/main/extension.cpp +27 -9
- package/src/duckdb/src/main/secret/secret_manager.cpp +11 -0
- package/src/duckdb/src/main/settings/custom_settings.cpp +44 -0
- package/src/duckdb/src/optimizer/column_lifetime_analyzer.cpp +6 -0
- package/src/duckdb/src/optimizer/filter_combiner.cpp +13 -3
- package/src/duckdb/src/optimizer/filter_pushdown.cpp +33 -6
- package/src/duckdb/src/optimizer/late_materialization.cpp +14 -3
- package/src/duckdb/src/optimizer/remove_unused_columns.cpp +0 -3
- package/src/duckdb/src/parser/parsed_data/attach_info.cpp +5 -1
- package/src/duckdb/src/parser/parsed_data/create_view_info.cpp +6 -3
- package/src/duckdb/src/parser/query_node/set_operation_node.cpp +49 -0
- package/src/duckdb/src/parser/transform/expression/transform_columnref.cpp +1 -0
- package/src/duckdb/src/parser/transform/expression/transform_function.cpp +50 -12
- package/src/duckdb/src/planner/binder/expression/bind_columnref_expression.cpp +7 -5
- package/src/duckdb/src/planner/binder/expression/bind_comparison_expression.cpp +1 -0
- package/src/duckdb/src/planner/binder/expression/bind_operator_expression.cpp +2 -2
- package/src/duckdb/src/planner/binder/expression/bind_star_expression.cpp +12 -2
- package/src/duckdb/src/planner/binder/statement/bind_copy_database.cpp +0 -1
- package/src/duckdb/src/planner/binder/statement/bind_create.cpp +55 -39
- package/src/duckdb/src/planner/binder/statement/bind_execute.cpp +2 -1
- package/src/duckdb/src/planner/binder/tableref/bind_basetableref.cpp +15 -7
- package/src/duckdb/src/planner/binder/tableref/bind_showref.cpp +13 -8
- package/src/duckdb/src/planner/binder/tableref/bind_table_function.cpp +8 -3
- package/src/duckdb/src/planner/expression/bound_function_expression.cpp +17 -1
- package/src/duckdb/src/planner/expression_binder/index_binder.cpp +1 -0
- package/src/duckdb/src/planner/filter/conjunction_filter.cpp +1 -0
- package/src/duckdb/src/planner/filter/constant_filter.cpp +21 -0
- package/src/duckdb/src/planner/filter/in_filter.cpp +4 -7
- package/src/duckdb/src/planner/logical_operator.cpp +5 -3
- package/src/duckdb/src/planner/planner.cpp +1 -1
- package/src/duckdb/src/planner/subquery/flatten_dependent_join.cpp +2 -0
- package/src/duckdb/src/storage/checkpoint/table_data_writer.cpp +3 -4
- package/src/duckdb/src/storage/checkpoint_manager.cpp +3 -5
- package/src/duckdb/src/storage/compression/dictionary/decompression.cpp +4 -4
- package/src/duckdb/src/storage/compression/fsst.cpp +2 -2
- package/src/duckdb/src/storage/compression/roaring/common.cpp +10 -1
- package/src/duckdb/src/storage/compression/string_uncompressed.cpp +11 -6
- package/src/duckdb/src/storage/compression/validity_uncompressed.cpp +4 -0
- package/src/duckdb/src/storage/compression/zstd.cpp +6 -0
- package/src/duckdb/src/storage/data_table.cpp +104 -109
- package/src/duckdb/src/storage/local_storage.cpp +8 -6
- package/src/duckdb/src/storage/magic_bytes.cpp +1 -1
- package/src/duckdb/src/storage/serialization/serialize_dependency.cpp +3 -3
- package/src/duckdb/src/storage/serialization/serialize_nodes.cpp +3 -3
- package/src/duckdb/src/storage/serialization/serialize_query_node.cpp +7 -5
- package/src/duckdb/src/storage/single_file_block_manager.cpp +95 -28
- package/src/duckdb/src/storage/storage_info.cpp +38 -0
- package/src/duckdb/src/storage/storage_manager.cpp +11 -0
- package/src/duckdb/src/storage/table/column_data.cpp +4 -0
- package/src/duckdb/src/storage/table/column_data_checkpointer.cpp +3 -3
- package/src/duckdb/src/storage/table/row_group_collection.cpp +67 -68
- package/src/duckdb/src/storage/table/table_statistics.cpp +4 -4
- package/src/duckdb/src/storage/table_index_list.cpp +41 -15
- package/src/duckdb/src/storage/wal_replay.cpp +3 -1
- package/src/duckdb/src/storage/write_ahead_log.cpp +11 -4
- package/src/duckdb/src/transaction/meta_transaction.cpp +1 -1
- package/src/duckdb/src/verification/deserialized_statement_verifier.cpp +2 -1
- package/src/duckdb/third_party/httplib/httplib.hpp +0 -1
- package/src/duckdb/third_party/re2/util/logging.h +10 -10
- package/src/duckdb/ub_src_function_table_system.cpp +2 -0
package/src/duckdb/src/storage/single_file_block_manager.cpp

@@ -4,11 +4,13 @@
 #include "duckdb/common/checksum.hpp"
 #include "duckdb/common/exception.hpp"
 #include "duckdb/common/serializer/memory_stream.hpp"
+#include "duckdb/main/attached_database.hpp"
 #include "duckdb/main/config.hpp"
 #include "duckdb/main/database.hpp"
 #include "duckdb/storage/buffer_manager.hpp"
 #include "duckdb/storage/metadata/metadata_reader.hpp"
 #include "duckdb/storage/metadata/metadata_writer.hpp"
+#include "duckdb/storage/storage_manager.hpp"

 #include <algorithm>
 #include <cstring>
@@ -59,27 +61,25 @@ MainHeader MainHeader::Read(ReadStream &source) {
     }
     header.version_number = source.Read<uint64_t>();
     // check the version number
-    if (header.version_number
+    if (header.version_number < VERSION_NUMBER_LOWER || header.version_number > VERSION_NUMBER_UPPER) {
         auto version = GetDuckDBVersion(header.version_number);
         string version_text;
         if (!version.empty()) {
             // known version
             version_text = "DuckDB version " + string(version);
         } else {
-            version_text = string("an ") +
+            version_text = string("an ") +
+                           (VERSION_NUMBER_UPPER > header.version_number ? "older development" : "newer") +
                            string(" version of DuckDB");
         }
         throw IOException(
-            "Trying to read a database file with version number %lld, but we can only read
+            "Trying to read a database file with version number %lld, but we can only read versions between %lld and "
+            "%lld.\n"
             "The database file was created with %s.\n\n"
-            "
-            "
-            "
-
-            "EXPORT DATABASE command "
-            "followed by IMPORT DATABASE on the current version of DuckDB.\n\n"
-            "See the storage page for more information: https://duckdb.org/internals/storage",
-            header.version_number, VERSION_NUMBER, version_text);
+            "Newer DuckDB version might introduce backward incompatible changes (possibly guarded by compatibility "
+            "settings)"
+            "See the storage page for migration strategy and more information: https://duckdb.org/internals/storage",
+            header.version_number, VERSION_NUMBER_LOWER, VERSION_NUMBER_UPPER, version_text);
     }
     // read the flags
     for (idx_t i = 0; i < FLAG_COUNT; i++) {
@@ -97,15 +97,15 @@ void DatabaseHeader::Write(WriteStream &ser) {
     ser.Write<uint64_t>(block_count);
     ser.Write<idx_t>(block_alloc_size);
     ser.Write<idx_t>(vector_size);
+    ser.Write<idx_t>(serialization_compatibility);
 }

-DatabaseHeader DatabaseHeader::Read(ReadStream &source) {
+DatabaseHeader DatabaseHeader::Read(const MainHeader &main_header, ReadStream &source) {
     DatabaseHeader header;
     header.iteration = source.Read<uint64_t>();
     header.meta_block = source.Read<idx_t>();
     header.free_list = source.Read<idx_t>();
     header.block_count = source.Read<uint64_t>();
-
     header.block_alloc_size = source.Read<idx_t>();

     // backwards compatibility
|
|
123
123
|
"vector size of %llu bytes.",
|
124
124
|
STANDARD_VECTOR_SIZE, header.vector_size);
|
125
125
|
}
|
126
|
-
|
126
|
+
if (main_header.version_number == 64) {
|
127
|
+
// version number 64 does not have the serialization compatibility in the file - default to 1
|
128
|
+
header.serialization_compatibility = 1;
|
129
|
+
} else {
|
130
|
+
// read from the file
|
131
|
+
header.serialization_compatibility = source.Read<idx_t>();
|
132
|
+
}
|
127
133
|
return header;
|
128
134
|
}
|
129
135
|
|
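
DatabaseHeader::Read now takes the already-parsed MainHeader so it can decide whether the serialization_compatibility field exists in the file: format-64 files predate the field and default to 1, newer files carry it explicitly. A simplified sketch of that rule; FakeSource and ReadSerializationCompatibility are hypothetical stand-ins, not duckdb's ReadStream API:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for duckdb's ReadStream, just enough for the sketch.
struct FakeSource {
    uint64_t next_value;
    uint64_t Read() { return next_value; }
};

// Format-64 files have no serialization_compatibility field, so it defaults to 1;
// format-65 files store the value explicitly.
static uint64_t ReadSerializationCompatibility(uint64_t file_version, FakeSource &source) {
    if (file_version == 64) {
        return 1;
    }
    return source.Read();
}

int main() {
    FakeSource src{4};
    printf("%llu %llu\n", (unsigned long long)ReadSerializationCompatibility(64, src),
           (unsigned long long)ReadSerializationCompatibility(65, src)); // prints: 1 4
    return 0;
}
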
@@ -133,10 +139,14 @@ void SerializeHeaderStructure(T header, data_ptr_t ptr) {
     header.Write(ser);
 }

-
-
+MainHeader DeserializeMainHeader(data_ptr_t ptr) {
+    MemoryStream source(ptr, Storage::FILE_HEADER_SIZE);
+    return MainHeader::Read(source);
+}
+
+DatabaseHeader DeserializeDatabaseHeader(const MainHeader &main_header, data_ptr_t ptr) {
     MemoryStream source(ptr, Storage::FILE_HEADER_SIZE);
-    return
+    return DatabaseHeader::Read(main_header, source);
 }

 SingleFileBlockManager::SingleFileBlockManager(AttachedDatabase &db, const string &path_p,
@@ -166,6 +176,25 @@ FileOpenFlags SingleFileBlockManager::GetFileFlags(bool create_new) const {
     return result;
 }

+void SingleFileBlockManager::AddStorageVersionTag() {
+    db.tags["storage_version"] = GetStorageVersionName(options.storage_version.GetIndex());
+}
+
+uint64_t SingleFileBlockManager::GetVersionNumber() {
+    uint64_t version_number = VERSION_NUMBER;
+    if (options.storage_version.GetIndex() >= 4) {
+        version_number = 65;
+    }
+    return version_number;
+}
+
+MainHeader ConstructMainHeader(idx_t version_number) {
+    MainHeader main_header;
+    main_header.version_number = version_number;
+    memset(main_header.flags, 0, sizeof(uint64_t) * MainHeader::FLAG_COUNT);
+    return main_header;
+}
+
 void SingleFileBlockManager::CreateNewDatabase() {
     auto flags = GetFileFlags(true);

@@ -177,10 +206,11 @@ void SingleFileBlockManager::CreateNewDatabase() {
     // first fill in the new header
     header_buffer.Clear();

-
-
-
+    options.version_number = GetVersionNumber();
+    db.GetStorageManager().SetStorageVersion(options.storage_version.GetIndex());
+    AddStorageVersionTag();

+    MainHeader main_header = ConstructMainHeader(options.version_number.GetIndex());
     SerializeHeaderStructure<MainHeader>(main_header, header_buffer.buffer);
     // now write the header to the file
     ChecksumAndWrite(header_buffer, 0);
@@ -198,6 +228,7 @@ void SingleFileBlockManager::CreateNewDatabase() {
     // We create the SingleFileBlockManager with the desired block allocation size before calling CreateNewDatabase.
     h1.block_alloc_size = GetBlockAllocSize();
     h1.vector_size = STANDARD_VECTOR_SIZE;
+    h1.serialization_compatibility = options.storage_version.GetIndex();
     SerializeHeaderStructure<DatabaseHeader>(h1, header_buffer.buffer);
     ChecksumAndWrite(header_buffer, Storage::FILE_HEADER_SIZE);

@@ -210,6 +241,7 @@ void SingleFileBlockManager::CreateNewDatabase() {
     // We create the SingleFileBlockManager with the desired block allocation size before calling CreateNewDatabase.
     h2.block_alloc_size = GetBlockAllocSize();
     h2.vector_size = STANDARD_VECTOR_SIZE;
+    h2.serialization_compatibility = options.storage_version.GetIndex();
     SerializeHeaderStructure<DatabaseHeader>(h2, header_buffer.buffer);
     ChecksumAndWrite(header_buffer, Storage::FILE_HEADER_SIZE * 2ULL);

@@ -235,16 +267,17 @@ void SingleFileBlockManager::LoadExistingDatabase() {
     MainHeader::CheckMagicBytes(*handle);
     // otherwise, we check the metadata of the file
     ReadAndChecksum(header_buffer, 0);
-
+    MainHeader main_header = DeserializeMainHeader(header_buffer.buffer);
+    options.version_number = main_header.version_number;

     // read the database headers from disk
     DatabaseHeader h1;
     ReadAndChecksum(header_buffer, Storage::FILE_HEADER_SIZE);
-    h1 =
+    h1 = DeserializeDatabaseHeader(main_header, header_buffer.buffer);

     DatabaseHeader h2;
     ReadAndChecksum(header_buffer, Storage::FILE_HEADER_SIZE * 2ULL);
-    h2 =
+    h2 = DeserializeDatabaseHeader(main_header, header_buffer.buffer);

     // check the header with the highest iteration count
     if (h1.iteration > h2.iteration) {
@@ -256,6 +289,7 @@ void SingleFileBlockManager::LoadExistingDatabase() {
         active_header = 1;
         Initialize(h2, GetOptionalBlockAllocSize());
     }
+    AddStorageVersionTag();
     LoadFreeList();
 }

@@ -288,11 +322,32 @@ void SingleFileBlockManager::Initialize(const DatabaseHeader &header, const opti
     meta_block = header.meta_block;
     iteration_count = header.iteration;
     max_block = NumericCast<block_id_t>(header.block_count);
+    if (options.storage_version.IsValid()) {
+        // storage version specified explicity - use requested storage version
+        auto requested_compat_version = options.storage_version.GetIndex();
+        if (requested_compat_version < header.serialization_compatibility) {
+            throw InvalidInputException(
+                "Error opening \"%s\": cannot initialize database with storage version %d - which is lower than what "
+                "the database itself uses (%d). The storage version of an existing database cannot be lowered.",
+                path, requested_compat_version, header.serialization_compatibility);
+        }
+    } else {
+        // load storage version from header
+        options.storage_version = header.serialization_compatibility;
+    }
+    if (header.serialization_compatibility > SerializationCompatibility::Latest().serialization_version) {
+        throw InvalidInputException(
+            "Error opening \"%s\": file was written with a storage version greater than the latest version supported "
+            "by this DuckDB instance. Try opening the file with a newer version of DuckDB.",
+            path);
+    }
+    db.GetStorageManager().SetStorageVersion(options.storage_version.GetIndex());

     if (block_alloc_size.IsValid() && block_alloc_size.GetIndex() != header.block_alloc_size) {
-        throw InvalidInputException(
-
-
+        throw InvalidInputException(
+            "Error opening \"%s\": cannot initialize the same database with a different block size: provided block "
+            "size: %llu, file block size: %llu",
+            path, GetBlockAllocSize(), header.block_alloc_size);
     }
     SetBlockAllocSize(header.block_alloc_size);
 }
@@ -666,6 +721,7 @@ void SingleFileBlockManager::WriteHeader(DatabaseHeader header) {
     }
     metadata_manager.Flush();
     header.block_count = NumericCast<idx_t>(max_block);
+    header.serialization_compatibility = options.storage_version.GetIndex();

     auto &config = DBConfig::Get(db);
     if (config.options.checkpoint_abort == CheckpointAbort::DEBUG_ABORT_AFTER_FREE_LIST_WRITE) {
@@ -675,9 +731,20 @@ void SingleFileBlockManager::WriteHeader(DatabaseHeader header) {
     // We need to fsync BEFORE we write the header to ensure that all the previous blocks are written as well
     handle->Sync();

-    // set the header inside the buffer
     header_buffer.Clear();
-
+    // if we are upgrading the database from version 64 -> version 65, we need to re-write the main header
+    if (options.version_number.GetIndex() == 64 && options.storage_version.GetIndex() >= 4) {
+        // rewrite the main header
+        options.version_number = 65;
+        MainHeader main_header = ConstructMainHeader(options.version_number.GetIndex());
+        SerializeHeaderStructure<MainHeader>(main_header, header_buffer.buffer);
+        // now write the header to the file
+        ChecksumAndWrite(header_buffer, 0);
+        header_buffer.Clear();
+    }
+
+    // set the header inside the buffer
+    MemoryStream serializer(Allocator::Get(db));
     header.Write(serializer);
     memcpy(header_buffer.buffer, serializer.GetData(), serializer.GetPosition());
     // now write the header to the file, active_header determines whether we write to h1 or h2
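
GetVersionNumber (earlier in this file) and the WriteHeader hunk above follow one rule: storage (serialization) version 4 or higher needs file format 65, anything lower keeps 64, and a checkpoint on a format-64 file that now needs the new format rewrites the main header in place. A small sketch of that mapping and upgrade condition; FileVersionFor and NeedsMainHeaderRewrite are hypothetical helpers distilled from the hunks, not the full write path:

#include <cassert>
#include <cstdint>

// Storage versions >= 4 need the new file format (65); older ones keep 64.
static uint64_t FileVersionFor(uint64_t storage_version) {
    return storage_version >= 4 ? 65 : 64;
}

// During WriteHeader, a format-64 file is upgraded when the checkpoint is
// written with a storage version that requires the new format.
static bool NeedsMainHeaderRewrite(uint64_t current_file_version, uint64_t storage_version) {
    return current_file_version == 64 && storage_version >= 4;
}

int main() {
    assert(FileVersionFor(1) == 64);
    assert(FileVersionFor(4) == 65);
    assert(NeedsMainHeaderRewrite(64, 4));
    assert(!NeedsMainHeaderRewrite(65, 4));
    return 0;
}
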
package/src/duckdb/src/storage/storage_info.cpp

@@ -6,6 +6,11 @@
 namespace duckdb {

 const uint64_t VERSION_NUMBER = 64;
+const uint64_t VERSION_NUMBER_LOWER = 64;
+const uint64_t VERSION_NUMBER_UPPER = 65;
+
+static_assert(VERSION_NUMBER_LOWER <= VERSION_NUMBER, "Check on VERSION_NUMBER lower bound");
+static_assert(VERSION_NUMBER <= VERSION_NUMBER_UPPER, "Check on VERSION_NUMBER upper bound");

 struct StorageVersionInfo {
     const char *version_name;
@@ -71,6 +76,7 @@ static const StorageVersionInfo storage_version_info[] = {
     {"v1.1.1", 64},
     {"v1.1.2", 64},
     {"v1.1.3", 64},
+    {"v1.2.0", 65},
     {nullptr, 0}
 };
 // END OF STORAGE VERSION INFO
@@ -99,6 +105,38 @@ static const SerializationVersionInfo serialization_version_info[] = {
 static_assert(DEFAULT_SERIALIZATION_VERSION_INFO <= LATEST_SERIALIZATION_VERSION_INFO,
               "Check on SERIALIZATION_VERSION_INFO");

+string GetStorageVersionName(idx_t serialization_version) {
+    if (serialization_version < 4) {
+        // special handling for lower serialization versions
+        return "v1.0.0 - v1.1.3";
+    }
+    optional_idx min_idx;
+    optional_idx max_idx;
+    for (idx_t i = 0; serialization_version_info[i].version_name; i++) {
+        if (strcmp(serialization_version_info[i].version_name, "latest") == 0) {
+            continue;
+        }
+        if (serialization_version_info[i].serialization_version != serialization_version) {
+            continue;
+        }
+        if (!min_idx.IsValid()) {
+            min_idx = i;
+        } else {
+            max_idx = i;
+        }
+    }
+    if (!min_idx.IsValid()) {
+        D_ASSERT(0);
+        return "--UNKNOWN--";
+    }
+    auto min_name = serialization_version_info[min_idx.GetIndex()].version_name;
+    if (!max_idx.IsValid()) {
+        return min_name;
+    }
+    auto max_name = serialization_version_info[max_idx.GetIndex()].version_name;
+    return string(min_name) + " - " + string(max_name);
+}
+
 optional_idx GetStorageVersion(const char *version_string) {
     for (idx_t i = 0; storage_version_info[i].version_name; i++) {
         if (!strcmp(storage_version_info[i].version_name, version_string)) {
package/src/duckdb/src/storage/storage_manager.cpp

@@ -145,6 +145,7 @@ void SingleFileStorageManager::LoadDatabase(StorageOptions storage_options) {
     options.read_only = read_only;
     options.use_direct_io = config.options.use_direct_io;
     options.debug_initialize = config.options.debug_initialize;
+    options.storage_version = storage_options.storage_version;

     idx_t row_group_size = DEFAULT_ROW_GROUP_SIZE;
     if (storage_options.row_group_size.IsValid()) {
@@ -182,6 +183,10 @@ void SingleFileStorageManager::LoadDatabase(StorageOptions storage_options) {
         // No explicit option provided: use the default option.
         options.block_alloc_size = config.options.default_block_alloc_size;
     }
+    if (!options.storage_version.IsValid()) {
+        // when creating a new database we default to the serialization version specified in the config
+        options.storage_version = config.options.serialization_compatibility.serialization_version;
+    }

     // Initialize the block manager before creating a new database.
     auto sf_block_manager = make_uniq<SingleFileBlockManager>(db, path, options);
@@ -218,6 +223,12 @@ void SingleFileStorageManager::LoadDatabase(StorageOptions storage_options) {
         auto wal_path = GetWALPath();
         wal = WriteAheadLog::Replay(fs, db, wal_path);
     }
+    if (row_group_size > 122880ULL && GetStorageVersion() < 4) {
+        throw InvalidInputException("Unsupported row group size %llu - row group sizes >= 122_880 are only supported "
+                                    "with STORAGE_VERSION '1.2.0' or above.\nExplicitly specify a newer storage "
+                                    "version when creating the database to enable larger row groups",
+                                    row_group_size);
+    }

     load_complete = true;
 }
package/src/duckdb/src/storage/table/column_data.cpp

@@ -51,6 +51,10 @@ DataTableInfo &ColumnData::GetTableInfo() const {
     return info;
 }

+StorageManager &ColumnData::GetStorageManager() const {
+    return info.GetDB().GetStorageManager();
+}
+
 const LogicalType &ColumnData::RootType() const {
     if (parent) {
         return parent->RootType();
package/src/duckdb/src/storage/table/column_data_checkpointer.cpp

@@ -250,9 +250,9 @@ vector<CheckpointAnalyzeResult> ColumnDataCheckpointer::DetectBestCompressionMet
         D_ASSERT(compression_idx != DConstants::INVALID_INDEX);

         auto &best_function = *functions[compression_idx];
-
-
-
+        DUCKDB_LOG_INFO(db, "duckdb.ColumnDataCheckPointer", "FinalAnalyze(%s) result for %s.%s.%d(%s): %d",
+                        EnumUtil::ToString(best_function.type), col_data.info.GetSchemaName(),
+                        col_data.info.GetTableName(), col_data.column_index, col_data.type.ToString(), best_score);
         result[i] = CheckpointAnalyzeResult(std::move(chosen_state), best_function);
     }
     return result;
package/src/duckdb/src/storage/table/row_group_collection.cpp

@@ -404,12 +404,6 @@ bool RowGroupCollection::Append(DataChunk &chunk, TableAppendState &state) {
         column_stats.UpdateDistinctStatistics(chunk.data[col_idx], chunk.size(), state.hashes);
     }

-    auto &table_sample = state.stats.GetTableSampleRef(*local_stats_lock);
-    if (!table_sample.destroyed) {
-        D_ASSERT(table_sample.type == SampleType::RESERVOIR_SAMPLE);
-        table_sample.AddToReservoir(chunk);
-    }
-
     return new_row_group;
 }

@@ -443,22 +437,6 @@ void RowGroupCollection::FinalizeAppend(TransactionData transaction, TableAppend
         global_stats.DistinctStats().Merge(local_stats.DistinctStats());
     }

-    auto local_sample = state.stats.GetTableSample(*local_stats_lock);
-    auto global_sample = stats.GetTableSample(*global_stats_lock);
-
-    if (local_sample && global_sample) {
-        D_ASSERT(global_sample->type == SampleType::RESERVOIR_SAMPLE);
-        auto &reservoir_sample = global_sample->Cast<ReservoirSample>();
-        reservoir_sample.Merge(std::move(local_sample));
-        // initialize the thread local sample again
-        auto new_local_sample = make_uniq<ReservoirSample>(reservoir_sample.GetSampleCount());
-        state.stats.SetTableSample(*local_stats_lock, std::move(new_local_sample));
-        stats.SetTableSample(*global_stats_lock, std::move(global_sample));
-    } else {
-        state.stats.SetTableSample(*local_stats_lock, std::move(local_sample));
-        stats.SetTableSample(*global_stats_lock, std::move(global_sample));
-    }
-
     Verify();
 }

@@ -607,10 +585,6 @@ idx_t RowGroupCollection::Delete(TransactionData transaction, DataTable &table,
         delete_count += row_group->Delete(transaction, table, ids + start, pos - start);
     } while (pos < count);

-    // When deleting destroy the sample.
-    auto stats_guard = stats.GetLock();
-    stats.DestroyTableSample(*stats_guard);
-
     return delete_count;
 }

@@ -619,6 +593,7 @@ idx_t RowGroupCollection::Delete(TransactionData transaction, DataTable &table,
 //===--------------------------------------------------------------------===//
 void RowGroupCollection::Update(TransactionData transaction, row_t *ids, const vector<PhysicalIndex> &column_ids,
                                 DataChunk &updates) {
+    D_ASSERT(updates.size() >= 1);
     idx_t pos = 0;
     do {
         idx_t start = pos;
@@ -648,72 +623,106 @@ void RowGroupCollection::Update(TransactionData transaction, row_t *ids, const v
             stats.MergeStats(*l, column_id.index, *row_group->GetStatistics(column_id.index));
         }
     } while (pos < updates.size());
-    // on update destroy the sample
-    auto stats_guard = stats.GetLock();
-    stats.DestroyTableSample(*stats_guard);
 }

 void RowGroupCollection::RemoveFromIndexes(TableIndexList &indexes, Vector &row_identifiers, idx_t count) {
     auto row_ids = FlatVector::GetData<row_t>(row_identifiers);

-    //
-
-
+    // Collect all indexed columns.
+    unordered_set<column_t> indexed_column_id_set;
+    indexes.Scan([&](Index &index) {
+        D_ASSERT(index.IsBound());
+        auto &set = index.GetColumnIdSet();
+        indexed_column_id_set.insert(set.begin(), set.end());
+        return false;
+    });
     vector<StorageIndex> column_ids;
-
-
-        column_ids.emplace_back(i);
+    for (auto &col : indexed_column_id_set) {
+        column_ids.emplace_back(col);
     }
+    sort(column_ids.begin(), column_ids.end());
+
+    vector<LogicalType> column_types;
+    for (auto &col : column_ids) {
+        column_types.push_back(types[col.GetPrimaryIndex()]);
+    }
+
+    // Initialize the fetch state. Only use indexed columns.
+    TableScanState state;
     state.Initialize(std::move(column_ids));
     state.table_state.max_row = row_start + total_rows;

-    //
-    DataChunk
-
+    // Used for scanning data. Only contains the indexed columns.
+    DataChunk fetch_chunk;
+    fetch_chunk.Initialize(GetAllocator(), column_types);
+
+    // Used for index value removal.
+    // Contains all columns but only initializes indexed ones.
+    DataChunk result_chunk;
+    auto fetched_columns = vector<bool>(types.size(), false);
+    result_chunk.Initialize(GetAllocator(), types, fetched_columns);
+
+    // Now set all to-be-fetched columns.
+    for (auto &col : indexed_column_id_set) {
+        fetched_columns[col] = true;
+    }

+    // Iterate over the row ids.
     SelectionVector sel(STANDARD_VECTOR_SIZE);
-    // now iterate over the row ids
     for (idx_t r = 0; r < count;) {
-
-
+        fetch_chunk.Reset();
+        result_chunk.Reset();
+
+        // Figure out which row_group to fetch from.
         auto row_id = row_ids[r];
         auto row_group = row_groups->GetSegment(UnsafeNumericCast<idx_t>(row_id));
         auto row_group_vector_idx = (UnsafeNumericCast<idx_t>(row_id) - row_group->start) / STANDARD_VECTOR_SIZE;
         auto base_row_id = row_group_vector_idx * STANDARD_VECTOR_SIZE + row_group->start;

-        //
+        // Fetch the current vector into fetch_chunk.
         state.table_state.Initialize(GetTypes());
         row_group->InitializeScanWithOffset(state.table_state, row_group_vector_idx);
-        row_group->ScanCommitted(state.table_state,
-
+        row_group->ScanCommitted(state.table_state, fetch_chunk, TableScanType::TABLE_SCAN_COMMITTED_ROWS);
+        fetch_chunk.Verify();

-        //
-        //
+        // Check for any remaining row ids, if they also fall into this vector.
+        // We try to fetch as many rows as possible at the same time.
         idx_t sel_count = 0;
         for (; r < count; r++) {
             idx_t current_row = idx_t(row_ids[r]);
-            if (current_row < base_row_id || current_row >= base_row_id +
-                //
+            if (current_row < base_row_id || current_row >= base_row_id + fetch_chunk.size()) {
+                // This row id does not fall into the current chunk.
                 break;
             }
             auto row_in_vector = current_row - base_row_id;
-            D_ASSERT(row_in_vector <
+            D_ASSERT(row_in_vector < fetch_chunk.size());
             sel.set_index(sel_count++, row_in_vector);
         }
         D_ASSERT(sel_count > 0);
-        // slice the vector with all rows that are present in this vector and erase from the index
-        result.Slice(sel, sel_count);

+        // Reference the necessary columns of the fetch_chunk.
+        idx_t fetch_idx = 0;
+        for (idx_t j = 0; j < types.size(); j++) {
+            if (fetched_columns[j]) {
+                result_chunk.data[j].Reference(fetch_chunk.data[fetch_idx++]);
+                continue;
+            }
+            result_chunk.data[j].Reference(Value(types[j]));
+        }
+        result_chunk.SetCardinality(fetch_chunk);
+
+        // Slice the vector with all rows that are present in this vector.
+        // Then, erase all values from the indexes.
+        result_chunk.Slice(sel, sel_count);
         indexes.Scan([&](Index &index) {
             if (index.IsBound()) {
-                index.Cast<BoundIndex>().Delete(
-
-                throw MissingExtensionException(
-                    "Cannot delete from index '%s', unknown index type '%s'. You need to load the "
-                    "extension that provides this index type before table '%s' can be modified.",
-                    index.GetIndexName(), index.GetIndexType(), info->GetTableName());
+                index.Cast<BoundIndex>().Delete(result_chunk, row_identifiers);
+                return false;
             }
-
+            throw MissingExtensionException(
+                "Cannot delete from index '%s', unknown index type '%s'. You need to load the "
+                "extension that provides this index type before table '%s' can be modified.",
+                index.GetIndexName(), index.GetIndexType(), info->GetTableName());
         });
     }
 }
@@ -1134,8 +1143,6 @@ shared_ptr<RowGroupCollection> RowGroupCollection::AddColumn(ClientContext &cont

         result->row_groups->AppendSegment(std::move(new_row_group));
     }
-    // When adding a column destroy the sample
-    stats.DestroyTableSample(*lock);

     return result;
 }
@@ -1254,14 +1261,6 @@ unique_ptr<BaseStatistics> RowGroupCollection::CopyStats(column_t column_id) {
 }

 unique_ptr<BlockingSample> RowGroupCollection::GetSample() {
-    auto lock = stats.GetLock();
-    auto &sample = stats.GetTableSampleRef(*lock);
-    if (!sample.destroyed) {
-        D_ASSERT(sample.type == SampleType::RESERVOIR_SAMPLE);
-        auto ret = sample.Copy();
-        ret->Cast<ReservoirSample>().EvictOverBudgetSamples();
-        return ret;
-    }
     return nullptr;
 }

package/src/duckdb/src/storage/table/table_statistics.cpp

@@ -142,10 +142,10 @@ ColumnStatistics &TableStatistics::GetStats(TableStatisticsLock &lock, idx_t i)
     return *column_stats[i];
 }

-BlockingSample &TableStatistics::GetTableSampleRef(TableStatisticsLock &lock) {
-    D_ASSERT(table_sample);
-    return *table_sample;
-}
+// BlockingSample &TableStatistics::GetTableSampleRef(TableStatisticsLock &lock) {
+//     D_ASSERT(table_sample);
+//     return *table_sample;
+//}

 unique_ptr<BlockingSample> TableStatistics::GetTableSample(TableStatisticsLock &lock) {
     return std::move(table_sample);