duckdb 1.2.1-dev6.0 → 1.2.1-dev8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. package/package.json +1 -1
  2. package/src/duckdb/extension/core_functions/aggregate/distributive/string_agg.cpp +14 -22
  3. package/src/duckdb/extension/core_functions/aggregate/nested/list.cpp +0 -1
  4. package/src/duckdb/extension/core_functions/lambda_functions.cpp +0 -11
  5. package/src/duckdb/extension/core_functions/scalar/list/list_aggregates.cpp +18 -6
  6. package/src/duckdb/extension/icu/icu-datefunc.cpp +9 -2
  7. package/src/duckdb/extension/icu/icu-strptime.cpp +7 -11
  8. package/src/duckdb/extension/icu/include/icu-datefunc.hpp +3 -1
  9. package/src/duckdb/extension/json/buffered_json_reader.cpp +18 -31
  10. package/src/duckdb/extension/json/json_extension.cpp +8 -3
  11. package/src/duckdb/extension/parquet/column_reader.cpp +4 -6
  12. package/src/duckdb/extension/parquet/column_writer.cpp +33 -12
  13. package/src/duckdb/extension/parquet/include/column_reader.hpp +0 -2
  14. package/src/duckdb/extension/parquet/include/parquet_bss_encoder.hpp +0 -1
  15. package/src/duckdb/extension/parquet/include/parquet_dlba_encoder.hpp +1 -2
  16. package/src/duckdb/src/catalog/catalog.cpp +12 -0
  17. package/src/duckdb/src/catalog/catalog_entry/duck_table_entry.cpp +1 -1
  18. package/src/duckdb/src/catalog/catalog_entry_retriever.cpp +1 -1
  19. package/src/duckdb/src/catalog/catalog_search_path.cpp +8 -8
  20. package/src/duckdb/src/common/bind_helpers.cpp +3 -0
  21. package/src/duckdb/src/common/compressed_file_system.cpp +2 -0
  22. package/src/duckdb/src/common/hive_partitioning.cpp +1 -1
  23. package/src/duckdb/src/common/multi_file_reader.cpp +3 -3
  24. package/src/duckdb/src/execution/aggregate_hashtable.cpp +1 -1
  25. package/src/duckdb/src/execution/index/art/art.cpp +19 -6
  26. package/src/duckdb/src/execution/index/art/iterator.cpp +7 -3
  27. package/src/duckdb/src/execution/operator/aggregate/physical_window.cpp +11 -4
  28. package/src/duckdb/src/execution/operator/csv_scanner/buffer_manager/csv_buffer.cpp +2 -2
  29. package/src/duckdb/src/execution/operator/csv_scanner/encode/csv_encoder.cpp +5 -1
  30. package/src/duckdb/src/execution/operator/csv_scanner/scanner/base_scanner.cpp +3 -2
  31. package/src/duckdb/src/execution/operator/csv_scanner/scanner/csv_schema.cpp +2 -2
  32. package/src/duckdb/src/execution/operator/csv_scanner/scanner/scanner_boundary.cpp +1 -1
  33. package/src/duckdb/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp +20 -12
  34. package/src/duckdb/src/execution/operator/csv_scanner/sniffer/dialect_detection.cpp +19 -22
  35. package/src/duckdb/src/execution/operator/csv_scanner/sniffer/type_refinement.cpp +1 -1
  36. package/src/duckdb/src/execution/operator/csv_scanner/util/csv_error.cpp +1 -0
  37. package/src/duckdb/src/execution/operator/csv_scanner/util/csv_reader_options.cpp +16 -0
  38. package/src/duckdb/src/execution/operator/helper/physical_reservoir_sample.cpp +1 -0
  39. package/src/duckdb/src/execution/operator/helper/physical_streaming_sample.cpp +16 -7
  40. package/src/duckdb/src/execution/operator/persistent/physical_batch_insert.cpp +3 -1
  41. package/src/duckdb/src/execution/operator/scan/physical_table_scan.cpp +11 -1
  42. package/src/duckdb/src/execution/operator/schema/physical_create_art_index.cpp +5 -7
  43. package/src/duckdb/src/execution/physical_plan/plan_create_index.cpp +11 -0
  44. package/src/duckdb/src/execution/physical_plan/plan_sample.cpp +1 -3
  45. package/src/duckdb/src/execution/radix_partitioned_hashtable.cpp +14 -5
  46. package/src/duckdb/src/execution/sample/reservoir_sample.cpp +24 -12
  47. package/src/duckdb/src/function/scalar/generic/getvariable.cpp +3 -3
  48. package/src/duckdb/src/function/table/version/pragma_version.cpp +3 -3
  49. package/src/duckdb/src/function/window/window_aggregate_states.cpp +3 -0
  50. package/src/duckdb/src/function/window/window_boundaries_state.cpp +108 -48
  51. package/src/duckdb/src/function/window/window_constant_aggregator.cpp +5 -5
  52. package/src/duckdb/src/function/window/window_distinct_aggregator.cpp +6 -0
  53. package/src/duckdb/src/include/duckdb/catalog/catalog_entry_retriever.hpp +1 -1
  54. package/src/duckdb/src/include/duckdb/catalog/catalog_search_path.hpp +10 -9
  55. package/src/duckdb/src/include/duckdb/common/adbc/adbc-init.hpp +1 -1
  56. package/src/duckdb/src/include/duckdb/common/multi_file_reader.hpp +2 -2
  57. package/src/duckdb/src/include/duckdb/execution/index/art/iterator.hpp +2 -0
  58. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/base_scanner.hpp +1 -1
  59. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/csv_buffer.hpp +5 -4
  60. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/csv_option.hpp +1 -1
  61. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/csv_schema.hpp +2 -2
  62. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/encode/csv_encoder.hpp +1 -1
  63. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/sniffer/csv_sniffer.hpp +1 -1
  64. package/src/duckdb/src/include/duckdb/execution/operator/csv_scanner/string_value_scanner.hpp +2 -2
  65. package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_streaming_sample.hpp +3 -7
  66. package/src/duckdb/src/include/duckdb/execution/reservoir_sample.hpp +2 -1
  67. package/src/duckdb/src/include/duckdb/function/lambda_functions.hpp +11 -3
  68. package/src/duckdb/src/include/duckdb/function/window/window_boundaries_state.hpp +4 -0
  69. package/src/duckdb/src/include/duckdb/main/client_context_state.hpp +4 -0
  70. package/src/duckdb/src/include/duckdb/main/extension_entries.hpp +25 -7
  71. package/src/duckdb/src/include/duckdb/main/pending_query_result.hpp +2 -0
  72. package/src/duckdb/src/include/duckdb/main/query_profiler.hpp +7 -0
  73. package/src/duckdb/src/include/duckdb/optimizer/filter_combiner.hpp +2 -2
  74. package/src/duckdb/src/include/duckdb/optimizer/late_materialization.hpp +2 -1
  75. package/src/duckdb/src/include/duckdb/optimizer/optimizer_extension.hpp +11 -5
  76. package/src/duckdb/src/include/duckdb/parallel/executor_task.hpp +4 -1
  77. package/src/duckdb/src/include/duckdb/parallel/pipeline.hpp +0 -1
  78. package/src/duckdb/src/include/duckdb/parallel/task_executor.hpp +3 -0
  79. package/src/duckdb/src/include/duckdb/parallel/task_notifier.hpp +27 -0
  80. package/src/duckdb/src/include/duckdb/parallel/task_scheduler.hpp +4 -0
  81. package/src/duckdb/src/include/duckdb/planner/expression/bound_subquery_expression.hpp +1 -1
  82. package/src/duckdb/src/include/duckdb/planner/tableref/bound_cteref.hpp +1 -0
  83. package/src/duckdb/src/include/duckdb/storage/checkpoint/table_data_writer.hpp +3 -1
  84. package/src/duckdb/src/include/duckdb/storage/checkpoint_manager.hpp +7 -1
  85. package/src/duckdb/src/include/duckdb/storage/storage_manager.hpp +3 -2
  86. package/src/duckdb/src/include/duckdb.h +495 -480
  87. package/src/duckdb/src/main/attached_database.cpp +1 -1
  88. package/src/duckdb/src/main/capi/duckdb-c.cpp +5 -1
  89. package/src/duckdb/src/main/capi/helper-c.cpp +8 -0
  90. package/src/duckdb/src/main/config.cpp +7 -1
  91. package/src/duckdb/src/main/database.cpp +8 -8
  92. package/src/duckdb/src/main/extension/extension_helper.cpp +3 -1
  93. package/src/duckdb/src/main/extension/extension_load.cpp +12 -12
  94. package/src/duckdb/src/optimizer/column_lifetime_analyzer.cpp +1 -0
  95. package/src/duckdb/src/optimizer/join_order/query_graph_manager.cpp +2 -2
  96. package/src/duckdb/src/optimizer/late_materialization.cpp +26 -5
  97. package/src/duckdb/src/optimizer/optimizer.cpp +12 -1
  98. package/src/duckdb/src/parallel/executor_task.cpp +10 -6
  99. package/src/duckdb/src/parallel/task_executor.cpp +4 -1
  100. package/src/duckdb/src/parallel/task_notifier.cpp +23 -0
  101. package/src/duckdb/src/parallel/task_scheduler.cpp +33 -0
  102. package/src/duckdb/src/parser/transform/expression/transform_subquery.cpp +4 -1
  103. package/src/duckdb/src/planner/binder/expression/bind_subquery_expression.cpp +1 -1
  104. package/src/duckdb/src/planner/binder/query_node/plan_subquery.cpp +4 -2
  105. package/src/duckdb/src/planner/binder/statement/bind_create.cpp +7 -2
  106. package/src/duckdb/src/planner/binder/statement/bind_create_table.cpp +6 -5
  107. package/src/duckdb/src/storage/checkpoint/table_data_writer.cpp +4 -2
  108. package/src/duckdb/src/storage/checkpoint_manager.cpp +4 -3
  109. package/src/duckdb/src/storage/compression/string_uncompressed.cpp +21 -10
  110. package/src/duckdb/src/storage/storage_info.cpp +2 -0
  111. package/src/duckdb/src/storage/storage_manager.cpp +2 -2
  112. package/src/duckdb/src/storage/table/row_group.cpp +5 -6
  113. package/src/duckdb/src/storage/table/scan_state.cpp +6 -0
  114. package/src/duckdb/src/transaction/duck_transaction.cpp +11 -3
  115. package/src/duckdb/src/transaction/duck_transaction_manager.cpp +2 -2
  116. package/src/duckdb/third_party/concurrentqueue/concurrentqueue.h +17 -0
  117. package/src/duckdb/ub_src_parallel.cpp +2 -0

package/src/duckdb/src/execution/operator/schema/physical_create_art_index.cpp

@@ -34,6 +34,7 @@ PhysicalCreateARTIndex::PhysicalCreateARTIndex(LogicalOperator &op, TableCatalog
 
 class CreateARTIndexGlobalSinkState : public GlobalSinkState {
 public:
+    //! We merge the local indexes into one global index.
     unique_ptr<BoundIndex> global_index;
 };
 
@@ -53,8 +54,10 @@ public:
 };
 
 unique_ptr<GlobalSinkState> PhysicalCreateARTIndex::GetGlobalSinkState(ClientContext &context) const {
-    // Create the global sink state and add the global index.
+    // Create the global sink state.
     auto state = make_uniq<CreateARTIndexGlobalSinkState>();
+
+    // Create the global index.
     auto &storage = table.GetStorage();
     state->global_index = make_uniq<ART>(info->index_name, info->constraint_type, storage_ids,
                                          TableIOManager::Get(storage), unbound_expressions, storage.db);
@@ -123,7 +126,6 @@ SinkResultType PhysicalCreateARTIndex::SinkSorted(OperatorSinkInput &input) cons
 
 SinkResultType PhysicalCreateARTIndex::Sink(ExecutionContext &context, DataChunk &chunk,
                                             OperatorSinkInput &input) const {
-
     D_ASSERT(chunk.ColumnCount() >= 2);
     auto &l_state = input.local_state.Cast<CreateARTIndexLocalSinkState>();
     l_state.arena_allocator.Reset();
@@ -151,11 +153,10 @@ SinkResultType PhysicalCreateARTIndex::Sink(ExecutionContext &context, DataChunk
 
 SinkCombineResultType PhysicalCreateARTIndex::Combine(ExecutionContext &context,
                                                       OperatorSinkCombineInput &input) const {
-
     auto &g_state = input.global_state.Cast<CreateARTIndexGlobalSinkState>();
-    auto &l_state = input.local_state.Cast<CreateARTIndexLocalSinkState>();
 
     // Merge the local index into the global index.
+    auto &l_state = input.local_state.Cast<CreateARTIndexLocalSinkState>();
     if (!g_state.global_index->MergeIndexes(*l_state.local_index)) {
         throw ConstraintException("Data contains duplicates on indexed column(s)");
     }
@@ -165,8 +166,6 @@ SinkCombineResultType PhysicalCreateARTIndex::Combine(ExecutionContext &context,
 
 SinkFinalizeType PhysicalCreateARTIndex::Finalize(Pipeline &pipeline, Event &event, ClientContext &context,
                                                   OperatorSinkFinalizeInput &input) const {
-
-    // Here, we set the resulting global index as the newly created index of the table.
     auto &state = input.global_state.Cast<CreateARTIndexGlobalSinkState>();
 
     // Vacuum excess memory and verify.
@@ -182,7 +181,6 @@ SinkFinalizeType PhysicalCreateARTIndex::Finalize(Pipeline &pipeline, Event &eve
     auto &schema = table.schema;
     info->column_ids = storage_ids;
 
-    // FIXME: We should check for catalog exceptions prior to index creation, and later double-check.
     if (!alter_table_info) {
         // Ensure that the index does not yet exist in the catalog.
         auto entry = schema.GetEntry(schema.GetCatalogTransaction(context), CatalogType::INDEX_ENTRY, info->index_name);

package/src/duckdb/src/execution/physical_plan/plan_create_index.cpp

@@ -6,10 +6,21 @@
 #include "duckdb/planner/expression/bound_reference_expression.hpp"
 #include "duckdb/planner/operator/logical_create_index.hpp"
 #include "duckdb/planner/operator/logical_get.hpp"
+#include "duckdb/execution/operator/scan/physical_dummy_scan.hpp"
 
 namespace duckdb {
 
 unique_ptr<PhysicalOperator> PhysicalPlanGenerator::CreatePlan(LogicalCreateIndex &op) {
+    // Early-out, if the index already exists.
+    auto &schema = op.table.schema;
+    auto entry = schema.GetEntry(schema.GetCatalogTransaction(context), CatalogType::INDEX_ENTRY, op.info->index_name);
+    if (entry) {
+        if (op.info->on_conflict != OnCreateConflict::IGNORE_ON_CONFLICT) {
+            throw CatalogException("Index with name \"%s\" already exists!", op.info->index_name);
+        }
+        return make_uniq<PhysicalDummyScan>(op.types, op.estimated_cardinality);
+    }
+
     // Ensure that all expressions contain valid scalar functions.
     // E.g., get_current_timestamp(), random(), and sequence values cannot be index keys.
     for (idx_t i = 0; i < op.unbound_expressions.size(); i++) {
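
With this early-out, a duplicate CREATE INDEX is resolved at planning time: IF NOT EXISTS plans a dummy scan (a no-op), and a plain re-creation raises the catalog error before any index build starts. A minimal sketch of the user-visible behaviour, assuming the standard duckdb.hpp embedding API; the table and index names are illustrative:

    #include "duckdb.hpp"
    #include <iostream>

    int main() {
        duckdb::DuckDB db(nullptr); // in-memory database
        duckdb::Connection con(db);

        con.Query("CREATE TABLE t (i INTEGER)");
        con.Query("CREATE INDEX idx ON t (i)");

        // Second creation with IF NOT EXISTS: planned as a dummy scan, i.e. a no-op.
        auto ok = con.Query("CREATE INDEX IF NOT EXISTS idx ON t (i)");
        std::cout << (ok->HasError() ? ok->GetError() : "no-op") << std::endl;

        // Without IF NOT EXISTS, the planner now surfaces the catalog error up front.
        auto err = con.Query("CREATE INDEX idx ON t (i)");
        std::cout << err->GetError() << std::endl;
        return 0;
    }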

package/src/duckdb/src/execution/physical_plan/plan_sample.cpp

@@ -28,9 +28,7 @@ unique_ptr<PhysicalOperator> PhysicalPlanGenerator::CreatePlan(LogicalSample &op
                             "reservoir sampling or use a sample_size",
                             EnumUtil::ToString(op.sample_options->method));
         }
-        sample = make_uniq<PhysicalStreamingSample>(
-            op.types, op.sample_options->method, op.sample_options->sample_size.GetValue<double>(),
-            static_cast<int64_t>(op.sample_options->seed.GetIndex()), op.estimated_cardinality);
+        sample = make_uniq<PhysicalStreamingSample>(op.types, std::move(op.sample_options), op.estimated_cardinality);
         break;
     default:
         throw InternalException("Unimplemented sample method");
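
PhysicalStreamingSample now receives the whole SampleOptions object rather than an unpacked method, percentage, and seed. A minimal sketch of a query that goes through the streaming (system) sampler, again assuming the duckdb.hpp embedding API; the table is illustrative:

    #include "duckdb.hpp"

    int main() {
        duckdb::DuckDB db(nullptr);
        duckdb::Connection con(db);

        con.Query("CREATE TABLE numbers AS SELECT range AS i FROM range(100000)");

        // System sampling is percentage-based and handled by PhysicalStreamingSample;
        // the optional second argument fixes the seed for reproducibility.
        auto result = con.Query("SELECT count(*) FROM numbers USING SAMPLE 10 PERCENT (system, 42)");
        result->Print();
        return 0;
    }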

package/src/duckdb/src/execution/radix_partitioned_hashtable.cpp

@@ -97,6 +97,7 @@ public:
     void SetRadixBits(const idx_t &radix_bits_p);
     bool SetRadixBitsToExternal();
     idx_t GetRadixBits() const;
+    idx_t GetMaximumSinkRadixBits() const;
     idx_t GetExternalRadixBits() const;
 
 private:
@@ -161,7 +162,7 @@ public:
     ClientContext &context;
     //! Temporary memory state for managing this hash table's memory usage
     unique_ptr<TemporaryMemoryState> temporary_memory_state;
-    idx_t minimum_reservation;
+    atomic<idx_t> minimum_reservation;
 
     //! Whether we've called Finalize
     bool finalized;
@@ -211,11 +212,11 @@ RadixHTGlobalSinkState::RadixHTGlobalSinkState(ClientContext &context_p, const R
     auto tuples_per_block = block_alloc_size / radix_ht.GetLayout().GetRowWidth();
     idx_t ht_count =
         LossyNumericCast<idx_t>(static_cast<double>(config.sink_capacity) / GroupedAggregateHashTable::LOAD_FACTOR);
-    auto num_partitions = RadixPartitioning::NumberOfPartitions(config.GetExternalRadixBits());
+    auto num_partitions = RadixPartitioning::NumberOfPartitions(config.GetMaximumSinkRadixBits());
     auto count_per_partition = ht_count / num_partitions;
-    auto blocks_per_partition = (count_per_partition + tuples_per_block) / tuples_per_block + 1;
+    auto blocks_per_partition = (count_per_partition + tuples_per_block) / tuples_per_block;
     if (!radix_ht.GetLayout().AllConstant()) {
-        blocks_per_partition += 2;
+        blocks_per_partition += 1;
     }
     auto ht_size = num_partitions * blocks_per_partition * block_alloc_size + config.sink_capacity * sizeof(ht_entry_t);
 
@@ -281,6 +282,10 @@ idx_t RadixHTConfig::GetRadixBits() const {
     return sink_radix_bits;
 }
 
+idx_t RadixHTConfig::GetMaximumSinkRadixBits() const {
+    return maximum_sink_radix_bits;
+}
+
 idx_t RadixHTConfig::GetExternalRadixBits() const {
     return MAXIMUM_FINAL_SINK_RADIX_BITS;
 }
@@ -296,8 +301,12 @@ void RadixHTConfig::SetRadixBitsInternal(const idx_t radix_bits_p, bool external
     }
 
     if (external) {
+        const auto partition_multiplier = RadixPartitioning::NumberOfPartitions(radix_bits_p) /
+                                          RadixPartitioning::NumberOfPartitions(sink_radix_bits);
+        sink.minimum_reservation = sink.minimum_reservation * partition_multiplier;
         sink.external = true;
     }
+
     sink_radix_bits = radix_bits_p;
 }
 
@@ -590,7 +599,7 @@ idx_t RadixPartitionedHashTable::MaxThreads(GlobalSinkState &sink_p) const {
 
     // we cannot spill aggregate state memory
     const auto usable_memory = sink.temporary_memory_state->GetReservation() > sink.stored_allocators_size
-                                   ? sink.temporary_memory_state->GetReservation() - sink.max_partition_size
+                                   ? sink.temporary_memory_state->GetReservation() - sink.stored_allocators_size
                                   : 0;
     // This many partitions will fit given our reservation (at least 1))
     const auto partitions_fit = MaxValue<idx_t>(usable_memory / sink.max_partition_size, 1);
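
The sizing change above drops the unconditional extra block per partition, keeps only one extra block for variable-size layouts, and bases the partition count on the maximum sink radix bits. A back-of-the-envelope sketch of the ht_size formula with purely illustrative numbers (the real values come from the buffer manager, the row layout, and RadixHTConfig):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Illustrative numbers only; in DuckDB these come from the buffer manager,
        // the aggregate row layout, and RadixHTConfig.
        const uint64_t block_alloc_size = 262144; // 256 KiB per block
        const uint64_t row_width = 32;            // bytes per row in the layout
        const uint64_t sink_capacity = 131072;    // hash table entry count
        const double load_factor = 1.5;           // stand-in for GroupedAggregateHashTable::LOAD_FACTOR
        const uint64_t ht_entry_size = 8;         // stand-in for sizeof(ht_entry_t)
        const uint64_t num_partitions = 8;        // 2^(maximum sink radix bits)

        const uint64_t tuples_per_block = block_alloc_size / row_width;
        const uint64_t ht_count = static_cast<uint64_t>(sink_capacity / load_factor);
        const uint64_t count_per_partition = ht_count / num_partitions;

        // After the patch: no trailing "+ 1", and a single extra block when the
        // layout contains variable-size (non-constant) columns.
        uint64_t blocks_per_partition = (count_per_partition + tuples_per_block) / tuples_per_block;
        blocks_per_partition += 1;

        const uint64_t ht_size =
            num_partitions * blocks_per_partition * block_alloc_size + sink_capacity * ht_entry_size;
        std::printf("estimated sink reservation: %llu bytes\n", static_cast<unsigned long long>(ht_size));
        return 0;
    }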

package/src/duckdb/src/execution/sample/reservoir_sample.cpp

@@ -166,8 +166,15 @@ unique_ptr<ReservoirChunk> ReservoirSample::CreateNewSampleChunk(vector<LogicalT
 
 void ReservoirSample::Vacuum() {
     Verify();
-    if (NumSamplesCollected() <= FIXED_SAMPLE_SIZE || !reservoir_chunk || destroyed) {
+    bool do_vacuum = false;
+    // when it's not a stats sample, sometimes we neverr collect more than FIXED_SAMPLE_SIZE tuples
+    // but we still need to vacuum, so the rules are a little bit different.
+    if (!stats_sample && GetActiveSampleCount() <= static_cast<idx_t>(GetReservoirChunkCapacity<double>() * 0.8)) {
+        do_vacuum = true;
+    }
+    if (!do_vacuum && (NumSamplesCollected() <= FIXED_SAMPLE_SIZE || !reservoir_chunk || destroyed)) {
         // sample is destroyed or too small to shrink
+        // sample does not need to be vacuumed.
         return;
     }
 
@@ -201,7 +208,7 @@ unique_ptr<BlockingSample> ReservoirSample::Copy() const {
     // how many values should be copied
     idx_t values_to_copy = MinValue<idx_t>(GetActiveSampleCount(), sample_count);
 
-    auto new_sample_chunk = CreateNewSampleChunk(types, GetReservoirChunkCapacity());
+    auto new_sample_chunk = CreateNewSampleChunk(types, GetReservoirChunkCapacity<idx_t>());
 
     SelectionVector sel_copy(sel);
 
@@ -295,7 +302,7 @@ void ReservoirSample::SimpleMerge(ReservoirSample &other) {
     idx_t size_after_merge = MinValue<idx_t>(keep_from_other + keep_from_this, FIXED_SAMPLE_SIZE);
 
     // Check if appending the other samples to this will go over the sample chunk size
-    if (reservoir_chunk->chunk.size() + keep_from_other > GetReservoirChunkCapacity()) {
+    if (reservoir_chunk->chunk.size() + keep_from_other > GetReservoirChunkCapacity<idx_t>()) {
         Vacuum();
     }
 
@@ -542,7 +549,7 @@ void ReservoirSample::ExpandSerializedSample() {
     }
 
     auto types = reservoir_chunk->chunk.GetTypes();
-    auto new_res_chunk = CreateNewSampleChunk(types, GetReservoirChunkCapacity());
+    auto new_res_chunk = CreateNewSampleChunk(types, GetReservoirChunkCapacity<idx_t>());
     auto copy_count = reservoir_chunk->chunk.size();
     SelectionVector tmp_sel = SelectionVector(0, copy_count);
     UpdateSampleAppend(new_res_chunk->chunk, reservoir_chunk->chunk, tmp_sel, copy_count);
@@ -550,8 +557,10 @@ void ReservoirSample::ExpandSerializedSample() {
     std::swap(reservoir_chunk, new_res_chunk);
 }
 
-idx_t ReservoirSample::GetReservoirChunkCapacity() const {
-    return sample_count + (FIXED_SAMPLE_SIZE_MULTIPLIER * MinValue<idx_t>(sample_count, FIXED_SAMPLE_SIZE));
+template <typename T>
+T ReservoirSample::GetReservoirChunkCapacity() const {
+    return static_cast<T>(sample_count +
+                          (FIXED_SAMPLE_SIZE_MULTIPLIER * MinValue<idx_t>(sample_count, FIXED_SAMPLE_SIZE)));
 }
 
 idx_t ReservoirSample::FillReservoir(DataChunk &chunk) {
@@ -563,7 +572,7 @@ idx_t ReservoirSample::FillReservoir(DataChunk &chunk) {
         }
         auto types = chunk.GetTypes();
         // create a new sample chunk to store new samples
-        reservoir_chunk = CreateNewSampleChunk(types, GetReservoirChunkCapacity());
+        reservoir_chunk = CreateNewSampleChunk(types, GetReservoirChunkCapacity<idx_t>());
     }
 
     idx_t actual_sample_index_start = GetActiveSampleCount();
@@ -694,9 +703,6 @@ void ReservoirSample::UpdateSampleAppend(DataChunk &this_, DataChunk &other, Sel
         return;
     }
     D_ASSERT(this_.GetTypes() == other.GetTypes());
-
-    // UpdateSampleAppend(this_, other, other_sel, append_count);
-    D_ASSERT(this_.GetTypes() == other.GetTypes());
     auto types = reservoir_chunk->chunk.GetTypes();
 
     for (idx_t i = 0; i < reservoir_chunk->chunk.ColumnCount(); i++) {
@@ -714,6 +720,9 @@ void ReservoirSample::AddToReservoir(DataChunk &chunk) {
         return;
     }
 
+    if (!reservoir_chunk && GetReservoirChunkCapacity<idx_t>() == 0) {
+        return;
+    }
     idx_t tuples_consumed = FillReservoir(chunk);
     base_reservoir_sample->num_entries_seen_total += tuples_consumed;
     D_ASSERT(sample_count == 0 || reservoir_chunk->chunk.size() >= 1);
@@ -752,8 +761,10 @@ void ReservoirSample::AddToReservoir(DataChunk &chunk) {
         base_reservoir_sample->num_entries_seen_total += chunk.size();
         return;
     }
+
     idx_t size = chunk_sel.size;
     D_ASSERT(size <= chunk.size());
+    D_ASSERT(reservoir_chunk->chunk.size() < GetReservoirChunkCapacity<idx_t>());
 
     UpdateSampleAppend(reservoir_chunk->chunk, chunk, chunk_sel.sel, size);
 
@@ -763,11 +774,12 @@ void ReservoirSample::AddToReservoir(DataChunk &chunk) {
 
     Verify();
 
-    // if we are over the threshold, we ned to swith to slow sampling.
+    // if we are over the threshold, we ned to switch to slow sampling.
     if (GetSamplingState() == SamplingState::RANDOM && GetTuplesSeen() >= FIXED_SAMPLE_SIZE * FAST_TO_SLOW_THRESHOLD) {
         ConvertToReservoirSample();
     }
-    if (reservoir_chunk->chunk.size() >= (GetReservoirChunkCapacity() - (static_cast<idx_t>(FIXED_SAMPLE_SIZE) * 3))) {
+    if (static_cast<int64_t>(reservoir_chunk->chunk.size()) >=
+        GetReservoirChunkCapacity<int64_t>() - (static_cast<int64_t>(FIXED_SAMPLE_SIZE) * 3)) {
         Vacuum();
     }
 }

package/src/duckdb/src/function/scalar/generic/getvariable.cpp

@@ -24,12 +24,12 @@ struct GetVariableBindData : FunctionData {
 
 static unique_ptr<FunctionData> GetVariableBind(ClientContext &context, ScalarFunction &function,
                                                 vector<unique_ptr<Expression>> &arguments) {
+    if (arguments[0]->HasParameter() || arguments[0]->return_type.id() == LogicalTypeId::UNKNOWN) {
+        throw ParameterNotResolvedException();
+    }
     if (!arguments[0]->IsFoldable()) {
         throw NotImplementedException("getvariable requires a constant input");
     }
-    if (arguments[0]->HasParameter()) {
-        throw ParameterNotResolvedException();
-    }
     Value value;
     auto variable_name = ExpressionExecutor::EvaluateScalar(context, *arguments[0]);
     if (!variable_name.IsNull()) {
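
Reordering the checks means an unresolved prepared-statement parameter (or a still-unknown type) defers binding instead of failing the constant-input check. A minimal usage sketch, assuming the duckdb.hpp embedding API; the variable name is illustrative, and the prepared-statement behaviour noted in the comment is an inference from the diff rather than documented output:

    #include "duckdb.hpp"

    int main() {
        duckdb::DuckDB db(nullptr);
        duckdb::Connection con(db);

        // SET VARIABLE stores a session-scoped value; getvariable() reads it back.
        con.Query("SET VARIABLE table_suffix = 'events_2024'");
        con.Query("SELECT getvariable('table_suffix') AS suffix")->Print();

        // The reordered checks target calls where the argument is still a parameter:
        // binding is deferred (ParameterNotResolvedException) instead of being
        // rejected with "getvariable requires a constant input".
        auto prepared = con.Prepare("SELECT getvariable(?) AS value");
        if (!prepared->HasError()) {
            prepared->Execute("table_suffix")->Print();
        }
        return 0;
    }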

package/src/duckdb/src/function/table/version/pragma_version.cpp

@@ -1,5 +1,5 @@
 #ifndef DUCKDB_PATCH_VERSION
-#define DUCKDB_PATCH_VERSION "0"
+#define DUCKDB_PATCH_VERSION "1"
 #endif
 #ifndef DUCKDB_MINOR_VERSION
 #define DUCKDB_MINOR_VERSION 2
@@ -8,10 +8,10 @@
 #define DUCKDB_MAJOR_VERSION 1
 #endif
 #ifndef DUCKDB_VERSION
-#define DUCKDB_VERSION "v1.2.0"
+#define DUCKDB_VERSION "v1.2.1"
 #endif
 #ifndef DUCKDB_SOURCE_ID
-#define DUCKDB_SOURCE_ID "5f5512b827"
+#define DUCKDB_SOURCE_ID "8e52ec4395"
 #endif
 #include "duckdb/function/table/system_functions.hpp"
 #include "duckdb/main/database.hpp"

package/src/duckdb/src/function/window/window_aggregate_states.cpp

@@ -7,6 +7,9 @@ WindowAggregateStates::WindowAggregateStates(const AggregateObject &aggr)
 }
 
 void WindowAggregateStates::Initialize(idx_t count) {
+    // Don't leak - every Initialize must be matched with a Destroy
+    D_ASSERT(states.empty());
+
     states.resize(count * state_size);
     auto state_ptr = states.data();
 

package/src/duckdb/src/function/window/window_boundaries_state.cpp

@@ -180,9 +180,9 @@ struct OperationCompare : public std::function<bool(T, T)> {
 };
 
 template <typename T, typename OP, bool FROM>
-static idx_t FindTypedRangeBound(WindowCursor &over, const idx_t order_begin, const idx_t order_end,
-                                 const WindowBoundary range, WindowInputExpression &boundary, const idx_t chunk_idx,
-                                 const FrameBounds &prev) {
+static idx_t FindTypedRangeBound(WindowCursor &range_lo, WindowCursor &range_hi, const idx_t order_begin,
+                                 const idx_t order_end, const WindowBoundary range, WindowInputExpression &boundary,
+                                 const idx_t chunk_idx, const FrameBounds &prev) {
     D_ASSERT(!boundary.CellIsNull(chunk_idx));
     const auto val = boundary.GetCell<T>(chunk_idx);
 
@@ -191,36 +191,43 @@ static idx_t FindTypedRangeBound(WindowCursor &over, const idx_t order_begin, co
     // Check that the value we are searching for is in range.
     if (range == WindowBoundary::EXPR_PRECEDING_RANGE) {
         // Preceding but value past the current value
-        const auto cur_val = over.GetCell<T>(0, order_end - 1);
+        const auto cur_val = range_hi.GetCell<T>(0, order_end - 1);
         if (comp(cur_val, val)) {
             throw OutOfRangeException("Invalid RANGE PRECEDING value");
         }
     } else {
         // Following but value before the current value
         D_ASSERT(range == WindowBoundary::EXPR_FOLLOWING_RANGE);
-        const auto cur_val = over.GetCell<T>(0, order_begin);
+        const auto cur_val = range_lo.GetCell<T>(0, order_begin);
         if (comp(val, cur_val)) {
             throw OutOfRangeException("Invalid RANGE FOLLOWING value");
         }
     }
-
     // Try to reuse the previous bounds to restrict the search.
     // This is only valid if the previous bounds were non-empty
     // Only inject the comparisons if the previous bounds are a strict subset.
-    WindowColumnIterator<T> begin(over, order_begin);
-    WindowColumnIterator<T> end(over, order_end);
+    WindowColumnIterator<T> begin(range_lo, order_begin);
+    WindowColumnIterator<T> end(range_hi, order_end);
     if (prev.start < prev.end) {
         if (order_begin < prev.start && prev.start < order_end) {
-            const auto first = over.GetCell<T>(0, prev.start);
-            if (!comp(val, first)) {
-                // prev.first <= val, so we can start further forward
+            const auto first = range_lo.GetCell<T>(0, prev.start);
+            if (FROM && !comp(val, first)) {
+                // If prev.start == val and we are looking for a lower bound, then we are done
+                if (!comp(first, val)) {
+                    return prev.start;
+                }
+                // prev.start <= val, so we can start further forward
                 begin += UnsafeNumericCast<int64_t>(prev.start - order_begin);
             }
         }
         if (order_begin < prev.end && prev.end < order_end) {
-            const auto second = over.GetCell<T>(0, prev.end - 1);
+            const auto second = range_hi.GetCell<T>(0, prev.end - 1);
             if (!comp(second, val)) {
-                // val <= prev.second, so we can end further back
+                // If val == prev.end and we are looking for an upper bound, then we are done
+                if (!FROM && !comp(val, second)) {
+                    return prev.end;
+                }
+                // val <= prev.end, so we can end further back
                 // (prev.second is the largest peer)
                 end -= UnsafeNumericCast<int64_t>(order_end - prev.end - 1);
             }
@@ -235,52 +242,65 @@ static idx_t FindTypedRangeBound(WindowCursor &over, const idx_t order_begin, co
 }
 
 template <typename OP, bool FROM>
-static idx_t FindRangeBound(WindowCursor &over, const idx_t order_begin, const idx_t order_end,
-                            const WindowBoundary range, WindowInputExpression &boundary, const idx_t chunk_idx,
-                            const FrameBounds &prev) {
+static idx_t FindRangeBound(WindowCursor &range_lo, WindowCursor &range_hi, const idx_t order_begin,
+                            const idx_t order_end, const WindowBoundary range, WindowInputExpression &boundary,
+                            const idx_t chunk_idx, const FrameBounds &prev) {
     switch (boundary.InternalType()) {
     case PhysicalType::INT8:
-        return FindTypedRangeBound<int8_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<int8_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
+                                                     chunk_idx, prev);
     case PhysicalType::INT16:
-        return FindTypedRangeBound<int16_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<int16_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
+                                                      chunk_idx, prev);
    case PhysicalType::INT32:
-        return FindTypedRangeBound<int32_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<int32_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
+                                                      chunk_idx, prev);
    case PhysicalType::INT64:
-        return FindTypedRangeBound<int64_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<int64_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                      chunk_idx, prev);
    case PhysicalType::UINT8:
-        return FindTypedRangeBound<uint8_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<uint8_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                      chunk_idx, prev);
    case PhysicalType::UINT16:
-        return FindTypedRangeBound<uint16_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<uint16_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                       chunk_idx, prev);
    case PhysicalType::UINT32:
-        return FindTypedRangeBound<uint32_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<uint32_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                       chunk_idx, prev);
    case PhysicalType::UINT64:
-        return FindTypedRangeBound<uint64_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<uint64_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                       chunk_idx, prev);
    case PhysicalType::INT128:
-        return FindTypedRangeBound<hugeint_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<hugeint_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                        chunk_idx, prev);
    case PhysicalType::UINT128:
-        return FindTypedRangeBound<uhugeint_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx,
-                                                         prev);
+        return FindTypedRangeBound<uhugeint_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                         chunk_idx, prev);
    case PhysicalType::FLOAT:
-        return FindTypedRangeBound<float, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<float, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                    chunk_idx, prev);
    case PhysicalType::DOUBLE:
-        return FindTypedRangeBound<double, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindTypedRangeBound<double, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                     chunk_idx, prev);
    case PhysicalType::INTERVAL:
-        return FindTypedRangeBound<interval_t, OP, FROM>(over, order_begin, order_end, range, boundary, chunk_idx,
-                                                         prev);
+        return FindTypedRangeBound<interval_t, OP, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary,
                                                         chunk_idx, prev);
    default:
        throw InternalException("Unsupported column type for RANGE");
    }
 }
 
 template <bool FROM>
-static idx_t FindOrderedRangeBound(WindowCursor &over, const OrderType range_sense, const idx_t order_begin,
-                                   const idx_t order_end, const WindowBoundary range, WindowInputExpression &boundary,
-                                   const idx_t chunk_idx, const FrameBounds &prev) {
+static idx_t FindOrderedRangeBound(WindowCursor &range_lo, WindowCursor &range_hi, const OrderType range_sense,
+                                   const idx_t order_begin, const idx_t order_end, const WindowBoundary range,
+                                   WindowInputExpression &boundary, const idx_t chunk_idx, const FrameBounds &prev) {
     switch (range_sense) {
     case OrderType::ASCENDING:
-        return FindRangeBound<LessThan, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindRangeBound<LessThan, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary, chunk_idx,
+                                              prev);
    case OrderType::DESCENDING:
-        return FindRangeBound<GreaterThan, FROM>(over, order_begin, order_end, range, boundary, chunk_idx, prev);
+        return FindRangeBound<GreaterThan, FROM>(range_lo, range_hi, order_begin, order_end, range, boundary, chunk_idx,
                                                 prev);
    default:
        throw InternalException("Unsupported ORDER BY sense for RANGE");
    }
@@ -686,19 +706,15 @@ void WindowBoundariesState::ValidEnd(DataChunk &bounds, idx_t row_idx, const idx
     if (!is_same_partition || is_jump) {
         // Find valid ordering values for the new partition
         // so we can exclude NULLs from RANGE expression computations
+        const auto valid_start = valid_begin_data[chunk_idx];
         valid_end = partition_end_data[chunk_idx];
 
         if ((valid_start < valid_end) && has_following_range) {
             // Exclude any trailing NULLs
-            const auto valid_start = valid_begin_data[chunk_idx];
             if (range->CellIsNull(0, valid_end - 1)) {
                 idx_t n = 1;
                 valid_end = FindPrevStart(order_mask, valid_start, valid_end, n);
             }
-
-            // Reset range hints
-            prev.start = valid_start;
-            prev.end = valid_end;
         }
     }
 
@@ -718,6 +734,18 @@ void WindowBoundariesState::FrameBegin(DataChunk &bounds, idx_t row_idx, const i
 
     idx_t window_start = NumericLimits<idx_t>::Maximum();
 
+    // Reset previous range hints
+    idx_t prev_partition = partition_begin_data[0];
+    prev.start = valid_begin_data[0];
+    prev.end = valid_end_data[0];
+
+    if (has_preceding_range || has_following_range) {
+        if (range_lo.get() != range.get()) {
+            range_lo = range.get();
+            range_hi = range_lo->Copy();
+        }
+    }
+
     switch (start_boundary) {
     case WindowBoundary::UNBOUNDED_PRECEDING:
         bounds.data[FRAME_BEGIN].Reference(bounds.data[PARTITION_BEGIN]);
@@ -766,7 +794,12 @@ void WindowBoundariesState::FrameBegin(DataChunk &bounds, idx_t row_idx, const i
         } else {
             const auto valid_start = valid_begin_data[chunk_idx];
             prev.end = valid_end_data[chunk_idx];
-            window_start = FindOrderedRangeBound<true>(*range, range_sense, valid_start, row_idx + 1,
+            const auto cur_partition = partition_begin_data[chunk_idx];
+            if (cur_partition != prev_partition) {
+                prev.start = valid_start;
+                prev_partition = cur_partition;
+            }
+            window_start = FindOrderedRangeBound<true>(*range_lo, *range_hi, range_sense, valid_start, row_idx + 1,
                                                        start_boundary, boundary_begin, chunk_idx, prev);
             prev.start = window_start;
         }
@@ -780,8 +813,13 @@ void WindowBoundariesState::FrameBegin(DataChunk &bounds, idx_t row_idx, const i
         } else {
             const auto valid_end = valid_end_data[chunk_idx];
             prev.end = valid_end;
-            window_start = FindOrderedRangeBound<true>(*range, range_sense, row_idx, valid_end, start_boundary,
-                                                       boundary_begin, chunk_idx, prev);
+            const auto cur_partition = partition_begin_data[chunk_idx];
+            if (cur_partition != prev_partition) {
+                prev.start = valid_begin_data[chunk_idx];
+                prev_partition = cur_partition;
+            }
+            window_start = FindOrderedRangeBound<true>(*range_lo, *range_hi, range_sense, row_idx, valid_end,
+                                                       start_boundary, boundary_begin, chunk_idx, prev);
             prev.start = window_start;
         }
         frame_begin_data[chunk_idx] = window_start;
@@ -852,6 +890,18 @@ void WindowBoundariesState::FrameEnd(DataChunk &bounds, idx_t row_idx, const idx
 
     idx_t window_end = NumericLimits<idx_t>::Maximum();
 
+    // Reset previous range hints
+    idx_t prev_partition = partition_begin_data[0];
+    prev.start = valid_begin_data[0];
+    prev.end = valid_end_data[0];
+
+    if (has_preceding_range || has_following_range) {
+        if (range_lo.get() != range.get()) {
+            range_lo = range.get();
+            range_hi = range_lo->Copy();
+        }
+    }
+
     switch (end_boundary) {
     case WindowBoundary::CURRENT_ROW_ROWS:
         for (idx_t chunk_idx = 0; chunk_idx < count; ++chunk_idx, ++row_idx) {
@@ -901,8 +951,13 @@ void WindowBoundariesState::FrameEnd(DataChunk &bounds, idx_t row_idx, const idx
         } else {
            const auto valid_start = valid_begin_data[chunk_idx];
            prev.start = valid_start;
-            window_end = FindOrderedRangeBound<false>(*range, range_sense, valid_start, row_idx + 1, end_boundary,
-                                                      boundary_end, chunk_idx, prev);
+            const auto cur_partition = partition_begin_data[chunk_idx];
+            if (cur_partition != prev_partition) {
+                prev.end = valid_end;
+                prev_partition = cur_partition;
+            }
+            window_end = FindOrderedRangeBound<false>(*range_lo, *range_hi, range_sense, valid_start, row_idx + 1,
+                                                      end_boundary, boundary_end, chunk_idx, prev);
            prev.end = window_end;
        }
        frame_end_data[chunk_idx] = window_end;
@@ -915,8 +970,13 @@ void WindowBoundariesState::FrameEnd(DataChunk &bounds, idx_t row_idx, const idx
         } else {
            const auto valid_end = valid_end_data[chunk_idx];
            prev.start = valid_begin_data[chunk_idx];
-            window_end = FindOrderedRangeBound<false>(*range, range_sense, row_idx, valid_end, end_boundary,
-                                                      boundary_end, chunk_idx, prev);
+            const auto cur_partition = partition_begin_data[chunk_idx];
+            if (cur_partition != prev_partition) {
+                prev.end = valid_end;
+                prev_partition = cur_partition;
+            }
+            window_end = FindOrderedRangeBound<false>(*range_lo, *range_hi, range_sense, row_idx, valid_end,
+                                                      end_boundary, boundary_end, chunk_idx, prev);
            prev.end = window_end;
        }
        frame_end_data[chunk_idx] = window_end;
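
The window changes above split the RANGE search over two cursors (range_lo/range_hi) and reset the binary-search hints at partition boundaries. A minimal sketch of a query that exercises those RANGE frame paths, assuming the duckdb.hpp embedding API; the table and column names are illustrative:

    #include "duckdb.hpp"

    int main() {
        duckdb::DuckDB db(nullptr);
        duckdb::Connection con(db);

        con.Query("CREATE TABLE readings AS "
                  "SELECT (i % 4) AS sensor, i AS ts, random() AS value FROM range(10000) t(i)");

        // RANGE frames with both a preceding and a following bound hit the
        // FindOrderedRangeBound<true/false> paths patched above.
        auto result = con.Query("SELECT sensor, ts, "
                                "       avg(value) OVER (PARTITION BY sensor ORDER BY ts "
                                "                        RANGE BETWEEN 10 PRECEDING AND 10 FOLLOWING) AS smoothed "
                                "FROM readings ORDER BY sensor, ts LIMIT 5");
        result->Print();
        return 0;
    }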

package/src/duckdb/src/function/window/window_constant_aggregator.cpp

@@ -18,6 +18,10 @@ public:
 
     void Finalize(const FrameStats &stats);
 
+    ~WindowConstantAggregatorGlobalState() override {
+        statef.Destroy();
+    }
+
     //! Partition starts
     vector<idx_t> partition_offsets;
     //! Reused result state container for the window functions
@@ -304,11 +308,7 @@ void WindowConstantAggregator::Finalize(WindowAggregatorState &gstate, WindowAgg
     lastate.statef.Combine(gastate.statef);
     lastate.statef.Destroy();
 
-    // Last one out turns off the lights!
-    if (++gastate.finalized == gastate.locals) {
-        gastate.statef.Finalize(*gastate.results);
-        gastate.statef.Destroy();
-    }
+    gastate.statef.Finalize(*gastate.results);
 }
 
 unique_ptr<WindowAggregatorState> WindowConstantAggregator::GetLocalState(const WindowAggregatorState &gstate) const {

package/src/duckdb/src/function/window/window_distinct_aggregator.cpp

@@ -190,6 +190,10 @@ class WindowDistinctAggregatorLocalState : public WindowAggregatorLocalState {
 public:
     explicit WindowDistinctAggregatorLocalState(const WindowDistinctAggregatorGlobalState &aggregator);
 
+    ~WindowDistinctAggregatorLocalState() override {
+        statef.Destroy();
+    }
+
     void Sink(DataChunk &sink_chunk, DataChunk &coll_chunk, idx_t input_idx, optional_ptr<SelectionVector> filter_sel,
               idx_t filtered);
     void Finalize(WindowAggregatorGlobalState &gastate, CollectionPtr collection) override;
@@ -740,6 +744,8 @@ void WindowDistinctAggregatorLocalState::Evaluate(const WindowDistinctAggregator
 
     // Finalise the result aggregates and write to the result
     statef.Finalize(result);
+
+    // Destruct any non-POD state
     statef.Destroy();
 }
 

package/src/duckdb/src/include/duckdb/catalog/catalog_entry_retriever.hpp

@@ -56,7 +56,7 @@ public:
                               OnEntryNotFound on_entry_not_found = OnEntryNotFound::THROW_EXCEPTION,
                               QueryErrorContext error_context = QueryErrorContext());
 
-    CatalogSearchPath &GetSearchPath();
+    const CatalogSearchPath &GetSearchPath() const;
     void SetSearchPath(vector<CatalogSearchEntry> entries);
 
     void SetCallback(catalog_entry_callback_t callback);