duckdb 1.3.2-dev0.0 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/.github/workflows/NodeJS.yml +2 -2
  2. package/package.json +1 -1
  3. package/src/duckdb/extension/icu/icu-timezone.cpp +10 -6
  4. package/src/duckdb/extension/parquet/column_reader.cpp +2 -0
  5. package/src/duckdb/extension/parquet/include/parquet_writer.hpp +7 -2
  6. package/src/duckdb/extension/parquet/include/writer/templated_column_writer.hpp +9 -5
  7. package/src/duckdb/extension/parquet/parquet_extension.cpp +5 -2
  8. package/src/duckdb/extension/parquet/parquet_writer.cpp +4 -2
  9. package/src/duckdb/src/catalog/catalog.cpp +1 -1
  10. package/src/duckdb/src/common/arrow/arrow_type_extension.cpp +4 -2
  11. package/src/duckdb/src/common/error_data.cpp +7 -0
  12. package/src/duckdb/src/common/operator/string_cast.cpp +3 -0
  13. package/src/duckdb/src/execution/operator/csv_scanner/scanner/scanner_boundary.cpp +5 -0
  14. package/src/duckdb/src/execution/operator/csv_scanner/scanner/string_value_scanner.cpp +2 -2
  15. package/src/duckdb/src/execution/operator/join/physical_iejoin.cpp +10 -2
  16. package/src/duckdb/src/execution/physical_plan/plan_asof_join.cpp +7 -2
  17. package/src/duckdb/src/function/function_list.cpp +1 -1
  18. package/src/duckdb/src/function/scalar/compressed_materialization/compress_string.cpp +6 -6
  19. package/src/duckdb/src/function/scalar/compressed_materialization_utils.cpp +1 -1
  20. package/src/duckdb/src/function/table/table_scan.cpp +43 -84
  21. package/src/duckdb/src/function/table/version/pragma_version.cpp +3 -3
  22. package/src/duckdb/src/include/duckdb/common/file_opener.hpp +1 -0
  23. package/src/duckdb/src/include/duckdb/common/operator/string_cast.hpp +3 -1
  24. package/src/duckdb/src/include/duckdb/function/scalar/compressed_materialization_functions.hpp +2 -2
  25. package/src/duckdb/src/include/duckdb/main/client_context_file_opener.hpp +1 -0
  26. package/src/duckdb/src/include/duckdb/main/database_file_opener.hpp +3 -0
  27. package/src/duckdb/src/include/duckdb/main/extension_entries.hpp +5 -0
  28. package/src/duckdb/src/include/duckdb/main/settings.hpp +2 -2
  29. package/src/duckdb/src/include/duckdb/optimizer/join_order/join_order_optimizer.hpp +1 -0
  30. package/src/duckdb/src/include/duckdb/parallel/concurrentqueue.hpp +8 -0
  31. package/src/duckdb/src/include/duckdb/planner/filter/expression_filter.hpp +2 -0
  32. package/src/duckdb/src/include/duckdb/storage/table/segment_tree.hpp +1 -1
  33. package/src/duckdb/src/main/client_context_file_opener.cpp +4 -0
  34. package/src/duckdb/src/main/database_manager.cpp +7 -2
  35. package/src/duckdb/src/main/database_path_and_type.cpp +1 -1
  36. package/src/duckdb/src/main/settings/autogenerated_settings.cpp +0 -34
  37. package/src/duckdb/src/main/settings/custom_settings.cpp +49 -0
  38. package/src/duckdb/src/optimizer/compressed_materialization.cpp +4 -4
  39. package/src/duckdb/src/optimizer/filter_combiner.cpp +1 -1
  40. package/src/duckdb/src/optimizer/join_order/join_order_optimizer.cpp +9 -1
  41. package/src/duckdb/src/optimizer/remove_unused_columns.cpp +3 -0
  42. package/src/duckdb/src/optimizer/statistics/operator/propagate_get.cpp +68 -3
  43. package/src/duckdb/src/parser/statement/set_statement.cpp +1 -1
  44. package/src/duckdb/src/planner/binder/expression/bind_columnref_expression.cpp +2 -3
  45. package/src/duckdb/src/planner/expression_binder/alter_binder.cpp +2 -3
  46. package/src/duckdb/src/planner/expression_binder/having_binder.cpp +2 -2
  47. package/src/duckdb/src/planner/expression_binder/table_function_binder.cpp +4 -4
  48. package/src/duckdb/src/planner/filter/expression_filter.cpp +4 -3
  49. package/src/duckdb/src/planner/table_filter.cpp +2 -1
  50. package/src/duckdb/src/storage/buffer/buffer_pool.cpp +5 -3
  51. package/src/duckdb/src/storage/compression/fsst.cpp +20 -10
  52. package/src/duckdb/src/storage/compression/roaring/compress.cpp +15 -9
  53. package/src/duckdb/src/storage/compression/roaring/scan.cpp +10 -1
  54. package/src/duckdb/src/storage/data_table.cpp +1 -1
  55. package/src/duckdb/src/storage/statistics/numeric_stats.cpp +2 -1
  56. package/src/duckdb/src/storage/storage_info.cpp +2 -0
  57. package/src/duckdb/src/storage/table/column_checkpoint_state.cpp +3 -1
  58. package/src/duckdb/src/transaction/duck_transaction_manager.cpp +6 -3
@@ -308,24 +308,30 @@ void RoaringCompressState::FlushSegment() {
 	base_ptr += sizeof(idx_t);
 
 	// Size of the 'd' part
-	idx_t data_size = NumericCast<idx_t>(data_ptr - base_ptr);
-	data_size = AlignValue(data_size);
+	auto unaligned_data_size = NumericCast<idx_t>(data_ptr - base_ptr);
+	auto data_size = AlignValue(unaligned_data_size);
+	data_ptr += data_size - unaligned_data_size;
 
 	// Size of the 'm' part
-	idx_t metadata_size = metadata_collection.GetMetadataSizeForSegment();
-
+	auto metadata_size = metadata_collection.GetMetadataSizeForSegment();
 	if (current_segment->count.load() == 0) {
 		D_ASSERT(metadata_size == 0);
 		return;
 	}
 
-	idx_t serialized_metadata_size = metadata_collection.Serialize(data_ptr);
+	auto serialized_metadata_size = metadata_collection.Serialize(data_ptr);
+	if (metadata_size != serialized_metadata_size) {
+		throw InternalException("mismatch in metadata size during RoaringCompressState::FlushSegment");
+	}
+
 	metadata_collection.FlushSegment();
-	(void)serialized_metadata_size;
-	D_ASSERT(metadata_size == serialized_metadata_size);
-	idx_t metadata_start = static_cast<idx_t>(data_ptr - base_ptr);
+	auto metadata_start = static_cast<idx_t>(data_ptr - base_ptr);
+	if (metadata_start > info.GetBlockSize()) {
+		throw InternalException("metadata start outside of block size during RoaringCompressState::FlushSegment");
+	}
+
 	Store<idx_t>(metadata_start, handle.Ptr());
-	idx_t total_segment_size = sizeof(idx_t) + data_size + metadata_size;
+	auto total_segment_size = sizeof(idx_t) + data_size + metadata_size;
 	state.FlushSegment(std::move(current_segment), std::move(handle), total_segment_size);
 }
 
@@ -203,11 +203,20 @@ void BitsetContainerScanState::Verify() const {
 RoaringScanState::RoaringScanState(ColumnSegment &segment) : segment(segment) {
 	auto &buffer_manager = BufferManager::GetBufferManager(segment.db);
 	handle = buffer_manager.Pin(segment.block);
-	auto base_ptr = handle.Ptr() + segment.GetBlockOffset();
+	auto segment_size = segment.SegmentSize();
+	auto segment_block_offset = segment.GetBlockOffset();
+	if (segment_block_offset >= segment_size) {
+		throw InternalException("invalid segment_block_offset in RoaringScanState constructor");
+	}
+
+	auto base_ptr = handle.Ptr() + segment_block_offset;
 	data_ptr = base_ptr + sizeof(idx_t);
 
 	// Deserialize the container metadata for this segment
 	auto metadata_offset = Load<idx_t>(base_ptr);
+	if (metadata_offset >= segment_size) {
+		throw InternalException("invalid metadata offset in RoaringScanState constructor");
+	}
 	auto metadata_ptr = data_ptr + metadata_offset;
 
 	auto segment_count = segment.count.load();
@@ -415,7 +415,7 @@ TableStorageInfo DataTable::GetStorageInfo() {
 //===--------------------------------------------------------------------===//
 void DataTable::Fetch(DuckTransaction &transaction, DataChunk &result, const vector<StorageIndex> &column_ids,
                       const Vector &row_identifiers, idx_t fetch_count, ColumnFetchState &state) {
-	auto lock = info->checkpoint_lock.GetSharedLock();
+	auto lock = transaction.SharedLockTable(*info);
 	row_groups->Fetch(transaction, result, column_ids, row_identifiers, fetch_count, state);
 }
 
@@ -375,7 +375,8 @@ Value NumericValueUnionToValue(const LogicalType &type, const NumericValueUnion
 }
 
 bool NumericStats::HasMinMax(const BaseStatistics &stats) {
-	return NumericStats::HasMin(stats) && NumericStats::HasMax(stats);
+	return NumericStats::HasMin(stats) && NumericStats::HasMax(stats) &&
+	       NumericStats::Min(stats) <= NumericStats::Max(stats);
 }
 
 bool NumericStats::HasMin(const BaseStatistics &stats) {
@@ -81,6 +81,7 @@ static const StorageVersionInfo storage_version_info[] = {
     {"v1.2.2", 65},
     {"v1.3.0", 66},
     {"v1.3.1", 66},
+    {"v1.3.2", 66},
     {nullptr, 0}
 };
 // END OF STORAGE VERSION INFO
@@ -104,6 +105,7 @@ static const SerializationVersionInfo serialization_version_info[] = {
     {"v1.2.2", 4},
     {"v1.3.0", 5},
     {"v1.3.1", 5},
+    {"v1.3.2", 5},
     {"latest", 5},
     {nullptr, 0}
 };
@@ -117,7 +117,9 @@ void ColumnCheckpointState::FlushSegment(unique_ptr<ColumnSegment> segment, Buff
 
 void ColumnCheckpointState::FlushSegmentInternal(unique_ptr<ColumnSegment> segment, idx_t segment_size) {
 	auto block_size = partial_block_manager.GetBlockManager().GetBlockSize();
-	D_ASSERT(segment_size <= block_size);
+	if (segment_size > block_size) {
+		throw InternalException("segment size exceeds block size in ColumnCheckpointState::FlushSegmentInternal");
+	}
 
 	auto tuple_count = segment->count.load();
 	if (tuple_count == 0) { // LCOV_EXCL_START
@@ -269,14 +269,17 @@ ErrorData DuckTransactionManager::CommitTransaction(ClientContext &context, Tran
 	if (!error.HasError()) {
 		error = transaction.Commit(db, commit_id, std::move(commit_state));
 	}
+
 	if (error.HasError()) {
-		// commit unsuccessful: rollback the transaction instead
+		// COMMIT not successful: ROLLBACK.
 		checkpoint_decision = CheckpointDecision(error.Message());
 		transaction.commit_id = 0;
+
 		auto rollback_error = transaction.Rollback();
 		if (rollback_error.HasError()) {
-			throw FatalException("Failed to rollback transaction. Cannot continue operation.\nError: %s",
-			                     rollback_error.Message());
+			throw FatalException(
+			    "Failed to rollback transaction. Cannot continue operation.\nOriginal Error: %s\nRollback Error: %s",
+			    error.Message(), rollback_error.Message());
 		}
 	} else {
 		// check if catalog changes were made