duckdb 0.8.2-dev4203.0 → 0.8.2-dev4376.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/package.json +1 -1
  2. package/src/duckdb/extension/parquet/parquet_extension.cpp +1 -1
  3. package/src/duckdb/src/common/enum_util.cpp +5 -0
  4. package/src/duckdb/src/common/file_buffer.cpp +1 -1
  5. package/src/duckdb/src/common/sort/partition_state.cpp +107 -29
  6. package/src/duckdb/src/common/types/validity_mask.cpp +56 -0
  7. package/src/duckdb/src/execution/index/art/art.cpp +5 -1
  8. package/src/duckdb/src/execution/index/art/leaf.cpp +13 -10
  9. package/src/duckdb/src/execution/index/art/node48.cpp +0 -2
  10. package/src/duckdb/src/execution/index/fixed_size_allocator.cpp +38 -73
  11. package/src/duckdb/src/execution/index/fixed_size_buffer.cpp +245 -27
  12. package/src/duckdb/src/execution/operator/aggregate/physical_window.cpp +2 -3
  13. package/src/duckdb/src/execution/operator/join/physical_asof_join.cpp +35 -20
  14. package/src/duckdb/src/execution/operator/persistent/physical_batch_insert.cpp +1 -1
  15. package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp +1 -1
  16. package/src/duckdb/src/function/table/arrow_conversion.cpp +9 -1
  17. package/src/duckdb/src/function/table/version/pragma_version.cpp +2 -2
  18. package/src/duckdb/src/include/duckdb/common/constants.hpp +0 -15
  19. package/src/duckdb/src/include/duckdb/common/serializer/memory_stream.hpp +1 -1
  20. package/src/duckdb/src/include/duckdb/common/sort/partition_state.hpp +14 -4
  21. package/src/duckdb/src/include/duckdb/common/types/validity_mask.hpp +3 -0
  22. package/src/duckdb/src/include/duckdb/execution/index/art/leaf.hpp +2 -0
  23. package/src/duckdb/src/include/duckdb/execution/index/fixed_size_allocator.hpp +1 -7
  24. package/src/duckdb/src/include/duckdb/execution/index/fixed_size_buffer.hpp +38 -8
  25. package/src/duckdb/src/include/duckdb/function/table/arrow.hpp +3 -0
  26. package/src/duckdb/src/include/duckdb/main/relation.hpp +9 -2
  27. package/src/duckdb/src/include/duckdb/storage/block.hpp +3 -3
  28. package/src/duckdb/src/include/duckdb/storage/compression/bitpacking.hpp +1 -8
  29. package/src/duckdb/src/include/duckdb/storage/data_pointer.hpp +2 -2
  30. package/src/duckdb/src/include/duckdb/storage/metadata/metadata_manager.hpp +2 -0
  31. package/src/duckdb/src/include/duckdb/storage/metadata/metadata_reader.hpp +2 -0
  32. package/src/duckdb/src/include/duckdb/storage/metadata/metadata_writer.hpp +6 -2
  33. package/src/duckdb/src/include/duckdb/storage/partial_block_manager.hpp +35 -19
  34. package/src/duckdb/src/include/duckdb/storage/storage_info.hpp +19 -0
  35. package/src/duckdb/src/include/duckdb/storage/table/chunk_info.hpp +19 -13
  36. package/src/duckdb/src/include/duckdb/storage/table/column_checkpoint_state.hpp +4 -19
  37. package/src/duckdb/src/include/duckdb/storage/table/column_data.hpp +1 -1
  38. package/src/duckdb/src/include/duckdb/storage/table/row_group.hpp +15 -15
  39. package/src/duckdb/src/include/duckdb/storage/table/row_version_manager.hpp +59 -0
  40. package/src/duckdb/src/include/duckdb/storage/table/update_segment.hpp +1 -1
  41. package/src/duckdb/src/include/duckdb/transaction/commit_state.hpp +1 -6
  42. package/src/duckdb/src/include/duckdb/transaction/delete_info.hpp +3 -2
  43. package/src/duckdb/src/include/duckdb/transaction/duck_transaction.hpp +4 -2
  44. package/src/duckdb/src/include/duckdb/transaction/local_storage.hpp +1 -1
  45. package/src/duckdb/src/include/duckdb/transaction/undo_buffer.hpp +0 -1
  46. package/src/duckdb/src/main/relation.cpp +15 -2
  47. package/src/duckdb/src/main/settings/settings.cpp +5 -10
  48. package/src/duckdb/src/optimizer/statistics/expression/propagate_cast.cpp +14 -0
  49. package/src/duckdb/src/storage/checkpoint/table_data_writer.cpp +0 -1
  50. package/src/duckdb/src/storage/compression/bitpacking.cpp +55 -48
  51. package/src/duckdb/src/storage/data_table.cpp +1 -1
  52. package/src/duckdb/src/storage/local_storage.cpp +1 -1
  53. package/src/duckdb/src/storage/metadata/metadata_manager.cpp +41 -2
  54. package/src/duckdb/src/storage/metadata/metadata_reader.cpp +12 -3
  55. package/src/duckdb/src/storage/metadata/metadata_writer.cpp +8 -2
  56. package/src/duckdb/src/storage/partial_block_manager.cpp +42 -15
  57. package/src/duckdb/src/storage/single_file_block_manager.cpp +1 -2
  58. package/src/duckdb/src/storage/storage_info.cpp +1 -1
  59. package/src/duckdb/src/storage/table/chunk_info.cpp +39 -33
  60. package/src/duckdb/src/storage/table/column_checkpoint_state.cpp +26 -32
  61. package/src/duckdb/src/storage/table/column_data.cpp +14 -9
  62. package/src/duckdb/src/storage/table/list_column_data.cpp +2 -2
  63. package/src/duckdb/src/storage/table/row_group.cpp +102 -192
  64. package/src/duckdb/src/storage/table/row_group_collection.cpp +2 -2
  65. package/src/duckdb/src/storage/table/row_version_manager.cpp +228 -0
  66. package/src/duckdb/src/storage/table/update_segment.cpp +2 -2
  67. package/src/duckdb/src/transaction/cleanup_state.cpp +2 -1
  68. package/src/duckdb/src/transaction/commit_state.cpp +5 -4
  69. package/src/duckdb/src/transaction/duck_transaction.cpp +4 -2
  70. package/src/duckdb/src/transaction/rollback_state.cpp +2 -1
  71. package/src/duckdb/src/transaction/undo_buffer.cpp +3 -5
  72. package/src/duckdb/ub_src_storage_table.cpp +2 -0

package/src/duckdb/src/storage/table/chunk_info.cpp
@@ -2,6 +2,7 @@
 #include "duckdb/transaction/transaction.hpp"
 #include "duckdb/common/serializer/serializer.hpp"
 #include "duckdb/common/serializer/deserializer.hpp"
+#include "duckdb/common/serializer/memory_stream.hpp"

 namespace duckdb {

@@ -29,15 +30,19 @@ static bool UseVersion(TransactionData transaction, transaction_t id) {
     return TransactionVersionOperator::UseInsertedVersion(transaction.start_time, transaction.transaction_id, id);
 }

-unique_ptr<ChunkInfo> ChunkInfo::Deserialize(Deserializer &deserializer) {
-    auto type = deserializer.ReadProperty<ChunkInfoType>(100, "type");
+void ChunkInfo::Write(WriteStream &writer) const {
+    writer.Write<ChunkInfoType>(type);
+}
+
+unique_ptr<ChunkInfo> ChunkInfo::Read(ReadStream &reader) {
+    auto type = reader.Read<ChunkInfoType>();
     switch (type) {
     case ChunkInfoType::EMPTY_INFO:
         return nullptr;
     case ChunkInfoType::CONSTANT_INFO:
-        return ChunkConstantInfo::Deserialize(deserializer);
+        return ChunkConstantInfo::Read(reader);
     case ChunkInfoType::VECTOR_INFO:
-        return ChunkVectorInfo::Deserialize(deserializer);
+        return ChunkVectorInfo::Read(reader);
     default:
         throw SerializationException("Could not deserialize Chunk Info Type: unrecognized type");
     }
@@ -79,22 +84,23 @@ void ChunkConstantInfo::CommitAppend(transaction_t commit_id, idx_t start, idx_t
     insert_id = commit_id;
 }

+bool ChunkConstantInfo::HasDeletes() const {
+    bool is_deleted = insert_id >= TRANSACTION_ID_START || delete_id < TRANSACTION_ID_START;
+    return is_deleted;
+}
+
 idx_t ChunkConstantInfo::GetCommittedDeletedCount(idx_t max_count) {
     return delete_id < TRANSACTION_ID_START ? max_count : 0;
 }

-void ChunkConstantInfo::Serialize(Serializer &serializer) const {
-    bool is_deleted = insert_id >= TRANSACTION_ID_START || delete_id < TRANSACTION_ID_START;
-    if (!is_deleted) {
-        serializer.WriteProperty(100, "type", ChunkInfoType::EMPTY_INFO);
-        return;
-    }
-    serializer.WriteProperty(100, "type", type);
-    serializer.WriteProperty(200, "start", start);
+void ChunkConstantInfo::Write(WriteStream &writer) const {
+    D_ASSERT(HasDeletes());
+    ChunkInfo::Write(writer);
+    writer.Write<idx_t>(start);
 }

-unique_ptr<ChunkInfo> ChunkConstantInfo::Deserialize(Deserializer &deserializer) {
-    auto start = deserializer.ReadProperty<idx_t>(200, "start");
+unique_ptr<ChunkInfo> ChunkConstantInfo::Read(ReadStream &reader) {
+    auto start = reader.Read<idx_t>();
     auto info = make_uniq<ChunkConstantInfo>(start);
     info->insert_id = 0;
     info->delete_id = 0;
@@ -218,6 +224,10 @@ void ChunkVectorInfo::CommitAppend(transaction_t commit_id, idx_t start, idx_t e
     }
 }

+bool ChunkVectorInfo::HasDeletes() const {
+    return any_deleted;
+}
+
 idx_t ChunkVectorInfo::GetCommittedDeletedCount(idx_t max_count) {
     if (!any_deleted) {
         return 0;
@@ -231,45 +241,41 @@ idx_t ChunkVectorInfo::GetCommittedDeletedCount(idx_t max_count) {
     return delete_count;
 }

-void ChunkVectorInfo::Serialize(Serializer &serializer) const {
+void ChunkVectorInfo::Write(WriteStream &writer) const {
     SelectionVector sel(STANDARD_VECTOR_SIZE);
     transaction_t start_time = TRANSACTION_ID_START - 1;
     transaction_t transaction_id = DConstants::INVALID_INDEX;
     idx_t count = GetSelVector(start_time, transaction_id, sel, STANDARD_VECTOR_SIZE);
     if (count == STANDARD_VECTOR_SIZE) {
         // nothing is deleted: skip writing anything
-        serializer.WriteProperty(100, "type", ChunkInfoType::EMPTY_INFO);
+        writer.Write<ChunkInfoType>(ChunkInfoType::EMPTY_INFO);
         return;
     }
     if (count == 0) {
         // everything is deleted: write a constant vector
-        serializer.WriteProperty(100, "type", ChunkInfoType::CONSTANT_INFO);
-        serializer.WriteProperty(200, "start", start);
+        writer.Write<ChunkInfoType>(ChunkInfoType::CONSTANT_INFO);
+        writer.Write<idx_t>(start);
         return;
     }
     // write a boolean vector
-    serializer.WriteProperty(100, "type", ChunkInfoType::VECTOR_INFO);
-    serializer.WriteProperty(200, "start", start);
-    bool deleted_tuples[STANDARD_VECTOR_SIZE];
-    for (idx_t i = 0; i < STANDARD_VECTOR_SIZE; i++) {
-        deleted_tuples[i] = true;
-    }
+    ChunkInfo::Write(writer);
+    writer.Write<idx_t>(start);
+    ValidityMask mask(STANDARD_VECTOR_SIZE);
+    mask.Initialize(STANDARD_VECTOR_SIZE);
     for (idx_t i = 0; i < count; i++) {
-        deleted_tuples[sel.get_index(i)] = false;
+        mask.SetInvalid(sel.get_index(i));
     }
-    serializer.WriteProperty(201, "deleted_tuples", data_ptr_cast(deleted_tuples), sizeof(bool) * STANDARD_VECTOR_SIZE);
+    mask.Write(writer, STANDARD_VECTOR_SIZE);
 }

-unique_ptr<ChunkInfo> ChunkVectorInfo::Deserialize(Deserializer &deserializer) {
-    auto start = deserializer.ReadProperty<idx_t>(200, "start");
-
+unique_ptr<ChunkInfo> ChunkVectorInfo::Read(ReadStream &reader) {
+    auto start = reader.Read<idx_t>();
     auto result = make_uniq<ChunkVectorInfo>(start);
     result->any_deleted = true;
-    bool deleted_tuples[STANDARD_VECTOR_SIZE];
-    deserializer.ReadProperty(201, "deleted_tuples", data_ptr_cast(deleted_tuples),
-                              sizeof(bool) * STANDARD_VECTOR_SIZE);
+    ValidityMask mask;
+    mask.Read(reader, STANDARD_VECTOR_SIZE);
     for (idx_t i = 0; i < STANDARD_VECTOR_SIZE; i++) {
-        if (deleted_tuples[i]) {
+        if (mask.RowIsValid(i)) {
             result->deleted[i] = 0;
         }
     }
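
Note on the chunk_info.cpp hunks above: delete information is now written through a raw WriteStream/ReadStream instead of the property-based Serializer/Deserializer, and the per-row bool array is replaced by a ValidityMask, i.e. one bit per row instead of one byte per row (with the default STANDARD_VECTOR_SIZE of 2048, roughly 256 bytes of mask data instead of 2048 bytes of booleans). The standalone sketch below only illustrates that layout; ChunkInfoType, VECTOR_SIZE and WriteDeleteMask are simplified stand-ins, and the exact bit packing of DuckDB's ValidityMask is not reproduced.

    // Standalone sketch of the layout written for a vector with deletions: a type tag,
    // the vector's start row, then one bit per row (bit set = row deleted). The DuckDB
    // code arrives at the same encoding by starting from an all-valid mask and clearing
    // the bit for every surviving row.
    #include <cstdint>
    #include <vector>

    enum class ChunkInfoType : uint8_t { CONSTANT_INFO, VECTOR_INFO, EMPTY_INFO };

    constexpr uint64_t VECTOR_SIZE = 2048; // stand-in for STANDARD_VECTOR_SIZE

    void WriteDeleteMask(std::vector<uint8_t> &out, uint64_t start, const bool *deleted) {
        out.push_back(static_cast<uint8_t>(ChunkInfoType::VECTOR_INFO));
        // start row of the vector, little-endian
        for (int i = 0; i < 8; i++) {
            out.push_back(static_cast<uint8_t>(start >> (i * 8)));
        }
        // one bit per row instead of one bool per row: 2048 rows -> 256 bytes of mask data
        std::vector<uint8_t> bits(VECTOR_SIZE / 8, 0);
        for (uint64_t row = 0; row < VECTOR_SIZE; row++) {
            if (deleted[row]) {
                bits[row / 8] |= static_cast<uint8_t>(1u << (row % 8));
            }
        }
        out.insert(out.end(), bits.begin(), bits.end());
    }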

package/src/duckdb/src/storage/table/column_checkpoint_state.cpp
@@ -22,9 +22,9 @@ unique_ptr<BaseStatistics> ColumnCheckpointState::GetStatistics() {
     return std::move(global_stats);
 }

-PartialBlockForCheckpoint::PartialBlockForCheckpoint(ColumnData &data, ColumnSegment &segment,
-                                                     BlockManager &block_manager, PartialBlockState state)
-    : PartialBlock(state), block_manager(block_manager), block(segment.block) {
+PartialBlockForCheckpoint::PartialBlockForCheckpoint(ColumnData &data, ColumnSegment &segment, PartialBlockState state,
+                                                     BlockManager &block_manager)
+    : PartialBlock(state, block_manager, segment.block) {
     AddSegmentToTail(data, segment, 0);
 }

@@ -37,24 +37,15 @@ bool PartialBlockForCheckpoint::IsFlushed() {
     return segments.empty();
 }

-void PartialBlockForCheckpoint::AddUninitializedRegion(idx_t start, idx_t end) {
-    uninitialized_regions.push_back({start, end});
-}
+void PartialBlockForCheckpoint::Flush(const idx_t free_space_left) {

-void PartialBlockForCheckpoint::Flush(idx_t free_space_left) {
     if (IsFlushed()) {
         throw InternalException("Flush called on partial block that was already flushed");
     }
-    // if we have any free space or uninitialized regions we need to zero-initialize them
-    if (free_space_left > 0 || !uninitialized_regions.empty()) {
-        auto handle = block_manager.buffer_manager.Pin(block);
-        // memset any uninitialized regions
-        for (auto &uninitialized : uninitialized_regions) {
-            memset(handle.Ptr() + uninitialized.start, 0, uninitialized.end - uninitialized.start);
-        }
-        // memset any free space at the end of the block to 0 prior to writing to disk
-        memset(handle.Ptr() + Storage::BLOCK_SIZE - free_space_left, 0, free_space_left);
-    }
+
+    // zero-initialize unused memory
+    FlushInternal(free_space_left);
+
     // At this point, we've already copied all data from tail_segments
     // into the page owned by first_segment. We flush all segment data to
     // disk with the following call.
@@ -63,6 +54,7 @@ void PartialBlockForCheckpoint::Flush(idx_t free_space_left) {
     if (fetch_new_block) {
         state.block_id = block_manager.GetFreeBlockId();
     }
+
     for (idx_t i = 0; i < segments.size(); i++) {
         auto &segment = segments[i];
         segment.data.IncrementVersion();
@@ -71,23 +63,18 @@ void PartialBlockForCheckpoint::Flush(idx_t free_space_left) {
             D_ASSERT(segment.offset_in_block == 0);
             segment.segment.ConvertToPersistent(&block_manager, state.block_id);
             // update the block after it has been converted to a persistent segment
-            block = segment.segment.block;
+            block_handle = segment.segment.block;
         } else {
             // subsequent segments are MARKED as persistent - they don't need to be rewritten
-            segment.segment.MarkAsPersistent(block, segment.offset_in_block);
+            segment.segment.MarkAsPersistent(block_handle, segment.offset_in_block);
             if (fetch_new_block) {
                 // if we fetched a new block we need to increase the reference count to the block
                 block_manager.IncreaseBlockReferenceCount(state.block_id);
             }
         }
     }
-    Clear();
-}

-void PartialBlockForCheckpoint::Clear() {
-    uninitialized_regions.clear();
-    block.reset();
-    segments.clear();
+    Clear();
 }

 void PartialBlockForCheckpoint::Merge(PartialBlock &other_p, idx_t offset, idx_t other_size) {
@@ -95,13 +82,13 @@ void PartialBlockForCheckpoint::Merge(PartialBlock &other_p, idx_t offset, idx_t

     auto &buffer_manager = block_manager.buffer_manager;
     // pin the source block
-    auto old_handle = buffer_manager.Pin(other.block);
+    auto old_handle = buffer_manager.Pin(other.block_handle);
     // pin the target block
-    auto new_handle = buffer_manager.Pin(block);
+    auto new_handle = buffer_manager.Pin(block_handle);
     // memcpy the contents of the old block to the new block
     memcpy(new_handle.Ptr() + offset, old_handle.Ptr(), other_size);

-    // now copy over all of the segments to the new block
+    // now copy over all segments to the new block
     // move over the uninitialized regions
     for (auto &region : other.uninitialized_regions) {
         region.start += offset;
@@ -113,6 +100,7 @@ void PartialBlockForCheckpoint::Merge(PartialBlock &other_p, idx_t offset, idx_t
     for (auto &segment : other.segments) {
         AddSegmentToTail(segment.data, segment.segment, segment.offset_in_block + offset);
     }
+
     other.Clear();
 }

@@ -120,6 +108,12 @@ void PartialBlockForCheckpoint::AddSegmentToTail(ColumnData &data, ColumnSegment
     segments.emplace_back(data, segment, offset_in_block);
 }

+void PartialBlockForCheckpoint::Clear() {
+    uninitialized_regions.clear();
+    block_handle.reset();
+    segments.clear();
+}
+
 void ColumnCheckpointState::FlushSegment(unique_ptr<ColumnSegment> segment, idx_t segment_size) {
     D_ASSERT(segment_size <= Storage::BLOCK_SIZE);
     auto tuple_count = segment->count.load();
@@ -140,7 +134,7 @@ void ColumnCheckpointState::FlushSegment(unique_ptr<ColumnSegment> segment, idx_
     // non-constant block
     PartialBlockAllocation allocation = partial_block_manager.GetBlockAllocation(segment_size);
     block_id = allocation.state.block_id;
-    offset_in_block = allocation.state.offset_in_block;
+    offset_in_block = allocation.state.offset;

     if (allocation.partial_block) {
         // Use an existing block.
@@ -149,7 +143,7 @@ void ColumnCheckpointState::FlushSegment(unique_ptr<ColumnSegment> segment, idx_
         // pin the source block
         auto old_handle = buffer_manager.Pin(segment->block);
         // pin the target block
-        auto new_handle = buffer_manager.Pin(pstate.block);
+        auto new_handle = buffer_manager.Pin(pstate.block_handle);
         // memcpy the contents of the old block to the new block
         memcpy(new_handle.Ptr() + offset_in_block, old_handle.Ptr(), segment_size);
         pstate.AddSegmentToTail(column_data, *segment, offset_in_block);
@@ -162,8 +156,8 @@ void ColumnCheckpointState::FlushSegment(unique_ptr<ColumnSegment> segment, idx_
             segment->Resize(Storage::BLOCK_SIZE);
         }
         D_ASSERT(offset_in_block == 0);
-        allocation.partial_block = make_uniq<PartialBlockForCheckpoint>(
-            column_data, *segment, *allocation.block_manager, allocation.state);
+        allocation.partial_block = make_uniq<PartialBlockForCheckpoint>(column_data, *segment, allocation.state,
+                                                                        *allocation.block_manager);
     }
     // Writer will decide whether to reuse this block.
     partial_block_manager.RegisterPartialBlock(std::move(allocation));
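
Note on the column_checkpoint_state.cpp hunks above: PartialBlockForCheckpoint hands its block handle (renamed to block_handle), its BlockManager reference and, it appears, the uninitialized-region tracking to the PartialBlock base class, and Flush now calls the shared FlushInternal helper instead of zeroing memory inline. The self-contained sketch below mirrors the zero-fill step that was removed from Flush here and presumably lives in that shared helper now; Region, BLOCK_SIZE and ZeroUnusedBlockMemory are stand-ins for the uninitialized_regions entries, Storage::BLOCK_SIZE and the pinned block buffer, not the actual DuckDB definitions.

    // Self-contained sketch of the zero-fill step that Flush used to perform inline.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Region {
        uint64_t start;
        uint64_t end;
    };

    constexpr uint64_t BLOCK_SIZE = 256 * 1024; // stand-in for Storage::BLOCK_SIZE

    // Zero any uninitialized regions plus the unused tail of the block before it is
    // written out, so stale buffer contents never end up in the database file.
    void ZeroUnusedBlockMemory(uint8_t *block, const std::vector<Region> &uninitialized_regions,
                               uint64_t free_space_left) {
        for (const auto &region : uninitialized_regions) {
            std::memset(block + region.start, 0, region.end - region.start);
        }
        std::memset(block + BLOCK_SIZE - free_space_left, 0, free_space_left);
    }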

package/src/duckdb/src/storage/table/column_data.cpp
@@ -87,7 +87,7 @@ void ColumnData::InitializeScanWithOffset(ColumnScanState &state, idx_t row_idx)
     state.last_offset = 0;
 }

-idx_t ColumnData::ScanVector(ColumnScanState &state, Vector &result, idx_t remaining) {
+idx_t ColumnData::ScanVector(ColumnScanState &state, Vector &result, idx_t remaining, bool has_updates) {
     state.previous_states.clear();
     if (state.version != version) {
         InitializeScanWithOffset(state, state.row_index);
@@ -113,7 +113,8 @@ idx_t ColumnData::ScanVector(ColumnScanState &state, Vector &result, idx_t remai
         idx_t scan_count = MinValue<idx_t>(remaining, state.current->start + state.current->count - state.row_index);
         idx_t result_offset = initial_remaining - remaining;
         if (scan_count > 0) {
-            state.current->Scan(state, scan_count, result, result_offset, scan_count == initial_remaining);
+            state.current->Scan(state, scan_count, result, result_offset,
+                                !has_updates && scan_count == initial_remaining);

             state.row_index += scan_count;
             remaining -= scan_count;
@@ -138,10 +139,14 @@ idx_t ColumnData::ScanVector(ColumnScanState &state, Vector &result, idx_t remai

 template <bool SCAN_COMMITTED, bool ALLOW_UPDATES>
 idx_t ColumnData::ScanVector(TransactionData transaction, idx_t vector_index, ColumnScanState &state, Vector &result) {
-    auto scan_count = ScanVector(state, result, STANDARD_VECTOR_SIZE);
-
-    lock_guard<mutex> update_guard(update_lock);
-    if (updates) {
+    bool has_updates;
+    {
+        lock_guard<mutex> update_guard(update_lock);
+        has_updates = updates ? true : false;
+    }
+    auto scan_count = ScanVector(state, result, STANDARD_VECTOR_SIZE, has_updates);
+    if (has_updates) {
+        lock_guard<mutex> update_guard(update_lock);
         if (!ALLOW_UPDATES && updates->HasUncommittedUpdates(vector_index)) {
             throw TransactionException("Cannot create index with outstanding updates");
         }
@@ -179,7 +184,7 @@ idx_t ColumnData::ScanCommitted(idx_t vector_index, ColumnScanState &state, Vect
 void ColumnData::ScanCommittedRange(idx_t row_group_start, idx_t offset_in_row_group, idx_t count, Vector &result) {
     ColumnScanState child_state;
     InitializeScanWithOffset(child_state, row_group_start + offset_in_row_group);
-    auto scan_count = ScanVector(child_state, result, count);
+    auto scan_count = ScanVector(child_state, result, count, updates ? true : false);
     if (updates) {
         result.Flatten(scan_count);
         updates->FetchCommittedRange(offset_in_row_group, count, result);
@@ -192,7 +197,7 @@ idx_t ColumnData::ScanCount(ColumnScanState &state, Vector &result, idx_t count)
     }
     // ScanCount can only be used if there are no updates
     D_ASSERT(!updates);
-    return ScanVector(state, result, count);
+    return ScanVector(state, result, count, false);
 }

 void ColumnData::Select(TransactionData transaction, idx_t vector_index, ColumnScanState &state, Vector &result,
@@ -339,7 +344,7 @@ idx_t ColumnData::Fetch(ColumnScanState &state, row_t row_id, Vector &result) {
     state.row_index = start + ((row_id - start) / STANDARD_VECTOR_SIZE * STANDARD_VECTOR_SIZE);
     state.current = data.GetSegment(state.row_index);
     state.internal_index = state.current->start;
-    return ScanVector(state, result, STANDARD_VECTOR_SIZE);
+    return ScanVector(state, result, STANDARD_VECTOR_SIZE, false);
 }

 void ColumnData::FetchRow(TransactionData transaction, ColumnFetchState &state, row_t row_id, Vector &result,
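
Note on the column_data.cpp hunks above: ScanVector now takes a has_updates flag. The updates pointer is probed once under update_lock, the segment scan runs without holding the lock, and the lock is only re-taken when updates actually have to be merged in; the flag also forces the final argument of state.current->Scan(...) to false, presumably so the scan does not hand out a direct reference to segment data that the updates would then overwrite. A self-contained sketch of that locking pattern follows; UpdateInfo, Scan, MergeUpdates and ScanWithUpdates are illustrative stand-ins, not DuckDB APIs.

    // Self-contained sketch of the locking pattern: probe the mutex-protected updates
    // pointer once under a short critical section, scan without holding the lock, and
    // only re-lock when updates actually exist.
    #include <cstddef>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct UpdateInfo {
        std::vector<int> patched_values;
    };

    struct ColumnLike {
        std::mutex update_lock;
        std::unique_ptr<UpdateInfo> updates; // may be null

        // placeholder scan; allow_direct_reference mimics the flag that is forced to
        // false whenever updates have to be merged in afterwards
        std::vector<int> Scan(bool /*allow_direct_reference*/) {
            return std::vector<int>(8, 1);
        }

        void MergeUpdates(std::vector<int> &result, const UpdateInfo &info) {
            for (std::size_t i = 0; i < info.patched_values.size() && i < result.size(); i++) {
                result[i] = info.patched_values[i];
            }
        }

        std::vector<int> ScanWithUpdates() {
            bool has_updates;
            {
                std::lock_guard<std::mutex> guard(update_lock);
                has_updates = updates != nullptr;
            }
            // the potentially expensive scan runs outside the critical section
            auto result = Scan(!has_updates);
            if (has_updates) {
                std::lock_guard<std::mutex> guard(update_lock);
                MergeUpdates(result, *updates);
            }
            return result;
        }
    };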

package/src/duckdb/src/storage/table/list_column_data.cpp
@@ -86,7 +86,7 @@ idx_t ListColumnData::ScanCount(ColumnScanState &state, Vector &result, idx_t co
     D_ASSERT(!updates);

     Vector offset_vector(LogicalType::UBIGINT, count);
-    idx_t scan_count = ScanVector(state, offset_vector, count);
+    idx_t scan_count = ScanVector(state, offset_vector, count, false);
     D_ASSERT(scan_count > 0);
     validity.ScanCount(state.child_states[0], result, count);

@@ -132,7 +132,7 @@ void ListColumnData::Skip(ColumnScanState &state, idx_t count) {
     // note that we only need to read the first and last entry
     // however, let's just read all "count" entries for now
     Vector result(LogicalType::UBIGINT, count);
-    idx_t scan_count = ScanVector(state, result, count);
+    idx_t scan_count = ScanVector(state, result, count, false);
     if (scan_count == 0) {
         return;
     }