duckdb 0.7.2-dev2233.0 → 0.7.2-dev2320.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. package/binding.gyp +1 -0
  2. package/package.json +1 -1
  3. package/src/duckdb/src/catalog/catalog.cpp +18 -17
  4. package/src/duckdb/src/catalog/catalog_entry/duck_table_entry.cpp +0 -4
  5. package/src/duckdb/src/catalog/catalog_entry/table_catalog_entry.cpp +0 -4
  6. package/src/duckdb/src/catalog/catalog_set.cpp +3 -3
  7. package/src/duckdb/src/common/adbc/adbc.cpp +441 -0
  8. package/src/duckdb/src/common/adbc/driver_manager.cpp +749 -0
  9. package/src/duckdb/src/common/arrow/arrow_wrapper.cpp +1 -1
  10. package/src/duckdb/src/common/tree_renderer.cpp +3 -3
  11. package/src/duckdb/src/common/types/conflict_manager.cpp +2 -1
  12. package/src/duckdb/src/execution/column_binding_resolver.cpp +1 -1
  13. package/src/duckdb/src/execution/operator/aggregate/physical_ungrouped_aggregate.cpp +1 -1
  14. package/src/duckdb/src/execution/operator/filter/physical_filter.cpp +2 -2
  15. package/src/duckdb/src/execution/operator/helper/physical_execute.cpp +2 -2
  16. package/src/duckdb/src/execution/operator/helper/physical_result_collector.cpp +5 -5
  17. package/src/duckdb/src/execution/operator/join/physical_cross_product.cpp +1 -1
  18. package/src/duckdb/src/execution/operator/join/physical_delim_join.cpp +11 -10
  19. package/src/duckdb/src/execution/operator/join/physical_hash_join.cpp +3 -3
  20. package/src/duckdb/src/execution/operator/join/physical_iejoin.cpp +9 -9
  21. package/src/duckdb/src/execution/operator/join/physical_index_join.cpp +4 -4
  22. package/src/duckdb/src/execution/operator/join/physical_join.cpp +7 -7
  23. package/src/duckdb/src/execution/operator/join/physical_nested_loop_join.cpp +3 -3
  24. package/src/duckdb/src/execution/operator/join/physical_piecewise_merge_join.cpp +3 -3
  25. package/src/duckdb/src/execution/operator/join/physical_positional_join.cpp +2 -2
  26. package/src/duckdb/src/execution/operator/persistent/csv_reader_options.cpp +8 -9
  27. package/src/duckdb/src/execution/operator/persistent/physical_batch_insert.cpp +20 -19
  28. package/src/duckdb/src/execution/operator/persistent/physical_export.cpp +3 -3
  29. package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp +25 -24
  30. package/src/duckdb/src/execution/operator/persistent/physical_update.cpp +1 -1
  31. package/src/duckdb/src/execution/operator/projection/physical_projection.cpp +2 -2
  32. package/src/duckdb/src/execution/operator/scan/physical_column_data_scan.cpp +12 -6
  33. package/src/duckdb/src/execution/operator/set/physical_recursive_cte.cpp +10 -11
  34. package/src/duckdb/src/execution/operator/set/physical_union.cpp +2 -2
  35. package/src/duckdb/src/execution/physical_operator.cpp +13 -13
  36. package/src/duckdb/src/execution/physical_plan/plan_column_data_get.cpp +2 -4
  37. package/src/duckdb/src/execution/physical_plan/plan_comparison_join.cpp +1 -1
  38. package/src/duckdb/src/execution/physical_plan/plan_create_table.cpp +5 -5
  39. package/src/duckdb/src/execution/physical_plan/plan_delete.cpp +3 -3
  40. package/src/duckdb/src/execution/physical_plan/plan_delim_join.cpp +6 -7
  41. package/src/duckdb/src/execution/physical_plan/plan_explain.cpp +2 -4
  42. package/src/duckdb/src/execution/physical_plan/plan_insert.cpp +2 -2
  43. package/src/duckdb/src/execution/physical_plan/plan_show_select.cpp +2 -4
  44. package/src/duckdb/src/execution/physical_plan/plan_update.cpp +3 -3
  45. package/src/duckdb/src/function/compression_config.cpp +9 -9
  46. package/src/duckdb/src/function/scalar/date/strftime.cpp +1 -1
  47. package/src/duckdb/src/function/table/copy_csv.cpp +5 -0
  48. package/src/duckdb/src/function/table/pragma_detailed_profiling_output.cpp +6 -5
  49. package/src/duckdb/src/function/table/pragma_last_profiling_output.cpp +7 -5
  50. package/src/duckdb/src/function/table/system/duckdb_databases.cpp +1 -1
  51. package/src/duckdb/src/function/table/system/duckdb_dependencies.cpp +1 -1
  52. package/src/duckdb/src/function/table/system/duckdb_extensions.cpp +1 -1
  53. package/src/duckdb/src/function/table/system/duckdb_indexes.cpp +1 -1
  54. package/src/duckdb/src/function/table/system/duckdb_keywords.cpp +1 -1
  55. package/src/duckdb/src/function/table/system/duckdb_schemas.cpp +1 -1
  56. package/src/duckdb/src/function/table/system/duckdb_sequences.cpp +1 -1
  57. package/src/duckdb/src/function/table/system/duckdb_settings.cpp +1 -1
  58. package/src/duckdb/src/function/table/system/duckdb_tables.cpp +1 -1
  59. package/src/duckdb/src/function/table/system/duckdb_temporary_files.cpp +1 -1
  60. package/src/duckdb/src/function/table/system/duckdb_types.cpp +1 -1
  61. package/src/duckdb/src/function/table/system/pragma_collations.cpp +1 -1
  62. package/src/duckdb/src/function/table/system/pragma_database_size.cpp +1 -1
  63. package/src/duckdb/src/function/table/system/pragma_storage_info.cpp +5 -5
  64. package/src/duckdb/src/function/table/system/pragma_table_info.cpp +1 -1
  65. package/src/duckdb/src/function/table/system/test_all_types.cpp +1 -1
  66. package/src/duckdb/src/function/table/table_scan.cpp +3 -4
  67. package/src/duckdb/src/function/table/version/pragma_version.cpp +2 -2
  68. package/src/duckdb/src/include/duckdb/catalog/catalog.hpp +3 -3
  69. package/src/duckdb/src/include/duckdb/catalog/catalog_entry/duck_table_entry.hpp +1 -2
  70. package/src/duckdb/src/include/duckdb/catalog/catalog_entry/table_catalog_entry.hpp +1 -2
  71. package/src/duckdb/src/include/duckdb/common/adbc/adbc-init.hpp +37 -0
  72. package/src/duckdb/src/include/duckdb/common/adbc/adbc.h +1088 -0
  73. package/src/duckdb/src/include/duckdb/common/adbc/adbc.hpp +85 -0
  74. package/src/duckdb/src/include/duckdb/common/adbc/driver_manager.h +84 -0
  75. package/src/duckdb/src/include/duckdb/common/helper.hpp +3 -0
  76. package/src/duckdb/src/include/duckdb/common/types/conflict_manager.hpp +3 -2
  77. package/src/duckdb/src/include/duckdb/common/types.hpp +0 -1
  78. package/src/duckdb/src/include/duckdb/execution/executor.hpp +7 -7
  79. package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_execute.hpp +1 -1
  80. package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_result_collector.hpp +1 -1
  81. package/src/duckdb/src/include/duckdb/execution/operator/join/physical_cross_product.hpp +1 -1
  82. package/src/duckdb/src/include/duckdb/execution/operator/join/physical_delim_join.hpp +3 -3
  83. package/src/duckdb/src/include/duckdb/execution/operator/join/physical_index_join.hpp +1 -1
  84. package/src/duckdb/src/include/duckdb/execution/operator/join/physical_join.hpp +1 -1
  85. package/src/duckdb/src/include/duckdb/execution/operator/join/physical_positional_join.hpp +1 -1
  86. package/src/duckdb/src/include/duckdb/execution/operator/persistent/csv_reader_options.hpp +2 -1
  87. package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_batch_insert.hpp +2 -2
  88. package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_export.hpp +1 -1
  89. package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_insert.hpp +2 -2
  90. package/src/duckdb/src/include/duckdb/execution/operator/scan/physical_column_data_scan.hpp +3 -4
  91. package/src/duckdb/src/include/duckdb/execution/operator/set/physical_recursive_cte.hpp +1 -1
  92. package/src/duckdb/src/include/duckdb/execution/operator/set/physical_union.hpp +1 -1
  93. package/src/duckdb/src/include/duckdb/execution/physical_operator.hpp +3 -3
  94. package/src/duckdb/src/include/duckdb/execution/physical_operator_states.hpp +1 -1
  95. package/src/duckdb/src/include/duckdb/function/cast/cast_function_set.hpp +1 -1
  96. package/src/duckdb/src/include/duckdb/main/config.hpp +2 -2
  97. package/src/duckdb/src/include/duckdb/main/query_profiler.hpp +10 -9
  98. package/src/duckdb/src/include/duckdb/parallel/meta_pipeline.hpp +4 -4
  99. package/src/duckdb/src/include/duckdb/parallel/pipeline.hpp +18 -17
  100. package/src/duckdb/src/include/duckdb/parallel/pipeline_executor.hpp +2 -2
  101. package/src/duckdb/src/include/duckdb/planner/bind_context.hpp +14 -17
  102. package/src/duckdb/src/include/duckdb/planner/binder.hpp +6 -6
  103. package/src/duckdb/src/include/duckdb/planner/expression_binder/index_binder.hpp +4 -4
  104. package/src/duckdb/src/include/duckdb/planner/expression_binder/where_binder.hpp +2 -2
  105. package/src/duckdb/src/include/duckdb/planner/operator/logical_create.hpp +3 -2
  106. package/src/duckdb/src/include/duckdb/planner/operator/logical_create_table.hpp +2 -2
  107. package/src/duckdb/src/include/duckdb/planner/operator/logical_delete.hpp +2 -2
  108. package/src/duckdb/src/include/duckdb/planner/operator/logical_insert.hpp +2 -2
  109. package/src/duckdb/src/include/duckdb/planner/operator/logical_update.hpp +2 -2
  110. package/src/duckdb/src/include/duckdb/planner/parsed_data/bound_create_function_info.hpp +3 -2
  111. package/src/duckdb/src/include/duckdb/planner/parsed_data/bound_create_table_info.hpp +3 -2
  112. package/src/duckdb/src/include/duckdb/planner/table_binding.hpp +6 -5
  113. package/src/duckdb/src/include/duckdb/planner/tableref/bound_basetableref.hpp +2 -2
  114. package/src/duckdb/src/include/duckdb/storage/compression/chimp/chimp_compress.hpp +3 -7
  115. package/src/duckdb/src/include/duckdb/storage/compression/patas/patas_compress.hpp +3 -7
  116. package/src/duckdb/src/include/duckdb/storage/data_table.hpp +1 -1
  117. package/src/duckdb/src/include/duckdb/storage/storage_manager.hpp +1 -1
  118. package/src/duckdb/src/include/duckdb/storage/table/column_data_checkpointer.hpp +2 -1
  119. package/src/duckdb/src/include/duckdb/storage/table/column_segment.hpp +2 -2
  120. package/src/duckdb/src/include/duckdb/storage/table/row_group.hpp +1 -1
  121. package/src/duckdb/src/include/duckdb/storage/table/row_group_collection.hpp +1 -1
  122. package/src/duckdb/src/include/duckdb/storage/table/update_segment.hpp +4 -4
  123. package/src/duckdb/src/include/duckdb/storage/write_ahead_log.hpp +3 -3
  124. package/src/duckdb/src/include/duckdb/transaction/cleanup_state.hpp +3 -3
  125. package/src/duckdb/src/include/duckdb/transaction/commit_state.hpp +5 -5
  126. package/src/duckdb/src/include/duckdb/transaction/duck_transaction.hpp +3 -3
  127. package/src/duckdb/src/include/duckdb/transaction/local_storage.hpp +31 -30
  128. package/src/duckdb/src/include/duckdb/transaction/transaction_data.hpp +2 -1
  129. package/src/duckdb/src/include/duckdb/transaction/undo_buffer.hpp +1 -1
  130. package/src/duckdb/src/main/client_context.cpp +1 -1
  131. package/src/duckdb/src/main/query_profiler.cpp +24 -22
  132. package/src/duckdb/src/parallel/executor.cpp +55 -49
  133. package/src/duckdb/src/parallel/meta_pipeline.cpp +5 -5
  134. package/src/duckdb/src/parallel/pipeline.cpp +43 -26
  135. package/src/duckdb/src/parallel/pipeline_executor.cpp +22 -22
  136. package/src/duckdb/src/planner/bind_context.cpp +67 -71
  137. package/src/duckdb/src/planner/binder/statement/bind_create.cpp +6 -7
  138. package/src/duckdb/src/planner/binder/statement/bind_create_table.cpp +6 -8
  139. package/src/duckdb/src/planner/binder/statement/bind_delete.cpp +3 -4
  140. package/src/duckdb/src/planner/binder/statement/bind_insert.cpp +17 -18
  141. package/src/duckdb/src/planner/binder/statement/bind_update.cpp +16 -17
  142. package/src/duckdb/src/planner/binder/statement/bind_vacuum.cpp +6 -5
  143. package/src/duckdb/src/planner/binder/tableref/bind_basetableref.cpp +9 -9
  144. package/src/duckdb/src/planner/binder/tableref/bind_joinref.cpp +20 -18
  145. package/src/duckdb/src/planner/binder/tableref/plan_joinref.cpp +1 -1
  146. package/src/duckdb/src/planner/binder.cpp +4 -4
  147. package/src/duckdb/src/planner/expression_binder/index_binder.cpp +2 -1
  148. package/src/duckdb/src/planner/expression_binder/where_binder.cpp +3 -2
  149. package/src/duckdb/src/planner/operator/logical_create_table.cpp +1 -1
  150. package/src/duckdb/src/planner/operator/logical_delete.cpp +5 -5
  151. package/src/duckdb/src/planner/operator/logical_insert.cpp +6 -7
  152. package/src/duckdb/src/planner/operator/logical_update.cpp +6 -7
  153. package/src/duckdb/src/planner/parsed_data/bound_create_table_info.cpp +4 -5
  154. package/src/duckdb/src/planner/table_binding.cpp +6 -5
  155. package/src/duckdb/src/storage/compression/bitpacking.cpp +5 -6
  156. package/src/duckdb/src/storage/compression/dictionary_compression.cpp +5 -6
  157. package/src/duckdb/src/storage/compression/fsst.cpp +3 -5
  158. package/src/duckdb/src/storage/compression/rle.cpp +4 -6
  159. package/src/duckdb/src/storage/data_table.cpp +27 -28
  160. package/src/duckdb/src/storage/local_storage.cpp +70 -68
  161. package/src/duckdb/src/storage/storage_manager.cpp +12 -13
  162. package/src/duckdb/src/storage/table/column_checkpoint_state.cpp +2 -2
  163. package/src/duckdb/src/storage/table/column_data.cpp +2 -2
  164. package/src/duckdb/src/storage/table/column_data_checkpointer.cpp +18 -6
  165. package/src/duckdb/src/storage/table/column_segment.cpp +23 -24
  166. package/src/duckdb/src/storage/table/row_group.cpp +3 -3
  167. package/src/duckdb/src/storage/table/row_group_collection.cpp +1 -1
  168. package/src/duckdb/src/storage/table/update_segment.cpp +15 -15
  169. package/src/duckdb/src/storage/wal_replay.cpp +1 -1
  170. package/src/duckdb/src/transaction/cleanup_state.cpp +10 -10
  171. package/src/duckdb/src/transaction/commit_state.cpp +19 -19
  172. package/src/duckdb/src/transaction/duck_transaction.cpp +7 -7
  173. package/src/duckdb/src/transaction/rollback_state.cpp +1 -1
  174. package/src/duckdb/src/transaction/undo_buffer.cpp +2 -1
  175. package/src/duckdb/ub_src_common_adbc.cpp +4 -0
  176. package/src/duckdb/src/include/duckdb/common/single_thread_ptr.hpp +0 -185
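The most substantial additions in the list above are the vendored ADBC (Arrow Database Connectivity) sources: items 7 and 8 (adbc.cpp, driver_manager.cpp) and the headers in items 71-74. For orientation only, the sketch below shows how the standard ADBC 1.0 C API declared in the new adbc.h is typically driven through the driver manager. The include path mirrors the header location in this package; the option keys ("driver", "path") and the shared-library path are illustrative assumptions, not taken from this diff, and error handling is reduced to asserts.

// Minimal sketch of the ADBC C API flow exposed by the newly vendored headers.
#include "duckdb/common/adbc/adbc.h"

#include <cassert>
#include <cstdint>

int main() {
    AdbcError error = {};

    // Open a database handle through the driver manager.
    AdbcDatabase database = {};
    assert(AdbcDatabaseNew(&database, &error) == ADBC_STATUS_OK);
    // Assumed option keys: which driver library to load and which database to open.
    AdbcDatabaseSetOption(&database, "driver", "/path/to/libduckdb.so", &error);
    AdbcDatabaseSetOption(&database, "path", ":memory:", &error);
    assert(AdbcDatabaseInit(&database, &error) == ADBC_STATUS_OK);

    // Connections and statements hang off the database handle.
    AdbcConnection connection = {};
    AdbcConnectionNew(&connection, &error);
    AdbcConnectionInit(&connection, &database, &error);

    AdbcStatement statement = {};
    AdbcStatementNew(&connection, &statement, &error);
    AdbcStatementSetSqlQuery(&statement, "SELECT 42", &error);

    // Results come back as an Arrow C stream.
    ArrowArrayStream stream = {};
    int64_t rows_affected = -1;
    AdbcStatementExecuteQuery(&statement, &stream, &rows_affected, &error);
    if (stream.release) {
        stream.release(&stream);
    }

    // Tear everything down in reverse order.
    AdbcStatementRelease(&statement, &error);
    AdbcConnectionRelease(&connection, &error);
    AdbcDatabaseRelease(&database, &error);
    return 0;
}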
@@ -141,7 +141,7 @@ int ResultArrowArrayStreamWrapper::MyStreamGetNext(struct ArrowArrayStream *stre
  }

  void ResultArrowArrayStreamWrapper::MyStreamRelease(struct ArrowArrayStream *stream) {
-     if (!stream->release) {
+     if (!stream || !stream->release) {
          return;
      }
      stream->release = nullptr;
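The added guard above follows the Arrow C stream interface contract: a released stream is marked by setting release to nullptr, and the callback must tolerate being invoked on an already-released (or here, null) stream. A self-contained sketch of the consumer side; the struct is the standard Arrow C stream definition abridged to what is used here, and ConsumeAndRelease is a hypothetical caller, not code from this diff.

struct ArrowSchema;
struct ArrowArray;

// Standard Arrow C stream interface (fields as specified by Arrow).
struct ArrowArrayStream {
    int (*get_schema)(struct ArrowArrayStream *, struct ArrowSchema *out);
    int (*get_next)(struct ArrowArrayStream *, struct ArrowArray *out);
    const char *(*get_last_error)(struct ArrowArrayStream *);
    void (*release)(struct ArrowArrayStream *);
    void *private_data;
};

// Hypothetical consumer: with the new null checks in MyStreamRelease, calling
// release twice, or with a null stream pointer, is a harmless no-op.
void ConsumeAndRelease(ArrowArrayStream *stream) {
    if (stream && stream->release) {
        stream->release(stream); // the callback sets stream->release = nullptr
    }
    // a second call would see release == nullptr and return immediately
}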
@@ -403,10 +403,10 @@ void TreeChildrenIterator::Iterate(const PhysicalOperator &op,
  }

  struct PipelineRenderNode {
-     explicit PipelineRenderNode(PhysicalOperator &op) : op(op) {
+     explicit PipelineRenderNode(const PhysicalOperator &op) : op(op) {
      }

-     PhysicalOperator &op;
+     const PhysicalOperator &op;
      unique_ptr<PipelineRenderNode> child;
  };

@@ -542,7 +542,7 @@ unique_ptr<RenderTree> TreeRenderer::CreateTree(const Pipeline &op) {
      D_ASSERT(!operators.empty());
      unique_ptr<PipelineRenderNode> node;
      for (auto &op : operators) {
-         auto new_node = make_uniq<PipelineRenderNode>(*op);
+         auto new_node = make_uniq<PipelineRenderNode>(op.get());
          new_node->child = std::move(node);
          node = std::move(new_node);
      }
@@ -5,7 +5,8 @@

  namespace duckdb {

- ConflictManager::ConflictManager(VerifyExistenceType lookup_type, idx_t input_size, ConflictInfo *conflict_info)
+ ConflictManager::ConflictManager(VerifyExistenceType lookup_type, idx_t input_size,
+                                  optional_ptr<ConflictInfo> conflict_info)
      : lookup_type(lookup_type), input_size(input_size), conflict_info(conflict_info), conflicts(input_size, false),
        mode(ConflictManagerMode::THROW) {
  }
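The constructor now receives the conflict info through duckdb's optional_ptr wrapper instead of a raw ConflictInfo *. The point of the wrapper is to state "non-owning and possibly null" in the signature; a rough standalone approximation of the idea (not duckdb's exact class) is sketched below.

#include <cassert>

// Standalone approximation of an optional_ptr-style wrapper: a non-owning,
// possibly-null handle that asserts on accidental null dereference.
template <class T>
class optional_ptr_like {
public:
    optional_ptr_like() : ptr(nullptr) {
    }
    optional_ptr_like(T *ptr_p) : ptr(ptr_p) { // implicit on purpose, mirrors raw-pointer call sites
    }
    explicit operator bool() const {
        return ptr != nullptr;
    }
    T &operator*() const {
        assert(ptr); // dereferencing a null handle is a programming error
        return *ptr;
    }
    T *operator->() const {
        assert(ptr);
        return ptr;
    }
    T *get() const {
        return ptr;
    }

private:
    T *ptr;
};

struct ConflictInfoStub {};

// Mirrors the new parameter: callers may still pass nullptr, but every use
// site has to check before dereferencing.
void UseConflictInfo(optional_ptr_like<ConflictInfoStub> conflict_info) {
    if (conflict_info) {
        ConflictInfoStub &info = *conflict_info;
        (void)info;
    }
}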
@@ -80,7 +80,7 @@ void ColumnBindingResolver::VisitOperator(LogicalOperator &op) {
      if (insert_op.action_type != OnConflictAction::THROW) {
          // Get the bindings from the children
          VisitOperatorChildren(op);
-         auto column_count = insert_op.table->GetColumns().PhysicalColumnCount();
+         auto column_count = insert_op.table.GetColumns().PhysicalColumnCount();
          auto dummy_bindings = LogicalOperator::GenerateColumnBindings(insert_op.excluded_table_index, column_count);
          // Now insert our dummy bindings at the start of the bindings,
          // so the first 'column_count' indices of the chunk are reserved for our 'excluded' columns
@@ -342,7 +342,7 @@ void PhysicalUngroupedAggregate::Combine(ExecutionContext &context, GlobalSinkSt
      }

      auto &client_profiler = QueryProfiler::Get(context.client);
-     context.thread.profiler.Flush(this, &source.child_executor, "child_executor", 0);
+     context.thread.profiler.Flush(*this, source.child_executor, "child_executor", 0);
      client_profiler.Flush(context.thread.profiler);
  }
@@ -30,8 +30,8 @@ public:
      SelectionVector sel;

  public:
-     void Finalize(PhysicalOperator *op, ExecutionContext &context) override {
-         context.thread.profiler.Flush(op, &executor, "filter", 0);
+     void Finalize(const PhysicalOperator &op, ExecutionContext &context) override {
+         context.thread.profiler.Flush(op, executor, "filter", 0);
      }
  };
@@ -8,8 +8,8 @@ PhysicalExecute::PhysicalExecute(PhysicalOperator &plan)
      : PhysicalOperator(PhysicalOperatorType::EXECUTE, plan.types, -1), plan(plan) {
  }

- vector<PhysicalOperator *> PhysicalExecute::GetChildren() const {
-     return {&plan};
+ vector<const_reference<PhysicalOperator>> PhysicalExecute::GetChildren() const {
+     return {plan};
  }

  void PhysicalExecute::BuildPipelines(Pipeline &current, MetaPipeline &meta_pipeline) {
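GetChildren (and, further down, GetSources) now returns vector<const_reference<PhysicalOperator>> rather than vector<PhysicalOperator *>. Assuming const_reference is an alias in the spirit of std::reference_wrapper<const T> (an assumption about the helper touched in common/helper.hpp, not confirmed by this diff), the pattern looks like the sketch below; Node and Visit are hypothetical stand-ins.

#include <functional>
#include <vector>

// Assumed alias: a copyable, never-null handle to a const object.
template <class T>
using const_reference = std::reference_wrapper<const T>;

struct Node {
    std::vector<Node *> children; // ownership lives elsewhere

    std::vector<const_reference<Node>> GetChildren() const {
        std::vector<const_reference<Node>> result;
        for (auto *child : children) {
            result.push_back(*child); // implicit conversion from Node & to const_reference<Node>
        }
        return result;
    }
};

// Callers iterate by reference; unlike the old vector<PhysicalOperator *>
// return type, there is no null state to check.
inline void Visit(const Node &node) {
    for (const Node &child : node.GetChildren()) {
        Visit(child);
    }
}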
@@ -31,8 +31,8 @@ unique_ptr<PhysicalResultCollector> PhysicalResultCollector::GetResultCollector(
      }
  }

- vector<PhysicalOperator *> PhysicalResultCollector::GetChildren() const {
-     return {&plan};
+ vector<const_reference<PhysicalOperator>> PhysicalResultCollector::GetChildren() const {
+     return {plan};
  }

  void PhysicalResultCollector::BuildPipelines(Pipeline &current, MetaPipeline &meta_pipeline) {
@@ -43,11 +43,11 @@ void PhysicalResultCollector::BuildPipelines(Pipeline &current, MetaPipeline &me

      // single operator: the operator becomes the data source of the current pipeline
      auto &state = meta_pipeline.GetState();
-     state.SetPipelineSource(current, this);
+     state.SetPipelineSource(current, *this);

      // we create a new pipeline starting from the child
-     auto child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, this);
-     child_meta_pipeline->Build(plan);
+     auto &child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, *this);
+     child_meta_pipeline.Build(plan);
  }

  } // namespace duckdb
@@ -140,7 +140,7 @@ void PhysicalCrossProduct::BuildPipelines(Pipeline &current, MetaPipeline &meta_
      PhysicalJoin::BuildJoinPipelines(current, meta_pipeline, *this);
  }

- vector<const PhysicalOperator *> PhysicalCrossProduct::GetSources() const {
+ vector<const_reference<PhysicalOperator>> PhysicalCrossProduct::GetSources() const {
      return children[0]->GetSources();
  }
@@ -12,7 +12,7 @@
  namespace duckdb {

  PhysicalDelimJoin::PhysicalDelimJoin(vector<LogicalType> types, unique_ptr<PhysicalOperator> original_join,
-                                      vector<PhysicalOperator *> delim_scans, idx_t estimated_cardinality)
+                                      vector<const_reference<PhysicalOperator>> delim_scans, idx_t estimated_cardinality)
      : PhysicalOperator(PhysicalOperatorType::DELIM_JOIN, std::move(types), estimated_cardinality),
        join(std::move(original_join)), delim_scans(std::move(delim_scans)) {
      D_ASSERT(join->children.size() == 2);
@@ -27,13 +27,13 @@ PhysicalDelimJoin::PhysicalDelimJoin(vector<LogicalType> types, unique_ptr<Physi
      join->children[0] = std::move(cached_chunk_scan);
  }

- vector<PhysicalOperator *> PhysicalDelimJoin::GetChildren() const {
-     vector<PhysicalOperator *> result;
+ vector<const_reference<PhysicalOperator>> PhysicalDelimJoin::GetChildren() const {
+     vector<const_reference<PhysicalOperator>> result;
      for (auto &child : children) {
-         result.push_back(child.get());
+         result.push_back(*child);
      }
-     result.push_back(join.get());
-     result.push_back(distinct.get());
+     result.push_back(*join);
+     result.push_back(*distinct);
      return result;
  }

@@ -46,7 +46,7 @@ public:
      : lhs_data(context, delim_join.children[0]->GetTypes()) {
          D_ASSERT(delim_join.delim_scans.size() > 0);
          // set up the delim join chunk to scan in the original join
-         auto &cached_chunk_scan = (PhysicalColumnDataScan &)*delim_join.join->children[0];
+         auto &cached_chunk_scan = delim_join.join->children[0]->Cast<PhysicalColumnDataScan>();
          cached_chunk_scan.collection = &lhs_data;
      }

@@ -124,8 +124,8 @@ void PhysicalDelimJoin::BuildPipelines(Pipeline &current, MetaPipeline &meta_pip
      op_state.reset();
      sink_state.reset();

-     auto child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, this);
-     child_meta_pipeline->Build(*children[0]);
+     auto &child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, *this);
+     child_meta_pipeline.Build(*children[0]);

      if (type == PhysicalOperatorType::DELIM_JOIN) {
          // recurse into the actual join
@@ -134,7 +134,8 @@ void PhysicalDelimJoin::BuildPipelines(Pipeline &current, MetaPipeline &meta_pip
          // we add an entry to the mapping of (PhysicalOperator*) -> (Pipeline*)
          auto &state = meta_pipeline.GetState();
          for (auto &delim_scan : delim_scans) {
-             state.delim_join_dependencies[delim_scan] = child_meta_pipeline->GetBasePipeline().get();
+             state.delim_join_dependencies.insert(
+                 make_pair(delim_scan, reference<Pipeline>(*child_meta_pipeline.GetBasePipeline())));
          }
          join->BuildPipelines(current, meta_pipeline);
      }
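The switch above from delim_join_dependencies[delim_scan] = ... to an explicit insert(make_pair(...)) is what the new mapped type forces: a reference wrapper has no default constructor, and std::map::operator[] must be able to default-construct a missing value. A small self-contained illustration of that constraint; the types are stand-ins and the key is simplified to a plain pointer.

#include <functional>
#include <map>

struct Pipeline {};
struct Operator {};

int main() {
    // Mapped type is a reference wrapper: it has no default constructor, so
    // operator[] (which default-constructs missing values) does not compile.
    std::map<const Operator *, std::reference_wrapper<Pipeline>> dependencies;

    Operator delim_scan;
    Pipeline base_pipeline;

    // dependencies[&delim_scan] = std::ref(base_pipeline);                    // error: no default ctor
    dependencies.insert(std::make_pair(&delim_scan, std::ref(base_pipeline))); // OK
    // or: dependencies.emplace(&delim_scan, base_pipeline);

    return dependencies.count(&delim_scan) == 1 ? 0 : 1;
}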
@@ -220,7 +220,7 @@ void PhysicalHashJoin::Combine(ExecutionContext &context, GlobalSinkState &gstat
          gstate.local_hash_tables.push_back(std::move(lstate.hash_table));
      }
      auto &client_profiler = QueryProfiler::Get(context.client);
-     context.thread.profiler.Flush(this, &lstate.build_executor, "build_executor", 1);
+     context.thread.profiler.Flush(*this, lstate.build_executor, "build_executor", 1);
      client_profiler.Flush(context.thread.profiler);
  }

@@ -429,8 +429,8 @@ public:
      DataChunk spill_chunk;

  public:
-     void Finalize(PhysicalOperator *op, ExecutionContext &context) override {
-         context.thread.profiler.Flush(op, &probe_executor, "probe_executor", 0);
+     void Finalize(const PhysicalOperator &op, ExecutionContext &context) override {
+         context.thread.profiler.Flush(op, probe_executor, "probe_executor", 0);
      }
  };
@@ -150,7 +150,7 @@ void PhysicalIEJoin::Combine(ExecutionContext &context, GlobalSinkState &gstate_
      gstate.tables[gstate.child]->Combine(lstate.table);
      auto &client_profiler = QueryProfiler::Get(context.client);

-     context.thread.profiler.Flush(this, &lstate.table.executor, gstate.child ? "rhs_executor" : "lhs_executor", 1);
+     context.thread.profiler.Flush(*this, lstate.table.executor, gstate.child ? "rhs_executor" : "lhs_executor", 1);
      client_profiler.Flush(context.thread.profiler);
  }

@@ -1011,24 +1011,24 @@
      }

      // becomes a source after both children fully sink their data
-     meta_pipeline.GetState().SetPipelineSource(current, this);
+     meta_pipeline.GetState().SetPipelineSource(current, *this);

      // Create one child meta pipeline that will hold the LHS and RHS pipelines
-     auto child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, this);
-     auto lhs_pipeline = child_meta_pipeline->GetBasePipeline();
-     auto rhs_pipeline = child_meta_pipeline->CreatePipeline();
+     auto &child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, *this);
+     auto lhs_pipeline = child_meta_pipeline.GetBasePipeline();
+     auto rhs_pipeline = child_meta_pipeline.CreatePipeline();

      // Build out LHS
-     children[0]->BuildPipelines(*lhs_pipeline, *child_meta_pipeline);
+     children[0]->BuildPipelines(*lhs_pipeline, child_meta_pipeline);

      // RHS depends on everything in LHS
-     child_meta_pipeline->AddDependenciesFrom(rhs_pipeline, lhs_pipeline.get(), true);
+     child_meta_pipeline.AddDependenciesFrom(rhs_pipeline, lhs_pipeline.get(), true);

      // Build out RHS
-     children[1]->BuildPipelines(*rhs_pipeline, *child_meta_pipeline);
+     children[1]->BuildPipelines(*rhs_pipeline, child_meta_pipeline);

      // Despite having the same sink, RHS needs its own PipelineFinishEvent
-     child_meta_pipeline->AddFinishEvent(rhs_pipeline);
+     child_meta_pipeline.AddFinishEvent(rhs_pipeline);
  }

  } // namespace duckdb
@@ -52,8 +52,8 @@ public:
      unique_ptr<ColumnFetchState> fetch_state;

  public:
-     void Finalize(PhysicalOperator *op, ExecutionContext &context) override {
-         context.thread.profiler.Flush(op, &probe_executor, "probe_executor", 0);
+     void Finalize(const PhysicalOperator &op, ExecutionContext &context) override {
+         context.thread.profiler.Flush(op, probe_executor, "probe_executor", 0);
      }
  };

@@ -229,11 +229,11 @@ void PhysicalIndexJoin::BuildPipelines(Pipeline &current, MetaPipeline &meta_pip
      // index join: we only continue into the LHS
      // the right side is probed by the index join
      // so we don't need to do anything in the pipeline with this child
-     meta_pipeline.GetState().AddPipelineOperator(current, this);
+     meta_pipeline.GetState().AddPipelineOperator(current, *this);
      children[0]->BuildPipelines(current, meta_pipeline);
  }

- vector<const PhysicalOperator *> PhysicalIndexJoin::GetSources() const {
+ vector<const_reference<PhysicalOperator>> PhysicalIndexJoin::GetSources() const {
      return children[0]->GetSources();
  }
@@ -32,7 +32,7 @@ void PhysicalJoin::BuildJoinPipelines(Pipeline &current, MetaPipeline &meta_pipe

      // 'current' is the probe pipeline: add this operator
      auto &state = meta_pipeline.GetState();
-     state.AddPipelineOperator(current, &op);
+     state.AddPipelineOperator(current, op);

      // save the last added pipeline to set up dependencies later (in case we need to add a child pipeline)
      vector<shared_ptr<Pipeline>> pipelines_so_far;
@@ -40,8 +40,8 @@ void PhysicalJoin::BuildJoinPipelines(Pipeline &current, MetaPipeline &meta_pipe
      auto last_pipeline = pipelines_so_far.back().get();

      // on the RHS (build side), we construct a child MetaPipeline with this operator as its sink
-     auto child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, &op);
-     child_meta_pipeline->Build(*op.children[1]);
+     auto &child_meta_pipeline = meta_pipeline.CreateChildMetaPipeline(current, op);
+     child_meta_pipeline.Build(*op.children[1]);

      // continue building the current pipeline on the LHS (probe side)
      op.children[0]->BuildPipelines(current, meta_pipeline);
@@ -49,7 +49,7 @@ void PhysicalJoin::BuildJoinPipelines(Pipeline &current, MetaPipeline &meta_pipe
      switch (op.type) {
      case PhysicalOperatorType::POSITIONAL_JOIN:
          // Positional joins are always outer
-         meta_pipeline.CreateChildPipeline(current, &op, last_pipeline);
+         meta_pipeline.CreateChildPipeline(current, op, last_pipeline);
          return;
      case PhysicalOperatorType::CROSS_PRODUCT:
          return;
@@ -65,7 +65,7 @@ void PhysicalJoin::BuildJoinPipelines(Pipeline &current, MetaPipeline &meta_pipe
      }

      if (add_child_pipeline) {
-         meta_pipeline.CreateChildPipeline(current, &op, last_pipeline);
+         meta_pipeline.CreateChildPipeline(current, op, last_pipeline);
      }
  }

@@ -73,10 +73,10 @@ void PhysicalJoin::BuildPipelines(Pipeline &current, MetaPipeline &meta_pipeline
      PhysicalJoin::BuildJoinPipelines(current, meta_pipeline, *this);
  }

- vector<const PhysicalOperator *> PhysicalJoin::GetSources() const {
+ vector<const_reference<PhysicalOperator>> PhysicalJoin::GetSources() const {
      auto result = children[0]->GetSources();
      if (IsSource()) {
-         result.push_back(this);
+         result.push_back(*this);
      }
      return result;
  }
@@ -195,7 +195,7 @@ void PhysicalNestedLoopJoin::Combine(ExecutionContext &context, GlobalSinkState
      auto &state = lstate.Cast<NestedLoopJoinLocalState>();
      auto &client_profiler = QueryProfiler::Get(context.client);

-     context.thread.profiler.Flush(this, &state.rhs_executor, "rhs_executor", 1);
+     context.thread.profiler.Flush(*this, state.rhs_executor, "rhs_executor", 1);
      client_profiler.Flush(context.thread.profiler);
  }

@@ -255,8 +255,8 @@ public:
      OuterJoinMarker left_outer;

  public:
-     void Finalize(PhysicalOperator *op, ExecutionContext &context) override {
-         context.thread.profiler.Flush(op, &lhs_executor, "lhs_executor", 0);
+     void Finalize(const PhysicalOperator &op, ExecutionContext &context) override {
+         context.thread.profiler.Flush(op, lhs_executor, "lhs_executor", 0);
      }
  };
@@ -125,7 +125,7 @@ void PhysicalPiecewiseMergeJoin::Combine(ExecutionContext &context, GlobalSinkSt
      gstate.table->Combine(lstate.table);
      auto &client_profiler = QueryProfiler::Get(context.client);

-     context.thread.profiler.Flush(this, &lstate.table.executor, "rhs_executor", 1);
+     context.thread.profiler.Flush(*this, lstate.table.executor, "rhs_executor", 1);
      client_profiler.Flush(context.thread.profiler);
  }

@@ -245,9 +245,9 @@ public:
          lhs_local_table->executor.Execute(lhs_payload, lhs_local_table->keys);
      }

-     void Finalize(PhysicalOperator *op, ExecutionContext &context) override {
+     void Finalize(const PhysicalOperator &op, ExecutionContext &context) override {
          if (lhs_local_table) {
-             context.thread.profiler.Flush(op, &lhs_local_table->executor, "lhs_executor", 0);
+             context.thread.profiler.Flush(op, lhs_local_table->executor, "lhs_executor", 0);
          }
      }
  };
@@ -183,10 +183,10 @@ void PhysicalPositionalJoin::BuildPipelines(Pipeline &current, MetaPipeline &met
      PhysicalJoin::BuildJoinPipelines(current, meta_pipeline, *this);
  }

- vector<const PhysicalOperator *> PhysicalPositionalJoin::GetSources() const {
+ vector<const_reference<PhysicalOperator>> PhysicalPositionalJoin::GetSources() const {
      auto result = children[0]->GetSources();
      if (IsSource()) {
-         result.push_back(this);
+         result.push_back(*this);
      }
      return result;
  }
@@ -65,8 +65,8 @@ void BufferedCSVReaderOptions::SetHeader(bool input) {
      this->has_header = true;
  }

- void BufferedCSVReaderOptions::SetCompression(const string &compression) {
-     this->compression = FileCompressionTypeFromString(compression);
+ void BufferedCSVReaderOptions::SetCompression(const string &compression_p) {
+     this->compression = FileCompressionTypeFromString(compression_p);
  }

  void BufferedCSVReaderOptions::SetEscape(const string &input) {
@@ -82,8 +82,8 @@ void BufferedCSVReaderOptions::SetDelimiter(const string &input) {
      }
  }

- void BufferedCSVReaderOptions::SetQuote(const string &quote) {
-     this->quote = quote;
+ void BufferedCSVReaderOptions::SetQuote(const string &quote_p) {
+     this->quote = quote_p;
      this->has_quote = true;
  }

@@ -101,12 +101,10 @@ void BufferedCSVReaderOptions::SetNewline(const string &input) {
  void BufferedCSVReaderOptions::SetDateFormat(LogicalTypeId type, const string &format, bool read_format) {
      string error;
      if (read_format) {
-         auto &date_format = this->date_format[type];
-         error = StrTimeFormat::ParseFormatSpecifier(format, date_format);
-         date_format.format_specifier = format;
+         error = StrTimeFormat::ParseFormatSpecifier(format, date_format[type]);
+         date_format[type].format_specifier = format;
      } else {
-         auto &date_format = this->write_date_format[type];
-         error = StrTimeFormat::ParseFormatSpecifier(format, date_format);
+         error = StrTimeFormat::ParseFormatSpecifier(format, write_date_format[type]);
      }
      if (!error.empty()) {
          throw InvalidInputException("Could not parse DATEFORMAT: %s", error.c_str());
@@ -198,6 +196,7 @@ void BufferedCSVReaderOptions::SetWriteOption(const string &loption, const Value
              format = "%Y-%m-%dT%H:%M:%S.%fZ";
          }
          SetDateFormat(LogicalTypeId::TIMESTAMP, format, false);
+         SetDateFormat(LogicalTypeId::TIMESTAMP_TZ, format, false);
      } else {
          throw BinderException("Unrecognized option CSV writer \"%s\"", loption);
      }
@@ -11,11 +11,11 @@

  namespace duckdb {

- PhysicalBatchInsert::PhysicalBatchInsert(vector<LogicalType> types, TableCatalogEntry *table,
+ PhysicalBatchInsert::PhysicalBatchInsert(vector<LogicalType> types, TableCatalogEntry &table,
                                           physical_index_vector_t<idx_t> column_index_map,
                                           vector<unique_ptr<Expression>> bound_defaults, idx_t estimated_cardinality)
      : PhysicalOperator(PhysicalOperatorType::BATCH_INSERT, std::move(types), estimated_cardinality),
-       column_index_map(std::move(column_index_map)), insert_table(table), insert_types(table->GetTypes()),
+       column_index_map(std::move(column_index_map)), insert_table(&table), insert_types(table.GetTypes()),
        bound_defaults(std::move(bound_defaults)) {
  }

@@ -96,11 +96,11 @@ public:

  class BatchInsertGlobalState : public GlobalSinkState {
  public:
-     explicit BatchInsertGlobalState() : insert_count(0) {
+     explicit BatchInsertGlobalState(DuckTableEntry &table) : table(table), insert_count(0) {
      }

      mutex lock;
-     optional_ptr<DuckTableEntry> table;
+     DuckTableEntry &table;
      idx_t insert_count;
      map<idx_t, unique_ptr<RowGroupCollection>> collections;

@@ -265,18 +265,19 @@ public:
  };

  unique_ptr<GlobalSinkState> PhysicalBatchInsert::GetGlobalSinkState(ClientContext &context) const {
-     auto result = make_uniq<BatchInsertGlobalState>();
+     optional_ptr<TableCatalogEntry> table;
      if (info) {
          // CREATE TABLE AS
          D_ASSERT(!insert_table);
          auto &catalog = *schema->catalog;
-         result->table = (DuckTableEntry *)catalog.CreateTable(catalog.GetCatalogTransaction(context),
-                                                               *schema.get_mutable(), info.get());
+         table = (TableCatalogEntry *)catalog.CreateTable(catalog.GetCatalogTransaction(context), *schema.get_mutable(),
+                                                          info.get());
      } else {
          D_ASSERT(insert_table);
          D_ASSERT(insert_table->IsDuckTable());
-         result->table = (DuckTableEntry *)insert_table;
+         table = insert_table.get_mutable();
      }
+     auto result = make_uniq<BatchInsertGlobalState>(table->Cast<DuckTableEntry>());
      return std::move(result);
  }

@@ -289,14 +290,14 @@ SinkResultType PhysicalBatchInsert::Sink(ExecutionContext &context, GlobalSinkSt
      auto &gstate = state.Cast<BatchInsertGlobalState>();
      auto &lstate = lstate_p.Cast<BatchInsertLocalState>();

-     auto table = gstate.table;
-     PhysicalInsert::ResolveDefaults(*table, chunk, column_index_map, lstate.default_executor, lstate.insert_chunk);
+     auto &table = gstate.table;
+     PhysicalInsert::ResolveDefaults(table, chunk, column_index_map, lstate.default_executor, lstate.insert_chunk);

      if (!lstate.current_collection) {
          lock_guard<mutex> l(gstate.lock);
          // no collection yet: create a new one
-         lstate.CreateNewCollection(*table, insert_types);
-         lstate.writer = gstate.table->GetStorage().CreateOptimisticWriter(context.client);
+         lstate.CreateNewCollection(table, insert_types);
+         lstate.writer = &table.GetStorage().CreateOptimisticWriter(context.client);
      } else if (lstate.current_index != lstate.batch_index) {
          // batch index has changed: move the old collection to the global state and create a new collection
          TransactionData tdata(0, 0);
@@ -304,11 +305,11 @@
          lstate.FlushToDisk();
          gstate.AddCollection(context.client, lstate.current_index, std::move(lstate.current_collection), lstate.writer,
                               &lstate.written_to_disk);
-         lstate.CreateNewCollection(*table, insert_types);
+         lstate.CreateNewCollection(table, insert_types);
      }
      lstate.current_index = lstate.batch_index;

-     table->GetStorage().VerifyAppendConstraints(*table, context.client, lstate.insert_chunk);
+     table.GetStorage().VerifyAppendConstraints(table, context.client, lstate.insert_chunk);

      auto new_row_group = lstate.current_collection->Append(lstate.insert_chunk, lstate.current_append_state);
      if (new_row_group) {
@@ -323,7 +324,7 @@ void PhysicalBatchInsert::Combine(ExecutionContext &context, GlobalSinkState &gs
      auto &gstate = gstate_p.Cast<BatchInsertGlobalState>();
      auto &lstate = lstate_p.Cast<BatchInsertLocalState>();
      auto &client_profiler = QueryProfiler::Get(context.client);
-     context.thread.profiler.Flush(this, &lstate.default_executor, "default_executor", 1);
+     context.thread.profiler.Flush(*this, lstate.default_executor, "default_executor", 1);
      client_profiler.Flush(context.thread.profiler);

      if (!lstate.current_collection) {
@@ -346,7 +347,7 @@ SinkFinalizeType PhysicalBatchInsert::Finalize(Pipeline &pipeline, Event &event,
      vector<unique_ptr<CollectionMerger>> mergers;
      unique_ptr<CollectionMerger> current_merger;

-     auto &storage = gstate.table->GetStorage();
+     auto &storage = gstate.table.GetStorage();
      for (auto &collection : gstate.collections) {
          if (collection.second->GetTotalRows() < LocalStorage::MERGE_THRESHOLD) {
              // this collection has very few rows: add it to the merge set
@@ -374,11 +375,11 @@
      // now that we have created all of the mergers, perform the actual merging
      vector<unique_ptr<RowGroupCollection>> final_collections;
      final_collections.reserve(mergers.size());
-     auto writer = storage.CreateOptimisticWriter(context);
+     auto &writer = storage.CreateOptimisticWriter(context);
      for (auto &merger : mergers) {
-         final_collections.push_back(merger->Flush(*writer));
+         final_collections.push_back(merger->Flush(writer));
      }
-     writer->FinalFlush();
+     writer.FinalFlush();

      // finally, merge the row groups into the local storage
      for (auto &collection : final_collections) {
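A recurring pattern in the PhysicalBatchInsert changes above is replacing a nullable handle that was filled in later (optional_ptr<DuckTableEntry> table) with a reference member bound once in GetGlobalSinkState. Because a reference member must be initialized at construction, the table is resolved before the state object is built; a compact sketch of the pattern with stand-in types:

#include <memory>

struct TableStub {};

// Old shape: a nullable handle assigned after construction; every later use
// has to assume it was actually set.
struct GlobalStateOld {
    TableStub *table = nullptr;
};

// New shape: the table is resolved first and bound at construction, so the
// member can never be null for the lifetime of the state object.
struct GlobalStateNew {
    explicit GlobalStateNew(TableStub &table_p) : table(table_p) {
    }
    TableStub &table;
};

std::unique_ptr<GlobalStateNew> MakeGlobalState(TableStub &resolved_table) {
    // mirrors: auto result = make_uniq<BatchInsertGlobalState>(table->Cast<DuckTableEntry>());
    return std::make_unique<GlobalStateNew>(resolved_table);
}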
@@ -197,15 +197,15 @@ void PhysicalExport::BuildPipelines(Pipeline &current, MetaPipeline &meta_pipeli
      // EXPORT has an optional child
      // we only need to schedule child pipelines if there is a child
      auto &state = meta_pipeline.GetState();
-     state.SetPipelineSource(current, this);
+     state.SetPipelineSource(current, *this);
      if (children.empty()) {
          return;
      }
      PhysicalOperator::BuildPipelines(current, meta_pipeline);
  }

- vector<const PhysicalOperator *> PhysicalExport::GetSources() const {
-     return {this};
+ vector<const_reference<PhysicalOperator>> PhysicalExport::GetSources() const {
+     return {*this};
  }

  } // namespace duckdb