duckdb 0.7.2-dev2867.0 → 0.7.2-dev2995.0
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions exactly as they appear in their public registries.
- package/binding.gyp +1 -0
- package/package.json +1 -1
- package/src/duckdb/extension/icu/icu-datepart.cpp +5 -1
- package/src/duckdb/src/catalog/catalog_entry/table_catalog_entry.cpp +18 -7
- package/src/duckdb/src/catalog/default/default_functions.cpp +2 -0
- package/src/duckdb/src/common/arrow/arrow_appender.cpp +3 -3
- package/src/duckdb/src/common/arrow/arrow_converter.cpp +2 -2
- package/src/duckdb/src/common/sort/partition_state.cpp +1 -1
- package/src/duckdb/src/common/string_util.cpp +6 -1
- package/src/duckdb/src/core_functions/function_list.cpp +2 -0
- package/src/duckdb/src/core_functions/scalar/string/format_bytes.cpp +29 -0
- package/src/duckdb/src/execution/index/art/art.cpp +5 -1
- package/src/duckdb/src/execution/operator/aggregate/physical_hash_aggregate.cpp +62 -43
- package/src/duckdb/src/execution/operator/aggregate/physical_perfecthash_aggregate.cpp +17 -11
- package/src/duckdb/src/execution/operator/aggregate/physical_ungrouped_aggregate.cpp +32 -39
- package/src/duckdb/src/execution/operator/aggregate/physical_window.cpp +10 -9
- package/src/duckdb/src/execution/operator/helper/physical_batch_collector.cpp +4 -4
- package/src/duckdb/src/execution/operator/helper/physical_explain_analyze.cpp +6 -21
- package/src/duckdb/src/execution/operator/helper/physical_limit.cpp +13 -13
- package/src/duckdb/src/execution/operator/helper/physical_limit_percent.cpp +15 -14
- package/src/duckdb/src/execution/operator/helper/physical_load.cpp +3 -2
- package/src/duckdb/src/execution/operator/helper/physical_materialized_collector.cpp +4 -4
- package/src/duckdb/src/execution/operator/helper/physical_pragma.cpp +4 -2
- package/src/duckdb/src/execution/operator/helper/physical_prepare.cpp +4 -2
- package/src/duckdb/src/execution/operator/helper/physical_reservoir_sample.cpp +10 -8
- package/src/duckdb/src/execution/operator/helper/physical_reset.cpp +4 -3
- package/src/duckdb/src/execution/operator/helper/physical_set.cpp +7 -6
- package/src/duckdb/src/execution/operator/helper/physical_transaction.cpp +4 -2
- package/src/duckdb/src/execution/operator/helper/physical_vacuum.cpp +8 -8
- package/src/duckdb/src/execution/operator/join/physical_asof_join.cpp +17 -16
- package/src/duckdb/src/execution/operator/join/physical_blockwise_nl_join.cpp +10 -8
- package/src/duckdb/src/execution/operator/join/physical_cross_product.cpp +3 -4
- package/src/duckdb/src/execution/operator/join/physical_delim_join.cpp +5 -5
- package/src/duckdb/src/execution/operator/join/physical_hash_join.cpp +16 -15
- package/src/duckdb/src/execution/operator/join/physical_iejoin.cpp +13 -12
- package/src/duckdb/src/execution/operator/join/physical_nested_loop_join.cpp +12 -10
- package/src/duckdb/src/execution/operator/join/physical_piecewise_merge_join.cpp +13 -11
- package/src/duckdb/src/execution/operator/join/physical_positional_join.cpp +8 -6
- package/src/duckdb/src/execution/operator/join/physical_range_join.cpp +1 -1
- package/src/duckdb/src/execution/operator/order/physical_order.cpp +13 -13
- package/src/duckdb/src/execution/operator/order/physical_top_n.cpp +8 -8
- package/src/duckdb/src/execution/operator/persistent/physical_batch_insert.cpp +160 -145
- package/src/duckdb/src/execution/operator/persistent/physical_copy_to_file.cpp +10 -25
- package/src/duckdb/src/execution/operator/persistent/physical_delete.cpp +14 -19
- package/src/duckdb/src/execution/operator/persistent/physical_export.cpp +7 -6
- package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp +18 -30
- package/src/duckdb/src/execution/operator/persistent/physical_update.cpp +14 -18
- package/src/duckdb/src/execution/operator/scan/physical_column_data_scan.cpp +6 -4
- package/src/duckdb/src/execution/operator/scan/physical_dummy_scan.cpp +4 -19
- package/src/duckdb/src/execution/operator/scan/physical_empty_result.cpp +3 -2
- package/src/duckdb/src/execution/operator/scan/physical_positional_scan.cpp +14 -5
- package/src/duckdb/src/execution/operator/scan/physical_table_scan.cpp +6 -4
- package/src/duckdb/src/execution/operator/schema/physical_alter.cpp +3 -19
- package/src/duckdb/src/execution/operator/schema/physical_attach.cpp +4 -18
- package/src/duckdb/src/execution/operator/schema/physical_create_function.cpp +4 -19
- package/src/duckdb/src/execution/operator/schema/physical_create_index.cpp +8 -9
- package/src/duckdb/src/execution/operator/schema/physical_create_schema.cpp +4 -19
- package/src/duckdb/src/execution/operator/schema/physical_create_sequence.cpp +4 -19
- package/src/duckdb/src/execution/operator/schema/physical_create_table.cpp +4 -19
- package/src/duckdb/src/execution/operator/schema/physical_create_type.cpp +9 -26
- package/src/duckdb/src/execution/operator/schema/physical_create_view.cpp +4 -19
- package/src/duckdb/src/execution/operator/schema/physical_detach.cpp +4 -19
- package/src/duckdb/src/execution/operator/schema/physical_drop.cpp +3 -19
- package/src/duckdb/src/execution/operator/set/physical_recursive_cte.cpp +9 -8
- package/src/duckdb/src/execution/operator/set/physical_union.cpp +1 -1
- package/src/duckdb/src/execution/physical_operator.cpp +11 -5
- package/src/duckdb/src/execution/radix_partitioned_hashtable.cpp +16 -16
- package/src/duckdb/src/function/table/arrow_conversion.cpp +3 -3
- package/src/duckdb/src/function/table/version/pragma_version.cpp +2 -2
- package/src/duckdb/src/include/duckdb/catalog/catalog_entry/table_catalog_entry.hpp +7 -1
- package/src/duckdb/src/include/duckdb/common/enums/operator_result_type.hpp +16 -4
- package/src/duckdb/src/include/duckdb/common/optional_idx.hpp +45 -0
- package/src/duckdb/src/include/duckdb/common/set.hpp +2 -1
- package/src/duckdb/src/include/duckdb/core_functions/scalar/string_functions.hpp +15 -0
- package/src/duckdb/src/include/duckdb/execution/executor.hpp +10 -1
- package/src/duckdb/src/include/duckdb/execution/operator/aggregate/physical_hash_aggregate.hpp +5 -8
- package/src/duckdb/src/include/duckdb/execution/operator/aggregate/physical_perfecthash_aggregate.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/aggregate/physical_ungrouped_aggregate.hpp +3 -7
- package/src/duckdb/src/include/duckdb/execution/operator/aggregate/physical_window.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_batch_collector.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_explain_analyze.hpp +2 -5
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_limit.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_limit_percent.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_load.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_materialized_collector.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_pragma.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_prepare.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_reservoir_sample.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_reset.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_set.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_transaction.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/helper/physical_vacuum.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_asof_join.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_blockwise_nl_join.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_cross_product.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_delim_join.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_hash_join.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_iejoin.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_nested_loop_join.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_piecewise_merge_join.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/join/physical_positional_join.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/order/physical_order.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/order/physical_top_n.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_batch_insert.hpp +3 -5
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_copy_to_file.hpp +2 -5
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_delete.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_export.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_insert.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_update.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/scan/physical_column_data_scan.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/scan/physical_dummy_scan.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/scan/physical_empty_result.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/scan/physical_positional_scan.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/scan/physical_table_scan.hpp +1 -2
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_alter.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_attach.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_function.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_index.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_schema.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_sequence.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_table.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_type.hpp +2 -5
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_create_view.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_detach.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/schema/physical_drop.hpp +1 -3
- package/src/duckdb/src/include/duckdb/execution/operator/set/physical_recursive_cte.hpp +2 -4
- package/src/duckdb/src/include/duckdb/execution/physical_operator.hpp +7 -4
- package/src/duckdb/src/include/duckdb/execution/physical_operator_states.hpp +26 -6
- package/src/duckdb/src/include/duckdb/execution/radix_partitioned_hashtable.hpp +5 -5
- package/src/duckdb/src/include/duckdb/function/aggregate_function.hpp +2 -1
- package/src/duckdb/src/include/duckdb/function/table_function.hpp +0 -1
- package/src/duckdb/src/include/duckdb/main/client_config.hpp +2 -0
- package/src/duckdb/src/include/duckdb/main/config.hpp +2 -0
- package/src/duckdb/src/include/duckdb/parallel/event.hpp +1 -1
- package/src/duckdb/src/include/duckdb/parallel/interrupt.hpp +63 -0
- package/src/duckdb/src/include/duckdb/parallel/pipeline.hpp +16 -3
- package/src/duckdb/src/include/duckdb/parallel/pipeline_executor.hpp +51 -7
- package/src/duckdb/src/include/duckdb/parallel/task.hpp +21 -2
- package/src/duckdb/src/include/duckdb/parallel/task_counter.hpp +2 -2
- package/src/duckdb/src/include/duckdb/parallel/task_scheduler.hpp +2 -2
- package/src/duckdb/src/include/duckdb/planner/column_binding.hpp +6 -0
- package/src/duckdb/src/include/duckdb/planner/expression/bound_columnref_expression.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_aggregate.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_column_data_get.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_cteref.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_delete.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_delim_get.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_dummy_scan.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_expression_get.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_insert.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_pivot.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_projection.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_recursive_cte.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_set_operation.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_unnest.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_update.hpp +1 -0
- package/src/duckdb/src/include/duckdb/planner/operator/logical_window.hpp +1 -0
- package/src/duckdb/src/include/duckdb/storage/data_table.hpp +1 -0
- package/src/duckdb/src/include/duckdb/storage/optimistic_data_writer.hpp +46 -0
- package/src/duckdb/src/include/duckdb/storage/partial_block_manager.hpp +24 -3
- package/src/duckdb/src/include/duckdb/storage/table/column_checkpoint_state.hpp +46 -1
- package/src/duckdb/src/include/duckdb/storage/table/column_data.hpp +9 -10
- package/src/duckdb/src/include/duckdb/storage/table/column_segment.hpp +1 -1
- package/src/duckdb/src/include/duckdb/storage/table/list_column_data.hpp +2 -2
- package/src/duckdb/src/include/duckdb/storage/table/row_group.hpp +3 -3
- package/src/duckdb/src/include/duckdb/storage/table/row_group_collection.hpp +1 -0
- package/src/duckdb/src/include/duckdb/storage/table/segment_base.hpp +1 -1
- package/src/duckdb/src/include/duckdb/storage/table/segment_tree.hpp +22 -0
- package/src/duckdb/src/include/duckdb/storage/table/standard_column_data.hpp +3 -3
- package/src/duckdb/src/include/duckdb/storage/table/struct_column_data.hpp +2 -2
- package/src/duckdb/src/include/duckdb/storage/table/update_segment.hpp +0 -2
- package/src/duckdb/src/include/duckdb/storage/table/validity_column_data.hpp +1 -2
- package/src/duckdb/src/include/duckdb/transaction/local_storage.hpp +9 -34
- package/src/duckdb/src/include/duckdb/verification/no_operator_caching_verifier.hpp +25 -0
- package/src/duckdb/src/include/duckdb/verification/statement_verifier.hpp +5 -0
- package/src/duckdb/src/main/client_verify.cpp +4 -0
- package/src/duckdb/src/main/config.cpp +4 -0
- package/src/duckdb/src/main/database.cpp +11 -11
- package/src/duckdb/src/main/extension/extension_load.cpp +19 -15
- package/src/duckdb/src/parallel/event.cpp +1 -1
- package/src/duckdb/src/parallel/executor.cpp +39 -3
- package/src/duckdb/src/parallel/executor_task.cpp +11 -0
- package/src/duckdb/src/parallel/interrupt.cpp +57 -0
- package/src/duckdb/src/parallel/pipeline.cpp +49 -6
- package/src/duckdb/src/parallel/pipeline_executor.cpp +248 -69
- package/src/duckdb/src/parallel/pipeline_initialize_event.cpp +1 -1
- package/src/duckdb/src/parallel/task_scheduler.cpp +57 -22
- package/src/duckdb/src/parser/base_expression.cpp +6 -0
- package/src/duckdb/src/planner/expression/bound_columnref_expression.cpp +17 -3
- package/src/duckdb/src/planner/expression/bound_reference_expression.cpp +8 -2
- package/src/duckdb/src/planner/operator/logical_aggregate.cpp +13 -1
- package/src/duckdb/src/planner/operator/logical_column_data_get.cpp +11 -0
- package/src/duckdb/src/planner/operator/logical_cteref.cpp +11 -0
- package/src/duckdb/src/planner/operator/logical_delete.cpp +10 -0
- package/src/duckdb/src/planner/operator/logical_delim_get.cpp +12 -1
- package/src/duckdb/src/planner/operator/logical_dummy_scan.cpp +12 -1
- package/src/duckdb/src/planner/operator/logical_expression_get.cpp +12 -1
- package/src/duckdb/src/planner/operator/logical_get.cpp +10 -4
- package/src/duckdb/src/planner/operator/logical_insert.cpp +12 -1
- package/src/duckdb/src/planner/operator/logical_pivot.cpp +11 -0
- package/src/duckdb/src/planner/operator/logical_projection.cpp +11 -0
- package/src/duckdb/src/planner/operator/logical_recursive_cte.cpp +11 -0
- package/src/duckdb/src/planner/operator/logical_set_operation.cpp +11 -0
- package/src/duckdb/src/planner/operator/logical_unnest.cpp +12 -1
- package/src/duckdb/src/planner/operator/logical_update.cpp +10 -0
- package/src/duckdb/src/planner/operator/logical_window.cpp +11 -0
- package/src/duckdb/src/storage/checkpoint_manager.cpp +1 -1
- package/src/duckdb/src/storage/data_table.cpp +5 -0
- package/src/duckdb/src/storage/local_storage.cpp +40 -110
- package/src/duckdb/src/storage/optimistic_data_writer.cpp +96 -0
- package/src/duckdb/src/storage/partial_block_manager.cpp +73 -9
- package/src/duckdb/src/storage/single_file_block_manager.cpp +3 -1
- package/src/duckdb/src/storage/standard_buffer_manager.cpp +17 -12
- package/src/duckdb/src/storage/statistics/base_statistics.cpp +3 -0
- package/src/duckdb/src/storage/table/column_checkpoint_state.cpp +90 -82
- package/src/duckdb/src/storage/table/column_data.cpp +19 -45
- package/src/duckdb/src/storage/table/column_data_checkpointer.cpp +7 -7
- package/src/duckdb/src/storage/table/column_segment.cpp +1 -1
- package/src/duckdb/src/storage/table/list_column_data.cpp +6 -11
- package/src/duckdb/src/storage/table/row_group.cpp +13 -14
- package/src/duckdb/src/storage/table/row_group_collection.cpp +10 -4
- package/src/duckdb/src/storage/table/standard_column_data.cpp +6 -10
- package/src/duckdb/src/storage/table/struct_column_data.cpp +7 -13
- package/src/duckdb/src/storage/table/update_segment.cpp +0 -25
- package/src/duckdb/src/storage/table/validity_column_data.cpp +2 -6
- package/src/duckdb/src/transaction/commit_state.cpp +4 -4
- package/src/duckdb/src/verification/no_operator_caching_verifier.cpp +13 -0
- package/src/duckdb/src/verification/statement_verifier.cpp +4 -0
- package/src/duckdb/ub_src_core_functions_scalar_string.cpp +2 -0
- package/src/duckdb/ub_src_parallel.cpp +2 -0
- package/src/duckdb/ub_src_storage.cpp +2 -0
package/src/duckdb/src/parallel/pipeline_executor.cpp

@@ -2,16 +2,29 @@
 #include "duckdb/main/client_context.hpp"
 #include "duckdb/common/limits.hpp"
 
+#ifdef DUCKDB_DEBUG_ASYNC_SINK_SOURCE
+#include <thread>
+#include <chrono>
+#endif
+
 namespace duckdb {
 
 PipelineExecutor::PipelineExecutor(ClientContext &context_p, Pipeline &pipeline_p)
     : pipeline(pipeline_p), thread(context_p), context(context_p, thread, &pipeline_p) {
 	D_ASSERT(pipeline.source_state);
-	local_source_state = pipeline.source->GetLocalSourceState(context, *pipeline.source_state);
 	if (pipeline.sink) {
 		local_sink_state = pipeline.sink->GetLocalSinkState(context);
 		requires_batch_index = pipeline.sink->RequiresBatchIndex() && pipeline.source->SupportsBatchIndex();
+		if (requires_batch_index) {
+			auto &partition_info = local_sink_state->partition_info;
+			if (!partition_info.batch_index.IsValid()) {
+				// batch index is not set yet - initialize before fetching anything
+				partition_info.batch_index = pipeline.RegisterNewBatchIndex();
+				partition_info.min_batch_index = partition_info.batch_index;
+			}
+		}
 	}
+	local_source_state = pipeline.source->GetLocalSourceState(context, *pipeline.source_state);
 
 	intermediate_chunks.reserve(pipeline.operators.size());
 	intermediate_states.reserve(pipeline.operators.size());
@@ -35,35 +48,136 @@ PipelineExecutor::PipelineExecutor(ClientContext &context_p, Pipeline &pipeline_
 	InitializeChunk(final_chunk);
 }
 
-bool PipelineExecutor::
+bool PipelineExecutor::TryFlushCachingOperators() {
+	if (!started_flushing) {
+		// Remainder of this method assumes any in process operators are from flushing
+		D_ASSERT(in_process_operators.empty());
+		started_flushing = true;
+		flushing_idx = IsFinished() ? idx_t(finished_processing_idx) : 0;
+	}
+
+	// Go over each operator and keep flushing them using `FinalExecute` until empty
+	while (flushing_idx < pipeline.operators.size()) {
+		if (!pipeline.operators[flushing_idx].get().RequiresFinalExecute()) {
+			flushing_idx++;
+			continue;
+		}
+
+		// This slightly awkward way of increasing the flushing idx is to make the code re-entrant: We need to call this
+		// method again in the case of a Sink returning BLOCKED.
+		if (!should_flush_current_idx && in_process_operators.empty()) {
+			should_flush_current_idx = true;
+			flushing_idx++;
+			continue;
+		}
+
+		auto &curr_chunk =
+		    flushing_idx + 1 >= intermediate_chunks.size() ? final_chunk : *intermediate_chunks[flushing_idx + 1];
+		auto &current_operator = pipeline.operators[flushing_idx].get();
+
+		OperatorFinalizeResultType finalize_result;
+		OperatorResultType push_result;
+
+		if (in_process_operators.empty()) {
+			StartOperator(current_operator);
+			finalize_result = current_operator.FinalExecute(context, curr_chunk, *current_operator.op_state,
+			                                                *intermediate_states[flushing_idx]);
+			EndOperator(current_operator, &curr_chunk);
+		} else {
+			// Reset flag and reflush the last chunk we were flushing.
+			finalize_result = OperatorFinalizeResultType::HAVE_MORE_OUTPUT;
+		}
+
+		push_result = ExecutePushInternal(curr_chunk, flushing_idx + 1);
+
+		if (finalize_result == OperatorFinalizeResultType::HAVE_MORE_OUTPUT) {
+			should_flush_current_idx = true;
+		} else {
+			should_flush_current_idx = false;
+		}
+
+		if (push_result == OperatorResultType::BLOCKED) {
+			remaining_sink_chunk = true;
+			return false;
+		} else if (push_result == OperatorResultType::FINISHED) {
+			break;
+		}
+	}
+	return true;
+}
+
+PipelineExecuteResult PipelineExecutor::Execute(idx_t max_chunks) {
 	D_ASSERT(pipeline.sink);
-	bool exhausted_source = false;
 	auto &source_chunk = pipeline.operators.empty() ? final_chunk : *intermediate_chunks[0];
 	for (idx_t i = 0; i < max_chunks; i++) {
-		if (
-
+		if (context.client.interrupted) {
+			throw InterruptException();
 		}
-
-
-		if (
-			exhausted_source = true;
+
+		OperatorResultType result;
+		if (exhausted_source && done_flushing && !remaining_sink_chunk && in_process_operators.empty()) {
 			break;
+		} else if (remaining_sink_chunk) {
+			// The pipeline was interrupted by the Sink. We should retry sinking the final chunk.
+			result = ExecutePushInternal(final_chunk);
+			remaining_sink_chunk = false;
+		} else if (!in_process_operators.empty() && !started_flushing) {
+			// The pipeline was interrupted by the Sink when pushing a source chunk through the pipeline. We need to
+			// re-push the same source chunk through the pipeline because there are in_process operators, meaning that
+			// the result for the pipeline
+			D_ASSERT(source_chunk.size() > 0);
+			result = ExecutePushInternal(source_chunk);
+		} else if (exhausted_source && !done_flushing) {
+			// The source was exhausted, try flushing all operators
+			auto flush_completed = TryFlushCachingOperators();
+			if (flush_completed) {
+				done_flushing = true;
+				break;
+			} else {
+				return PipelineExecuteResult::INTERRUPTED;
+			}
+		} else if (!exhausted_source) {
+			// "Regular" path: fetch a chunk from the source and push it through the pipeline
+			source_chunk.Reset();
+			SourceResultType source_result = FetchFromSource(source_chunk);
+
+			if (source_result == SourceResultType::BLOCKED) {
+				return PipelineExecuteResult::INTERRUPTED;
+			}
+
+			if (source_result == SourceResultType::FINISHED) {
+				exhausted_source = true;
+				if (source_chunk.size() == 0) {
+					continue;
+				}
+			}
+			result = ExecutePushInternal(source_chunk);
+		} else {
+			throw InternalException("Unexpected state reached in pipeline executor");
 		}
-
+
+		// SINK INTERRUPT
+		if (result == OperatorResultType::BLOCKED) {
+			remaining_sink_chunk = true;
+			return PipelineExecuteResult::INTERRUPTED;
+		}
+
 		if (result == OperatorResultType::FINISHED) {
-			D_ASSERT(IsFinished());
 			break;
 		}
 	}
-
-
+
+	if ((!exhausted_source || !done_flushing) && !IsFinished()) {
+		return PipelineExecuteResult::NOT_FINISHED;
 	}
+
 	PushFinalize();
-
+
+	return PipelineExecuteResult::FINISHED;
 }
 
-
-	Execute(NumericLimits<idx_t>::Maximum());
+PipelineExecuteResult PipelineExecutor::Execute() {
+	return Execute(NumericLimits<idx_t>::Maximum());
 }
 
 OperatorResultType PipelineExecutor::ExecutePush(DataChunk &input) { // LCOV_EXCL_START
@@ -84,6 +198,10 @@ OperatorResultType PipelineExecutor::ExecutePushInternal(DataChunk &input, idx_t
 	if (input.size() == 0) { // LCOV_EXCL_START
 		return OperatorResultType::NEED_MORE_INPUT;
 	} // LCOV_EXCL_STOP
+
+	// this loop will continuously push the input chunk through the pipeline as long as:
+	// - the OperatorResultType for the Execute is HAVE_MORE_OUTPUT
+	// - the Sink doesn't block
 	while (true) {
 		OperatorResultType result;
 		// Note: if input is the final_chunk, we don't do any executing, the chunk just needs to be sinked
@@ -101,9 +219,15 @@ OperatorResultType PipelineExecutor::ExecutePushInternal(DataChunk &input, idx_t
 			StartOperator(*pipeline.sink);
 			D_ASSERT(pipeline.sink);
 			D_ASSERT(pipeline.sink->sink_state);
-
+			OperatorSinkInput sink_input {*pipeline.sink->sink_state, *local_sink_state, interrupt_state};
+
+			auto sink_result = Sink(sink_chunk, sink_input);
+
 			EndOperator(*pipeline.sink, nullptr);
-
+
+			if (sink_result == SinkResultType::BLOCKED) {
+				return OperatorResultType::BLOCKED;
+			} else if (sink_result == SinkResultType::FINISHED) {
 				FinishProcessing();
 				return OperatorResultType::FINISHED;
 			}
@@ -114,49 +238,15 @@ OperatorResultType PipelineExecutor::ExecutePushInternal(DataChunk &input, idx_t
 	}
 }
 
-// Push all remaining cached operator output through the pipeline
-void PipelineExecutor::FlushCachingOperatorsPush() {
-	idx_t start_idx = IsFinished() ? idx_t(finished_processing_idx) : 0;
-	for (idx_t op_idx = start_idx; op_idx < pipeline.operators.size(); op_idx++) {
-		if (!pipeline.operators[op_idx].get().RequiresFinalExecute()) {
-			continue;
-		}
-
-		OperatorFinalizeResultType finalize_result;
-		OperatorResultType push_result;
-
-		do {
-			auto &curr_chunk =
-			    op_idx + 1 >= intermediate_chunks.size() ? final_chunk : *intermediate_chunks[op_idx + 1];
-			auto &current_operator = pipeline.operators[op_idx].get();
-			StartOperator(current_operator);
-			finalize_result = current_operator.FinalExecute(context, curr_chunk, *current_operator.op_state,
-			                                                *intermediate_states[op_idx]);
-			EndOperator(current_operator, &curr_chunk);
-			push_result = ExecutePushInternal(curr_chunk, op_idx + 1);
-		} while (finalize_result != OperatorFinalizeResultType::FINISHED &&
-		         push_result != OperatorResultType::FINISHED);
-
-		if (push_result == OperatorResultType::FINISHED) {
-			break;
-		}
-	}
-}
-
 void PipelineExecutor::PushFinalize() {
 	if (finalized) {
 		throw InternalException("Calling PushFinalize on a pipeline that has been finalized already");
 	}
-	finalized = true;
-	// flush all caching operators
-	// note that even if an operator has finished, we might still need to flush caches AFTER
-	// that operator e.g. if we have SOURCE -> LIMIT -> CROSS_PRODUCT -> SINK, if the
-	// LIMIT reports no more rows will be passed on we still need to flush caches from the CROSS_PRODUCT
-	D_ASSERT(in_process_operators.empty());
-
-	FlushCachingOperatorsPush();
 
 	D_ASSERT(local_sink_state);
+
+	finalized = true;
+
 	// run the combine for the sink
 	pipeline.sink->Combine(context, *pipeline.sink->sink_state, *local_sink_state);
 
@@ -168,6 +258,7 @@ void PipelineExecutor::PushFinalize() {
 	local_sink_state.reset();
 }
 
+// TODO: Refactoring the StreamingQueryResult to use Push-based execution should eliminate the need for this code
 void PipelineExecutor::ExecutePull(DataChunk &result) {
 	if (IsFinished()) {
 		return;
@@ -176,12 +267,32 @@ void PipelineExecutor::ExecutePull(DataChunk &result) {
 	try {
 		D_ASSERT(!pipeline.sink);
 		auto &source_chunk = pipeline.operators.empty() ? result : *intermediate_chunks[0];
-		while (result.size() == 0) {
+		while (result.size() == 0 && !exhausted_source) {
 			if (in_process_operators.empty()) {
 				source_chunk.Reset();
-
-
-
+
+				auto done_signal = make_shared<InterruptDoneSignalState>();
+				interrupt_state = InterruptState(done_signal);
+				SourceResultType source_result;
+
+				// Repeatedly try to fetch from the source until it doesn't block. Note that it may block multiple times
+				while (true) {
+					source_result = FetchFromSource(source_chunk);
+
+					// No interrupt happened, all good.
+					if (source_result != SourceResultType::BLOCKED) {
+						break;
+					}
+
+					// Busy wait for async callback from source operator
+					done_signal->Await();
+				}
+
+				if (source_result == SourceResultType::FINISHED) {
+					exhausted_source = true;
+					if (source_chunk.size() == 0) {
+						break;
+					}
				}
 			}
 			if (!pipeline.operators.empty()) {
@@ -265,7 +376,7 @@ OperatorResultType PipelineExecutor::Execute(DataChunk &input, DataChunk &result
 	auto operator_idx = current_idx - 1;
 	auto &current_operator = pipeline.operators[operator_idx].get();
 
-	// if current_idx > source_idx, we pass the previous
+	// if current_idx > source_idx, we pass the previous operators' output through the Execute of the current
 	// operator
 	StartOperator(current_operator);
 	auto result = current_operator.Execute(context, prev_chunk, current_chunk, *current_operator.op_state,
@@ -307,18 +418,86 @@ OperatorResultType PipelineExecutor::Execute(DataChunk &input, DataChunk &result
 	return in_process_operators.empty() ? OperatorResultType::NEED_MORE_INPUT : OperatorResultType::HAVE_MORE_OUTPUT;
 }
 
-void PipelineExecutor::
+void PipelineExecutor::SetTaskForInterrupts(weak_ptr<Task> current_task) {
+	interrupt_state = InterruptState(std::move(current_task));
+}
+
+SourceResultType PipelineExecutor::GetData(DataChunk &chunk, OperatorSourceInput &input) {
+	//! Testing feature to enable async source on every operator
+#ifdef DUCKDB_DEBUG_ASYNC_SINK_SOURCE
+	if (debug_blocked_source_count < debug_blocked_target_count) {
+		debug_blocked_source_count++;
+
+		auto &callback_state = input.interrupt_state;
+		std::thread rewake_thread([callback_state] {
+			std::this_thread::sleep_for(std::chrono::milliseconds(1));
+			callback_state.Callback();
+		});
+		rewake_thread.detach();
+
+		return SourceResultType::BLOCKED;
+	}
+#endif
+
+	return pipeline.source->GetData(context, chunk, input);
+}
+
+SinkResultType PipelineExecutor::Sink(DataChunk &chunk, OperatorSinkInput &input) {
+	//! Testing feature to enable async sink on every operator
+#ifdef DUCKDB_DEBUG_ASYNC_SINK_SOURCE
+	if (debug_blocked_sink_count < debug_blocked_target_count) {
+		debug_blocked_sink_count++;
+
+		auto &callback_state = input.interrupt_state;
+		std::thread rewake_thread([callback_state] {
+			std::this_thread::sleep_for(std::chrono::milliseconds(1));
+			callback_state.Callback();
+		});
+		rewake_thread.detach();
+
+		return SinkResultType::BLOCKED;
+	}
+#endif
+	return pipeline.sink->Sink(context, chunk, input);
+}
+
+SourceResultType PipelineExecutor::FetchFromSource(DataChunk &result) {
 	StartOperator(*pipeline.source);
-
-
-
-
-
-
-
-
+
+	OperatorSourceInput source_input = {*pipeline.source_state, *local_source_state, interrupt_state};
+	auto res = GetData(result, source_input);
+
+	// Ensures Sinks only return empty results when Blocking or Finished
+	D_ASSERT(res != SourceResultType::BLOCKED || result.size() == 0);
+
+	if (requires_batch_index && res != SourceResultType::BLOCKED) {
+		idx_t next_batch_index;
+		if (result.size() == 0) {
+			next_batch_index = NumericLimits<int64_t>::Maximum();
+		} else {
+			next_batch_index =
+			    pipeline.source->GetBatchIndex(context, result, *pipeline.source_state, *local_source_state);
+			next_batch_index += pipeline.base_batch_index;
+		}
+		auto &partition_info = local_sink_state->partition_info;
+		if (next_batch_index != partition_info.batch_index.GetIndex()) {
+			// batch index has changed - update it
+			if (partition_info.batch_index.GetIndex() > next_batch_index) {
+				throw InternalException(
+				    "Pipeline batch index - gotten lower batch index %llu (down from previous batch index of %llu)",
+				    next_batch_index, partition_info.batch_index.GetIndex());
+			}
+			auto current_batch = partition_info.batch_index.GetIndex();
+			partition_info.batch_index = next_batch_index;
+			// call NextBatch before updating min_batch_index to provide the opportunity to flush the previous batch
+			pipeline.sink->NextBatch(context, *pipeline.sink->sink_state, *local_sink_state);
+			partition_info.min_batch_index = pipeline.UpdateBatchIndex(current_batch, next_batch_index);
+		}
 	}
+
 	EndOperator(*pipeline.source, &result);
+
+	return res;
 }
 
 void PipelineExecutor::InitializeChunk(DataChunk &chunk) {
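The hunks above turn `PipelineExecutor::Execute` into a routine that reports a `PipelineExecuteResult`, so a source or sink that returns `BLOCKED` no longer busy-waits inside the executor. The following is a minimal sketch, not code from the package, of how a scheduler-side caller could map the new result codes onto the `TaskExecutionResult` values used by the task scheduler hunks further down; the helper name `DrivePipeline` and the chunk budget of 50 are assumptions, while the enum values and `Execute(idx_t)` come from the diff.

```cpp
#include "duckdb/parallel/pipeline_executor.hpp"
#include "duckdb/parallel/task.hpp"

using namespace duckdb;

// Hypothetical helper: drives a PipelineExecutor for one scheduling quantum and
// translates its result into the scheduler's TaskExecutionResult.
TaskExecutionResult DrivePipeline(PipelineExecutor &executor) {
	auto result = executor.Execute(50); // process up to 50 chunks, then yield
	switch (result) {
	case PipelineExecuteResult::NOT_FINISHED:
		// still work left in this pipeline; run the task again later
		return TaskExecutionResult::TASK_NOT_FINISHED;
	case PipelineExecuteResult::INTERRUPTED:
		// a source or sink returned BLOCKED; the task should be descheduled and
		// will be woken through InterruptState::Callback()
		return TaskExecutionResult::TASK_BLOCKED;
	case PipelineExecuteResult::FINISHED:
		return TaskExecutionResult::TASK_FINISHED;
	}
	return TaskExecutionResult::TASK_ERROR;
}
```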
package/src/duckdb/src/parallel/pipeline_initialize_event.cpp

@@ -27,7 +27,7 @@ public:
 
 void PipelineInitializeEvent::Schedule() {
 	// needs to spawn a task to get the chain of tasks for the query plan going
-	vector<
+	vector<shared_ptr<Task>> tasks;
 	tasks.push_back(make_uniq<PipelineInitializeTask>(*pipeline, shared_from_this()));
 	SetTasks(std::move(tasks));
 }
package/src/duckdb/src/parallel/task_scheduler.cpp

@@ -24,15 +24,15 @@ struct SchedulerThread {
 };
 
 #ifndef DUCKDB_NO_THREADS
-typedef duckdb_moodycamel::ConcurrentQueue<
+typedef duckdb_moodycamel::ConcurrentQueue<shared_ptr<Task>> concurrent_queue_t;
 typedef duckdb_moodycamel::LightweightSemaphore lightweight_semaphore_t;
 
 struct ConcurrentQueue {
 	concurrent_queue_t q;
 	lightweight_semaphore_t semaphore;
 
-	void Enqueue(ProducerToken &token,
-	bool DequeueFromProducer(ProducerToken &token,
+	void Enqueue(ProducerToken &token, shared_ptr<Task> task);
+	bool DequeueFromProducer(ProducerToken &token, shared_ptr<Task> &task);
 };
 
 struct QueueProducerToken {
@@ -42,7 +42,7 @@ struct QueueProducerToken {
 	duckdb_moodycamel::ProducerToken queue_token;
 };
 
-void ConcurrentQueue::Enqueue(ProducerToken &token,
+void ConcurrentQueue::Enqueue(ProducerToken &token, shared_ptr<Task> task) {
 	lock_guard<mutex> producer_lock(token.producer_lock);
 	if (q.enqueue(token.token->queue_token, std::move(task))) {
 		semaphore.signal();
@@ -51,26 +51,26 @@ void ConcurrentQueue::Enqueue(ProducerToken &token, unique_ptr<Task> task) {
 	}
 }
 
-bool ConcurrentQueue::DequeueFromProducer(ProducerToken &token,
+bool ConcurrentQueue::DequeueFromProducer(ProducerToken &token, shared_ptr<Task> &task) {
 	lock_guard<mutex> producer_lock(token.producer_lock);
 	return q.try_dequeue_from_producer(token.token->queue_token, task);
 }
 
 #else
 struct ConcurrentQueue {
-	std::queue<
+	std::queue<shared_ptr<Task>> q;
 	mutex qlock;
 
-	void Enqueue(ProducerToken &token,
-	bool DequeueFromProducer(ProducerToken &token,
+	void Enqueue(ProducerToken &token, shared_ptr<Task> task);
+	bool DequeueFromProducer(ProducerToken &token, shared_ptr<Task> &task);
 };
 
-void ConcurrentQueue::Enqueue(ProducerToken &token,
+void ConcurrentQueue::Enqueue(ProducerToken &token, shared_ptr<Task> task) {
 	lock_guard<mutex> lock(qlock);
 	q.push(std::move(task));
 }
 
-bool ConcurrentQueue::DequeueFromProducer(ProducerToken &token,
+bool ConcurrentQueue::DequeueFromProducer(ProducerToken &token, shared_ptr<Task> &task) {
 	lock_guard<mutex> lock(qlock);
 	if (q.empty()) {
 		return false;
@@ -115,25 +115,37 @@ unique_ptr<ProducerToken> TaskScheduler::CreateProducer() {
 	return make_uniq<ProducerToken>(*this, std::move(token));
 }
 
-void TaskScheduler::ScheduleTask(ProducerToken &token,
+void TaskScheduler::ScheduleTask(ProducerToken &token, shared_ptr<Task> task) {
 	// Enqueue a task for the given producer token and signal any sleeping threads
 	queue->Enqueue(token, std::move(task));
 }
 
-bool TaskScheduler::GetTaskFromProducer(ProducerToken &token,
+bool TaskScheduler::GetTaskFromProducer(ProducerToken &token, shared_ptr<Task> &task) {
 	return queue->DequeueFromProducer(token, task);
 }
 
 void TaskScheduler::ExecuteForever(atomic<bool> *marker) {
 #ifndef DUCKDB_NO_THREADS
-
+	shared_ptr<Task> task;
 	// loop until the marker is set to false
 	while (*marker) {
 		// wait for a signal with a timeout
 		queue->semaphore.wait();
 		if (queue->q.try_dequeue(task)) {
-			task->Execute(TaskExecutionMode::PROCESS_ALL);
-
+			auto execute_result = task->Execute(TaskExecutionMode::PROCESS_ALL);
+
+			switch (execute_result) {
+			case TaskExecutionResult::TASK_FINISHED:
+			case TaskExecutionResult::TASK_ERROR:
+				task.reset();
+				break;
+			case TaskExecutionResult::TASK_NOT_FINISHED:
+				throw InternalException("Task should not return TASK_NOT_FINISHED in PROCESS_ALL mode");
+			case TaskExecutionResult::TASK_BLOCKED:
+				task->Deschedule();
+				task.reset();
+				break;
+			}
 		}
 	}
 #else
@@ -146,13 +158,25 @@ idx_t TaskScheduler::ExecuteTasks(atomic<bool> *marker, idx_t max_tasks) {
 	idx_t completed_tasks = 0;
 	// loop until the marker is set to false
 	while (*marker && completed_tasks < max_tasks) {
-
+		shared_ptr<Task> task;
 		if (!queue->q.try_dequeue(task)) {
 			return completed_tasks;
 		}
-		task->Execute(TaskExecutionMode::PROCESS_ALL);
-
-
+		auto execute_result = task->Execute(TaskExecutionMode::PROCESS_ALL);
+
+		switch (execute_result) {
+		case TaskExecutionResult::TASK_FINISHED:
+		case TaskExecutionResult::TASK_ERROR:
+			task.reset();
+			completed_tasks++;
+			break;
+		case TaskExecutionResult::TASK_NOT_FINISHED:
+			throw InternalException("Task should not return TASK_NOT_FINISHED in PROCESS_ALL mode");
+		case TaskExecutionResult::TASK_BLOCKED:
+			task->Deschedule();
+			task.reset();
+			break;
+		}
 	}
 	return completed_tasks;
 #else
@@ -162,15 +186,26 @@ idx_t TaskScheduler::ExecuteTasks(atomic<bool> *marker, idx_t max_tasks) {
 
 void TaskScheduler::ExecuteTasks(idx_t max_tasks) {
 #ifndef DUCKDB_NO_THREADS
-
+	shared_ptr<Task> task;
 	for (idx_t i = 0; i < max_tasks; i++) {
 		queue->semaphore.wait(TASK_TIMEOUT_USECS);
 		if (!queue->q.try_dequeue(task)) {
 			return;
 		}
 		try {
-			task->Execute(TaskExecutionMode::PROCESS_ALL);
-
+			auto execute_result = task->Execute(TaskExecutionMode::PROCESS_ALL);
+			switch (execute_result) {
+			case TaskExecutionResult::TASK_FINISHED:
+			case TaskExecutionResult::TASK_ERROR:
+				task.reset();
+				break;
+			case TaskExecutionResult::TASK_NOT_FINISHED:
+				throw InternalException("Task should not return TASK_NOT_FINISHED in PROCESS_ALL mode");
+			case TaskExecutionResult::TASK_BLOCKED:
+				task->Deschedule();
+				task.reset();
+				break;
+			}
 		} catch (...) {
 			return;
 		}
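The scheduler now owns tasks through `shared_ptr<Task>` and calls `Deschedule()` on a `TASK_BLOCKED` result, while the executor's `DUCKDB_DEBUG_ASYNC_SINK_SOURCE` hook above shows the wake-up side: a copy of the interrupt state whose `Callback()` re-activates the blocked work. The sketch below is an illustration only, not code from the package: `buffer_full` and `when_drained` stand in for a hypothetical operator's own state and async completion hook; `OperatorSinkInput`, `SinkResultType::BLOCKED`, and `InterruptState::Callback()` are taken from the diff.

```cpp
#include <functional>
#include "duckdb/common/types/data_chunk.hpp"
#include "duckdb/execution/physical_operator_states.hpp"

using namespace duckdb;

// Hypothetical operator-side view of the new backpressure protocol.
SinkResultType SinkWithBackpressure(DataChunk &chunk, OperatorSinkInput &input, bool buffer_full,
                                    const std::function<void(std::function<void()>)> &when_drained) {
	if (buffer_full) {
		// Copy the interrupt state; it stays valid after this call returns.
		auto callback_state = input.interrupt_state;
		when_drained([callback_state] {
			// Wakes the descheduled task (or signals InterruptDoneSignalState
			// when the pipeline runs through ExecutePull).
			callback_state.Callback();
		});
		// The executor returns INTERRUPTED and the worker calls Deschedule().
		return SinkResultType::BLOCKED;
	}
	// ... append chunk to the operator's buffer here ...
	return SinkResultType::NEED_MORE_INPUT;
}
```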
package/src/duckdb/src/parser/base_expression.cpp

@@ -1,5 +1,6 @@
 #include "duckdb/parser/base_expression.hpp"
 
+#include "duckdb/main/config.hpp"
 #include "duckdb/common/printer.hpp"
 
 namespace duckdb {
@@ -9,6 +10,11 @@ void BaseExpression::Print() const {
 }
 
 string BaseExpression::GetName() const {
+#ifdef DEBUG
+	if (DBConfigOptions::debug_print_bindings) {
+		return ToString();
+	}
+#endif
 	return !alias.empty() ? alias : ToString();
 }
 
package/src/duckdb/src/planner/expression/bound_columnref_expression.cpp

@@ -1,8 +1,8 @@
 #include "duckdb/planner/expression/bound_columnref_expression.hpp"
 
-#include "duckdb/common/types/hash.hpp"
-#include "duckdb/common/to_string.hpp"
 #include "duckdb/common/field_writer.hpp"
+#include "duckdb/common/types/hash.hpp"
+#include "duckdb/main/config.hpp"
 
 namespace duckdb {
 
@@ -35,11 +35,25 @@ bool BoundColumnRefExpression::Equals(const BaseExpression *other_p) const {
 	return other.binding == binding && other.depth == depth;
 }
 
+string BoundColumnRefExpression::GetName() const {
+#ifdef DEBUG
+	if (DBConfigOptions::debug_print_bindings) {
+		return binding.ToString();
+	}
+#endif
+	return Expression::GetName();
+}
+
 string BoundColumnRefExpression::ToString() const {
+#ifdef DEBUG
+	if (DBConfigOptions::debug_print_bindings) {
+		return binding.ToString();
+	}
+#endif
 	if (!alias.empty()) {
 		return alias;
 	}
-	return
+	return binding.ToString();
 }
 
 void BoundColumnRefExpression::Serialize(FieldWriter &writer) const {
package/src/duckdb/src/planner/expression/bound_reference_expression.cpp

@@ -1,9 +1,10 @@
 #include "duckdb/planner/expression/bound_reference_expression.hpp"
 
+#include "duckdb/common/field_writer.hpp"
 #include "duckdb/common/serializer.hpp"
-#include "duckdb/common/types/hash.hpp"
 #include "duckdb/common/to_string.hpp"
-#include "duckdb/common/
+#include "duckdb/common/types/hash.hpp"
+#include "duckdb/main/config.hpp"
 
 namespace duckdb {
 
@@ -16,6 +17,11 @@ BoundReferenceExpression::BoundReferenceExpression(LogicalType type, idx_t index
 }
 
 string BoundReferenceExpression::ToString() const {
+#ifdef DEBUG
+	if (DBConfigOptions::debug_print_bindings) {
+		return "#" + to_string(index);
+	}
+#endif
 	if (!alias.empty()) {
 		return alias;
 	}