duckdb 0.7.2-dev2699.0 → 0.7.2-dev2740.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/duckdb/extension/parquet/parquet_metadata.cpp +46 -20
- package/src/duckdb/src/catalog/catalog_entry/table_catalog_entry.cpp +3 -0
- package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp +19 -12
- package/src/duckdb/src/function/table/range.cpp +1 -0
- package/src/duckdb/src/function/table/repeat_row.cpp +60 -0
- package/src/duckdb/src/function/table/version/pragma_version.cpp +2 -2
- package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_insert.hpp +4 -3
- package/src/duckdb/src/include/duckdb/function/table/range.hpp +4 -0
- package/src/duckdb/src/planner/binder/tableref/bind_table_function.cpp +5 -4
- package/src/duckdb/src/planner/operator/logical_create.cpp +1 -2
- package/src/duckdb/src/planner/operator/logical_create_index.cpp +1 -1
- package/src/duckdb/src/planner/operator/logical_delete.cpp +3 -3
- package/src/duckdb/src/planner/operator/logical_insert.cpp +1 -1
- package/src/duckdb/src/planner/operator/logical_update.cpp +1 -1
- package/src/duckdb/src/planner/parsed_data/bound_create_table_info.cpp +6 -27
- package/src/duckdb/src/storage/storage_info.cpp +1 -1
- package/src/duckdb/ub_src_function_table.cpp +2 -0
package/package.json
CHANGED

package/src/duckdb/extension/parquet/parquet_metadata.cpp
CHANGED
@@ -55,6 +55,30 @@ string PrintParquetElementToString(T &&entry) {
 	return ss.str();
 }
 
+template <class T>
+Value ParquetElementString(T &&value, bool is_set) {
+	if (!is_set) {
+		return Value();
+	}
+	return Value(ConvertParquetElementToString(value));
+}
+
+template <class T>
+Value ParquetElementInteger(T &&value, bool is_iset) {
+	if (!is_iset) {
+		return Value();
+	}
+	return Value::INTEGER(value);
+}
+
+template <class T>
+Value ParquetElementBigint(T &&value, bool is_iset) {
+	if (!is_iset) {
+		return Value();
+	}
+	return Value::BIGINT(value);
+}
+
 void ParquetMetaDataOperatorData::BindMetaData(vector<LogicalType> &return_types, vector<string> &names) {
 	names.emplace_back("file_name");
 	return_types.emplace_back(LogicalType::VARCHAR);
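These helpers centralize a recurring pattern in this file: Parquet/Thrift metadata fields carry an `__isset` flag, and a default-constructed `Value()` is DuckDB's SQL NULL, so unset fields now surface as NULL instead of a typed empty value. A minimal sketch of the observable behavior through the C++ API, assuming a local file named `data.parquet` exists (the file name is illustrative):

#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr); // in-memory database
	duckdb::Connection con(db);

	// Columns backed by unset metadata fields (e.g. stats_null_count when the
	// writer emitted no statistics) should now come back as SQL NULL.
	auto result = con.Query("SELECT stats_null_count, dictionary_page_offset "
	                        "FROM parquet_metadata('data.parquet')");
	result->Print();
	return 0;
}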
@@ -186,7 +210,7 @@ void ParquetMetaDataOperatorData::LoadFileMetaData(ClientContext &context, const
 		current_chunk.SetValue(5, count, Value::BIGINT(col_idx));
 
 		// file_offset, LogicalType::BIGINT
-		current_chunk.SetValue(6, count,
+		current_chunk.SetValue(6, count, ParquetElementBigint(column.file_offset, row_group.__isset.file_offset));
 
 		// num_values, LogicalType::BIGINT
 		current_chunk.SetValue(7, count, Value::BIGINT(col_meta.num_values));
@@ -206,13 +230,10 @@ void ParquetMetaDataOperatorData::LoadFileMetaData(ClientContext &context, const
 		    ConvertParquetStats(column_type, schema_element, stats.__isset.max, stats.max));
 
 		// stats_null_count, LogicalType::BIGINT
-		current_chunk.SetValue(
-		    12, count, stats.__isset.null_count ? Value::BIGINT(stats.null_count) : Value(LogicalType::BIGINT));
+		current_chunk.SetValue(12, count, ParquetElementBigint(stats.null_count, stats.__isset.null_count));
 
 		// stats_distinct_count, LogicalType::BIGINT
-		current_chunk.SetValue(13, count,
-		    stats.__isset.distinct_count ? Value::BIGINT(stats.distinct_count)
-		                                 : Value(LogicalType::BIGINT));
+		current_chunk.SetValue(13, count, ParquetElementBigint(stats.distinct_count, stats.__isset.distinct_count));
 
 		// stats_min_value, LogicalType::VARCHAR
 		current_chunk.SetValue(
@@ -234,10 +255,13 @@ void ParquetMetaDataOperatorData::LoadFileMetaData(ClientContext &context, const
 		current_chunk.SetValue(17, count, Value(StringUtil::Join(encoding_string, ", ")));
 
 		// index_page_offset, LogicalType::BIGINT
-		current_chunk.SetValue(
+		current_chunk.SetValue(
+		    18, count, ParquetElementBigint(col_meta.index_page_offset, col_meta.__isset.index_page_offset));
 
 		// dictionary_page_offset, LogicalType::BIGINT
-		current_chunk.SetValue(
+		current_chunk.SetValue(
+		    19, count,
+		    ParquetElementBigint(col_meta.dictionary_page_offset, col_meta.__isset.dictionary_page_offset));
 
 		// data_page_offset, LogicalType::BIGINT
 		current_chunk.SetValue(20, count, Value::BIGINT(col_meta.data_page_offset));
@@ -299,8 +323,10 @@ void ParquetMetaDataOperatorData::BindSchema(vector<LogicalType> &return_types,
 	return_types.emplace_back(LogicalType::VARCHAR);
 }
 
-Value ParquetLogicalTypeToString(const duckdb_parquet::format::LogicalType &type) {
-
+Value ParquetLogicalTypeToString(const duckdb_parquet::format::LogicalType &type, bool is_set) {
+	if (!is_set) {
+		return Value();
+	}
 	if (type.__isset.STRING) {
 		return Value(PrintParquetElementToString(type.STRING));
 	}
@@ -362,31 +388,31 @@ void ParquetMetaDataOperatorData::LoadSchemaData(ClientContext &context, const v
 		current_chunk.SetValue(1, count, column.name);
 
 		// type, LogicalType::VARCHAR
-		current_chunk.SetValue(2, count,
+		current_chunk.SetValue(2, count, ParquetElementString(column.type, column.__isset.type));
 
-		// type_length, LogicalType::
-		current_chunk.SetValue(3, count,
+		// type_length, LogicalType::INTEGER
+		current_chunk.SetValue(3, count, ParquetElementInteger(column.type_length, column.__isset.type_length));
 
 		// repetition_type, LogicalType::VARCHAR
-		current_chunk.SetValue(4, count,
+		current_chunk.SetValue(4, count, ParquetElementString(column.repetition_type, column.__isset.repetition_type));
 
 		// num_children, LogicalType::BIGINT
-		current_chunk.SetValue(5, count,
+		current_chunk.SetValue(5, count, ParquetElementBigint(column.num_children, column.__isset.num_children));
 
 		// converted_type, LogicalType::VARCHAR
-		current_chunk.SetValue(6, count,
+		current_chunk.SetValue(6, count, ParquetElementString(column.converted_type, column.__isset.converted_type));
 
 		// scale, LogicalType::BIGINT
-		current_chunk.SetValue(7, count,
+		current_chunk.SetValue(7, count, ParquetElementBigint(column.scale, column.__isset.scale));
 
 		// precision, LogicalType::BIGINT
-		current_chunk.SetValue(8, count,
+		current_chunk.SetValue(8, count, ParquetElementBigint(column.precision, column.__isset.precision));
 
 		// field_id, LogicalType::BIGINT
-		current_chunk.SetValue(9, count,
+		current_chunk.SetValue(9, count, ParquetElementBigint(column.field_id, column.__isset.field_id));
 
 		// logical_type, LogicalType::VARCHAR
-		current_chunk.SetValue(10, count, ParquetLogicalTypeToString(column.logicalType));
+		current_chunk.SetValue(10, count, ParquetLogicalTypeToString(column.logicalType, column.__isset.logicalType));
 
 		count++;
 		if (count >= STANDARD_VECTOR_SIZE) {
package/src/duckdb/src/catalog/catalog_entry/table_catalog_entry.cpp
CHANGED
@@ -55,6 +55,8 @@ void TableCatalogEntry::Serialize(Serializer &serializer) const {
 	D_ASSERT(!internal);
 
 	FieldWriter writer(serializer);
+	auto catalog_name = catalog.GetName();
+	writer.WriteString(catalog_name);
 	writer.WriteString(schema.name);
 	writer.WriteString(name);
 	columns.Serialize(writer);
@@ -66,6 +68,7 @@ unique_ptr<CreateTableInfo> TableCatalogEntry::Deserialize(Deserializer &source,
 	auto info = make_uniq<CreateTableInfo>();
 
 	FieldReader reader(source);
+	info->catalog = reader.ReadRequired<string>();
 	info->schema = reader.ReadRequired<string>();
 	info->table = reader.ReadRequired<string>();
 	info->columns = ColumnList::Deserialize(reader);
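Several hunks below follow from this change: the serialized form of a table entry now starts with its catalog name, and the deserialization sites (LogicalCreate, LogicalCreateIndex, LogicalDelete, LogicalInsert, LogicalUpdate, BoundCreateTableInfo) resolve entries through `info->catalog` instead of INVALID_CATALOG. That matters once more than one catalog is attached. A sketch of the scenario this enables, using the C++ API (database file and table names are illustrative):

#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr);
	duckdb::Connection con(db);

	// With a second catalog attached, serialized plans must record which
	// catalog a table lives in, or deserialization resolves the wrong entry.
	con.Query("ATTACH 'other.db' AS other");
	con.Query("CREATE TABLE other.tbl (i INTEGER)");
	con.Query("INSERT INTO other.tbl VALUES (1), (2)");
	con.Query("SELECT * FROM other.tbl")->Print();
	return 0;
}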
package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp
CHANGED
@@ -102,6 +102,7 @@ public:
 	optional_ptr<OptimisticDataWriter> writer;
 	// Rows that have been updated by a DO UPDATE conflict
 	unordered_set<row_t> updated_rows;
+	idx_t update_count = 0;
 };
 
 unique_ptr<GlobalSinkState> PhysicalInsert::GetGlobalSinkState(ClientContext &context) const {
@@ -217,10 +218,10 @@ void PhysicalInsert::CombineExistingAndInsertTuples(DataChunk &result, DataChunk
 	result.SetCardinality(input_chunk.size());
 }
 
-
-
+idx_t PhysicalInsert::PerformOnConflictAction(ExecutionContext &context, DataChunk &chunk, TableCatalogEntry &table,
+                                              Vector &row_ids) const {
 	if (action_type == OnConflictAction::NOTHING) {
-		return;
+		return 0;
 	}
 
 	DataChunk update_chunk; // contains only the to-update columns
@@ -259,6 +260,7 @@ void PhysicalInsert::PerformOnConflictAction(ExecutionContext &context, DataChun
 	auto &data_table = table.GetStorage();
 	// Perform the update, using the results of the SET expressions
 	data_table.Update(table, context.client, row_ids, set_columns, update_chunk);
+	return update_chunk.size();
 }
 
 // TODO: should we use a hash table to keep track of this instead?
@@ -275,12 +277,12 @@ void PhysicalInsert::RegisterUpdatedRows(InsertLocalState &lstate, const Vector
 	}
 }
 
-
-
+idx_t PhysicalInsert::OnConflictHandling(TableCatalogEntry &table, ExecutionContext &context,
+                                         InsertLocalState &lstate) const {
 	auto &data_table = table.GetStorage();
 	if (action_type == OnConflictAction::THROW) {
 		data_table.VerifyAppendConstraints(table, context.client, lstate.insert_chunk, nullptr);
-		return;
+		return 0;
 	}
 	// Check whether any conflicts arise, and if they all meet the conflict_target + condition
 	// If that's not the case - We throw the first error
@@ -291,8 +293,8 @@ void PhysicalInsert::OnConflictHandling(TableCatalogEntry &table, ExecutionConte
 	data_table.VerifyAppendConstraints(table, context.client, lstate.insert_chunk, &conflict_manager);
 	conflict_manager.Finalize();
 	if (conflict_manager.ConflictCount() == 0) {
-		// No conflicts found
-		return;
+		// No conflicts found, 0 updates performed
+		return 0;
 	}
 	auto &conflicts = conflict_manager.Conflicts();
 	auto &row_ids = conflict_manager.RowIds();
@@ -343,7 +345,7 @@ void PhysicalInsert::OnConflictHandling(TableCatalogEntry &table, ExecutionConte
 
 	RegisterUpdatedRows(lstate, row_ids, combined_chunk.size());
 
-	PerformOnConflictAction(context, combined_chunk, table, row_ids);
+	idx_t updated_tuples = PerformOnConflictAction(context, combined_chunk, table, row_ids);
 
 	// Remove the conflicting tuples from the insert chunk
 	SelectionVector sel_vec(lstate.insert_chunk.size());
@@ -351,6 +353,7 @@ void PhysicalInsert::OnConflictHandling(TableCatalogEntry &table, ExecutionConte
 	SelectionVector::Inverted(conflicts.Selection(), sel_vec, conflicts.Count(), lstate.insert_chunk.size());
 	lstate.insert_chunk.Slice(sel_vec, new_size);
 	lstate.insert_chunk.SetCardinality(new_size);
+	return updated_tuples;
 }
 
 SinkResultType PhysicalInsert::Sink(ExecutionContext &context, GlobalSinkState &state, LocalSinkState &lstate_p,
@@ -368,13 +371,14 @@ SinkResultType PhysicalInsert::Sink(ExecutionContext &context, GlobalSinkState &
 			gstate.initialized = true;
 		}
 
-		OnConflictHandling(table, context, lstate);
+		idx_t updated_tuples = OnConflictHandling(table, context, lstate);
 		storage.LocalAppend(gstate.append_state, table, context.client, lstate.insert_chunk, true);
 
 		if (return_chunk) {
 			gstate.return_collection.Append(lstate.insert_chunk);
 		}
-		gstate.insert_count +=
+		gstate.insert_count += lstate.insert_chunk.size();
+		gstate.insert_count += updated_tuples;
 	} else {
 		D_ASSERT(!return_chunk);
 		// parallel append
@@ -388,7 +392,8 @@ SinkResultType PhysicalInsert::Sink(ExecutionContext &context, GlobalSinkState &
 			lstate.local_collection->InitializeAppend(lstate.local_append_state);
 			lstate.writer = &gstate.table.GetStorage().CreateOptimisticWriter(context.client);
 		}
-		OnConflictHandling(table, context, lstate);
+		lstate.update_count += OnConflictHandling(table, context, lstate);
+
 		auto new_row_group = lstate.local_collection->Append(lstate.insert_chunk, lstate.local_append_state);
 		if (new_row_group) {
 			lstate.writer->CheckFlushToDisk(*lstate.local_collection);
@@ -421,6 +426,7 @@ void PhysicalInsert::Combine(ExecutionContext &context, GlobalSinkState &gstate_
 		// we have few rows - append to the local storage directly
 		lock_guard<mutex> lock(gstate.lock);
 		gstate.insert_count += append_count;
+		gstate.insert_count += lstate.update_count;
 		auto &table = gstate.table;
 		auto &storage = table.GetStorage();
 		storage.InitializeLocalAppend(gstate.append_state, context.client);
@@ -438,6 +444,7 @@ void PhysicalInsert::Combine(ExecutionContext &context, GlobalSinkState &gstate_
 
 		lock_guard<mutex> lock(gstate.lock);
 		gstate.insert_count += append_count;
+		gstate.insert_count += lstate.update_count;
 		gstate.table.GetStorage().LocalMerge(context.client, *lstate.local_collection);
 	}
 }
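The thread through these hunks: `OnConflictHandling` and `PerformOnConflictAction` now return how many tuples a DO UPDATE clause touched, and both the serial and the parallel append paths fold that number into `gstate.insert_count`. The visible effect is that the changed-row count reported for an `INSERT ... ON CONFLICT DO UPDATE` covers updated rows as well as freshly inserted ones. A minimal sketch against the C++ API (table and values are illustrative):

#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr);
	duckdb::Connection con(db);

	con.Query("CREATE TABLE tbl (id INTEGER PRIMARY KEY, val INTEGER)");
	con.Query("INSERT INTO tbl VALUES (1, 10)");

	// One conflicting row (updated) plus one new row (inserted):
	// the reported count should now include both.
	auto result = con.Query("INSERT INTO tbl VALUES (1, 99), (2, 20) "
	                        "ON CONFLICT (id) DO UPDATE SET val = excluded.val");
	result->Print();
	return 0;
}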
package/src/duckdb/src/function/table/range.cpp
CHANGED
@@ -273,6 +273,7 @@ void BuiltinFunctions::RegisterTableFunctions() {
 	RepeatTableFunction::RegisterFunction(*this);
 	SummaryTableFunction::RegisterFunction(*this);
 	UnnestTableFunction::RegisterFunction(*this);
+	RepeatRowTableFunction::RegisterFunction(*this);
 }
 
 } // namespace duckdb
package/src/duckdb/src/function/table/repeat_row.cpp
ADDED
@@ -0,0 +1,60 @@
+#include "duckdb/function/table/range.hpp"
+#include "duckdb/common/algorithm.hpp"
+
+namespace duckdb {
+
+struct RepeatRowFunctionData : public TableFunctionData {
+	RepeatRowFunctionData(vector<Value> values, idx_t target_count)
+	    : values(std::move(values)), target_count(target_count) {
+	}
+
+	const vector<Value> values;
+	idx_t target_count;
+};
+
+struct RepeatRowOperatorData : public GlobalTableFunctionState {
+	RepeatRowOperatorData() : current_count(0) {
+	}
+	idx_t current_count;
+};
+
+static unique_ptr<FunctionData> RepeatRowBind(ClientContext &context, TableFunctionBindInput &input,
+                                              vector<LogicalType> &return_types, vector<string> &names) {
+	auto &inputs = input.inputs;
+	for (idx_t input_idx = 0; input_idx < inputs.size(); input_idx++) {
+		return_types.push_back(inputs[input_idx].type());
+		names.push_back("column" + std::to_string(input_idx));
+	}
+	return make_uniq<RepeatRowFunctionData>(inputs, input.named_parameters["num_rows"].GetValue<int64_t>());
+}
+
+static unique_ptr<GlobalTableFunctionState> RepeatRowInit(ClientContext &context, TableFunctionInitInput &input) {
+	return make_uniq<RepeatRowOperatorData>();
+}
+
+static void RepeatRowFunction(ClientContext &context, TableFunctionInput &data_p, DataChunk &output) {
+	auto &bind_data = (const RepeatRowFunctionData &)*data_p.bind_data;
+	auto &state = data_p.global_state->Cast<RepeatRowOperatorData>();
+
+	idx_t remaining = MinValue<idx_t>(bind_data.target_count - state.current_count, STANDARD_VECTOR_SIZE);
+	for (idx_t val_idx = 0; val_idx < bind_data.values.size(); val_idx++) {
+		output.data[val_idx].Reference(bind_data.values[val_idx]);
+	}
+	output.SetCardinality(remaining);
+	state.current_count += remaining;
+}
+
+static unique_ptr<NodeStatistics> RepeatRowCardinality(ClientContext &context, const FunctionData *bind_data_p) {
+	auto &bind_data = (const RepeatRowFunctionData &)*bind_data_p;
+	return make_uniq<NodeStatistics>(bind_data.target_count, bind_data.target_count);
+}
+
+void RepeatRowTableFunction::RegisterFunction(BuiltinFunctions &set) {
+	TableFunction repeat_row("repeat_row", {}, RepeatRowFunction, RepeatRowBind, RepeatRowInit);
+	repeat_row.varargs = LogicalType::ANY;
+	repeat_row.named_parameters["num_rows"] = LogicalType::BIGINT;
+	repeat_row.cardinality = RepeatRowCardinality;
+	set.AddFunction(repeat_row);
+}
+
+} // namespace duckdb
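`repeat_row` is a new vararg table function: each positional argument becomes a column (named column0, column1, ...) and the named parameter `num_rows` controls how many identical rows are emitted, one STANDARD_VECTOR_SIZE chunk at a time. A quick usage sketch via the C++ API:

#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr);
	duckdb::Connection con(db);

	// Three columns (column0, column1, column2), five identical rows.
	auto result = con.Query("SELECT * FROM repeat_row(42, 'hello', true, num_rows=5)");
	result->Print();
	return 0;
}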
package/src/duckdb/src/function/table/version/pragma_version.cpp
CHANGED
@@ -1,8 +1,8 @@
 #ifndef DUCKDB_VERSION
-#define DUCKDB_VERSION "0.7.2-dev2699"
+#define DUCKDB_VERSION "0.7.2-dev2740"
 #endif
 #ifndef DUCKDB_SOURCE_ID
-#define DUCKDB_SOURCE_ID "
+#define DUCKDB_SOURCE_ID "bb37f3fd06"
 #endif
 #include "duckdb/function/table/system_functions.hpp"
 #include "duckdb/main/database.hpp"
package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_insert.hpp
CHANGED
@@ -116,9 +116,10 @@ public:
 protected:
 	void CombineExistingAndInsertTuples(DataChunk &result, DataChunk &scan_chunk, DataChunk &input_chunk,
 	                                    ClientContext &client) const;
-
-
-
+	//! Returns the amount of updated tuples
+	idx_t OnConflictHandling(TableCatalogEntry &table, ExecutionContext &context, InsertLocalState &lstate) const;
+	idx_t PerformOnConflictAction(ExecutionContext &context, DataChunk &chunk, TableCatalogEntry &table,
+	                              Vector &row_ids) const;
 	void RegisterUpdatedRows(InsertLocalState &lstate, const Vector &row_ids, idx_t count) const;
 };
 
package/src/duckdb/src/include/duckdb/function/table/range.hpp
CHANGED
@@ -29,6 +29,10 @@ struct RepeatTableFunction {
 	static void RegisterFunction(BuiltinFunctions &set);
 };
 
+struct RepeatRowTableFunction {
+	static void RegisterFunction(BuiltinFunctions &set);
+};
+
 struct UnnestTableFunction {
 	static void RegisterFunction(BuiltinFunctions &set);
 };
package/src/duckdb/src/planner/binder/tableref/bind_table_function.cpp
CHANGED
@@ -257,10 +257,11 @@ unique_ptr<BoundTableRef> Binder::Bind(TableFunctionRef &ref) {
 
 	// cast the parameters to the type of the function
 	for (idx_t i = 0; i < arguments.size(); i++) {
-
-
-
-
+		auto target_type = i < table_function.arguments.size() ? table_function.arguments[i] : table_function.varargs;
+
+		if (target_type != LogicalType::ANY && target_type != LogicalType::TABLE &&
+		    target_type != LogicalType::POINTER && target_type.id() != LogicalTypeId::LIST) {
+			parameters[i] = parameters[i].CastAs(context, target_type);
 		}
 	}
 
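This is the binder piece that makes `repeat_row` usable: positional arguments beyond the declared parameter list are bound against `table_function.varargs`, and casting is skipped for ANY (plus the TABLE/POINTER/LIST special cases) so vararg values keep their original types. For a fixed-signature function the cast still applies. A small illustration via the C++ API (the specific queries are assumptions about observable behavior, not tests from this diff):

#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr);
	duckdb::Connection con(db);

	// range() declares BIGINT parameters: the DOUBLE literal is cast by the binder.
	con.Query("SELECT * FROM range(3.0)")->Print();

	// repeat_row() declares varargs = ANY: no cast, each column keeps its input type.
	con.Query("SELECT * FROM repeat_row(3.0, 'x', num_rows=2)")->Print();
	return 0;
}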
package/src/duckdb/src/planner/operator/logical_create.cpp
CHANGED
@@ -10,8 +10,7 @@ unique_ptr<LogicalOperator> LogicalCreate::Deserialize(LogicalDeserializationSta
 	auto &context = state.gstate.context;
 	auto info = CreateInfo::Deserialize(reader.GetSource());
 
-	auto schema_catalog_entry =
-	    Catalog::GetSchema(context, INVALID_CATALOG, info->schema, OnEntryNotFound::RETURN_NULL);
+	auto schema_catalog_entry = Catalog::GetSchema(context, info->catalog, info->schema, OnEntryNotFound::RETURN_NULL);
 	return make_uniq<LogicalCreate>(state.type, std::move(info), schema_catalog_entry);
 }
 
package/src/duckdb/src/planner/operator/logical_create_index.cpp
CHANGED
@@ -22,7 +22,7 @@ unique_ptr<LogicalOperator> LogicalCreateIndex::Deserialize(LogicalDeserializati
 	auto catalog_info = TableCatalogEntry::Deserialize(reader.GetSource(), context);
 
 	auto &table =
-	    Catalog::GetEntry<TableCatalogEntry>(context,
+	    Catalog::GetEntry<TableCatalogEntry>(context, catalog_info->catalog, catalog_info->schema, catalog_info->table);
 	auto unbound_expressions = reader.ReadRequiredSerializableList<Expression>(state.gstate);
 
 	auto create_info = reader.ReadOptional<CreateInfo>(nullptr);
package/src/duckdb/src/planner/operator/logical_delete.cpp
CHANGED
@@ -1,6 +1,7 @@
 #include "duckdb/planner/operator/logical_delete.hpp"
-
+
 #include "duckdb/catalog/catalog_entry/table_catalog_entry.hpp"
+#include "duckdb/parser/parsed_data/create_table_info.hpp"
 
 namespace duckdb {
 
@@ -19,8 +20,7 @@ unique_ptr<LogicalOperator> LogicalDelete::Deserialize(LogicalDeserializationSta
 	auto &context = state.gstate.context;
 	auto info = TableCatalogEntry::Deserialize(reader.GetSource(), context);
 
-	auto &table_catalog_entry =
-	    Catalog::GetEntry<TableCatalogEntry>(context, INVALID_CATALOG, info->schema, info->table);
+	auto &table_catalog_entry = Catalog::GetEntry<TableCatalogEntry>(context, info->catalog, info->schema, info->table);
 
 	auto table_index = reader.ReadRequired<idx_t>();
 	auto result = make_uniq<LogicalDelete>(table_catalog_entry, table_index);
package/src/duckdb/src/planner/operator/logical_insert.cpp
CHANGED
@@ -41,7 +41,7 @@ unique_ptr<LogicalOperator> LogicalInsert::Deserialize(LogicalDeserializationSta
 	auto bound_defaults = reader.ReadRequiredSerializableList<Expression>(state.gstate);
 	auto action_type = reader.ReadRequired<OnConflictAction>();
 
-	auto &catalog = Catalog::GetCatalog(context,
+	auto &catalog = Catalog::GetCatalog(context, info->catalog);
 
 	auto &table_catalog_entry = catalog.GetEntry<TableCatalogEntry>(context, info->schema, info->table);
 	auto result = make_uniq<LogicalInsert>(table_catalog_entry, table_index);
package/src/duckdb/src/planner/operator/logical_update.cpp
CHANGED
@@ -21,7 +21,7 @@ void LogicalUpdate::Serialize(FieldWriter &writer) const {
 unique_ptr<LogicalOperator> LogicalUpdate::Deserialize(LogicalDeserializationState &state, FieldReader &reader) {
 	auto &context = state.gstate.context;
 	auto info = TableCatalogEntry::Deserialize(reader.GetSource(), context);
-	auto &catalog = Catalog::GetCatalog(context,
+	auto &catalog = Catalog::GetCatalog(context, info->catalog);
 
 	auto &table_catalog_entry = catalog.GetEntry<TableCatalogEntry>(context, info->schema, info->table);
 	auto result = make_uniq<LogicalUpdate>(table_catalog_entry);
package/src/duckdb/src/planner/parsed_data/bound_create_table_info.cpp
CHANGED
@@ -5,46 +5,25 @@
 
 namespace duckdb {
 void BoundCreateTableInfo::Serialize(Serializer &serializer) const {
-	schema.Serialize(serializer);
 	serializer.WriteOptional(base);
-
-	// TODO[YLM]: Review if we want/need to serialize more of the fields.
-	//! The map of column names -> column index, used during binding
-	// case_insensitive_map_t<column_t> name_map;
-
-	//! Column dependency manager of the table
-	// ColumnDependencyManager column_dependency_manager;
-
 	serializer.WriteList(constraints);
 	serializer.WriteList(bound_constraints);
 	serializer.WriteList(bound_defaults);
-
-	//! Dependents of the table (in e.g. default values)
-	// unordered_set<CatalogEntry *> dependencies;
-
-	//! The existing table data on disk (if any)
-	// unique_ptr<PersistentTableData> data;
-
-	//! CREATE TABLE from QUERY
 	serializer.WriteOptional(query);
-
-	//! Indexes created by this table <Block_ID, Offset>
-	// vector<BlockPointer> indexes;
 }
 
 unique_ptr<BoundCreateTableInfo> BoundCreateTableInfo::Deserialize(Deserializer &source,
                                                                    PlanDeserializationState &state) {
-	auto
-
-	auto schema_name =
-	auto
-	auto
-	result->base = source.ReadOptional<CreateInfo>();
+	auto create_info_base = source.ReadOptional<CreateInfo>();
+	// Get schema from the catalog to create BoundCreateTableInfo
+	auto schema_name = create_info_base->schema;
+	auto catalog = create_info_base->catalog;
+	auto &schema_catalog_entry = Catalog::GetSchema(state.context, catalog, schema_name);
 
+	auto result = make_uniq<BoundCreateTableInfo>(schema_catalog_entry, std::move(create_info_base));
 	source.ReadList<Constraint>(result->constraints);
 	source.ReadList<BoundConstraint>(result->bound_constraints);
 	source.ReadList<Expression>(result->bound_defaults, state);
-
 	result->query = source.ReadOptional<LogicalOperator>(state);
 	return result;
 }
|