duckdb 0.8.2-dev5120.0 → 0.8.2-dev5216.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/duckdb/extension/icu/icu-dateadd.cpp +11 -19
- package/src/duckdb/extension/icu/icu-datepart.cpp +44 -53
- package/src/duckdb/extension/icu/icu-datesub.cpp +10 -15
- package/src/duckdb/extension/icu/icu-datetrunc.cpp +6 -8
- package/src/duckdb/extension/icu/icu-list-range.cpp +6 -8
- package/src/duckdb/extension/icu/icu-makedate.cpp +8 -10
- package/src/duckdb/extension/icu/icu-strptime.cpp +30 -32
- package/src/duckdb/extension/icu/icu-table-range.cpp +6 -9
- package/src/duckdb/extension/icu/icu-timebucket.cpp +5 -7
- package/src/duckdb/extension/icu/icu-timezone.cpp +18 -29
- package/src/duckdb/extension/icu/icu_extension.cpp +18 -25
- package/src/duckdb/extension/icu/include/icu-dateadd.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-datepart.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-datesub.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-datetrunc.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-list-range.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-makedate.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-strptime.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-table-range.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-timebucket.hpp +1 -1
- package/src/duckdb/extension/icu/include/icu-timezone.hpp +1 -1
- package/src/duckdb/extension/json/buffered_json_reader.cpp +2 -2
- package/src/duckdb/extension/json/json_functions/read_json.cpp +15 -0
- package/src/duckdb/src/catalog/catalog.cpp +6 -1
- package/src/duckdb/src/execution/operator/csv_scanner/parallel_csv_reader.cpp +14 -0
- package/src/duckdb/src/execution/operator/csv_scanner/sniffer/type_detection.cpp +22 -22
- package/src/duckdb/src/execution/operator/schema/physical_attach.cpp +4 -1
- package/src/duckdb/src/function/table/read_csv.cpp +3 -1
- package/src/duckdb/src/function/table/version/pragma_version.cpp +2 -2
- package/src/duckdb/src/include/duckdb/main/extension_entries.hpp +34 -0
- package/src/duckdb/src/include/duckdb/main/extension_util.hpp +14 -0
- package/src/duckdb/src/include/duckdb/main/settings.hpp +1 -1
- package/src/duckdb/src/include/duckdb/storage/checkpoint_manager.hpp +2 -0
- package/src/duckdb/src/main/extension/extension_util.cpp +56 -0
- package/src/duckdb/src/planner/binder.cpp +5 -1
- package/src/duckdb/src/storage/checkpoint_manager.cpp +162 -138
- package/src/duckdb/src/storage/storage_info.cpp +1 -1
package/src/duckdb/src/main/extension/extension_util.cpp

@@ -7,6 +7,9 @@
 #include "duckdb/parser/parsed_data/create_scalar_function_info.hpp"
 #include "duckdb/parser/parsed_data/create_table_function_info.hpp"
 #include "duckdb/parser/parsed_data/create_macro_info.hpp"
+#include "duckdb/catalog/catalog_entry/scalar_function_catalog_entry.hpp"
+#include "duckdb/catalog/catalog_entry/table_function_catalog_entry.hpp"
+#include "duckdb/parser/parsed_data/create_collation_info.hpp"
 #include "duckdb/catalog/catalog.hpp"
 #include "duckdb/main/config.hpp"
 
@@ -86,6 +89,59 @@ void ExtensionUtil::RegisterFunction(DatabaseInstance &db, CreateMacroInfo &info
 	system_catalog.CreateFunction(data, info);
 }
 
+void ExtensionUtil::RegisterCollation(DatabaseInstance &db, CreateCollationInfo &info) {
+	auto &system_catalog = Catalog::GetSystemCatalog(db);
+	auto data = CatalogTransaction::GetSystemTransaction(db);
+	info.on_conflict = OnCreateConflict::IGNORE_ON_CONFLICT;
+	system_catalog.CreateCollation(data, info);
+}
+
+void ExtensionUtil::AddFunctionOverload(DatabaseInstance &db, ScalarFunction function) {
+	auto &scalar_function = ExtensionUtil::GetFunction(db, function.name);
+	scalar_function.functions.AddFunction(std::move(function));
+}
+
+void ExtensionUtil::AddFunctionOverload(DatabaseInstance &db, ScalarFunctionSet functions) { // NOLINT
+	D_ASSERT(!functions.name.empty());
+	auto &scalar_function = ExtensionUtil::GetFunction(db, functions.name);
+	for (auto &function : functions.functions) {
+		function.name = functions.name;
+		scalar_function.functions.AddFunction(std::move(function));
+	}
+}
+
+void ExtensionUtil::AddFunctionOverload(DatabaseInstance &db, TableFunctionSet functions) { // NOLINT
+	auto &table_function = ExtensionUtil::GetTableFunction(db, functions.name);
+	for (auto &function : functions.functions) {
+		function.name = functions.name;
+		table_function.functions.AddFunction(std::move(function));
+	}
+}
+
+ScalarFunctionCatalogEntry &ExtensionUtil::GetFunction(DatabaseInstance &db, const string &name) {
+	D_ASSERT(!name.empty());
+	auto &system_catalog = Catalog::GetSystemCatalog(db);
+	auto data = CatalogTransaction::GetSystemTransaction(db);
+	auto &schema = system_catalog.GetSchema(data, DEFAULT_SCHEMA);
+	auto catalog_entry = schema.GetEntry(data, CatalogType::SCALAR_FUNCTION_ENTRY, name);
+	if (!catalog_entry) {
+		throw InvalidInputException("Function with name \"%s\" not found in ExtensionUtil::GetFunction", name);
+	}
+	return catalog_entry->Cast<ScalarFunctionCatalogEntry>();
+}
+
+TableFunctionCatalogEntry &ExtensionUtil::GetTableFunction(DatabaseInstance &db, const string &name) {
+	D_ASSERT(!name.empty());
+	auto &system_catalog = Catalog::GetSystemCatalog(db);
+	auto data = CatalogTransaction::GetSystemTransaction(db);
+	auto &schema = system_catalog.GetSchema(data, DEFAULT_SCHEMA);
+	auto catalog_entry = schema.GetEntry(data, CatalogType::TABLE_FUNCTION_ENTRY, name);
+	if (!catalog_entry) {
+		throw InvalidInputException("Function with name \"%s\" not found in ExtensionUtil::GetTableFunction", name);
+	}
+	return catalog_entry->Cast<TableFunctionCatalogEntry>();
+}
+
 void ExtensionUtil::RegisterType(DatabaseInstance &db, string type_name, LogicalType type) {
 	D_ASSERT(!type_name.empty());
 	CreateTypeInfo info(std::move(type_name), std::move(type));
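The new ExtensionUtil entry points above give extensions a supported way to register collations and to append overloads to functions that already exist in the system catalog; the lookup helpers throw InvalidInputException rather than returning null. Below is a minimal sketch of how an extension's load step might use them. The function name my_upper and its pass-through body are hypothetical placeholders; only the ExtensionUtil calls are taken from this diff.

// Sketch only: "my_upper" and its body are hypothetical.
#include "duckdb.hpp"
#include "duckdb/main/extension_util.hpp"

using namespace duckdb;

// toy scalar implementation: references the input vector unchanged
static void MyUpperFun(DataChunk &args, ExpressionState &state, Vector &result) {
	result.Reference(args.data[0]);
}

static void LoadInternal(DatabaseInstance &db) {
	// base registration (pre-existing ExtensionUtil API)
	ScalarFunction base("my_upper", {LogicalType::VARCHAR}, LogicalType::VARCHAR, MyUpperFun);
	ExtensionUtil::RegisterFunction(db, base);

	// new in this version: attach a second signature to the existing entry;
	// AddFunctionOverload resolves the catalog entry by name and throws if it is absent
	ScalarFunction overload("my_upper", {LogicalType::BLOB}, LogicalType::BLOB, MyUpperFun);
	ExtensionUtil::AddFunctionOverload(db, std::move(overload));
}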

package/src/duckdb/src/planner/binder.cpp

@@ -491,7 +491,11 @@ BoundStatement Binder::BindReturning(vector<unique_ptr<ParsedExpression>> return
 	projection->AddChild(std::move(child_operator));
 	D_ASSERT(result.types.size() == result.names.size());
 	result.plan = std::move(projection);
-
+	// If an insert/delete/update statement returns data, there are sometimes issues with streaming results
+	// where the data modification doesn't take place until the streamed result is exhausted. Once a row is
+	// returned, it should be guaranteed that the row has been inserted.
+	// see https://github.com/duckdb/duckdb/issues/8310
+	properties.allow_stream_result = false;
 	properties.return_type = StatementReturnType::QUERY_RESULT;
 	return result;
 }
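For users, the practical effect of this binder change is that INSERT/UPDATE/DELETE ... RETURNING statements always produce a materialized result, even when the client requests streaming. A minimal sketch against the C++ API; the table t below is a hypothetical example.

// Sketch of the behavior this flag guards against (duckdb/duckdb#8310):
// a row from INSERT ... RETURNING should only be observable once the
// insert has actually taken place.
#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr);
	duckdb::Connection con(db);
	con.Query("CREATE TABLE t (i INTEGER)");
	// SendQuery would normally permit a streaming result; after this change
	// the binder forces materialization for the RETURNING statement
	auto result = con.SendQuery("INSERT INTO t VALUES (1), (2) RETURNING i");
	result->Print();
	return 0;
}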

package/src/duckdb/src/storage/checkpoint_manager.cpp

@@ -54,6 +54,74 @@ unique_ptr<TableDataWriter> SingleFileCheckpointWriter::GetTableDataWriter(Table
 	return make_uniq<SingleFileTableDataWriter>(*this, table, *table_metadata_writer);
 }
 
+static catalog_entry_vector_t GetCatalogEntries(vector<reference<SchemaCatalogEntry>> &schemas) {
+	catalog_entry_vector_t entries;
+	for (auto &schema_p : schemas) {
+		auto &schema = schema_p.get();
+		entries.push_back(schema);
+		schema.Scan(CatalogType::TYPE_ENTRY, [&](CatalogEntry &entry) {
+			if (entry.internal) {
+				return;
+			}
+			entries.push_back(entry);
+		});
+
+		schema.Scan(CatalogType::SEQUENCE_ENTRY, [&](CatalogEntry &entry) {
+			if (entry.internal) {
+				return;
+			}
+			entries.push_back(entry);
+		});
+
+		catalog_entry_vector_t tables;
+		vector<reference<ViewCatalogEntry>> views;
+		schema.Scan(CatalogType::TABLE_ENTRY, [&](CatalogEntry &entry) {
+			if (entry.internal) {
+				return;
+			}
+			if (entry.type == CatalogType::TABLE_ENTRY) {
+				tables.push_back(entry.Cast<TableCatalogEntry>());
+			} else if (entry.type == CatalogType::VIEW_ENTRY) {
+				views.push_back(entry.Cast<ViewCatalogEntry>());
+			} else {
+				throw NotImplementedException("Catalog type for entries");
+			}
+		});
+		// Reorder tables because of foreign key constraint
+		ReorderTableEntries(tables);
+		for (auto &table : tables) {
+			entries.push_back(table.get());
+		}
+		for (auto &view : views) {
+			entries.push_back(view.get());
+		}
+
+		schema.Scan(CatalogType::SCALAR_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
+			if (entry.internal) {
+				return;
+			}
+			if (entry.type == CatalogType::MACRO_ENTRY) {
+				entries.push_back(entry);
+			}
+		});
+
+		schema.Scan(CatalogType::TABLE_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
+			if (entry.internal) {
+				return;
+			}
+			if (entry.type == CatalogType::TABLE_MACRO_ENTRY) {
+				entries.push_back(entry);
+			}
+		});
+
+		schema.Scan(CatalogType::INDEX_ENTRY, [&](CatalogEntry &entry) {
+			D_ASSERT(!entry.internal);
+			entries.push_back(entry);
+		});
+	}
+	return entries;
+}
+
 void SingleFileCheckpointWriter::CreateCheckpoint() {
 	auto &config = DBConfig::Get(db);
 	auto &storage_manager = db.GetStorageManager().Cast<SingleFileStorageManager>();
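GetCatalogEntries flattens every schema into a single ordered list, and that order is load-bearing: checkpoint load replays the list from front to back, which is why tables are reordered before being appended. A small hypothetical illustration of the foreign-key constraint involved:

// Hypothetical schema illustrating why ReorderTableEntries is needed:
// entries are recreated in list order on load, so `parent` must be
// serialized before `child` or the foreign key of `child` cannot bind.
#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db("fk_order.db"); // on-disk database, so checkpoints are written
	duckdb::Connection con(db);
	con.Query("CREATE TABLE parent (id INTEGER PRIMARY KEY)");
	con.Query("CREATE TABLE child (pid INTEGER REFERENCES parent (id))");
	return 0;
}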
@@ -97,11 +165,12 @@ void SingleFileCheckpointWriter::CreateCheckpoint() {
 	]
 	}
 	*/
+	auto catalog_entries = GetCatalogEntries(schemas);
 	BinarySerializer serializer(*metadata_writer);
 	serializer.Begin();
-	serializer.WriteList(100, "
-	auto &
-	list.WriteObject([&](Serializer &obj) {
+	serializer.WriteList(100, "catalog_entries", catalog_entries.size(), [&](Serializer::List &list, idx_t i) {
+		auto &entry = catalog_entries[i];
+		list.WriteObject([&](Serializer &obj) { WriteEntry(entry.get(), obj); });
 	});
 	serializer.End();
 
@@ -141,8 +210,8 @@ void SingleFileCheckpointWriter::CreateCheckpoint() {
 void CheckpointReader::LoadCheckpoint(ClientContext &context, MetadataReader &reader) {
 	BinaryDeserializer deserializer(reader);
 	deserializer.Begin();
-	deserializer.ReadList(100, "
-	return list.ReadObject([&](Deserializer &obj) {
+	deserializer.ReadList(100, "catalog_entries", [&](Deserializer::List &list, idx_t i) {
+		return list.ReadObject([&](Deserializer &obj) { ReadEntry(context, obj); });
 	});
 	deserializer.End();
 }
@@ -169,112 +238,102 @@ void SingleFileCheckpointReader::LoadFromStorage() {
 	con.Commit();
 }
 
+void CheckpointWriter::WriteEntry(CatalogEntry &entry, Serializer &serializer) {
+	serializer.WriteProperty(99, "catalog_type", entry.type);
+
+	switch (entry.type) {
+	case CatalogType::SCHEMA_ENTRY: {
+		auto &schema = entry.Cast<SchemaCatalogEntry>();
+		WriteSchema(schema, serializer);
+		break;
+	}
+	case CatalogType::TYPE_ENTRY: {
+		auto &custom_type = entry.Cast<TypeCatalogEntry>();
+		WriteType(custom_type, serializer);
+		break;
+	}
+	case CatalogType::SEQUENCE_ENTRY: {
+		auto &seq = entry.Cast<SequenceCatalogEntry>();
+		WriteSequence(seq, serializer);
+		break;
+	}
+	case CatalogType::TABLE_ENTRY: {
+		auto &table = entry.Cast<TableCatalogEntry>();
+		WriteTable(table, serializer);
+		break;
+	}
+	case CatalogType::VIEW_ENTRY: {
+		auto &view = entry.Cast<ViewCatalogEntry>();
+		WriteView(view, serializer);
+		break;
+	}
+	case CatalogType::MACRO_ENTRY: {
+		auto &macro = entry.Cast<ScalarMacroCatalogEntry>();
+		WriteMacro(macro, serializer);
+		break;
+	}
+	case CatalogType::TABLE_MACRO_ENTRY: {
+		auto &macro = entry.Cast<TableMacroCatalogEntry>();
+		WriteTableMacro(macro, serializer);
+		break;
+	}
+	case CatalogType::INDEX_ENTRY: {
+		auto &index = entry.Cast<IndexCatalogEntry>();
+		WriteIndex(index, serializer);
+		break;
+	}
+	default:
+		throw InternalException("Unrecognized catalog type in CheckpointWriter::WriteEntry");
+	}
+}
+
 //===--------------------------------------------------------------------===//
 // Schema
 //===--------------------------------------------------------------------===//
 void CheckpointWriter::WriteSchema(SchemaCatalogEntry &schema, Serializer &serializer) {
 	// write the schema data
 	serializer.WriteProperty(100, "schema", &schema);
+}
 
-	// Write the custom types
-	vector<reference<TypeCatalogEntry>> custom_types;
-	schema.Scan(CatalogType::TYPE_ENTRY, [&](CatalogEntry &entry) {
-		if (entry.internal) {
-			return;
-		}
-		custom_types.push_back(entry.Cast<TypeCatalogEntry>());
-	});
-
-	serializer.WriteList(101, "custom_types", custom_types.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = custom_types[i];
-		list.WriteObject([&](Serializer &obj) { WriteType(entry, obj); });
-	});
-
-	// Write the sequences
-	vector<reference<SequenceCatalogEntry>> sequences;
-	schema.Scan(CatalogType::SEQUENCE_ENTRY, [&](CatalogEntry &entry) {
-		if (entry.internal) {
-			return;
-		}
-		sequences.push_back(entry.Cast<SequenceCatalogEntry>());
-	});
-
-	serializer.WriteList(102, "sequences", sequences.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = sequences[i];
-		list.WriteObject([&](Serializer &obj) { WriteSequence(entry, obj); });
-	});
-
-	// Read the tables and views
-	catalog_entry_vector_t tables;
-	vector<reference<ViewCatalogEntry>> views;
-	schema.Scan(CatalogType::TABLE_ENTRY, [&](CatalogEntry &entry) {
-		if (entry.internal) {
-			return;
-		}
-		if (entry.type == CatalogType::TABLE_ENTRY) {
-			tables.push_back(entry.Cast<TableCatalogEntry>());
-		} else if (entry.type == CatalogType::VIEW_ENTRY) {
-			views.push_back(entry.Cast<ViewCatalogEntry>());
-		} else {
-			throw NotImplementedException("Catalog type for entries");
-		}
-	});
-	// Reorder tables because of foreign key constraint
-	ReorderTableEntries(tables);
-	// Tables
-	serializer.WriteList(103, "tables", tables.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = tables[i];
-		auto &table = entry.get().Cast<TableCatalogEntry>();
-		list.WriteObject([&](Serializer &obj) { WriteTable(table, obj); });
-	});
-
-	// Views
-	serializer.WriteList(104, "views", views.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = views[i];
-		list.WriteObject([&](Serializer &obj) { WriteView(entry.get(), obj); });
-	});
-
-	// Scalar macros
-	vector<reference<ScalarMacroCatalogEntry>> macros;
-	schema.Scan(CatalogType::SCALAR_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
-		if (entry.internal) {
-			return;
-		}
-		if (entry.type == CatalogType::MACRO_ENTRY) {
-			macros.push_back(entry.Cast<ScalarMacroCatalogEntry>());
-		}
-	});
-	serializer.WriteList(105, "macros", macros.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = macros[i];
-		list.WriteObject([&](Serializer &obj) { WriteMacro(entry.get(), obj); });
-	});
-
-	// Table macros
-	vector<reference<TableMacroCatalogEntry>> table_macros;
-	schema.Scan(CatalogType::TABLE_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
-		if (entry.internal) {
-			return;
-		}
-		if (entry.type == CatalogType::TABLE_MACRO_ENTRY) {
-			table_macros.push_back(entry.Cast<TableMacroCatalogEntry>());
-		}
-	});
-	serializer.WriteList(106, "table_macros", table_macros.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = table_macros[i];
-		list.WriteObject([&](Serializer &obj) { WriteTableMacro(entry.get(), obj); });
-	});
-
-	// Indexes
-	vector<reference<IndexCatalogEntry>> indexes;
-	schema.Scan(CatalogType::INDEX_ENTRY, [&](CatalogEntry &entry) {
-		D_ASSERT(!entry.internal);
-		indexes.push_back(entry.Cast<IndexCatalogEntry>());
-	});
+void CheckpointReader::ReadEntry(ClientContext &context, Deserializer &deserializer) {
+	auto type = deserializer.ReadProperty<CatalogType>(99, "type");
 
-	serializer.WriteList(107, "indexes", indexes.size(), [&](Serializer::List &list, idx_t i) {
-		auto &entry = indexes[i];
-		list.WriteObject([&](Serializer &obj) { WriteIndex(entry.get(), obj); });
-	});
+	switch (type) {
+	case CatalogType::SCHEMA_ENTRY: {
+		ReadSchema(context, deserializer);
+		break;
+	}
+	case CatalogType::TYPE_ENTRY: {
+		ReadType(context, deserializer);
+		break;
+	}
+	case CatalogType::SEQUENCE_ENTRY: {
+		ReadSequence(context, deserializer);
+		break;
+	}
+	case CatalogType::TABLE_ENTRY: {
+		ReadTable(context, deserializer);
+		break;
+	}
+	case CatalogType::VIEW_ENTRY: {
+		ReadView(context, deserializer);
+		break;
+	}
+	case CatalogType::MACRO_ENTRY: {
+		ReadMacro(context, deserializer);
+		break;
+	}
+	case CatalogType::TABLE_MACRO_ENTRY: {
+		ReadTableMacro(context, deserializer);
+		break;
+	}
+	case CatalogType::INDEX_ENTRY: {
+		ReadIndex(context, deserializer);
+		break;
+	}
+	default:
+		throw InternalException("Unrecognized catalog type in CheckpointWriter::WriteEntry");
+	}
 }
 
 void CheckpointReader::ReadSchema(ClientContext &context, Deserializer &deserializer) {
@@ -285,41 +344,6 @@ void CheckpointReader::ReadSchema(ClientContext &context, Deserializer &deserializer) {
 	// we set create conflict to IGNORE_ON_CONFLICT, so that we can ignore a failure when recreating the main schema
 	schema_info.on_conflict = OnCreateConflict::IGNORE_ON_CONFLICT;
 	catalog.CreateSchema(context, schema_info);
-
-	// Read the custom types
-	deserializer.ReadList(101, "custom_types", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadType(context, obj); });
-	});
-
-	// Read the sequences
-	deserializer.ReadList(102, "sequences", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadSequence(context, obj); });
-	});
-
-	// Read the tables
-	deserializer.ReadList(103, "tables", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadTable(context, obj); });
-	});
-
-	// Read the views
-	deserializer.ReadList(104, "views", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadView(context, obj); });
-	});
-
-	// Read the macros
-	deserializer.ReadList(105, "macros", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadMacro(context, obj); });
-	});
-
-	// Read the table macros
-	deserializer.ReadList(106, "table_macros", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadTableMacro(context, obj); });
-	});
-
-	// Read the indexes
-	deserializer.ReadList(107, "indexes", [&](Deserializer::List &list, idx_t i) {
-		return list.ReadObject([&](Deserializer &obj) { ReadIndex(context, obj); });
-	});
 }
 
 //===--------------------------------------------------------------------===//