duckdb 0.8.2-dev5154.0 → 0.8.2-dev5216.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "name": "duckdb",
  "main": "./lib/duckdb.js",
  "types": "./lib/duckdb.d.ts",
- "version": "0.8.2-dev5154.0",
+ "version": "0.8.2-dev5216.0",
  "description": "DuckDB node.js API",
  "gypfile": true,
  "dependencies": {
@@ -23,7 +23,7 @@ bool JSONFileHandle::IsOpen() const {
  }

  void JSONFileHandle::Close() {
- if (IsOpen() && plain_file_source) {
+ if (IsOpen() && file_handle->OnDiskFile()) {
  file_handle->Close();
  file_handle = nullptr;
  }
@@ -34,7 +34,7 @@ void JSONFileHandle::Reset() {
  read_position = 0;
  requested_reads = 0;
  actual_reads = 0;
- if (IsOpen() && plain_file_source) {
+ if (IsOpen() && CanSeek()) {
  file_handle->Reset();
  }
  }
@@ -432,7 +432,8 @@ void FindMinimalQualification(ClientContext &context, const string &catalog_name
  qualify_schema = true;
  }

- bool Catalog::TryAutoLoad(ClientContext &context, const string &extension_name) noexcept {
+ bool Catalog::TryAutoLoad(ClientContext &context, const string &original_name) noexcept {
+ string extension_name = ExtensionHelper::ApplyExtensionAlias(original_name);
  if (context.db->ExtensionIsLoaded(extension_name)) {
  return true;
  }
@@ -333,9 +333,23 @@ normal : {
  for (; position_buffer < end_buffer; position_buffer++) {
  auto c = (*buffer)[position_buffer];
  if (c == options.dialect_options.state_machine_options.delimiter) {
+ // Check if previous character is a quote, if yes, this means we are in a non-initialized quoted value
+ // This only matters for when trying to figure out where csv lines start
+ if (position_buffer > 0 && try_add_line) {
+ if ((*buffer)[position_buffer - 1] == options.dialect_options.state_machine_options.quote) {
+ return false;
+ }
+ }
  // delimiter: end the value and add it to the chunk
  goto add_value;
  } else if (StringUtil::CharacterIsNewline(c)) {
+ // Check if previous character is a quote, if yes, this means we are in a non-initialized quoted value
+ // This only matters for when trying to figure out where csv lines start
+ if (position_buffer > 0 && try_add_line) {
+ if ((*buffer)[position_buffer - 1] == options.dialect_options.state_machine_options.quote) {
+ return false;
+ }
+ }
  // newline: add row
  if (column > 0 || try_add_line || parse_chunk.data.size() == 1) {
  goto add_row;
@@ -290,37 +290,37 @@ void CSVSniffer::DetectTypes() {
  vector<TupleSniffing> tuples(STANDARD_VECTOR_SIZE);
  candidate->csv_buffer_iterator.Process<SniffValue>(*candidate, tuples);
  // Potentially Skip empty rows (I find this dirty, but it is what the original code does)
- idx_t true_start = 0;
+ // The true line where parsing starts in reference to the csv file
+ idx_t true_line_start = 0;
  idx_t true_pos = 0;
- idx_t values_start = 0;
- while (true_start < tuples.size()) {
- if (tuples[true_start].values.empty() ||
- (tuples[true_start].values.size() == 1 && tuples[true_start].values[0].IsNull())) {
- true_start = tuples[true_start].line_number;
- if (true_start < tuples.size()) {
- true_pos = tuples[true_start].position;
- }
- values_start++;
+ // The start point of the tuples
+ idx_t tuple_true_start = 0;
+ while (tuple_true_start < tuples.size()) {
+ if (tuples[tuple_true_start].values.empty() ||
+ (tuples[tuple_true_start].values.size() == 1 && tuples[tuple_true_start].values[0].IsNull())) {
+ true_line_start = tuples[tuple_true_start].line_number;
+ true_pos = tuples[tuple_true_start].position;
+ tuple_true_start++;
  } else {
  break;
  }
  }

  // Potentially Skip Notes (I also find this dirty, but it is what the original code does)
- while (true_start < tuples.size()) {
- if (tuples[true_start].values.size() < max_columns_found && !options.null_padding) {
-
- true_start = tuples[true_start].line_number;
- if (true_start < tuples.size()) {
- true_pos = tuples[true_start].position;
- }
- values_start++;
+ while (tuple_true_start < tuples.size()) {
+ if (tuples[tuple_true_start].values.size() < max_columns_found && !options.null_padding) {
+ true_line_start = tuples[tuple_true_start].line_number;
+ true_pos = tuples[tuple_true_start].position;
+ tuple_true_start++;
  } else {
  break;
  }
  }
- if (values_start > 0) {
- tuples.erase(tuples.begin(), tuples.begin() + values_start);
+ if (tuple_true_start < tuples.size()) {
+ true_pos = tuples[tuple_true_start].position;
+ }
+ if (tuple_true_start > 0) {
+ tuples.erase(tuples.begin(), tuples.begin() + tuple_true_start);
  }

  idx_t row_idx = 0;
@@ -390,9 +390,9 @@ void CSVSniffer::DetectTypes() {
  // it's good if the dialect creates more non-varchar columns, but only if we sacrifice < 30% of best_num_cols.
  if (varchar_cols < min_varchar_cols && info_sql_types_candidates.size() > (max_columns_found * 0.7)) {
  // we have a new best_options candidate
- if (true_start > 0) {
+ if (true_line_start > 0) {
  // Add empty rows to skip_rows
- candidate->dialect_options.skip_rows += true_start;
+ candidate->dialect_options.skip_rows += true_line_start;
  }
  best_candidate = std::move(candidate);
  min_varchar_cols = varchar_cols;
@@ -178,7 +178,8 @@ public:
  current_file_path = files_path_p[0];
  CSVFileHandle *file_handle_ptr;

- if (!buffer_manager || (options.skip_rows_set && options.dialect_options.skip_rows > 0)) {
+ if (!buffer_manager || (options.skip_rows_set && options.dialect_options.skip_rows > 0) ||
+ buffer_manager->file_handle->GetFilePath() != current_file_path) {
  // If our buffers are too small, and we skip too many rows there is a chance things will go over-buffer
  // for now don't reuse the buffer manager
  buffer_manager.reset();
@@ -210,6 +211,7 @@ public:
  line_info.lines_read[0][0]++;
  }
  first_position = options.dialect_options.true_start;
+ next_byte = options.dialect_options.true_start;
  }
  explicit ParallelCSVGlobalState(idx_t system_threads_p)
  : system_threads(system_threads_p), line_info(main_mutex, batch_to_tuple_end, tuple_start, tuple_end) {
@@ -1,8 +1,8 @@
  #ifndef DUCKDB_VERSION
- #define DUCKDB_VERSION "0.8.2-dev5154"
+ #define DUCKDB_VERSION "0.8.2-dev5216"
  #endif
  #ifndef DUCKDB_SOURCE_ID
- #define DUCKDB_SOURCE_ID "78bea4f92a"
+ #define DUCKDB_SOURCE_ID "7ffdb9fd0e"
  #endif
  #include "duckdb/function/table/system_functions.hpp"
  #include "duckdb/main/database.hpp"
@@ -117,8 +117,10 @@ static constexpr ExtensionEntry EXTENSION_FUNCTIONS[] = {
  {"st_dwithin", "spatial"},
  {"st_dwithin_spheroid", "spatial"},
  {"st_envelope", "spatial"},
+ {"st_envelope_agg", "spatial"},
  {"st_equals", "spatial"},
  {"st_extent", "spatial"},
+ {"st_exteriorring", "spatial"},
  {"st_flipcoordinates", "spatial"},
  {"st_geometrytype", "spatial"},
  {"st_geomfromgeojson", "spatial"},
@@ -129,6 +131,7 @@ static constexpr ExtensionEntry EXTENSION_FUNCTIONS[] = {
  {"st_intersection", "spatial"},
  {"st_intersection_agg", "spatial"},
  {"st_intersects", "spatial"},
+ {"st_intersects_extent", "spatial"},
  {"st_isclosed", "spatial"},
  {"st_isempty", "spatial"},
  {"st_isring", "spatial"},
@@ -139,8 +142,12 @@ static constexpr ExtensionEntry EXTENSION_FUNCTIONS[] = {
  {"st_linestring2dfromwkb", "spatial"},
  {"st_list_proj_crs", "spatial"},
  {"st_makeline", "spatial"},
+ {"st_ngeometries", "spatial"},
+ {"st_ninteriorrings", "spatial"},
  {"st_normalize", "spatial"},
  {"st_npoints", "spatial"},
+ {"st_numgeometries", "spatial"},
+ {"st_numinteriorrings", "spatial"},
  {"st_numpoints", "spatial"},
  {"st_overlaps", "spatial"},
  {"st_perimeter", "spatial"},
@@ -150,6 +157,7 @@ static constexpr ExtensionEntry EXTENSION_FUNCTIONS[] = {
  {"st_point2dfromwkb", "spatial"},
  {"st_point3d", "spatial"},
  {"st_point4d", "spatial"},
+ {"st_pointn", "spatial"},
  {"st_pointonsurface", "spatial"},
  {"st_polygon2dfromwkb", "spatial"},
  {"st_read", "spatial"},
@@ -508,7 +508,7 @@ struct SchemaSetting {
  struct SearchPathSetting {
  static constexpr const char *Name = "search_path";
  static constexpr const char *Description =
- "Sets the default search search path as a comma-separated list of values";
+ "Sets the default catalog search path as a comma-separated list of values";
  static constexpr const LogicalTypeId InputType = LogicalTypeId::VARCHAR;
  static void SetLocal(ClientContext &context, const Value &parameter);
  static void ResetLocal(ClientContext &context);
@@ -38,6 +38,7 @@ public:
  virtual unique_ptr<TableDataWriter> GetTableDataWriter(TableCatalogEntry &table) = 0;

  protected:
+ virtual void WriteEntry(CatalogEntry &entry, Serializer &serializer);
  virtual void WriteSchema(SchemaCatalogEntry &schema, Serializer &serializer);
  virtual void WriteTable(TableCatalogEntry &table, Serializer &serializer);
  virtual void WriteView(ViewCatalogEntry &table, Serializer &serializer);
@@ -60,6 +61,7 @@ protected:

  protected:
  virtual void LoadCheckpoint(ClientContext &context, MetadataReader &reader);
+ virtual void ReadEntry(ClientContext &context, Deserializer &deserializer);
  virtual void ReadSchema(ClientContext &context, Deserializer &deserializer);
  virtual void ReadTable(ClientContext &context, Deserializer &deserializer);
  virtual void ReadView(ClientContext &context, Deserializer &deserializer);
@@ -491,7 +491,11 @@ BoundStatement Binder::BindReturning(vector<unique_ptr<ParsedExpression>> return
  projection->AddChild(std::move(child_operator));
  D_ASSERT(result.types.size() == result.names.size());
  result.plan = std::move(projection);
- properties.allow_stream_result = true;
+ // If an insert/delete/update statement returns data, there are sometimes issues with streaming results
+ // where the data modification doesn't take place until the streamed result is exhausted. Once a row is
+ // returned, it should be guaranteed that the row has been inserted.
+ // see https://github.com/duckdb/duckdb/issues/8310
+ properties.allow_stream_result = false;
  properties.return_type = StatementReturnType::QUERY_RESULT;
  return result;
  }
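The change above is the most user-visible one from the Node.js package: results of INSERT/UPDATE/DELETE ... RETURNING are no longer streamed, so the data modification is fully applied before any rows are handed back. A minimal usage sketch against the duckdb Node API follows; the table and column names are made up purely for illustration.

    const duckdb = require("duckdb");

    const db = new duckdb.Database(":memory:");

    db.run("CREATE TABLE items (id INTEGER, name VARCHAR)", (err) => {
      if (err) throw err;
      // With this release, INSERT ... RETURNING no longer produces a streaming
      // result, so by the time rows reach the callback the insert has been
      // applied (see https://github.com/duckdb/duckdb/issues/8310).
      db.all("INSERT INTO items VALUES (1, 'a'), (2, 'b') RETURNING id", (err, rows) => {
        if (err) throw err;
        console.log(rows); // e.g. [ { id: 1 }, { id: 2 } ]
      });
    });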
@@ -54,6 +54,74 @@ unique_ptr<TableDataWriter> SingleFileCheckpointWriter::GetTableDataWriter(Table
  return make_uniq<SingleFileTableDataWriter>(*this, table, *table_metadata_writer);
  }

+ static catalog_entry_vector_t GetCatalogEntries(vector<reference<SchemaCatalogEntry>> &schemas) {
+ catalog_entry_vector_t entries;
+ for (auto &schema_p : schemas) {
+ auto &schema = schema_p.get();
+ entries.push_back(schema);
+ schema.Scan(CatalogType::TYPE_ENTRY, [&](CatalogEntry &entry) {
+ if (entry.internal) {
+ return;
+ }
+ entries.push_back(entry);
+ });
+
+ schema.Scan(CatalogType::SEQUENCE_ENTRY, [&](CatalogEntry &entry) {
+ if (entry.internal) {
+ return;
+ }
+ entries.push_back(entry);
+ });
+
+ catalog_entry_vector_t tables;
+ vector<reference<ViewCatalogEntry>> views;
+ schema.Scan(CatalogType::TABLE_ENTRY, [&](CatalogEntry &entry) {
+ if (entry.internal) {
+ return;
+ }
+ if (entry.type == CatalogType::TABLE_ENTRY) {
+ tables.push_back(entry.Cast<TableCatalogEntry>());
+ } else if (entry.type == CatalogType::VIEW_ENTRY) {
+ views.push_back(entry.Cast<ViewCatalogEntry>());
+ } else {
+ throw NotImplementedException("Catalog type for entries");
+ }
+ });
+ // Reorder tables because of foreign key constraint
+ ReorderTableEntries(tables);
+ for (auto &table : tables) {
+ entries.push_back(table.get());
+ }
+ for (auto &view : views) {
+ entries.push_back(view.get());
+ }
+
+ schema.Scan(CatalogType::SCALAR_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
+ if (entry.internal) {
+ return;
+ }
+ if (entry.type == CatalogType::MACRO_ENTRY) {
+ entries.push_back(entry);
+ }
+ });
+
+ schema.Scan(CatalogType::TABLE_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
+ if (entry.internal) {
+ return;
+ }
+ if (entry.type == CatalogType::TABLE_MACRO_ENTRY) {
+ entries.push_back(entry);
+ }
+ });
+
+ schema.Scan(CatalogType::INDEX_ENTRY, [&](CatalogEntry &entry) {
+ D_ASSERT(!entry.internal);
+ entries.push_back(entry);
+ });
+ }
+ return entries;
+ }
+
  void SingleFileCheckpointWriter::CreateCheckpoint() {
  auto &config = DBConfig::Get(db);
  auto &storage_manager = db.GetStorageManager().Cast<SingleFileStorageManager>();
@@ -97,11 +165,12 @@ void SingleFileCheckpointWriter::CreateCheckpoint() {
  ]
  }
  */
+ auto catalog_entries = GetCatalogEntries(schemas);
  BinarySerializer serializer(*metadata_writer);
  serializer.Begin();
- serializer.WriteList(100, "schemas", schemas.size(), [&](Serializer::List &list, idx_t i) {
- auto &schema = schemas[i];
- list.WriteObject([&](Serializer &obj) { WriteSchema(schema.get(), obj); });
+ serializer.WriteList(100, "catalog_entries", catalog_entries.size(), [&](Serializer::List &list, idx_t i) {
+ auto &entry = catalog_entries[i];
+ list.WriteObject([&](Serializer &obj) { WriteEntry(entry.get(), obj); });
  });
  serializer.End();

@@ -141,8 +210,8 @@ void SingleFileCheckpointWriter::CreateCheckpoint() {
  void CheckpointReader::LoadCheckpoint(ClientContext &context, MetadataReader &reader) {
  BinaryDeserializer deserializer(reader);
  deserializer.Begin();
- deserializer.ReadList(100, "schemas", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadSchema(context, obj); });
+ deserializer.ReadList(100, "catalog_entries", [&](Deserializer::List &list, idx_t i) {
+ return list.ReadObject([&](Deserializer &obj) { ReadEntry(context, obj); });
  });
  deserializer.End();
  }
@@ -169,112 +238,102 @@ void SingleFileCheckpointReader::LoadFromStorage() {
  con.Commit();
  }

+ void CheckpointWriter::WriteEntry(CatalogEntry &entry, Serializer &serializer) {
+ serializer.WriteProperty(99, "catalog_type", entry.type);
+
+ switch (entry.type) {
+ case CatalogType::SCHEMA_ENTRY: {
+ auto &schema = entry.Cast<SchemaCatalogEntry>();
+ WriteSchema(schema, serializer);
+ break;
+ }
+ case CatalogType::TYPE_ENTRY: {
+ auto &custom_type = entry.Cast<TypeCatalogEntry>();
+ WriteType(custom_type, serializer);
+ break;
+ }
+ case CatalogType::SEQUENCE_ENTRY: {
+ auto &seq = entry.Cast<SequenceCatalogEntry>();
+ WriteSequence(seq, serializer);
+ break;
+ }
+ case CatalogType::TABLE_ENTRY: {
+ auto &table = entry.Cast<TableCatalogEntry>();
+ WriteTable(table, serializer);
+ break;
+ }
+ case CatalogType::VIEW_ENTRY: {
+ auto &view = entry.Cast<ViewCatalogEntry>();
+ WriteView(view, serializer);
+ break;
+ }
+ case CatalogType::MACRO_ENTRY: {
+ auto &macro = entry.Cast<ScalarMacroCatalogEntry>();
+ WriteMacro(macro, serializer);
+ break;
+ }
+ case CatalogType::TABLE_MACRO_ENTRY: {
+ auto &macro = entry.Cast<TableMacroCatalogEntry>();
+ WriteTableMacro(macro, serializer);
+ break;
+ }
+ case CatalogType::INDEX_ENTRY: {
+ auto &index = entry.Cast<IndexCatalogEntry>();
+ WriteIndex(index, serializer);
+ break;
+ }
+ default:
+ throw InternalException("Unrecognized catalog type in CheckpointWriter::WriteEntry");
+ }
+ }
+
  //===--------------------------------------------------------------------===//
  // Schema
  //===--------------------------------------------------------------------===//
  void CheckpointWriter::WriteSchema(SchemaCatalogEntry &schema, Serializer &serializer) {
  // write the schema data
  serializer.WriteProperty(100, "schema", &schema);
+ }

- // Write the custom types
- vector<reference<TypeCatalogEntry>> custom_types;
- schema.Scan(CatalogType::TYPE_ENTRY, [&](CatalogEntry &entry) {
- if (entry.internal) {
- return;
- }
- custom_types.push_back(entry.Cast<TypeCatalogEntry>());
- });
-
- serializer.WriteList(101, "custom_types", custom_types.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = custom_types[i];
- list.WriteObject([&](Serializer &obj) { WriteType(entry, obj); });
- });
-
- // Write the sequences
- vector<reference<SequenceCatalogEntry>> sequences;
- schema.Scan(CatalogType::SEQUENCE_ENTRY, [&](CatalogEntry &entry) {
- if (entry.internal) {
- return;
- }
- sequences.push_back(entry.Cast<SequenceCatalogEntry>());
- });
-
- serializer.WriteList(102, "sequences", sequences.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = sequences[i];
- list.WriteObject([&](Serializer &obj) { WriteSequence(entry, obj); });
- });
-
- // Read the tables and views
- catalog_entry_vector_t tables;
- vector<reference<ViewCatalogEntry>> views;
- schema.Scan(CatalogType::TABLE_ENTRY, [&](CatalogEntry &entry) {
- if (entry.internal) {
- return;
- }
- if (entry.type == CatalogType::TABLE_ENTRY) {
- tables.push_back(entry.Cast<TableCatalogEntry>());
- } else if (entry.type == CatalogType::VIEW_ENTRY) {
- views.push_back(entry.Cast<ViewCatalogEntry>());
- } else {
- throw NotImplementedException("Catalog type for entries");
- }
- });
- // Reorder tables because of foreign key constraint
- ReorderTableEntries(tables);
- // Tables
- serializer.WriteList(103, "tables", tables.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = tables[i];
- auto &table = entry.get().Cast<TableCatalogEntry>();
- list.WriteObject([&](Serializer &obj) { WriteTable(table, obj); });
- });
-
- // Views
- serializer.WriteList(104, "views", views.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = views[i];
- list.WriteObject([&](Serializer &obj) { WriteView(entry.get(), obj); });
- });
-
- // Scalar macros
- vector<reference<ScalarMacroCatalogEntry>> macros;
- schema.Scan(CatalogType::SCALAR_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
- if (entry.internal) {
- return;
- }
- if (entry.type == CatalogType::MACRO_ENTRY) {
- macros.push_back(entry.Cast<ScalarMacroCatalogEntry>());
- }
- });
- serializer.WriteList(105, "macros", macros.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = macros[i];
- list.WriteObject([&](Serializer &obj) { WriteMacro(entry.get(), obj); });
- });
-
- // Table macros
- vector<reference<TableMacroCatalogEntry>> table_macros;
- schema.Scan(CatalogType::TABLE_FUNCTION_ENTRY, [&](CatalogEntry &entry) {
- if (entry.internal) {
- return;
- }
- if (entry.type == CatalogType::TABLE_MACRO_ENTRY) {
- table_macros.push_back(entry.Cast<TableMacroCatalogEntry>());
- }
- });
- serializer.WriteList(106, "table_macros", table_macros.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = table_macros[i];
- list.WriteObject([&](Serializer &obj) { WriteTableMacro(entry.get(), obj); });
- });
-
- // Indexes
- vector<reference<IndexCatalogEntry>> indexes;
- schema.Scan(CatalogType::INDEX_ENTRY, [&](CatalogEntry &entry) {
- D_ASSERT(!entry.internal);
- indexes.push_back(entry.Cast<IndexCatalogEntry>());
- });
+ void CheckpointReader::ReadEntry(ClientContext &context, Deserializer &deserializer) {
+ auto type = deserializer.ReadProperty<CatalogType>(99, "type");

- serializer.WriteList(107, "indexes", indexes.size(), [&](Serializer::List &list, idx_t i) {
- auto &entry = indexes[i];
- list.WriteObject([&](Serializer &obj) { WriteIndex(entry.get(), obj); });
- });
+ switch (type) {
+ case CatalogType::SCHEMA_ENTRY: {
+ ReadSchema(context, deserializer);
+ break;
+ }
+ case CatalogType::TYPE_ENTRY: {
+ ReadType(context, deserializer);
+ break;
+ }
+ case CatalogType::SEQUENCE_ENTRY: {
+ ReadSequence(context, deserializer);
+ break;
+ }
+ case CatalogType::TABLE_ENTRY: {
+ ReadTable(context, deserializer);
+ break;
+ }
+ case CatalogType::VIEW_ENTRY: {
+ ReadView(context, deserializer);
+ break;
+ }
+ case CatalogType::MACRO_ENTRY: {
+ ReadMacro(context, deserializer);
+ break;
+ }
+ case CatalogType::TABLE_MACRO_ENTRY: {
+ ReadTableMacro(context, deserializer);
+ break;
+ }
+ case CatalogType::INDEX_ENTRY: {
+ ReadIndex(context, deserializer);
+ break;
+ }
+ default:
+ throw InternalException("Unrecognized catalog type in CheckpointWriter::WriteEntry");
+ }
  }

  void CheckpointReader::ReadSchema(ClientContext &context, Deserializer &deserializer) {
@@ -285,41 +344,6 @@ void CheckpointReader::ReadSchema(ClientContext &context, Deserializer &deserial
  // we set create conflict to IGNORE_ON_CONFLICT, so that we can ignore a failure when recreating the main schema
  schema_info.on_conflict = OnCreateConflict::IGNORE_ON_CONFLICT;
  catalog.CreateSchema(context, schema_info);
-
- // Read the custom types
- deserializer.ReadList(101, "custom_types", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadType(context, obj); });
- });
-
- // Read the sequences
- deserializer.ReadList(102, "sequences", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadSequence(context, obj); });
- });
-
- // Read the tables
- deserializer.ReadList(103, "tables", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadTable(context, obj); });
- });
-
- // Read the views
- deserializer.ReadList(104, "views", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadView(context, obj); });
- });
-
- // Read the macros
- deserializer.ReadList(105, "macros", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadMacro(context, obj); });
- });
-
- // Read the table macros
- deserializer.ReadList(106, "table_macros", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadTableMacro(context, obj); });
- });
-
- // Read the indexes
- deserializer.ReadList(107, "indexes", [&](Deserializer::List &list, idx_t i) {
- return list.ReadObject([&](Deserializer &obj) { ReadIndex(context, obj); });
- });
  }

  //===--------------------------------------------------------------------===//
@@ -2,7 +2,7 @@

  namespace duckdb {

- const uint64_t VERSION_NUMBER = 61;
+ const uint64_t VERSION_NUMBER = 62;

  struct StorageVersionInfo {
  const char *version_name;