duckdb 0.7.2-dev3666.0 → 0.7.2-dev3763.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115):
  1. package/package.json +1 -1
  2. package/src/database.cpp +0 -1
  3. package/src/duckdb/extension/json/json_functions/json_transform.cpp +1 -1
  4. package/src/duckdb/extension/json/json_functions/read_json.cpp +4 -4
  5. package/src/duckdb/extension/json/json_functions/read_json_objects.cpp +1 -1
  6. package/src/duckdb/extension/json/json_scan.cpp +16 -12
  7. package/src/duckdb/src/common/arrow/arrow_converter.cpp +4 -4
  8. package/src/duckdb/src/common/compressed_file_system.cpp +2 -2
  9. package/src/duckdb/src/common/exception.cpp +17 -0
  10. package/src/duckdb/src/common/exception_format_value.cpp +14 -0
  11. package/src/duckdb/src/common/file_system.cpp +53 -31
  12. package/src/duckdb/src/common/local_file_system.cpp +5 -3
  13. package/src/duckdb/src/common/row_operations/row_gather.cpp +2 -2
  14. package/src/duckdb/src/common/serializer/binary_deserializer.cpp +1 -1
  15. package/src/duckdb/src/common/serializer/buffered_file_reader.cpp +1 -1
  16. package/src/duckdb/src/common/serializer/buffered_file_writer.cpp +1 -1
  17. package/src/duckdb/src/common/serializer/buffered_serializer.cpp +3 -3
  18. package/src/duckdb/src/common/serializer.cpp +1 -1
  19. package/src/duckdb/src/common/sort/radix_sort.cpp +5 -5
  20. package/src/duckdb/src/common/string_util.cpp +6 -2
  21. package/src/duckdb/src/common/types/bit.cpp +2 -2
  22. package/src/duckdb/src/common/types/blob.cpp +2 -2
  23. package/src/duckdb/src/common/types/data_chunk.cpp +2 -2
  24. package/src/duckdb/src/common/types/date.cpp +1 -1
  25. package/src/duckdb/src/common/types/decimal.cpp +2 -2
  26. package/src/duckdb/src/common/types/selection_vector.cpp +1 -1
  27. package/src/duckdb/src/common/types/time.cpp +1 -1
  28. package/src/duckdb/src/common/types/vector.cpp +7 -7
  29. package/src/duckdb/src/common/types.cpp +1 -1
  30. package/src/duckdb/src/common/windows_util.cpp +2 -2
  31. package/src/duckdb/src/core_functions/scalar/list/list_aggregates.cpp +1 -1
  32. package/src/duckdb/src/core_functions/scalar/list/list_lambdas.cpp +1 -1
  33. package/src/duckdb/src/core_functions/scalar/string/printf.cpp +1 -1
  34. package/src/duckdb/src/execution/aggregate_hashtable.cpp +1 -1
  35. package/src/duckdb/src/execution/join_hashtable.cpp +3 -3
  36. package/src/duckdb/src/execution/operator/aggregate/physical_ungrouped_aggregate.cpp +2 -2
  37. package/src/duckdb/src/execution/operator/join/outer_join_marker.cpp +1 -1
  38. package/src/duckdb/src/execution/operator/join/perfect_hash_join_executor.cpp +1 -1
  39. package/src/duckdb/src/execution/operator/join/physical_range_join.cpp +1 -1
  40. package/src/duckdb/src/execution/operator/persistent/buffered_csv_reader.cpp +1 -1
  41. package/src/duckdb/src/execution/operator/persistent/physical_export.cpp +2 -2
  42. package/src/duckdb/src/execution/operator/persistent/physical_insert.cpp +91 -30
  43. package/src/duckdb/src/execution/operator/projection/physical_pivot.cpp +1 -1
  44. package/src/duckdb/src/execution/operator/schema/physical_create_index.cpp +1 -1
  45. package/src/duckdb/src/execution/perfect_aggregate_hashtable.cpp +2 -2
  46. package/src/duckdb/src/execution/radix_partitioned_hashtable.cpp +3 -3
  47. package/src/duckdb/src/execution/window_segment_tree.cpp +1 -1
  48. package/src/duckdb/src/function/pragma/pragma_queries.cpp +2 -2
  49. package/src/duckdb/src/function/scalar/strftime_format.cpp +1 -1
  50. package/src/duckdb/src/function/scalar/string/concat.cpp +1 -1
  51. package/src/duckdb/src/function/scalar/string/like.cpp +2 -2
  52. package/src/duckdb/src/function/scalar/system/aggregate_export.cpp +5 -5
  53. package/src/duckdb/src/function/table/copy_csv.cpp +1 -1
  54. package/src/duckdb/src/function/table/read_csv.cpp +3 -0
  55. package/src/duckdb/src/function/table/table_scan.cpp +7 -3
  56. package/src/duckdb/src/function/table/version/pragma_version.cpp +4 -6
  57. package/src/duckdb/src/include/duckdb/common/compressed_file_system.hpp +2 -2
  58. package/src/duckdb/src/include/duckdb/common/exception_format_value.hpp +26 -0
  59. package/src/duckdb/src/include/duckdb/common/file_system.hpp +6 -0
  60. package/src/duckdb/src/include/duckdb/common/helper.hpp +9 -9
  61. package/src/duckdb/src/include/duckdb/common/http_state.hpp +2 -2
  62. package/src/duckdb/src/include/duckdb/common/serializer/buffered_file_reader.hpp +1 -1
  63. package/src/duckdb/src/include/duckdb/common/serializer/buffered_file_writer.hpp +1 -1
  64. package/src/duckdb/src/include/duckdb/common/serializer/buffered_serializer.hpp +2 -2
  65. package/src/duckdb/src/include/duckdb/common/sort/duckdb_pdqsort.hpp +10 -10
  66. package/src/duckdb/src/include/duckdb/common/string_util.hpp +20 -0
  67. package/src/duckdb/src/include/duckdb/common/types/data_chunk.hpp +1 -1
  68. package/src/duckdb/src/include/duckdb/common/types/selection_vector.hpp +1 -1
  69. package/src/duckdb/src/include/duckdb/common/types/validity_mask.hpp +3 -3
  70. package/src/duckdb/src/include/duckdb/common/types/vector_buffer.hpp +4 -4
  71. package/src/duckdb/src/include/duckdb/common/unique_ptr.hpp +8 -8
  72. package/src/duckdb/src/include/duckdb/execution/aggregate_hashtable.hpp +1 -1
  73. package/src/duckdb/src/include/duckdb/execution/join_hashtable.hpp +3 -3
  74. package/src/duckdb/src/include/duckdb/execution/operator/join/outer_join_marker.hpp +1 -1
  75. package/src/duckdb/src/include/duckdb/execution/operator/join/perfect_hash_join_executor.hpp +1 -1
  76. package/src/duckdb/src/include/duckdb/execution/operator/join/physical_range_join.hpp +1 -1
  77. package/src/duckdb/src/include/duckdb/execution/operator/persistent/buffered_csv_reader.hpp +2 -2
  78. package/src/duckdb/src/include/duckdb/execution/operator/persistent/parallel_csv_reader.hpp +2 -2
  79. package/src/duckdb/src/include/duckdb/execution/operator/persistent/physical_insert.hpp +2 -3
  80. package/src/duckdb/src/include/duckdb/execution/perfect_aggregate_hashtable.hpp +2 -2
  81. package/src/duckdb/src/include/duckdb/execution/window_segment_tree.hpp +1 -1
  82. package/src/duckdb/src/include/duckdb/function/table/read_csv.hpp +1 -1
  83. package/src/duckdb/src/include/duckdb/main/client_data.hpp +1 -1
  84. package/src/duckdb/src/include/duckdb/optimizer/join_order/join_relation.hpp +3 -3
  85. package/src/duckdb/src/include/duckdb/parser/expression/function_expression.hpp +1 -1
  86. package/src/duckdb/src/include/duckdb/parser/expression/operator_expression.hpp +2 -2
  87. package/src/duckdb/src/include/duckdb/parser/keyword_helper.hpp +5 -0
  88. package/src/duckdb/src/include/duckdb/storage/data_table.hpp +4 -0
  89. package/src/duckdb/src/include/duckdb/storage/statistics/base_statistics.hpp +1 -1
  90. package/src/duckdb/src/include/duckdb/storage/table/append_state.hpp +1 -1
  91. package/src/duckdb/src/include/duckdb/storage/table/row_group.hpp +1 -1
  92. package/src/duckdb/src/include/duckdb/storage/table/scan_state.hpp +5 -5
  93. package/src/duckdb/src/include/duckdb/storage/table/update_segment.hpp +2 -2
  94. package/src/duckdb/src/include/duckdb/transaction/local_storage.hpp +1 -1
  95. package/src/duckdb/src/main/client_context.cpp +4 -4
  96. package/src/duckdb/src/main/db_instance_cache.cpp +5 -3
  97. package/src/duckdb/src/main/extension/extension_install.cpp +22 -18
  98. package/src/duckdb/src/optimizer/join_order/join_relation_set.cpp +5 -5
  99. package/src/duckdb/src/parser/expression/collate_expression.cpp +1 -1
  100. package/src/duckdb/src/parser/keyword_helper.cpp +11 -1
  101. package/src/duckdb/src/parser/query_node/select_node.cpp +1 -1
  102. package/src/duckdb/src/parser/statement/copy_statement.cpp +2 -2
  103. package/src/duckdb/src/parser/tableref.cpp +1 -1
  104. package/src/duckdb/src/planner/binder/tableref/bind_basetableref.cpp +7 -1
  105. package/src/duckdb/src/planner/expression_binder/index_binder.cpp +1 -1
  106. package/src/duckdb/src/storage/checkpoint/write_overflow_strings_to_disk.cpp +1 -1
  107. package/src/duckdb/src/storage/compression/string_uncompressed.cpp +2 -2
  108. package/src/duckdb/src/storage/data_table.cpp +75 -44
  109. package/src/duckdb/src/storage/local_storage.cpp +1 -1
  110. package/src/duckdb/src/storage/statistics/list_stats.cpp +1 -1
  111. package/src/duckdb/src/storage/statistics/struct_stats.cpp +1 -1
  112. package/src/duckdb/src/storage/storage_manager.cpp +3 -0
  113. package/src/duckdb/src/storage/table/row_group.cpp +11 -11
  114. package/src/duckdb/src/storage/table/scan_state.cpp +1 -1
  115. package/src/duckdb/src/storage/table/update_segment.cpp +6 -6
@@ -193,6 +193,10 @@ public:
193
193
  void VerifyAppendConstraints(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk,
194
194
  ConflictManager *conflict_manager = nullptr);
195
195
 
196
+ public:
197
+ static void VerifyUniqueIndexes(TableIndexList &indexes, ClientContext &context, DataChunk &chunk,
198
+ ConflictManager *conflict_manager);
199
+
196
200
  private:
197
201
  //! Verify the new added constraints against current persistent&local data
198
202
  void VerifyNewConstraint(ClientContext &context, DataTable &parent, const BoundConstraint *constraint);
@@ -138,7 +138,7 @@ private:
138
138
  StringStatsData string_data;
139
139
  } stats_union;
140
140
  //! Child stats (for LIST and STRUCT)
141
- unsafe_array_ptr<BaseStatistics> child_stats;
141
+ unsafe_unique_array<BaseStatistics> child_stats;
142
142
  };
143
143
 
144
144
  } // namespace duckdb
@@ -44,7 +44,7 @@ struct RowGroupAppendState {
44
44
  //! The current row_group we are appending to
45
45
  RowGroup *row_group;
46
46
  //! The column append states
47
- unsafe_array_ptr<ColumnAppendState> states;
47
+ unsafe_unique_array<ColumnAppendState> states;
48
48
  //! Offset within the row_group
49
49
  idx_t offset_in_row_group;
50
50
  };
@@ -149,7 +149,7 @@ public:
149
149
 
150
150
  private:
151
151
  ChunkInfo *GetChunkInfo(idx_t vector_idx);
152
- ColumnData &GetColumn(idx_t c);
152
+ ColumnData &GetColumn(storage_t c);
153
153
  idx_t GetColumnCount() const;
154
154
  vector<shared_ptr<ColumnData>> &GetColumns();
155
155
 
@@ -99,7 +99,7 @@ public:
99
99
  //! The maximum row within the row group
100
100
  idx_t max_row_group_row;
101
101
  //! Child column scans
102
- unsafe_array_ptr<ColumnScanState> column_scans;
102
+ unsafe_unique_array<ColumnScanState> column_scans;
103
103
  //! Row group segment tree
104
104
  RowGroupSegmentTree *row_groups;
105
105
  //! The total maximum row index
@@ -109,7 +109,7 @@ public:
109
109
 
110
110
  public:
111
111
  void Initialize(const vector<LogicalType> &types);
112
- const vector<column_t> &GetColumnIds();
112
+ const vector<storage_t> &GetColumnIds();
113
113
  TableFilterSet *GetFilters();
114
114
  AdaptiveFilter *GetAdaptiveFilter();
115
115
  bool Scan(DuckTransaction &transaction, DataChunk &result);
@@ -130,15 +130,15 @@ public:
130
130
  CollectionScanState local_state;
131
131
 
132
132
  public:
133
- void Initialize(vector<column_t> column_ids, TableFilterSet *table_filters = nullptr);
133
+ void Initialize(vector<storage_t> column_ids, TableFilterSet *table_filters = nullptr);
134
134
 
135
- const vector<column_t> &GetColumnIds();
135
+ const vector<storage_t> &GetColumnIds();
136
136
  TableFilterSet *GetFilters();
137
137
  AdaptiveFilter *GetAdaptiveFilter();
138
138
 
139
139
  private:
140
140
  //! The column identifiers of the scan
141
- vector<column_t> column_ids;
141
+ vector<storage_t> column_ids;
142
142
  //! The table filters (if any)
143
143
  TableFilterSet *table_filters;
144
144
  //! Adaptive filter info (if any)
@@ -96,8 +96,8 @@ private:
96
96
 
97
97
  struct UpdateNodeData {
98
98
  unique_ptr<UpdateInfo> info;
99
- unsafe_array_ptr<sel_t> tuples;
100
- unsafe_array_ptr<data_t> tuple_data;
99
+ unsafe_unique_array<sel_t> tuples;
100
+ unsafe_unique_array<data_t> tuple_data;
101
101
  };
102
102
 
103
103
  struct UpdateNode {
@@ -108,7 +108,7 @@ public:
108
108
  //! Initialize a scan of the local storage
109
109
  void InitializeScan(DataTable &table, CollectionScanState &state, optional_ptr<TableFilterSet> table_filters);
110
110
  //! Scan
111
- void Scan(CollectionScanState &state, const vector<column_t> &column_ids, DataChunk &result);
111
+ void Scan(CollectionScanState &state, const vector<storage_t> &column_ids, DataChunk &result);
112
112
 
113
113
  void InitializeParallelScan(DataTable &table, ParallelCollectionScanState &state);
114
114
  bool NextParallelScan(ClientContext &context, DataTable &table, ParallelCollectionScanState &state,
@@ -209,7 +209,7 @@ PreservedError ClientContext::EndQueryInternal(ClientContextLock &lock, bool suc
209
209
  }
210
210
 
211
211
  void ClientContext::CleanupInternal(ClientContextLock &lock, BaseQueryResult *result, bool invalidate_transaction) {
212
- client_data->http_state = make_uniq<HTTPState>();
212
+ client_data->http_state = make_shared<HTTPState>();
213
213
  if (!active_query) {
214
214
  // no query currently active
215
215
  return;
@@ -491,7 +491,7 @@ unique_ptr<LogicalOperator> ClientContext::ExtractPlan(const string &query) {
491
491
  }
492
492
 
493
493
  unique_ptr<LogicalOperator> plan;
494
- client_data->http_state = make_uniq<HTTPState>();
494
+ client_data->http_state = make_shared<HTTPState>();
495
495
  RunFunctionInTransactionInternal(*lock, [&]() {
496
496
  Planner planner(*this);
497
497
  planner.CreatePlan(std::move(statements[0]));
@@ -934,7 +934,7 @@ void ClientContext::RunFunctionInTransactionInternal(ClientContextLock &lock, co
934
934
  bool requires_valid_transaction) {
935
935
  if (requires_valid_transaction && transaction.HasActiveTransaction() &&
936
936
  ValidChecker::IsInvalidated(ActiveTransaction())) {
937
- throw Exception(ErrorManager::FormatException(*this, ErrorType::INVALIDATED_TRANSACTION));
937
+ throw TransactionException(ErrorManager::FormatException(*this, ErrorType::INVALIDATED_TRANSACTION));
938
938
  }
939
939
  // check if we are on AutoCommit. In this case we should start a transaction
940
940
  bool require_new_transaction = transaction.IsAutoCommit() && !transaction.HasActiveTransaction();
@@ -1013,7 +1013,7 @@ void ClientContext::TryBindRelation(Relation &relation, vector<ColumnDefinition>
1013
1013
  D_ASSERT(!relation.GetAlias().empty());
1014
1014
  D_ASSERT(!relation.ToString().empty());
1015
1015
  #endif
1016
- client_data->http_state = make_uniq<HTTPState>();
1016
+ client_data->http_state = make_shared<HTTPState>();
1017
1017
  RunFunctionInTransaction([&]() {
1018
1018
  // bind the expressions
1019
1019
  auto binder = Binder::CreateBinder(*this);
@@ -1,8 +1,10 @@
1
1
  #include "duckdb/main/db_instance_cache.hpp"
2
2
  #include "duckdb/main/extension_helper.hpp"
3
+
3
4
  namespace duckdb {
4
5
 
5
- string GetDBAbsolutePath(const string &database) {
6
+ string GetDBAbsolutePath(const string &database_p) {
7
+ auto database = FileSystem::ExpandPath(database_p, nullptr);
6
8
  if (database.empty()) {
7
9
  return ":memory:";
8
10
  }
@@ -15,9 +17,9 @@ string GetDBAbsolutePath(const string &database) {
15
17
  return database;
16
18
  }
17
19
  if (FileSystem::IsPathAbsolute(database)) {
18
- return database;
20
+ return FileSystem::NormalizeAbsolutePath(database);
19
21
  }
20
- return FileSystem::JoinPath(FileSystem::GetWorkingDirectory(), database);
22
+ return FileSystem::NormalizeAbsolutePath(FileSystem::JoinPath(FileSystem::GetWorkingDirectory(), database));
21
23
  }
22
24
 
23
25
  shared_ptr<DuckDB> DBInstanceCache::GetInstanceInternal(const string &database, const DBConfig &config) {
@@ -133,6 +133,23 @@ void ExtensionHelper::InstallExtension(ClientContext &context, const string &ext
133
133
  InstallExtensionInternal(config, &client_config, fs, local_path, extension, force_install);
134
134
  }
135
135
 
136
+ unsafe_unique_array<data_t> ReadExtensionFileFromDisk(FileSystem &fs, const string &path, idx_t &file_size) {
137
+ auto source_file = fs.OpenFile(path, FileFlags::FILE_FLAGS_READ);
138
+ file_size = source_file->GetFileSize();
139
+ auto in_buffer = make_unsafe_uniq_array<data_t>(file_size);
140
+ source_file->Read(in_buffer.get(), file_size);
141
+ source_file->Close();
142
+ return in_buffer;
143
+ }
144
+
145
+ void WriteExtensionFileToDisk(FileSystem &fs, const string &path, void *data, idx_t data_size) {
146
+ auto target_file = fs.OpenFile(path, FileFlags::FILE_FLAGS_WRITE | FileFlags::FILE_FLAGS_APPEND |
147
+ FileFlags::FILE_FLAGS_FILE_CREATE_NEW);
148
+ target_file->Write(data, data_size);
149
+ target_file->Close();
150
+ target_file.reset();
151
+ }
152
+
136
153
  void ExtensionHelper::InstallExtensionInternal(DBConfig &config, ClientConfig *client_config, FileSystem &fs,
137
154
  const string &local_path, const string &extension, bool force_install) {
138
155
  if (!config.options.enable_external_access) {
@@ -152,18 +169,9 @@ void ExtensionHelper::InstallExtensionInternal(DBConfig &config, ClientConfig *c
152
169
  }
153
170
  auto is_http_url = StringUtil::Contains(extension, "http://");
154
171
  if (fs.FileExists(extension)) {
155
-
156
- std::ifstream in(extension, std::ios::binary);
157
- if (in.bad()) {
158
- throw IOException("Failed to read extension from \"%s\"", extension);
159
- }
160
- std::ofstream out(temp_path, std::ios::binary);
161
- out << in.rdbuf();
162
- if (out.bad()) {
163
- throw IOException("Failed to write extension to \"%s\"", temp_path);
164
- }
165
- in.close();
166
- out.close();
172
+ idx_t file_size;
173
+ auto in_buffer = ReadExtensionFileFromDisk(fs, extension, file_size);
174
+ WriteExtensionFileToDisk(fs, temp_path, in_buffer.get(), file_size);
167
175
 
168
176
  fs.MoveFile(temp_path, local_extension_path);
169
177
  return;
@@ -225,12 +233,8 @@ void ExtensionHelper::InstallExtensionInternal(DBConfig &config, ClientConfig *c
225
233
  }
226
234
  }
227
235
  auto decompressed_body = GZipFileSystem::UncompressGZIPString(res->body);
228
- std::ofstream out(temp_path, std::ios::binary);
229
- out.write(decompressed_body.data(), decompressed_body.size());
230
- if (out.bad()) {
231
- throw IOException("Failed to write extension to %s", temp_path);
232
- }
233
- out.close();
236
+
237
+ WriteExtensionFileToDisk(fs, temp_path, (void *)decompressed_body.data(), decompressed_body.size());
234
238
  fs.MoveFile(temp_path, local_extension_path);
235
239
  #endif
236
240
  }
@@ -35,7 +35,7 @@ bool JoinRelationSet::IsSubset(JoinRelationSet &super, JoinRelationSet &sub) {
35
35
  return false;
36
36
  }
37
37
 
38
- JoinRelationSet &JoinRelationSetManager::GetJoinRelation(unsafe_array_ptr<idx_t> relations, idx_t count) {
38
+ JoinRelationSet &JoinRelationSetManager::GetJoinRelation(unsafe_unique_array<idx_t> relations, idx_t count) {
39
39
  // now look it up in the tree
40
40
  reference<JoinRelationTreeNode> info(root);
41
41
  for (idx_t i = 0; i < count; i++) {
@@ -59,7 +59,7 @@ JoinRelationSet &JoinRelationSetManager::GetJoinRelation(unsafe_array_ptr<idx_t>
59
59
  //! Create or get a JoinRelationSet from a single node with the given index
60
60
  JoinRelationSet &JoinRelationSetManager::GetJoinRelation(idx_t index) {
61
61
  // create a sorted vector of the relations
62
- auto relations = make_unsafe_array<idx_t>(1);
62
+ auto relations = make_unsafe_uniq_array<idx_t>(1);
63
63
  relations[0] = index;
64
64
  idx_t count = 1;
65
65
  return GetJoinRelation(std::move(relations), count);
@@ -67,7 +67,7 @@ JoinRelationSet &JoinRelationSetManager::GetJoinRelation(idx_t index) {
67
67
 
68
68
  JoinRelationSet &JoinRelationSetManager::GetJoinRelation(unordered_set<idx_t> &bindings) {
69
69
  // create a sorted vector of the relations
70
- unsafe_array_ptr<idx_t> relations = bindings.empty() ? nullptr : make_unsafe_array<idx_t>(bindings.size());
70
+ unsafe_unique_array<idx_t> relations = bindings.empty() ? nullptr : make_unsafe_uniq_array<idx_t>(bindings.size());
71
71
  idx_t count = 0;
72
72
  for (auto &entry : bindings) {
73
73
  relations[count++] = entry;
@@ -77,7 +77,7 @@ JoinRelationSet &JoinRelationSetManager::GetJoinRelation(unordered_set<idx_t> &b
77
77
  }
78
78
 
79
79
  JoinRelationSet &JoinRelationSetManager::Union(JoinRelationSet &left, JoinRelationSet &right) {
80
- auto relations = make_unsafe_array<idx_t>(left.count + right.count);
80
+ auto relations = make_unsafe_uniq_array<idx_t>(left.count + right.count);
81
81
  idx_t count = 0;
82
82
  // move through the left and right relations, eliminating duplicates
83
83
  idx_t i = 0, j = 0;
@@ -113,7 +113,7 @@ JoinRelationSet &JoinRelationSetManager::Union(JoinRelationSet &left, JoinRelati
113
113
  }
114
114
 
115
115
  // JoinRelationSet *JoinRelationSetManager::Difference(JoinRelationSet *left, JoinRelationSet *right) {
116
- // auto relations = unsafe_array_ptr<idx_t>(new idx_t[left->count]);
116
+ // auto relations = unsafe_unique_array<idx_t>(new idx_t[left->count]);
117
117
  // idx_t count = 0;
118
118
  // // move through the left and right relations
119
119
  // idx_t i = 0, j = 0;
@@ -15,7 +15,7 @@ CollateExpression::CollateExpression(string collation_p, unique_ptr<ParsedExpres
15
15
  }
16
16
 
17
17
  string CollateExpression::ToString() const {
18
- return child->ToString() + " COLLATE " + KeywordHelper::WriteOptionallyQuoted(collation);
18
+ return StringUtil::Format("%s COLLATE %s", child->ToString(), SQLIdentifier(collation));
19
19
  }
20
20
 
21
21
  bool CollateExpression::Equal(const CollateExpression *a, const CollateExpression *b) {
@@ -29,11 +29,21 @@ bool KeywordHelper::RequiresQuotes(const string &text, bool allow_caps) {
29
29
  return IsKeyword(text);
30
30
  }
31
31
 
32
+ string KeywordHelper::EscapeQuotes(const string &text, char quote) {
33
+ return StringUtil::Replace(text, string(1, quote), string(2, quote));
34
+ }
35
+
36
+ string KeywordHelper::WriteQuoted(const string &text, char quote) {
37
+ // 1. Escapes all occurrences of 'quote' by doubling them (escape in SQL)
38
+ // 2. Adds quotes around the string
39
+ return string(1, quote) + EscapeQuotes(text, quote) + string(1, quote);
40
+ }
41
+
32
42
  string KeywordHelper::WriteOptionallyQuoted(const string &text, char quote, bool allow_caps) {
33
43
  if (!RequiresQuotes(text, allow_caps)) {
34
44
  return text;
35
45
  }
36
- return string(1, quote) + StringUtil::Replace(text, string(1, quote), string(2, quote)) + string(1, quote);
46
+ return WriteQuoted(text, quote);
37
47
  }
38
48
 
39
49
  } // namespace duckdb
@@ -39,7 +39,7 @@ string SelectNode::ToString() const {
39
39
  }
40
40
  result += select_list[i]->ToString();
41
41
  if (!select_list[i]->alias.empty()) {
42
- result += " AS " + KeywordHelper::WriteOptionallyQuoted(select_list[i]->alias);
42
+ result += StringUtil::Format(" AS %s", SQLIdentifier(select_list[i]->alias));
43
43
  }
44
44
  }
45
45
  if (from_table && from_table->type != TableReferenceType::EMPTY) {
@@ -86,7 +86,7 @@ string CopyStatement::ToString() const {
86
86
  D_ASSERT(!select_statement);
87
87
  result += TablePart(*info);
88
88
  result += " FROM";
89
- result += StringUtil::Format(" '%s'", info->file_path);
89
+ result += StringUtil::Format(" %s", SQLString(info->file_path));
90
90
  result += CopyOptionsToString(info->format, info->options);
91
91
  } else {
92
92
  if (select_statement) {
@@ -96,7 +96,7 @@ string CopyStatement::ToString() const {
96
96
  result += TablePart(*info);
97
97
  }
98
98
  result += " TO ";
99
- result += StringUtil::Format("'%s'", info->file_path);
99
+ result += StringUtil::Format("%s", SQLString(info->file_path));
100
100
  result += CopyOptionsToString(info->format, info->options);
101
101
  }
102
102
  return result;
@@ -16,7 +16,7 @@ string TableRef::BaseToString(string result) const {
16
16
 
17
17
  string TableRef::BaseToString(string result, const vector<string> &column_name_alias) const {
18
18
  if (!alias.empty()) {
19
- result += " AS " + KeywordHelper::WriteOptionallyQuoted(alias);
19
+ result += StringUtil::Format(" AS %s", SQLIdentifier(alias));
20
20
  }
21
21
  if (!column_name_alias.empty()) {
22
22
  D_ASSERT(!alias.empty());
@@ -94,7 +94,13 @@ unique_ptr<BoundTableRef> Binder::Bind(BaseTableRef &ref) {
94
94
  for (auto &scan : config.replacement_scans) {
95
95
  auto replacement_function = scan.function(context, table_name, scan.data.get());
96
96
  if (replacement_function) {
97
- replacement_function->alias = ref.alias.empty() ? replacement_function->alias : ref.alias;
97
+ if (!ref.alias.empty()) {
98
+ // user-provided alias overrides the default alias
99
+ replacement_function->alias = ref.alias;
100
+ } else if (replacement_function->alias.empty()) {
101
+ // if the replacement scan itself did not provide an alias we use the table name
102
+ replacement_function->alias = ref.table_name;
103
+ }
98
104
  if (replacement_function->type == TableReferenceType::TABLE_FUNCTION) {
99
105
  auto &table_function = replacement_function->Cast<TableFunctionRef>();
100
106
  table_function.column_name_alias = ref.column_name_alias;
@@ -40,7 +40,7 @@ BindResult IndexBinder::BindExpression(unique_ptr<ParsedExpression> &expr_ptr, i
40
40
  throw InternalException("failed to replay CREATE INDEX statement - column id not found");
41
41
  }
42
42
  return BindResult(
43
- make_uniq<BoundColumnRefExpression>(col_ref.alias, col_type, ColumnBinding(0, col_id_idx)));
43
+ make_uniq<BoundColumnRefExpression>(col_ref.GetColumnName(), col_type, ColumnBinding(0, col_id_idx)));
44
44
  }
45
45
  return ExpressionBinder::BindExpression(expr_ptr, depth);
46
46
  }
@@ -32,7 +32,7 @@ void WriteOverflowStringsToDisk::WriteString(string_t string, block_id_t &result
32
32
  MiniZStream s;
33
33
  size_t compressed_size = 0;
34
34
  compressed_size = s.MaxCompressedLength(uncompressed_size);
35
- auto compressed_buf = make_unsafe_array<data_t>(compressed_size);
35
+ auto compressed_buf = make_unsafe_uniq_array<data_t>(compressed_size);
36
36
  s.Compress((const char *)string.GetData(), uncompressed_size, (char *)compressed_buf.get(), &compressed_size);
37
37
  string_t compressed_string((const char *)compressed_buf.get(), compressed_size);
38
38
 
@@ -292,13 +292,13 @@ string_t UncompressedStringStorage::ReadOverflowString(ColumnSegment &segment, V
292
292
  offset += 2 * sizeof(uint32_t);
293
293
 
294
294
  data_ptr_t decompression_ptr;
295
- unsafe_array_ptr<data_t> decompression_buffer;
295
+ unsafe_unique_array<data_t> decompression_buffer;
296
296
 
297
297
  // If string is in single block we decompress straight from it, else we copy first
298
298
  if (remaining <= Storage::BLOCK_SIZE - sizeof(block_id_t) - offset) {
299
299
  decompression_ptr = handle.Ptr() + offset;
300
300
  } else {
301
- decompression_buffer = make_unsafe_array<data_t>(compressed_size);
301
+ decompression_buffer = make_unsafe_uniq_array<data_t>(compressed_size);
302
302
  auto target_ptr = decompression_buffer.get();
303
303
 
304
304
  // now append the string to the single buffer
@@ -530,6 +530,75 @@ void DataTable::VerifyNewConstraint(ClientContext &context, DataTable &parent, c
530
530
  local_storage.VerifyNewConstraint(parent, *constraint);
531
531
  }
532
532
 
533
+ bool HasUniqueIndexes(TableIndexList &list) {
534
+ bool has_unique_index = false;
535
+ list.Scan([&](Index &index) {
536
+ if (index.IsUnique()) {
537
+ return has_unique_index = true;
538
+ return true;
539
+ }
540
+ return false;
541
+ });
542
+ return has_unique_index;
543
+ }
544
+
545
+ void DataTable::VerifyUniqueIndexes(TableIndexList &indexes, ClientContext &context, DataChunk &chunk,
546
+ ConflictManager *conflict_manager) {
547
+ //! check whether or not the chunk can be inserted into the indexes
548
+ if (!conflict_manager) {
549
+ // Only need to verify that no unique constraints are violated
550
+ indexes.Scan([&](Index &index) {
551
+ if (!index.IsUnique()) {
552
+ return false;
553
+ }
554
+ index.VerifyAppend(chunk);
555
+ return false;
556
+ });
557
+ return;
558
+ }
559
+
560
+ D_ASSERT(conflict_manager);
561
+ // The conflict manager is only provided when a ON CONFLICT clause was provided to the INSERT statement
562
+
563
+ idx_t matching_indexes = 0;
564
+ auto &conflict_info = conflict_manager->GetConflictInfo();
565
+ // First we figure out how many indexes match our conflict target
566
+ // So we can optimize accordingly
567
+ indexes.Scan([&](Index &index) {
568
+ matching_indexes += conflict_info.ConflictTargetMatches(index);
569
+ return false;
570
+ });
571
+ conflict_manager->SetMode(ConflictManagerMode::SCAN);
572
+ conflict_manager->SetIndexCount(matching_indexes);
573
+ // First we verify only the indexes that match our conflict target
574
+ unordered_set<Index *> checked_indexes;
575
+ indexes.Scan([&](Index &index) {
576
+ if (!index.IsUnique()) {
577
+ return false;
578
+ }
579
+ if (conflict_info.ConflictTargetMatches(index)) {
580
+ index.VerifyAppend(chunk, *conflict_manager);
581
+ checked_indexes.insert(&index);
582
+ }
583
+ return false;
584
+ });
585
+
586
+ conflict_manager->SetMode(ConflictManagerMode::THROW);
587
+ // Then we scan the other indexes, throwing if they cause conflicts on tuples that were not found during
588
+ // the scan
589
+ indexes.Scan([&](Index &index) {
590
+ if (!index.IsUnique()) {
591
+ return false;
592
+ }
593
+ if (checked_indexes.count(&index)) {
594
+ // Already checked this constraint
595
+ return false;
596
+ }
597
+ index.VerifyAppend(chunk, *conflict_manager);
598
+ return false;
599
+ });
600
+ }
601
+
533
602
  void DataTable::VerifyAppendConstraints(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk,
534
603
  ConflictManager *conflict_manager) {
535
604
  if (table.HasGeneratedColumns()) {
@@ -548,6 +617,11 @@ void DataTable::VerifyAppendConstraints(TableCatalogEntry &table, ClientContext
548
617
  VerifyGeneratedExpressionSuccess(context, table, chunk, *bound_expression, col.Oid());
549
618
  }
550
619
  }
620
+
621
+ if (HasUniqueIndexes(info->indexes)) {
622
+ VerifyUniqueIndexes(info->indexes, context, chunk, conflict_manager);
623
+ }
624
+
551
625
  auto &constraints = table.GetConstraints();
552
626
  auto &bound_constraints = table.GetBoundConstraints();
553
627
  for (idx_t i = 0; i < bound_constraints.size(); i++) {
@@ -567,50 +641,7 @@ void DataTable::VerifyAppendConstraints(TableCatalogEntry &table, ClientContext
567
641
  break;
568
642
  }
569
643
  case ConstraintType::UNIQUE: {
570
- //! check whether or not the chunk can be inserted into the indexes
571
- if (conflict_manager) {
572
- // This is only provided when a ON CONFLICT clause was provided
573
- idx_t matching_indexes = 0;
574
- auto &conflict_info = conflict_manager->GetConflictInfo();
575
- // First we figure out how many indexes match our conflict target
576
- // So we can optimize accordingly
577
- info->indexes.Scan([&](Index &index) {
578
- matching_indexes += conflict_info.ConflictTargetMatches(index);
579
- return false;
580
- });
581
- conflict_manager->SetMode(ConflictManagerMode::SCAN);
582
- conflict_manager->SetIndexCount(matching_indexes);
583
- // First we verify only the indexes that match our conflict target
584
- info->indexes.Scan([&](Index &index) {
585
- if (!index.IsUnique()) {
586
- return false;
587
- }
588
- if (conflict_info.ConflictTargetMatches(index)) {
589
- index.VerifyAppend(chunk, *conflict_manager);
590
- }
591
- return false;
592
- });
593
-
594
- conflict_manager->SetMode(ConflictManagerMode::THROW);
595
- // Then we scan the other indexes, throwing if they cause conflicts on tuples that were not found during
596
- // the scan
597
- info->indexes.Scan([&](Index &index) {
598
- if (!index.IsUnique()) {
599
- return false;
600
- }
601
- index.VerifyAppend(chunk, *conflict_manager);
602
- return false;
603
- });
604
- } else {
605
- // Only need to verify that no unique constraints are violated
606
- info->indexes.Scan([&](Index &index) {
607
- if (!index.IsUnique()) {
608
- return false;
609
- }
610
- index.VerifyAppend(chunk);
611
- return false;
612
- });
613
- }
644
+ // These were handled earlier on
614
645
  break;
615
646
  }
616
647
  case ConstraintType::FOREIGN_KEY: {
@@ -310,7 +310,7 @@ void LocalStorage::InitializeScan(DataTable &table, CollectionScanState &state,
310
310
  storage->InitializeScan(state, table_filters);
311
311
  }
312
312
 
313
- void LocalStorage::Scan(CollectionScanState &state, const vector<column_t> &column_ids, DataChunk &result) {
313
+ void LocalStorage::Scan(CollectionScanState &state, const vector<storage_t> &column_ids, DataChunk &result) {
314
314
  state.Scan(transaction, result);
315
315
  }
316
316
 
@@ -7,7 +7,7 @@
7
7
  namespace duckdb {
8
8
 
9
9
  void ListStats::Construct(BaseStatistics &stats) {
10
- stats.child_stats = unsafe_array_ptr<BaseStatistics>(new BaseStatistics[1]);
10
+ stats.child_stats = unsafe_unique_array<BaseStatistics>(new BaseStatistics[1]);
11
11
  BaseStatistics::Construct(stats.child_stats[0], ListType::GetChildType(stats.GetType()));
12
12
  }
13
13
 
@@ -7,7 +7,7 @@ namespace duckdb {
7
7
 
8
8
  void StructStats::Construct(BaseStatistics &stats) {
9
9
  auto &child_types = StructType::GetChildTypes(stats.GetType());
10
- stats.child_stats = unsafe_array_ptr<BaseStatistics>(new BaseStatistics[child_types.size()]);
10
+ stats.child_stats = unsafe_unique_array<BaseStatistics>(new BaseStatistics[child_types.size()]);
11
11
  for (idx_t i = 0; i < child_types.size(); i++) {
12
12
  BaseStatistics::Construct(stats.child_stats[i], child_types[i].second);
13
13
  }
@@ -19,6 +19,9 @@ StorageManager::StorageManager(AttachedDatabase &db, string path_p, bool read_on
19
19
  : db(db), path(std::move(path_p)), read_only(read_only) {
20
20
  if (path.empty()) {
21
21
  path = ":memory:";
22
+ } else {
23
+ auto &fs = FileSystem::Get(db);
24
+ this->path = fs.ExpandPath(path);
22
25
  }
23
26
  }
24
27