duckdb 0.7.1-dev284.0 → 0.7.1-dev341.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +3 -3
- package/src/duckdb/extension/json/json_scan.cpp +1 -4
- package/src/duckdb/extension/parquet/column_reader.cpp +7 -0
- package/src/duckdb/extension/parquet/include/column_reader.hpp +1 -0
- package/src/duckdb/extension/parquet/parquet-extension.cpp +2 -10
- package/src/duckdb/src/catalog/catalog.cpp +47 -13
- package/src/duckdb/src/catalog/default/default_views.cpp +1 -1
- package/src/duckdb/src/common/file_system.cpp +23 -9
- package/src/duckdb/src/common/local_file_system.cpp +4 -4
- package/src/duckdb/src/common/types/partitioned_column_data.cpp +1 -0
- package/src/duckdb/src/execution/operator/aggregate/physical_window.cpp +6 -27
- package/src/duckdb/src/execution/operator/helper/physical_reset.cpp +1 -9
- package/src/duckdb/src/execution/operator/helper/physical_set.cpp +1 -9
- package/src/duckdb/src/function/scalar/generic/current_setting.cpp +2 -2
- package/src/duckdb/src/function/scalar/map/map.cpp +69 -21
- package/src/duckdb/src/function/table/read_csv.cpp +1 -4
- package/src/duckdb/src/function/table/version/pragma_version.cpp +2 -2
- package/src/duckdb/src/include/duckdb/catalog/catalog.hpp +2 -0
- package/src/duckdb/src/include/duckdb/common/file_system.hpp +1 -1
- package/src/duckdb/src/include/duckdb/main/{extension_functions.hpp → extension_entries.hpp} +26 -5
- package/src/duckdb/src/include/duckdb/main/extension_helper.hpp +3 -0
- package/src/duckdb/src/main/database.cpp +4 -2
- package/src/duckdb/src/main/extension/extension_load.cpp +22 -3
- package/src/duckdb/src/planner/binder/statement/bind_drop.cpp +2 -2
- package/src/duckdb/src/storage/buffer_manager.cpp +30 -3
- package/src/duckdb/third_party/concurrentqueue/blockingconcurrentqueue.h +2 -2
- package/test/arrow.test.ts +3 -3
package/README.md
CHANGED

@@ -1,6 +1,6 @@
 # DuckDB Node Bindings

-This package provides a node.js API for [DuckDB](https://github.com/
+This package provides a node.js API for [DuckDB](https://github.com/duckdb/duckdb), the "SQLite for Analytics". The API for this client is somewhat compliant to the SQLite node.js client for easier transition (and transition you must eventually).

 Load the package and create a database object:

package/package.json
CHANGED

@@ -2,7 +2,7 @@
   "name": "duckdb",
   "main": "./lib/duckdb.js",
   "types": "./lib/duckdb.d.ts",
-  "version": "0.7.1-dev284.0",
+  "version": "0.7.1-dev341.0",
   "description": "DuckDB node.js API",
   "gypfile": true,
   "dependencies": {

@@ -41,7 +41,7 @@
   },
   "repository": {
     "type": "git",
-    "url": "git+https://github.com/
+    "url": "git+https://github.com/duckdb/duckdb.git"
   },
   "ts-node": {
     "require": [

@@ -56,7 +56,7 @@
   "author": "Hannes Mühleisen",
   "license": "MPL-2.0",
   "bugs": {
-    "url": "https://github.com/
+    "url": "https://github.com/duckdb/duckdb/issues"
   },
   "homepage": "https://www.duckdb.org"
 }
package/src/duckdb/extension/json/json_scan.cpp
CHANGED

@@ -75,10 +75,7 @@ void JSONScanData::InitializeFilePaths(ClientContext &context, const vector<stri
                                        vector<string> &file_paths) {
     auto &fs = FileSystem::GetFileSystem(context);
     for (auto &file_pattern : patterns) {
-        auto found_files = fs.
-        if (found_files.empty()) {
-            throw FileSystem::MissingFileException(file_pattern, context);
-        }
+        auto found_files = fs.GlobFiles(file_pattern, context);
         file_paths.insert(file_paths.end(), found_files.begin(), found_files.end());
     }
 }
package/src/duckdb/extension/parquet/column_reader.cpp
CHANGED

@@ -589,6 +589,7 @@ void StringColumnReader::PrepareDeltaLengthByteArray(ResizeableBuffer &buffer) {
     }
     auto length_data = (uint32_t *)length_buffer->ptr;
     byte_array_data = make_unique<Vector>(LogicalType::VARCHAR, value_count);
+    byte_array_count = value_count;
     auto string_data = FlatVector::GetData<string_t>(*byte_array_data);
     for (idx_t i = 0; i < value_count; i++) {
         auto str_len = length_data[i];

@@ -615,6 +616,7 @@ void StringColumnReader::PrepareDeltaByteArray(ResizeableBuffer &buffer) {
     auto prefix_data = (uint32_t *)prefix_buffer->ptr;
     auto suffix_data = (uint32_t *)suffix_buffer->ptr;
     byte_array_data = make_unique<Vector>(LogicalType::VARCHAR, prefix_count);
+    byte_array_count = prefix_count;
     auto string_data = FlatVector::GetData<string_t>(*byte_array_data);
     for (idx_t i = 0; i < prefix_count; i++) {
         auto str_len = prefix_data[i] + suffix_data[i];

@@ -646,6 +648,11 @@ void StringColumnReader::DeltaByteArray(uint8_t *defines, idx_t num_values, parq
             continue;
         }
         if (filter[row_idx + result_offset]) {
+            if (delta_offset >= byte_array_count) {
+                throw IOException("DELTA_BYTE_ARRAY - length mismatch between values and byte array lengths (attempted "
+                                  "read of %d from %d entries) - corrupt file?",
+                                  delta_offset + 1, byte_array_count);
+            }
             result_ptr[row_idx + result_offset] = string_data[delta_offset++];
         } else {
             delta_offset++;
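The new guard in `DeltaByteArray` turns an out-of-bounds read on a corrupt file into a catchable `IOException` instead of undefined behavior. A standalone sketch of the same check-before-index pattern (plain C++ with illustrative names, not DuckDB's internal types):

```cpp
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Index into the decoded strings, but fail loudly instead of reading past the
// end when a corrupt file advertises more values than were actually decoded.
static const std::string &CheckedRead(const std::vector<std::string> &decoded, std::size_t idx) {
    if (idx >= decoded.size()) {
        throw std::runtime_error("length mismatch between values and byte array lengths (attempted read of " +
                                 std::to_string(idx + 1) + " from " + std::to_string(decoded.size()) +
                                 " entries) - corrupt file?");
    }
    return decoded[idx];
}

int main() {
    std::vector<std::string> decoded = {"a", "ab", "abc"};
    CheckedRead(decoded, 2); // in range: fine
    try {
        CheckedRead(decoded, 3); // corrupt input: throws instead of reading out of bounds
    } catch (const std::runtime_error &) {
        // expected
    }
    return 0;
}
```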
package/src/duckdb/extension/parquet/parquet-extension.cpp
CHANGED

@@ -221,10 +221,7 @@ public:
     }

     FileSystem &fs = FileSystem::GetFileSystem(context);
-    auto files = fs.
-    if (files.empty()) {
-        throw FileSystem::MissingFileException(info.file_path, context);
-    }
+    auto files = fs.GlobFiles(info.file_path, context);

     // The most likely path (Parquet read without union by name option)
     if (!parquet_options.union_by_name) {

@@ -362,12 +359,7 @@ public:
     }

     static vector<string> ParquetGlob(FileSystem &fs, const string &glob, ClientContext &context) {
-
-
-        if (files.empty()) {
-            throw FileSystem::MissingFileException(glob, context);
-        }
-        return files;
+        return fs.GlobFiles(glob, context);
     }

     static unique_ptr<FunctionData> ParquetScanBind(ClientContext &context, TableFunctionBindInput &input,
package/src/duckdb/src/catalog/catalog.cpp
CHANGED

@@ -27,7 +27,7 @@
 #include "duckdb/planner/parsed_data/bound_create_table_info.hpp"
 #include "duckdb/planner/binder.hpp"
 #include "duckdb/catalog/default/default_types.hpp"
-#include "duckdb/main/extension_functions.hpp"
+#include "duckdb/main/extension_entries.hpp"
 #include "duckdb/main/connection.hpp"
 #include "duckdb/main/attached_database.hpp"
 #include "duckdb/main/database_manager.hpp"

@@ -332,17 +332,26 @@ SimilarCatalogEntry Catalog::SimilarEntryInSchemas(ClientContext &context, const
     return result;
 }

-string
-auto
-auto it = std::lower_bound(
-
-
-if (it != EXTENSION_FUNCTIONS + size && it->function == function_name) {
+string FindExtensionGeneric(const string &name, const ExtensionEntry entries[], idx_t size) {
+    auto lcase = StringUtil::Lower(name);
+    auto it = std::lower_bound(entries, entries + size, lcase,
+                               [](const ExtensionEntry &element, const string &value) { return element.name < value; });
+    if (it != entries + size && it->name == lcase) {
         return it->extension;
     }
     return "";
 }

+string FindExtensionForFunction(const string &name) {
+    idx_t size = sizeof(EXTENSION_FUNCTIONS) / sizeof(ExtensionEntry);
+    return FindExtensionGeneric(name, EXTENSION_FUNCTIONS, size);
+}
+
+string FindExtensionForSetting(const string &name) {
+    idx_t size = sizeof(EXTENSION_SETTINGS) / sizeof(ExtensionEntry);
+    return FindExtensionGeneric(name, EXTENSION_SETTINGS, size);
+}
+
 vector<CatalogSearchEntry> GetCatalogEntries(ClientContext &context, const string &catalog, const string &schema) {
     vector<CatalogSearchEntry> entries;
     auto &search_path = *context.client_data->catalog_search_path;

@@ -407,6 +416,26 @@ void FindMinimalQualification(ClientContext &context, const string &catalog_name
     qualify_schema = true;
 }

+CatalogException Catalog::UnrecognizedConfigurationError(ClientContext &context, const string &name) {
+    // check if the setting exists in any extensions
+    auto extension_name = FindExtensionForSetting(name);
+    if (!extension_name.empty()) {
+        return CatalogException(
+            "Setting with name \"%s\" is not in the catalog, but it exists in the %s extension.\n\nTo "
+            "install and load the extension, run:\nINSTALL %s;\nLOAD %s;",
+            name, extension_name, extension_name, extension_name);
+    }
+    // the setting is not in an extension
+    // get a list of all options
+    vector<string> potential_names = DBConfig::GetOptionNames();
+    for (auto &entry : DBConfig::GetConfig(context).extension_parameters) {
+        potential_names.push_back(entry.first);
+    }
+
+    throw CatalogException("unrecognized configuration parameter \"%s\"\n%s", name,
+                           StringUtil::CandidatesErrorMessage(potential_names, name, "Did you mean"));
+}
+
 CatalogException Catalog::CreateMissingEntryException(ClientContext &context, const string &entry_name,
                                                       CatalogType type,
                                                       const unordered_set<SchemaCatalogEntry *> &schemas,

@@ -423,13 +452,18 @@ CatalogException Catalog::CreateMissingEntryException(ClientContext &context, co
         unseen_schemas.insert(current_schema);
     }
 }
-
-
-
-
-
-
+    // check if the entry exists in any extension
+    if (type == CatalogType::TABLE_FUNCTION_ENTRY || type == CatalogType::SCALAR_FUNCTION_ENTRY ||
+        type == CatalogType::AGGREGATE_FUNCTION_ENTRY) {
+        auto extension_name = FindExtensionForFunction(entry_name);
+        if (!extension_name.empty()) {
+            return CatalogException(
+                "Function with name \"%s\" is not in the catalog, but it exists in the %s extension.\n\nTo "
+                "install and load the extension, run:\nINSTALL %s;\nLOAD %s;",
+                entry_name, extension_name, extension_name, extension_name);
+        }
     }
+    auto unseen_entry = SimilarEntryInSchemas(context, entry_name, type, unseen_schemas);
     string did_you_mean;
     if (unseen_entry.Found() && unseen_entry.distance < entry.distance) {
         // the closest matching entry requires qualification as it is not in the default search path
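`FindExtensionGeneric` assumes the entry arrays are sorted by name, which lets `std::lower_bound` locate a candidate in O(log n) and a single comparison confirm the hit. A self-contained sketch of the lookup (illustrative entries; DuckDB additionally lowercases the probe first):

```cpp
#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>

struct Entry {
    const char *name;
    const char *extension;
};

// Must stay sorted by name: std::lower_bound is only valid on sorted input.
static constexpr Entry kEntries[] = {
    {"http_timeout", "httpfs"},
    {"s3_region", "httpfs"},
    {"timezone", "icu"},
};

std::string FindExtension(const std::string &name) {
    auto first = std::begin(kEntries), last = std::end(kEntries);
    auto it = std::lower_bound(first, last, name,
                               [](const Entry &e, const std::string &value) { return e.name < value; });
    if (it != last && it->name == name) {
        return it->extension;
    }
    return ""; // not provided by any known extension
}

int main() {
    std::cout << FindExtension("s3_region") << "\n";               // prints "httpfs"
    std::cout << FindExtension("no_such_setting").empty() << "\n"; // prints 1
}
```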
package/src/duckdb/src/catalog/default/default_views.cpp
CHANGED

@@ -48,7 +48,7 @@ static DefaultView internal_views[] = {
     {"pg_catalog", "pg_views", "SELECT schema_name schemaname, view_name viewname, 'duckdb' viewowner, sql definition FROM duckdb_views()"},
     {"information_schema", "columns", "SELECT database_name table_catalog, schema_name table_schema, table_name, column_name, column_index ordinal_position, column_default, CASE WHEN is_nullable THEN 'YES' ELSE 'NO' END is_nullable, data_type, character_maximum_length, NULL character_octet_length, numeric_precision, numeric_precision_radix, numeric_scale, NULL datetime_precision, NULL interval_type, NULL interval_precision, NULL character_set_catalog, NULL character_set_schema, NULL character_set_name, NULL collation_catalog, NULL collation_schema, NULL collation_name, NULL domain_catalog, NULL domain_schema, NULL domain_name, NULL udt_catalog, NULL udt_schema, NULL udt_name, NULL scope_catalog, NULL scope_schema, NULL scope_name, NULL maximum_cardinality, NULL dtd_identifier, NULL is_self_referencing, NULL is_identity, NULL identity_generation, NULL identity_start, NULL identity_increment, NULL identity_maximum, NULL identity_minimum, NULL identity_cycle, NULL is_generated, NULL generation_expression, NULL is_updatable FROM duckdb_columns;"},
     {"information_schema", "schemata", "SELECT database_name catalog_name, schema_name, 'duckdb' schema_owner, NULL default_character_set_catalog, NULL default_character_set_schema, NULL default_character_set_name, sql sql_path FROM duckdb_schemas()"},
-    {"information_schema", "tables", "SELECT database_name table_catalog, schema_name table_schema, table_name, CASE WHEN temporary THEN 'LOCAL TEMPORARY' ELSE 'BASE TABLE' END table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'YES' is_insertable_into, 'NO' is_typed, CASE WHEN temporary THEN 'PRESERVE' ELSE NULL END commit_action FROM duckdb_tables() UNION ALL SELECT
+    {"information_schema", "tables", "SELECT database_name table_catalog, schema_name table_schema, table_name, CASE WHEN temporary THEN 'LOCAL TEMPORARY' ELSE 'BASE TABLE' END table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'YES' is_insertable_into, 'NO' is_typed, CASE WHEN temporary THEN 'PRESERVE' ELSE NULL END commit_action FROM duckdb_tables() UNION ALL SELECT database_name table_catalog, schema_name table_schema, view_name table_name, 'VIEW' table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'NO' is_insertable_into, 'NO' is_typed, NULL commit_action FROM duckdb_views;"},
     {nullptr, nullptr, nullptr}};

 static unique_ptr<CreateViewInfo> GetDefaultView(ClientContext &context, const string &input_schema, const string &input_name) {
package/src/duckdb/src/common/file_system.cpp
CHANGED

@@ -10,6 +10,7 @@
 #include "duckdb/main/client_context.hpp"
 #include "duckdb/main/client_data.hpp"
 #include "duckdb/main/database.hpp"
+#include "duckdb/main/extension_helper.hpp"

 #include <cstdint>
 #include <cstdio>

@@ -335,18 +336,31 @@ bool FileSystem::CanHandleFile(const string &fpath) {
     throw NotImplementedException("%s: CanHandleFile is not implemented!", GetName());
 }

-
-
-
-
-
-
-
-
+vector<string> FileSystem::GlobFiles(const string &pattern, ClientContext &context) {
+    auto result = Glob(pattern, context);
+    if (result.empty()) {
+        string required_extension;
+        const string prefixes[] = {"http://", "https://", "s3://"};
+        for (auto &prefix : prefixes) {
+            if (StringUtil::StartsWith(pattern, prefix)) {
+                required_extension = "httpfs";
+                break;
             }
         }
+        if (!required_extension.empty() && !context.db->ExtensionIsLoaded(required_extension)) {
+            // an extension is required to read this file but it is not loaded - try to load it
+            ExtensionHelper::LoadExternalExtension(context, required_extension);
+            // success! glob again
+            // check the extension is loaded just in case to prevent an infinite loop here
+            if (!context.db->ExtensionIsLoaded(required_extension)) {
+                throw InternalException("Extension load \"%s\" did not throw but somehow the extension was not loaded",
+                                        required_extension);
+            }
+            return GlobFiles(pattern, context);
+        }
+        throw IOException("No files found that match the pattern \"%s\"", pattern);
     }
-    return
+    return result;
 }

 void FileSystem::Seek(FileHandle &handle, idx_t location) {
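`GlobFiles` centralizes what json_scan.cpp, read_csv.cpp, and the Parquet scanner previously hand-rolled: glob, and if nothing matches a remote-looking pattern, load `httpfs` once and glob again before reporting failure. A hedged sketch of that control flow with the filesystem and extension loader stubbed out (the `Context` struct is illustrative, not DuckDB's `ClientContext`):

```cpp
#include <functional>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

// Minimal stand-ins for the database context; illustrative only.
struct Context {
    std::set<std::string> loaded_extensions;
    std::function<std::vector<std::string>(const std::string &)> glob;   // raw filesystem glob
    std::function<void(Context &, const std::string &)> load_extension;  // stub for LoadExternalExtension
};

std::vector<std::string> GlobFiles(const std::string &pattern, Context &ctx) {
    auto result = ctx.glob(pattern);
    if (!result.empty()) {
        return result;
    }
    // Nothing matched: does the pattern look like it needs a remote filesystem?
    std::string required;
    for (const char *prefix : {"http://", "https://", "s3://"}) {
        if (pattern.rfind(prefix, 0) == 0) {
            required = "httpfs";
            break;
        }
    }
    if (!required.empty() && !ctx.loaded_extensions.count(required)) {
        ctx.load_extension(ctx, required); // throws if the extension cannot be loaded
        if (!ctx.loaded_extensions.count(required)) {
            // guard against an infinite retry loop if loading silently failed
            throw std::logic_error("extension load did not take effect");
        }
        return GlobFiles(pattern, ctx); // retry now that the handler is registered
    }
    throw std::runtime_error("No files found that match the pattern \"" + pattern + "\"");
}
```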
package/src/duckdb/src/common/local_file_system.cpp
CHANGED

@@ -833,8 +833,8 @@ static bool HasGlob(const string &str) {
     return false;
 }

-static void
-
+static void GlobFilesInternal(FileSystem &fs, const string &path, const string &glob, bool match_directory,
+                              vector<string> &result, bool join_path) {
     fs.ListFiles(path, [&](const string &fname, bool is_directory) {
         if (is_directory != match_directory) {
             return;

@@ -951,12 +951,12 @@ vector<string> LocalFileSystem::Glob(const string &path, FileOpener *opener) {
     } else {
         if (previous_directories.empty()) {
             // no previous directories: list in the current path
-
+            GlobFilesInternal(*this, ".", splits[i], !is_last_chunk, result, false);
         } else {
             // previous directories
             // we iterate over each of the previous directories, and apply the glob of the current directory
             for (auto &prev_directory : previous_directories) {
-
+                GlobFilesInternal(*this, prev_directory, splits[i], !is_last_chunk, result, true);
             }
         }
     }
package/src/duckdb/src/common/types/partitioned_column_data.cpp
CHANGED

@@ -137,6 +137,7 @@ void PartitionedColumnData::FlushAppendState(PartitionedColumnDataAppendState &s
         auto &partition_buffer = *state.partition_buffers[i];
         if (partition_buffer.size() > 0) {
             partitions[i]->Append(partition_buffer);
+            partition_buffer.Reset();
         }
     }
 }
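The one-line fix adds `Reset()` after flushing a partition buffer; without it, a reused buffer would re-append the rows it already flushed. The pattern in miniature:

```cpp
#include <cassert>
#include <vector>

struct Buffer {
    std::vector<int> rows;
    void Reset() { rows.clear(); }
};

int main() {
    std::vector<int> partition;
    Buffer buf;

    buf.rows = {1, 2, 3};
    partition.insert(partition.end(), buf.rows.begin(), buf.rows.end());
    buf.Reset(); // the fix: without this, {1,2,3} would be flushed again below

    buf.rows.insert(buf.rows.end(), {4, 5});
    partition.insert(partition.end(), buf.rows.begin(), buf.rows.end());
    buf.Reset();

    assert((partition == std::vector<int>{1, 2, 3, 4, 5}));
}
```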
package/src/duckdb/src/execution/operator/aggregate/physical_window.cpp
CHANGED

@@ -169,6 +169,10 @@ private:
 };

 void WindowGlobalSinkState::ResizeGroupingData(idx_t cardinality) {
+    // Have we started to combine? Then just live with it.
+    if (grouping_data && !grouping_data->GetPartitions().empty()) {
+        return;
+    }
     // Is the average partition size too large?
     const idx_t partition_size = STANDARD_ROW_GROUPS_SIZE;
     const auto bits = grouping_data ? grouping_data->GetRadixBits() : 0;

@@ -180,31 +184,7 @@ void WindowGlobalSinkState::ResizeGroupingData(idx_t cardinality) {
     // Repartition the grouping data
     if (new_bits != bits) {
         const auto hash_col_idx = payload_types.size();
-
-        make_unique<RadixPartitionedColumnData>(context, grouping_types, new_bits, hash_col_idx);
-
-        // We have to append to a shared copy for some reason
-        if (grouping_data) {
-            auto new_shared = new_grouping_data->CreateShared();
-            PartitionedColumnDataAppendState shared_append;
-            new_shared->InitializeAppendState(shared_append);
-
-            auto &partitions = grouping_data->GetPartitions();
-            for (auto &partition : partitions) {
-                ColumnDataScanState scanner;
-                partition->InitializeScan(scanner);
-
-                DataChunk scan_chunk;
-                partition->InitializeScanChunk(scan_chunk);
-                for (scan_chunk.Reset(); partition->Scan(scanner, scan_chunk); scan_chunk.Reset()) {
-                    new_shared->Append(shared_append, scan_chunk);
-                }
-            }
-            new_shared->FlushAppendState(shared_append);
-            new_grouping_data->Combine(*new_shared);
-        }
-
-        grouping_data = std::move(new_grouping_data);
+        grouping_data = make_unique<RadixPartitionedColumnData>(context, grouping_types, new_bits, hash_col_idx);
     }
 }

@@ -432,8 +412,6 @@ void WindowLocalSinkState::Sink(DataChunk &input_chunk, WindowGlobalSinkState &g
     }

     // OVER(...)
-    gstate.UpdateLocalPartition(local_partition, local_append);
-
     payload_chunk.Reset();
     auto &hash_vector = payload_chunk.data.back();
     Hash(input_chunk, hash_vector);

@@ -442,6 +420,7 @@ void WindowLocalSinkState::Sink(DataChunk &input_chunk, WindowGlobalSinkState &g
     }
     payload_chunk.SetCardinality(input_chunk);

+    gstate.UpdateLocalPartition(local_partition, local_append);
     local_partition->Append(*local_append, payload_chunk);
 }
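The repartitioning rewrite replaces the scan-and-copy path with a simpler rule: the radix fan-out may only change while no data has been partitioned yet; once partitions hold rows, keep the current layout. A rough sketch of that guard (illustrative types; DuckDB checks `GetPartitions().empty()` on its partitioned column data instead):

```cpp
#include <cstddef>
#include <vector>

// Illustrative partitioner: the radix fan-out may grow while empty, then freezes.
struct RadixPartitioner {
    std::size_t radix_bits = 0;
    std::vector<std::vector<int>> partitions; // one bucket per radix value

    bool HasData() const {
        for (auto &p : partitions) {
            if (!p.empty()) {
                return true;
            }
        }
        return false;
    }

    // Called when the cardinality estimate grows. Once data has been
    // partitioned, repartitioning would mean rescanning every bucket,
    // so we just live with the current layout.
    void Resize(std::size_t new_bits) {
        if (HasData()) {
            return;
        }
        if (new_bits != radix_bits) {
            radix_bits = new_bits;
            partitions.assign(std::size_t(1) << radix_bits, {});
        }
    }

    void Append(int value, std::size_t hash) {
        if (partitions.empty()) {
            partitions.assign(std::size_t(1) << radix_bits, {});
        }
        partitions[hash & (partitions.size() - 1)].push_back(value);
    }
};
```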
package/src/duckdb/src/execution/operator/helper/physical_reset.cpp
CHANGED

@@ -27,15 +27,7 @@ void PhysicalReset::GetData(ExecutionContext &context, DataChunk &chunk, GlobalS
     auto &config = DBConfig::GetConfig(context.client);
     auto entry = config.extension_parameters.find(name);
     if (entry == config.extension_parameters.end()) {
-
-        // get a list of all options
-        vector<string> potential_names = DBConfig::GetOptionNames();
-        for (auto &entry : config.extension_parameters) {
-            potential_names.push_back(entry.first);
-        }
-
-        throw CatalogException("unrecognized configuration parameter \"%s\"\n%s", name,
-                               StringUtil::CandidatesErrorMessage(potential_names, name, "Did you mean"));
+        throw Catalog::UnrecognizedConfigurationError(context.client, name);
     }
     ResetExtensionVariable(context, config, entry->second);
     return;
package/src/duckdb/src/execution/operator/helper/physical_set.cpp
CHANGED

@@ -30,15 +30,7 @@ void PhysicalSet::GetData(ExecutionContext &context, DataChunk &chunk, GlobalSou
     auto &config = DBConfig::GetConfig(context.client);
     auto entry = config.extension_parameters.find(name);
     if (entry == config.extension_parameters.end()) {
-
-        // get a list of all options
-        vector<string> potential_names = DBConfig::GetOptionNames();
-        for (auto &entry : config.extension_parameters) {
-            potential_names.push_back(entry.first);
-        }
-
-        throw CatalogException("unrecognized configuration parameter \"%s\"\n%s", name,
-                               StringUtil::CandidatesErrorMessage(potential_names, name, "Did you mean"));
+        throw Catalog::UnrecognizedConfigurationError(context.client, name);
     }
     SetExtensionVariable(context.client, entry->second, name, scope, value);
     return;
package/src/duckdb/src/function/scalar/generic/current_setting.cpp
CHANGED

@@ -4,7 +4,7 @@
 #include "duckdb/main/client_context.hpp"
 #include "duckdb/planner/expression/bound_function_expression.hpp"
 #include "duckdb/execution/expression_executor.hpp"
-
+#include "duckdb/catalog/catalog.hpp"
 namespace duckdb {

 struct CurrentSettingBindData : public FunctionData {

@@ -51,7 +51,7 @@ unique_ptr<FunctionData> CurrentSettingBind(ClientContext &context, ScalarFuncti
     auto key = StringUtil::Lower(key_str);
     Value val;
     if (!context.TryGetCurrentSetting(key, val)) {
-        throw
+        throw Catalog::UnrecognizedConfigurationError(context, key);
     }

     bound_function.return_type = val.type();
package/src/duckdb/src/function/scalar/map/map.cpp
CHANGED

@@ -66,46 +66,94 @@ void MapConversionVerify(Vector &vector, idx_t count) {
     }
 }

-
-
-
-
-
-
-
-
+// Example:
+// source: [1,2,3], expansion_factor: 4
+// target (result): [1,2,3,1,2,3,1,2,3,1,2,3]
+static void CreateExpandedVector(const Vector &source, Vector &target, idx_t expansion_factor) {
+    idx_t count = ListVector::GetListSize(source);
+    auto &entry = ListVector::GetEntry(source);
+
+    idx_t target_idx = 0;
+    for (idx_t copy = 0; copy < expansion_factor; copy++) {
+        for (idx_t key_idx = 0; key_idx < count; key_idx++) {
+            target.SetValue(target_idx, entry.GetValue(key_idx));
+            target_idx++;
         }
     }
+    D_ASSERT(target_idx == count * expansion_factor);
+}
+
+static void AlignVectorToReference(const Vector &original, const Vector &reference, idx_t tuple_count, Vector &result) {
+    auto original_length = ListVector::GetListSize(original);
+    auto new_length = ListVector::GetListSize(reference);
+
+    Vector expanded_const(ListType::GetChildType(original.GetType()), new_length);
+
+    auto expansion_factor = new_length / original_length;
+    if (expansion_factor != tuple_count) {
+        throw InvalidInputException("Error in MAP creation: key list and value list do not align. i.e. different "
+                                    "size or incompatible structure");
+    }
+    CreateExpandedVector(original, expanded_const, expansion_factor);
+    result.Reference(expanded_const);
+}
+
+static void MapFunction(DataChunk &args, ExpressionState &state, Vector &result) {
+    D_ASSERT(result.GetType().id() == LogicalTypeId::MAP);

     auto &key_vector = MapVector::GetKeys(result);
     auto &value_vector = MapVector::GetValues(result);
-    auto
+    auto result_data = ListVector::GetData(result);

+    result.SetVectorType(VectorType::CONSTANT_VECTOR);
     if (args.data.empty()) {
         ListVector::SetListSize(result, 0);
-
-
+        result_data->offset = 0;
+        result_data->length = 0;
         result.Verify(args.size());
         return;
     }

-
+    bool keys_are_const = args.data[0].GetVectorType() == VectorType::CONSTANT_VECTOR;
+    bool values_are_const = args.data[1].GetVectorType() == VectorType::CONSTANT_VECTOR;
+    if (!keys_are_const || !values_are_const) {
+        result.SetVectorType(VectorType::FLAT_VECTOR);
+    }
+
     auto key_count = ListVector::GetListSize(args.data[0]);
     auto value_count = ListVector::GetListSize(args.data[1]);
-
-
-
-
+    auto key_data = ListVector::GetData(args.data[0]);
+    auto value_data = ListVector::GetData(args.data[1]);
+    auto src_data = key_data;
+
+    if (keys_are_const && !values_are_const) {
+        AlignVectorToReference(args.data[0], args.data[1], args.size(), key_vector);
+        src_data = value_data;
+    } else if (values_are_const && !keys_are_const) {
+        AlignVectorToReference(args.data[1], args.data[0], args.size(), value_vector);
+    } else {
+        if (key_count != value_count || memcmp(key_data, value_data, args.size() * sizeof(list_entry_t)) != 0) {
+            throw InvalidInputException("Error in MAP creation: key list and value list do not align. i.e. different "
+                                        "size or incompatible structure");
+        }
     }
-    ListVector::Reserve(result, key_count);
-    ListVector::SetListSize(result, key_count);

+    ListVector::SetListSize(result, MaxValue(key_count, value_count));
+
+    result_data = ListVector::GetData(result);
     for (idx_t i = 0; i < args.size(); i++) {
-
+        result_data[i] = src_data[i];
+    }
+
+    // check whether one of the vectors has already been referenced to an expanded vector in the case of const/non-const
+    // combination. If not, then referencing is still necessary
+    if (!(keys_are_const && !values_are_const)) {
+        key_vector.Reference(ListVector::GetEntry(args.data[0]));
+    }
+    if (!(values_are_const && !keys_are_const)) {
+        value_vector.Reference(ListVector::GetEntry(args.data[1]));
     }

-    key_vector.Reference(ListVector::GetEntry(args.data[0]));
-    value_vector.Reference(ListVector::GetEntry(args.data[1]));
     MapConversionVerify(result, args.size());
     result.Verify(args.size());
 }
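`AlignVectorToReference` handles `MAP(constant_list, flat_list)` by repeating the constant list's children once per row so both sides line up element for element, as in the `[1,2,3]` times 4 example in the new comment. The expansion with `std::vector` stand-ins for DuckDB vectors:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// source: [1,2,3], expansion_factor: 4  ->  [1,2,3,1,2,3,1,2,3,1,2,3]
std::vector<int> Expand(const std::vector<int> &source, std::size_t expansion_factor) {
    std::vector<int> target;
    target.reserve(source.size() * expansion_factor);
    for (std::size_t copy = 0; copy < expansion_factor; copy++) {
        target.insert(target.end(), source.begin(), source.end());
    }
    return target;
}

int main() {
    // The keys are the constant list [1,2,3]; with four rows of values,
    // the keys are expanded to align with all 4 * 3 value entries.
    auto expanded = Expand({1, 2, 3}, 4);
    assert(expanded.size() == 12);
    assert(expanded[3] == 1 && expanded[11] == 3);
}
```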
package/src/duckdb/src/function/table/read_csv.cpp
CHANGED

@@ -28,10 +28,7 @@ unique_ptr<CSVFileHandle> ReadCSV::OpenCSV(const string &file_path, FileCompress
 void ReadCSVData::InitializeFiles(ClientContext &context, const vector<string> &patterns) {
     auto &fs = FileSystem::GetFileSystem(context);
     for (auto &file_pattern : patterns) {
-        auto found_files = fs.
-        if (found_files.empty()) {
-            throw FileSystem::MissingFileException(file_pattern, context);
-        }
+        auto found_files = fs.GlobFiles(file_pattern, context);
         files.insert(files.end(), found_files.begin(), found_files.end());
     }
 }
package/src/duckdb/src/function/table/version/pragma_version.cpp
CHANGED

@@ -1,8 +1,8 @@
 #ifndef DUCKDB_VERSION
-#define DUCKDB_VERSION "0.7.1-dev284"
+#define DUCKDB_VERSION "0.7.1-dev341"
 #endif
 #ifndef DUCKDB_SOURCE_ID
-#define DUCKDB_SOURCE_ID "
+#define DUCKDB_SOURCE_ID "d58ab188ff"
 #endif
 #include "duckdb/function/table/system_functions.hpp"
 #include "duckdb/main/database.hpp"
package/src/duckdb/src/include/duckdb/common/file_system.hpp
CHANGED

@@ -188,6 +188,7 @@ public:
     //! Runs a glob on the file system, returning a list of matching files
     DUCKDB_API virtual vector<string> Glob(const string &path, FileOpener *opener = nullptr);
     DUCKDB_API virtual vector<string> Glob(const string &path, ClientContext &context);
+    DUCKDB_API vector<string> GlobFiles(const string &path, ClientContext &context);

     //! registers a sub-file system to handle certain file name prefixes, e.g. http:// etc.
     DUCKDB_API virtual void RegisterSubSystem(unique_ptr<FileSystem> sub_fs);

@@ -201,7 +202,6 @@ public:

     //! Whether or not a sub-system can handle a specific file path
     DUCKDB_API virtual bool CanHandleFile(const string &fpath);
-    DUCKDB_API static IOException MissingFileException(const string &file_path, ClientContext &context);

     //! Set the file pointer of a file handle to a specified location. Reads and writes will happen from this location
     DUCKDB_API virtual void Seek(FileHandle &handle, idx_t location);
package/src/duckdb/src/include/duckdb/main/{extension_functions.hpp → extension_entries.hpp}
RENAMED

@@ -1,7 +1,7 @@
 //===----------------------------------------------------------------------===//
 // DuckDB
 //
-// duckdb/main/extension_functions.hpp
+// duckdb/main/extension_entries.hpp
 //
 //
 //===----------------------------------------------------------------------===//

@@ -12,12 +12,12 @@

 namespace duckdb {

-struct ExtensionFunction {
-    char function[48];
+struct ExtensionEntry {
+    char name[48];
     char extension[48];
 };

-static constexpr ExtensionFunction EXTENSION_FUNCTIONS[] = {
+static constexpr ExtensionEntry EXTENSION_FUNCTIONS[] = {
     {"->>", "json"},
     {"array_to_json", "json"},
     {"create_fts_index", "fts"},

@@ -30,9 +30,9 @@ static constexpr ExtensionFunction EXTENSION_FUNCTIONS[] = {
     {"from_json", "json"},
     {"from_json_strict", "json"},
     {"from_substrait", "substrait"},
+    {"from_substrait_json", "substrait"},
     {"get_substrait", "substrait"},
     {"get_substrait_json", "substrait"},
-    {"from_substrait_json", "substrait"},
     {"icu_calendar_names", "icu"},
     {"icu_sort_key", "icu"},
     {"json", "json"},

@@ -88,4 +88,25 @@ static constexpr ExtensionFunction EXTENSION_FUNCTIONS[] = {
     {"visualize_json_profiling_output", "visualizer"},
     {"visualize_last_profiling_output", "visualizer"},
 };
+
+static constexpr ExtensionEntry EXTENSION_SETTINGS[] = {
+    {"binary_as_string", "parquet"},
+    {"calendar", "icu"},
+    {"http_retries", "httpfs"},
+    {"http_retry_backoff", "httpfs"},
+    {"http_retry_wait_ms", "httpfs"},
+    {"http_timeout", "httpfs"},
+    {"s3_access_key_id", "httpfs"},
+    {"s3_endpoint", "httpfs"},
+    {"s3_region", "httpfs"},
+    {"s3_secret_access_key", "httpfs"},
+    {"s3_session_token", "httpfs"},
+    {"s3_uploader_max_filesize", "httpfs"},
+    {"s3_uploader_max_parts_per_file", "httpfs"},
+    {"s3_uploader_thread_limit", "httpfs"},
+    {"s3_url_style", "httpfs"},
+    {"s3_use_ssl", "httpfs"},
+    {"sqlite_all_varchar", "sqlite_scanner"},
+    {"timezone", "icu"},
+};
 } // namespace duckdb
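Both arrays are consumed by the `std::lower_bound` lookup in catalog.cpp, so they must stay lexicographically sorted by name; note the diff itself moves `from_substrait_json` up for exactly this reason. A hedged sketch of a test-time assertion that would catch a misplaced entry (not part of the diff):

```cpp
#include <algorithm>
#include <cassert>
#include <cstring>
#include <iterator>

struct ExtensionEntry {
    char name[48];
    char extension[48];
};

static constexpr ExtensionEntry kSettings[] = {
    {"binary_as_string", "parquet"},
    {"calendar", "icu"},
    {"timezone", "icu"},
};

int main() {
    // A lower_bound-based lookup is only correct on sorted input.
    assert(std::is_sorted(std::begin(kSettings), std::end(kSettings),
                          [](const ExtensionEntry &a, const ExtensionEntry &b) {
                              return std::strcmp(a.name, b.name) < 0;
                          }));
}
```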
package/src/duckdb/src/include/duckdb/main/extension_helper.hpp
CHANGED

@@ -62,6 +62,9 @@ public:
     //! Apply any known extension aliases
     static string ApplyExtensionAlias(string extension_name);

+    static string GetExtensionName(const string &extension);
+    static bool IsFullPath(const string &extension);
+
 private:
     static const vector<string> PathComponents();
     static ExtensionInitResult InitialLoad(DBConfig &context, FileOpener *opener, const string &extension);
package/src/duckdb/src/main/database.cpp
CHANGED

@@ -356,7 +356,8 @@ idx_t DuckDB::NumberOfThreads() {
 }

 bool DatabaseInstance::ExtensionIsLoaded(const std::string &name) {
-
+    auto extension_name = ExtensionHelper::GetExtensionName(name);
+    return loaded_extensions.find(extension_name) != loaded_extensions.end();
 }

 bool DuckDB::ExtensionIsLoaded(const std::string &name) {

@@ -364,7 +365,8 @@ bool DuckDB::ExtensionIsLoaded(const std::string &name) {
 }

 void DatabaseInstance::SetExtensionLoaded(const std::string &name) {
-
+    auto extension_name = ExtensionHelper::GetExtensionName(name);
+    loaded_extensions.insert(extension_name);
 }

 bool DatabaseInstance::TryGetCurrentSetting(const std::string &key, Value &result) {
package/src/duckdb/src/main/extension/extension_load.cpp
CHANGED

@@ -31,7 +31,7 @@ ExtensionInitResult ExtensionHelper::InitialLoad(DBConfig &config, FileOpener *o
     auto filename = fs.ConvertSeparators(extension);

     // shorthand case
-    if (!
+    if (!ExtensionHelper::IsFullPath(extension)) {
         string local_path = fs.GetHomeDirectory(opener);
         auto path_components = PathComponents();
         for (auto &path_ele : path_components) {

@@ -121,9 +121,28 @@ ExtensionInitResult ExtensionHelper::InitialLoad(DBConfig &config, FileOpener *o
     return res;
 }

+bool ExtensionHelper::IsFullPath(const string &extension) {
+    return StringUtil::Contains(extension, ".") || StringUtil::Contains(extension, "/") ||
+           StringUtil::Contains(extension, "\\");
+}
+
+string ExtensionHelper::GetExtensionName(const string &extension) {
+    if (!IsFullPath(extension)) {
+        return extension;
+    }
+    auto splits = StringUtil::Split(StringUtil::Replace(extension, "\\", "/"), '/');
+    if (splits.empty()) {
+        return extension;
+    }
+    splits = StringUtil::Split(splits.back(), '.');
+    if (splits.empty()) {
+        return extension;
+    }
+    return StringUtil::Lower(splits.front());
+}
+
 void ExtensionHelper::LoadExternalExtension(DatabaseInstance &db, FileOpener *opener, const string &extension) {
-
-    if (loaded_extensions.find(extension) != loaded_extensions.end()) {
+    if (db.ExtensionIsLoaded(extension)) {
         return;
     }
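`GetExtensionName` lets a bare name like `httpfs` and a full path like `/tmp/HTTPFS.duckdb_extension` map to the same key in `loaded_extensions`. The normalization, sketched with standard-library string handling (DuckDB goes through `StringUtil` helpers instead):

```cpp
#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>

// Reduce a bare name or a full path to a canonical lowercase extension name:
// strip directories (either separator style), drop everything after the first
// '.', and lowercase the result.
std::string GetExtensionName(std::string extension) {
    bool is_path = extension.find('.') != std::string::npos ||
                   extension.find('/') != std::string::npos ||
                   extension.find('\\') != std::string::npos;
    if (!is_path) {
        return extension;
    }
    std::replace(extension.begin(), extension.end(), '\\', '/');
    auto slash = extension.find_last_of('/');
    std::string base = (slash == std::string::npos) ? extension : extension.substr(slash + 1);
    base = base.substr(0, base.find('.')); // npos keeps the whole string
    std::transform(base.begin(), base.end(), base.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return base;
}

int main() {
    assert(GetExtensionName("httpfs") == "httpfs");
    assert(GetExtensionName("/tmp/HTTPFS.duckdb_extension") == "httpfs");
    assert(GetExtensionName("C:\\ext\\Parquet.duckdb_extension") == "parquet");
}
```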
package/src/duckdb/src/planner/binder/statement/bind_drop.cpp
CHANGED

@@ -54,7 +54,7 @@ BoundStatement Binder::Bind(DropStatement &stmt) {
     auto &config = DBConfig::GetConfig(context);
     // for now assume only one storage extension provides the custom drop_database impl
     for (auto &extension_entry : config.storage_extensions) {
-        if (extension_entry.second->drop_database
+        if (extension_entry.second->drop_database == nullptr) {
             continue;
         }
         auto &storage_extension = extension_entry.second;

@@ -64,7 +64,7 @@ BoundStatement Binder::Bind(DropStatement &stmt) {
     auto bound_drop_database_func = Bind(*drop_database_function_ref);
     result.plan = CreatePlan(*bound_drop_database_func);
     result.names = {"Success"};
-    result.types = {LogicalType::
+    result.types = {LogicalType::BIGINT};
     properties.allow_stream_result = false;
     properties.return_type = StatementReturnType::NOTHING;
     return result;
package/src/duckdb/src/storage/buffer_manager.cpp
CHANGED

@@ -230,6 +230,7 @@ public:
 private:
     DatabaseInstance &db;
     string temp_directory;
+    bool created_directory = false;
     unique_ptr<TemporaryFileManager> temp_file;
 };

@@ -868,7 +869,10 @@ TemporaryDirectoryHandle::TemporaryDirectoryHandle(DatabaseInstance &db, string
     : db(db), temp_directory(std::move(path_p)), temp_file(make_unique<TemporaryFileManager>(db, temp_directory)) {
     auto &fs = FileSystem::GetFileSystem(db);
     if (!temp_directory.empty()) {
-        fs.
+        if (!fs.DirectoryExists(temp_directory)) {
+            fs.CreateDirectory(temp_directory);
+            created_directory = true;
+        }
     }
 }

@@ -877,7 +881,30 @@ TemporaryDirectoryHandle::~TemporaryDirectoryHandle() {
     // then delete the temporary file directory
     auto &fs = FileSystem::GetFileSystem(db);
     if (!temp_directory.empty()) {
-
+        bool delete_directory = created_directory;
+        vector<string> files_to_delete;
+        if (!created_directory) {
+            bool deleted_everything = true;
+            fs.ListFiles(temp_directory, [&](const string &path, bool isdir) {
+                if (isdir) {
+                    deleted_everything = false;
+                    return;
+                }
+                if (!StringUtil::StartsWith(path, "duckdb_temp_")) {
+                    deleted_everything = false;
+                    return;
+                }
+                files_to_delete.push_back(path);
+            });
+        }
+        if (delete_directory) {
+            // we want to remove all files in the directory
+            fs.RemoveDirectory(temp_directory);
+        } else {
+            for (auto &file : files_to_delete) {
+                fs.RemoveFile(fs.JoinPath(temp_directory, file));
+            }
+        }
     }
 }

@@ -887,7 +914,7 @@ TemporaryFileManager &TemporaryDirectoryHandle::GetTempFile() {

 string BufferManager::GetTemporaryPath(block_id_t id) {
     auto &fs = FileSystem::GetFileSystem(db);
-    return fs.JoinPath(temp_directory, to_string(id) + ".block");
+    return fs.JoinPath(temp_directory, "duckdb_temp_block-" + to_string(id) + ".block");
 }

 void BufferManager::RequireTemporaryDirectory() {
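The destructor change makes cleanup deliberately conservative: remove the whole directory only if this handle created it; otherwise delete only files carrying the `duckdb_temp_` prefix that `GetTemporaryPath` now guarantees, and leave anything else in a pre-existing directory untouched. The policy in miniature with `std::filesystem` (illustrative, not the DuckDB `FileSystem` API):

```cpp
#include <filesystem>
#include <string>

namespace fs = std::filesystem;

// Conservative temp cleanup: only remove what we are sure we own.
void CleanupTempDirectory(const fs::path &dir, bool created_directory) {
    if (created_directory) {
        // We made the directory, so everything inside is ours.
        fs::remove_all(dir);
        return;
    }
    // Pre-existing directory: delete only our own, prefixed files.
    for (const auto &entry : fs::directory_iterator(dir)) {
        if (entry.is_regular_file() &&
            entry.path().filename().string().rfind("duckdb_temp_", 0) == 0) {
            fs::remove(entry.path());
        }
    }
}
```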
package/src/duckdb/third_party/concurrentqueue/blockingconcurrentqueue.h
CHANGED

@@ -24,8 +24,8 @@ template<typename T, typename Traits = ConcurrentQueueDefaultTraits>
 class BlockingConcurrentQueue
 {
 private:
-    typedef ::
-    typedef ::
+    typedef ::duckdb_moodycamel::ConcurrentQueue<T, Traits> ConcurrentQueue;
+    typedef ::duckdb_moodycamel::LightweightSemaphore LightweightSemaphore;

 public:
     typedef typename ConcurrentQueue::producer_token_t producer_token_t;
package/test/arrow.test.ts
CHANGED

@@ -20,13 +20,13 @@ describe('arrow IPC API fails neatly when extension not loaded', function() {
     db.arrowIPCStream(query).then(
         () => Promise.reject(new Error('Expected method to reject.')),
         err => {
-            assert.ok(err.message.includes("
+            assert.ok(err.message.includes("arrow"))
         }
     );

     db.arrowIPCAll(`SELECT * FROM ipc_table`, function (err: null | Error, result: ArrowArray) {
         if (err) {
-            assert.ok(err.message.includes("
+            assert.ok(err.message.includes("arrow"))
         } else {
             assert.fail("Expected error");
         }

@@ -39,7 +39,7 @@ describe('arrow IPC API fails neatly when extension not loaded', function() {
     it('register buffer should be disabled currently', function(done) {
         db.register_buffer("test", [new Uint8Array(new ArrayBuffer(10))], true, (err: null | Error) => {
             assert.ok(err)
-            assert.ok(err.toString().includes("
+            assert.ok(err.toString().includes("arrow"));
             done()
         });
     });