duckdb 0.6.1-dev3.0 → 0.6.1-dev32.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/duckdb.cpp +53 -8
- package/src/duckdb.hpp +1176 -1031
- package/src/parquet-amalgamation.cpp +36916 -36914
package/package.json
CHANGED
package/src/duckdb.cpp
CHANGED
@@ -18593,6 +18593,7 @@ FileType FileHandle::GetType() {
 
 
 
+
 #include <limits>
 #include <cstring>
 #include <cmath>
@@ -22487,6 +22488,7 @@ unique_ptr<FileSystem> FileSystem::CreateLocal() {
 
 
 
+
 namespace duckdb {
 
 struct ConvertToString {
@@ -24109,8 +24111,11 @@ string_t StringCastFromDecimal::Operation(hugeint_t input, uint8_t width, uint8_
 
 
 
+
 namespace duckdb {
 
+struct interval_t;
+
 struct MultiplyOperator {
 	template <class TA, class TB, class TR>
 	static inline TR Operation(TA left, TB right) {
@@ -45487,6 +45492,7 @@ string Decimal::ToString(hugeint_t value, uint8_t width, uint8_t scale) {
 
 
 
+
 #include <functional>
 #include <cmath>
 
@@ -46720,6 +46726,7 @@ DUCKDB_API DatePartSpecifier GetDatePartSpecifier(const string &specifier);
 
 
 
+
 namespace duckdb {
 
 struct AddOperator {
@@ -46847,8 +46854,14 @@ dtime_t AddTimeOperator::Operation(interval_t left, dtime_t right);
 
 
 
+
 namespace duckdb {
 
+struct interval_t;
+struct date_t;
+struct timestamp_t;
+struct dtime_t;
+
 struct SubtractOperator {
 	template <class TA, class TB, class TR>
 	static inline TR Operation(TA left, TB right) {
@@ -106724,16 +106737,16 @@ struct StructDatePart {
 	const auto idx = rdata.sel->get_index(i);
 	if (arg_valid.RowIsValid(idx)) {
 		if (Value::IsFinite(tdata[idx])) {
-			DatePart::StructOperator::Operation(part_values.data(), tdata[idx],
+			DatePart::StructOperator::Operation(part_values.data(), tdata[idx], i, part_mask);
 		} else {
 			for (auto &child_entry : child_entries) {
-				FlatVector::Validity(*child_entry).SetInvalid(
+				FlatVector::Validity(*child_entry).SetInvalid(i);
 			}
 		}
 	} else {
-		res_valid.SetInvalid(
+		res_valid.SetInvalid(i);
 		for (auto &child_entry : child_entries) {
-			FlatVector::Validity(*child_entry).SetInvalid(
+			FlatVector::Validity(*child_entry).SetInvalid(i);
 		}
 	}
 }
@@ -138304,12 +138317,19 @@ public:
 	//! Creates and caches a new DB Instance (Fails if a cached instance already exists)
 	shared_ptr<DuckDB> CreateInstance(const string &database, DBConfig &config_dict, bool cache_instance = true);
 
+	//! Creates and caches a new DB Instance (Fails if a cached instance already exists)
+	shared_ptr<DuckDB> GetOrCreateInstance(const string &database, DBConfig &config_dict, bool cache_instance);
+
 private:
 	//! A map with the cached instances <absolute_path/instance>
 	unordered_map<string, weak_ptr<DuckDB>> db_instances;
 
 	//! Lock to alter cache
 	mutex cache_lock;
+
+private:
+	shared_ptr<DuckDB> GetInstanceInternal(const string &database, const DBConfig &config_dict);
+	shared_ptr<DuckDB> CreateInstanceInternal(const string &database, DBConfig &config_dict, bool cache_instance);
 };
 } // namespace duckdb
 
@@ -138330,8 +138350,7 @@ string GetDBAbsolutePath(const string &database) {
 	return FileSystem::JoinPath(FileSystem::GetWorkingDirectory(), database);
 }
 
-shared_ptr<DuckDB> DBInstanceCache::
-	lock_guard<mutex> l(cache_lock);
+shared_ptr<DuckDB> DBInstanceCache::GetInstanceInternal(const string &database, const DBConfig &config) {
 	shared_ptr<DuckDB> db_instance;
 	auto abs_database_path = GetDBAbsolutePath(database);
 	if (db_instances.find(abs_database_path) != db_instances.end()) {
@@ -138350,8 +138369,13 @@ shared_ptr<DuckDB> DBInstanceCache::GetInstance(const string &database, const DB
 	return db_instance;
 }
 
-shared_ptr<DuckDB> DBInstanceCache::
+shared_ptr<DuckDB> DBInstanceCache::GetInstance(const string &database, const DBConfig &config) {
 	lock_guard<mutex> l(cache_lock);
+	return GetInstanceInternal(database, config);
+}
+
+shared_ptr<DuckDB> DBInstanceCache::CreateInstanceInternal(const string &database, DBConfig &config,
+                                                           bool cache_instance) {
 	auto abs_database_path = GetDBAbsolutePath(database);
 	if (db_instances.find(abs_database_path) != db_instances.end()) {
 		throw duckdb::Exception(ExceptionType::CONNECTION,
@@ -138369,6 +138393,23 @@ shared_ptr<DuckDB> DBInstanceCache::CreateInstance(const string &database, DBCon
 	return db_instance;
 }
 
+shared_ptr<DuckDB> DBInstanceCache::CreateInstance(const string &database, DBConfig &config, bool cache_instance) {
+	lock_guard<mutex> l(cache_lock);
+	return CreateInstanceInternal(database, config, cache_instance);
+}
+
+shared_ptr<DuckDB> DBInstanceCache::GetOrCreateInstance(const string &database, DBConfig &config_dict,
+                                                        bool cache_instance) {
+	lock_guard<mutex> l(cache_lock);
+	if (cache_instance) {
+		auto instance = GetInstanceInternal(database, config_dict);
+		if (instance) {
+			return instance;
+		}
+	}
+	return CreateInstanceInternal(database, config_dict, cache_instance);
+}
+
 } // namespace duckdb
 
 
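The DBInstanceCache hunks above move the locking into thin public wrappers and add a GetOrCreateInstance entry point that performs the lookup and the fallback creation under a single lock_guard. A minimal caller sketch, assuming this package's amalgamated duckdb.hpp/duckdb.cpp are compiled into the program and using ":memory:" purely as an illustrative database path:

// Hypothetical caller of the new DBInstanceCache::GetOrCreateInstance API
// (illustration only; not part of the package diff).
#include "duckdb.hpp"

#include <iostream>

int main() {
	duckdb::DBInstanceCache cache;
	duckdb::DBConfig config;

	// Returns the cached instance for this path if one exists; otherwise
	// creates a new instance and (because cache_instance is true) caches it.
	auto db = cache.GetOrCreateInstance(":memory:", config, /*cache_instance=*/true);

	duckdb::Connection con(*db);
	auto result = con.Query("SELECT 42");
	std::cout << result->ToString() << std::endl;
	return 0;
}

Because GetOrCreateInstance holds cache_lock across both the lookup and the creation, callers no longer need to chain GetInstance and CreateInstance themselves, which would take the lock twice and leave a window between the two calls.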
@@ -155444,7 +155485,6 @@ unique_ptr<LogicalOperator> FilterPullup::PullupJoin(unique_ptr<LogicalOperator>
 	case JoinType::LEFT:
 	case JoinType::ANTI:
 	case JoinType::SEMI: {
-		can_add_column = true;
 		return PullupFromLeft(move(op));
 	}
 	default:
@@ -157982,6 +158022,8 @@ unique_ptr<LogicalOperator> FilterPullup::PullupBothSide(unique_ptr<LogicalOpera
 	FilterPullup right_pullup(true, can_add_column);
 	op->children[0] = left_pullup.Rewrite(move(op->children[0]));
 	op->children[1] = right_pullup.Rewrite(move(op->children[1]));
+	D_ASSERT(left_pullup.can_add_column == can_add_column);
+	D_ASSERT(right_pullup.can_add_column == can_add_column);
 
 	// merging filter expressions
 	for (idx_t i = 0; i < right_pullup.filters_expr_pullup.size(); ++i) {
@@ -158764,6 +158806,9 @@ unique_ptr<LogicalOperator> FilterPushdown::PushdownSetOperation(unique_ptr<Logi
 	D_ASSERT(op->children.size() == 2);
 	auto left_bindings = op->children[0]->GetColumnBindings();
 	auto right_bindings = op->children[1]->GetColumnBindings();
+	if (left_bindings.size() != right_bindings.size()) {
+		throw InternalException("Filter pushdown - set operation LHS and RHS have incompatible counts");
+	}
 
 	// pushdown into set operation, we can duplicate the condition and pushdown the expressions into both sides
 	FilterPushdown left_pushdown(optimizer), right_pushdown(optimizer);