gqlite 1.4.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. checksums.yaml +4 -4
  2. data/ext/Cargo.toml +5 -4
  3. data/ext/gqlitedb/Cargo.toml +14 -7
  4. data/ext/gqlitedb/benches/common/pokec.rs +63 -11
  5. data/ext/gqlitedb/benches/pokec_divan.rs +11 -11
  6. data/ext/gqlitedb/benches/pokec_iai.rs +11 -0
  7. data/ext/gqlitedb/src/aggregators/containers.rs +4 -1
  8. data/ext/gqlitedb/src/aggregators/stats.rs +129 -0
  9. data/ext/gqlitedb/src/aggregators.rs +2 -0
  10. data/ext/gqlitedb/src/connection.rs +16 -6
  11. data/ext/gqlitedb/src/error.rs +32 -2
  12. data/ext/gqlitedb/src/functions/containers.rs +4 -5
  13. data/ext/gqlitedb/src/functions/edge.rs +1 -1
  14. data/ext/gqlitedb/src/functions/math.rs +2 -2
  15. data/ext/gqlitedb/src/functions/node.rs +1 -1
  16. data/ext/gqlitedb/src/functions/path.rs +2 -2
  17. data/ext/gqlitedb/src/functions/scalar.rs +70 -14
  18. data/ext/gqlitedb/src/functions/string.rs +2 -2
  19. data/ext/gqlitedb/src/functions/value.rs +4 -4
  20. data/ext/gqlitedb/src/functions.rs +21 -14
  21. data/ext/gqlitedb/src/store/postgres.rs +702 -0
  22. data/ext/gqlitedb/src/store/sqlbase/sqlbindingvalue.rs +140 -0
  23. data/ext/gqlitedb/src/store/sqlbase/sqlmetadata.rs +7 -7
  24. data/ext/gqlitedb/src/store/sqlbase/sqlresultvalue.rs +124 -0
  25. data/ext/gqlitedb/src/store/sqlbase/sqlstore.rs +3 -3
  26. data/ext/gqlitedb/src/store/sqlbase.rs +83 -120
  27. data/ext/gqlitedb/src/store/sqlite.rs +29 -16
  28. data/ext/gqlitedb/src/tests/connection/postgres.rs +8 -0
  29. data/ext/gqlitedb/src/tests/connection.rs +19 -0
  30. data/ext/gqlitedb/src/tests/evaluators.rs +6 -11
  31. data/ext/gqlitedb/src/tests/store/postgres.rs +41 -0
  32. data/ext/gqlitedb/src/tests/store.rs +17 -21
  33. data/ext/gqlitedb/src/tests.rs +50 -1
  34. data/ext/gqlitedb/src/value_table.rs +2 -2
  35. data/ext/gqlitedb/templates/sql/postgres/call_stats.sql +22 -0
  36. data/ext/gqlitedb/templates/sql/postgres/edge_count_for_node.sql +3 -0
  37. data/ext/gqlitedb/templates/sql/postgres/edge_create.sql +6 -0
  38. data/ext/gqlitedb/templates/sql/postgres/edge_delete.sql +1 -0
  39. data/ext/gqlitedb/templates/sql/postgres/edge_delete_by_nodes.sql +4 -0
  40. data/ext/gqlitedb/templates/sql/postgres/edge_select.sql +109 -0
  41. data/ext/gqlitedb/templates/sql/postgres/edge_update.sql +4 -0
  42. data/ext/gqlitedb/templates/sql/postgres/graph_create.sql +21 -0
  43. data/ext/gqlitedb/templates/sql/postgres/graph_delete.sql +3 -0
  44. data/ext/gqlitedb/templates/sql/postgres/metadata_create_table.sql +1 -0
  45. data/ext/gqlitedb/templates/sql/postgres/metadata_get.sql +1 -0
  46. data/ext/gqlitedb/templates/sql/postgres/metadata_set.sql +1 -0
  47. data/ext/gqlitedb/templates/sql/postgres/node_create.sql +1 -0
  48. data/ext/gqlitedb/templates/sql/postgres/node_delete.sql +1 -0
  49. data/ext/gqlitedb/templates/sql/postgres/node_select.sql +32 -0
  50. data/ext/gqlitedb/templates/sql/postgres/node_update.sql +4 -0
  51. data/ext/gqlitedb/templates/sql/postgres/table_exists.sql +1 -0
  52. data/ext/gqlitedb/templates/sql/sqlite/metadata_get.sql +1 -1
  53. data/ext/gqlitedb/templates/sql/sqlite/metadata_set.sql +1 -1
  54. data/ext/gqlitedb/templates/sql/sqlite/node_create.sql +1 -1
  55. data/ext/gqlitedb/templates/sql/sqlite/node_select.sql +3 -3
  56. data/ext/gqliterb/Cargo.toml +4 -1
  57. data/ext/gqliterb/src/lib.rs +8 -15
  58. data/ext/graphcore/src/lib.rs +1 -1
  59. data/ext/graphcore/src/value.rs +60 -1
  60. metadata +24 -3
  61. data/ext/gqlitedb/src/store/sqlbase/sqlvalue.rs +0 -189
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 1b7eba40745af114ff57ca358b2e03c67a7adb9717ad7c7b81ddd7444503fbb1
- data.tar.gz: 3ae6e7c7b038a2caeaf728ff7ce98c49266160d181f478c8259b98d7d8157ba5
+ metadata.gz: 5aac591ce0775f004fc859b9cd453a940d39f9f80aa1535e2a37aae948b6baac
+ data.tar.gz: c66e83597f70d3c69ed412d5431530d1c2cc211507b68c18c7df74b65da017ef
  SHA512:
- metadata.gz: 6674d35ca4d0ec02090ef1521f9f8bb2a4025a050ed312e9c20d50033c8a3472a6a31bdb73dfe6fa9ec99612518c5f5edd72d3ea9d8ead7885c39ed106ac6056
- data.tar.gz: e200b7d2a4203cf803a65bcd6eef82a990fc8c1fa2e2846148673109731353898dacd7b388fe23c9c5b8e5eeae2b56768912408f6f916805111d322c0a83d5ac
+ metadata.gz: 32503eae6aec73407846b1cce09f58374445473ef59122efe87280b4b756118d202199fc07da0b2ae7dd906f457997aac7b27d47eb17e490ce9c1a648c4e76cf
+ data.tar.gz: 95f14af4c49792144d5efa84ccc906c9d49ac065d808d927858d3d3a448ece3aa3137b1ece5057bb5b3534b301dd839e159a4fb745af0265efb352255c36d5c0
data/ext/Cargo.toml CHANGED
@@ -3,19 +3,20 @@ resolver = "2"
  members = ["gqliterb", "gqlitedb", "graphcore"]
 
  [workspace.package]
- version = "0.7.0"
+ version = "0.8.0"
  edition = "2021"
  license = "MIT"
  homepage = "https://gqlite.org"
  repository = "https://gitlab.com/gqlite/gqlite"
 
  [workspace.dependencies]
- graphcore = { version = "0.7.0", path = "graphcore" }
- gqlitedb = { version = "0.7.0", path = "gqlitedb" }
+ graphcore = { version = "0.8.0", path = "graphcore" }
+ gqlitedb = { version = "0.8.0", path = "gqlitedb" }
 
- askama = { version = "0.14" }
+ askama = { version = "0.15" }
  ccutils = { version = "0.4" }
  itertools = "0.14"
+ rusqlite = { version = "0.37" }
  serde = "1"
  thiserror = "2"
  uuid = { version = "1", features = ["v4"] }
data/ext/gqlitedb/Cargo.toml CHANGED
@@ -12,7 +12,7 @@ repository.workspace = true
  crate-type = ["cdylib", "lib"]
 
  [features]
- default = ["redb", "capi", "sqlite"]
+ default = ["capi", "redb"]
  _backtrace = []
  capi = []
  redb = ["dep:redb", "dep:redb2"]
@@ -22,10 +22,9 @@ _pg14 = ["pgrx/pg14"]
  _pg15 = ["pgrx/pg15"]
  _pg16 = ["pgrx/pg16"]
  _pg17 = ["pgrx/pg17"]
- postgres = ["dep:postgres"]
+ postgres = ["dep:postgres", "dep:askama"]
  sqlite = ["dep:rusqlite", "dep:askama"]
- _value_private = []
- bundled = ["rusqlite/bundled"]
+ _cmake = ["postgres", "redb", "sqlite"]
 
  [dependencies]
  graphcore = { workspace = true }
@@ -37,11 +36,14 @@ itertools = { workspace = true }
  pest = "2"
  pest_derive = "2"
  pgrx = { version = "0.16", optional = true }
- postgres = { version = "0.19", optional = true }
+ postgres = { version = "0.19", optional = true, features = [
+   "with-uuid-1",
+   "with-serde_json-1",
+ ] }
  rand = "0.9"
  redb = { version = "3", optional = true }
  redb2 = { version = "2", optional = true, package = "redb" }
- rusqlite = { package = "rusqlite", version = "0.37", optional = true, features = [
+ rusqlite = { workspace = true, optional = true, features = [
    "functions",
    "uuid",
  ] }
@@ -54,9 +56,11 @@ uuid = { workspace = true }
  ccutils = { workspace = true, features = ["alias", "temporary"] }
  divan = "0.1"
  iai-callgrind = { version = "0.16" }
- pgtemp = "0.6"
  regex = "1"
 
+ [target.'cfg(not(windows))'.dev-dependencies]
+ pgtemp = "0.7"
+
  # web:
  [target.'cfg(target_arch = "wasm32")'.dependencies]
  # rusqlite_wasm = { package = "rusqlite", version = "0.37", optional = true, git = "https://github.com/Spxg/rusqlite.git", branch = "wasm-demo", features = [
@@ -71,6 +75,9 @@ uuid = { version = "1", features = ["js"] }
  # "uuid",
  # ] }
 
+ [target.'cfg(windows)'.dependencies]
+ rusqlite = { workspace = true, features = ["bundled"] }
+
  [[bench]]
  name = "pokec_divan"
  harness = false
data/ext/gqlitedb/benches/common/pokec.rs CHANGED
@@ -5,10 +5,15 @@ use gqlitedb::{value_map, Connection};
  use rand::{seq::IndexedRandom, Rng};
  use regex::Regex;
 
+ /// Marker trait to keep backend-specific resources alive for the benchmark lifetime.
+ trait BackendResource {}
+ impl BackendResource for TemporaryFile {}
+ impl BackendResource for pgtemp::PgTempDB {}
+
  pub(crate) struct Pokec
  {
    #[allow(dead_code)]
-   temporary_file: TemporaryFile,
+   file_handle: Box<dyn BackendResource>,
    connection: Connection,
    ids: Vec<i64>,
  }
@@ -28,17 +33,64 @@ impl Pokec
  {
    "sqlite" => gqlitedb::Backend::SQLite,
    "redb" => gqlitedb::Backend::Redb,
+   "postgres" => gqlitedb::Backend::Postgres,
    o => panic!("Unknown backend '{}'", o),
  };
- let temporary_file = TemporaryFile::builder()
-   .should_create_file(false)
-   .label("gqlite_bench")
-   .create();
- let connection = Connection::builder()
-   .path(temporary_file.path())
-   .backend(backend)
-   .create()
-   .unwrap();
+ let (file_handle, connection): (Box<dyn BackendResource>, _) = match backend
+ {
+   gqlitedb::Backend::Redb | gqlitedb::Backend::SQLite =>
+   {
+     let temporary_file = TemporaryFile::builder()
+       .should_create_file(false)
+       .label("gqlite_bench")
+       .create();
+     let connection = Connection::builder()
+       .path(temporary_file.path())
+       .backend(backend)
+       .create()
+       .unwrap();
+     (Box::new(temporary_file), connection)
+   }
+   gqlitedb::Backend::Postgres =>
+   {
+     use std::{
+       thread::sleep,
+       time::{Duration, Instant},
+     };
+     let output = std::process::Command::new("pg_config")
+       .arg("--bindir")
+       .output()
+       .expect("failed to execute pg_config");
+
+     let bin_path = String::from_utf8_lossy(&output.stdout).trim().to_string();
+
+     let db = pgtemp::PgTempDB::builder().with_bin_path(bin_path).start();
+
+     const MAX_WAIT: Duration = Duration::from_secs(30);
+     const INTERVAL: Duration = Duration::from_secs(1);
+
+     let deadline = Instant::now() + MAX_WAIT;
+
+     let mut connection = Connection::builder()
+       .set_option("url", db.connection_uri())
+       .backend(backend)
+       .create();
+     while connection.is_err() && Instant::now() < deadline
+     {
+       sleep(INTERVAL);
+       connection = Connection::builder()
+         .set_option("url", db.connection_uri())
+         .backend(backend)
+         .create()
+     }
+     let connection = connection.expect("Failed to connect to temporary Postgres DB within 30s");
+     (Box::new(db), connection)
+   }
+   gqlitedb::Backend::Automatic =>
+   {
+     panic!("Should not be selected.")
+   }
+ };
 
  let filename = match size
  {
@@ -52,7 +104,7 @@ impl Pokec
    .execute_oc_query(import_query, Default::default())
    .unwrap();
  Self {
-   temporary_file,
+   file_handle,
    connection,
    ids: Default::default(),
  }
data/ext/gqlitedb/benches/pokec_divan.rs CHANGED
@@ -23,13 +23,13 @@ fn load_pokec(backend: &str) -> Pokec
  }
 
  // Import the tiny `pokec` dataset.
- #[divan::bench(args = ["sqlite", "redb"], sample_count = IMPORT_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = IMPORT_COUNT)]
  fn import_tiny_pokec(backend: &str)
  {
    let _ = load_pokec(backend);
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
  fn single_vertex(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -40,7 +40,7 @@ fn single_vertex(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
  fn single_vertex_where(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -51,7 +51,7 @@ fn single_vertex_where(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
  fn friend_of(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -62,7 +62,7 @@ fn friend_of(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
  fn friend_of_filter(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -73,7 +73,7 @@ fn friend_of_filter(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
  fn friend_of_friend_of(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -84,7 +84,7 @@ fn friend_of_friend_of(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
  fn friend_of_friend_of_filter(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -95,7 +95,7 @@ fn friend_of_friend_of_filter(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
  fn reciprocal_friends(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend).read_ids();
@@ -106,7 +106,7 @@ fn reciprocal_friends(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"])]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"])]
  fn aggregate_count(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend);
@@ -116,7 +116,7 @@ fn aggregate_count(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"])]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"])]
  fn aggregate_count_filter(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend);
@@ -126,7 +126,7 @@ fn aggregate_count_filter(bencher: Bencher, backend: &str)
    });
  }
 
- #[divan::bench(args = ["sqlite", "redb"])]
+ #[divan::bench(args = ["postgres", "sqlite", "redb"])]
  fn aggregate_min_max_avg(bencher: Bencher, backend: &str)
  {
    let tiny_pokec = load_pokec(backend);
data/ext/gqlitedb/benches/pokec_iai.rs CHANGED
@@ -18,6 +18,7 @@ fn load_with_ids(backend: &str) -> Pokec
  }
 
  #[library_benchmark]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn import_micro_pokec(backend: &str)
@@ -26,6 +27,7 @@ fn import_micro_pokec(backend: &str)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn single_vertex(micro_pokec: Pokec)
@@ -35,6 +37,7 @@ fn single_vertex(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn single_vertex_where(micro_pokec: Pokec)
@@ -44,6 +47,7 @@ fn single_vertex_where(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn friend_of(micro_pokec: Pokec)
@@ -53,6 +57,7 @@ fn friend_of(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn friend_of_filter(micro_pokec: Pokec)
@@ -62,6 +67,7 @@ fn friend_of_filter(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn friend_of_friend_of(micro_pokec: Pokec)
@@ -71,6 +77,7 @@ fn friend_of_friend_of(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn friend_of_friend_of_filter(micro_pokec: Pokec)
@@ -80,6 +87,7 @@ fn friend_of_friend_of_filter(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_with_ids)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn reciprocal_friends(micro_pokec: Pokec)
@@ -89,6 +97,7 @@ fn reciprocal_friends(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_pokec)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn aggregate_count(micro_pokec: Pokec)
@@ -97,6 +106,7 @@ fn aggregate_count(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_pokec)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn aggregate_count_filter(micro_pokec: Pokec)
@@ -105,6 +115,7 @@ fn aggregate_count_filter(micro_pokec: Pokec)
  }
 
  #[library_benchmark(setup = load_pokec)]
+ #[bench::postgres("postgres")]
  #[bench::redb("redb")]
  #[bench::sqlite("sqlite")]
  fn aggregate_min_max_avg(micro_pokec: Pokec)
data/ext/gqlitedb/src/aggregators/containers.rs CHANGED
@@ -21,7 +21,10 @@ impl AggregatorState for CollectState
  {
    fn next(&mut self, value: Value) -> crate::Result<()>
    {
-     self.value.push(value);
+     if !value.is_null()
+     {
+       self.value.push(value);
+     }
      Ok(())
    }
    fn finalise(self: Box<Self>) -> crate::Result<crate::value::Value>
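Behavioural note on the `collect()` change above: null inputs are now skipped instead of being pushed into the result list. A plain-Rust analogue of that semantics (illustration only, using `Option` in place of gqlitedb's `Value` type):

```rust
// Analogue of the CollectState change: nulls (None) are dropped, everything else is kept.
fn collect_non_null(values: Vec<Option<i64>>) -> Vec<i64> {
    values.into_iter().flatten().collect()
}

fn main() {
    assert_eq!(collect_non_null(vec![Some(1), None, Some(3)]), vec![1, 3]);
}
```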
data/ext/gqlitedb/src/aggregators/stats.rs CHANGED
@@ -144,3 +144,132 @@ impl AggregatorState for MaxState
  }
 
  super::declare_aggregator!(max, Max, MaxState, () -> i64);
+
+ #[derive(Debug)]
+ struct PercentileDiscState
+ {
+   values: Vec<f64>,
+   percentile: f64,
+ }
+
+ impl PercentileDiscState
+ {
+   fn new(percentile: value::Value) -> Result<Self>
+   {
+     Ok(Self {
+       values: Default::default(),
+       percentile: percentile.try_into()?,
+     })
+   }
+ }
+
+ impl AggregatorState for PercentileDiscState
+ {
+   fn next(&mut self, value: value::Value) -> crate::Result<()>
+   {
+     if !value.is_null()
+     {
+       self.values.push(value.try_into()?);
+     }
+     Ok(())
+   }
+   fn finalise(self: Box<Self>) -> crate::Result<crate::value::Value>
+   {
+     if !self.percentile.is_finite() || !(0.0..=1.0).contains(&self.percentile)
+     {
+       return Err(
+         RunTimeError::NumberOutOfRange {
+           value: self.percentile,
+           min: 0.0,
+           max: 1.0,
+         }
+         .into(),
+       );
+     }
+
+     if self.values.is_empty()
+     {
+       return Ok(0.0.into());
+     }
+
+     let mut sorted = self.values;
+     sorted.sort_by(|a, b| a.total_cmp(b));
+
+     let n = sorted.len() as f64;
+     let index = (self.percentile * (n - 1.0)).round() as usize;
+
+     Ok(sorted[index].into())
+   }
+ }
+
+ super::declare_aggregator!(percentiledisc, PercentileDisc, PercentileDiscState, (value::Value) -> f64);
+
+ #[derive(Debug)]
+ struct PercentileContState
+ {
+   values: Vec<f64>,
+   percentile: f64,
+ }
+
+ impl PercentileContState
+ {
+   fn new(percentile: value::Value) -> Result<Self>
+   {
+     Ok(Self {
+       values: Default::default(),
+       percentile: percentile.try_into()?,
+     })
+   }
+ }
+
+ impl AggregatorState for PercentileContState
+ {
+   fn next(&mut self, value: value::Value) -> crate::Result<()>
+   {
+     if !value.is_null()
+     {
+       self.values.push(value.try_into()?);
+     }
+     Ok(())
+   }
+   fn finalise(self: Box<Self>) -> crate::Result<crate::value::Value>
+   {
+     if !self.percentile.is_finite() || !(0.0..=1.0).contains(&self.percentile)
+     {
+       return Err(
+         RunTimeError::NumberOutOfRange {
+           value: self.percentile,
+           min: 0.0,
+           max: 1.0,
+         }
+         .into(),
+       );
+     }
+
+     if self.values.is_empty()
+     {
+       return Ok(0.0.into());
+     }
+
+     let mut sorted = self.values;
+     sorted.sort_by(|a, b| a.total_cmp(b));
+
+     let n = sorted.len() as f64;
+     let pos = self.percentile * (n - 1.0);
+     let lower_index = pos.floor() as usize;
+     let upper_index = lower_index + 1;
+
+     if lower_index >= sorted.len() - 1
+     {
+       return Ok(sorted[sorted.len() - 1].into());
+     }
+
+     let weight = pos - pos.floor();
+     let lower = sorted[lower_index];
+     let upper = sorted[upper_index];
+
+     Ok((lower + weight * (upper - lower)).into())
+   }
+ }
+
+ super::declare_aggregator!(percentilecont, PercentileCont, PercentileContState, (value::Value) -> f64);
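For reference, the two new aggregators follow the usual percentile definitions: the discrete variant rounds the fractional rank and returns an actual input value, while the continuous variant interpolates linearly between the two values that bracket that rank. A standalone restatement of the continuous logic from `finalise` above (illustration only, not part of the gqlitedb API):

```rust
// Sort, compute the fractional rank p * (n - 1), then interpolate between the bracketing values.
fn percentile_cont(mut values: Vec<f64>, percentile: f64) -> f64 {
    assert!((0.0..=1.0).contains(&percentile) && !values.is_empty());
    values.sort_by(|a, b| a.total_cmp(b));
    let pos = percentile * (values.len() as f64 - 1.0);
    let lower = pos.floor() as usize;
    if lower >= values.len() - 1 {
        return values[values.len() - 1];
    }
    let weight = pos - pos.floor();
    values[lower] + weight * (values[lower + 1] - values[lower])
}

fn main() {
    // Odd count: the median (p = 0.5) falls exactly on an element.
    assert_eq!(percentile_cont(vec![1.0, 2.0, 3.0, 4.0, 10.0], 0.5), 3.0);
    // Even count: halfway between 2.0 and 3.0.
    assert_eq!(percentile_cont(vec![1.0, 2.0, 3.0, 4.0], 0.5), 2.5);
}
```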
data/ext/gqlitedb/src/aggregators.rs CHANGED
@@ -69,6 +69,8 @@ pub(crate) fn init_aggregators() -> std::collections::HashMap<String, Aggregator
    stats::Avg::create(),
    stats::Min::create(),
    stats::Max::create(),
+   stats::PercentileDisc::create(),
+   stats::PercentileCont::create(),
  ]
  .into()
  }
data/ext/gqlitedb/src/connection.rs CHANGED
@@ -4,6 +4,7 @@ use crate::{prelude::*, QueryResult};
  use value::ValueTryIntoRef;
 
  /// Backend
+ #[derive(Debug, Clone, Copy)]
  pub enum Backend
  {
    /// Select the first available backend.
@@ -28,12 +29,16 @@ pub struct ConnectionBuilder
  impl ConnectionBuilder
  {
    /// Merge options. This might overwrite value from the builder
-   pub fn options(mut self, options: value::ValueMap) -> Self
+   pub fn options(mut self, options: impl Into<value::ValueMap>) -> Self
    {
-     for (k, v) in options.into_iter()
-     {
-       self.map.insert(k, v);
-     }
+     let mut options = options.into();
+     self.map.extend(options.drain());
+     self
+   }
+   /// Set the option value for the given key.
+   pub fn set_option(mut self, key: impl Into<String>, value: impl Into<value::Value>) -> Self
+   {
+     self.map.insert(key.into(), value.into());
      self
    }
    /// Set path
@@ -263,6 +268,10 @@ impl Connection
    "postgres" =>
    {
      let mut config = postgres::Config::new();
+     if let Some(url) = options.get("url")
+     {
+       config = url.to_string().parse()?;
+     }
      if let Some(host) = options.get("host")
      {
        let host: &String = host.try_into_ref()?;
@@ -283,7 +292,8 @@ impl Connection
      connection: ConnectionImpl {
        store,
        function_manager: functions::Manager::new(),
-     },
+     }
+     .boxed(),
    })
  }
  _ => Err(StoreError::UnknownBackend { backend }.into()),
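Usage sketch for the builder changes above (the new `set_option` and the loosened `options` signature). The path and URL below are placeholders, `.path()` is assumed to accept a plain string, and error handling is reduced to `expect()`:

```rust
use gqlitedb::{Backend, Connection};

fn main() {
    // File-backed store, selecting the backend explicitly (unchanged API).
    let _sqlite = Connection::builder()
        .path("/tmp/example.gqlite")
        .backend(Backend::SQLite)
        .create()
        .expect("failed to open SQLite-backed store");

    // New in this release: per-key options via set_option, e.g. a Postgres
    // connection URL, mirroring how the pokec benchmark connects to a
    // temporary server started with pgtemp.
    let _pg = Connection::builder()
        .set_option("url", "postgres://user@localhost/gqlite")
        .backend(Backend::Postgres)
        .create()
        .expect("failed to connect to the Postgres-backed store");
}
```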
data/ext/gqlitedb/src/error.rs CHANGED
@@ -162,6 +162,11 @@ pub enum RunTimeError
  },
  #[error("Path cannot have null key.")]
  MissingKey,
+ #[error("NumberOutOfRange: expected a value between {min} and {max}, got {value}.")]
+ NumberOutOfRange
+ {
+   value: f64, min: f64, max: f64
+ },
  }
 
  /// Internal errors, should be treated as bugs.
@@ -336,6 +341,22 @@ pub enum StoreError
  #[error("redb: {0}")]
  Redb2Error(#[from] redb2::Error),
 
+ #[cfg(feature = "postgres")]
+ #[error("Postgres: {0}")]
+ PostgresError(#[from] postgres::Error),
+
+ #[cfg(feature = "postgres")]
+ #[error("Postgres: {error} while executing {query}.")]
+ PostgresErrorWithQuery
+ {
+   error: postgres::Error,
+   query: String,
+ },
+
+ #[cfg(feature = "postgres")]
+ #[error("Postgres: unsupported type {0}")]
+ UnsupportedPostgresType(String),
+
  #[error("UnknownBackend: backend '{backend}' is unknown.")]
  UnknownBackend
  {
@@ -522,8 +543,6 @@ macro_rules! error_as_internal {
  };
  }
 
- pub(crate) use error_as_internal;
-
  macro_rules! error_as_store {
  ($err_type:ty) => {
    impl From<$err_type> for crate::prelude::ErrorType
@@ -588,6 +607,17 @@ mod _trait_impl_redb
  mod _trait_impl_sqlite
  {
    error_as_store! {rusqlite::Error}
+ }
+
+ #[cfg(feature = "postgres")]
+ mod _trait_impl_postgres
+ {
+   error_as_store! {postgres::Error}
+ }
+
+ #[cfg(any(feature = "postgres", feature = "sqlite"))]
+ mod _trait_impl_askama
+ {
    error_as_internal! {askama::Error}
  }
 
data/ext/gqlitedb/src/functions/containers.rs CHANGED
@@ -6,8 +6,7 @@ pub(super) struct Head {}
 
  impl Head
  {
-   #[allow(clippy::ptr_arg)]
-   fn call_impl(array: &Vec<value::Value>) -> FResult<value::Value>
+   fn call_impl(array: Vec<value::Value>) -> FResult<value::Value>
    {
      Ok(
        array
@@ -30,7 +29,7 @@ pub(super) struct Keys {}
 
  impl Keys
  {
-   fn call_impl(container: &value::Value) -> Result<Vec<value::Value>>
+   fn call_impl(container: value::Value) -> Result<Vec<value::Value>>
    {
      match container
      {
@@ -57,9 +56,9 @@ pub(super) struct Range {}
 
  impl Range
  {
-   fn call_impl(min: &i64, max: &i64) -> FResult<Vec<i64>>
+   fn call_impl(min: i64, max: i64) -> FResult<Vec<i64>>
    {
-     Ok((*min..=*max).step_by(1).collect())
+     Ok((min..=max).step_by(1).collect())
    }
  }
 
data/ext/gqlitedb/src/functions/edge.rs CHANGED
@@ -7,7 +7,7 @@ pub(super) struct Type {}
 
  impl Type
  {
-   fn call_impl(edge: &graph::Edge) -> FResult<String>
+   fn call_impl(edge: graph::Edge) -> FResult<String>
    {
      edge
        .labels()