gqlite 1.3.1 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/ext/Cargo.toml +5 -4
  3. data/ext/gqlitedb/Cargo.toml +16 -6
  4. data/ext/gqlitedb/benches/common/pokec.rs +63 -11
  5. data/ext/gqlitedb/benches/pokec_divan.rs +11 -11
  6. data/ext/gqlitedb/benches/pokec_iai.rs +11 -0
  7. data/ext/gqlitedb/src/aggregators/arithmetic.rs +1 -0
  8. data/ext/gqlitedb/src/aggregators/containers.rs +4 -1
  9. data/ext/gqlitedb/src/aggregators/stats.rs +129 -0
  10. data/ext/gqlitedb/src/aggregators.rs +2 -0
  11. data/ext/gqlitedb/src/compiler/expression_analyser.rs +2 -0
  12. data/ext/gqlitedb/src/compiler.rs +1 -0
  13. data/ext/gqlitedb/src/connection.rs +57 -12
  14. data/ext/gqlitedb/src/error.rs +35 -2
  15. data/ext/gqlitedb/src/functions/containers.rs +4 -5
  16. data/ext/gqlitedb/src/functions/edge.rs +1 -1
  17. data/ext/gqlitedb/src/functions/math.rs +2 -2
  18. data/ext/gqlitedb/src/functions/node.rs +1 -1
  19. data/ext/gqlitedb/src/functions/path.rs +2 -2
  20. data/ext/gqlitedb/src/functions/scalar.rs +70 -14
  21. data/ext/gqlitedb/src/functions/string.rs +2 -2
  22. data/ext/gqlitedb/src/functions/value.rs +4 -4
  23. data/ext/gqlitedb/src/functions.rs +21 -14
  24. data/ext/gqlitedb/src/interpreter/evaluators.rs +5 -2
  25. data/ext/gqlitedb/src/interpreter/instructions.rs +1 -1
  26. data/ext/gqlitedb/src/lib.rs +1 -1
  27. data/ext/gqlitedb/src/parser/ast.rs +1 -0
  28. data/ext/gqlitedb/src/parser/gql.pest +3 -1
  29. data/ext/gqlitedb/src/parser/parser_impl.rs +8 -0
  30. data/ext/gqlitedb/src/prelude.rs +3 -0
  31. data/ext/gqlitedb/src/store/{pgql.rs → pgrx.rs} +2 -0
  32. data/ext/gqlitedb/src/store/postgres.rs +702 -0
  33. data/ext/gqlitedb/src/store/sqlbase/sqlbindingvalue.rs +140 -0
  34. data/ext/gqlitedb/src/store/sqlbase/sqlmetadata.rs +117 -0
  35. data/ext/gqlitedb/src/store/sqlbase/sqlqueries.rs +62 -0
  36. data/ext/gqlitedb/src/store/sqlbase/sqlresultvalue.rs +124 -0
  37. data/ext/gqlitedb/src/store/sqlbase/sqlstore.rs +55 -0
  38. data/ext/gqlitedb/src/store/sqlbase.rs +419 -0
  39. data/ext/gqlitedb/src/store/sqlite.rs +284 -573
  40. data/ext/gqlitedb/src/store.rs +7 -5
  41. data/ext/gqlitedb/src/tests/connection/postgres.rs +8 -0
  42. data/ext/gqlitedb/src/tests/connection.rs +19 -0
  43. data/ext/gqlitedb/src/tests/evaluators.rs +6 -11
  44. data/ext/gqlitedb/src/tests/store/postgres.rs +41 -0
  45. data/ext/gqlitedb/src/tests/store.rs +17 -21
  46. data/ext/gqlitedb/src/tests.rs +50 -1
  47. data/ext/gqlitedb/src/utils.rs +25 -0
  48. data/ext/gqlitedb/src/value/compare.rs +6 -0
  49. data/ext/gqlitedb/src/value.rs +18 -2
  50. data/ext/gqlitedb/src/value_table.rs +2 -2
  51. data/ext/gqlitedb/templates/sql/postgres/call_stats.sql +22 -0
  52. data/ext/gqlitedb/templates/sql/postgres/edge_count_for_node.sql +3 -0
  53. data/ext/gqlitedb/templates/sql/postgres/edge_create.sql +6 -0
  54. data/ext/gqlitedb/templates/sql/postgres/edge_delete.sql +1 -0
  55. data/ext/gqlitedb/templates/sql/postgres/edge_delete_by_nodes.sql +4 -0
  56. data/ext/gqlitedb/templates/sql/postgres/edge_select.sql +109 -0
  57. data/ext/gqlitedb/templates/sql/postgres/edge_update.sql +4 -0
  58. data/ext/gqlitedb/templates/sql/postgres/graph_create.sql +21 -0
  59. data/ext/gqlitedb/templates/sql/postgres/graph_delete.sql +3 -0
  60. data/ext/gqlitedb/templates/sql/postgres/metadata_create_table.sql +1 -0
  61. data/ext/gqlitedb/templates/sql/postgres/metadata_get.sql +1 -0
  62. data/ext/gqlitedb/templates/sql/postgres/metadata_set.sql +1 -0
  63. data/ext/gqlitedb/templates/sql/postgres/node_create.sql +1 -0
  64. data/ext/gqlitedb/templates/sql/postgres/node_delete.sql +1 -0
  65. data/ext/gqlitedb/templates/sql/postgres/node_select.sql +32 -0
  66. data/ext/gqlitedb/templates/sql/postgres/node_update.sql +4 -0
  67. data/ext/gqlitedb/templates/sql/postgres/table_exists.sql +1 -0
  68. data/ext/gqlitedb/templates/sql/sqlite/edge_select.sql +18 -18
  69. data/ext/gqlitedb/templates/sql/sqlite/edge_update.sql +3 -3
  70. data/ext/gqlitedb/templates/sql/sqlite/metadata_get.sql +1 -1
  71. data/ext/gqlitedb/templates/sql/sqlite/metadata_set.sql +1 -1
  72. data/ext/gqlitedb/templates/sql/sqlite/node_create.sql +1 -1
  73. data/ext/gqlitedb/templates/sql/sqlite/node_select.sql +6 -6
  74. data/ext/gqlitedb/templates/sql/sqlite/node_update.sql +3 -3
  75. data/ext/gqliterb/Cargo.toml +4 -1
  76. data/ext/gqliterb/src/lib.rs +38 -15
  77. data/ext/graphcore/Cargo.toml +3 -2
  78. data/ext/graphcore/src/error.rs +2 -0
  79. data/ext/graphcore/src/lib.rs +3 -1
  80. data/ext/graphcore/src/prelude.rs +1 -1
  81. data/ext/graphcore/src/timestamp.rs +104 -0
  82. data/ext/graphcore/src/value.rs +165 -23
  83. metadata +36 -5
  84. data/ext/graphcore/release.toml +0 -1
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 38e53ea482654edda2dec42d18ba7aff7e5660f5643f80f66dfd966e21415ebd
-  data.tar.gz: ce0cb6a21adbca63605e86d753a3c20f281a2126601c5225c446d161efe97094
+  metadata.gz: 5aac591ce0775f004fc859b9cd453a940d39f9f80aa1535e2a37aae948b6baac
+  data.tar.gz: c66e83597f70d3c69ed412d5431530d1c2cc211507b68c18c7df74b65da017ef
 SHA512:
-  metadata.gz: f1b60e9c256da4300381ef244158ea61a464529e7518dfe1ee532c0e5ada468860ea08a5c0ab91304b00b5e09590774131fd7487d0bb79d2435d7c13009bd0e6
-  data.tar.gz: 4009f247ebcdc9519bfa40853b5ab8b32ab3c53fd365cf6f9dd3a25e395596dc70a2c12f7e795496695d8227428f37edfb1375cbcc59ac35e71a76ba973bd318
+  metadata.gz: 32503eae6aec73407846b1cce09f58374445473ef59122efe87280b4b756118d202199fc07da0b2ae7dd906f457997aac7b27d47eb17e490ce9c1a648c4e76cf
+  data.tar.gz: 95f14af4c49792144d5efa84ccc906c9d49ac065d808d927858d3d3a448ece3aa3137b1ece5057bb5b3534b301dd839e159a4fb745af0265efb352255c36d5c0
data/ext/Cargo.toml CHANGED
@@ -3,19 +3,20 @@ resolver = "2"
 members = ["gqliterb", "gqlitedb", "graphcore"]

 [workspace.package]
-version = "0.6.0"
+version = "0.8.0"
 edition = "2021"
 license = "MIT"
 homepage = "https://gqlite.org"
 repository = "https://gitlab.com/gqlite/gqlite"

 [workspace.dependencies]
-graphcore = { version = "0.2.0", path = "graphcore" }
-gqlitedb = { version = "0.6.0", path = "gqlitedb" }
+graphcore = { version = "0.8.0", path = "graphcore" }
+gqlitedb = { version = "0.8.0", path = "gqlitedb" }

-askama = { version = "0.14" }
+askama = { version = "0.15" }
 ccutils = { version = "0.4" }
 itertools = "0.14"
+rusqlite = { version = "0.37" }
 serde = "1"
 thiserror = "2"
 uuid = { version = "1", features = ["v4"] }
data/ext/gqlitedb/Cargo.toml CHANGED
@@ -12,19 +12,19 @@ repository.workspace = true
 crate-type = ["cdylib", "lib"]

 [features]
-default = ["redb", "capi", "sqlite"]
+default = ["capi", "redb"]
 _backtrace = []
 capi = []
 redb = ["dep:redb", "dep:redb2"]
-_pgql = ["dep:pgrx"]
+_pgrx = ["dep:pgrx"]
 _pg13 = ["pgrx/pg13"]
 _pg14 = ["pgrx/pg14"]
 _pg15 = ["pgrx/pg15"]
 _pg16 = ["pgrx/pg16"]
 _pg17 = ["pgrx/pg17"]
+postgres = ["dep:postgres", "dep:askama"]
 sqlite = ["dep:rusqlite", "dep:askama"]
-_value_private = []
-bundled = ["rusqlite/bundled"]
+_cmake = ["postgres", "redb", "sqlite"]

 [dependencies]
 graphcore = { workspace = true }
@@ -33,13 +33,17 @@ askama = { workspace = true, optional = true }
 ccutils = { workspace = true, features = ["alias", "pool", "sync"] }
 ciborium = "0.2"
 itertools = { workspace = true }
-pgrx = { version = "0.16", optional = true }
 pest = "2"
 pest_derive = "2"
+pgrx = { version = "0.16", optional = true }
+postgres = { version = "0.19", optional = true, features = [
+  "with-uuid-1",
+  "with-serde_json-1",
+] }
 rand = "0.9"
 redb = { version = "3", optional = true }
 redb2 = { version = "2", optional = true, package = "redb" }
-rusqlite = { package = "rusqlite", version = "0.37", optional = true, features = [
+rusqlite = { workspace = true, optional = true, features = [
   "functions",
   "uuid",
 ] }
@@ -54,6 +58,9 @@ divan = "0.1"
 iai-callgrind = { version = "0.16" }
 regex = "1"

+[target.'cfg(not(windows))'.dev-dependencies]
+pgtemp = "0.7"
+
 # web:
 [target.'cfg(target_arch = "wasm32")'.dependencies]
 # rusqlite_wasm = { package = "rusqlite", version = "0.37", optional = true, git = "https://github.com/Spxg/rusqlite.git", branch = "wasm-demo", features = [
@@ -68,6 +75,9 @@ uuid = { version = "1", features = ["js"] }
 # "uuid",
 # ] }

+[target.'cfg(windows)'.dependencies]
+rusqlite = { workspace = true, features = ["bundled"] }
+
 [[bench]]
 name = "pokec_divan"
 harness = false
data/ext/gqlitedb/benches/common/pokec.rs CHANGED
@@ -5,10 +5,15 @@ use gqlitedb::{value_map, Connection};
 use rand::{seq::IndexedRandom, Rng};
 use regex::Regex;

+/// Marker trait to keep backend-specific resources alive for the benchmark lifetime.
+trait BackendResource {}
+impl BackendResource for TemporaryFile {}
+impl BackendResource for pgtemp::PgTempDB {}
+
 pub(crate) struct Pokec
 {
   #[allow(dead_code)]
-  temporary_file: TemporaryFile,
+  file_handle: Box<dyn BackendResource>,
   connection: Connection,
   ids: Vec<i64>,
 }
@@ -28,17 +33,64 @@ impl Pokec
     {
       "sqlite" => gqlitedb::Backend::SQLite,
       "redb" => gqlitedb::Backend::Redb,
+      "postgres" => gqlitedb::Backend::Postgres,
       o => panic!("Unknown backend '{}'", o),
     };
-    let temporary_file = TemporaryFile::builder()
-      .should_create_file(false)
-      .label("gqlite_bench")
-      .create();
-    let connection = Connection::builder()
-      .path(temporary_file.path())
-      .backend(backend)
-      .create()
-      .unwrap();
+    let (file_handle, connection): (Box<dyn BackendResource>, _) = match backend
+    {
+      gqlitedb::Backend::Redb | gqlitedb::Backend::SQLite =>
+      {
+        let temporary_file = TemporaryFile::builder()
+          .should_create_file(false)
+          .label("gqlite_bench")
+          .create();
+        let connection = Connection::builder()
+          .path(temporary_file.path())
+          .backend(backend)
+          .create()
+          .unwrap();
+        (Box::new(temporary_file), connection)
+      }
+      gqlitedb::Backend::Postgres =>
+      {
+        use std::{
+          thread::sleep,
+          time::{Duration, Instant},
+        };
+        let output = std::process::Command::new("pg_config")
+          .arg("--bindir")
+          .output()
+          .expect("failed to execute pg_config");
+
+        let bin_path = String::from_utf8_lossy(&output.stdout).trim().to_string();
+
+        let db = pgtemp::PgTempDB::builder().with_bin_path(bin_path).start();
+
+        const MAX_WAIT: Duration = Duration::from_secs(30);
+        const INTERVAL: Duration = Duration::from_secs(1);
+
+        let deadline = Instant::now() + MAX_WAIT;
+
+        let mut connection = Connection::builder()
+          .set_option("url", db.connection_uri())
+          .backend(backend)
+          .create();
+        while connection.is_err() && Instant::now() < deadline
+        {
+          sleep(INTERVAL);
+          connection = Connection::builder()
+            .set_option("url", db.connection_uri())
+            .backend(backend)
+            .create()
+        }
+        let connection = connection.expect("Failed to connect to temporary Postgres DB within 30s");
+        (Box::new(db), connection)
+      }
+      gqlitedb::Backend::Automatic =>
+      {
+        panic!("Should not be selected.")
+      }
+    };

     let filename = match size
     {
@@ -52,7 +104,7 @@ impl Pokec
       .execute_oc_query(import_query, Default::default())
       .unwrap();
     Self {
-      temporary_file,
+      file_handle,
       connection,
       ids: Default::default(),
     }
data/ext/gqlitedb/benches/pokec_divan.rs CHANGED
@@ -23,13 +23,13 @@ fn load_pokec(backend: &str) -> Pokec
 }

 // Import the tiny `pokec` dataset.
-#[divan::bench(args = ["sqlite", "redb"], sample_count = IMPORT_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = IMPORT_COUNT)]
 fn import_tiny_pokec(backend: &str)
 {
   let _ = load_pokec(backend);
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
 fn single_vertex(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -40,7 +40,7 @@ fn single_vertex(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = SINGLE_VERTEX_COUNT)]
 fn single_vertex_where(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -51,7 +51,7 @@ fn single_vertex_where(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
 fn friend_of(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -62,7 +62,7 @@ fn friend_of(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
 fn friend_of_filter(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -73,7 +73,7 @@ fn friend_of_filter(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
 fn friend_of_friend_of(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -84,7 +84,7 @@ fn friend_of_friend_of(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
 fn friend_of_friend_of_filter(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -95,7 +95,7 @@ fn friend_of_friend_of_filter(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
+#[divan::bench(args = ["postgres", "sqlite", "redb"], sample_count = FRIEND_OF_COUNT)]
 fn reciprocal_friends(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend).read_ids();
@@ -106,7 +106,7 @@ fn reciprocal_friends(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"])]
+#[divan::bench(args = ["postgres", "sqlite", "redb"])]
 fn aggregate_count(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend);
@@ -116,7 +116,7 @@ fn aggregate_count(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"])]
+#[divan::bench(args = ["postgres", "sqlite", "redb"])]
 fn aggregate_count_filter(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend);
@@ -126,7 +126,7 @@ fn aggregate_count_filter(bencher: Bencher, backend: &str)
   });
 }

-#[divan::bench(args = ["sqlite", "redb"])]
+#[divan::bench(args = ["postgres", "sqlite", "redb"])]
 fn aggregate_min_max_avg(bencher: Bencher, backend: &str)
 {
   let tiny_pokec = load_pokec(backend);
data/ext/gqlitedb/benches/pokec_iai.rs CHANGED
@@ -18,6 +18,7 @@ fn load_with_ids(backend: &str) -> Pokec
 }

 #[library_benchmark]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn import_micro_pokec(backend: &str)
@@ -26,6 +27,7 @@ fn import_micro_pokec(backend: &str)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn single_vertex(micro_pokec: Pokec)
@@ -35,6 +37,7 @@ fn single_vertex(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn single_vertex_where(micro_pokec: Pokec)
@@ -44,6 +47,7 @@ fn single_vertex_where(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn friend_of(micro_pokec: Pokec)
@@ -53,6 +57,7 @@ fn friend_of(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn friend_of_filter(micro_pokec: Pokec)
@@ -62,6 +67,7 @@ fn friend_of_filter(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn friend_of_friend_of(micro_pokec: Pokec)
@@ -71,6 +77,7 @@ fn friend_of_friend_of(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn friend_of_friend_of_filter(micro_pokec: Pokec)
@@ -80,6 +87,7 @@ fn friend_of_friend_of_filter(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_with_ids)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn reciprocal_friends(micro_pokec: Pokec)
@@ -89,6 +97,7 @@ fn reciprocal_friends(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_pokec)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn aggregate_count(micro_pokec: Pokec)
@@ -97,6 +106,7 @@ fn aggregate_count(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_pokec)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn aggregate_count_filter(micro_pokec: Pokec)
@@ -105,6 +115,7 @@ fn aggregate_count_filter(micro_pokec: Pokec)
 }

 #[library_benchmark(setup = load_pokec)]
+#[bench::postgres("postgres")]
 #[bench::redb("redb")]
 #[bench::sqlite("sqlite")]
 fn aggregate_min_max_avg(micro_pokec: Pokec)
data/ext/gqlitedb/src/aggregators/arithmetic.rs CHANGED
@@ -46,6 +46,7 @@ where
       | Value::Edge(..)
       | Value::Array(..)
       | Value::String(..)
+      | Value::TimeStamp(..)
       | Value::Map(..)
       | Value::Path(..) => Err(RunTimeError::InvalidBinaryOperands)?,
       Value::Null =>
data/ext/gqlitedb/src/aggregators/containers.rs CHANGED
@@ -21,7 +21,10 @@ impl AggregatorState for CollectState
 {
   fn next(&mut self, value: Value) -> crate::Result<()>
   {
-    self.value.push(value);
+    if !value.is_null()
+    {
+      self.value.push(value);
+    }
     Ok(())
   }
   fn finalise(self: Box<Self>) -> crate::Result<crate::value::Value>
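With this change, `collect()` skips null inputs instead of including them in the resulting array. Below is a self-contained sketch of the new behaviour; the `Value` enum and `collect` helper are simplified stand-ins for illustration, not the gqlitedb types.

```rust
// Simplified illustration of the null-skipping collect() semantics above.
#[derive(Debug, PartialEq)]
enum Value
{
  Null,
  Int(i64),
}

fn collect(values: Vec<Value>) -> Vec<Value>
{
  // Mirrors CollectState::next: null values are never pushed.
  values.into_iter().filter(|v| !matches!(v, Value::Null)).collect()
}

fn main()
{
  let collected = collect(vec![Value::Int(1), Value::Null, Value::Int(2)]);
  assert_eq!(collected, vec![Value::Int(1), Value::Int(2)]);
}
```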
data/ext/gqlitedb/src/aggregators/stats.rs CHANGED
@@ -144,3 +144,132 @@ impl AggregatorState for MaxState
 }

 super::declare_aggregator!(max, Max, MaxState, () -> i64);
+
+#[derive(Debug)]
+struct PercentileDiscState
+{
+  values: Vec<f64>,
+  percentile: f64,
+}
+
+impl PercentileDiscState
+{
+  fn new(percentile: value::Value) -> Result<Self>
+  {
+    Ok(Self {
+      values: Default::default(),
+      percentile: percentile.try_into()?,
+    })
+  }
+}
+
+impl AggregatorState for PercentileDiscState
+{
+  fn next(&mut self, value: value::Value) -> crate::Result<()>
+  {
+    if !value.is_null()
+    {
+      self.values.push(value.try_into()?);
+    }
+    Ok(())
+  }
+  fn finalise(self: Box<Self>) -> crate::Result<crate::value::Value>
+  {
+    if !self.percentile.is_finite() || !(0.0..=1.0).contains(&self.percentile)
+    {
+      return Err(
+        RunTimeError::NumberOutOfRange {
+          value: self.percentile,
+          min: 0.0,
+          max: 1.0,
+        }
+        .into(),
+      );
+    }
+
+    if self.values.is_empty()
+    {
+      return Ok(0.0.into());
+    }
+
+    let mut sorted = self.values;
+    sorted.sort_by(|a, b| a.total_cmp(b));
+
+    let n = sorted.len() as f64;
+    let index = (self.percentile * (n - 1.0)).round() as usize;
+
+    Ok(sorted[index].into())
+  }
+}
+
+super::declare_aggregator!(percentiledisc, PercentileDisc, PercentileDiscState, (value::Value) -> f64);
+
+#[derive(Debug)]
+struct PercentileContState
+{
+  values: Vec<f64>,
+  percentile: f64,
+}
+
+impl PercentileContState
+{
+  fn new(percentile: value::Value) -> Result<Self>
+  {
+    Ok(Self {
+      values: Default::default(),
+      percentile: percentile.try_into()?,
+    })
+  }
+}
+
+impl AggregatorState for PercentileContState
+{
+  fn next(&mut self, value: value::Value) -> crate::Result<()>
+  {
+    if !value.is_null()
+    {
+      self.values.push(value.try_into()?);
+    }
+    Ok(())
+  }
+  fn finalise(self: Box<Self>) -> crate::Result<crate::value::Value>
+  {
+    if !self.percentile.is_finite() || !(0.0..=1.0).contains(&self.percentile)
+    {
+      return Err(
+        RunTimeError::NumberOutOfRange {
+          value: self.percentile,
+          min: 0.0,
+          max: 1.0,
+        }
+        .into(),
+      );
+    }
+
+    if self.values.is_empty()
+    {
+      return Ok(0.0.into());
+    }
+
+    let mut sorted = self.values;
+    sorted.sort_by(|a, b| a.total_cmp(b));
+
+    let n = sorted.len() as f64;
+    let pos = self.percentile * (n - 1.0);
+    let lower_index = pos.floor() as usize;
+    let upper_index = lower_index + 1;
+
+    if lower_index >= sorted.len() - 1
+    {
+      return Ok(sorted[sorted.len() - 1].into());
+    }
+
+    let weight = pos - pos.floor();
+    let lower = sorted[lower_index];
+    let upper = sorted[upper_index];
+
+    Ok((lower + weight * (upper - lower)).into())
+  }
+}
+
+super::declare_aggregator!(percentilecont, PercentileCont, PercentileContState, (value::Value) -> f64);
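For reference, here is a standalone sketch of the interpolation the new `percentileCont` aggregator performs (the `percentile_cont` helper and `main` harness are illustrative only, not gqlitedb API): the requested percentile p maps to position p * (n - 1) in the sorted sample and the result is interpolated linearly between the two neighbouring values, while `percentileDisc` rounds to the nearest rank and returns an actual sample value.

```rust
// Illustrative only: mirrors the finalise() logic above, outside gqlitedb.
fn percentile_cont(mut values: Vec<f64>, p: f64) -> Option<f64>
{
  if values.is_empty() || !p.is_finite() || !(0.0..=1.0).contains(&p)
  {
    return None;
  }
  values.sort_by(|a, b| a.total_cmp(b));
  let pos = p * (values.len() - 1) as f64;
  let lower = pos.floor() as usize;
  let upper = (lower + 1).min(values.len() - 1);
  let weight = pos - pos.floor();
  Some(values[lower] + weight * (values[upper] - values[lower]))
}

fn main()
{
  // p = 0.5 over [10, 20, 30, 40]: pos = 1.5, halfway between 20 and 30 -> 25.
  assert_eq!(percentile_cont(vec![10.0, 20.0, 30.0, 40.0], 0.5), Some(25.0));
  // percentileDisc at the same point rounds the position to rank 2 and returns 30.
}
```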
data/ext/gqlitedb/src/aggregators.rs CHANGED
@@ -69,6 +69,8 @@ pub(crate) fn init_aggregators() -> std::collections::HashMap<String, Aggregator
     stats::Avg::create(),
     stats::Min::create(),
     stats::Max::create(),
+    stats::PercentileDisc::create(),
+    stats::PercentileCont::create(),
   ]
   .into()
 }
data/ext/gqlitedb/src/compiler/expression_analyser.rs CHANGED
@@ -21,6 +21,7 @@ pub(crate) enum ExpressionType
   Float,
   Path,
   String,
+  TimeStamp,
   Variant,
 }

@@ -419,6 +420,7 @@ impl<'b> Analyser<'b>
         value::Value::Map(_) => ExpressionType::Map,
         value::Value::Path(_) => ExpressionType::Path,
         value::Value::String(_) => ExpressionType::String,
+        value::Value::TimeStamp(_) => ExpressionType::TimeStamp,
       },
       true,
       false,
data/ext/gqlitedb/src/compiler.rs CHANGED
@@ -942,6 +942,7 @@ pub(crate) fn compile(
   {
     ast::Statement::CreateGraph(create_graph) => Ok(Block::CreateGraph {
       name: create_graph.name.to_owned(),
+      if_not_exists: create_graph.if_not_exists,
     }),
     ast::Statement::DropGraph(drop_graph) => Ok(Block::DropGraph {
       name: drop_graph.name.to_owned(),
data/ext/gqlitedb/src/connection.rs CHANGED
@@ -4,16 +4,20 @@ use crate::{prelude::*, QueryResult};
 use value::ValueTryIntoRef;

 /// Backend
+#[derive(Debug, Clone, Copy)]
 pub enum Backend
 {
   /// Select the first available backend.
   Automatic,
-  /// SQLite backend.
-  #[cfg(feature = "sqlite")]
-  SQLite,
+  /// Postgres backend.
+  #[cfg(feature = "postgres")]
+  Postgres,
   /// Redb backend.
   #[cfg(feature = "redb")]
   Redb,
+  /// SQLite backend.
+  #[cfg(feature = "sqlite")]
+  SQLite,
 }

 /// Builder with high-level API for creating connection.
@@ -25,12 +29,16 @@ pub struct ConnectionBuilder
 impl ConnectionBuilder
 {
   /// Merge options. This might overwrite value from the builder
-  pub fn options(mut self, options: value::ValueMap) -> Self
+  pub fn options(mut self, options: impl Into<value::ValueMap>) -> Self
   {
-    for (k, v) in options.into_iter()
-    {
-      self.map.insert(k, v);
-    }
+    let mut options = options.into();
+    self.map.extend(options.drain());
+    self
+  }
+  /// Set the option value for the given key.
+  pub fn set_option(mut self, key: impl Into<String>, value: impl Into<value::Value>) -> Self
+  {
+    self.map.insert(key.into(), value.into());
     self
   }
   /// Set path
@@ -52,16 +60,21 @@ impl ConnectionBuilder
       {
         self.map.insert(key, "automatic".into());
       }
-      #[cfg(feature = "sqlite")]
-      Backend::SQLite =>
+      #[cfg(feature = "postgres")]
+      Backend::Postgres =>
       {
-        self.map.insert(key, "sqlite".into());
+        self.map.insert(key, "postgres".into());
       }
       #[cfg(feature = "redb")]
       Backend::Redb =>
       {
         self.map.insert(key, "redb".into());
       }
+      #[cfg(feature = "sqlite")]
+      Backend::SQLite =>
+      {
+        self.map.insert(key, "sqlite".into());
+      }
     }
     self
   }
@@ -204,7 +217,7 @@ impl Connection

     sq_r.map_err(|rb_e| {
       StoreError::OpeningError {
-        errors: error::vec_to_error(&vec![sq_e, rb_e]),
+        errors: error::vec_to_error(&[sq_e, rb_e]),
       }
       .into()
     })
@@ -251,6 +264,38 @@ impl Connection
         .boxed(),
       })
     }
+    #[cfg(feature = "postgres")]
+    "postgres" =>
+    {
+      let mut config = postgres::Config::new();
+      if let Some(url) = options.get("url")
+      {
+        config = url.to_string().parse()?;
+      }
+      if let Some(host) = options.get("host")
+      {
+        let host: &String = host.try_into_ref()?;
+        config.host(host);
+      }
+      if let Some(user) = options.get("user")
+      {
+        let user: &String = user.try_into_ref()?;
+        config.user(user);
+      }
+      if let Some(password) = options.get("password")
+      {
+        let password: &String = password.try_into_ref()?;
+        config.password(password);
+      }
+      let store = store::postgres::Store::connect(config)?;
+      Ok(Connection {
+        connection: ConnectionImpl {
+          store,
+          function_manager: functions::Manager::new(),
+        }
+        .boxed(),
+      })
+    }
     _ => Err(StoreError::UnknownBackend { backend }.into()),
   }
 }
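Taken together with the benchmark changes above, the new backend can be opened through the builder API. A minimal sketch is shown below; the connection URL and helper name are placeholders, the option key mirrors the `url` lookup added to `Connection::create`, and the `String`-to-value conversion is assumed to behave as it does in the pokec benchmark.

```rust
use gqlitedb::{Backend, Connection};

// Hypothetical helper demonstrating the builder calls introduced in this release.
fn open_postgres(url: &str) -> Connection
{
  Connection::builder()
    .set_option("url", url.to_string())
    .backend(Backend::Postgres)
    .create()
    .expect("failed to open the gqlite Postgres backend")
}
```

The same pattern accepts individual `host`, `user`, and `password` options, matching the keys read by the `"postgres"` arm of `Connection::create`.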