monetdb 2.0.0 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -4
- package/docs/pages/_meta.json +3 -1
- package/docs/pages/apis/connection.mdx +1 -1
- package/docs/pages/filetransfer.mdx +43 -0
- package/docs/pages/prepstmt.mdx +13 -0
- package/docs/pages/result.mdx +41 -0
- package/package.json +1 -1
- package/src/defaults.ts +7 -9
- package/src/mapi.ts +1 -1
- package/test/exec-queries.ts +12 -0
package/README.md
CHANGED
@@ -1,8 +1,8 @@
|
|
1
1
|
# MonetDB Node.js
|
2
2
|
|
3
|
-
|
4
|
-
|
5
|
-
|
3
|
+

|
4
|
+

|
5
|
+

|
6
6
|
|
7
7
|
Node.js connector for MonetDB.
|
8
8
|
|
@@ -39,7 +39,7 @@ const conn = new Connection('mapi:monetdb://<username>:<password>@<hostname>:<po
|
|
39
39
|
## Features
|
40
40
|
- prepared statements
|
41
41
|
- streaming query results
|
42
|
-
- bulk import & export with `COPY
|
42
|
+
- bulk import & export with `COPY INTO`
|
43
43
|
## Contributing
|
44
44
|
|
45
45
|
**We :heart: contributions!**
|
package/docs/pages/_meta.json
CHANGED
@@ -0,0 +1,43 @@
|
|
1
|
+
## File Transfer
|
2
|
+
MonetDB supports the non-standard `COPY INTO` statement to load a CSV-like
|
3
|
+
text file into a table or to dump a table into a text file. This statement has an
|
4
|
+
optional modifier `ON CLIENT` to indicate that the server should not
|
5
|
+
try to open the file on the server side but instead ask the client to open it
|
6
|
+
on its behalf.
|
7
|
+
|
8
|
+
For example::
|
9
|
+
```sql
|
10
|
+
COPY INTO mytable FROM 'data.csv' ON CLIENT
|
11
|
+
USING DELIMITERS ',', E'\n', '"';
|
12
|
+
```
|
13
|
+
For security reasons `monetdb-nodejs` enforces files to be relative to the current
|
14
|
+
working directory of the Node.js process.
|
15
|
+
## Skip rows and early cancellation
|
16
|
+
MonetDB's `COPY INTO` statement allows you to skip, for example, the first
|
17
|
+
line in a file using the modifier `OFFSET 2`, and load `n` records from the file using
|
18
|
+
`RECORDS` modifier.
|
19
|
+
```sql
|
20
|
+
COPY 100 RECORDS OFFSET 2 INTO mytable FROM 'data.csv' ON CLIENT
|
21
|
+
```
|
22
|
+
, for detailed documentation on `COPY INTO` statement please visit [MonetDB documentation](https://www.monetdb.org/documentation-Jun2023/user-guide/sql-manual/data-loading/)
|
23
|
+
## Example
|
24
|
+
Assume `data.csv` with the following content
|
25
|
+
```bash
|
26
|
+
cat<<EOF>data.csv
|
27
|
+
1|one
|
28
|
+
2|two
|
29
|
+
3|three
|
30
|
+
EOF
|
31
|
+
```
|
32
|
+
, then load that into MonetDB
|
33
|
+
```ts
|
34
|
+
import {Connection} from 'monetdb';
|
35
|
+
|
36
|
+
const conn = new Connection({database: 'test'});
|
37
|
+
const ready = await conn.connect();
|
38
|
+
await conn.execute('create table foo(i int, a varchar(10))');
|
39
|
+
let res = await conn.execute(`copy into foo from \'data.csv\' on client`);
|
40
|
+
res = await conn.execute('select * from foo order by i');
|
41
|
+
console.log(res.data);
|
42
|
+
// [[1, 'one'], [2, 'two'], [3, 'three']]
|
43
|
+
```
|
@@ -0,0 +1,13 @@
|
|
1
|
+
## Prepare Statement
|
2
|
+
The PREPARE statement compiles an SQL statement into its execution plan on the server. This is useful for statements which need to be executed many times but with different values each time, such as an INSERT or UPDATE or SELECT query.
|
3
|
+
```ts
|
4
|
+
const ready = await conn.connect();
|
5
|
+
let res = await conn.execute('create table foo(a int, b boolean, c string, d date, f decimal)');
|
6
|
+
const prepStmt = await conn.prepare('insert into foo values (?, ?, ?, ?, ?)');
|
7
|
+
res = await prepStmt.execute(1, true, 'first', '2022-12-12', 1.11);
|
8
|
+
res = await prepStmt.execute(2, false, 'second', '2022-12-12', 2.22);
|
9
|
+
res = await prepStmt.execute(3, true, 'third', '2022-12-12', 3.33);
|
10
|
+
await prepStmt.release();
|
11
|
+
```
|
12
|
+
For more information on prepare statement please visit [MonetDB documentation](https://www.monetdb.org/documentation-Jun2023/user-guide/sql-manual/data-manipulation/prepare-statement/).
|
13
|
+
|
@@ -0,0 +1,41 @@
|
|
1
|
+
## Query result size
|
2
|
+
Query result size can be set with `replySize` `Connection` option. It defines the number of data rows returned in the initial response.
|
3
|
+
By default this option is set to `-1`, which means fetch all the data. If `replySize` is set to any positive number, only that many
|
4
|
+
data rows will be fetched, while the rest will stay cached at the server. The `replySize` can be set initially when connection is created, or by invoking `setReplySize` method on the `Connection` object.
|
5
|
+
```ts
|
6
|
+
// set replySize to 100
|
7
|
+
const conn = new Connection({database: 'test', replySize: 100});
|
8
|
+
await conn.connect();
|
9
|
+
let res = await conn.execute("select * from generate_series(1, 1001)");
|
10
|
+
console.log(res.data.length);
|
11
|
+
// 100
|
12
|
+
await conn.setReplySize(-1); // back to default
|
13
|
+
res = await conn.execute("select * from generate_series(1, 1001)");
|
14
|
+
console.log(res.data.length);
|
15
|
+
// 1000
|
16
|
+
```
|
17
|
+
## Streaming query result
|
18
|
+
When a query result is quite large it's often useful to start processing the data chunks as soon as they are available
|
19
|
+
on the client. This can be achieved by invoking `execute` method on the `Connection` object with `stream=true`.
|
20
|
+
```ts
|
21
|
+
// execute(sql: string, stream?: boolean): Promise<any>;
|
22
|
+
const ready = await conn.connect();
|
23
|
+
const stream: QueryStream = await conn.execute('select * from generate_series(1, 10000)', true);
|
24
|
+
const colInfo = [];
|
25
|
+
const data = [];
|
26
|
+
stream.on('header', (cols: any[]) => {
|
27
|
+
// do something with col info
|
28
|
+
});
|
29
|
+
|
30
|
+
stream.on('data', (tuples: any[]) => {
|
31
|
+
for (let t of tuples) {
|
32
|
+
data.push(t);
|
33
|
+
}
|
34
|
+
});
|
35
|
+
|
36
|
+
stream.on('end', () => {
|
37
|
+
// do something on end of streaming
|
38
|
+
});
|
39
|
+
|
40
|
+
await conn.close();
|
41
|
+
```
|
package/package.json
CHANGED
package/src/defaults.ts
CHANGED
@@ -1,13 +1,11 @@
|
|
1
|
-
|
2
1
|
const defaults = {
|
3
|
-
|
4
|
-
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
2
|
+
host: process.env.MAPI_HOST || "localhost",
|
3
|
+
port: process.env.MAPI_PORT || 50000,
|
4
|
+
username: process.env.MAPI_USER || "monetdb",
|
5
|
+
password: process.env.MAPI_PASSWORD || "monetdb",
|
6
|
+
database: process.env.MAPI_DATABASE,
|
7
|
+
autoCommit: false,
|
8
|
+
replySize: -1,
|
10
9
|
};
|
11
10
|
|
12
11
|
export default defaults;
|
13
|
-
|
package/src/mapi.ts
CHANGED
@@ -985,7 +985,7 @@ class MapiConnection extends EventEmitter {
|
|
985
985
|
}
|
986
986
|
|
987
987
|
if (resp.isMsgMore()) {
|
988
|
-
console.log("server wants more");
|
988
|
+
// console.log("server wants more");
|
989
989
|
if (resp.fileHandler instanceof FileUploader)
|
990
990
|
return resp.settle(resp.fileHandler.upload());
|
991
991
|
}
|
package/test/exec-queries.ts
CHANGED
@@ -20,6 +20,18 @@ describe("Exec queres", function () {
|
|
20
20
|
assert.equal(res.data.length, 9);
|
21
21
|
});
|
22
22
|
|
23
|
+
it("should respect replySize", async function () {
|
24
|
+
const ready = await conn.connect();
|
25
|
+
assert(ready, new Error("failed to connect"));
|
26
|
+
let res = await conn.execute("select * from generate_series(1, 1001)");
|
27
|
+
// default reply size
|
28
|
+
assert.equal(res.data.length, 1000);
|
29
|
+
await conn.setReplySize(10);
|
30
|
+
assert.equal(conn.replySize, 10);
|
31
|
+
res = await conn.execute("select * from generate_series(1, 1001)");
|
32
|
+
assert.equal(res.data.length, 10);
|
33
|
+
});
|
34
|
+
|
23
35
|
it("should handle many queres", async function () {
|
24
36
|
const ready = await conn.connect();
|
25
37
|
assert(ready, new Error("failed to connect"));
|