@opengis/fastify-table 1.2.61 → 1.2.63
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,8 @@
 import { createHash } from 'node:crypto';

+import config from '../../../../config.js';
 import getRedis from '../../redis/funcs/getRedis.js';
+import logger from '../../logger/getLogger.js';

 const rclient = getRedis({ db: 0 });

@@ -18,20 +20,35 @@ async function init(client) {
     from pg_class where relkind in ('r','v')`);
   const relkinds = rows.reduce((acc, curr) => Object.assign(acc, { [curr.tname]: curr.relkind }), {});

-
-
+  // const { queryOriginal = client.query } = client;
+
+  async function query(q, args = []) {
+    try {
+      const data = await client.query(q, args);
+      return data;
+    } catch (err) {
+      if (err.message === 'canceling statement due to statement timeout') {
+        logger.file('timeout/query', { q, stack: err.stack });
+        return { rows: [], timeout: true };
+      }
+      throw new Error(err);
+    }
+  }
+
+  async function one(q, param = {}) {
+    const data = await query(q, Array.isArray(param) ? param : param.args || []);
     const result = ((Array.isArray(data) ? data.pop() : data)?.rows || [])[0] || {};
     return result;
   }

-  async function queryNotice(
+  async function queryNotice(q, args = [], cb = () => { }) {
     const clientCb = await client.connect();
     clientCb.on('notice', (e) => {
       cb(e.message);
     });
     let result;
     try {
-      result = await clientCb.query(
+      result = await clientCb.query(q, args);
       clientCb.end();
     }
     catch (err) {
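The query() wrapper added in this hunk turns PostgreSQL statement-timeout errors into data rather than exceptions, so callers can branch on a timeout flag instead of wrapping every call in try/catch. A minimal sketch of that contract follows; the stubbed client and the sample SQL are illustrative only and not part of the package:

const fakeClient = {
  // Stand-in for a pg.Pool: throws the same error message the server sends on statement_timeout.
  async query(q) {
    if (q.includes('slow')) {
      throw new Error('canceling statement due to statement timeout');
    }
    return { rows: [{ ok: true }] };
  },
};

async function query(client, q, args = []) {
  try {
    return await client.query(q, args);
  } catch (err) {
    if (err.message === 'canceling statement due to statement timeout') {
      return { rows: [], timeout: true }; // callers check the flag instead of catching
    }
    throw err;
  }
}

const fast = await query(fakeClient, 'select 1');
const slow = await query(fakeClient, 'select slow');
console.log(fast.rows.length, slow.timeout); // 1 true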
@@ -42,16 +59,16 @@ async function init(client) {
     return result;
   }

-  async function queryCache(
+  async function queryCache(q, param = {}) {
     const { table, args = [], time = 15 } = param;
-    const seconds = typeof time !== 'number' || time < 0 ? 15 : time * 60;
+    const seconds = typeof time !== 'number' || time < 0 ? 15 : (config.local ? 0 : time * 60);

     // CRUD table state
     const keyCacheTable = `pg:${table}:crud`;
     const crudInc = table ? await rclient.get(keyCacheTable) || 0 : 0;

     //
-    const hash = createHash('sha1').update([
+    const hash = createHash('sha1').update([q, JSON.stringify(args)].join()).digest('base64');
     const keyCache = `pg:${hash}:${crudInc}`;

     const cacheData = await rclient.get(keyCache);
@@ -61,7 +78,7 @@ async function init(client) {
       return JSON.parse(cacheData);
     }

-    const data = await
+    const data = await query(q, args || []);

     if (seconds > 0) {
       rclient.set(keyCache, JSON.stringify(data), 'EX', seconds);
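The rewritten queryCache() keys Redis entries on a SHA-1 of the SQL text plus its arguments, suffixed with the per-table pg:<table>:crud counter; when that counter changes, every previously cached key for the table is simply never looked up again and expires via its TTL. A small sketch of the key derivation, using invented values for the query, args, and counter:

import { createHash } from 'node:crypto';

// Hypothetical inputs: the SQL text, its bind args, and the current value of `pg:roads:crud`
// (presumably bumped elsewhere when the table is modified).
const q = 'select id, name from roads where speed > $1';
const args = [60];
const crudInc = 3;

// Same derivation as queryCache(): hash statement + args, then suffix the table counter.
const hash = createHash('sha1').update([q, JSON.stringify(args)].join()).digest('base64');
const keyCache = `pg:${hash}:${crudInc}`;

console.log(keyCache); // pg:<base64 sha1>:3 — a write that bumps the counter to 4 orphans this entry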
@@ -71,7 +88,7 @@ async function init(client) {
   }

   Object.assign(client, {
-    one, pgType, pk, tlist, relkinds, queryCache, queryNotice,
+    one, pgType, pk, tlist, relkinds, queryCache, queryNotice, /* queryOriginal, */
   });
 }

@@ -1,21 +1,22 @@
-import pg from 'pg';
-
-import config from '../../../config.js';
-import init from './funcs/init.js';
-
-const pgClients = {};
-if (config.pg) {
-  const client = new pg.Pool({
-    host: config.pg?.host || '127.0.0.1',
-    port: config.pg?.port || 5432,
-    database: config.pg?.database || 'postgres',
-    user: config.pg?.user || 'postgres',
-    password: config.pg?.password || 'postgres',
-
-
-
-
-
-
-
-
+import pg from 'pg';
+
+import config from '../../../config.js';
+import init from './funcs/init.js';
+
+const pgClients = {};
+if (config.pg) {
+  const client = new pg.Pool({
+    host: config.pg?.host || '127.0.0.1',
+    port: config.pg?.port || 5432,
+    database: config.pg?.database || 'postgres',
+    user: config.pg?.user || 'postgres',
+    password: config.pg?.password || 'postgres',
+    statement_timeout: config.pg?.statement_timeout || 10000,
+  });
+  client.init = async () => {
+    await init(client);
+  };
+  client.init();
+  pgClients.client = client;
+}
+export default pgClients;
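The pool now sets statement_timeout, which node-postgres accepts as a connection option in milliseconds; it is what produces the "canceling statement due to statement timeout" errors that the query() wrapper above converts into { rows: [], timeout: true }. A sketch of the resulting configuration, with placeholder connection values standing in for config.pg:

import pg from 'pg';

// Placeholder credentials; in the package these come from config.pg.
const pool = new pg.Pool({
  host: '127.0.0.1',
  port: 5432,
  database: 'postgres',
  user: 'postgres',
  password: 'postgres',
  statement_timeout: 10000, // milliseconds: long-running statements are cancelled server-side after 10 s
});

// Any statement exceeding the limit rejects with "canceling statement due to statement timeout",
// which init()'s query() turns into a timeout flag instead of an exception.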
@@ -1,4 +1,4 @@
-import {
+import { logger, autoIndex, getSelect, getFilterSQL, getTemplate, getSelectVal, pgClients } from '../../../../utils.js';

 export default async function filterAPI(req) {
   const time = Date.now();
@@ -44,7 +44,7 @@ export default async function filterAPI(req) {

   filters?.forEach?.(el => Object.assign(el, { id: el.id || el.name }));

-  await Promise.all(filters.filter((el) => el.data && el.id).map(async (el) => {
+  await Promise.all(filters.filter((el) => el.data && el.id && el.type !== 'Autocomplete').map(async (el) => {
     const cls = await getSelect(el.data, pg);

     if (!cls || !loadTable.table) return;
@@ -60,44 +60,42 @@ export default async function filterAPI(req) {
     }

     const q = pg.pgType[dataTypeID]?.includes('[]')
-      ? `select unnest(${el.id})::text as id,count(*) from (${optimizedSQL})q group by unnest(${el.id}) limit
-      : `select ${el.id}::text as id,count(*) from (${optimizedSQL})q group by ${el.id} limit
+      ? `select unnest(${el.id})::text as id,count(*) from (${optimizedSQL})q group by unnest(${el.id}) limit 100`
+      : `select ${el.id}::text as id,count(*) from (${optimizedSQL})q group by ${el.id} limit 100`;
+
+    const countArr = await pg.queryCache(q, { table: loadTable.table });
+    if (countArr.timeout) {
+      Object.assign(el, { timeout: countArr.timeout });
+      console.log('timeout filter', params.table, el.id);
+      logger.file('timeout/filter', { table: params.table, type: 'cls', filter: el.id });
+    }

-
-    if (config.debug) console.log('filter options start', el.id, 'array', pg.pgType[dataTypeID]?.includes('[]'));
-    const countArr = await pg.queryCache(q, { table: loadTable.table, time: config.local ? 0 : undefined });
-    if (config.debug) console.log('filter options finish', el.id, 'array', pg.pgType[dataTypeID]?.includes('[]'));
+    const ids = countArr.rows.map(el1 => el1.id);

-
+    const clsData = await getSelectVal({ pg, values: ids, name: el.data });

-
+    const options = countArr.rows.map(cel => {
+      const data = cls?.arr?.find(c => c.id === cel.id) || { text: clsData[cel.id] };
+      return { ...cel, ...data };
+    });
+    Object.assign(el, { options });

-    const options = countArr.rows.map(cel => {
-      const data = cls?.arr?.find(c => c.id === cel.id) || { text: clsData[cel.id] };
-      return { ...cel, ...data };
-    });
-    Object.assign(el, { options });
-    } catch (err) {
-      Object.assign(el, { timeout: true });
-      logger.file('timeout', { table: params?.table, filter: el.id, stack: err.stack });
-      console.log('filter query timeout', params.table, el.id);
-    }
   }));

   const q = ((loadTable?.filterState || []).concat(loadTable?.filterCustom || [])).filter((el) => el.name && el.sql).map((el) => `select count(*), '${el.name}' as name from (${optimizedSQL})q where ${el.sql}`).join(' union all ');

-
-
-
-
-
-
-  });
-
-
-  logger.file('timeout', { table: params?.table, filter: 'sql', stack: err.stack });
+
+  const { rows = [], timeout: timeout1 } = q ? await pg.queryCache(q) : {};
+  if (timeout1) logger.file('timeout/filter', { table: params.table, type: 'state/custom' });
+
+  if (rows?.length) {
+    ((loadTable?.filterState || []).concat(loadTable?.filterCustom || [])).filter((el) => el.name && el.sql).forEach((el) => {
+      const { count } = rows.find((row) => row.name === el.name) || {};
+      Object.assign(el, { count, sql: undefined });
+    });
   }

+
   const sqlList = loadTable?.sql
     ?.filter((el) => !el.disabled && el?.sql?.replace)
     ?.map((el, i) => ` left join lateral (${el.filter ? el.sql.replace(/limit 1/ig, '') : el.sql}) as ${el.name || `t${i + 1}`} on 1=1 `)
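After the block above, each eligible filter either carries a timeout flag or an options array that merges the per-value counts with dictionary labels from getSelect/getSelectVal. A stand-alone sketch of the resulting shape, with stubbed inputs (all values below are invented):

// Stubbed stand-ins for the real data sources.
const countArr = { rows: [{ id: '1', count: '42' }, { id: '2', count: '7' }] }; // from pg.queryCache
const cls = { arr: [{ id: '1', text: 'Asphalt' }] };                            // from getSelect
const clsData = { 2: 'Gravel' };                                                // from getSelectVal

const options = countArr.rows.map(cel => {
  const data = cls?.arr?.find(c => c.id === cel.id) || { text: clsData[cel.id] };
  return { ...cel, ...data };
});

console.log(options);
// [ { id: '1', count: '42', text: 'Asphalt' }, { id: '2', count: '7', text: 'Gravel' } ]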
@@ -105,35 +103,36 @@ export default async function filterAPI(req) {

   // percentile_cont - alternative
   await Promise.all(filters.filter((el) => el.name && el.type === 'Range' && fields?.find?.((item) => item?.name === el.name)).map(async (el) => {
-
-
+
+    const data = await pg.queryCache(`select array[
     min(${el.name}),
     percentile_disc(0.25) within group (order by ${el.name}),
     percentile_disc(0.5) within group (order by ${el.name}),
     percentile_disc(0.75) within group (order by ${el.name}),
     max(${el.name})
     ] as range from ${loadTable.table} ${sqlList && false ? ` t ${sqlList}` : ''} where ${loadTable.query || '1=1'}`,
-
-
-
-
-
-
+    { table: loadTable.table }).then(el => {
+      if (el.timeout) {
+        logger.file('timeout/filter', { table: params.table, type: 'Range', filter: el.name });
+        return el;
+      }
+      return el.rows?.[0]?.range;
+    });
+    Object.assign(el, { data });
+
   }));

   const sqlFilters = (loadTable?.filterCustom || []).filter((el) => el.name && el.sql);
   const q1 = sqlFilters.map((el) => `select count(*), '${el.name}' as name from ${loadTable.table} where ${loadTable.query || '1=1'} and ${el.sql}`).join(' union all ');

-
-
-
-
-
-
-  });
-  }
-  } catch (err) {
-    logger.file('timeout', { table: params?.table, filter: 'custom', stack: err.stack });
+  const { rows: sqlRows = [], timeout } = q1 ? await pg.queryCache(q1, { table: loadTable.table }) : {};
+  if (timeout) logger.file('timeout/filter', { table: params.table, type: 'sqlFilters' });
+
+  if (sqlRows?.length) {
+    sqlFilters.forEach((el) => {
+      const { count } = sqlRows.find((row) => row.name === el.name) || {};
+      Object.assign(el, { count, sql: undefined });
+    });
   }

   return {