@opengis/fastify-table 1.2.60 → 1.2.62

This diff compares publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@opengis/fastify-table",
-  "version": "1.2.60",
+  "version": "1.2.62",
   "type": "module",
   "description": "core-plugins",
   "keywords": [
@@ -24,6 +24,7 @@ function getPG(param) {
     host: host || config.pg?.host,
     port: port || config.pg?.port,
     database: db || database || config.pg?.db || config.pg?.database,
+    statement_timeout: config.pg?.statement_timeout || 10000,
   };
 
   pgClients[name] = new pg.Pool(dbConfig);
@@ -25,6 +25,7 @@ async function getPGAsync(param) {
     host: host || config.pg?.host,
     port: port || config.pg?.port,
     database: db || database || config.pg?.db || config.pg?.database,
+    statement_timeout: config.pg?.statement_timeout || 10000,
   };
 
   pgClients[name] = new pg.Pool(dbConfig);
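
Note: node-postgres passes `statement_timeout` through to the server as a session setting, so PostgreSQL itself cancels any statement running past the limit (10 s by default here) with SQLSTATE 57014. A minimal behavior sketch, assuming a reachable local database:

    import pg from 'pg';

    // Standalone illustration; the options mirror what getPG()/getPGAsync() build.
    const pool = new pg.Pool({ statement_timeout: 10000 });

    try {
      await pool.query('select pg_sleep(15)'); // runs past the 10 s limit
    } catch (err) {
      console.log(err.message); // 'canceling statement due to statement timeout'
      console.log(err.code);    // '57014'
    }

That error message is exactly what the new query() wrapper in init.js (below) matches on.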
@@ -1,6 +1,8 @@
 import { createHash } from 'node:crypto';
 
+import config from '../../../../config.js';
 import getRedis from '../../redis/funcs/getRedis.js';
+import logger from '../../logger/getLogger.js';
 
 const rclient = getRedis({ db: 0 });
 
@@ -18,20 +20,35 @@ async function init(client) {
     from pg_class where relkind in ('r','v')`);
   const relkinds = rows.reduce((acc, curr) => Object.assign(acc, { [curr.tname]: curr.relkind }), {});
 
-  async function one(query, param = {}) {
-    const data = await client.query(query, Array.isArray(param) ? param : param.args || []);
+  // const { queryOriginal = client.query } = client;
+
+  async function query(q, args = []) {
+    try {
+      const data = await client.query(q, args);
+      return data;
+    } catch (err) {
+      if (err.message === 'canceling statement due to statement timeout') {
+        logger.file('timeout/query', { q, stack: err.stack });
+        return { rows: [], timeout: true };
+      }
+      throw new Error(err);
+    }
+  }
+
+  async function one(q, param = {}) {
+    const data = await query(q, Array.isArray(param) ? param : param.args || []);
     const result = ((Array.isArray(data) ? data.pop() : data)?.rows || [])[0] || {};
     return result;
   }
 
-  async function queryNotice(query, args = [], cb = () => { }) {
+  async function queryNotice(q, args = [], cb = () => { }) {
     const clientCb = await client.connect();
     clientCb.on('notice', (e) => {
       cb(e.message);
     });
     let result;
     try {
-      result = await clientCb.query(query, args);
+      result = await clientCb.query(q, args);
       clientCb.end();
     }
     catch (err) {
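
The new internal query() wrapper above converts a statement-timeout rejection into a soft result, so one() and queryCache() resolve with empty rows plus a flag instead of throwing; client.query itself stays the raw pool method (the queryOriginal swap remains commented out). A behavior sketch, assuming a pool whose statement_timeout is below 15 s:

    const data = await query('select pg_sleep(15)');
    console.log(data); // { rows: [], timeout: true }

Note that any other failure is rethrown as `new Error(err)`, which stringifies the original error, so properties such as `err.code` are not carried over to the rethrown instance.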
@@ -42,16 +59,16 @@ async function init(client) {
     return result;
   }
 
-  async function queryCache(query, param = {}) {
+  async function queryCache(q, param = {}) {
     const { table, args = [], time = 15 } = param;
-    const seconds = typeof time !== 'number' || time < 0 ? 15 : time * 60;
+    const seconds = typeof time !== 'number' || time < 0 ? 15 : (config.local ? 0 : time * 60);
 
     // CRUD table state
     const keyCacheTable = `pg:${table}:crud`;
     const crudInc = table ? await rclient.get(keyCacheTable) || 0 : 0;
 
     //
-    const hash = createHash('sha1').update([query, JSON.stringify(args)].join()).digest('base64');
+    const hash = createHash('sha1').update([q, JSON.stringify(args)].join()).digest('base64');
     const keyCache = `pg:${hash}:${crudInc}`;
 
     const cacheData = await rclient.get(keyCache);
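
queryCache keys Redis on a SHA-1 of the SQL text plus serialized args, suffixed with a per-table CRUD counter, so bumping `pg:<table>:crud` on writes implicitly invalidates every cached read for that table; the new `config.local` branch forces the TTL to 0 so local development always hits the database. A sketch of the key derivation (the table and values here are illustrative):

    import { createHash } from 'node:crypto';

    const q = 'select * from roads where id = $1';
    const args = [42];
    const hash = createHash('sha1').update([q, JSON.stringify(args)].join()).digest('base64');
    const crudInc = 7; // hypothetical current value of 'pg:roads:crud'
    const keyCache = `pg:${hash}:${crudInc}`;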
@@ -61,7 +78,7 @@ async function init(client) {
       return JSON.parse(cacheData);
     }
 
-    const data = await client.query(query, args || []);
+    const data = await query(q, args || []);
 
     if (seconds > 0) {
       rclient.set(keyCache, JSON.stringify(data), 'EX', seconds);
@@ -71,7 +88,7 @@ async function init(client) {
   }
 
   Object.assign(client, {
-    one, pgType, pk, tlist, relkinds, queryCache, queryNotice,
+    one, pgType, pk, tlist, relkinds, queryCache, queryNotice, /* queryOriginal, */
   });
 }
 
@@ -1,21 +1,22 @@
-import pg from 'pg';
-
-import config from '../../../config.js';
-import init from './funcs/init.js';
-
-const pgClients = {};
-if (config.pg) {
-  const client = new pg.Pool({
-    host: config.pg?.host || '127.0.0.1',
-    port: config.pg?.port || 5432,
-    database: config.pg?.database || 'postgres',
-    user: config.pg?.user || 'postgres',
-    password: config.pg?.password || 'postgres',
-  });
-  client.init = async () => {
-    await init(client);
-  };
-  client.init();
-  pgClients.client = client;
-}
-export default pgClients;
+import pg from 'pg';
+
+import config from '../../../config.js';
+import init from './funcs/init.js';
+
+const pgClients = {};
+if (config.pg) {
+  const client = new pg.Pool({
+    host: config.pg?.host || '127.0.0.1',
+    port: config.pg?.port || 5432,
+    database: config.pg?.database || 'postgres',
+    user: config.pg?.user || 'postgres',
+    password: config.pg?.password || 'postgres',
+    statement_timeout: config.pg?.statement_timeout || 10000,
+  });
+  client.init = async () => {
+    await init(client);
+  };
+  client.init();
+  pgClients.client = client;
+}
+export default pgClients;
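
Both the default pool above and the named pools from getPG()/getPGAsync() read the same override, so a deployment that needs longer-running statements can raise the limit in one place. A sketch of the relevant config.js fragment (the surrounding config shape is assumed):

    // config.js (sketch)
    export default {
      pg: {
        host: '127.0.0.1',
        database: 'postgres',
        statement_timeout: 30000, // raise the 10 s default to 30 s
      },
    };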
@@ -32,14 +32,14 @@ export default async function filterAPI(req) {
   const filters = (loadTable?.filter_list || loadTable?.filters || loadTable?.filterList || []).concat(loadTable?.filterSql || []);
 
   // admin.custom_column - user filter NA-165
-  const { rows: properties = [] } = await pg.query(`select column_id, name, title, format, data from admin.custom_column where entity=$1 and uid=$2 and filter`, [params.name, user?.uid]);
+  const { rows: properties = [] } = await pg.query(`select column_id, name, title, format, data from admin.custom_column where entity=$1 and uid=$2 and filter`, [params.table, user?.uid]);
   properties.forEach((row) => filters.push({ id: row.name, name: row.name, ua: row.title, type: row.format, data: row.data }));
 
   // KRYVYIRIH-231
   autoIndex({ table: loadTable.table, columns: filters.filter((el) => columns?.find?.((item) => item?.name === el.name)) })
     .catch(err => {
       console.error(err.toString());
-      logger.file('autoindex/error', { name: params?.name, error: err.toString(), stack: err.stack });
+      logger.file('autoindex/error', { name: params?.table, error: err.toString(), stack: err.stack });
     });
 
   filters?.forEach?.(el => Object.assign(el, { id: el.id || el.name }));
@@ -59,9 +59,17 @@ export default async function filterAPI(req) {
       Object.assign(el, { options });
     }
 
-    const countArr = pg.pgType[dataTypeID]?.includes('[]')
-      ? await pg.queryCache(`select unnest(${el.id})::text as id,count(*) from (${optimizedSQL})q group by unnest(${el.id})`, { table: loadTable.table, time: 5 })
-      : await pg.queryCache(`select ${el.id}::text as id,count(*) from (${optimizedSQL})q group by ${el.id}`, { table: loadTable.table, time: 5 });
+    const q = pg.pgType[dataTypeID]?.includes('[]')
+      ? `select unnest(${el.id})::text as id,count(*) from (${optimizedSQL})q group by unnest(${el.id}) limit 100`
+      : `select ${el.id}::text as id,count(*) from (${optimizedSQL})q group by ${el.id} limit 100`;
+
+    const countArr = await pg.queryCache(q, { table: loadTable.table });
+    if (countArr.timeout) {
+      Object.assign(el, { timeout: countArr.timeout });
+      console.log('timeout filter', params.table, el.id);
+      logger.file('timeout/filter', { table: params.table, type: 'cls', filter: el.id });
+    }
+
     const ids = countArr.rows.map(el1 => el1.id);
 
     const clsData = await getSelectVal({ pg, values: ids, name: el.data });
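
The grouped option counts are now capped with `limit 100` (without an `order by`, that returns an arbitrary 100 groups, which is acceptable for seeding filter options) and routed through queryCache, so a timeout tags the filter instead of failing the request. A sketch of a downstream consumer of that flag (renderFilterPlaceholder is hypothetical):

    for (const el of filters) {
      if (el.timeout) {
        // render the filter without counts rather than blocking the page
        renderFilterPlaceholder(el.id);
      }
    }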
@@ -71,10 +79,15 @@ export default async function filterAPI(req) {
       return { ...cel, ...data };
     });
     Object.assign(el, { options });
+
   }));
 
   const q = ((loadTable?.filterState || []).concat(loadTable?.filterCustom || [])).filter((el) => el.name && el.sql).map((el) => `select count(*), '${el.name}' as name from (${optimizedSQL})q where ${el.sql}`).join(' union all ');
-  const { rows = [] } = q ? await pg.query(q) : {};
+
+
+  const { rows = [], timeout: timeout1 } = q ? await pg.queryCache(q) : {};
+  if (timeout1) logger.file('timeout/filter', { table: params.table, type: 'state/custom' });
+
   if (rows?.length) {
     ((loadTable?.filterState || []).concat(loadTable?.filterCustom || [])).filter((el) => el.name && el.sql).forEach((el) => {
       const { count } = rows.find((row) => row.name === el.name) || {};
@@ -82,6 +95,7 @@ export default async function filterAPI(req) {
     });
   }
 
+
   const sqlList = loadTable?.sql
     ?.filter((el) => !el.disabled && el?.sql?.replace)
     ?.map((el, i) => ` left join lateral (${el.filter ? el.sql.replace(/limit 1/ig, '') : el.sql}) as ${el.name || `t${i + 1}`} on 1=1 `)
@@ -89,19 +103,31 @@ export default async function filterAPI(req) {
 
   // percentile_cont - alternative
   await Promise.all(filters.filter((el) => el.name && el.type === 'Range' && fields?.find?.((item) => item?.name === el.name)).map(async (el) => {
+
     const data = await pg.queryCache(`select array[
       min(${el.name}),
       percentile_disc(0.25) within group (order by ${el.name}),
       percentile_disc(0.5) within group (order by ${el.name}),
       percentile_disc(0.75) within group (order by ${el.name}),
       max(${el.name})
-    ] as range from ${loadTable.table} ${sqlList && false ? ` t ${sqlList}` : ''} where ${loadTable.query || '1=1'}`, { table: loadTable.table }).then(el => el.rows?.[0]?.range);
+    ] as range from ${loadTable.table} ${sqlList && false ? ` t ${sqlList}` : ''} where ${loadTable.query || '1=1'}`,
+    { table: loadTable.table }).then(el => {
+      if (el.timeout) {
+        logger.file('timeout/filter', { table: params.table, type: 'Range', filter: el.name });
+        return el;
+      }
+      return el.rows?.[0]?.range;
+    });
     Object.assign(el, { data });
+
   }));
 
   const sqlFilters = (loadTable?.filterCustom || []).filter((el) => el.name && el.sql);
   const q1 = sqlFilters.map((el) => `select count(*), '${el.name}' as name from ${loadTable.table} where ${loadTable.query || '1=1'} and ${el.sql}`).join(' union all ');
-  const { rows: sqlRows = [] } = q1 ? await pg.queryCache(q1, { table: loadTable.table }) : {};
+
+  const { rows: sqlRows = [], timeout } = q1 ? await pg.queryCache(q1, { table: loadTable.table }) : {};
+  if (timeout) logger.file('timeout/filter', { table: params.table, type: 'sqlFilters' });
+
   if (sqlRows?.length) {
     sqlFilters.forEach((el) => {
       const { count } = sqlRows.find((row) => row.name === el.name) || {};
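
For Range filters, el.data is now either the `[min, q1, median, q3, max]` array produced by percentile_disc or, on timeout, the whole result object, so rendering code should type-check before destructuring. A sketch under that assumption (the render helpers are hypothetical):

    if (Array.isArray(el.data)) {
      const [min, , median, , max] = el.data;
      // slider setup from the quartile array
      renderRangeSlider({ min, median, max });
    } else if (el.data?.timeout) {
      renderFilterPlaceholder(el.name);
    }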