@opengis/bi 1.2.27 → 1.2.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bi.js +1 -1
- package/dist/bi.umd.cjs +23 -23
- package/dist/{import-file-DN5MrFOd.js → import-file-C1s2X9Kt.js} +539 -575
- package/dist/style.css +1 -1
- package/dist/{vs-funnel-bar-DwVkX7Q2.js → vs-funnel-bar-BuV_pQBq.js} +1 -1
- package/dist/{vs-list-BWyOIZbY.js → vs-list-uhEudqNr.js} +1 -1
- package/dist/{vs-map-cluster-ltvourI_.js → vs-map-cluster-CeJxLtZx.js} +2 -2
- package/dist/{vs-map-BeYJ2aj0.js → vs-map-qs7P1yfP.js} +2 -2
- package/dist/{vs-number-BvAcFzRT.js → vs-number-BNnFUzs3.js} +1 -1
- package/dist/{vs-table-CBFIS-pF.js → vs-table-BEvWIkoj.js} +1 -1
- package/dist/{vs-text-GIhpwEL4.js → vs-text-CPMz7TWB.js} +1 -1
- package/package.json +75 -75
- package/server/routes/dashboard/controllers/dashboard.list.js +36 -16
- package/server/routes/dashboard/controllers/utils/yaml.js +11 -11
- package/server/routes/edit/controllers/widget.edit.js +22 -13
- package/server/routes/map/controllers/cluster.js +125 -125
- package/server/routes/map/controllers/clusterVtile.js +166 -166
- package/server/routes/map/controllers/geojson.js +127 -127
- package/server/routes/map/controllers/map.js +69 -69
- package/server/routes/map/controllers/utils/downloadClusterData.js +44 -44
- package/server/routes/map/controllers/vtile.js +183 -183
- package/utils.js +12 -12
|
@@ -1,125 +1,125 @@
|
|
|
1
|
-
import { getFilterSQL, logger, pgClients, getMeta } from '@opengis/fastify-table/utils.js';
|
|
2
|
-
|
|
3
|
-
import { getWidget } from '../../../../utils.js';
|
|
4
|
-
|
|
5
|
-
import downloadClusterData from './utils/downloadClusterData.js';
|
|
6
|
-
|
|
7
|
-
const clusterExists = {};
|
|
8
|
-
|
|
9
|
-
export default async function cluster(req, reply) {
|
|
10
|
-
const { query = {} } = req;
|
|
11
|
-
const { widget, filter, dashboard, search } = query;
|
|
12
|
-
|
|
13
|
-
if (!widget) {
|
|
14
|
-
return { message: 'not enough params: widget', status: 400 };
|
|
15
|
-
}
|
|
16
|
-
|
|
17
|
-
const { pg = req.pg || pgClients.client, data, style, controls } = await getWidget({ pg: req.pg, dashboard, widget });
|
|
18
|
-
|
|
19
|
-
const pkey = pg.pk?.[data?.table];
|
|
20
|
-
|
|
21
|
-
if (!pkey) {
|
|
22
|
-
return {
|
|
23
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: table pk not found (${data?.table})`,
|
|
24
|
-
status: 400,
|
|
25
|
-
};
|
|
26
|
-
}
|
|
27
|
-
|
|
28
|
-
// data param
|
|
29
|
-
const {
|
|
30
|
-
table,
|
|
31
|
-
query: where = '1=1',
|
|
32
|
-
metrics = [],
|
|
33
|
-
cluster,
|
|
34
|
-
clusterTable = {},
|
|
35
|
-
} = data;
|
|
36
|
-
|
|
37
|
-
if (!cluster) {
|
|
38
|
-
return {
|
|
39
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: cluster column not specified`,
|
|
40
|
-
status: 400,
|
|
41
|
-
};
|
|
42
|
-
}
|
|
43
|
-
|
|
44
|
-
if (!metrics.length) {
|
|
45
|
-
return {
|
|
46
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: metric columns not found`,
|
|
47
|
-
status: 400,
|
|
48
|
-
};
|
|
49
|
-
}
|
|
50
|
-
|
|
51
|
-
if (!clusterTable?.name) {
|
|
52
|
-
Object.assign(clusterTable, {
|
|
53
|
-
name: 'bi.cluster',
|
|
54
|
-
title: 'title',
|
|
55
|
-
query: `type='${cluster}'`,
|
|
56
|
-
});
|
|
57
|
-
}
|
|
58
|
-
|
|
59
|
-
try {
|
|
60
|
-
if (cluster && !clusterExists[cluster]) {
|
|
61
|
-
const res = await downloadClusterData({ pg, cluster });
|
|
62
|
-
if (res) return res;
|
|
63
|
-
clusterExists[cluster] = 1;
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
if (clusterTable?.name && !pg.pk?.[clusterTable?.name]) {
|
|
67
|
-
return {
|
|
68
|
-
message: 'invalid widget params: clusterTable pkey not found',
|
|
69
|
-
status: 404,
|
|
70
|
-
};
|
|
71
|
-
}
|
|
72
|
-
|
|
73
|
-
const { bounds, extentStr } = await pg.query(`select count(*),
|
|
74
|
-
st_asgeojson(st_extent(geom))::json as bounds,
|
|
75
|
-
replace(regexp_replace(st_extent(geom)::box2d::text,'BOX\\(|\\)','','g'),' ',',') as "extentStr"
|
|
76
|
-
from ${table} where ${where || '1=1'}`).then((res) => res.rows?.[0] || {});
|
|
77
|
-
const extent = extentStr ? extentStr.split(',') : undefined;
|
|
78
|
-
|
|
79
|
-
// get sql
|
|
80
|
-
const { optimizedSQL } =
|
|
81
|
-
filter || search
|
|
82
|
-
? await getFilterSQL({ pg, table, filter, search })
|
|
83
|
-
: {};
|
|
84
|
-
|
|
85
|
-
const { columns = [] } = await getMeta({ pg, table });
|
|
86
|
-
const columnList = columns.map(el => el.name);
|
|
87
|
-
|
|
88
|
-
if (query.metric && typeof query.metric === 'string') {
|
|
89
|
-
const checkInvalid = query.metric.split(',').find(el => !columnList.includes(el) && el !== 'count');
|
|
90
|
-
if (checkInvalid) {
|
|
91
|
-
return reply.status(404).send(`invalid query metric value: ${checkInvalid}`);
|
|
92
|
-
}
|
|
93
|
-
}
|
|
94
|
-
|
|
95
|
-
const multipleMetrics = query.metric ? query.metric.split(',').map(el => el === 'count' ? 'count(*)' : `sum(${el.replace(/'/g, "''")})::float as ${el}`).join(',') : null;
|
|
96
|
-
const multipleMetricsOrder = query.metric ? query.metric.split(',').map(el => el === 'count' ? 'count(*)' : `sum(${el.replace(/'/g, "''")})::float`).join(',') : null;
|
|
97
|
-
const metricFunc = multipleMetrics
|
|
98
|
-
|| `${clusterTable?.operator || 'sum'}("${metrics[0]}")::float`;
|
|
99
|
-
|
|
100
|
-
const q = `select b.*, ${metricFunc} ${multipleMetrics ? '' : 'as metric'}
|
|
101
|
-
from ${optimizedSQL ? `(${optimizedSQL})` : table} q
|
|
102
|
-
left join lateral (select "${pg.pk?.[clusterTable?.name]}" as id, ${clusterTable?.column || cluster} as name, ${clusterTable?.title} as title from ${clusterTable?.name} where ${clusterTable?.codifierColumn || 'codifier'}=q."${clusterTable?.column || cluster}" limit 1)b on 1=1
|
|
103
|
-
where ${where} group by b.id, b.name, b.title order by ${multipleMetricsOrder || metricFunc} desc`;
|
|
104
|
-
|
|
105
|
-
if (query.sql === '1') return q;
|
|
106
|
-
|
|
107
|
-
// auto Index
|
|
108
|
-
// autoIndex({ table, columns: (metrics || []).concat([cluster]) });
|
|
109
|
-
|
|
110
|
-
const { rows = [] } = await pg.query(q);
|
|
111
|
-
const vals = rows.map((el) => el.metric - 0).sort((a, b) => a - b);
|
|
112
|
-
const len = vals.length;
|
|
113
|
-
const sizes = [
|
|
114
|
-
vals[0],
|
|
115
|
-
vals[Math.floor(len / 4)],
|
|
116
|
-
vals[Math.floor(len / 2)],
|
|
117
|
-
vals[Math.floor(len * 0.75)],
|
|
118
|
-
vals[len - 1],
|
|
119
|
-
];
|
|
120
|
-
return { sizes, style, controls, metrics, rows, columns: columns.map(({ name, title, dataTypeID }) => ({ name, title, type: pg.pgType[dataTypeID] })), bounds, extent, count: rows.length, total: rows?.reduce((acc, curr) => (curr.metric || 0) + acc, 0) };
|
|
121
|
-
} catch (err) {
|
|
122
|
-
logger.file('bi/cluster/error', { error: err.toString(), query });
|
|
123
|
-
return { error: err.toString(), status: 500 };
|
|
124
|
-
}
|
|
125
|
-
}
|
|
1
|
+
import { getFilterSQL, logger, pgClients, getMeta } from '@opengis/fastify-table/utils.js';

import { getWidget } from '../../../../utils.js';

import downloadClusterData from './utils/downloadClusterData.js';

// Process-level cache: cluster type -> 1 once its reference data has been
// downloaded, so downloadClusterData runs at most once per cluster per process.
const clusterExists = {};

/**
 * Fastify route handler: aggregates a widget's metric(s) per cluster value.
 *
 * Reads the widget definition (table, base filter, metric columns, cluster
 * column and optional cluster reference table) via getWidget, builds one
 * GROUP BY query joining each row's cluster code to the reference table,
 * and returns the aggregated rows plus quartile "sizes" breakpoints,
 * table extent/bounds, column metadata and the metric total.
 *
 * @param {object} req   Fastify request; uses req.query
 *                       { widget, dashboard, filter, search, metric, sql }
 *                       and optional req.pg client.
 * @param {object} reply Fastify reply; used only for the 404 on an invalid
 *                       query.metric value.
 * @returns {object|string} Result payload, an error object with `status`,
 *                          or the raw SQL string when query.sql === '1'.
 */
export default async function cluster(req, reply) {
  const { query = {} } = req;
  const { widget, filter, dashboard, search } = query;

  if (!widget) {
    return { message: 'not enough params: widget', status: 400 };
  }

  const { pg = req.pg || pgClients.client, data, style, controls } = await getWidget({ pg: req.pg, dashboard, widget });

  // The source table must have a registered primary key.
  const pkey = pg.pk?.[data?.table];

  if (!pkey) {
    return {
      message: `invalid ${widget ? 'widget' : 'dashboard'}: table pk not found (${data?.table})`,
      status: 400,
    };
  }

  // data param
  const {
    table,
    query: where = '1=1',
    metrics = [],
    cluster,
    clusterTable = {},
  } = data;

  if (!cluster) {
    return {
      message: `invalid ${widget ? 'widget' : 'dashboard'}: cluster column not specified`,
      status: 400,
    };
  }

  if (!metrics.length) {
    return {
      message: `invalid ${widget ? 'widget' : 'dashboard'}: metric columns not found`,
      status: 400,
    };
  }

  // Default reference table when the widget does not configure one.
  // Mutates the destructured clusterTable object in place.
  if (!clusterTable?.name) {
    Object.assign(clusterTable, {
      name: 'bi.cluster',
      title: 'title',
      query: `type='${cluster}'`,
    });
  }

  try {
    // Lazily download reference data for this cluster type once per process.
    // downloadClusterData returning a truthy value is treated as an error
    // payload and short-circuits the request.
    if (cluster && !clusterExists[cluster]) {
      const res = await downloadClusterData({ pg, cluster });
      if (res) return res;
      clusterExists[cluster] = 1;
    }

    if (clusterTable?.name && !pg.pk?.[clusterTable?.name]) {
      return {
        message: 'invalid widget params: clusterTable pkey not found',
        status: 404,
      };
    }

    // Whole-table extent: GeoJSON bounds plus a "minx,miny,maxx,maxy" string
    // parsed into an array for the client.
    const { bounds, extentStr } = await pg.query(`select count(*),
      st_asgeojson(st_extent(geom))::json as bounds,
      replace(regexp_replace(st_extent(geom)::box2d::text,'BOX\\(|\\)','','g'),' ',',') as "extentStr"
      from ${table} where ${where || '1=1'}`).then((res) => res.rows?.[0] || {});
    const extent = extentStr ? extentStr.split(',') : undefined;

    // get sql
    const { optimizedSQL } =
      filter || search
        ? await getFilterSQL({ pg, table, filter, search })
        : {};

    const { columns = [] } = await getMeta({ pg, table });
    const columnList = columns.map(el => el.name);

    // Reject any requested metric that is neither a real column nor 'count'
    // before it is interpolated into SQL below.
    if (query.metric && typeof query.metric === 'string') {
      const checkInvalid = query.metric.split(',').find(el => !columnList.includes(el) && el !== 'count');
      if (checkInvalid) {
        return reply.status(404).send(`invalid query metric value: ${checkInvalid}`);
      }
    }

    // Comma-separated query.metric -> one aggregate per metric ('count' maps
    // to count(*)); the Order variant omits the aliases for use in ORDER BY.
    const multipleMetrics = query.metric ? query.metric.split(',').map(el => el === 'count' ? 'count(*)' : `sum(${el.replace(/'/g, "''")})::float as ${el}`).join(',') : null;
    const multipleMetricsOrder = query.metric ? query.metric.split(',').map(el => el === 'count' ? 'count(*)' : `sum(${el.replace(/'/g, "''")})::float`).join(',') : null;
    // Fallback: single aggregate over the widget's first configured metric.
    const metricFunc = multipleMetrics
      || `${clusterTable?.operator || 'sum'}("${metrics[0]}")::float`;

    // NOTE(review): table/where/clusterTable values come from stored widget
    // configuration and are interpolated raw into SQL — assumes that config
    // is trusted; confirm it is not end-user writable.
    const q = `select b.*, ${metricFunc} ${multipleMetrics ? '' : 'as metric'}
      from ${optimizedSQL ? `(${optimizedSQL})` : table} q
      left join lateral (select "${pg.pk?.[clusterTable?.name]}" as id, ${clusterTable?.column || cluster} as name, ${clusterTable?.title} as title from ${clusterTable?.name} where ${clusterTable?.codifierColumn || 'codifier'}=q."${clusterTable?.column || cluster}" limit 1)b on 1=1
      where ${where} group by b.id, b.name, b.title order by ${multipleMetricsOrder || metricFunc} desc`;

    // Debug escape hatch: return the generated SQL instead of executing it.
    if (query.sql === '1') return q;

    // auto Index
    // autoIndex({ table, columns: (metrics || []).concat([cluster]) });

    const { rows = [] } = await pg.query(q);
    // Quartile breakpoints of the metric values (min, Q1, median, Q3, max),
    // used by the client for symbol sizing.
    // NOTE(review): when query.metric is set there is no `metric` alias in
    // the row, so `el.metric - 0` is NaN and sizes/total degrade — confirm
    // whether clients ever combine query.metric with sizes.
    const vals = rows.map((el) => el.metric - 0).sort((a, b) => a - b);
    const len = vals.length;
    const sizes = [
      vals[0],
      vals[Math.floor(len / 4)],
      vals[Math.floor(len / 2)],
      vals[Math.floor(len * 0.75)],
      vals[len - 1],
    ];
    return { sizes, style, controls, metrics, rows, columns: columns.map(({ name, title, dataTypeID }) => ({ name, title, type: pg.pgType[dataTypeID] })), bounds, extent, count: rows.length, total: rows?.reduce((acc, curr) => (curr.metric || 0) + acc, 0) };
  } catch (err) {
    logger.file('bi/cluster/error', { error: err.toString(), query });
    return { error: err.toString(), status: 500 };
  }
}
|
|
@@ -1,166 +1,166 @@
|
|
|
1
|
-
import Sphericalmercator from '@mapbox/sphericalmercator';
|
|
2
|
-
|
|
3
|
-
import path from 'path';
|
|
4
|
-
import { createHash } from 'crypto';
|
|
5
|
-
import { writeFile, mkdir } from 'fs/promises';
|
|
6
|
-
|
|
7
|
-
import { logger, getFolder, getFilterSQL, autoIndex, pgClients } from '@opengis/fastify-table/utils.js';
|
|
8
|
-
|
|
9
|
-
import { getWidget } from '../../../../utils.js';
|
|
10
|
-
|
|
11
|
-
import downloadClusterData from './utils/downloadClusterData.js';
|
|
12
|
-
|
|
13
|
-
const mercator = new Sphericalmercator({ size: 256 });
|
|
14
|
-
|
|
15
|
-
const clusterExists = {};
|
|
16
|
-
|
|
17
|
-
export default async function clusterVtile(req, reply) {
|
|
18
|
-
const { params = {}, query = {} } = req;
|
|
19
|
-
const { z, y } = params;
|
|
20
|
-
const x = params.x?.split('.')[0] - 0;
|
|
21
|
-
|
|
22
|
-
if (!x || !y || !z) {
|
|
23
|
-
return { message: 'not enough params: xyz', status: 400 };
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
const { widget, filter, dashboard, search, clusterZoom, nocache, pointZoom } =
|
|
27
|
-
query;
|
|
28
|
-
|
|
29
|
-
if (!widget) {
|
|
30
|
-
return { message: 'not enough params: widget', status: 400 };
|
|
31
|
-
}
|
|
32
|
-
|
|
33
|
-
const { pg = req.pg || pgClients.client, data } = await getWidget({ pg: req.pg, dashboard, widget });
|
|
34
|
-
|
|
35
|
-
const headers = {
|
|
36
|
-
'Content-Type': 'application/x-protobuf',
|
|
37
|
-
'Cache-Control':
|
|
38
|
-
nocache || query.sql ? 'no-cache' : 'public, max-age=86400',
|
|
39
|
-
};
|
|
40
|
-
|
|
41
|
-
const hash = [pointZoom, filter].filter((el) => el).join();
|
|
42
|
-
|
|
43
|
-
const root = getFolder(req);
|
|
44
|
-
const file = path.join(
|
|
45
|
-
root,
|
|
46
|
-
`/map/vtile/${widget}/${hash ? `${createHash('sha1').update(hash).digest('base64')}/` : ''}${z}/${x}/${y}.mvt`
|
|
47
|
-
);
|
|
48
|
-
|
|
49
|
-
try {
|
|
50
|
-
if (!data?.table) {
|
|
51
|
-
return {
|
|
52
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: table not specified`,
|
|
53
|
-
status: 400,
|
|
54
|
-
};
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
const pkey = pg.pk?.[data?.table];
|
|
58
|
-
|
|
59
|
-
if (!pkey) {
|
|
60
|
-
return {
|
|
61
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: table pk not found (${data?.table})`,
|
|
62
|
-
status: 400,
|
|
63
|
-
};
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
// data param
|
|
67
|
-
const {
|
|
68
|
-
table,
|
|
69
|
-
query: where = '1=1',
|
|
70
|
-
metrics = [],
|
|
71
|
-
cluster,
|
|
72
|
-
clusterTable = {},
|
|
73
|
-
} = data;
|
|
74
|
-
if (!clusterTable?.name) {
|
|
75
|
-
Object.assign(clusterTable, {
|
|
76
|
-
name: 'bi.cluster',
|
|
77
|
-
title: 'title',
|
|
78
|
-
query: `type='${data.cluster}'`,
|
|
79
|
-
});
|
|
80
|
-
}
|
|
81
|
-
|
|
82
|
-
if (cluster && !clusterExists[data.cluster]) {
|
|
83
|
-
const res = await downloadClusterData({ pg, cluster });
|
|
84
|
-
if (res) return res;
|
|
85
|
-
clusterExists[cluster] = 1;
|
|
86
|
-
}
|
|
87
|
-
|
|
88
|
-
if (!cluster) {
|
|
89
|
-
return {
|
|
90
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: cluster column not specified`,
|
|
91
|
-
status: 400,
|
|
92
|
-
};
|
|
93
|
-
}
|
|
94
|
-
|
|
95
|
-
if (!metrics.length) {
|
|
96
|
-
return {
|
|
97
|
-
message: `invalid ${widget ? 'widget' : 'dashboard'}: metric columns not found`,
|
|
98
|
-
status: 400,
|
|
99
|
-
};
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
// get sql
|
|
103
|
-
const { optimizedSQL } =
|
|
104
|
-
filter || search
|
|
105
|
-
? await getFilterSQL({ pg, table, filter, search })
|
|
106
|
-
: {};
|
|
107
|
-
|
|
108
|
-
const q = `select ${clusterTable?.column || cluster} as name, ${clusterTable?.operator || 'sum'}("${metrics[0]}")::float as metric, b.*
|
|
109
|
-
from ${optimizedSQL ? `(${optimizedSQL})` : table} q
|
|
110
|
-
left join lateral (select "${pg.pk?.[clusterTable?.name]}" as id, ${clusterTable?.title} as title,
|
|
111
|
-
${clusterTable?.geom || 'geom'} as geom from ${clusterTable?.name}
|
|
112
|
-
where ${clusterTable?.query || '1=1'} and ${clusterTable?.codifierColumn || 'codifier'}=q."${clusterTable?.column || cluster}" limit 1
|
|
113
|
-
)b on 1=1
|
|
114
|
-
where ${where} group by
|
|
115
|
-
q."${clusterTable?.column || cluster}", b.id, b.title, b.geom`;
|
|
116
|
-
|
|
117
|
-
if (query.sql === '1') return q;
|
|
118
|
-
|
|
119
|
-
const geomCol =
|
|
120
|
-
parseInt(z, 10) < parseInt(pointZoom, 10)
|
|
121
|
-
? `ST_Centroid(${clusterTable?.geom || data?.geom || 'geom'})`
|
|
122
|
-
: clusterTable?.geom || data?.geom || 'geom';
|
|
123
|
-
|
|
124
|
-
const bbox = mercator.bbox(+y, +x, +z, false /* , '900913' */);
|
|
125
|
-
const bbox2d = `'BOX(${bbox[0]} ${bbox[1]},${bbox[2]} ${bbox[3]})'::box2d`;
|
|
126
|
-
|
|
127
|
-
const q1 = `SELECT ST_AsMVT(q, 'bi', 4096, 'geom','row') as tile
|
|
128
|
-
FROM (
|
|
129
|
-
SELECT
|
|
130
|
-
floor(random() * 100000 + 1)::int + row_number() over() as row,
|
|
131
|
-
|
|
132
|
-
${pg.pk?.[clusterTable?.name] ? 'id,' : ''} name, metric, title,
|
|
133
|
-
|
|
134
|
-
ST_AsMVTGeom(st_transform(${geomCol}, 3857),ST_TileEnvelope(${z},${y},${x})::box2d,4096,256,false) as geom
|
|
135
|
-
|
|
136
|
-
FROM (select * from (${q})q where geom && ${bbox2d}
|
|
137
|
-
|
|
138
|
-
and geom is not null and st_srid(geom) >0
|
|
139
|
-
|
|
140
|
-
and ST_GeometryType(geom) = any ('{ "ST_Polygon", "ST_MultiPolygon" }')
|
|
141
|
-
|
|
142
|
-
limit 3000)q
|
|
143
|
-
) q`;
|
|
144
|
-
|
|
145
|
-
if (query.sql === '2') return q1;
|
|
146
|
-
|
|
147
|
-
// auto Index
|
|
148
|
-
autoIndex({ table, columns: (metrics || []).concat([cluster]) });
|
|
149
|
-
|
|
150
|
-
const { rows = [] } = await pg.query(q1);
|
|
151
|
-
|
|
152
|
-
if (query.sql === '3') return rows.map((el) => el.tile);
|
|
153
|
-
|
|
154
|
-
const buffer = Buffer.concat(rows.map((el) => Buffer.from(el.tile)));
|
|
155
|
-
|
|
156
|
-
if (!nocache) {
|
|
157
|
-
await mkdir(path.dirname(file), { recursive: true });
|
|
158
|
-
await writeFile(file, buffer, 'binary');
|
|
159
|
-
}
|
|
160
|
-
|
|
161
|
-
return reply.headers(headers).send(buffer);
|
|
162
|
-
} catch (err) {
|
|
163
|
-
logger.file('bi/clusterVtile/error', { error: err.toString(), query, params });
|
|
164
|
-
return { error: err.toString(), status: 500 };
|
|
165
|
-
}
|
|
166
|
-
}
|
|
1
|
+
import Sphericalmercator from '@mapbox/sphericalmercator';

import path from 'path';
import { createHash } from 'crypto';
import { writeFile, mkdir } from 'fs/promises';

import { logger, getFolder, getFilterSQL, autoIndex, pgClients } from '@opengis/fastify-table/utils.js';

import { getWidget } from '../../../../utils.js';

import downloadClusterData from './utils/downloadClusterData.js';

const mercator = new Sphericalmercator({ size: 256 });

// Process-level cache: cluster type -> 1 once its reference data has been
// downloaded, so downloadClusterData runs at most once per cluster per process.
const clusterExists = {};

/**
 * Fastify route handler: serves a Mapbox Vector Tile (MVT) of cluster
 * polygons with an aggregated metric for one widget.
 *
 * Aggregates the widget's first metric per cluster code, joins the cluster
 * reference table for the geometry, clips to the requested XYZ tile with
 * ST_AsMVTGeom, writes the tile to the on-disk cache (unless nocache) and
 * streams the protobuf buffer back.
 *
 * @param {object} req   Fastify request; req.params {z, x, y} (x may carry a
 *                       ".mvt"-style extension) and req.query
 *                       { widget, dashboard, filter, search, clusterZoom,
 *                         nocache, pointZoom, sql }.
 * @param {object} reply Fastify reply used to send the tile buffer.
 * @returns {object|string|Buffer} Tile response, an error object with
 *                                 `status`, or intermediate SQL/rows when
 *                                 query.sql is '1'/'2'/'3' (debug).
 */
export default async function clusterVtile(req, reply) {
  const { params = {}, query = {} } = req;
  const { z, y } = params;
  // Strip a file extension ("123.mvt" -> 123) and coerce to number;
  // missing/garbage input yields NaN.
  const x = params.x?.split('.')[0] - 0;

  // BUG FIX: the previous check was `!x || !y || !z`, which rejected the
  // valid tile column x === 0 (x is the only numeric of the three; y and z
  // are path strings, so "0" stays truthy). Use Number.isNaN for x so column
  // 0 tiles are served while missing/non-numeric x is still rejected.
  if (Number.isNaN(x) || !y || !z) {
    return { message: 'not enough params: xyz', status: 400 };
  }

  // NOTE(review): clusterZoom is destructured but never used below — confirm
  // whether it is dead or intended to gate geomCol like pointZoom does.
  const { widget, filter, dashboard, search, clusterZoom, nocache, pointZoom } =
    query;

  if (!widget) {
    return { message: 'not enough params: widget', status: 400 };
  }

  const { pg = req.pg || pgClients.client, data } = await getWidget({ pg: req.pg, dashboard, widget });

  const headers = {
    'Content-Type': 'application/x-protobuf',
    'Cache-Control':
      nocache || query.sql ? 'no-cache' : 'public, max-age=86400',
  };

  // Cache key: filter-dependent tiles get their own hashed subfolder.
  const hash = [pointZoom, filter].filter((el) => el).join();

  const root = getFolder(req);
  // NOTE(review): this cache file is written below but never read here —
  // presumably a static-file layer serves it on the next request; confirm.
  const file = path.join(
    root,
    `/map/vtile/${widget}/${hash ? `${createHash('sha1').update(hash).digest('base64')}/` : ''}${z}/${x}/${y}.mvt`
  );

  try {
    if (!data?.table) {
      return {
        message: `invalid ${widget ? 'widget' : 'dashboard'}: table not specified`,
        status: 400,
      };
    }

    const pkey = pg.pk?.[data?.table];

    if (!pkey) {
      return {
        message: `invalid ${widget ? 'widget' : 'dashboard'}: table pk not found (${data?.table})`,
        status: 400,
      };
    }

    // data param
    const {
      table,
      query: where = '1=1',
      metrics = [],
      cluster,
      clusterTable = {},
    } = data;
    // Default reference table when the widget does not configure one.
    if (!clusterTable?.name) {
      Object.assign(clusterTable, {
        name: 'bi.cluster',
        title: 'title',
        query: `type='${data.cluster}'`,
      });
    }

    // Lazily download reference data once per cluster type per process
    // (data.cluster === cluster here — same destructured value).
    if (cluster && !clusterExists[data.cluster]) {
      const res = await downloadClusterData({ pg, cluster });
      if (res) return res;
      clusterExists[cluster] = 1;
    }

    if (!cluster) {
      return {
        message: `invalid ${widget ? 'widget' : 'dashboard'}: cluster column not specified`,
        status: 400,
      };
    }

    if (!metrics.length) {
      return {
        message: `invalid ${widget ? 'widget' : 'dashboard'}: metric columns not found`,
        status: 400,
      };
    }

    // get sql
    const { optimizedSQL } =
      filter || search
        ? await getFilterSQL({ pg, table, filter, search })
        : {};

    // Per-cluster aggregate joined to the reference geometry.
    // NOTE(review): table/where/clusterTable values come from stored widget
    // configuration and are interpolated raw into SQL — assumes that config
    // is trusted; confirm it is not end-user writable.
    const q = `select ${clusterTable?.column || cluster} as name, ${clusterTable?.operator || 'sum'}("${metrics[0]}")::float as metric, b.*
      from ${optimizedSQL ? `(${optimizedSQL})` : table} q
      left join lateral (select "${pg.pk?.[clusterTable?.name]}" as id, ${clusterTable?.title} as title,
      ${clusterTable?.geom || 'geom'} as geom from ${clusterTable?.name}
      where ${clusterTable?.query || '1=1'} and ${clusterTable?.codifierColumn || 'codifier'}=q."${clusterTable?.column || cluster}" limit 1
      )b on 1=1
      where ${where} group by
      q."${clusterTable?.column || cluster}", b.id, b.title, b.geom`;

    // Debug escape hatch: return the inner SQL instead of executing it.
    if (query.sql === '1') return q;

    // Below pointZoom, collapse polygons to centroids for rendering.
    const geomCol =
      parseInt(z, 10) < parseInt(pointZoom, 10)
        ? `ST_Centroid(${clusterTable?.geom || data?.geom || 'geom'})`
        : clusterTable?.geom || data?.geom || 'geom';

    // NOTE(review): mercator.bbox is documented as bbox(x, y, z) but is
    // called here with (+y, +x, +z), and ST_TileEnvelope(zoom, x, y) below
    // receives (z, y, x) — the swap is consistent, suggesting the route's
    // :x/:y path params are themselves transposed; confirm against the route
    // registration before "fixing" either call.
    const bbox = mercator.bbox(+y, +x, +z, false /* , '900913' */);
    const bbox2d = `'BOX(${bbox[0]} ${bbox[1]},${bbox[2]} ${bbox[3]})'::box2d`;

    // Wrap the aggregate in ST_AsMVTGeom/ST_AsMVT, pre-filtered by the tile
    // bbox; only (Multi)Polygon geometries are rendered, capped at 3000 rows.
    const q1 = `SELECT ST_AsMVT(q, 'bi', 4096, 'geom','row') as tile
    FROM (
      SELECT
      floor(random() * 100000 + 1)::int + row_number() over() as row,

      ${pg.pk?.[clusterTable?.name] ? 'id,' : ''} name, metric, title,

      ST_AsMVTGeom(st_transform(${geomCol}, 3857),ST_TileEnvelope(${z},${y},${x})::box2d,4096,256,false) as geom

      FROM (select * from (${q})q where geom && ${bbox2d}

      and geom is not null and st_srid(geom) >0

      and ST_GeometryType(geom) = any ('{ "ST_Polygon", "ST_MultiPolygon" }')

      limit 3000)q
    ) q`;

    if (query.sql === '2') return q1;

    // auto Index
    autoIndex({ table, columns: (metrics || []).concat([cluster]) });

    const { rows = [] } = await pg.query(q1);

    if (query.sql === '3') return rows.map((el) => el.tile);

    const buffer = Buffer.concat(rows.map((el) => Buffer.from(el.tile)));

    // Persist to the on-disk tile cache unless explicitly disabled.
    if (!nocache) {
      await mkdir(path.dirname(file), { recursive: true });
      await writeFile(file, buffer, 'binary');
    }

    return reply.headers(headers).send(buffer);
  } catch (err) {
    logger.file('bi/clusterVtile/error', { error: err.toString(), query, params });
    return { error: err.toString(), status: 500 };
  }
}
|