r2gg 2.2.6-py3-none-any.whl → 3.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- r2gg/__about__.py +1 -1
- r2gg/_configure.py +11 -61
- r2gg/_database.py +163 -0
- r2gg/_lua_builder.py +6 -3
- r2gg/_main.py +74 -73
- r2gg/_pivot_to_osm.py +53 -89
- r2gg/_pivot_to_pgr.py +53 -103
- r2gg/cli.py +18 -32
- {r2gg-2.2.6.dist-info → r2gg-3.1.4.dist-info}/METADATA +15 -6
- r2gg-3.1.4.dist-info/RECORD +24 -0
- {r2gg-2.2.6.dist-info → r2gg-3.1.4.dist-info}/WHEEL +1 -1
- r2gg-2.2.6.dist-info/RECORD +0 -23
- {r2gg-2.2.6.dist-info → r2gg-3.1.4.dist-info}/entry_points.txt +0 -0
- {r2gg-2.2.6.dist-info → r2gg-3.1.4.dist-info/licenses}/LICENSE +0 -0
- {r2gg-2.2.6.dist-info → r2gg-3.1.4.dist-info}/top_level.txt +0 -0
r2gg/_pivot_to_osm.py
CHANGED

@@ -1,17 +1,17 @@
-from datetime import date
-from math import ceil
 import os
 import time
+from datetime import date
+from math import ceil
 
 from lxml import etree
-from psycopg2.extras import DictCursor
 
 from r2gg._osm_building import writeNode, writeWay, writeWayNds, writeRes, writeWayTags
-from r2gg._sql_building import getQueryByTableAndBoundingBox
 from r2gg._osm_to_pbf import osm_to_pbf
+from r2gg._sql_building import getQueryByTableAndBoundingBox
+from r2gg._database import DatabaseManager
 
 
-def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
+def pivot_to_osm(config, source, db_configs, database: DatabaseManager, logger, output_is_pbf=False):
     """
     Fonction de conversion depuis la bdd pivot vers le fichier osm puis pbf le cas échéant
 
@@ -22,11 +22,10 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
     source: dict
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-
-
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
-
     logger.info("Convert pivot to OSM format for a source")
 
     # Récupération de la date d'extraction
@@ -44,16 +43,14 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
     source_db_config = db_configs[source['mapping']['source']['baseId']]
     input_schema = source_db_config.get('schema')
 
-
-
-
-    cursor.execute(f"select last_value from {input_schema}.nodes_id_seq")
-    vertexSequence = cursor.fetchone()[0]
+    last_value_nodes_query = f"select last_value from {input_schema}.nodes_id_seq"
+    vertexSequence, _ = database.execute_select_fetch_one(last_value_nodes_query, show_duration=True)
+    vertexSequence = vertexSequence[0]
     logger.info(vertexSequence)
 
-
-
-    edgeSequence = cursor.fetchone()[0]
+    last_value_edges_query = f"select last_value from {input_schema}.edges_id_seq"
+    edgeSequence, _ = database.execute_select_fetch_one(last_value_edges_query, show_duration=True)
+    edgeSequence = edgeSequence[0]
     logger.info(edgeSequence)
 
     logger.info("Starting conversion from pivot to OSM")
@@ -70,13 +67,8 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
         with xf.element("osm", attribs):
 
             # Récupération du nombre de nodes
-
-
-            st_execute = time.time()
-            cursor.execute(sql_query)
-            et_execute = time.time()
-            logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-            row = cursor.fetchone()
+            number_of_nodes_query = f"SELECT COUNT(*) as cnt FROM {input_schema}.nodes"
+            row, _ = database.execute_select_fetch_one(number_of_nodes_query, show_duration=True)
             nodesize = row["cnt"]
 
             # Ecriture des nodes
@@ -85,34 +77,21 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
             logger.info(f"Writing nodes: {nodesize} ways to write")
             st_nodes = time.time()
             while offset < nodesize:
-
-
-
-
-
-
-
-
-
-                logger.info("Writing nodes")
-                st_execute = time.time()
-                i = 1
-                while row:
-                    nodeEl = writeNode(row, extraction_date)
-                    xf.write(nodeEl, pretty_print=True)
-                    row = cursor.fetchone()
-                logger.info("%s / %s nodes ajoutés" %(offset, nodesize))
+                sql_query_nodes = getQueryByTableAndBoundingBox(f'{input_schema}.nodes', source['bbox'])
+                sql_query_nodes += " LIMIT {} OFFSET {}".format(batchsize, offset)
+                offset += batchsize
+                logger.info("Writing nodes")
+                for row, count in database.execute_select_fetch_multiple(sql_query_nodes, show_duration=True):
+                    nodeEl = writeNode(row, extraction_date)
+                    xf.write(nodeEl, pretty_print=True)
+
+                logger.info("%s / %s nodes ajoutés" % (offset, nodesize))
             et_nodes = time.time()
-            logger.info("Writing nodes ended. Elapsed time : %s seconds." %(et_nodes - st_nodes))
+            logger.info("Writing nodes ended. Elapsed time : %s seconds." % (et_nodes - st_nodes))
 
             # Récupération du nombre de ways
-
-
-            st_execute = time.time()
-            cursor.execute(sql_query)
-            et_execute = time.time()
-            logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-            row = cursor.fetchone()
+            sql_query_edges_count = f"SELECT COUNT(*) as cnt FROM {input_schema}.edges"
+            row, _ = database.execute_select_fetch_one(sql_query_edges_count, show_duration=True)
             edgesize = row["cnt"]
 
             # Ecriture des ways
@@ -121,64 +100,49 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
             logger.info(f"Writing ways: {edgesize} ways to write")
             st_edges = time.time()
             while offset < edgesize:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    nodeEl = writeNode(node, extraction_date)
-                    xf.write(nodeEl, pretty_print=True)
-                wayEl = writeWayNds(wayEl, row, row['internodes'])
-                wayEl = writeWayTags(wayEl, row)
-                xf.write(wayEl, pretty_print=True)
-                row = cursor.fetchone()
-                logger.info("%s / %s ways ajoutés" %(offset, edgesize))
+                sql_query_edges = getQueryByTableAndBoundingBox(f'{input_schema}.edges', source['bbox'], ['*',
+                    f'{input_schema}.inter_nodes(geom) as internodes'])
+                sql_query_edges += " LIMIT {} OFFSET {}".format(batchsize, offset)
+                offset += batchsize
+                for row, count in database.execute_select_fetch_multiple(sql_query_edges, show_duration=True):
+                    wayEl = writeWay(row, extraction_date)
+                    for node in row['internodes']:
+                        vertexSequence = vertexSequence + 1
+                        node['id'] = vertexSequence
+                        nodeEl = writeNode(node, extraction_date)
+                        xf.write(nodeEl, pretty_print=True)
+                    wayEl = writeWayNds(wayEl, row, row['internodes'])
+                    wayEl = writeWayTags(wayEl, row)
+                    xf.write(wayEl, pretty_print=True)
+
+                logger.info("%s / %s ways ajoutés" % (offset, edgesize))
             et_edges = time.time()
-            logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_edges - st_edges))
+            logger.info("Writing ways ended. Elapsed time : %s seconds." % (et_edges - st_edges))
 
             # Ecriture des restrictions
-
-            logger.info("SQL: {}".format(sql_query3))
-            st_execute = time.time()
-            cursor.execute(sql_query3)
-            et_execute = time.time()
-            logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-            row = cursor.fetchone()
+            sql_query_non_comm = f"select * from {input_schema}.non_comm"
             logger.info("Writing restrictions")
             st_execute = time.time()
             i = 1
-
+            for row, count in database.execute_select_fetch_multiple(sql_query_non_comm, show_duration=True):
                 if row['common_vertex_id'] == -1:
-                    row = cursor.fetchone()
                     i += 1
                     continue
                 ResEl = writeRes(row, i, extraction_date)
                 xf.write(ResEl, pretty_print=True)
-
-
-                logger.info("%s / %s restrictions ajoutés" %(i, cursor.rowcount))
+                if (i % ceil(count / 10) == 0):
+                    logger.info("%s / %s restrictions ajoutés" % (i, count))
                 i += 1
+
             et_execute = time.time()
-            logger.info("Writing restrictions ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+            logger.info("Writing restrictions ended. Elapsed time : %s seconds." % (et_execute - st_execute))
 
     except etree.SerialisationError:
-        logger.
+        logger.warning("WARNING: XML file not closed properly (lxml.etree.SerialisationError)")
 
-    cursor.close()
     end_time = time.time()
-    logger.info("Conversion from pivot to OSM ended. Elapsed time : %s seconds." %(end_time - start_time))
+    logger.info("Conversion from pivot to OSM ended. Elapsed time : %s seconds." % (end_time - start_time))
 
     # osm2pbf : Gestion du format osm.pbf
     if output_is_pbf:
-        osm_to_pbf(filename, filename+'.pbf', logger)
+        osm_to_pbf(filename, filename + '.pbf', logger)
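
Note on the new dependency: r2gg/_database.py (+163 lines) is added in 3.1.4 but its body is not expanded in this diff, so the DatabaseManager interface can only be inferred from the call sites above. The following is a minimal sketch of what the class plausibly looks like, assuming db_config holds psycopg2 connection keyword arguments; names, signatures, and internals are reconstructions, not the shipped code.

import time

import psycopg2
from psycopg2.extras import DictCursor


class DatabaseManager:
    """Hypothetical reconstruction of r2gg._database.DatabaseManager,
    inferred from the 3.1.4 call sites (not the actual shipped code)."""

    def __init__(self, db_config, logger):
        self.logger = logger
        # Assumption: db_config maps directly to psycopg2 connection kwargs.
        self.connection = psycopg2.connect(**db_config)

    def execute_select_fetch_one(self, query, show_duration=False):
        """Run a SELECT and return (first row, elapsed seconds)."""
        start = time.time()
        with self.connection.cursor(cursor_factory=DictCursor) as cursor:
            cursor.execute(query)
            row = cursor.fetchone()
        duration = time.time() - start
        if show_duration:
            self.logger.info("Execution ended. Elapsed time : %s seconds." % duration)
        return row, duration

    def execute_select_fetch_multiple(self, query, show_duration=False, batchsize=None):
        """Generator over a SELECT: with batchsize it yields (batch, rowcount)
        per fetchmany(); without it, (row, rowcount) per row. Both styles
        appear in _pivot_to_osm.py and _pivot_to_pgr.py."""
        start = time.time()
        cursor = self.connection.cursor(cursor_factory=DictCursor)
        cursor.execute(query)
        if show_duration:
            self.logger.info("Execution ended. Elapsed time : %s seconds." % (time.time() - start))
        count = cursor.rowcount
        if batchsize:
            rows = cursor.fetchmany(batchsize)
            while rows:
                yield rows, count
                rows = cursor.fetchmany(batchsize)
        else:
            for row in cursor:
                yield row, count
        cursor.close()

    def execute_update(self, query, values=None, isolation_level=None):
        """Execute a write statement and commit it; isolation_level=0 switches
        to autocommit for statements like VACUUM that cannot run inside a
        transaction block."""
        if isolation_level is not None:
            old_level = self.connection.isolation_level
            self.connection.set_isolation_level(isolation_level)
        with self.connection.cursor() as cursor:
            cursor.execute(query, values)
        if isolation_level is not None:
            self.connection.set_isolation_level(old_level)
        else:
            self.connection.commit()

    def disconnect_working_db(self):
        self.connection.close()

Under this reading, execute_update(..., isolation_level=0) absorbs the set_isolation_level(0) / commit() bookkeeping that 2.2.6 performed by hand around the VACUUM ANALYZE statements in _pivot_to_pgr.py below.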
r2gg/_pivot_to_pgr.py
CHANGED

@@ -7,8 +7,9 @@ from psycopg2.extras import DictCursor
 from r2gg._output_costs_from_costs_config import output_costs_from_costs_config
 from r2gg._read_config import config_from_path
 from r2gg._sql_building import getQueryByTableAndBoundingBox
+from r2gg._database import DatabaseManager
 
-def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
+def pivot_to_pgr(source, cost_calculation_file_path, database_work: DatabaseManager, database_out: DatabaseManager, schema, input_schema, logger):
     """
     Fonction de conversion depuis la bdd pivot vers la base pgr
 
@@ -17,9 +18,9 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
     source: dict
     cost_calculation_file_path: str
         chemin vers le fichier json de configuration des coûts
-
+    database_work: DatabaseManager
         connection à la bdd de travail
-
+    database_out: DatabaseManager
         connection à la bdd pgrouting de sortie
     schema: str
         nom du schéma dans la base de sortie
@@ -28,12 +29,10 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
     logger: logging.Logger
     """
 
-    cursor_in = connection_work.cursor(cursor_factory=DictCursor, name="cursor_in")
     ways_table_name = schema + '.ways'
     # Récupération des coûts à calculer
     costs = config_from_path(cost_calculation_file_path)
 
-    cursor_out = connection_out.cursor()
     # Création de la edge_table pgrouting
     create_table = """
         DROP TABLE IF EXISTS {0};
@@ -64,7 +63,6 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
             nombre_de_voies text,
             insee_commune_gauche text,
             insee_commune_droite text,
-            bande_cyclable text,
             itineraire_vert boolean,
             sens_de_circulation text,
             reserve_aux_bus text,
@@ -79,10 +77,14 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
             matieres_dangereuses_interdites boolean,
             cpx_gestionnaire text,
             cpx_numero_route_europeenne text,
-            cpx_classement_administratif text
+            cpx_classement_administratif text,
+            transport_exceptionnel boolean,
+            vla_par_defaut integer,
+            cout_penalites numeric,
+            vehicule_leger_interdit boolean,
+            cout_vehicule_prioritaire numeric
         );""".format(ways_table_name)
-
-    cursor_out.execute(create_table)
+    database_out.execute_update(create_table)
 
     # Ajout des colonnes de coûts
     add_columns = "ALTER TABLE {} ".format(ways_table_name)
@@ -91,7 +93,7 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
         add_columns += "ADD COLUMN IF NOT EXISTS {} double precision,".format("reverse_" + output["name"])
     add_columns = add_columns[:-1]
     logger.debug("SQL: adding costs columns \n {}".format(add_columns))
-
+    database_out.execute_update(add_columns)
 
     logger.info("Starting conversion")
     start_time = time.time()
@@ -105,34 +107,30 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
             id_from bigint,
             id_to bigint
         );""".format(schema)
-
-    cursor_out.execute(create_non_comm)
+    database_out.execute_update(create_non_comm)
 
     logger.info("Populating turn restrictions")
     tr_query = f"SELECT id_from, id_to FROM {input_schema}.non_comm;"
 
-    logger.debug("SQL: {}".format(tr_query))
-    st_execute = time.time()
-    cursor_in.execute(tr_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    # Insertion petit à petit -> plus performant
-    logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
-    st_execute = time.time()
     index = 0
     batchsize = 10000
-
+    generator = database_work.execute_select_fetch_multiple(tr_query, show_duration=True, batchsize=batchsize)
+    rows, count = next(generator,(None, None))
+    # Insertion petit à petit -> plus performant
+
+    logger.info("SQL: Inserting or updating {} values in out db".format(count))
+
+    st_execute = time.time()
+
     while rows:
         values_str = ""
-        for row in rows:
-            values_str += "(%s, %s, %s),"
-        values_str = values_str[:-1]
-
         # Tuple des valuers à insérer
         values_tuple = ()
         for row in rows:
+            values_str += "(%s, %s, %s),"
            values_tuple += (index, row['id_from'], row['id_to'])
            index += 1
+        values_str = values_str[:-1]
 
        set_on_conflict = (
            "id_from = excluded.id_from,id_to = excluded.id_to"
@@ -144,17 +142,15 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
            ON CONFLICT (id) DO UPDATE
            SET {};
        """.format(schema, values_str, set_on_conflict)
-
-
-        rows = cursor_in.fetchmany(batchsize)
+        database_out.execute_update(sql_insert, values_tuple)
+
+        rows, _ = next(generator,(None, None))
 
    et_execute = time.time()
-    cursor_in.close()
    logger.info("Writing turn restrinctions Done. Elapsed time : %s seconds." %(et_execute - st_execute))
 
    # Noeuds ---------------------------------------------------------------------------------------
    logger.info("Writing vertices...")
-    cursor_in = connection_work.cursor(cursor_factory=DictCursor, name="cursor_in")
    create_nodes = """
        DROP TABLE IF EXISTS {0}_vertices_pgr;
        CREATE TABLE {0}_vertices_pgr(
@@ -165,34 +161,26 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
            eout int,
            the_geom geometry(Point,4326)
        );""".format(ways_table_name)
-
-    cursor_out.execute(create_nodes)
+    database_out.execute_update(create_nodes)
 
    logger.info("Populating vertices")
    nd_query = f"SELECT id, geom FROM {input_schema}.nodes;"
-
-    logger.debug("SQL: {}".format(nd_query))
-    st_execute = time.time()
-    cursor_in.execute(nd_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
    # Insertion petit à petit -> plus performant
    # logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
    st_execute = time.time()
    index = 0
    batchsize = 10000
-
+    generator = database_work.execute_select_fetch_multiple(nd_query, show_duration=True, batchsize=batchsize)
+    rows, count = next(generator, (None, None))
    while rows:
        values_str = ""
-        for row in rows:
-            values_str += "(%s, %s),"
-        values_str = values_str[:-1]
-
        # Tuple des valeurs à insérer
        values_tuple = ()
        for row in rows:
+            values_str += "(%s, %s),"
            values_tuple += (row['id'], row['geom'])
            index += 1
+        values_str = values_str[:-1]
 
        set_on_conflict = (
            "the_geom = excluded.the_geom"
@@ -204,18 +192,15 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
            ON CONFLICT (id) DO UPDATE
            SET {};
        """.format(ways_table_name, values_str, set_on_conflict)
-
-
-        rows = cursor_in.fetchmany(batchsize)
+        database_out.execute_update(sql_insert, values_tuple)
+        rows, _ = next(generator,(None, None))
 
 
    et_execute = time.time()
-    cursor_in.close()
    logger.info("Writing vertices Done. Elapsed time : %s seconds." %(et_execute - st_execute))
 
    # Ways -----------------------------------------------------------------------------------------
    # Colonnes à lire dans la base source (champs classiques + champs servant aux coûts)
-    cursor_in = connection_work.cursor(cursor_factory=DictCursor, name="cursor_in")
    attribute_columns = [
        'id',
        'geom as the_geom',
@@ -241,7 +226,6 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
        'nombre_de_voies as nombre_de_voies',
        'insee_commune_gauche as insee_commune_gauche',
        'insee_commune_droite as insee_commune_droite',
-        'bande_cyclable as bande_cyclable',
        'itineraire_vert as itineraire_vert',
        'sens_de_circulation as sens_de_circulation',
        'reserve_aux_bus as reserve_aux_bus',
@@ -256,7 +240,12 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
        'matieres_dangereuses_interdites as matieres_dangereuses_interdites',
        'cpx_gestionnaire as cpx_gestionnaire',
        'cpx_numero_route_europeenne as cpx_numero_route_europeenne',
-        'cpx_classement_administratif as cpx_classement_administratif'
+        'cpx_classement_administratif as cpx_classement_administratif',
+        'transport_exceptionnel as transport_exceptionnel',
+        'vla_par_defaut as vla_par_defaut',
+        'cout_penalites as cout_penalites',
+        'vehicule_leger_interdit as vehicule_leger_interdit',
+        'cout_vehicule_prioritaire as cout_vehicule_prioritaire'
    ]
    in_columns = attribute_columns.copy()
    for variable in costs["variables"]:
@@ -266,11 +255,8 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
 
    # Ecriture des ways
    sql_query = getQueryByTableAndBoundingBox(f'{input_schema}.edges', source['bbox'], in_columns)
-
-
-    cursor_in.execute(sql_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+    batchsize = 10000
+    generator = database_work.execute_select_fetch_multiple(sql_query, show_duration=True, batchsize=batchsize)
 
    # Chaîne de n %s, pour l'insertion de données via psycopg
    single_value_str = "%s," * (len(attribute_columns) + 2 * len(costs["outputs"]))
@@ -279,24 +265,21 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
    # Insertion petit à petit -> plus performant
    # logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
    st_execute = time.time()
-    batchsize = 10000
    percent = 0
-    rows = cursor_in.fetchmany(batchsize)
+    rows, count = next(generator, (None, None))
    while rows:
-        percent += 1000000 / cursor_in.rowcount
+        percent += 1000000 / count
        # Chaîne permettant l'insertion de valeurs via psycopg
        values_str = ""
-        for row in rows:
-            values_str += "(" + single_value_str + "),"
-        values_str = values_str[:-1]
-
        # Tuple des valuers à insérer
        values_tuple = ()
        for row in rows:
+            values_str += "(" + single_value_str + "),"
            output_costs = output_costs_from_costs_config(costs, row)
            values_tuple += tuple(
                row[ output_columns_name ] for output_columns_name in output_columns_names
            ) + output_costs
+        values_str = values_str[:-1]
 
        output_columns = "("
        for output_columns_name in output_columns_names:
@@ -320,12 +303,10 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
            ON CONFLICT (id) DO UPDATE
            SET {};
        """.format(ways_table_name, output_columns, values_str, set_on_conflict)
-
-
-        rows = cursor_in.fetchmany(batchsize)
+        database_out.execute_update(sql_insert, values_tuple)
+        rows, _ = next(generator,(None, None))
 
    et_execute = time.time()
-    cursor_in.close();
    logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_execute - st_execute))
 
    spacial_indices_query = """
@@ -335,60 +316,31 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
        CLUSTER {0}_vertices_pgr USING ways_vertices_geom_gist ;
        CREATE INDEX IF NOT EXISTS ways_importance_idx ON {0} USING btree (importance);
    """.format(ways_table_name)
-
-    st_execute = time.time()
-    cursor_out.execute(spacial_indices_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    connection_out.commit()
+    database_out.execute_update(spacial_indices_query)
 
    turn_restrictions_indices_query = """
        CREATE INDEX IF NOT EXISTS turn_restrictions_id_key ON {0}.turn_restrictions USING btree (id);
        CREATE INDEX IF NOT EXISTS ways_id_key ON {1} USING btree (id);
        CREATE INDEX IF NOT EXISTS ways_vertices_pgr_id_key ON {1}_vertices_pgr USING btree (id);
    """.format(schema, ways_table_name)
-
-    st_execute = time.time()
-    cursor_out.execute(turn_restrictions_indices_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    connection_out.commit()
+    database_out.execute_update(turn_restrictions_indices_query)
 
-    old_isolation_level = connection_out.isolation_level
-    connection_out.set_isolation_level(0)
 
    # VACCUM ANALYZE for ways
    vacuum_query = f"VACUUM ANALYZE {ways_table_name};"
-
-    st_execute = time.time()
-    cursor_out.execute(vacuum_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+    database_out.execute_update(vacuum_query, isolation_level=0)
 
    # VACCUM ANALYZE for ways_vertices_pgr
    vacuum_query = f"VACUUM ANALYZE {ways_table_name}_vertices_pgr;"
-
-    st_execute = time.time()
-    cursor_out.execute(vacuum_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+    database_out.execute_update(vacuum_query, isolation_level=0)
 
    # VACCUM ANALYZE for turn_restrictions
    vacuum_query = f"VACUUM ANALYZE {schema}.turn_restrictions;"
-
-    st_execute = time.time()
-    cursor_out.execute(vacuum_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-
-    connection_out.set_isolation_level(old_isolation_level)
-    connection_out.commit()
+    database_out.execute_update(vacuum_query, isolation_level=0)
 
-    cursor_out.close()
 
    # Nettoyage du graphe
    logger.info("Cleaning isolated clusters of less than 10 edges...")
-    cursor_isolated = connection_out.cursor()
 
    profile_names = set([ cost['profile'] for cost in source["costs"]])
    st_execute = time.time()
@@ -427,12 +379,10 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
            WHERE {0}.ways.target = ANY(SELECT * from remove_nodes) OR {0}.ways.source = ANY(SELECT * from remove_nodes);
        """.format(schema, profile_name)
        logger.info("SQL: {}".format(clean_graph_query))
-
-        connection_out.commit()
+        database_out.execute_update(clean_graph_query)
 
        et_execute = time.time()
        logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    cursor_isolated.close()
 
    end_time = time.time()
    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." %(end_time - start_time))
r2gg/cli.py
CHANGED

@@ -3,38 +3,30 @@
 """Main CLI entrypoint."""
 
 # Package
-from r2gg.__about__ import (
-    __author__,
-    __cli_usage__,
-    __summary__,
-    __title__,
-    __title_clean__,
-    __uri_homepage__,
-    __version__,
-)
-from r2gg._configure import configure, connect_working_db, disconnect_working_db
+from r2gg._configure import configure
 from r2gg._main import sql_convert, pgr_convert, osm_convert, osrm_convert, valhalla_convert, write_road2_config
+from r2gg._database import DatabaseManager
 
 # ############################################################################
 # ########## MAIN ################
 # ################################
 def sql2pivot():
     config, resource, db_configs, logger = configure()
-
-    sql_convert(config, resource, db_configs, connection, logger)
-    disconnect_working_db(connection, logger)
+    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
+    sql_convert(config, resource, db_configs, database, logger)
+    database.disconnect_working_db()
 
 def pivot2pgrouting():
     config, resource, db_configs, logger = configure()
-
-    pgr_convert(resource, db_configs, connection, logger)
-    disconnect_working_db(connection, logger)
+    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
+    pgr_convert(resource, db_configs, database, logger)
+    database.disconnect_working_db()
 
 def pivot2osm():
     config, resource, db_configs, logger = configure()
-
-    osm_convert(config, resource, db_configs, connection, logger)
-    disconnect_working_db(connection, logger)
+    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
+    osm_convert(config, resource, db_configs, database, logger)
+    database.disconnect_working_db()
 
 def osm2osrm():
     config, resource, _, logger = configure()
@@ -52,20 +44,14 @@ def main():
     """Main CLI entrypoint.
     """
     config, resource, db_configs, logger = configure()
-
-
-
-
-
-        disconnect_working_db(connection, logger)
-    elif (resource['type'] == 'osrm'):
-        config, resource, db_configs, connection, logger = configure()
-        osm_convert(config, resource, db_configs, connection, logger)
-        disconnect_working_db(connection, logger)
+    sql2pivot()
+    if resource['type'] in ['pgr', 'smartpgr']:
+        pivot2pgrouting()
+    elif resource['type'] == 'osrm':
+        pivot2osm()
         osrm_convert(config, resource, logger)
-    elif (resource['type'] == 'valhalla'):
-
-        osm_convert(config, resource, db_configs, connection, logger, True)
+    elif resource['type'] == 'valhalla':
+        pivot2osm()
        valhalla_convert(config, resource, logger)
    else:
        raise ValueError("Wrong resource type, should be in ['pgr',osrm','valhalla','smartpgr']")
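
All three rewritten entry points share one wiring pattern: build a DatabaseManager for the working database named by config["workingSpace"]["baseId"], pass it to the conversion function, then disconnect. A hedged sketch of that flow, reusing the hypothetical DatabaseManager sketched earlier; the try/finally is an illustrative addition, the shipped code calls disconnect_working_db() unconditionally.

from r2gg._configure import configure
from r2gg._database import DatabaseManager
from r2gg._main import sql_convert


def sql2pivot_sketch():
    # Same shape as the new sql2pivot(), with explicit cleanup on failure.
    config, resource, db_configs, logger = configure()
    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
    try:
        sql_convert(config, resource, db_configs, database, logger)
    finally:
        database.disconnect_working_db()

main() now simply composes these entry points, so the per-type branches no longer repeat the configure/connect/convert/disconnect sequence inline as the 2.2.6 version did.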