r2gg: 3.1.0-py3-none-any.whl → 3.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- r2gg/__about__.py +1 -1
- r2gg/_configure.py +11 -61
- r2gg/_database.py +163 -0
- r2gg/_main.py +74 -73
- r2gg/_pivot_to_osm.py +53 -89
- r2gg/_pivot_to_pgr.py +41 -99
- r2gg/cli.py +18 -32
- {r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/METADATA +1 -1
- {r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/RECORD +13 -12
- {r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/WHEEL +0 -0
- {r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/entry_points.txt +0 -0
- {r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/licenses/LICENSE +0 -0
- {r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/top_level.txt +0 -0
r2gg/__about__.py
CHANGED

@@ -34,7 +34,7 @@ __uri_repository__ = "https://github.com/IGNF/route-graph-generator/"
 __uri_tracker__ = f"{__uri_repository__}issues/"
 __uri__ = __uri_repository__
 
-__version__ = "3.1.0"
+__version__ = "3.1.4"
 __version_info__ = tuple(
     [
         int(num) if num.isdigit() else num
r2gg/_configure.py
CHANGED

@@ -8,13 +8,14 @@ from r2gg._read_config import config_from_path
 
 # Définition des niveaux de log
 LEVELS = {
-    'CRITICAL'
-    'ERROR'
-    'WARNING'
-    'INFO'
-    'DEBUG'
+    'CRITICAL': logging.CRITICAL,
+    'ERROR': logging.ERROR,
+    'WARNING': logging.WARNING,
+    'INFO': logging.INFO,
+    'DEBUG': logging.DEBUG
 }
 
+
 def configure():
     """
     Fonction de lecture du fichier de configuration passé en argument

@@ -27,8 +28,6 @@ def configure():
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
     logger: logging.Logger
     """
     parser = argparse.ArgumentParser()

@@ -40,7 +39,7 @@
     config = config_from_path(config_path)['generation']
 
     # Récupération de la configuration du log
-    logs_config = config_from_path(
+    logs_config = config_from_path(config['general']['logs']['configFile'])
 
     # Gestion du fichiers de logs non spécifié
     try:

@@ -51,7 +50,7 @@
     # Configuration du module logging
     logging.basicConfig(
         format='%(asctime)s %(message)s',
-        level=LEVELS[
+        level=LEVELS[logs_config['level'].upper()],
         handlers=[
             logging.FileHandler(logs_file),
             logging.StreamHandler()

@@ -66,10 +65,10 @@
     # Configuration des bases de données précisées dans la config
     for base in config['bases']:
         if base['type'] == 'bdd':
-            db_configs[
-            db_configs[base['id']].update({"schema":base['schema']})
+            db_configs[base['id']] = config_from_path(base['configFile'])
+            db_configs[base['id']].update({"schema": base['schema']})
 
-    #
+    # Récupération de l'objet permettant de générer la ressource
     resource = config['resource']
 
     # Création de l'espace de travail

@@ -77,52 +76,3 @@
         os.makedirs(config['workingSpace']['directory'])
 
     return config, resource, db_configs, logger
-
-def connect_working_db(config, db_configs, logger):
-    """
-    Fonction de connexion à la BDD de travail
-
-    Parameters
-    ----------
-    config: dict
-        dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
-    db_configs: dict
-        dictionnaire correspondant aux configurations des bdd
-    logger: logging.Logger
-    Returns
-    -------
-    connection: psycopg2.connection
-        connection à la bdd de travail
-
-    """
-
-    # Configuration de la bdd de travail
-    work_db_config = db_configs[ config['workingSpace']['baseId'] ]
-
-    # Récupération des paramètres de la bdd
-    host = work_db_config.get('host')
-    dbname = work_db_config.get('database')
-    user = work_db_config.get('user')
-    password = work_db_config.get('password')
-    port = work_db_config.get('port')
-    connect_args = 'host=%s dbname=%s user=%s password=%s port=%s' %(host, dbname, user, password, port)
-
-    logger.info("Connecting to work database")
-    connection = psycopg2.connect(connect_args)
-    connection.set_client_encoding('UTF8')
-
-    return connection
-
-def disconnect_working_db(connection, logger):
-    """
-    Fonction de connexion à la BDD de travail
-
-    Parameters
-    ----------
-    connection: psycopg2.connection
-        connection à la bdd de travail
-    logger: logging.Logger
-    """
-
-    connection.close()
-    logger.info("Connection to work database closed")
r2gg/_database.py
ADDED

@@ -0,0 +1,163 @@
import time
from os import getenv

import psycopg2
from psycopg2 import OperationalError, DatabaseError, InterfaceError
from psycopg2.extras import DictCursor
import logging

TIMEOUT = int(getenv("SQL_STATEMENT_TIMEOUT", 0))
RETRY = int(getenv("SQL_STATEMENT_RETRY_ATTEMPTS", 3))
DELAY = int(getenv("SQL_DELAY_BETWEEN_STATEMENTS", 30))


def database_retry_decorator(func):
    def wrapper(self, *args, **kwargs):
        attempt = 1
        while attempt <= RETRY:
            try:
                self.ensure_connection()
                yield from func(self, *args, **kwargs)
                return

            except (OperationalError, DatabaseError, InterfaceError) as e:
                if attempt >= RETRY:
                    self.logger.error(f"Query failed after {RETRY} attempts: {str(e).rstrip()}")
                    return

                self.logger.error(
                    f"Attempt {attempt}/{RETRY} failed ({str(e).rstrip()}), retrying in {DELAY} seconds"
                )
                time.sleep(DELAY)
                attempt += 1
                try:
                    if self._connection:
                        self._connection.rollback()
                except Exception as e:
                    self.logger.error(f"Connection rollback failed {str(e).rstrip()}")
        return

    return wrapper


class DatabaseManager:
    def __init__(self, db_configs, logger):
        self.logger = logger
        self._work_db_config = db_configs
        self._connection = self.connect_working_db()

    def connect_working_db(self):
        """
        Fonction de connexion à la BDD de travail

        Parameters
        ----------
        config: dict
            dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
        db_configs: dict
            dictionnaire correspondant aux configurations des bdd
        Returns
        -------
        connection: psycopg2.connection
            connection à la bdd de travail

        """
        # Récupération des paramètres de la bdd
        host = self._work_db_config.get("host")
        dbname = self._work_db_config.get("database")
        user = self._work_db_config.get("user")
        password = self._work_db_config.get("password")
        port = self._work_db_config.get("port")
        connect_args = "host=%s dbname=%s user=%s password=%s port=%s" % (host, dbname, user, password, port)

        self.logger.info("Connecting to work database")
        connection = psycopg2.connect(connect_args)
        connection.set_client_encoding("UTF8")

        return connection

    def disconnect_working_db(self):
        """
        Fonction de connexion à la BDD de travail

        Parameters
        ----------
        connection: psycopg2.connection
            connection à la bdd de travail
        logger: logging.Logger
        """
        if self._connection:
            self._connection.close()
            self.logger.info("Connection to work database closed")

    def ensure_connection(self):
        """
        Ensure the connection is alive; reconnect if needed.
        """
        try:
            if self._connection is None or getattr(self._connection, "closed", 1) != 0:
                self.logger.info("Connection is closed or missing; reconnecting")
                self._connection = self.connect_working_db()
            else:
                with self._connection.cursor() as cur:
                    cur.execute("SELECT 1")
        except Exception as e:
            self.logger.error(
                f"Something is wrong with the connection: {str(e).rstrip()}; reconnecting in {DELAY} seconds")
            self.disconnect_working_db()
            time.sleep(DELAY)
            self._connection = self.connect_working_db()

    def execute_select_query(self, cursor, query, show_duration):
        if TIMEOUT:
            cursor.execute("SET statement_timeout = %s", (1000 * TIMEOUT,))  # timeout in milliseconds

        if show_duration:
            self.logger.info("SQL: {}".format(query))
            st_execute = time.time()
            cursor.execute(query)
            et_execute = time.time()
            self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
        else:
            cursor.execute(query)

    @database_retry_decorator
    def execute_select_fetch_multiple(self, query, batchsize=1, show_duration=False):
        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
            self.execute_select_query(cursor, query, show_duration)
            rows = cursor.fetchmany(batchsize)
            count = cursor.rowcount
            while rows:
                if batchsize == 1:
                    rows = rows.pop()
                yield rows, count
                rows = cursor.fetchmany(batchsize)
            self._connection.commit()
            return

    # the method below should be used as a generator function otherwise use execute_update
    @database_retry_decorator
    def execute_update_query(self, query, params=None, isolation_level=None, show_duration=False):
        if show_duration:
            self.logger.info("SQL: {}".format(query))
            st_execute = time.time()
        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
            old_isolation_level = self._connection.isolation_level
            if isolation_level is not None:
                self._connection.set_isolation_level(isolation_level)
            cursor.execute(query, params)
            self._connection.commit()
            if show_duration:
                et_execute = time.time()
                self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
            self._connection.set_isolation_level(old_isolation_level)
            yield  # the decorator database_retry_decorator only supports generators
            return

    def execute_update(self, query, params=None, isolation_level=None):
        next(self.execute_update_query(query, params=params, isolation_level=isolation_level), None)

    def execute_select_fetch_one(self, query, show_duration=False):
        gen = self.execute_select_fetch_multiple(query, 1, show_duration)
        row, count = next(gen, (None, None))
        return row, count
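Taken together with the call sites later in this diff, the intended usage pattern is: build one `DatabaseManager` per database, consume selects as a generator of `(row, count)` pairs (with the default `batchsize=1` each `row` is a single `DictRow`), run one-shot statements through `execute_update`, and close with `disconnect_working_db`. Retry and timeout behaviour is governed by the `SQL_STATEMENT_TIMEOUT`, `SQL_STATEMENT_RETRY_ATTEMPTS` and `SQL_DELAY_BETWEEN_STATEMENTS` environment variables read at import time. A minimal sketch; the connection parameters and queries are illustrative, not taken from the package:

```python
import logging

from r2gg._database import DatabaseManager

logger = logging.getLogger("r2gg")

# Hypothetical working-database configuration; in the package this dict comes from
# db_configs[config["workingSpace"]["baseId"]].
work_db_config = {"host": "localhost", "database": "pivot", "user": "r2gg",
                  "password": "secret", "port": 5432}

database = DatabaseManager(work_db_config, logger)

# Streaming select: with the default batchsize=1, each yielded item is one DictRow.
for row, count in database.execute_select_fetch_multiple("SELECT id FROM nodes", show_duration=True):
    print(row["id"], "of", count)

# One-shot statement; isolation_level=0 (autocommit) is what the package uses for VACUUM.
database.execute_update("VACUUM ANALYZE nodes;", isolation_level=0)

database.disconnect_working_db()
```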
r2gg/_main.py
CHANGED

@@ -1,26 +1,25 @@
 import json
 import multiprocessing
 import os
-import json
 import time
 from datetime import datetime
 
-import psycopg2
 # https://github.com/andialbrecht/sqlparse
 import sqlparse
 
+from r2gg._database import DatabaseManager
+from r2gg._file_copier import copy_file_locally
 from r2gg._lua_builder import build_lua
+from r2gg._osm_to_pbf import osm_to_pbf
+from r2gg._path_converter import convert_path
 from r2gg._pivot_to_osm import pivot_to_osm
 from r2gg._pivot_to_pgr import pivot_to_pgr
 from r2gg._read_config import config_from_path
 from r2gg._subprocess_execution import subprocess_execution
-from r2gg._path_converter import convert_path
-from r2gg._file_copier import copy_file_locally
 from r2gg._valhalla_lua_builder import build_valhalla_lua
-from r2gg._osm_to_pbf import osm_to_pbf
 
 
-def sql_convert(config, resource, db_configs, connection, logger):
+def sql_convert(config, resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd source vers la bdd pivot
 

@@ -32,8 +31,8 @@ def sql_convert(config, resource, db_configs, connection, logger):
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 

@@ -57,7 +56,7 @@ def sql_convert(config, resource, db_configs, connection, logger):
     used_bases = []
 
     # Il y a potentiellement une conversion par source indiquée dans la ressource
-    for source in resource[
+    for source in resource['sources']:
 
         logger.info("Create pivot of source: " + source['id'])
 

@@ -77,12 +76,12 @@ def sql_convert(config, resource, db_configs, connection, logger):
         else:
            logger.info("Mapping not done")
 
-        #
-        source_db_config = db_configs[
+        # Configuration de la bdd source
+        source_db_config = db_configs[source['mapping']['source']['baseId']]
         used_bases.append(source['mapping']['source']['baseId'])
 
         # Configuration de la bdd de travail utilisée pour ce pivot
-        work_db_config = db_configs[
+        work_db_config = db_configs[config['workingSpace']['baseId']]
 
         # Récupération de la bbox
         bbox = [float(coord) for coord in source["bbox"].split(",")]

@@ -94,9 +93,7 @@ def sql_convert(config, resource, db_configs, connection, logger):
         logger.info("Create source on bbox: " + source["bbox"])
 
         # Lancement du script SQL de conversion source --> pivot
-
-        with open( source['mapping']['conversion']['file'] ) as sql_script:
-            cur = connection.cursor()
+        with open(source['mapping']['conversion']['file']) as sql_script:
             logger.info("Executing SQL conversion script")
             instructions = sqlparse.split(sql_script.read().format(user=work_db_config.get('user'),
                 input_schema=source_db_config.get('schema'),

@@ -107,37 +104,43 @@ def sql_convert(config, resource, db_configs, connection, logger):
             for instruction in instructions:
                 if instruction == '':
                     continue
-                logger.debug("SQL:\n{}\n".format(instruction)
+                logger.debug("SQL:\n{}\n".format(instruction))
+                isolation_level = None
+                if instruction.strip().lower().startswith("vacuum"):
+                    isolation_level = 0
                 st_instruction = time.time()
-
-
-
-
-
-
-
-
+                database.execute_update(instruction,
+                    {
+                        'bdpwd': source_db_config.get('password'),
+                        'bdport': source_db_config.get('port'),
+                        'bdhost': source_db_config.get('host'),
+                        'bduser': source_db_config.get('user'),
+                        'dbname': source_db_config.get('database'),
+                        'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax
+                    },
+                    isolation_level=isolation_level
+                )
                 et_instruction = time.time()
-                logger.info("Execution ended. Elapsed time : %s seconds." %(et_instruction - st_instruction))
+                logger.info("Execution ended. Elapsed time : %s seconds." % (et_instruction - st_instruction))
 
         et_sql_conversion = time.time()
 
-        logger.info(
+        logger.info(
+            "Conversion from BDD to pivot ended. Elapsed time : %s seconds." % (et_sql_conversion - st_sql_conversion))
+
 
-def pgr_convert(
+def pgr_convert(resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd pivot vers la bdd pgrouting
 
     Parameters
     ----------
-    config: dict
-        dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
     resource: dict
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-
-
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 

@@ -150,19 +153,13 @@ def pgr_convert(config, resource, db_configs, connection, logger):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
         logger.info("Source id : " + source["id"])
 
         # Configuration et connection à la base de sortie
-        out_db_config = db_configs[
-        host = out_db_config.get('host')
-        dbname = out_db_config.get('database')
-        user = out_db_config.get('user')
-        password = out_db_config.get('password')
-        port = out_db_config.get('port')
-        connect_args = 'host=%s dbname=%s user=%s password=%s port=%s' %(host, dbname, user, password, port)
+        out_db_config = db_configs[source['storage']['base']['baseId']]
         logger.info("Connecting to output database")
-
+        database_out = DatabaseManager(out_db_config, logger)
 
         schema_out = out_db_config.get('schema')
 

@@ -172,14 +169,14 @@ def pgr_convert(config, resource, db_configs, connection, logger):
         cost_calculation_files_paths = {cost["compute"]["configuration"]["storage"]["file"] for cost in source["costs"]}
 
         for cost_calculation_file_path in cost_calculation_files_paths:
-            pivot_to_pgr(source, cost_calculation_file_path,
-
+            pivot_to_pgr(source, cost_calculation_file_path, database, database_out, schema_out, input_schema, logger)
+            database_out.disconnect_working_db()
 
     et_pivot_to_pgr = time.time()
-    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." %(et_pivot_to_pgr - st_pivot_to_pgr))
+    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." % (et_pivot_to_pgr - st_pivot_to_pgr))
 
 
-def osm_convert(config, resource, db_configs,
+def osm_convert(config, resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd pivot vers un fichier osm
 

@@ -191,8 +188,8 @@ def osm_convert(config, resource, db_configs, connection, logger):
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 

@@ -224,7 +221,7 @@ def osm_convert(config, resource, db_configs, connection, logger):
     # Plusieurs sources peuvent référencer le même mapping mais changer plus tard dans la génération
     found_base = False
     found_id = ''
-    for sid,sub in used_bases.items():
+    for sid, sub in used_bases.items():
         if sub == source['mapping']['source']['baseId']:
             found_base = True
             found_id = sid

@@ -251,11 +248,12 @@ def osm_convert(config, resource, db_configs, connection, logger):
 
         else:
             logger.info("Mapping not already done")
-            pivot_to_osm(config, source, db_configs,
+            pivot_to_osm(config, source, db_configs, database, logger, convert_osm_to_pbf)
+
+        used_bases[source['id']] = source['mapping']['source']['baseId']
 
-        used_bases[ source['id'] ] = source['mapping']['source']['baseId']
 
-def osrm_convert(config, resource, logger, build_lua_from_cost_config
+def osrm_convert(config, resource, logger, build_lua_from_cost_config=True):
     """
     Fonction de conversion depuis le fichier osm vers les fichiers osrm
 

@@ -282,7 +280,7 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
 
         logger.info('LUA part')
         lua_file = source["cost"]["compute"]["storage"]["file"]

@@ -293,7 +291,7 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
         costs_config = config_from_path(config_file)
         cost_name = source["cost"]["compute"]["configuration"]["name"]
 
-        if cost_name not in [
+        if cost_name not in [output["name"] for output in costs_config["outputs"]]:
             raise ValueError("cost_name must be in cost configuration")
 
         with open(lua_file, "w") as lua_f:

@@ -332,15 +330,15 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
         start_command = time.time()
         subprocess_execution(osrm_extract_args, logger)
         end_command = time.time()
-        logger.info("OSRM extract ended. Elapsed time : %s seconds." %(end_command - start_command))
+        logger.info("OSRM extract ended. Elapsed time : %s seconds." % (end_command - start_command))
         subprocess_execution(osrm_contract_args, logger)
         final_command = time.time()
-        logger.info("OSRM contract ended. Elapsed time : %s seconds." %(final_command - end_command))
+        logger.info("OSRM contract ended. Elapsed time : %s seconds." % (final_command - end_command))
         subprocess_execution(rm_args, logger)
         i += 1
 
 
-def valhalla_convert(config, resource, logger, build_lua_from_cost_config
+def valhalla_convert(config, resource, logger, build_lua_from_cost_config=True):
     """
     Fonction de conversion depuis le fichier .osm.pbf vers les fichiers valhalla
 

@@ -367,7 +365,7 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
 
         logger.info('Looking for OSM PBF file')
 

@@ -406,15 +404,15 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True
 
         start_command = time.time()
         valhalla_build_config_args = ["valhalla_build_config",
-
-
-
-
-
-
-
-
-        subprocess_execution(valhalla_build_config_args, logger, outfile
+            "--mjolnir-tile-dir", source["storage"]["dir"],
+            "--mjolnir-tile-extract", source["storage"]["tar"],
+            # Modification des limites par défaut du service : 10h pour isochrone et 1000km pour iso distance
+            # contre 2h et 200km par défaut
+            "--service-limits-isochrone-max-time-contour", "600",
+            "--service-limits-isochrone-max-distance-contour", "1000",
+            # Ajout de l'autorisation à exclure les ponts/tunnels/péages
+            "--service-limits-allow-hard-exclusions", "True"]
+        subprocess_execution(valhalla_build_config_args, logger, outfile=source["storage"]["config"])
         # Nécessaire le temps que le fichier s'écrive...
         time.sleep(1)
         # Ajout du graph custom dans la config valhalla (impossible via les paramètres du build_config)

@@ -432,10 +430,10 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True
         subprocess_execution(valhalla_build_extract_args, logger)
 
         final_command = time.time()
-        logger.info("Valhalla tiles built. Elapsed time : %s seconds." %(final_command - start_command))
+        logger.info("Valhalla tiles built. Elapsed time : %s seconds." % (final_command - start_command))
 
 
-def write_road2_config(config, resource, logger, convert_file_paths
+def write_road2_config(config, resource, logger, convert_file_paths=True):
     """
     Fonction pour l'écriture du fichier de ressource
 

@@ -456,7 +454,8 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
 
     for source in resource["sources"]:
 
-        source_file = os.path.join(config["outputs"]["configurations"]["sources"]["storage"]["directory"],
+        source_file = os.path.join(config["outputs"]["configurations"]["sources"]["storage"]["directory"],
+                                   source['id'] + ".source")
         logger.info("Writing source file : " + source_file)
 
         # On modifie la source en fonction de son type

@@ -474,10 +473,11 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
             bid_tmp = source["storage"]["base"]["baseId"]
             for base in config["bases"]:
                 if base["id"] == bid_tmp:
-                    db_file_out = convert_path(base["configFile"],
+                    db_file_out = convert_path(base["configFile"],
+                                               config["outputs"]["configurations"]["databases"]["storage"]["directory"])
                     copy_file_locally(base["configFile"], db_file_out)
-                    source["storage"]["base"].update({"dbConfig":db_file_out})
-                    source["storage"]["base"].update({"schema":base["schema"]})
+                    source["storage"]["base"].update({"dbConfig": db_file_out})
+                    source["storage"]["base"].update({"schema": base["schema"]})
             source["storage"]["base"].pop("baseId", None)
             for cost in source["costs"]:
                 cost.pop("compute", None)

@@ -492,7 +492,8 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
         source_ids.append(source['id'])
 
     # On passe à la ressource
-    resource_file = os.path.join(config["outputs"]["configurations"]["resource"]["storage"]["directory"],
+    resource_file = os.path.join(config["outputs"]["configurations"]["resource"]["storage"]["directory"],
+                                 resource['id'] + ".resource")
     logger.info("Writing resource file: " + resource_file)
 
     # Récupération de la date d'extraction

@@ -500,7 +501,7 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
     date_file = os.path.join(work_dir_config, "r2gg.date")
     f = open(date_file, "r")
     extraction_date = f.read()
-    logger.info("extraction date to add in resource (from "+ date_file +"): " + extraction_date)
+    logger.info("extraction date to add in resource (from " + date_file + "): " + extraction_date)
    f.close()
 
    # On fait le dossier s'il n'existe pas
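One behavioural detail worth calling out in `sql_convert`: each SQL instruction is now inspected, and anything starting with VACUUM is executed with `isolation_level=0`. PostgreSQL refuses to run VACUUM inside a transaction block, and level 0 is psycopg2's autocommit mode, so the statement is sent outside a transaction. A helper-style restatement of that branch; the function name is mine, the real code inlines the check:

```python
def isolation_level_for(instruction):
    """Return 0 (psycopg2 autocommit) for VACUUM statements, None otherwise.

    PostgreSQL rejects VACUUM inside a transaction block, so sql_convert passes
    isolation_level=0 down to DatabaseManager.execute_update for those statements.
    """
    if instruction.strip().lower().startswith("vacuum"):
        return 0
    return None


print(isolation_level_for("VACUUM ANALYZE pivot.edges;"))          # 0
print(isolation_level_for("UPDATE pivot.edges SET importance = 1;"))  # None
```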
r2gg/_pivot_to_osm.py
CHANGED

@@ -1,17 +1,17 @@
-from datetime import date
-from math import ceil
 import os
 import time
+from datetime import date
+from math import ceil
 
 from lxml import etree
-from psycopg2.extras import DictCursor
 
 from r2gg._osm_building import writeNode, writeWay, writeWayNds, writeRes, writeWayTags
-from r2gg._sql_building import getQueryByTableAndBoundingBox
 from r2gg._osm_to_pbf import osm_to_pbf
+from r2gg._sql_building import getQueryByTableAndBoundingBox
+from r2gg._database import DatabaseManager
 
 
-def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf = False):
+def pivot_to_osm(config, source, db_configs, database: DatabaseManager, logger, output_is_pbf=False):
     """
     Fonction de conversion depuis la bdd pivot vers le fichier osm puis pbf le cas échéant
 

@@ -22,11 +22,10 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf =
     source: dict
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
-
     logger.info("Convert pivot to OSM format for a source")
 
     # Récupération de la date d'extraction

@@ -44,16 +43,14 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf =
     source_db_config = db_configs[source['mapping']['source']['baseId']]
     input_schema = source_db_config.get('schema')
 
-
-
-
-    cursor.execute(f"select last_value from {input_schema}.nodes_id_seq")
-    vertexSequence = cursor.fetchone()[0]
+    last_value_nodes_query = f"select last_value from {input_schema}.nodes_id_seq"
+    vertexSequence, _ = database.execute_select_fetch_one(last_value_nodes_query, show_duration=True)
+    vertexSequence = vertexSequence[0]
     logger.info(vertexSequence)
 
-
-
-    edgeSequence =
+    last_value_edges_query = f"select last_value from {input_schema}.edges_id_seq"
+    edgeSequence, _ = database.execute_select_fetch_one(last_value_edges_query, show_duration=True)
+    edgeSequence = edgeSequence[0]
     logger.info(edgeSequence)
 
     logger.info("Starting conversion from pivot to OSM")

@@ -70,13 +67,8 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf =
            with xf.element("osm", attribs):
 
                # Récupération du nombre de nodes
-
-
-                st_execute = time.time()
-                cursor.execute(sql_query)
-                et_execute = time.time()
-                logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-                row = cursor.fetchone()
+                number_of_nodes_query = f"SELECT COUNT(*) as cnt FROM {input_schema}.nodes"
+                row, _ = database.execute_select_fetch_one(number_of_nodes_query, show_duration=True)
                nodesize = row["cnt"]
 
                # Ecriture des nodes

@@ -85,34 +77,21 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf =
                logger.info(f"Writing nodes: {nodesize} ways to write")
                st_nodes = time.time()
                while offset < nodesize:
-
-
-
-
-
-
-
-
-
-                    logger.info("Writing nodes")
-                    st_execute = time.time()
-                    i = 1
-                    while row:
-                        nodeEl = writeNode(row, extraction_date)
-                        xf.write(nodeEl, pretty_print=True)
-                        row = cursor.fetchone()
-                    logger.info("%s / %s nodes ajoutés" %(offset, nodesize))
+                    sql_query_nodes = getQueryByTableAndBoundingBox(f'{input_schema}.nodes', source['bbox'])
+                    sql_query_nodes += " LIMIT {} OFFSET {}".format(batchsize, offset)
+                    offset += batchsize
+                    logger.info("Writing nodes")
+                    for row, count in database.execute_select_fetch_multiple(sql_query_nodes, show_duration=True):
+                        nodeEl = writeNode(row, extraction_date)
+                        xf.write(nodeEl, pretty_print=True)
+
+                    logger.info("%s / %s nodes ajoutés" % (offset, nodesize))
                et_nodes = time.time()
-                logger.info("Writing nodes ended. Elapsed time : %s seconds." %(et_nodes - st_nodes))
+                logger.info("Writing nodes ended. Elapsed time : %s seconds." % (et_nodes - st_nodes))
 
                # Récupération du nombre de ways
-
-
-                st_execute = time.time()
-                cursor.execute(sql_query)
-                et_execute = time.time()
-                logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-                row = cursor.fetchone()
+                sql_query_edges_count = f"SELECT COUNT(*) as cnt FROM {input_schema}.edges"
+                row, _ = database.execute_select_fetch_one(sql_query_edges_count, show_duration=True)
                edgesize = row["cnt"]
 
                # Ecriture des ways

@@ -121,64 +100,49 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf =
                logger.info(f"Writing ways: {edgesize} ways to write")
                st_edges = time.time()
                while offset < edgesize:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        nodeEl = writeNode(node, extraction_date)
-                        xf.write(nodeEl, pretty_print=True)
-                        wayEl = writeWayNds(wayEl, row, row['internodes'])
-                        wayEl = writeWayTags(wayEl, row)
-                        xf.write(wayEl, pretty_print=True)
-                        row = cursor.fetchone()
-                    logger.info("%s / %s ways ajoutés" %(offset, edgesize))
+                    sql_query_edges = getQueryByTableAndBoundingBox(f'{input_schema}.edges', source['bbox'], ['*',
+                        f'{input_schema}.inter_nodes(geom) as internodes'])
+                    sql_query_edges += " LIMIT {} OFFSET {}".format(batchsize, offset)
+                    offset += batchsize
+                    for row, count in database.execute_select_fetch_multiple(sql_query_edges, show_duration=True):
+                        wayEl = writeWay(row, extraction_date)
+                        for node in row['internodes']:
+                            vertexSequence = vertexSequence + 1
+                            node['id'] = vertexSequence
+                            nodeEl = writeNode(node, extraction_date)
+                            xf.write(nodeEl, pretty_print=True)
+                        wayEl = writeWayNds(wayEl, row, row['internodes'])
+                        wayEl = writeWayTags(wayEl, row)
+                        xf.write(wayEl, pretty_print=True)
+
+                    logger.info("%s / %s ways ajoutés" % (offset, edgesize))
                et_edges = time.time()
-                logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_edges - st_edges))
+                logger.info("Writing ways ended. Elapsed time : %s seconds." % (et_edges - st_edges))
 
                # Ecriture des restrictions
-
-                logger.info("SQL: {}".format(sql_query3))
-                st_execute = time.time()
-                cursor.execute(sql_query3)
-                et_execute = time.time()
-                logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-                row = cursor.fetchone()
+                sql_query_non_comm = f"select * from {input_schema}.non_comm"
                logger.info("Writing restrictions")
                st_execute = time.time()
                i = 1
-
+                for row, count in database.execute_select_fetch_multiple(sql_query_non_comm, show_duration=True):
                    if row['common_vertex_id'] == -1:
-                        row = cursor.fetchone()
                        i += 1
                        continue
                    ResEl = writeRes(row, i, extraction_date)
                    xf.write(ResEl, pretty_print=True)
-
-
-                    logger.info("%s / %s restrictions ajoutés" %(i, cursor.rowcount))
+                    if (i % ceil(count / 10) == 0):
+                        logger.info("%s / %s restrictions ajoutés" % (i, count))
                    i += 1
+
                et_execute = time.time()
-                logger.info("Writing restrictions ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+                logger.info("Writing restrictions ended. Elapsed time : %s seconds." % (et_execute - st_execute))
 
    except etree.SerialisationError:
-        logger.
+        logger.warning("WARNING: XML file not closed properly (lxml.etree.SerialisationError)")
 
-    cursor.close()
    end_time = time.time()
-    logger.info("Conversion from pivot to OSM ended. Elapsed time : %s seconds." %(end_time - start_time))
+    logger.info("Conversion from pivot to OSM ended. Elapsed time : %s seconds." % (end_time - start_time))
 
    # osm2pbf : Gestion du format osm.pbf
    if output_is_pbf:
-        osm_to_pbf(filename, filename+'.pbf', logger)
+        osm_to_pbf(filename, filename + '.pbf', logger)
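The node and way export now pages through the pivot tables with LIMIT/OFFSET queries and lets `execute_select_fetch_multiple` (default `batchsize=1`, so one `DictRow` per iteration) drive the writing loop instead of a raw cursor. A stripped-down sketch of that pattern; the schema name, batch size and the `print` stand-in for the lxml serialisation are illustrative:

```python
def export_nodes(database, input_schema="pivot", batchsize=100000):
    """Sketch of the paging loop used for nodes and ways in pivot_to_osm.

    `database` is a r2gg._database.DatabaseManager; the real code builds OSM XML
    elements with writeNode()/writeWay() and lxml instead of printing.
    """
    row, _ = database.execute_select_fetch_one(f"SELECT COUNT(*) as cnt FROM {input_schema}.nodes")
    nodesize = row["cnt"]

    offset = 0
    while offset < nodesize:
        query = f"SELECT * FROM {input_schema}.nodes LIMIT {batchsize} OFFSET {offset}"
        offset += batchsize
        for row, count in database.execute_select_fetch_multiple(query, show_duration=True):
            print(dict(row))  # stand-in for writeNode(row, extraction_date) + xf.write(...)
```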
r2gg/_pivot_to_pgr.py
CHANGED

@@ -7,8 +7,9 @@ from psycopg2.extras import DictCursor
 from r2gg._output_costs_from_costs_config import output_costs_from_costs_config
 from r2gg._read_config import config_from_path
 from r2gg._sql_building import getQueryByTableAndBoundingBox
+from r2gg._database import DatabaseManager
 
-def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection_out, schema, input_schema, logger):
+def pivot_to_pgr(source, cost_calculation_file_path, database_work: DatabaseManager, database_out: DatabaseManager, schema, input_schema, logger):
     """
     Fonction de conversion depuis la bdd pivot vers la base pgr
 

@@ -17,9 +18,9 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
     source: dict
     cost_calculation_file_path: str
         chemin vers le fichier json de configuration des coûts
-    connection_work: psycopg2.connection
+    database_work: DatabaseManager
         connection à la bdd de travail
-    connection_out: psycopg2.connection
+    database_out: DatabaseManager
         connection à la bdd pgrouting de sortie
     schema: str
         nom du schéma dans la base de sortie

@@ -28,12 +29,10 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
     logger: logging.Logger
     """
 
-    cursor_in = connection_work.cursor(cursor_factory=DictCursor, name="cursor_in")
     ways_table_name = schema + '.ways'
     # Récupération des coûts à calculer
     costs = config_from_path(cost_calculation_file_path)
 
-    cursor_out = connection_out.cursor()
     # Création de la edge_table pgrouting
     create_table = """
     DROP TABLE IF EXISTS {0};

@@ -85,8 +84,7 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
         vehicule_leger_interdit boolean,
         cout_vehicule_prioritaire numeric
     );""".format(ways_table_name)
-
-    cursor_out.execute(create_table)
+    database_out.execute_update(create_table)
 
     # Ajout des colonnes de coûts
     add_columns = "ALTER TABLE {} ".format(ways_table_name)

@@ -95,7 +93,7 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
         add_columns += "ADD COLUMN IF NOT EXISTS {} double precision,".format("reverse_" + output["name"])
     add_columns = add_columns[:-1]
     logger.debug("SQL: adding costs columns \n {}".format(add_columns))
-
+    database_out.execute_update(add_columns)
 
     logger.info("Starting conversion")
     start_time = time.time()

@@ -109,34 +107,30 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
         id_from bigint,
         id_to bigint
     );""".format(schema)
-
-    cursor_out.execute(create_non_comm)
+    database_out.execute_update(create_non_comm)
 
     logger.info("Populating turn restrictions")
     tr_query = f"SELECT id_from, id_to FROM {input_schema}.non_comm;"
 
-    logger.debug("SQL: {}".format(tr_query))
-    st_execute = time.time()
-    cursor_in.execute(tr_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    # Insertion petit à petit -> plus performant
-    logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
-    st_execute = time.time()
     index = 0
     batchsize = 10000
-
+    generator = database_work.execute_select_fetch_multiple(tr_query, show_duration=True, batchsize=batchsize)
+    rows, count = next(generator,(None, None))
+    # Insertion petit à petit -> plus performant
+
+    logger.info("SQL: Inserting or updating {} values in out db".format(count))
+
+    st_execute = time.time()
+
     while rows:
         values_str = ""
-        for row in rows:
-            values_str += "(%s, %s, %s),"
-        values_str = values_str[:-1]
-
         # Tuple des valuers à insérer
         values_tuple = ()
         for row in rows:
+            values_str += "(%s, %s, %s),"
             values_tuple += (index, row['id_from'], row['id_to'])
             index += 1
+        values_str = values_str[:-1]
 
         set_on_conflict = (
             "id_from = excluded.id_from,id_to = excluded.id_to"

@@ -148,17 +142,15 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
             ON CONFLICT (id) DO UPDATE
             SET {};
         """.format(schema, values_str, set_on_conflict)
-
-
-        rows =
+        database_out.execute_update(sql_insert, values_tuple)
+
+        rows, _ = next(generator,(None, None))
 
     et_execute = time.time()
-    cursor_in.close()
     logger.info("Writing turn restrinctions Done. Elapsed time : %s seconds." %(et_execute - st_execute))
 
     # Noeuds ---------------------------------------------------------------------------------------
     logger.info("Writing vertices...")
-    cursor_in = connection_work.cursor(cursor_factory=DictCursor, name="cursor_in")
     create_nodes = """
     DROP TABLE IF EXISTS {0}_vertices_pgr;
     CREATE TABLE {0}_vertices_pgr(

@@ -169,34 +161,26 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
         eout int,
         the_geom geometry(Point,4326)
     );""".format(ways_table_name)
-
-    cursor_out.execute(create_nodes)
+    database_out.execute_update(create_nodes)
 
     logger.info("Populating vertices")
     nd_query = f"SELECT id, geom FROM {input_schema}.nodes;"
-
-    logger.debug("SQL: {}".format(nd_query))
-    st_execute = time.time()
-    cursor_in.execute(nd_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
     # Insertion petit à petit -> plus performant
     # logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
     st_execute = time.time()
     index = 0
     batchsize = 10000
-
+    generator = database_work.execute_select_fetch_multiple(nd_query, show_duration=True, batchsize=batchsize)
+    rows, count = next(generator, (None, None))
     while rows:
         values_str = ""
-        for row in rows:
-            values_str += "(%s, %s),"
-        values_str = values_str[:-1]
-
         # Tuple des valeurs à insérer
         values_tuple = ()
         for row in rows:
+            values_str += "(%s, %s),"
             values_tuple += (row['id'], row['geom'])
             index += 1
+        values_str = values_str[:-1]
 
         set_on_conflict = (
             "the_geom = excluded.the_geom"

@@ -208,18 +192,15 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
             ON CONFLICT (id) DO UPDATE
             SET {};
         """.format(ways_table_name, values_str, set_on_conflict)
-
-
-        rows = cursor_in.fetchmany(batchsize)
+        database_out.execute_update(sql_insert, values_tuple)
+        rows, _ = next(generator,(None, None))
 
 
     et_execute = time.time()
-    cursor_in.close()
     logger.info("Writing vertices Done. Elapsed time : %s seconds." %(et_execute - st_execute))
 
     # Ways -----------------------------------------------------------------------------------------
     # Colonnes à lire dans la base source (champs classiques + champs servant aux coûts)
-    cursor_in = connection_work.cursor(cursor_factory=DictCursor, name="cursor_in")
     attribute_columns = [
         'id',
         'geom as the_geom',

@@ -274,11 +255,8 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
 
     # Ecriture des ways
     sql_query = getQueryByTableAndBoundingBox(f'{input_schema}.edges', source['bbox'], in_columns)
-
-
-    cursor_in.execute(sql_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+    batchsize = 10000
+    generator = database_work.execute_select_fetch_multiple(sql_query, show_duration=True, batchsize=batchsize)
 
     # Chaîne de n %s, pour l'insertion de données via psycopg
     single_value_str = "%s," * (len(attribute_columns) + 2 * len(costs["outputs"]))

@@ -287,24 +265,21 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
     # Insertion petit à petit -> plus performant
     # logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
     st_execute = time.time()
-    batchsize = 10000
     percent = 0
-    rows =
+    rows, count = next(generator, (None, None))
     while rows:
-        percent += 1000000 /
+        percent += 1000000 / count
         # Chaîne permettant l'insertion de valeurs via psycopg
         values_str = ""
-        for row in rows:
-            values_str += "(" + single_value_str + "),"
-        values_str = values_str[:-1]
-
         # Tuple des valuers à insérer
         values_tuple = ()
         for row in rows:
+            values_str += "(" + single_value_str + "),"
             output_costs = output_costs_from_costs_config(costs, row)
             values_tuple += tuple(
                 row[ output_columns_name ] for output_columns_name in output_columns_names
             ) + output_costs
+        values_str = values_str[:-1]
 
         output_columns = "("
         for output_columns_name in output_columns_names:

@@ -328,12 +303,10 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
             ON CONFLICT (id) DO UPDATE
             SET {};
         """.format(ways_table_name, output_columns, values_str, set_on_conflict)
-
-
-        rows = cursor_in.fetchmany(batchsize)
+        database_out.execute_update(sql_insert, values_tuple)
+        rows, _ = next(generator,(None, None))
 
     et_execute = time.time()
-    cursor_in.close();
     logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_execute - st_execute))
 
     spacial_indices_query = """

@@ -343,60 +316,31 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
     CLUSTER {0}_vertices_pgr USING ways_vertices_geom_gist ;
     CREATE INDEX IF NOT EXISTS ways_importance_idx ON {0} USING btree (importance);
     """.format(ways_table_name)
-
-    st_execute = time.time()
-    cursor_out.execute(spacial_indices_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    connection_out.commit()
+    database_out.execute_update(spacial_indices_query)
 
     turn_restrictions_indices_query = """
     CREATE INDEX IF NOT EXISTS turn_restrictions_id_key ON {0}.turn_restrictions USING btree (id);
     CREATE INDEX IF NOT EXISTS ways_id_key ON {1} USING btree (id);
     CREATE INDEX IF NOT EXISTS ways_vertices_pgr_id_key ON {1}_vertices_pgr USING btree (id);
     """.format(schema, ways_table_name)
-
-    st_execute = time.time()
-    cursor_out.execute(turn_restrictions_indices_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-    connection_out.commit()
+    database_out.execute_update(turn_restrictions_indices_query)
 
-    old_isolation_level = connection_out.isolation_level
-    connection_out.set_isolation_level(0)
 
     # VACCUM ANALYZE for ways
     vacuum_query = f"VACUUM ANALYZE {ways_table_name};"
-
-    st_execute = time.time()
-    cursor_out.execute(vacuum_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+    database_out.execute_update(vacuum_query, isolation_level=0)
 
     # VACCUM ANALYZE for ways_vertices_pgr
     vacuum_query = f"VACUUM ANALYZE {ways_table_name}_vertices_pgr;"
-
-    st_execute = time.time()
-    cursor_out.execute(vacuum_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
+    database_out.execute_update(vacuum_query, isolation_level=0)
 
     # VACCUM ANALYZE for turn_restrictions
     vacuum_query = f"VACUUM ANALYZE {schema}.turn_restrictions;"
-
-    st_execute = time.time()
-    cursor_out.execute(vacuum_query)
-    et_execute = time.time()
-    logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-
-    connection_out.set_isolation_level(old_isolation_level)
-    connection_out.commit()
+    database_out.execute_update(vacuum_query, isolation_level=0)
 
-    cursor_out.close()
 
     # Nettoyage du graphe
     logger.info("Cleaning isolated clusters of less than 10 edges...")
-    cursor_isolated = connection_out.cursor()
 
     profile_names = set([ cost['profile'] for cost in source["costs"]])
     st_execute = time.time()

@@ -435,12 +379,10 @@ def pivot_to_pgr(source, cost_calculation_file_path, connection_work, connection
         WHERE {0}.ways.target = ANY(SELECT * from remove_nodes) OR {0}.ways.source = ANY(SELECT * from remove_nodes);
         """.format(schema, profile_name)
         logger.info("SQL: {}".format(clean_graph_query))
-
-        connection_out.commit()
+        database_out.execute_update(clean_graph_query)
 
         et_execute = time.time()
         logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
-        cursor_isolated.close()
 
     end_time = time.time()
     logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." %(end_time - start_time))
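All of the cursor juggling between the work database and the pgRouting output database is replaced by one recurring pattern: pull batches from `database_work` through the `execute_select_fetch_multiple` generator, build a multi-row `INSERT ... ON CONFLICT` statement, and push it with `database_out.execute_update`. A reduced sketch of that transfer loop for the turn restrictions; the INSERT column list is assumed (it is not visible in the hunks), the rest mirrors the diff:

```python
def copy_turn_restrictions(database_work, database_out, input_schema="pivot", schema="pgr", batchsize=10000):
    """Sketch of the batch copy pattern used for turn restrictions, vertices and ways."""
    tr_query = f"SELECT id_from, id_to FROM {input_schema}.non_comm;"
    generator = database_work.execute_select_fetch_multiple(tr_query, show_duration=True, batchsize=batchsize)

    index = 0
    rows, count = next(generator, (None, None))
    while rows:
        values_str = ""
        values_tuple = ()
        for row in rows:
            values_str += "(%s, %s, %s),"
            values_tuple += (index, row['id_from'], row['id_to'])
            index += 1
        values_str = values_str[:-1]

        sql_insert = (
            f"INSERT INTO {schema}.turn_restrictions (id, id_from, id_to) "  # column list assumed
            f"VALUES {values_str} "
            "ON CONFLICT (id) DO UPDATE SET id_from = excluded.id_from, id_to = excluded.id_to;"
        )
        database_out.execute_update(sql_insert, values_tuple)
        rows, _ = next(generator, (None, None))
```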
r2gg/cli.py
CHANGED

@@ -3,38 +3,30 @@
 """Main CLI entrypoint."""
 
 # Package
-from r2gg.__about__ import (
-    __author__,
-    __cli_usage__,
-    __summary__,
-    __title__,
-    __title_clean__,
-    __uri_homepage__,
-    __version__,
-)
-from r2gg._configure import configure, connect_working_db, disconnect_working_db
+from r2gg._configure import configure
 from r2gg._main import sql_convert, pgr_convert, osm_convert, osrm_convert, valhalla_convert, write_road2_config
+from r2gg._database import DatabaseManager
 
 # ############################################################################
 # ########## MAIN ################
 # ################################
 def sql2pivot():
     config, resource, db_configs, logger = configure()
-
-    sql_convert(config, resource, db_configs,
-    disconnect_working_db(
+    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
+    sql_convert(config, resource, db_configs, database, logger)
+    database.disconnect_working_db()
 
 def pivot2pgrouting():
     config, resource, db_configs, logger = configure()
-
-    pgr_convert(
-    disconnect_working_db(
+    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
+    pgr_convert(resource, db_configs, database, logger)
+    database.disconnect_working_db()
 
 def pivot2osm():
     config, resource, db_configs, logger = configure()
-
-    osm_convert(config, resource, db_configs,
-    disconnect_working_db(
+    database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
+    osm_convert(config, resource, db_configs, database, logger)
+    database.disconnect_working_db()
 
 def osm2osrm():
     config, resource, _, logger = configure()

@@ -52,20 +44,14 @@ def main():
     """Main CLI entrypoint.
     """
     config, resource, db_configs, logger = configure()
-
-
-
-
-
-    disconnect_working_db(connection, logger)
-    elif (resource['type'] == 'osrm'):
-        config, resource, db_configs, connection, logger = configure()
-        osm_convert(config, resource, db_configs, connection, logger)
-        disconnect_working_db(connection, logger)
+    sql2pivot()
+    if resource['type'] in ['pgr', 'smartpgr']:
+        pivot2pgrouting()
+    elif resource['type'] == 'osrm':
+        pivot2osm()
         osrm_convert(config, resource, logger)
-    elif
-
-        osm_convert(config, resource, db_configs, connection, logger, True)
+    elif resource['type'] == 'valhalla':
+        pivot2osm()
         valhalla_convert(config, resource, logger)
     else:
         raise ValueError("Wrong resource type, should be in ['pgr',osrm','valhalla','smartpgr']")
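Each entry point now builds its own `DatabaseManager` from the working-space base configuration before calling the conversion function, and `main()` simply chains the entry points according to `resource['type']`. Because the retry and timeout constants in `r2gg._database` are read at import time, they have to be in the environment before r2gg is imported; an illustrative way to tune them when driving an entry point from Python (the values, and the assumption that the configuration path is passed on the command line as usual, are examples):

```python
import os

# Read at import time by r2gg._database, so set them before importing r2gg (example values).
os.environ["SQL_STATEMENT_TIMEOUT"] = "900"        # per-statement timeout in seconds (0 disables it)
os.environ["SQL_STATEMENT_RETRY_ATTEMPTS"] = "5"   # attempts before a query is given up
os.environ["SQL_DELAY_BETWEEN_STATEMENTS"] = "30"  # seconds between attempts

from r2gg.cli import sql2pivot

# configure() reads the generation configuration path from the command line (argparse),
# so sys.argv must carry it exactly as when the console script is run.
sql2pivot()
```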
{r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: r2gg
-Version: 3.1.0
+Version: 3.1.4
 Summary: Route Graph Generator (r2gg) est un script Python qui permet la génération de graphes pour des moteurs de calcul d'itinéraire
 Home-page: https://github.com/IGNF/route-graph-generator/
 Author: IGNF
{r2gg-3.1.0.dist-info → r2gg-3.1.4.dist-info}/RECORD
CHANGED

@@ -1,23 +1,24 @@
-r2gg/__about__.py,sha256=
+r2gg/__about__.py,sha256=VjF-b03qrVBDGQ-KCnD7lEiODARNWwvwp8KVejqA1Cc,1343
 r2gg/__init__.py,sha256=zERkbHrNMbNZleuOxsfA7afQExXxpP7-IWVP1IFzNNs,88
-r2gg/_configure.py,sha256=
+r2gg/_configure.py,sha256=HGLvnHa_XUV_BGBL_Hn-llaRDH6JK2svdXK6vo61_GU,2405
+r2gg/_database.py,sha256=xXBkxGyjNRRJzIGUKDhB5OWq0G_FV6KlB5uHJnOXb_c,6306
 r2gg/_file_copier.py,sha256=NzDd6ZqxQsQ_EulXg6jZxECUMjlJ0VBtr40-c1Rde-w,578
 r2gg/_lua_builder.py,sha256=W00BWFkRoKSyBgcWCl00hxBNqvE3Yf7Aw6DeLLwfO5M,10420
-r2gg/_main.py,sha256=
+r2gg/_main.py,sha256=vxXsSaocfeQfMDvamiRCEJPESwygMs47LQU25gHpHSY,22368
 r2gg/_osm_building.py,sha256=MFFY3EYOVO4rPsYk90Oda70KH1DKW2rlRj8XT4Vx_4U,4227
 r2gg/_osm_to_pbf.py,sha256=ZCGaqao4r2xkwIvKjQSOBd7TVXzO_KDAT2PptnCuAoY,1088
 r2gg/_output_costs_from_costs_config.py,sha256=g7Qy4tlUIOA7DJcwXW9ZxhcOJ5Srq1c8hufylI2e0m8,6208
 r2gg/_path_converter.py,sha256=CTtFHucxTSDB89TMbTO7Q0VqRCs2295GTA4_3cftUVc,340
-r2gg/_pivot_to_osm.py,sha256=
-r2gg/_pivot_to_pgr.py,sha256=
+r2gg/_pivot_to_osm.py,sha256=hucBK63QJSkJxNv9bfcNPRs0Ky0vIolccELrGPq3a2s,6933
+r2gg/_pivot_to_pgr.py,sha256=39dLpl0_HG-0RLp4vlRqwhXBOzdkxaYhp3SdXxS869k,15469
 r2gg/_read_config.py,sha256=VQ6d6Oi1aKHwwURA3KrbpWHK0zP18aJ98XQJiHZb8oI,721
 r2gg/_sql_building.py,sha256=DefYIerZ0k_yltgJFaSzTXAZPSSUH7WAxN3d5FceoWw,840
 r2gg/_subprocess_execution.py,sha256=ipsynsXohonN9YfmxX7DFTFCvJWsvrLDuA5Y-0QDUDM,1603
 r2gg/_valhalla_lua_builder.py,sha256=vNUnsu4yYQflnNv49-ElxrfIqa4b30vJP740ab7M7rY,13816
-r2gg/cli.py,sha256=
-r2gg-3.1.0.dist-info/licenses/LICENSE,sha256=
-r2gg-3.1.0.dist-info/METADATA,sha256=
-r2gg-3.1.0.dist-info/WHEEL,sha256=
-r2gg-3.1.0.dist-info/entry_points.txt,sha256=
-r2gg-3.1.0.dist-info/top_level.txt,sha256=
-r2gg-3.1.0.dist-info/RECORD,,
+r2gg/cli.py,sha256=jeMiLc1UVAhNFNe_p8nRnyF8V_7kliqIzJsLG1MKpIo,2174
+r2gg-3.1.4.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+r2gg-3.1.4.dist-info/METADATA,sha256=3nyIXee9UgSuU5CvmAisLZekI0jBS9B0jVbjFRTUivU,5199
+r2gg-3.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+r2gg-3.1.4.dist-info/entry_points.txt,sha256=Km7XbbcVI9jy1TQI0raQfRyKdNGAUIwVkkBG5YeQP4k,275
+r2gg-3.1.4.dist-info/top_level.txt,sha256=fj9IaWXORCdMRcnWWZ7LmvOG4W_sVH8J4BUWf1pM37c,5
+r2gg-3.1.4.dist-info/RECORD,,
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|