r2gg 3.1.0.tar.gz → 3.1.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {r2gg-3.1.0 → r2gg-3.1.5}/PKG-INFO +1 -1
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/__about__.py +1 -1
- r2gg-3.1.5/r2gg/_configure.py +78 -0
- r2gg-3.1.5/r2gg/_database.py +163 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_main.py +74 -73
- r2gg-3.1.5/r2gg/_pivot_to_osm.py +148 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_pivot_to_pgr.py +41 -99
- r2gg-3.1.5/r2gg/cli.py +62 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg.egg-info/PKG-INFO +1 -1
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg.egg-info/SOURCES.txt +1 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/tests/test_cli.py +2 -10
- r2gg-3.1.0/r2gg/_configure.py +0 -128
- r2gg-3.1.0/r2gg/_pivot_to_osm.py +0 -184
- r2gg-3.1.0/r2gg/cli.py +0 -76
- {r2gg-3.1.0 → r2gg-3.1.5}/LICENSE +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/README.md +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/__init__.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_file_copier.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_lua_builder.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_osm_building.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_osm_to_pbf.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_output_costs_from_costs_config.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_path_converter.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_read_config.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_sql_building.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_subprocess_execution.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_valhalla_lua_builder.py +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg.egg-info/dependency_links.txt +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg.egg-info/entry_points.txt +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg.egg-info/requires.txt +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/r2gg.egg-info/top_level.txt +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/setup.cfg +0 -0
- {r2gg-3.1.0 → r2gg-3.1.5}/setup.py +0 -0
{r2gg-3.1.0 → r2gg-3.1.5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: r2gg
-Version: 3.1.0
+Version: 3.1.5
 Summary: Route Graph Generator (r2gg) est un script Python qui permet la génération de graphes pour des moteurs de calcul d'itinéraire
 Home-page: https://github.com/IGNF/route-graph-generator/
 Author: IGNF
{r2gg-3.1.0 → r2gg-3.1.5}/r2gg/__about__.py
@@ -34,7 +34,7 @@ __uri_repository__ = "https://github.com/IGNF/route-graph-generator/"
 __uri_tracker__ = f"{__uri_repository__}issues/"
 __uri__ = __uri_repository__
 
-__version__ = "3.1.0"
+__version__ = "3.1.5"
 __version_info__ = tuple(
     [
         int(num) if num.isdigit() else num
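The version bump is the only change here; __version_info__ is derived from the string at import time. A minimal sketch of that idiom, assuming the comprehension simply splits __version__ on dots (the split logic sits outside the hunk shown):

# Hypothetical reconstruction of the version-tuple idiom; only the
# conditional int() conversion appears in the hunk above.
__version__ = "3.1.5"
__version_info__ = tuple(
    int(num) if num.isdigit() else num
    for num in __version__.split(".")
)
print(__version_info__)  # (3, 1, 5)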
r2gg-3.1.5/r2gg/_configure.py (new file)
@@ -0,0 +1,78 @@
+import argparse
+import logging
+import os
+
+import psycopg2
+
+from r2gg._read_config import config_from_path
+
+# Définition des niveaux de log
+LEVELS = {
+    'CRITICAL': logging.CRITICAL,
+    'ERROR': logging.ERROR,
+    'WARNING': logging.WARNING,
+    'INFO': logging.INFO,
+    'DEBUG': logging.DEBUG
+}
+
+
+def configure():
+    """
+    Fonction de lecture du fichier de configuration passé en argument
+
+    Returns
+    -------
+    config: dict
+        dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
+    resource: dict
+        dictionnaire correspondant à la resource décrite dans le fichier passé en argument
+    db_configs: dict
+        dictionnaire correspondant aux configurations des bdd
+    logger: logging.Logger
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('config_file_path', type=str)
+    args = parser.parse_args()
+    config_path = args.config_file_path
+
+    # Récupération de l'objet 'génération' qui contient toute la config
+    config = config_from_path(config_path)['generation']
+
+    # Récupération de la configuration du log
+    logs_config = config_from_path(config['general']['logs']['configFile'])
+
+    # Gestion du fichiers de logs non spécifié
+    try:
+        logs_file = logs_config['filename']
+    except KeyError:
+        logs_file = '/dev/null'
+
+    # Configuration du module logging
+    logging.basicConfig(
+        format='%(asctime)s %(message)s',
+        level=LEVELS[logs_config['level'].upper()],
+        handlers=[
+            logging.FileHandler(logs_file),
+            logging.StreamHandler()
+        ])
+
+    # Initialisation du logger
+    logger = logging.getLogger(__name__)
+    logger.info("Log initialized")
+
+    # Todo : Créer une fonction qui vérifie la configuration
+    db_configs = {}
+    # Configuration des bases de données précisées dans la config
+    for base in config['bases']:
+        if base['type'] == 'bdd':
+            db_configs[base['id']] = config_from_path(base['configFile'])
+            db_configs[base['id']].update({"schema": base['schema']})
+
+    # Récupération de l'objet permettant de générer la ressource
+    resource = config['resource']
+
+    # Création de l'espace de travail
+    if not os.path.exists(config['workingSpace']['directory']):
+        os.makedirs(config['workingSpace']['directory'])
+
+    return config, resource, db_configs, logger
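configure() implies a particular configuration layout. A hypothetical, minimal example of that shape, inferred only from the keys the function reads (real r2gg configuration files are richer, and every value below is a placeholder):

# Minimal sketch of the dict that config_from_path() would need to return
# for configure() to run; all paths and ids are illustrative.
config = {
    "generation": {
        "general": {"logs": {"configFile": "/path/to/log_config.json"}},
        "workingSpace": {"directory": "/tmp/r2gg-work"},
        "bases": [
            {"id": "work_db", "type": "bdd",
             "configFile": "/path/to/db_config.json", "schema": "public"}
        ],
        "resource": {"id": "my_resource"}
    }
}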
r2gg-3.1.5/r2gg/_database.py (new file)
@@ -0,0 +1,163 @@
+import time
+from os import getenv
+
+import psycopg2
+from psycopg2 import OperationalError, DatabaseError, InterfaceError
+from psycopg2.extras import DictCursor
+import logging
+
+TIMEOUT = int(getenv("SQL_STATEMENT_TIMEOUT", 0))
+RETRY = int(getenv("SQL_STATEMENT_RETRY_ATTEMPTS", 3))
+DELAY = int(getenv("SQL_DELAY_BETWEEN_STATEMENTS", 30))
+
+
+def database_retry_decorator(func):
+    def wrapper(self, *args, **kwargs):
+        attempt = 1
+        while attempt <= RETRY:
+            try:
+                self.ensure_connection()
+                yield from func(self, *args, **kwargs)
+                return
+
+            except (OperationalError, DatabaseError, InterfaceError) as e:
+                if attempt >= RETRY:
+                    self.logger.error(f"Query failed after {RETRY} attempts: {str(e).rstrip()}")
+                    raise e
+
+                self.logger.error(
+                    f"Attempt {attempt}/{RETRY} failed ({str(e).rstrip()}), retrying in {DELAY} seconds"
+                )
+                time.sleep(DELAY)
+                attempt += 1
+                try:
+                    if self._connection:
+                        self._connection.rollback()
+                except Exception as e:
+                    self.logger.error(f"Connection rollback failed {str(e).rstrip()}")
+        return
+
+    return wrapper
+
+
+class DatabaseManager:
+    def __init__(self, db_configs, logger):
+        self.logger = logger
+        self._work_db_config = db_configs
+        self._connection = self.connect_working_db()
+
+    def connect_working_db(self):
+        """
+        Fonction de connexion à la BDD de travail
+
+        Parameters
+        ----------
+        config: dict
+            dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
+        db_configs: dict
+            dictionnaire correspondant aux configurations des bdd
+        Returns
+        -------
+        connection: psycopg2.connection
+            connection à la bdd de travail
+
+        """
+        # Récupération des paramètres de la bdd
+        host = self._work_db_config.get("host")
+        dbname = self._work_db_config.get("database")
+        user = self._work_db_config.get("user")
+        password = self._work_db_config.get("password")
+        port = self._work_db_config.get("port")
+        connect_args = "host=%s dbname=%s user=%s password=%s port=%s" % (host, dbname, user, password, port)
+
+        self.logger.info("Connecting to work database")
+        connection = psycopg2.connect(connect_args)
+        connection.set_client_encoding("UTF8")
+
+        return connection
+
+    def disconnect_working_db(self):
+        """
+        Fonction de connexion à la BDD de travail
+
+        Parameters
+        ----------
+        connection: psycopg2.connection
+            connection à la bdd de travail
+        logger: logging.Logger
+        """
+        if self._connection:
+            self._connection.close()
+            self.logger.info("Connection to work database closed")
+
+    def ensure_connection(self):
+        """
+        Ensure the connection is alive; reconnect if needed.
+        """
+        try:
+            if self._connection is None or getattr(self._connection, "closed", 1) != 0:
+                self.logger.info("Connection is closed or missing; reconnecting")
+                self._connection = self.connect_working_db()
+            else:
+                with self._connection.cursor() as cur:
+                    cur.execute("SELECT 1")
+        except Exception as e:
+            self.logger.error(
+                f"Something is wrong with the connection: {str(e).rstrip()}; reconnecting in {DELAY} seconds")
+            self.disconnect_working_db()
+            time.sleep(DELAY)
+            self._connection = self.connect_working_db()
+
+    def execute_select_query(self, cursor, query, show_duration):
+        if TIMEOUT:
+            cursor.execute("SET statement_timeout = %s", (1000 * TIMEOUT,))  # timeout in milliseconds
+
+        if show_duration:
+            self.logger.info("SQL: {}".format(query))
+            st_execute = time.time()
+            cursor.execute(query)
+            et_execute = time.time()
+            self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
+        else:
+            cursor.execute(query)
+
+    @database_retry_decorator
+    def execute_select_fetch_multiple(self, query, batchsize=1, show_duration=False):
+        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
+            self.execute_select_query(cursor, query, show_duration)
+            rows = cursor.fetchmany(batchsize)
+            count = cursor.rowcount
+            while rows:
+                if batchsize == 1:
+                    rows = rows.pop()
+                yield rows, count
+                rows = cursor.fetchmany(batchsize)
+        self._connection.commit()
+        return
+
+    # the method below should be used as a generator function otherwise use execute_update
+    @database_retry_decorator
+    def execute_update_query(self, query, params=None, isolation_level=None, show_duration=False):
+        if show_duration :
+            self.logger.info("SQL: {}".format(query))
+            st_execute = time.time()
+        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
+            old_isolation_level = self._connection.isolation_level
+            if isolation_level is not None:
+                self._connection.set_isolation_level(isolation_level)
+            cursor.execute(query, params)
+            self._connection.commit()
+            if show_duration:
+                et_execute = time.time()
+                self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
+            self._connection.set_isolation_level(old_isolation_level)
+        yield  # the decorator database_retry_decorator only supports generators
+        return
+
+    def execute_update(self, query, params=None, isolation_level=None):
+        next(self.execute_update_query(query, params=params, isolation_level=isolation_level), None)
+
+    def execute_select_fetch_one(self, query, show_duration=False):
+        gen = self.execute_select_fetch_multiple(query, 1, show_duration)
+        row, count = next(gen, (None, None))
+        return row, count
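The retry decorator wraps generator methods: the wrapped function must yield, which is why execute_update_query() ends in a bare yield and execute_update() drains it with next(). A hypothetical usage sketch of the new class (connection parameters and queries are illustrative, not taken from the package):

import logging

from r2gg._database import DatabaseManager

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("r2gg")

# The config dict mirrors the keys connect_working_db() reads
db = DatabaseManager({"host": "localhost", "database": "pivot",
                      "user": "r2gg", "password": "secret", "port": 5432}, logger)

# Non-SELECT statements: execute_update() drains the retry-wrapped generator
db.execute_update("CREATE TABLE IF NOT EXISTS demo (id integer)")

# SELECTs are consumed lazily; each iteration yields (row, rowcount)
for row, count in db.execute_select_fetch_multiple("SELECT id FROM demo", batchsize=1):
    print(row, count)

db.disconnect_working_db()

Because the decorator retries from the top of the wrapped generator, a mid-iteration failure restarts the whole query rather than resuming the cursor, which is consistent with the rollback() call in its except branch.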
{r2gg-3.1.0 → r2gg-3.1.5}/r2gg/_main.py
@@ -1,26 +1,25 @@
 import json
 import multiprocessing
 import os
-import json
 import time
 from datetime import datetime
 
-import psycopg2
 # https://github.com/andialbrecht/sqlparse
 import sqlparse
 
+from r2gg._database import DatabaseManager
+from r2gg._file_copier import copy_file_locally
 from r2gg._lua_builder import build_lua
+from r2gg._osm_to_pbf import osm_to_pbf
+from r2gg._path_converter import convert_path
 from r2gg._pivot_to_osm import pivot_to_osm
 from r2gg._pivot_to_pgr import pivot_to_pgr
 from r2gg._read_config import config_from_path
 from r2gg._subprocess_execution import subprocess_execution
-from r2gg._path_converter import convert_path
-from r2gg._file_copier import copy_file_locally
 from r2gg._valhalla_lua_builder import build_valhalla_lua
-from r2gg._osm_to_pbf import osm_to_pbf
 
 
-def sql_convert(config, resource, db_configs, connection, logger):
+def sql_convert(config, resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd source vers la bdd pivot
 
@@ -32,8 +31,8 @@ def sql_convert(config, resource, db_configs, connection, logger):
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 
@@ -57,7 +56,7 @@ def sql_convert(config, resource, db_configs, connection, logger):
     used_bases = []
 
     # Il y a potentiellement une conversion par source indiquée dans la ressource
-    for source in resource[
+    for source in resource['sources']:
 
         logger.info("Create pivot of source: " + source['id'])
 
@@ -77,12 +76,12 @@ def sql_convert(config, resource, db_configs, connection, logger):
         else:
             logger.info("Mapping not done")
 
-        #
-        source_db_config = db_configs[
+        # Configuration de la bdd source
+        source_db_config = db_configs[source['mapping']['source']['baseId']]
         used_bases.append(source['mapping']['source']['baseId'])
 
         # Configuration de la bdd de travail utilisée pour ce pivot
-        work_db_config = db_configs[
+        work_db_config = db_configs[config['workingSpace']['baseId']]
 
         # Récupération de la bbox
         bbox = [float(coord) for coord in source["bbox"].split(",")]
@@ -94,9 +93,7 @@ def sql_convert(config, resource, db_configs, connection, logger):
         logger.info("Create source on bbox: " + source["bbox"])
 
         # Lancement du script SQL de conversion source --> pivot
-
-        with open( source['mapping']['conversion']['file'] ) as sql_script:
-            cur = connection.cursor()
+        with open(source['mapping']['conversion']['file']) as sql_script:
             logger.info("Executing SQL conversion script")
             instructions = sqlparse.split(sql_script.read().format(user=work_db_config.get('user'),
                                                                    input_schema=source_db_config.get('schema'),
@@ -107,37 +104,43 @@ def sql_convert(config, resource, db_configs, connection, logger):
             for instruction in instructions:
                 if instruction == '':
                     continue
-                logger.debug("SQL:\n{}\n".format(instruction)
+                logger.debug("SQL:\n{}\n".format(instruction))
+                isolation_level = None
+                if instruction.strip().lower().startswith("vacuum"):
+                    isolation_level = 0
                 st_instruction = time.time()
-
-
-
-
-
-
-
-
+                database.execute_update(instruction,
+                                        {
+                                            'bdpwd': source_db_config.get('password'),
+                                            'bdport': source_db_config.get('port'),
+                                            'bdhost': source_db_config.get('host'),
+                                            'bduser': source_db_config.get('user'),
+                                            'dbname': source_db_config.get('database'),
+                                            'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax
+                                        },
+                                        isolation_level=isolation_level
+                                        )
                 et_instruction = time.time()
-                logger.info("Execution ended. Elapsed time : %s seconds." %(et_instruction - st_instruction))
+                logger.info("Execution ended. Elapsed time : %s seconds." % (et_instruction - st_instruction))
 
     et_sql_conversion = time.time()
 
-    logger.info("Conversion from BDD to pivot ended. Elapsed time : %s seconds." %(et_sql_conversion - st_sql_conversion))
+    logger.info(
+        "Conversion from BDD to pivot ended. Elapsed time : %s seconds." % (et_sql_conversion - st_sql_conversion))
+
 
-def pgr_convert(config, resource, db_configs, connection, logger):
+def pgr_convert(resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd pivot vers la bdd pgrouting
 
     Parameters
     ----------
-    config: dict
-        dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
     resource: dict
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 
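The notable behavioural change in this hunk: VACUUM statements now run with isolation_level = 0, psycopg2's autocommit level, because PostgreSQL refuses to execute VACUUM inside a transaction block and psycopg2 opens one implicitly. A standalone sketch of the same idea (the DSN and statement are illustrative):

# Why isolation_level=0 works: ISOLATION_LEVEL_AUTOCOMMIT == 0 in
# psycopg2.extensions, so no transaction is opened around the statement.
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

conn = psycopg2.connect("host=localhost dbname=pivot user=r2gg")  # illustrative DSN
old_level = conn.isolation_level
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cur:
    cur.execute("VACUUM ANALYZE")  # would fail inside a transaction block
conn.set_isolation_level(old_level)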
@@ -150,19 +153,13 @@ def pgr_convert(config, resource, db_configs, connection, logger):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
         logger.info("Source id : " + source["id"])
 
         # Configuration et connection à la base de sortie
-        out_db_config = db_configs[
-        host = out_db_config.get('host')
-        dbname = out_db_config.get('database')
-        user = out_db_config.get('user')
-        password = out_db_config.get('password')
-        port = out_db_config.get('port')
-        connect_args = 'host=%s dbname=%s user=%s password=%s port=%s' %(host, dbname, user, password, port)
+        out_db_config = db_configs[source['storage']['base']['baseId']]
         logger.info("Connecting to output database")
-
+        database_out = DatabaseManager(out_db_config, logger)
 
         schema_out = out_db_config.get('schema')
 
@@ -172,14 +169,14 @@ def pgr_convert(config, resource, db_configs, connection, logger):
         cost_calculation_files_paths = {cost["compute"]["configuration"]["storage"]["file"] for cost in source["costs"]}
 
         for cost_calculation_file_path in cost_calculation_files_paths:
-            pivot_to_pgr(source, cost_calculation_file_path,
-
+            pivot_to_pgr(source, cost_calculation_file_path, database, database_out, schema_out, input_schema, logger)
+        database_out.disconnect_working_db()
 
     et_pivot_to_pgr = time.time()
-    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." %(et_pivot_to_pgr - st_pivot_to_pgr))
+    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." % (et_pivot_to_pgr - st_pivot_to_pgr))
 
 
-def osm_convert(config, resource, db_configs, connection, logger):
+def osm_convert(config, resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd pivot vers un fichier osm
 
@@ -191,8 +188,8 @@ def osm_convert(config, resource, db_configs, connection, logger):
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 
@@ -224,7 +221,7 @@ def osm_convert(config, resource, db_configs, connection, logger):
         # Plusieurs sources peuvent référencer le même mapping mais changer plus tard dans la génération
         found_base = False
         found_id = ''
-        for sid,sub in used_bases.items():
+        for sid, sub in used_bases.items():
             if sub == source['mapping']['source']['baseId']:
                 found_base = True
                 found_id = sid
@@ -251,11 +248,12 @@ def osm_convert(config, resource, db_configs, connection, logger):
 
         else:
             logger.info("Mapping not already done")
-            pivot_to_osm(config, source, db_configs,
+            pivot_to_osm(config, source, db_configs, database, logger, convert_osm_to_pbf)
+
+            used_bases[source['id']] = source['mapping']['source']['baseId']
 
-        used_bases[ source['id'] ] = source['mapping']['source']['baseId']
 
-def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
+def osrm_convert(config, resource, logger, build_lua_from_cost_config=True):
     """
     Fonction de conversion depuis le fichier osm vers les fichiers osrm
 
@@ -282,7 +280,7 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
 
         logger.info('LUA part')
         lua_file = source["cost"]["compute"]["storage"]["file"]
@@ -293,7 +291,7 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
         costs_config = config_from_path(config_file)
         cost_name = source["cost"]["compute"]["configuration"]["name"]
 
-        if cost_name not in [
+        if cost_name not in [output["name"] for output in costs_config["outputs"]]:
             raise ValueError("cost_name must be in cost configuration")
 
         with open(lua_file, "w") as lua_f:
@@ -332,15 +330,15 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
         start_command = time.time()
         subprocess_execution(osrm_extract_args, logger)
         end_command = time.time()
-        logger.info("OSRM extract ended. Elapsed time : %s seconds." %(end_command - start_command))
+        logger.info("OSRM extract ended. Elapsed time : %s seconds." % (end_command - start_command))
         subprocess_execution(osrm_contract_args, logger)
         final_command = time.time()
-        logger.info("OSRM contract ended. Elapsed time : %s seconds." %(final_command - end_command))
+        logger.info("OSRM contract ended. Elapsed time : %s seconds." % (final_command - end_command))
         subprocess_execution(rm_args, logger)
         i += 1
 
 
-def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True):
+def valhalla_convert(config, resource, logger, build_lua_from_cost_config=True):
     """
     Fonction de conversion depuis le fichier .osm.pbf vers les fichiers valhalla
 
@@ -367,7 +365,7 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
 
         logger.info('Looking for OSM PBF file')
 
@@ -406,15 +404,15 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True):
 
         start_command = time.time()
         valhalla_build_config_args = ["valhalla_build_config",
-
-
-
-
-
-
-
-
-        subprocess_execution(valhalla_build_config_args, logger, outfile
+                                      "--mjolnir-tile-dir", source["storage"]["dir"],
+                                      "--mjolnir-tile-extract", source["storage"]["tar"],
+                                      # Modification des limites par défaut du service : 10h pour isochrone et 1000km pour iso distance
+                                      # contre 2h et 200km par défaut
+                                      "--service-limits-isochrone-max-time-contour", "600",
+                                      "--service-limits-isochrone-max-distance-contour", "1000",
+                                      # Ajout de l'autorisation à exclure les ponts/tunnels/péages
+                                      "--service-limits-allow-hard-exclusions", "True"]
+        subprocess_execution(valhalla_build_config_args, logger, outfile=source["storage"]["config"])
         # Nécessaire le temps que le fichier s'écrive...
         time.sleep(1)
         # Ajout du graph custom dans la config valhalla (impossible via les paramètres du build_config)
@@ -432,10 +430,10 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True):
         subprocess_execution(valhalla_build_extract_args, logger)
 
     final_command = time.time()
-    logger.info("Valhalla tiles built. Elapsed time : %s seconds." %(final_command - start_command))
+    logger.info("Valhalla tiles built. Elapsed time : %s seconds." % (final_command - start_command))
 
 
-def write_road2_config(config, resource, logger, convert_file_paths = True):
+def write_road2_config(config, resource, logger, convert_file_paths=True):
     """
     Fonction pour l'écriture du fichier de ressource
 
@@ -456,7 +454,8 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
 
     for source in resource["sources"]:
 
-        source_file = os.path.join(config["outputs"]["configurations"]["sources"]["storage"]["directory"], source['id'] + ".source")
+        source_file = os.path.join(config["outputs"]["configurations"]["sources"]["storage"]["directory"],
+                                   source['id'] + ".source")
         logger.info("Writing source file : " + source_file)
 
         # On modifie la source en fonction de son type
@@ -474,10 +473,11 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
             bid_tmp = source["storage"]["base"]["baseId"]
             for base in config["bases"]:
                 if base["id"] == bid_tmp:
-                    db_file_out = convert_path(base["configFile"], config["outputs"]["configurations"]["databases"]["storage"]["directory"])
+                    db_file_out = convert_path(base["configFile"],
+                                               config["outputs"]["configurations"]["databases"]["storage"]["directory"])
                     copy_file_locally(base["configFile"], db_file_out)
-                    source["storage"]["base"].update({"dbConfig":db_file_out})
-                    source["storage"]["base"].update({"schema":base["schema"]})
+                    source["storage"]["base"].update({"dbConfig": db_file_out})
+                    source["storage"]["base"].update({"schema": base["schema"]})
                     source["storage"]["base"].pop("baseId", None)
             for cost in source["costs"]:
                 cost.pop("compute", None)
@@ -492,7 +492,8 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
         source_ids.append(source['id'])
 
     # On passe à la ressource
-    resource_file = os.path.join(config["outputs"]["configurations"]["resource"]["storage"]["directory"], resource['id'] + ".resource")
+    resource_file = os.path.join(config["outputs"]["configurations"]["resource"]["storage"]["directory"],
+                                 resource['id'] + ".resource")
     logger.info("Writing resource file: " + resource_file)
 
     # Récupération de la date d'extraction
@@ -500,7 +501,7 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
     date_file = os.path.join(work_dir_config, "r2gg.date")
     f = open(date_file, "r")
    extraction_date = f.read()
-    logger.info("extraction date to add in resource (from "+ date_file +"): " + extraction_date)
+    logger.info("extraction date to add in resource (from " + date_file + "): " + extraction_date)
     f.close()
 
     # On fait le dossier s'il n'existe pas