r2gg-2.2.6-py3-none-any.whl → r2gg-3.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
r2gg/__about__.py CHANGED
@@ -34,7 +34,7 @@ __uri_repository__ = "https://github.com/IGNF/route-graph-generator/"
 __uri_tracker__ = f"{__uri_repository__}issues/"
 __uri__ = __uri_repository__
 
-__version__ = "2.2.6"
+__version__ = "3.1.4"
 __version_info__ = tuple(
     [
         int(num) if num.isdigit() else num
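
The only change here is the version bump; `__version_info__` then derives a tuple from the new string. The hunk is truncated before the iterable being consumed, so the following is a hedged illustration only, assuming a plain split on ".":

    # Sketch only: the split expression is cut off by the hunk boundary;
    # a plain __version__.split(".") is assumed here.
    __version__ = "3.1.4"
    __version_info__ = tuple(
        [
            int(num) if num.isdigit() else num
            for num in __version__.split(".")
        ]
    )
    # -> (3, 1, 4); a suffix like "4rc1" would stay a string, since isdigit() fails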
r2gg/_configure.py CHANGED
@@ -8,13 +8,14 @@ from r2gg._read_config import config_from_path
 
 # Définition des niveaux de log
 LEVELS = {
-    'CRITICAL' : logging.CRITICAL,
-    'ERROR' : logging.ERROR,
-    'WARNING' : logging.WARNING,
-    'INFO' : logging.INFO,
-    'DEBUG' : logging.DEBUG
+    'CRITICAL': logging.CRITICAL,
+    'ERROR': logging.ERROR,
+    'WARNING': logging.WARNING,
+    'INFO': logging.INFO,
+    'DEBUG': logging.DEBUG
 }
 
+
 def configure():
     """
     Fonction de lecture du fichier de configuration passé en argument
@@ -27,8 +28,6 @@ def configure():
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
     logger: logging.Logger
     """
     parser = argparse.ArgumentParser()
@@ -40,7 +39,7 @@ def configure():
     config = config_from_path(config_path)['generation']
 
     # Récupération de la configuration du log
-    logs_config = config_from_path( config['general']['logs']['configFile'] )
+    logs_config = config_from_path(config['general']['logs']['configFile'])
 
     # Gestion du fichiers de logs non spécifié
     try:
@@ -51,7 +50,7 @@ def configure():
     # Configuration du module logging
     logging.basicConfig(
         format='%(asctime)s %(message)s',
-        level=LEVELS[ logs_config['level'].upper() ],
+        level=LEVELS[logs_config['level'].upper()],
         handlers=[
             logging.FileHandler(logs_file),
             logging.StreamHandler()
@@ -66,10 +65,10 @@ def configure():
     # Configuration des bases de données précisées dans la config
     for base in config['bases']:
         if base['type'] == 'bdd':
-            db_configs[ base['id'] ] = config_from_path(base['configFile'])
-            db_configs[base['id']].update({"schema":base['schema']})
+            db_configs[base['id']] = config_from_path(base['configFile'])
+            db_configs[base['id']].update({"schema": base['schema']})
 
-    # Récupération de l'objet permettant de générer la ressource
+    #  Récupération de l'objet permettant de générer la ressource
     resource = config['resource']
 
     # Création de l'espace de travail
@@ -77,52 +76,3 @@ def configure():
         os.makedirs(config['workingSpace']['directory'])
 
     return config, resource, db_configs, logger
-
-def connect_working_db(config, db_configs, logger):
-    """
-    Fonction de connexion à la BDD de travail
-
-    Parameters
-    ----------
-    config: dict
-        dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
-    db_configs: dict
-        dictionnaire correspondant aux configurations des bdd
-    logger: logging.Logger
-    Returns
-    -------
-    connection: psycopg2.connection
-        connection à la bdd de travail
-
-    """
-
-    # Configuration de la bdd de travail
-    work_db_config = db_configs[ config['workingSpace']['baseId'] ]
-
-    # Récupération des paramètres de la bdd
-    host = work_db_config.get('host')
-    dbname = work_db_config.get('database')
-    user = work_db_config.get('user')
-    password = work_db_config.get('password')
-    port = work_db_config.get('port')
-    connect_args = 'host=%s dbname=%s user=%s password=%s port=%s' %(host, dbname, user, password, port)
-
-    logger.info("Connecting to work database")
-    connection = psycopg2.connect(connect_args)
-    connection.set_client_encoding('UTF8')
-
-    return connection
-
-def disconnect_working_db(connection, logger):
-    """
-    Fonction de connexion à la BDD de travail
-
-    Parameters
-    ----------
-    connection: psycopg2.connection
-        connection à la bdd de travail
-    logger: logging.Logger
-    """
-
-    connection.close()
-    logger.info("Connection to work database closed")
r2gg/_database.py ADDED
@@ -0,0 +1,163 @@
+import time
+from os import getenv
+
+import psycopg2
+from psycopg2 import OperationalError, DatabaseError, InterfaceError
+from psycopg2.extras import DictCursor
+import logging
+
+TIMEOUT = int(getenv("SQL_STATEMENT_TIMEOUT", 0))
+RETRY = int(getenv("SQL_STATEMENT_RETRY_ATTEMPTS", 3))
+DELAY = int(getenv("SQL_DELAY_BETWEEN_STATEMENTS", 30))
+
+
+def database_retry_decorator(func):
+    def wrapper(self, *args, **kwargs):
+        attempt = 1
+        while attempt <= RETRY:
+            try:
+                self.ensure_connection()
+                yield from func(self, *args, **kwargs)
+                return
+
+            except (OperationalError, DatabaseError, InterfaceError) as e:
+                if attempt >= RETRY:
+                    self.logger.error(f"Query failed after {RETRY} attempts: {str(e).rstrip()}")
+                    return
+
+                self.logger.error(
+                    f"Attempt {attempt}/{RETRY} failed ({str(e).rstrip()}), retrying in {DELAY} seconds"
+                )
+                time.sleep(DELAY)
+                attempt += 1
+                try:
+                    if self._connection:
+                        self._connection.rollback()
+                except Exception as e:
+                    self.logger.error(f"Connection rollback failed {str(e).rstrip()}")
+        return
+
+    return wrapper
+
+
+class DatabaseManager:
+    def __init__(self, db_configs, logger):
+        self.logger = logger
+        self._work_db_config = db_configs
+        self._connection = self.connect_working_db()
+
+    def connect_working_db(self):
+        """
+        Fonction de connexion à la BDD de travail
+
+        Parameters
+        ----------
+        config: dict
+            dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
+        db_configs: dict
+            dictionnaire correspondant aux configurations des bdd
+        Returns
+        -------
+        connection: psycopg2.connection
+            connection à la bdd de travail
+
+        """
+        # Récupération des paramètres de la bdd
+        host = self._work_db_config.get("host")
+        dbname = self._work_db_config.get("database")
+        user = self._work_db_config.get("user")
+        password = self._work_db_config.get("password")
+        port = self._work_db_config.get("port")
+        connect_args = "host=%s dbname=%s user=%s password=%s port=%s" % (host, dbname, user, password, port)
+
+        self.logger.info("Connecting to work database")
+        connection = psycopg2.connect(connect_args)
+        connection.set_client_encoding("UTF8")
+
+        return connection
+
+    def disconnect_working_db(self):
+        """
+        Fonction de connexion à la BDD de travail
+
+        Parameters
+        ----------
+        connection: psycopg2.connection
+            connection à la bdd de travail
+        logger: logging.Logger
+        """
+        if self._connection:
+            self._connection.close()
+            self.logger.info("Connection to work database closed")
+
+    def ensure_connection(self):
+        """
+        Ensure the connection is alive; reconnect if needed.
+        """
+        try:
+            if self._connection is None or getattr(self._connection, "closed", 1) != 0:
+                self.logger.info("Connection is closed or missing; reconnecting")
+                self._connection = self.connect_working_db()
+            else:
+                with self._connection.cursor() as cur:
+                    cur.execute("SELECT 1")
+        except Exception as e:
+            self.logger.error(
+                f"Something is wrong with the connection: {str(e).rstrip()}; reconnecting in {DELAY} seconds")
+            self.disconnect_working_db()
+            time.sleep(DELAY)
+            self._connection = self.connect_working_db()
+
+    def execute_select_query(self, cursor, query, show_duration):
+        if TIMEOUT:
+            cursor.execute("SET statement_timeout = %s", (1000 * TIMEOUT,))  # timeout in milliseconds
+
+        if show_duration:
+            self.logger.info("SQL: {}".format(query))
+            st_execute = time.time()
+            cursor.execute(query)
+            et_execute = time.time()
+            self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
+        else:
+            cursor.execute(query)
+
+    @database_retry_decorator
+    def execute_select_fetch_multiple(self, query, batchsize=1, show_duration=False):
+        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
+            self.execute_select_query(cursor, query, show_duration)
+            rows = cursor.fetchmany(batchsize)
+            count = cursor.rowcount
+            while rows:
+                if batchsize == 1:
+                    rows = rows.pop()
+                yield rows, count
+                rows = cursor.fetchmany(batchsize)
+            self._connection.commit()
+        return
+
+    # the method below should be used as a generator function otherwise use execute_update
+    @database_retry_decorator
+    def execute_update_query(self, query, params=None, isolation_level=None, show_duration=False):
+        if show_duration :
+            self.logger.info("SQL: {}".format(query))
+            st_execute = time.time()
+        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
+            old_isolation_level = self._connection.isolation_level
+            if isolation_level is not None:
+                self._connection.set_isolation_level(isolation_level)
+            cursor.execute(query, params)
+            self._connection.commit()
+            if show_duration:
+                et_execute = time.time()
+                self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
+            self._connection.set_isolation_level(old_isolation_level)
+            yield  # the decorator database_retry_decorator only supports generators
+        return
+
+    def execute_update(self, query, params=None, isolation_level=None):
+        next(self.execute_update_query(query, params=params, isolation_level=isolation_level), None)
+
+    def execute_select_fetch_one(self, query, show_duration=False):
+        gen = self.execute_select_fetch_multiple(query, 1, show_duration)
+        row, count = next(gen, (None, None))
+        return row, count
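
All query methods go through `database_retry_decorator`, which retries on `OperationalError`/`DatabaseError`/`InterfaceError` up to `RETRY` times and only works on generator functions — hence the bare `yield` in `execute_update_query` and the `next(...)` wrapper in `execute_update`. Retry behaviour is tuned through the `SQL_STATEMENT_TIMEOUT`, `SQL_STATEMENT_RETRY_ATTEMPTS` and `SQL_DELAY_BETWEEN_STATEMENTS` environment variables. A hedged usage sketch (the config values, the "ways" table and the process() consumer are made up):

    import logging
    logger = logging.getLogger("r2gg")

    # Illustrative config only
    work_db_config = {"host": "localhost", "database": "pivot",
                      "user": "r2gg", "password": "secret", "port": 5432}
    database = DatabaseManager(work_db_config, logger)

    # Single row: (row, rowcount), or (None, None) if the query returns nothing
    row, count = database.execute_select_fetch_one("SELECT count(*) FROM ways")

    # Batched rows: generator of (batch, rowcount); with batchsize == 1 each
    # yield is the bare row instead of a one-element list (see rows.pop())
    for batch, total in database.execute_select_fetch_multiple(
            "SELECT id FROM ways", batchsize=500):
        process(batch)  # hypothetical consumer

    # Statement with no result set; isolation_level=0 means autocommit,
    # which VACUUM requires (see the sql_convert change in _main.py below)
    database.execute_update("VACUUM ANALYZE ways", isolation_level=0)

    database.disconnect_working_db()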
r2gg/_lua_builder.py CHANGED
@@ -172,9 +172,12 @@ def _build_process_way(costs_config, output_cost):
     process_way_string += "\n"
 
     # durée
-    process_way_string += "\n -- durée\n"
-    process_way_string += " result.duration = {}\n".format(compute_operations_string(output_cost["operations"]))
-    process_way_string += "\n"
+    current_profile = output_cost["name"].split("_")[-1]
+    for output in costs_config["outputs"]:
+        if output["name"] == "cost_s_" + current_profile:
+            process_way_string += "\n -- durée\n"
+            process_way_string += " result.duration = {}\n".format(compute_operations_string(output["operations"]))
+            process_way_string += "\n"
 
     # gestion du sens direct
     process_way_string += " -- gestion du sens direct\n"
r2gg/_main.py CHANGED
@@ -1,26 +1,25 @@
 import json
 import multiprocessing
 import os
-import json
 import time
 from datetime import datetime
 
-import psycopg2
 # https://github.com/andialbrecht/sqlparse
 import sqlparse
 
+from r2gg._database import DatabaseManager
+from r2gg._file_copier import copy_file_locally
 from r2gg._lua_builder import build_lua
+from r2gg._osm_to_pbf import osm_to_pbf
+from r2gg._path_converter import convert_path
 from r2gg._pivot_to_osm import pivot_to_osm
 from r2gg._pivot_to_pgr import pivot_to_pgr
 from r2gg._read_config import config_from_path
 from r2gg._subprocess_execution import subprocess_execution
-from r2gg._path_converter import convert_path
-from r2gg._file_copier import copy_file_locally
 from r2gg._valhalla_lua_builder import build_valhalla_lua
-from r2gg._osm_to_pbf import osm_to_pbf
 
 
-def sql_convert(config, resource, db_configs, connection, logger):
+def sql_convert(config, resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd source vers la bdd pivot
 
@@ -32,8 +31,8 @@ def sql_convert(config, resource, db_configs, connection, logger):
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 
@@ -57,7 +56,7 @@ def sql_convert(config, resource, db_configs, connection, logger):
     used_bases = []
 
     # Il y a potentiellement une conversion par source indiquée dans la ressource
-    for source in resource[ 'sources' ]:
+    for source in resource['sources']:
 
         logger.info("Create pivot of source: " + source['id'])
 
@@ -77,12 +76,12 @@ def sql_convert(config, resource, db_configs, connection, logger):
         else:
             logger.info("Mapping not done")
 
-        # Configuration de la bdd source
-        source_db_config = db_configs[ source['mapping']['source']['baseId'] ]
+        #  Configuration de la bdd source
+        source_db_config = db_configs[source['mapping']['source']['baseId']]
         used_bases.append(source['mapping']['source']['baseId'])
 
         # Configuration de la bdd de travail utilisée pour ce pivot
-        work_db_config = db_configs[ config['workingSpace']['baseId'] ]
+        work_db_config = db_configs[config['workingSpace']['baseId']]
 
         # Récupération de la bbox
         bbox = [float(coord) for coord in source["bbox"].split(",")]
@@ -94,9 +93,7 @@ def sql_convert(config, resource, db_configs, connection, logger):
         logger.info("Create source on bbox: " + source["bbox"])
 
         # Lancement du script SQL de conversion source --> pivot
-        connection.autocommit = True
-        with open( source['mapping']['conversion']['file'] ) as sql_script:
-            cur = connection.cursor()
+        with open(source['mapping']['conversion']['file']) as sql_script:
             logger.info("Executing SQL conversion script")
             instructions = sqlparse.split(sql_script.read().format(user=work_db_config.get('user'),
                                                                    input_schema=source_db_config.get('schema'),
@@ -107,37 +104,43 @@ def sql_convert(config, resource, db_configs, connection, logger):
             for instruction in instructions:
                 if instruction == '':
                     continue
-                logger.debug("SQL:\n{}\n".format(instruction) )
+                logger.debug("SQL:\n{}\n".format(instruction))
+                isolation_level = None
+                if instruction.strip().lower().startswith("vacuum"):
+                    isolation_level = 0
                 st_instruction = time.time()
-                cur.execute(instruction,
-                    {
-                        'bdpwd': source_db_config.get('password'), 'bdport': source_db_config.get('port'),
-                        'bdhost': source_db_config.get('host'), 'bduser': source_db_config.get('user'),
-                        'dbname': source_db_config.get('database'),
-                        'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax
-                    }
-                )
+                database.execute_update(instruction,
+                                        {
+                                            'bdpwd': source_db_config.get('password'),
+                                            'bdport': source_db_config.get('port'),
+                                            'bdhost': source_db_config.get('host'),
+                                            'bduser': source_db_config.get('user'),
+                                            'dbname': source_db_config.get('database'),
+                                            'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax
+                                        },
+                                        isolation_level=isolation_level
+                                        )
                 et_instruction = time.time()
-                logger.info("Execution ended. Elapsed time : %s seconds." %(et_instruction - st_instruction))
+                logger.info("Execution ended. Elapsed time : %s seconds." % (et_instruction - st_instruction))
 
     et_sql_conversion = time.time()
 
-    logger.info("Conversion from BDD to pivot ended. Elapsed time : %s seconds." %(et_sql_conversion - st_sql_conversion))
+    logger.info(
+        "Conversion from BDD to pivot ended. Elapsed time : %s seconds." % (et_sql_conversion - st_sql_conversion))
+
 
-def pgr_convert(config, resource, db_configs, connection, logger):
+def pgr_convert(resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd pivot vers la bdd pgrouting
 
     Parameters
     ----------
-    config: dict
-        dictionnaire correspondant à la configuration décrite dans le fichier passé en argument
     resource: dict
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 
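
The `isolation_level = 0` special case exists because PostgreSQL refuses to run VACUUM inside a transaction block, and psycopg2 opens one implicitly; level 0 is psycopg2's `ISOLATION_LEVEL_AUTOCOMMIT`. Where 2.2.6 set `connection.autocommit = True` for the whole script, 3.1.4 narrows autocommit to the VACUUM statements only. A sketch of roughly the raw-psycopg2 handling that `execute_update` applies when given that level:

    from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT  # == 0

    # Equivalent of execute_update(instruction, isolation_level=0) for VACUUM
    if instruction.strip().lower().startswith("vacuum"):
        old_level = connection.isolation_level
        connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        with connection.cursor() as cur:
            cur.execute(instruction)  # runs outside any transaction block
        connection.set_isolation_level(old_level)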
@@ -150,19 +153,13 @@ def pgr_convert(config, resource, db_configs, connection, logger):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
         logger.info("Source id : " + source["id"])
 
         # Configuration et connection à la base de sortie
-        out_db_config = db_configs[ source['storage']['base']['baseId'] ]
-        host = out_db_config.get('host')
-        dbname = out_db_config.get('database')
-        user = out_db_config.get('user')
-        password = out_db_config.get('password')
-        port = out_db_config.get('port')
-        connect_args = 'host=%s dbname=%s user=%s password=%s port=%s' %(host, dbname, user, password, port)
+        out_db_config = db_configs[source['storage']['base']['baseId']]
         logger.info("Connecting to output database")
-        connection_out = psycopg2.connect(connect_args)
+        database_out = DatabaseManager(out_db_config, logger)
 
         schema_out = out_db_config.get('schema')
 
@@ -172,14 +169,14 @@ def pgr_convert(config, resource, db_configs, connection, logger):
         cost_calculation_files_paths = {cost["compute"]["configuration"]["storage"]["file"] for cost in source["costs"]}
 
         for cost_calculation_file_path in cost_calculation_files_paths:
-            pivot_to_pgr(source, cost_calculation_file_path, connection, connection_out, schema_out, input_schema, logger)
-        connection_out.close()
+            pivot_to_pgr(source, cost_calculation_file_path, database, database_out, schema_out, input_schema, logger)
+        database_out.disconnect_working_db()
 
     et_pivot_to_pgr = time.time()
-    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." %(et_pivot_to_pgr - st_pivot_to_pgr))
+    logger.info("Conversion from pivot to PGR ended. Elapsed time : %s seconds." % (et_pivot_to_pgr - st_pivot_to_pgr))
 
 
-def osm_convert(config, resource, db_configs, connection, logger):
+def osm_convert(config, resource, db_configs, database: DatabaseManager, logger):
     """
     Fonction de conversion depuis la bdd pivot vers un fichier osm
 
@@ -191,8 +188,8 @@ def osm_convert(config, resource, db_configs, connection, logger):
         dictionnaire correspondant à la resource décrite dans le fichier passé en argument
     db_configs: dict
         dictionnaire correspondant aux configurations des bdd
-    connection: psycopg2.connection
-        connection à la bdd de travail
+    database: r2gg.DatabaseManager
+        gestionnaire de connexion et d'exécution de la base de la bdd
     logger: logging.Logger
     """
 
@@ -224,7 +221,7 @@ def osm_convert(config, resource, db_configs, connection, logger):
         # Plusieurs sources peuvent référencer le même mapping mais changer plus tard dans la génération
         found_base = False
         found_id = ''
-        for sid,sub in used_bases.items():
+        for sid, sub in used_bases.items():
            if sub == source['mapping']['source']['baseId']:
                found_base = True
                found_id = sid
@@ -251,11 +248,12 @@ def osm_convert(config, resource, db_configs, connection, logger):
 
         else:
             logger.info("Mapping not already done")
-            pivot_to_osm(config, source, db_configs, connection, logger, convert_osm_to_pbf)
+            pivot_to_osm(config, source, db_configs, database, logger, convert_osm_to_pbf)
+
+        used_bases[source['id']] = source['mapping']['source']['baseId']
 
-        used_bases[ source['id'] ] = source['mapping']['source']['baseId']
 
-def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
+def osrm_convert(config, resource, logger, build_lua_from_cost_config=True):
     """
     Fonction de conversion depuis le fichier osm vers les fichiers osrm
 
@@ -282,7 +280,7 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
 
         logger.info('LUA part')
         lua_file = source["cost"]["compute"]["storage"]["file"]
@@ -293,7 +291,7 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
            costs_config = config_from_path(config_file)
            cost_name = source["cost"]["compute"]["configuration"]["name"]
 
-            if cost_name not in [ output["name"] for output in costs_config["outputs"] ]:
+            if cost_name not in [output["name"] for output in costs_config["outputs"]]:
                raise ValueError("cost_name must be in cost configuration")
 
            with open(lua_file, "w") as lua_f:
@@ -332,15 +330,15 @@ def osrm_convert(config, resource, logger, build_lua_from_cost_config = True):
         start_command = time.time()
         subprocess_execution(osrm_extract_args, logger)
         end_command = time.time()
-        logger.info("OSRM extract ended. Elapsed time : %s seconds." %(end_command - start_command))
+        logger.info("OSRM extract ended. Elapsed time : %s seconds." % (end_command - start_command))
         subprocess_execution(osrm_contract_args, logger)
         final_command = time.time()
-        logger.info("OSRM contract ended. Elapsed time : %s seconds." %(final_command - end_command))
+        logger.info("OSRM contract ended. Elapsed time : %s seconds." % (final_command - end_command))
         subprocess_execution(rm_args, logger)
         i += 1
 
 
-def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True):
+def valhalla_convert(config, resource, logger, build_lua_from_cost_config=True):
     """
     Fonction de conversion depuis le fichier .osm.pbf vers les fichiers valhalla
 
@@ -367,7 +365,7 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True
     i = 0
     for source in resource["sources"]:
 
-        logger.info("Source {} of {}...".format(i+1, len(resource["sources"])))
+        logger.info("Source {} of {}...".format(i + 1, len(resource["sources"])))
 
         logger.info('Looking for OSM PBF file')
 
@@ -406,15 +404,15 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True
 
         start_command = time.time()
         valhalla_build_config_args = ["valhalla_build_config",
-            "--mjolnir-tile-dir", source["storage"]["dir"],
-            "--mjolnir-tile-extract", source["storage"]["tar"],
-            # Modification des limites par défaut du service : 10h pour isochrone et 1000km pour iso distance
-            # contre 2h et 200km par défaut
-            "--service-limits-isochrone-max-time-contour", "600",
-            "--service-limits-isochrone-max-distance-contour", "1000",
-            # Ajout de l'autorisation à exclure les ponts/tunnels/péages
-            "--service-limits-allow-hard-exclusions", "True"]
-        subprocess_execution(valhalla_build_config_args, logger, outfile = source["storage"]["config"])
+                                      "--mjolnir-tile-dir", source["storage"]["dir"],
+                                      "--mjolnir-tile-extract", source["storage"]["tar"],
+                                      # Modification des limites par défaut du service : 10h pour isochrone et 1000km pour iso distance
+                                      # contre 2h et 200km par défaut
+                                      "--service-limits-isochrone-max-time-contour", "600",
+                                      "--service-limits-isochrone-max-distance-contour", "1000",
+                                      # Ajout de l'autorisation à exclure les ponts/tunnels/péages
+                                      "--service-limits-allow-hard-exclusions", "True"]
+        subprocess_execution(valhalla_build_config_args, logger, outfile=source["storage"]["config"])
         # Nécessaire le temps que le fichier s'écrive...
         time.sleep(1)
         # Ajout du graph custom dans la config valhalla (impossible via les paramètres du build_config)
@@ -432,10 +430,10 @@ def valhalla_convert(config, resource, logger, build_lua_from_cost_config = True
         subprocess_execution(valhalla_build_extract_args, logger)
 
         final_command = time.time()
-        logger.info("Valhalla tiles built. Elapsed time : %s seconds." %(final_command - start_command))
+        logger.info("Valhalla tiles built. Elapsed time : %s seconds." % (final_command - start_command))
 
 
-def write_road2_config(config, resource, logger, convert_file_paths = True):
+def write_road2_config(config, resource, logger, convert_file_paths=True):
     """
     Fonction pour l'écriture du fichier de ressource
 
@@ -456,7 +454,8 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
 
     for source in resource["sources"]:
 
-        source_file = os.path.join(config["outputs"]["configurations"]["sources"]["storage"]["directory"], source['id'] + ".source")
+        source_file = os.path.join(config["outputs"]["configurations"]["sources"]["storage"]["directory"],
+                                   source['id'] + ".source")
         logger.info("Writing source file : " + source_file)
 
         # On modifie la source en fonction de son type
@@ -474,10 +473,11 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
             bid_tmp = source["storage"]["base"]["baseId"]
             for base in config["bases"]:
                 if base["id"] == bid_tmp:
-                    db_file_out = convert_path(base["configFile"], config["outputs"]["configurations"]["databases"]["storage"]["directory"])
+                    db_file_out = convert_path(base["configFile"],
+                                               config["outputs"]["configurations"]["databases"]["storage"]["directory"])
                     copy_file_locally(base["configFile"], db_file_out)
-                    source["storage"]["base"].update({"dbConfig":db_file_out})
-                    source["storage"]["base"].update({"schema":base["schema"]})
+                    source["storage"]["base"].update({"dbConfig": db_file_out})
+                    source["storage"]["base"].update({"schema": base["schema"]})
                     source["storage"]["base"].pop("baseId", None)
             for cost in source["costs"]:
                 cost.pop("compute", None)
@@ -492,7 +492,8 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
         source_ids.append(source['id'])
 
     # On passe à la ressource
-    resource_file = os.path.join(config["outputs"]["configurations"]["resource"]["storage"]["directory"], resource['id'] + ".resource")
+    resource_file = os.path.join(config["outputs"]["configurations"]["resource"]["storage"]["directory"],
+                                 resource['id'] + ".resource")
     logger.info("Writing resource file: " + resource_file)
 
     # Récupération de la date d'extraction
@@ -500,7 +501,7 @@ def write_road2_config(config, resource, logger, convert_file_paths = True):
     date_file = os.path.join(work_dir_config, "r2gg.date")
     f = open(date_file, "r")
     extraction_date = f.read()
-    logger.info("extraction date to add in resource (from "+ date_file +"): " + extraction_date)
+    logger.info("extraction date to add in resource (from " + date_file + "): " + extraction_date)
     f.close()
 
     # On fait le dossier s'il n'existe pas