r2gg 3.1.4-py3-none-any.whl → 3.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
r2gg/__about__.py CHANGED
@@ -34,7 +34,7 @@ __uri_repository__ = "https://github.com/IGNF/route-graph-generator/"
 __uri_tracker__ = f"{__uri_repository__}issues/"
 __uri__ = __uri_repository__
 
-__version__ = "3.1.4"
+__version__ = "3.1.6"
 __version_info__ = tuple(
     [
         int(num) if num.isdigit() else num
r2gg/_database.py CHANGED
@@ -23,7 +23,7 @@ def database_retry_decorator(func):
         except (OperationalError, DatabaseError, InterfaceError) as e:
             if attempt >= RETRY:
                 self.logger.error(f"Query failed after {RETRY} attempts: {str(e).rstrip()}")
-                return
+                raise e
 
             self.logger.error(
                 f"Attempt {attempt}/{RETRY} failed ({str(e).rstrip()}), retrying in {DELAY} seconds"
@@ -108,49 +108,60 @@ class DatabaseManager:
         time.sleep(DELAY)
         self._connection = self.connect_working_db()
 
-    def execute_select_query(self, cursor, query, show_duration):
-        if TIMEOUT:
-            cursor.execute("SET statement_timeout = %s", (1000 * TIMEOUT,)) # timeout in milliseconds
-
-        if show_duration:
-            self.logger.info("SQL: {}".format(query))
-            st_execute = time.time()
-            cursor.execute(query)
-            et_execute = time.time()
-            self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
-        else:
-            cursor.execute(query)
-
-    @database_retry_decorator
+    # IMPORTANT:
+    # Streaming SELECTs must NOT use retry logic.
+    # If the connection drops, the cursor state is unrecoverable.
     def execute_select_fetch_multiple(self, query, batchsize=1, show_duration=False):
-        with self._connection.cursor(cursor_factory=DictCursor) as cursor:
-            self.execute_select_query(cursor, query, show_duration)
-            rows = cursor.fetchmany(batchsize)
+        """
+        Streaming SELECT using a named server-side cursor.
+        No retry. No reconnect. No commit.
+        Fail fast if the connection drops (old behavior).
+        """
+        self.ensure_connection()
+        cursor_name = f"cursor_{int(time.time() * 1000)}"
+        with self._connection.cursor(cursor_factory=DictCursor, name=cursor_name) as cursor:
+            if TIMEOUT:
+                cursor.execute("SET statement_timeout = %s", (1000 * TIMEOUT,))
+            if show_duration:
+                self.logger.info(f"SQL: {query}")
+                st = time.time()
+                cursor.execute(query)
+                self.logger.info(
+                    "Execution ended. Elapsed time : %s seconds.",
+                    time.time() - st
+                )
+            else:
+                cursor.execute(query)
+
             count = cursor.rowcount
-            while rows:
+
+            while True:
+                rows = cursor.fetchmany(batchsize)
+                if not rows:
+                    break
                 if batchsize == 1:
                     rows = rows.pop()
                 yield rows, count
-                rows = cursor.fetchmany(batchsize)
-            self._connection.commit()
-            return
 
     # the method below should be used as a generator function otherwise use execute_update
     @database_retry_decorator
     def execute_update_query(self, query, params=None, isolation_level=None, show_duration=False):
+        self.ensure_connection()
         if show_duration :
             self.logger.info("SQL: {}".format(query))
             st_execute = time.time()
         with self._connection.cursor(cursor_factory=DictCursor) as cursor:
             old_isolation_level = self._connection.isolation_level
-            if isolation_level is not None:
-                self._connection.set_isolation_level(isolation_level)
-            cursor.execute(query, params)
-            self._connection.commit()
+            try:
+                if isolation_level is not None:
+                    self._connection.set_isolation_level(isolation_level)
+                cursor.execute(query, params)
+                self._connection.commit()
+            finally:
+                self._connection.set_isolation_level(old_isolation_level)
             if show_duration:
                 et_execute = time.time()
                 self.logger.info("Execution ended. Elapsed time : %s seconds." % (et_execute - st_execute))
-        self._connection.set_isolation_level(old_isolation_level)
         yield # the decorator database_retry_decorator only supports generators
         return
 
@@ -158,6 +169,9 @@ class DatabaseManager:
         next(self.execute_update_query(query, params=params, isolation_level=isolation_level), None)
 
     def execute_select_fetch_one(self, query, show_duration=False):
-        gen = self.execute_select_fetch_multiple(query, 1, show_duration)
-        row, count = next(gen, (None, None))
-        return row, count
+        try:
+            gen = self.execute_select_fetch_multiple(query, 1, show_duration)
+            row, count = next(gen, (None, None))
+            return row, count
+        finally:
+            gen.close() # Ensure the generator is closed to free resources
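
With these changes, execute_select_fetch_multiple streams rows through a named (server-side) psycopg2 cursor and no longer retries or commits; cleanup is left to the caller. A usage sketch, assuming `db` is a connected DatabaseManager; the query and the processing are illustrative:

# `db` is assumed to be a connected DatabaseManager (see r2gg/cli.py); the query is illustrative.
gen = db.execute_select_fetch_multiple("SELECT id, geom FROM edges", batchsize=500)
try:
    for rows, count in gen:          # with batchsize > 1, `rows` is a list of DictRow
        for row in rows:
            print(row["id"])         # placeholder for real processing
finally:
    gen.close()  # releases the named server-side cursor even if iteration stops early
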
r2gg/_pivot_to_osm.py CHANGED
@@ -81,10 +81,13 @@ def pivot_to_osm(config, source, db_configs, database: DatabaseManager, logger,
         sql_query_nodes += " LIMIT {} OFFSET {}".format(batchsize, offset)
         offset += batchsize
         logger.info("Writing nodes")
-        for row, count in database.execute_select_fetch_multiple(sql_query_nodes, show_duration=True):
-            nodeEl = writeNode(row, extraction_date)
-            xf.write(nodeEl, pretty_print=True)
-
+        gen = database.execute_select_fetch_multiple(sql_query_nodes, show_duration=True)
+        try:
+            for row, count in gen:
+                nodeEl = writeNode(row, extraction_date)
+                xf.write(nodeEl, pretty_print=True)
+        finally:
+            gen.close()
         logger.info("%s / %s nodes ajoutés" % (offset, nodesize))
         et_nodes = time.time()
         logger.info("Writing nodes ended. Elapsed time : %s seconds." % (et_nodes - st_nodes))
@@ -124,16 +127,19 @@ def pivot_to_osm(config, source, db_configs, database: DatabaseManager, logger,
         logger.info("Writing restrictions")
         st_execute = time.time()
         i = 1
-        for row, count in database.execute_select_fetch_multiple(sql_query_non_comm, show_duration=True):
-            if row['common_vertex_id'] == -1:
+        gen = database.execute_select_fetch_multiple(sql_query_non_comm, show_duration=True)
+        try:
+            for row, count in gen:
+                if row['common_vertex_id'] == -1:
+                    i += 1
+                    continue
+                ResEl = writeRes(row, i, extraction_date)
+                xf.write(ResEl, pretty_print=True)
+                if count > 0 and (i % ceil(count / 10) == 0):
+                    logger.info("%s / %s restrictions ajoutés" % (i, count))
                 i += 1
-                continue
-            ResEl = writeRes(row, i, extraction_date)
-            xf.write(ResEl, pretty_print=True)
-            if (i % ceil(count / 10) == 0):
-                logger.info("%s / %s restrictions ajoutés" % (i, count))
-            i += 1
-
+        finally:
+            gen.close()
         et_execute = time.time()
         logger.info("Writing restrictions ended. Elapsed time : %s seconds." % (et_execute - st_execute))
 
r2gg/_pivot_to_pgr.py CHANGED
@@ -115,38 +115,41 @@ def pivot_to_pgr(source, cost_calculation_file_path, database_work: DatabaseMana
     index = 0
     batchsize = 10000
     generator = database_work.execute_select_fetch_multiple(tr_query, show_duration=True, batchsize=batchsize)
-    rows, count = next(generator,(None, None))
-    # Insertion petit à petit -> plus performant
-
-    logger.info("SQL: Inserting or updating {} values in out db".format(count))
-
-    st_execute = time.time()
-
-    while rows:
-        values_str = ""
-        # Tuple des valuers à insérer
-        values_tuple = ()
-        for row in rows:
-            values_str += "(%s, %s, %s),"
-            values_tuple += (index, row['id_from'], row['id_to'])
-            index += 1
-        values_str = values_str[:-1]
-
-        set_on_conflict = (
-            "id_from = excluded.id_from,id_to = excluded.id_to"
-        )
+    try:
+        rows, count = next(generator,(None, None))
+        # Insertion petit à petit -> plus performant
+
+        logger.info("SQL: Inserting or updating {} values in out db".format(count))
+
+        st_execute = time.time()
+
+        while rows:
+            values_str = ""
+            # Tuple des valuers à insérer
+            values_tuple = ()
+            for row in rows:
+                values_str += "(%s, %s, %s),"
+                values_tuple += (index, row['id_from'], row['id_to'])
+                index += 1
+            values_str = values_str[:-1]
+
+            set_on_conflict = (
+                "id_from = excluded.id_from,id_to = excluded.id_to"
+            )
 
-        sql_insert = """
-        INSERT INTO {}.turn_restrictions (id, id_from, id_to)
-        VALUES {}
-        ON CONFLICT (id) DO UPDATE
-        SET {};
-        """.format(schema, values_str, set_on_conflict)
-        database_out.execute_update(sql_insert, values_tuple)
+            sql_insert = """
+            INSERT INTO {}.turn_restrictions (id, id_from, id_to)
+            VALUES {}
+            ON CONFLICT (id) DO UPDATE
+            SET {};
+            """.format(schema, values_str, set_on_conflict)
+            database_out.execute_update(sql_insert, values_tuple)
 
-        rows, _ = next(generator,(None, None))
+            rows, _ = next(generator,(None, None))
 
-    et_execute = time.time()
+        et_execute = time.time()
+    finally:
+        generator.close() # Ensure the generator is closed to free resources
     logger.info("Writing turn restrinctions Done. Elapsed time : %s seconds." %(et_execute - st_execute))
 
     # Noeuds ---------------------------------------------------------------------------------------
@@ -171,30 +174,32 @@ def pivot_to_pgr(source, cost_calculation_file_path, database_work: DatabaseMana
     index = 0
     batchsize = 10000
     generator = database_work.execute_select_fetch_multiple(nd_query, show_duration=True, batchsize=batchsize)
-    rows, count = next(generator, (None, None))
-    while rows:
-        values_str = ""
-        # Tuple des valeurs à insérer
-        values_tuple = ()
-        for row in rows:
-            values_str += "(%s, %s),"
-            values_tuple += (row['id'], row['geom'])
-            index += 1
-        values_str = values_str[:-1]
-
-        set_on_conflict = (
-            "the_geom = excluded.the_geom"
-        )
-
-        sql_insert = """
-        INSERT INTO {}_vertices_pgr (id, the_geom)
-        VALUES {}
-        ON CONFLICT (id) DO UPDATE
-        SET {};
-        """.format(ways_table_name, values_str, set_on_conflict)
-        database_out.execute_update(sql_insert, values_tuple)
-        rows, _ = next(generator,(None, None))
+    try:
+        rows, count = next(generator, (None, None))
+        while rows:
+            values_str = ""
+            # Tuple des valeurs à insérer
+            values_tuple = ()
+            for row in rows:
+                values_str += "(%s, %s),"
+                values_tuple += (row['id'], row['geom'])
+                index += 1
+            values_str = values_str[:-1]
+
+            set_on_conflict = (
+                "the_geom = excluded.the_geom"
+            )
 
+            sql_insert = """
+            INSERT INTO {}_vertices_pgr (id, the_geom)
+            VALUES {}
+            ON CONFLICT (id) DO UPDATE
+            SET {};
+            """.format(ways_table_name, values_str, set_on_conflict)
+            database_out.execute_update(sql_insert, values_tuple)
+            rows, _ = next(generator,(None, None))
+    finally:
+        generator.close() # Ensure the generator is closed to free resources
 
     et_execute = time.time()
     logger.info("Writing vertices Done. Elapsed time : %s seconds." %(et_execute - st_execute))
@@ -266,45 +271,48 @@ def pivot_to_pgr(source, cost_calculation_file_path, database_work: DatabaseMana
     # logger.info("SQL: Inserting or updating {} values in out db".format(cursor_in.rowcount))
     st_execute = time.time()
     percent = 0
-    rows, count = next(generator, (None, None))
-    while rows:
-        percent += 1000000 / count
-        # Chaîne permettant l'insertion de valeurs via psycopg
-        values_str = ""
-        # Tuple des valuers à insérer
-        values_tuple = ()
-        for row in rows:
-            values_str += "(" + single_value_str + "),"
-            output_costs = output_costs_from_costs_config(costs, row)
-            values_tuple += tuple(
-                row[ output_columns_name ] for output_columns_name in output_columns_names
-            ) + output_costs
-        values_str = values_str[:-1]
-
-        output_columns = "("
-        for output_columns_name in output_columns_names:
-            output_columns += output_columns_name + ','
-        output_columns = output_columns[:-1]
-
-        set_on_conflict = ''
-        for output_columns_name in output_columns_names:
-            set_on_conflict += "{0} = excluded.{0},".format(output_columns_name)
-        set_on_conflict = set_on_conflict[:-1]
-
-        for output in costs["outputs"]:
-            output_columns += "," + output["name"] + ",reverse_" + output["name"]
-            set_on_conflict += ",{0} = excluded.{0}".format(output["name"])
-            set_on_conflict += ",{0} = excluded.{0}".format("reverse_" + output["name"])
-
-        output_columns += ")"
-        sql_insert = """
-        INSERT INTO {} {}
-        VALUES {}
-        ON CONFLICT (id) DO UPDATE
-        SET {};
-        """.format(ways_table_name, output_columns, values_str, set_on_conflict)
-        database_out.execute_update(sql_insert, values_tuple)
-        rows, _ = next(generator,(None, None))
+    try:
+        rows, count = next(generator, (None, None))
+        while rows:
+            percent += 1000000 / count
+            # Chaîne permettant l'insertion de valeurs via psycopg
+            values_str = ""
+            # Tuple des valuers à insérer
+            values_tuple = ()
+            for row in rows:
+                values_str += "(" + single_value_str + "),"
+                output_costs = output_costs_from_costs_config(costs, row)
+                values_tuple += tuple(
+                    row[ output_columns_name ] for output_columns_name in output_columns_names
+                ) + output_costs
+            values_str = values_str[:-1]
+
+            output_columns = "("
+            for output_columns_name in output_columns_names:
+                output_columns += output_columns_name + ','
+            output_columns = output_columns[:-1]
+
+            set_on_conflict = ''
+            for output_columns_name in output_columns_names:
+                set_on_conflict += "{0} = excluded.{0},".format(output_columns_name)
+            set_on_conflict = set_on_conflict[:-1]
+
+            for output in costs["outputs"]:
+                output_columns += "," + output["name"] + ",reverse_" + output["name"]
+                set_on_conflict += ",{0} = excluded.{0}".format(output["name"])
+                set_on_conflict += ",{0} = excluded.{0}".format("reverse_" + output["name"])
+
+            output_columns += ")"
+            sql_insert = """
+            INSERT INTO {} {}
+            VALUES {}
+            ON CONFLICT (id) DO UPDATE
+            SET {};
+            """.format(ways_table_name, output_columns, values_str, set_on_conflict)
+            database_out.execute_update(sql_insert, values_tuple)
+            rows, _ = next(generator,(None, None))
+    finally:
+        generator.close() # Ensure the generator is closed to free resources
 
     et_execute = time.time()
     logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_execute - st_execute))
r2gg/cli.py CHANGED
@@ -13,20 +13,26 @@ from r2gg._database import DatabaseManager
 def sql2pivot():
     config, resource, db_configs, logger = configure()
     database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
-    sql_convert(config, resource, db_configs, database, logger)
-    database.disconnect_working_db()
+    try:
+        sql_convert(config, resource, db_configs, database, logger)
+    finally:
+        database.disconnect_working_db()
 
 def pivot2pgrouting():
     config, resource, db_configs, logger = configure()
     database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
-    pgr_convert(resource, db_configs, database, logger)
-    database.disconnect_working_db()
+    try:
+        pgr_convert(resource, db_configs, database, logger)
+    finally:
+        database.disconnect_working_db()
 
 def pivot2osm():
     config, resource, db_configs, logger = configure()
     database = DatabaseManager(db_configs[config["workingSpace"]["baseId"]], logger)
-    osm_convert(config, resource, db_configs, database, logger)
-    database.disconnect_working_db()
+    try:
+        osm_convert(config, resource, db_configs, database, logger)
+    finally:
+        database.disconnect_working_db()
 
 def osm2osrm():
     config, resource, _, logger = configure()
r2gg-3.1.4.dist-info/METADATA → r2gg-3.1.6.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: r2gg
-Version: 3.1.4
+Version: 3.1.6
 Summary: Route Graph Generator (r2gg) est un script Python qui permet la génération de graphes pour des moteurs de calcul d'itinéraire
 Home-page: https://github.com/IGNF/route-graph-generator/
 Author: IGNF
r2gg-3.1.4.dist-info/RECORD → r2gg-3.1.6.dist-info/RECORD
@@ -1,7 +1,7 @@
-r2gg/__about__.py,sha256=VjF-b03qrVBDGQ-KCnD7lEiODARNWwvwp8KVejqA1Cc,1343
+r2gg/__about__.py,sha256=UNywC946fLmBM6E4ZaK0EwaEVTuS0iZofMNu7wYKC6I,1343
 r2gg/__init__.py,sha256=zERkbHrNMbNZleuOxsfA7afQExXxpP7-IWVP1IFzNNs,88
 r2gg/_configure.py,sha256=HGLvnHa_XUV_BGBL_Hn-llaRDH6JK2svdXK6vo61_GU,2405
-r2gg/_database.py,sha256=xXBkxGyjNRRJzIGUKDhB5OWq0G_FV6KlB5uHJnOXb_c,6306
+r2gg/_database.py,sha256=Mz8zx2lVkChNoi4Nxxz7sQ59TiYzjKeSLhAGYiFMKDw,6733
 r2gg/_file_copier.py,sha256=NzDd6ZqxQsQ_EulXg6jZxECUMjlJ0VBtr40-c1Rde-w,578
 r2gg/_lua_builder.py,sha256=W00BWFkRoKSyBgcWCl00hxBNqvE3Yf7Aw6DeLLwfO5M,10420
 r2gg/_main.py,sha256=vxXsSaocfeQfMDvamiRCEJPESwygMs47LQU25gHpHSY,22368
@@ -9,16 +9,16 @@ r2gg/_osm_building.py,sha256=MFFY3EYOVO4rPsYk90Oda70KH1DKW2rlRj8XT4Vx_4U,4227
 r2gg/_osm_to_pbf.py,sha256=ZCGaqao4r2xkwIvKjQSOBd7TVXzO_KDAT2PptnCuAoY,1088
 r2gg/_output_costs_from_costs_config.py,sha256=g7Qy4tlUIOA7DJcwXW9ZxhcOJ5Srq1c8hufylI2e0m8,6208
 r2gg/_path_converter.py,sha256=CTtFHucxTSDB89TMbTO7Q0VqRCs2295GTA4_3cftUVc,340
-r2gg/_pivot_to_osm.py,sha256=hucBK63QJSkJxNv9bfcNPRs0Ky0vIolccELrGPq3a2s,6933
-r2gg/_pivot_to_pgr.py,sha256=39dLpl0_HG-0RLp4vlRqwhXBOzdkxaYhp3SdXxS869k,15469
+r2gg/_pivot_to_osm.py,sha256=fCpimwu7c76IQKYWL9IdTmUv5stBtBBCsAWQTn4T8S0,7217
+r2gg/_pivot_to_pgr.py,sha256=YhHJbqJOHMKeDjPKT0i6cgHSuMbLF_yAw6A1ZLHvIbE,16086
 r2gg/_read_config.py,sha256=VQ6d6Oi1aKHwwURA3KrbpWHK0zP18aJ98XQJiHZb8oI,721
 r2gg/_sql_building.py,sha256=DefYIerZ0k_yltgJFaSzTXAZPSSUH7WAxN3d5FceoWw,840
 r2gg/_subprocess_execution.py,sha256=ipsynsXohonN9YfmxX7DFTFCvJWsvrLDuA5Y-0QDUDM,1603
 r2gg/_valhalla_lua_builder.py,sha256=vNUnsu4yYQflnNv49-ElxrfIqa4b30vJP740ab7M7rY,13816
-r2gg/cli.py,sha256=jeMiLc1UVAhNFNe_p8nRnyF8V_7kliqIzJsLG1MKpIo,2174
-r2gg-3.1.4.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-r2gg-3.1.4.dist-info/METADATA,sha256=3nyIXee9UgSuU5CvmAisLZekI0jBS9B0jVbjFRTUivU,5199
-r2gg-3.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-r2gg-3.1.4.dist-info/entry_points.txt,sha256=Km7XbbcVI9jy1TQI0raQfRyKdNGAUIwVkkBG5YeQP4k,275
-r2gg-3.1.4.dist-info/top_level.txt,sha256=fj9IaWXORCdMRcnWWZ7LmvOG4W_sVH8J4BUWf1pM37c,5
-r2gg-3.1.4.dist-info/RECORD,,
+r2gg/cli.py,sha256=UKzYyOeUNZOeP-mjMOZaNviBkOA4fCqhB1xYMKXCWrk,2264
+r2gg-3.1.6.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+r2gg-3.1.6.dist-info/METADATA,sha256=jeSbamu53xn4vTMEhvJDGy7yviGYIzm7Nz3O6R50QYE,5199
+r2gg-3.1.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+r2gg-3.1.6.dist-info/entry_points.txt,sha256=Km7XbbcVI9jy1TQI0raQfRyKdNGAUIwVkkBG5YeQP4k,275
+r2gg-3.1.6.dist-info/top_level.txt,sha256=fj9IaWXORCdMRcnWWZ7LmvOG4W_sVH8J4BUWf1pM37c,5
+r2gg-3.1.6.dist-info/RECORD,,
r2gg-3.1.4.dist-info/WHEEL → r2gg-3.1.6.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.9.0)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any