berryworld 1.0.0.189823__py3-none-any.whl → 1.0.0.192686__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
berryworld/__init__.py CHANGED
@@ -23,4 +23,5 @@ from .vivantio import Vivantio
  from .teams_logging import TeamsLogging
  from .vivantio_logging import VivantioLogging
  from .snowflake_conn import SnowflakeConn
- from .logging import PythonLogs
+ from .logging import PythonLogs
+ from .sql_connenction import SQLConnection
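With this change the package root also exports the new connector, so (assuming the published layout above) it can be imported either way. A minimal sketch; note the module filename keeps the sql_connenction spelling recorded in the wheel:

    from berryworld import SQLConnection
    # or directly from the module:
    from berryworld.sql_connenction import SQLConnection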
berryworld/sql_connenction.py ADDED
@@ -0,0 +1,1014 @@
+ import os
+ import re
+ import math
+ import pyodbc
+ import traceback
+ import numpy as np
+ import pandas as pd
+ import sqlalchemy as sa
+ from urllib import parse
+ from numbers import Number
+
+
+ class SQLConnection:
+     """ Connect to Microsoft SQL """
+
+     def __init__(self, db_reference, server, master=False, trusted_certificate=True, azure=False, encrypt=True,
+                  multi_db=False):
+         """ Initialize the class
+         It requires the following environment variables:
+         SQL-DBREFERENCE-DBNAME = 'db name'
+         SQL-DBREFERENCE-USERNAME = 'user'
+         SQL-DBREFERENCE-PASSWORD = 'password'
+         -----------------------------
+         db_reference = 'FruitFlow'
+         server = 'prod'
+         master = False
+
+         con_ = SQLConnection(db_reference, server, master=master)
+         -----------------------------
+         :param db_reference: Database reference to connect to
+         :param server: Server to connect to
+         :param master: Indicate whether the connection will be done to master or to a specific database
+         :param trusted_certificate: Indicate whether the connection will be done using the TrustServerCertificate
+             option
+         :param azure: Indicate whether the connection will be done to an Azure SQL database or to an on-premise SQL
+             Server
+         :param encrypt: Indicate whether the connection will use SSL/TLS encryption
+         :param multi_db: Indicate whether the connection will be done to a specific database or to multiple databases
+         """
+         self.db_reference = db_reference
+         self.server = server
+         if self.server is None:
+             raise ValueError("Please provide a value for the server parameter")
+
+         self.multi_db = multi_db
+         self.master = master
+         if trusted_certificate:
+             self.trusted_certificate = '&TrustServerCertificate=yes'
+         else:
+             self.trusted_certificate = ''
+
+         if encrypt:
+             self.encrypt = '&Encrypt=yes'
+         else:
+             self.encrypt = ''
+
+         # Keep only the ODBC drivers that carry a version number in their name
+         drivers = [driver for driver in pyodbc.drivers() if bool(re.search(r'\d', driver))]
+         self.azure = azure
+         self.creds = {}
+         try:
+             self.server_name, self.db_name, self.user_name, self.password = self.credentials()
+         except Exception as e:
+             print(f'Cannot find a reference to {self.db_reference} - Error: {str(e)}')
+
+         self.con = None
+         self.engine = None
+         self.con_string = None
+
+         # Try each driver until one can run a trivial query; keep the last error only if none succeeded
+         driver_attempt = ''
+         for driver in drivers:
+             try:
+                 self.driver = driver
+                 self.query('''SELECT TOP 1 * FROM information_schema.tables;''')
+                 driver_attempt = ''
+                 break
+             except Exception as e:
+                 print(e)
+                 driver_attempt = str(e)
+
+         if driver_attempt != '':
+             raise ValueError(
+                 f"Cannot connect to db: {self.db_name} - Error: {str(driver_attempt)}")
+
+     def credentials(self):
+         """ Return the credentials used to connect to the SQL Server
+         :return: Tuple (server_name, db_name, user_name, password) used to connect to the SQL Server
+         """
+         if self.azure:
+             server_name = os.environ.get(f"SQL-{self.server.upper()}")
+         else:
+             server_name = os.environ.get(f"SQL-ONPREM-{self.server.upper()}")
+
+         if os.environ.get("SQL-" + self.db_reference.upper() + '-DBNAME-' + self.server.upper()) is not None:
+             db_name = os.environ.get("SQL-" + self.db_reference.upper() + '-DBNAME-' + self.server.upper())
+         else:
+             db_name = os.environ.get("SQL-" + self.db_reference.upper() + '-DBNAME')
+
+         user_name = os.environ.get("SQL-" + self.db_reference.upper() + '-USERNAME')
+         password = os.environ.get("SQL-" + self.db_reference.upper() + '-PASSWORD')
+
+         # Collapse runs of backslashes in the server name (e.g. HOST\\\\INSTANCE -> HOST\INSTANCE)
+         return re.sub(r'(\\)\1*', r'\1', server_name), db_name, user_name, password
+
+     def open_read_connection(self, commit_as_transaction=True):
+         """ Open a reading connection with the Server
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: The opened connection
+         """
+         database = self.db_name
+         if self.multi_db:
+             database = str(self.db_name).lower().replace('primary;', '')
+
+         if self.master:
+             self.con_string = ('mssql+pyodbc://' + self.user_name + ':%s@' + self.server + '/master' +
+                                '?driver=' + self.driver + '&trusted_connection=yes' + self.trusted_certificate +
+                                self.encrypt)
+             self.engine = sa.create_engine(self.con_string % parse.quote_plus(self.password))
+         else:
+             self.con_string = ('mssql+pyodbc://' + self.user_name + ':%s@' + self.server + '/' + database +
+                                '?driver=' + self.driver + self.trusted_certificate + self.encrypt)
+             self.engine = sa.create_engine(self.con_string % parse.quote_plus(self.password))
+         if not commit_as_transaction:
+             self.engine = self.engine.execution_options(isolation_level="AUTOCOMMIT")
+         self.con = self.engine.connect().connection
+
+     def open_write_connection(self, commit_as_transaction=True):
+         """ Open a writing connection with the Server
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: The opened connection
+         """
+         constring = ('mssql+pyodbc://' + self.user_name + ':%s@' + self.server + '/' + self.db_name +
+                      '?driver=' + self.driver + self.trusted_certificate + self.encrypt)
+         self.engine = sa.create_engine(constring % parse.quote_plus(self.password))
+         if not commit_as_transaction:
+             self.engine = self.engine.execution_options(isolation_level="AUTOCOMMIT")
+
+         self.con = self.engine.connect().connection
+
+     def close_connection(self):
+         """ Close any opened connections with the Server
+         :return: None
+         """
+         self.con.close()
+         if self.engine:
+             self.engine.dispose()
+
+     @staticmethod
+     def _chunker(seq, size):
+         """ Split the data set in chunks to be sent to SQL
+         :param seq: Sequence of records to be split
+         :param size: Size of each chunk the data is split into
+         :return: Generator yielding the sequence chunk by chunk
+         """
+         return (seq[pos:pos + size] for pos in range(0, len(seq), size))
+
+     def query(self, sql_query, coerce_float=False):
+         """ Read data from SQL according to the sql_query
+         -----------------------------
+         query_str = "SELECT * FROM %s" % table
+         con_.query(query_str)
+         -----------------------------
+         :param sql_query: Query to be sent to SQL
+         :param coerce_float: Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal)
+             to floating point.
+         :return: DataFrame gathering the requested data
+         """
+         self.open_read_connection()
+         data = None
+         try:
+             with self.engine.begin() as conn:
+                 data = pd.read_sql_query(sa.text(sql_query), conn, coerce_float=coerce_float)
+         except ValueError:
+             print(traceback.format_exc())
+         finally:
+             self.close_connection()
+         return data
+
+     @staticmethod
+     def _parse_df(parse_, data, col_names):
+         """ Auxiliary function to convert a list to a DataFrame
+         :param parse_: Parameter to indicate whether the data has to be transformed into a DataFrame or not
+         :param data: List gathering the data retrieved from SQL
+         :param col_names: List of columns to create the DataFrame
+         :return: Formatted data
+         """
+         if parse_ is True:
+             col_names = list(zip(*list(col_names)))[0]
+             res = pd.DataFrame(list(zip(*data)), index=col_names).T
+         else:
+             res = [col_names, data]
+         return res
+
+     def sp_results(self, sql_query, resp_number=None, parse_=True, commit_as_transaction=True, no_count=True):
+         """ Execute a stored procedure and retrieve all its output data
+         -----------------------------
+         query_str = "EXECUTE %s" % stored_procedure
+         con_.sp_results(query_str, resp_number=1)
+         -----------------------------
+         :param sql_query: Query to be sent to SQL
+         :param resp_number: Indicate which of the stored procedure's responses will be retrieved
+         :param parse_: Indicate whether the output needs to be converted to a DataFrame or not
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :param no_count: Indicate whether the SET NOCOUNT option is ON (True) or OFF (False)
+         :return: DataFrame list gathering the requested data
+         """
+         self.open_read_connection(commit_as_transaction)
+         data_list = list()
+         cursor = None
+         try:
+             cursor = self.con.cursor()
+             if no_count:
+                 cursor.execute("SET NOCOUNT ON;" + sql_query)
+             else:
+                 cursor.execute(sql_query)
+             if resp_number is not None:
+                 # Skip result sets until the requested one is reached
+                 for cursor_number in range(resp_number - 1):
+                     cursor.nextset()
+                 try:
+                     data_list.append(self._parse_df(parse_, cursor.fetchall(), cursor.description))
+                 except ValueError:
+                     raise ValueError('Please indicate a valid resp_number')
+             else:
+                 # Collect every available result set, capped at 100 iterations
+                 aux_cursor = True
+                 count = 0
+                 while aux_cursor is not False and count < 100:
+                     try:
+                         data_list.append(self._parse_df(parse_, cursor.fetchall(), cursor.description))
+                         aux_cursor = cursor.nextset()
+                     except Exception as e:
+                         print(e)
+                         cursor.nextset()
+                     finally:
+                         count += 1
+                 if count >= 100:
+                     raise RuntimeError("Method sp_results has looped over 100 times for database '%s' on server '%s'"
+                                        % (self.db_name, self.server))
+             self.con.commit()
+         except ValueError:
+             print(traceback.format_exc())
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+         return data_list
+
+     def run_statement(self, sql_statement, commit_as_transaction=True):
+         """ Execute a SQL statement
+         -----------------------------
+         query_str = "DELETE FROM %s WHERE Id > 100" % table
+         con_.run_statement(query_str)
+         -----------------------------
+         :param sql_statement: Statement as string to be run in SQL
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: Statement result
+         """
+         self.open_write_connection(commit_as_transaction)
+         cursor = self.con.cursor()
+         # Execute SQL statement
+         try:
+             cursor.execute(sql_statement)
+             self.con.commit()
+         except Exception:
+             raise Exception(traceback.format_exc())
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+
+     def insert(self, data, schema, table, truncate=False, delete=False, identity=False, chunk=1000, print_sql=False,
+                commit_all_together=False, output=None, bools2bits=True, nullable=False, commit_as_transaction=True,
+                infer_datetime_format=None):
+         """ Insert data into a SQL table, truncating the table first if needed
+         -----------------------------
+         df = pd.DataFrame({'col1': ['a', 'b'], 'col2': [1, 2]})
+         con_.insert(df, table_schema, table_name)
+         -----------------------------
+         :param data: DataFrame containing the data to upload
+         :param schema: Schema of the table in which the data will be uploaded
+         :param table: Table in which the data will be uploaded
+         :param truncate: Indicate whether the table has to be truncated before the data is sent or not
+         :param delete: Delete the rows from a table (suitable for tables that cannot be truncated because of
+             external constraints)
+         :param identity: Indicate whether the identity columns will be inserted or not
+         :param chunk: Indicate how many rows will be uploaded at once
+         :param print_sql: boolean to indicate that you want the sql_statement to be printed on the console
+         :param commit_all_together: When True, only commit once all the data has been inserted; when False, commit
+             chunk by chunk
+         :param output: Outputs the columns indicated in this list
+         :param bools2bits: Indicate whether the boolean columns should be converted to BIT to be inserted into SQL
+         :param nullable: Used within boolean_mapping_data_types to indicate which boolean column values to convert
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :param infer_datetime_format: Indicate whether the datetime columns should be converted to string and, if so,
+             the format to be used
+         :return: A DataFrame with the output columns requested if output is not None, else None
+         """
+         if output is None:
+             output = []
+         if data is None:
+             # no data to upload
+             raise ValueError("The data provided is invalid!")
+         cursor = None
+         self.open_write_connection(commit_as_transaction)
+         results = pd.DataFrame(columns=output)
+
+         # Mapping the date datatype columns for SQL
+         data = self.date_mapping_data_types(data)
+
+         # Infer datetime format if provided
+         if infer_datetime_format is not None:
+             data = self.infer_datetime(data, infer_datetime_format)
+
+         # Mapping the boolean columns to bit
+         if bools2bits:
+             data = self.boolean_mapping_data_types(data, nullable)
+
+         try:
+             cursor = self.con.cursor()
+             # Truncate table if needed
+             if truncate:
+                 cursor.execute("TRUNCATE TABLE [%s].[%s]" % (schema, table))
+             # Delete all records from the table if needed
+             if delete:
+                 cursor.execute("DELETE FROM [%s].[%s]" % (schema, table))
+             # Allow inserting into an identity column
+             if identity:
+                 cursor.execute("SET IDENTITY_INSERT [%s].[%s] ON" % (schema, table))
+             # Convert category columns to string
+             cat_cols = data.columns[(data.dtypes == 'category').values].to_list()
+             data[cat_cols] = data[cat_cols].astype(str)
+             # Deal with null values and apostrophes (')
+             data = data.replace("'NULL'", "NULL")
+             data = data.replace("'", "~~", regex=True)
+             data = data.fillna("null")
+             # Insert data into the destination table
+             records = [tuple(x) for x in data.values]
+             insert_ = """INSERT INTO [%s].[%s] """ % (schema, table)
+             insert_ += str(tuple(data.columns.values)).replace("(\'", "([").replace(
+                 '\', \'', '], [').replace('\')', '])')
+             if len(output) > 0:
+                 insert_ += " OUTPUT Inserted.[" + "], Inserted.[".join(output) + "] "
+             insert_ += """ VALUES """
+
+             for batch in self._chunker(records, chunk):
+                 rows = str(batch).strip('[]').replace("~~", "''")
+                 rows = rows.replace("'NULL'", "NULL").replace("'null'", 'null')
+                 string = insert_ + rows
+                 string = self.convert_decimal_str(string)
+                 if print_sql:
+                     print(string)
+                 cursor.execute(string)
+                 if len(output) > 0:
+                     results = pd.concat([results, pd.DataFrame.from_records(cursor.fetchall(), columns=output)])
+                 if not commit_all_together:
+                     self.con.commit()
+             if commit_all_together:
+                 self.con.commit()
+
+             # Disallow inserting into an identity column again
+             if identity:
+                 cursor.execute("SET IDENTITY_INSERT [%s].[%s] OFF" % (schema, table))
+
+             if len(output) > 0:
+                 return results.reset_index(drop=True)
+
+         except Exception:
+             raise Exception(traceback.format_exc())
+
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+
+     def insert_at_once(self, data, schema, table, truncate=False, delete=False, identity=False, chunk=1,
+                        print_sql=False, output=None, bools2bits=True, nullable=False, commit_as_transaction=True):
+         """ Build all the insert statements and commit them all at once
+         -----------------------------
+         df = pd.DataFrame({'col1': ['a', 'b'], 'col2': [1, 2]})
+         con_.insert_at_once(df, table_schema, table_name)
+         -----------------------------
+         :param data: DataFrame containing the data to upload
+         :param schema: Schema of the table in which the data will be uploaded
+         :param table: Table in which the data will be uploaded
+         :param truncate: Indicate whether the table has to be truncated before the data is sent or not
+         :param delete: Delete the rows from a table (suitable for tables that cannot be truncated because of
+             external constraints)
+         :param identity: Indicate whether the identity columns will be inserted or not
+         :param chunk: Indicate how many rows will be uploaded at once
+         :param print_sql: boolean to indicate that you want the sql_statement to be printed on the console
+         :param output: Outputs the columns indicated in this list
+         :param bools2bits: Indicate whether the boolean columns should be converted to BIT to be inserted into SQL
+         :param nullable: Used within boolean_mapping_data_types to indicate which boolean column values to convert
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: A DataFrame with the output columns requested if output is not None, else None
+         """
+         if output is None:
+             output = []
+         if data is None:
+             # no data to upload
+             raise ValueError("The data provided is invalid!")
+         cursor = None
+         self.open_write_connection(commit_as_transaction)
+         results = pd.DataFrame(columns=output)
+
+         # Mapping the date datatype columns for SQL
+         data = self.date_mapping_data_types(data)
+
+         # Mapping the boolean columns to bit
+         if bools2bits:
+             data = self.boolean_mapping_data_types(data, nullable)
+
+         try:
+             cursor = self.con.cursor()
+             # Truncate table if needed
+             if truncate:
+                 cursor.execute("TRUNCATE TABLE [%s].[%s]" % (schema, table))
+             # Delete all records from the table if needed
+             if delete:
+                 cursor.execute("DELETE FROM [%s].[%s]" % (schema, table))
+             # Allow inserting into an identity column
+             if identity:
+                 cursor.execute("SET IDENTITY_INSERT [%s].[%s] ON" % (schema, table))
+             # Convert category columns to string
+             cat_cols = data.columns[(data.dtypes == 'category').values].to_list()
+             data[cat_cols] = data[cat_cols].astype(str)
+             # Deal with null values and apostrophes (')
+             data = data.replace("'NULL'", "NULL")
+             data = data.replace("'", "~~", regex=True)
+             data = data.fillna("null")
+             # Insert data into the destination table
+             records = [tuple(x) for x in data.values]
+             insert_ = """INSERT INTO [%s].[%s] """ % (schema, table)
+             insert_ += str(tuple(data.columns.values)).replace("(\'", "([").replace(
+                 '\', \'', '], [').replace('\')', '])')
+             if len(output) > 0:
+                 insert_ += " OUTPUT Inserted.[" + "], Inserted.[".join(output) + "] "
+             insert_ += """ VALUES """
+
+             insert_statements = list()
+             for batch in self._chunker(records, chunk):
+                 rows = str(batch).strip('[]').replace("~~", "''")
+                 rows = rows.replace("'NULL'", "NULL").replace("'null'", 'null')
+                 string = insert_ + rows
+                 string = self.convert_decimal_str(string)
+                 insert_statements.append(string)
+
+             if print_sql:
+                 print(';'.join(insert_statements))
+             cursor.execute(';'.join(insert_statements))
+             if len(output) > 0:
+                 results = pd.concat([results, pd.DataFrame.from_records(cursor.fetchall(), columns=output)])
+             self.con.commit()
+
+             # Disallow inserting into an identity column again
+             if identity:
+                 cursor.execute("SET IDENTITY_INSERT [%s].[%s] OFF" % (schema, table))
+
+             if len(output) > 0:
+                 return results.reset_index(drop=True)
+
+         except Exception:
+             raise Exception(traceback.format_exc())
+
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+
+     def update(self, data, update_list, on_list, schema, table, bool_cols=None, print_sql=False, batch_size=100,
+                output=None, nullable=True, commit_as_transaction=True):
+         """ This method updates a table in batches in SQL Server.
+         -----------------------------
+         UPDATE [SCHEMA].[TABLE]
+         SET update_list[0] = data[index, update_list[0]],
+             update_list[1] = data[index, update_list[1]]
+         OUTPUT output[0], output[1]
+         WHERE on_list[0] = data[index, on_list[0]]
+             AND on_list[1] = data[index, on_list[1]]
+         -----------------------------
+         :param data: DataFrame containing the data to update
+         :param update_list: list of columns to update
+         :param on_list: list of columns to apply the on clause
+         :param schema: Schema of the table in which the data will be uploaded
+         :param table: Table in which the data will be uploaded
+         :param bool_cols: list of columns gathering boolean types
+         :param print_sql: boolean to indicate that you want the sql_statement to be printed on the console
+         :param batch_size: Number of records to update in each iteration
+         :param output: Outputs the columns indicated in this list as a DataFrame. It should indicate whether the
+             column to retrieve is the Inserted one or the Deleted one (if nothing is indicated, the Deleted one
+             will be retrieved)
+         :param nullable: Indicate whether to update the table column with null or exclude the reference from the update
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: None
+         """
+         cursor = None
+         if data is None:
+             # no data to update
+             raise ValueError("The data provided is invalid!")
+
+         if output is None:
+             output = []
+         else:
+             output = [out if 'inserted' in out.lower() or 'deleted' in out.lower() else 'Deleted.[' + out + ']'
+                       for out in output]
+         results = pd.DataFrame(columns=output)
+
+         # reset indexes
+         data.reset_index(drop=True, inplace=True)
+
+         # Mapping boolean columns
+         if bool_cols is not None:
+             for col in bool_cols:
+                 data[col] = data[col].astype(bool)
+
+         # Mapping date type for SQL
+         data = self.date_mapping_data_types(data)
+
+         # create connection
+         self.open_write_connection(commit_as_transaction)
+
+         try:
+             # initialise cursor
+             cursor = self.con.cursor()
+
+             # extraction of the useful columns
+             data_update = data[list(set(update_list + on_list))]
+
+             # initialisation of the sql statement
+             sql_start = ''' UPDATE [%s].[%s] SET ''' % (schema, table)
+             iter_batch = math.ceil(data_update.shape[0] / batch_size)
+             for batch in range(iter_batch):
+                 batch_update = data_update.iloc[batch * batch_size: (batch + 1) * batch_size]
+
+                 sql_statement = ''
+                 for iindex in batch_update.index:
+                     # UPDATE [SCHEMA].[TABLE]
+                     sql_statement += sql_start
+
+                     # VALUES
+                     for col in update_list:
+                         if nullable:
+                             if pd.isna(batch_update.loc[iindex, col]):
+                                 sql_statement += " [%s] = NULL ," % col
+                             elif isinstance(batch_update.loc[iindex, col], bool):
+                                 sql_statement += " [%s] = %s ," % (col, int(batch_update.loc[iindex, col]))
+                             elif isinstance(batch_update.loc[iindex, col], Number):
+                                 sql_statement += " [%s] = %s ," % (col, batch_update.loc[iindex, col])
+                             else:
+                                 sql_statement += " [%s] = '%s' ," % (col, batch_update.loc[iindex, col])
+                         else:
+                             if pd.notna(batch_update.loc[iindex, col]):
+                                 if str(batch_update.loc[iindex, col]).upper() == 'NULL':
+                                     continue
+                                 elif isinstance(batch_update.loc[iindex, col], bool):
+                                     sql_statement += " [%s] = %s ," % (col, int(batch_update.loc[iindex, col]))
+                                 elif isinstance(batch_update.loc[iindex, col], Number):
+                                     sql_statement += " [%s] = %s ," % (col, batch_update.loc[iindex, col])
+                                 else:
+                                     sql_statement += " [%s] = '%s' ," % (col, batch_update.loc[iindex, col])
+
+                     # OUTPUT
+                     if len(output) > 0:
+                         sql_statement = sql_statement[:-1] + " OUTPUT " + ",".join(output) + ' '
+
+                     # WHERE (IS NULL so that null keys can match)
+                     sql_statement = sql_statement[:-1] + ' WHERE '
+                     for col in on_list:
+                         if pd.isna(batch_update.loc[iindex, col]):
+                             sql_statement += " [%s] IS NULL AND" % col
+                         elif isinstance(batch_update.loc[iindex, col], bool):
+                             sql_statement += " [%s] = %s AND" % (col, int(batch_update.loc[iindex, col]))
+                         elif isinstance(batch_update.loc[iindex, col], Number):
+                             sql_statement += " [%s] = %s AND" % (col, batch_update.loc[iindex, col])
+                         else:
+                             sql_statement += " [%s] = '%s' AND" % (col, batch_update.loc[iindex, col])
+
+                     # Strip the trailing AND and add a semicolon
+                     sql_statement = sql_statement[:-3] + ';'
+
+                 if print_sql:
+                     print(sql_statement)
+
+                 # executing statement
+                 if len(sql_statement) > 0:
+                     if len(output) > 0:
+                         cursor.execute(sql_statement)
+                         for cursor_number in range(len(sql_statement.split(';')) - 1):
+                             results = pd.concat([results, pd.DataFrame.from_records(cursor.fetchall(), columns=output)])
+                             cursor.nextset()
+                     else:
+                         cursor.execute(sql_statement)
+                     self.con.commit()
+
+             if len(output) > 0:
+                 return results.reset_index(drop=True)
+
+         except Exception:
+             raise Exception(traceback.format_exc())
+
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+
+     def bulk_update(self, data, update_list, on_list, schema, table, bool_cols=None, print_sql=False, output=None,
+                     chunk=1000, commit_as_transaction=True):
+         """ This method updates a table in batches in SQL Server via a temporary table.
+         -----------------------------
+         UPDATE [SCHEMA].[TABLE]
+         SET update_list[0] = data[index, update_list[0]],
+             update_list[1] = data[index, update_list[1]]
+         OUTPUT output[0], output[1]
+         WHERE on_list[0] = data[index, on_list[0]]
+             AND on_list[1] = data[index, on_list[1]]
+         -----------------------------
+         :param data: DataFrame containing the data to update
+         :param update_list: list of columns to update
+         :param on_list: list of columns to apply the on clause
+         :param schema: Schema of the table in which the data will be uploaded
+         :param table: Table in which the data will be uploaded
+         :param bool_cols: list of columns gathering boolean types
+         :param print_sql: boolean to indicate that you want the sql_statement to be printed on the console
+         :param output: Outputs the columns indicated in this list as a DataFrame. It should indicate whether the
+             column to retrieve is the Inserted one or the Deleted one (if nothing is indicated, the Deleted one
+             will be retrieved)
+         :param chunk: Indicate how many rows will be uploaded at once
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: None
+         """
+         cursor = None
+         if data is None:
+             # no data to update
+             raise ValueError("The data provided is invalid!")
+
+         if output is None:
+             output = []
+             sql_output = []
+         else:
+             sql_output = [out if 'inserted' in out.lower() or 'deleted' in out.lower() else 'Deleted.[' + out + ']'
+                           for out in output]
+         results = pd.DataFrame(columns=output)
+
+         # reset indexes
+         data.reset_index(drop=True, inplace=True)
+
+         # Mapping boolean columns
+         if bool_cols is not None:
+             for col in bool_cols:
+                 data[col] = data[col].astype(bool)
+
+         # Mapping date type for SQL
+         data = data[on_list + update_list]
+         data = self.date_mapping_data_types(data)
+
+         # create connection
+         self.open_write_connection(commit_as_transaction)
+
+         try:
+             # initialise cursor
+             cursor = self.con.cursor()
+
+             # Convert category columns to string
+             cat_cols = data.columns[(data.dtypes == 'category').values].to_list()
+             data[cat_cols] = data[cat_cols].astype(str)
+             # Deal with null values and apostrophes (')
+             data = data.replace("'NULL'", "NULL")
+             data = data.replace("'", "~~", regex=True)
+             data = data.fillna("null")
+
+             records = [tuple(x) for x in data.values]
+             temp_table = f'#Temp{schema}{table}'
+
+             for batch in self._chunker(records, chunk):
+                 batch_records = [tuple(x) for x in batch]
+                 # initialisation of the sql statement
+                 insert_ = f'DROP TABLE IF EXISTS {temp_table} '
+                 insert_ += f"SELECT * INTO {temp_table} FROM ( VALUES "
+                 temp_columns = str(tuple(data.columns.values)).replace("(\'", "([").replace(
+                     '\', \'', '], [').replace('\')', '])')
+                 rows = str(batch_records).strip('[]').replace("~~", "''")
+                 rows = rows.replace("'NULL'", "NULL").replace("'null'", 'null')
+                 sql_statement = insert_ + rows
+                 sql_statement = self.convert_decimal_str(sql_statement)
+                 sql_statement += f') AS TempTable {temp_columns}'
+
+                 col_update_set = ''
+                 for col in update_list:
+                     col_update_set += f' target.{col} = source.{col},'
+                 col_update_set = col_update_set[:-1]
+
+                 col_difference_check = ''
+                 for col in update_list:
+                     col_difference_check += f' target.{col} <> source.{col} OR'
+                 col_difference_check = col_difference_check[:-2]
+
+                 col_join_on = ''
+                 for col in on_list:
+                     col_join_on += f' source.{col} = target.{col} AND'
+                 col_join_on = col_join_on[:-3]
+
+                 sql_statement += f' UPDATE target SET {col_update_set} '
+
+                 if len(output) > 0:
+                     sql_statement += f" OUTPUT {','.join(sql_output)} "
+
+                 sql_statement += f'''FROM {schema}.{table} target
+                     JOIN {temp_table} as source
+                     ON {col_join_on}
+                     WHERE {col_difference_check}
+                     '''
+
+                 sql_statement += f' DROP TABLE IF EXISTS {temp_table} '
+
+                 if print_sql:
+                     print(sql_statement)
+
+                 # executing statement
+                 if len(sql_statement) > 0:
+                     if len(output) > 0:
+                         cursor.execute(sql_statement)
+                         cursor.nextset()
+                         results = pd.concat([results, pd.DataFrame.from_records(cursor.fetchall(), columns=output)])
+                     else:
+                         cursor.execute(sql_statement)
+
+                 self.con.commit()
+
+             if len(output) > 0:
+                 return results.reset_index(drop=True)
+
+         except Exception:
+             raise Exception(traceback.format_exc())
+
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+
+     def merge(self, data, staging_schema, staging_table, sp_schema, sp_name, truncate=False, chunk=1000,
+               commit_as_transaction=True):
+         """ Merge data from a staging table using a stored procedure. It requires a table in SQL which will store the
+         staging data. The method works as follows:
+         1.- Truncate the staging table according to the truncate parameter
+         2.- Insert the data into the staging table
+         3.- Execute a stored procedure to merge the staging table with the destination table
+         -----------------------------
+         df = pd.DataFrame({'col1': ['a', 'b'], 'col2': [1, 2]})
+         con_.merge(df, staging_schema, staging_table, sp_schema, sp_name, truncate=True)
+         -----------------------------
+         :param data: DataFrame to insert in the staging table
+         :param staging_schema: Staging table schema
+         :param staging_table: Staging table name
+         :param sp_schema: Stored Procedure schema
+         :param sp_name: Stored Procedure name
+         :param truncate: Indicate whether the staging table has to be truncated or not
+         :param chunk: Indicate how many rows will be uploaded at once
+         :param commit_as_transaction: Indicate whether the connection will be done using the autocommit option or not
+         :return: None
+         """
+         if data is None:
+             # no data to upload
+             raise ValueError("The data provided is invalid!")
+         cursor = None
+         self.open_write_connection(commit_as_transaction)
+         try:
+             cursor = self.con.cursor()
+             # Truncate Staging table if needed
+             if truncate:
+                 trunc_insert = """TRUNCATE TABLE [%s].[%s]""" % (staging_schema, staging_table)
+                 cursor.execute(trunc_insert)
+                 self.con.commit()
+             # Convert category columns to string
+             cat_cols = data.columns[(data.dtypes == 'category').values].to_list()
+             data[cat_cols] = data[cat_cols].astype(str)
+             # Deal with null values and apostrophes (')
+             data = data.replace("'NULL'", "NULL")
+             data = data.replace("'", "~~", regex=True)
+             data = data.fillna("null")
+             # Insert in Staging Table
+             records = [tuple(x) for x in data.values]
+             insert_ = """INSERT INTO [%s].[%s] """ % (staging_schema, staging_table)
+             insert_ = insert_ + str(tuple(data.columns.values)).replace("\'", "") + """ VALUES """
+             for batch in self._chunker(records, chunk):
+                 rows = str(batch).strip('[]').replace("~~", "''")
+                 rows = rows.replace("'NULL'", "NULL").replace("'null'", 'null')
+                 string = insert_ + rows
+                 string = self.convert_decimal_str(string)
+                 cursor.execute(string)
+                 self.con.commit()
+             # Execute Stored Procedure
+             exec_sp = """EXECUTE [%s].[%s]""" % (sp_schema, sp_name)
+             cursor.execute(exec_sp)
+             self.con.commit()
+         except Exception:
+             raise Exception(traceback.format_exc())
+         finally:
+             if cursor:
+                 cursor.close()
+             self.close_connection()
+
+     def merge_into(self, data, schema, table, on_list, update_check=False, update_set=None, bool_cols=None,
+                    identity=False, print_sql=False, nullable=False):
+         """
+         This method is equivalent to the 'MERGE INTO' of T-SQL. Schema and table define the Target, while data is the
+         Source. Please refer to the schema below for clarification on the arguments.
+         Aspects to take into consideration:
+         1.- This method will not work properly if data contains duplicates. It does not matter if the target contains
+             duplicates because DISTINCT is used to call the table.
+         2.- Pay attention when the dataset contains booleans, because pandas gets bool columns from SQL Server as
+             [True, False] instead of [0, 1]. The method needs boolean data to be inserted as [0, 1].
+         3.- A similar problem arises with datetime columns: time_format is a dict whose keys are the names of the
+             date columns and whose values are the formats those columns have to have.
+         Version notes: version 1.01 is a bit simpler than version 1.0; it expects the names of the boolean and
+         datetime columns (and the format for the latter) instead of trying to infer those columns as version 1.0
+         did, which was sometimes problematic. Version 1.01 is therefore more reliable, but the call to the method
+         takes longer to write.
+         -------------------------
+         MERGE INTO [SCHEMA].[TABLE] AS TARGET
+         USING (
+             data
+         ) AS SOURCE
+         ON TARGET.on_list[0] = SOURCE.on_list[0]
+         AND TARGET.on_list[1] = SOURCE.on_list[1]
+         ...
+         AND TARGET.on_list[n] = SOURCE.on_list[n]
+         WHEN MATCHED AND (
+             TARGET.update_check[0] <> SOURCE.update_check[0]
+             OR TARGET.update_check[1] <> SOURCE.update_check[1]
+             ...
+             OR TARGET.update_check[n] <> SOURCE.update_check[n]
+         )
+         UPDATE SET TARGET.update_check[0] = SOURCE.update_check[0],
+             ...
+             TARGET.update_check[n] = SOURCE.update_check[n],
+             TARGET.update_set[0] = SOURCE.update_set[0],
+             TARGET.update_set[1] = SOURCE.update_set[1],
+             ...
+             TARGET.update_set[n] = SOURCE.update_set[n]
+         WHEN NOT MATCHED BY TARGET THEN
+         INSERT
+         (
+             all columns from [SCHEMA].[TABLE]
+         )
+         VALUES
+         (all columns from data)
+         -------------------------------
+         :param data: DataFrame containing the data to upload/update
+         :param schema: Schema of the table in which the data will be uploaded
+         :param table: Table in which the data will be uploaded
+         :param on_list: list of columns to apply the on clause
+         :param update_check: list of columns to do the check
+         :param update_set: list of columns to update
+         :param bool_cols: list of columns gathering boolean types
+         :param identity: Boolean to indicate whether the identity columns will be inserted or not; it only makes
+             sense when the table has an identity column in its definition
+         :param print_sql: boolean to indicate that you want the sql_statement to be printed on the console
+         :param nullable: Used for boolean_mapping_data_types to indicate which boolean column values to convert
+         :return: None
+         """
+         if data is None:
+             # no data to upload
+             raise ValueError("The data provided is invalid!")
+
+         if data.shape[0] != data.drop_duplicates().shape[0]:
+             raise TypeError("There are duplicate values in your dataframe, it will not work properly on "
+                             "pd.concat().drop_duplicates()")
+
+         # if update_set has values assigned, update_check has to have values assigned
+         if update_set is not None:
+             if update_check is None:
+                 raise ValueError("Please assign values to update_check in order to use update_set")
+         else:
+             update_set = update_check
+
+         # Mapping boolean columns
+         if bool_cols is not None:
+             for col in bool_cols:
+                 data[col] = data[col].astype(bool)
+
+         # Mapping date and boolean type for SQL
+         data = self.date_mapping_data_types(data)
+         data = self.boolean_mapping_data_types(data, nullable)
+
+         try:
+             # call the table from the server
+             data_table = self.query("""SELECT DISTINCT * FROM [%s].[%s]""" % (schema, table))
+
+             if data_table.shape[0] == 0:
+                 print("The destination table is empty so all the data will be inserted")
+                 self.insert(data, schema, table)
+
+             else:
+                 for data_col in data.columns:
+                     if ("int" in str(type(data_table[data_col].iloc[0]))) & (
+                             data_table[data_col].isnull().sum() > 0):
+                         data_table[data_col] = data_table[data_col].astype(float)
+                     else:
+                         data_table[data_col] = data_table[data_col].astype(type(data[data_col].iloc[0]))
+
+                 coincidence = pd.DataFrame()
+                 if data_table.shape[0] > 0:
+                     for col in data_table.columns.values.tolist():
+                         if isinstance(data_table.loc[0, col], bool):
+                             data_table[col] = data_table[col].apply(
+                                 lambda x: 1 if x is True else 0 if x is False else np.nan)
+                     if bool_cols is not None:
+                         for col in bool_cols:
+                             data_table[col] = data_table[col].astype(bool)
+                     # join the input table with the one in the database
+                     coincidence = data.merge(data_table[on_list], how='inner', on=on_list)
+                     # WHEN MATCHED AND ... UPDATE SET
+                     if update_check:
+                         coincidence2 = coincidence.merge(data_table[list(set(on_list + update_check))],
+                                                          how='inner',
+                                                          on=list(set(on_list + update_check)))
+                         data_update = pd.concat([coincidence, coincidence2], ignore_index=True)
+                         data_update.drop_duplicates(keep=False, inplace=True)
+                         if data_update.shape[0] > 0:
+                             self.update(data_update, list(set(update_set + update_check)), on_list, schema, table,
+                                         print_sql=print_sql)
+
+                 # WHEN NOT MATCHED BY TARGET THEN... INSERT
+                 data_insert = pd.concat([data, coincidence], ignore_index=True)
+                 data_insert.drop_duplicates(keep=False, inplace=True)
+                 if data_insert.shape[0] > 0:
+                     self.insert(data_insert, schema, table, identity=identity, print_sql=print_sql)
+
+         except Exception:
+             raise Exception(traceback.format_exc())
+
+     @staticmethod
+     def date_mapping_data_types(data):
+         """
+         Map datetime variables so they can be inserted in SQL
+         :param data: DataFrame containing the variables to map
+         :return: The mapped DataFrame
+         """
+         first_index = data.index[0]
+         date_col = data.columns[
+             [('date' in str(type(data.loc[first_index, col]))) | ('timestamp' in str(type(data.loc[first_index, col])))
+              for col in data.columns]]
+         if len(date_col) > 0:
+             for col in date_col:
+                 data[col] = pd.to_datetime(data[col])
+                 if data[col].dtypes == 'O':
+                     data[col] = data[col].dt.strftime('%Y-%m-%d')
+                 else:
+                     data[col] = data[col].dt.strftime('%Y-%m-%d %H:%M:%S')
+                 data.loc[data[col] == 'NaT', col] = np.nan
+
+         return data
+
+     @staticmethod
+     def boolean_mapping_data_types(data, nullable=False):
+         """
+         Map boolean variables so they can be inserted in SQL
+         :param data: DataFrame containing the variables to map
+         :param nullable: Determine whether to convert null values within boolean columns to boolean format or not
+         :return: The mapped DataFrame
+         """
+         first_index = data.index[0]
+         bool_col = data.columns[
+             [('bool' in str(type(data.loc[first_index, col]))) | ('object' in str(type(data.loc[first_index, col])))
+              for col in data.columns]]
+         if len(bool_col) > 0:
+             for col in bool_col:
+                 if nullable:
+                     bool_not_null = data[data[col].notna()]
+                     if bool_not_null.shape[0] > 0:
+                         for iindex in bool_not_null.index:
+                             data.at[iindex, col] = int(data.loc[iindex, col])
+                 else:
+                     data[col] = data[col].apply(lambda x: 1 if x is True else 0)
+
+         return data
+
+     @staticmethod
+     def id_next(con_db, table, schema, id_col, print_sql=False):
+         """
+         This static method returns the next id to be inserted into a table for SQL Server
+         :param con_db: class to connect to a SQL Server database
+         :param table: name of the table
+         :param schema: name of the schema
+         :param id_col: name of the id column
+         :param print_sql: bool to indicate if you want the sql statement to be printed on the Python console
+         :return: Max ID + 1 for id_col
+         """
+         sql_statement = ("SELECT CASE WHEN MAX(%s) IS NULL THEN 1 ELSE MAX(%s) + 1 END AS [Id] FROM [%s].[%s]" % (
+             id_col, id_col, schema, table))
+         if print_sql:
+             print(sql_statement)
+         df = con_db.query(sql_statement)
+         id_ = df.loc[0, 'Id']
+         return id_
+
+     @staticmethod
+     def convert_decimal_str(string):
+         """ Method to strip the Python Decimal('...') wrapper from a statement string
+         :param string: String variable to parse
+         """
+         string = re.sub(r"'\)(?!(,[ ]+\())(?=([^$]))", "", string)
+         return re.sub(r"Decimal\('", "", string)
+
+     @staticmethod
+     def infer_datetime(data, infer_datetime_format):
+         """ Method to infer datetime columns and format them as strings
+         :param data: DataFrame to parse
+         :param infer_datetime_format: format to be used for the datetime columns
+         """
+         for col in data.select_dtypes(include=['datetime64']).columns:
+             data[col] = pd.to_datetime(data[col]).dt.strftime(infer_datetime_format)
+
+         return data
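A minimal usage sketch of the class above, assuming the environment variables from the __init__ docstring example are set (SQL-ONPREM-PROD, SQL-FRUITFLOW-DBNAME, SQL-FRUITFLOW-USERNAME, SQL-FRUITFLOW-PASSWORD); the schema and table names below are hypothetical:

    import pandas as pd
    from berryworld.sql_connenction import SQLConnection

    # 'FruitFlow'/'prod' mirror the docstring example; a versioned ODBC driver
    # must be installed, since __init__ probes pyodbc.drivers() on construction
    con_ = SQLConnection('FruitFlow', 'prod')

    # Read: query() opens a connection, returns a DataFrame, and closes it again
    orders = con_.query("SELECT TOP 10 * FROM [dbo].[Orders]")  # hypothetical table

    # Write: insert a small frame in chunks, truncating the table first
    new_rows = pd.DataFrame({'col1': ['a', 'b'], 'col2': [1, 2]})
    con_.insert(new_rows, 'dbo', 'Orders', truncate=True, chunk=500)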
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: berryworld
- Version: 1.0.0.189823
+ Version: 1.0.0.192686
  Summary: Handy classes to improve ETL processes
  Home-page: https://www.berryworld.com
  Author: BerryWorld ltd
@@ -1,4 +1,4 @@
- berryworld/__init__.py,sha256=WJKsOTkpDuQqBU-MppW2YNdRLnEZ8Q7giOvNI4orCi0,1120
+ berryworld/__init__.py,sha256=-441T8nq6t3cSYeHweND8HAttfEcBfqij3PdbhhvoUw,1163
  berryworld/aks_logs.py,sha256=Gb2_cokiZbEX01Yoptd0MxpDociaug-GrXdwliyxFBo,2293
  berryworld/allocation_solver.py,sha256=asFtaCAze6-eHUGWXA0kAp67UBS-Upj1KKdrVLj_ttQ,8513
  berryworld/app_logs.py,sha256=MKzKPYd3JuPfOQNAapIgaeZeFHw1z_w2mbn9I6QCADE,4180
@@ -19,18 +19,19 @@ berryworld/power_automate.py,sha256=V86QEGG9H36DrDvod9Q6yp8OUu307hfYcXJhw06pYrA,
  berryworld/sharepoint_con.py,sha256=TuH-Vxk1VxjTi7x80KFssf_J8YPLRXpV27RBaFZi37U,22254
  berryworld/snowflake_conn.py,sha256=go5ZJjnhz5SkG83B0G0XZSwKgU6tg7AFTBso59oRG5M,2434
  berryworld/sql_conn.py,sha256=tYKgD8ja7NQuvLB1WBjdsJbPcm3eX1Y76QPTEgx8R8Q,47564
+ berryworld/sql_connenction.py,sha256=lnPFw0PLMGTPuRkW92eSJJcZlUqHJLC7UdoghbhMobA,48333
  berryworld/teams_logging.py,sha256=8NwXyWr4fLj7W6GzAm2nRQCGFDxibQpAHDHHD24FrP8,6997
  berryworld/transportation_solver.py,sha256=tNc1JJk71azIBccdWVHbqcvXWhalOdKffv6HmBD6tG0,5014
  berryworld/verify_keys.py,sha256=X4Nuz3o0XbRDYofbJGvxIDeN5gfWj19PN7lhO6T3hR8,4356
  berryworld/vivantio.py,sha256=QfZo0UKqkzVRg_LyiwivNd3aEup4TH57x4KxLZkCJwc,10627
  berryworld/vivantio_logging.py,sha256=ciy7gA4u3FrgUIpEBnMgocbNPp6jcu9TPoy-kLcrTZU,5736
  berryworld/xml_parser.py,sha256=HWD71NaTN3DaIOGT6Wzxs4CEsroFhGQwe9iPLIL80Co,957
- berryworld-1.0.0.189823.dist-info/licenses/LICENSE,sha256=vtkVCJM6E2af2gnsi2XxKPr4WY-uIbvzVLXieFND0UU,1074
+ berryworld-1.0.0.192686.dist-info/licenses/LICENSE,sha256=vtkVCJM6E2af2gnsi2XxKPr4WY-uIbvzVLXieFND0UU,1074
  tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tests/test_allocation_config.py,sha256=e12l6fE9U57eSPS35g6ekJ_hol7-RHg89JV60_m1BlE,4633
  tests/test_handy_mix_config.py,sha256=Un56mz9KJmdn4K4OwzHAHLSRzDU1Xv2nFrONNuzOG04,2594
  tests/test_xml_parser.py,sha256=3QTlhFEd6KbK6nRFKZnc35tad6wqukTbe4QrFi8mr_8,859
- berryworld-1.0.0.189823.dist-info/METADATA,sha256=0VIdQ1tBiCaDM3CHc3tHB7_jhAgG4h_EsZY0tFSaEVQ,1362
- berryworld-1.0.0.189823.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
- berryworld-1.0.0.189823.dist-info/top_level.txt,sha256=GIZ5qy-P5oxfEH755vA1IMFeTVdX3-40JxMe6nOe5I8,17
- berryworld-1.0.0.189823.dist-info/RECORD,,
+ berryworld-1.0.0.192686.dist-info/METADATA,sha256=aWLN6nx7m9vDBMnRFALaL6CNZZCdRKznjNumwtc49bQ,1362
+ berryworld-1.0.0.192686.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ berryworld-1.0.0.192686.dist-info/top_level.txt,sha256=GIZ5qy-P5oxfEH755vA1IMFeTVdX3-40JxMe6nOe5I8,17
+ berryworld-1.0.0.192686.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.3.1)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any